From c3e0a444383acf2e4aa2f0393ae036442a888c49 Mon Sep 17 00:00:00 2001 From: Max Filippov Date: Fri, 27 Sep 2019 14:12:57 -0700 Subject: xtensa: clean up empty include files Remove empty hw_irq.h and user.h from arch/xtensa/include/asm and use generic versions instead. Signed-off-by: Max Filippov --- arch/xtensa/include/asm/Kbuild | 2 ++ arch/xtensa/include/asm/hw_irq.h | 14 -------------- arch/xtensa/include/asm/user.h | 20 -------------------- 3 files changed, 2 insertions(+), 34 deletions(-) delete mode 100644 arch/xtensa/include/asm/hw_irq.h delete mode 100644 arch/xtensa/include/asm/user.h (limited to 'arch/xtensa') diff --git a/arch/xtensa/include/asm/Kbuild b/arch/xtensa/include/asm/Kbuild index ffa0cf7f66c3..3acc31e55e02 100644 --- a/arch/xtensa/include/asm/Kbuild +++ b/arch/xtensa/include/asm/Kbuild @@ -11,6 +11,7 @@ generic-y += exec.h generic-y += extable.h generic-y += fb.h generic-y += hardirq.h +generic-y += hw_irq.h generic-y += irq_regs.h generic-y += irq_work.h generic-y += kdebug.h @@ -30,6 +31,7 @@ generic-y += qspinlock.h generic-y += sections.h generic-y += topology.h generic-y += trace_clock.h +generic-y += user.h generic-y += vga.h generic-y += word-at-a-time.h generic-y += xor.h diff --git a/arch/xtensa/include/asm/hw_irq.h b/arch/xtensa/include/asm/hw_irq.h deleted file mode 100644 index 3ddbea759b2b..000000000000 --- a/arch/xtensa/include/asm/hw_irq.h +++ /dev/null @@ -1,14 +0,0 @@ -/* - * include/asm-xtensa/hw_irq.h - * - * This file is subject to the terms and conditions of the GNU General - * Public License. See the file "COPYING" in the main directory of - * this archive for more details. - * - * Copyright (C) 2002 - 2005 Tensilica Inc. - */ - -#ifndef _XTENSA_HW_IRQ_H -#define _XTENSA_HW_IRQ_H - -#endif diff --git a/arch/xtensa/include/asm/user.h b/arch/xtensa/include/asm/user.h deleted file mode 100644 index 2c3ed23354a8..000000000000 --- a/arch/xtensa/include/asm/user.h +++ /dev/null @@ -1,20 +0,0 @@ -/* - * include/asm-xtensa/user.h - * - * Xtensa Processor version. - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright (C) 2001 - 2005 Tensilica Inc. - */ - -#ifndef _XTENSA_USER_H -#define _XTENSA_USER_H - -/* This file usually defines a 'struct user' structure. However, it is only - * used for a.out files, which are not supported on Xtensa.
- */ - -#endif /* _XTENSA_USER_H */ -- cgit v1.2.3 From 6591685d50043f615a1ad7ddd5bb263ef54808fc Mon Sep 17 00:00:00 2001 From: Max Filippov Date: Fri, 27 Sep 2019 21:28:47 -0700 Subject: xtensa: move XCHAL_KIO_* definitions to kmem_layout.h These address and size definitions describe the xtensa kernel memory layout. Move them from vectors.h to kmem_layout.h. Signed-off-by: Max Filippov --- arch/xtensa/include/asm/kmem_layout.h | 29 ++++++++++++++++++++++++ arch/xtensa/include/asm/vectors.h | 42 +++-------------------------------- 2 files changed, 32 insertions(+), 39 deletions(-) (limited to 'arch/xtensa') diff --git a/arch/xtensa/include/asm/kmem_layout.h b/arch/xtensa/include/asm/kmem_layout.h index 9c12babc016c..7cbf68ca7106 100644 --- a/arch/xtensa/include/asm/kmem_layout.h +++ b/arch/xtensa/include/asm/kmem_layout.h @@ -11,6 +11,7 @@ #ifndef _XTENSA_KMEM_LAYOUT_H #define _XTENSA_KMEM_LAYOUT_H +#include #include #ifdef CONFIG_MMU @@ -65,6 +66,34 @@ #endif +/* KIO definition */ + +#if XCHAL_HAVE_PTP_MMU +#define XCHAL_KIO_CACHED_VADDR 0xe0000000 +#define XCHAL_KIO_BYPASS_VADDR 0xf0000000 +#define XCHAL_KIO_DEFAULT_PADDR 0xf0000000 +#else +#define XCHAL_KIO_BYPASS_VADDR XCHAL_KIO_PADDR +#define XCHAL_KIO_DEFAULT_PADDR 0x90000000 +#endif +#define XCHAL_KIO_SIZE 0x10000000 + +#if (!XCHAL_HAVE_PTP_MMU || XCHAL_HAVE_SPANNING_WAY) && defined(CONFIG_OF) +#define XCHAL_KIO_PADDR xtensa_get_kio_paddr() +#ifndef __ASSEMBLY__ +extern unsigned long xtensa_kio_paddr; + +static inline unsigned long xtensa_get_kio_paddr(void) +{ + return xtensa_kio_paddr; +} +#endif +#else +#define XCHAL_KIO_PADDR XCHAL_KIO_DEFAULT_PADDR +#endif + +/* KERNEL_STACK definition */ + #ifndef CONFIG_KASAN #define KERNEL_STACK_SHIFT 13 #else diff --git a/arch/xtensa/include/asm/vectors.h b/arch/xtensa/include/asm/vectors.h index 79fe3007919e..4220c6dac44f 100644 --- a/arch/xtensa/include/asm/vectors.h +++ b/arch/xtensa/include/asm/vectors.h @@ -21,50 +21,14 @@ #include #include -#if XCHAL_HAVE_PTP_MMU -#define XCHAL_KIO_CACHED_VADDR 0xe0000000 -#define XCHAL_KIO_BYPASS_VADDR 0xf0000000 -#define XCHAL_KIO_DEFAULT_PADDR 0xf0000000 -#else -#define XCHAL_KIO_BYPASS_VADDR XCHAL_KIO_PADDR -#define XCHAL_KIO_DEFAULT_PADDR 0x90000000 -#endif -#define XCHAL_KIO_SIZE 0x10000000 - -#if (!XCHAL_HAVE_PTP_MMU || XCHAL_HAVE_SPANNING_WAY) && defined(CONFIG_OF) -#define XCHAL_KIO_PADDR xtensa_get_kio_paddr() -#ifndef __ASSEMBLY__ -extern unsigned long xtensa_kio_paddr; - -static inline unsigned long xtensa_get_kio_paddr(void) -{ - return xtensa_kio_paddr; -} -#endif -#else -#define XCHAL_KIO_PADDR XCHAL_KIO_DEFAULT_PADDR -#endif - -#if defined(CONFIG_MMU) - -#if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY -/* Image Virtual Start Address */ -#define KERNELOFFSET (XCHAL_KSEG_CACHED_VADDR + \ - CONFIG_KERNEL_LOAD_ADDRESS - \ +#if defined(CONFIG_MMU) && XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY +#define KERNELOFFSET (CONFIG_KERNEL_LOAD_ADDRESS + \ + XCHAL_KSEG_CACHED_VADDR - \ XCHAL_KSEG_PADDR) #else #define KERNELOFFSET CONFIG_KERNEL_LOAD_ADDRESS #endif -#else /* !defined(CONFIG_MMU) */ - /* MMU Not being used - Virtual == Physical */ - -/* Location of the start of the kernel text, _start */ -#define KERNELOFFSET CONFIG_KERNEL_LOAD_ADDRESS - - -#endif /* CONFIG_MMU */ - #define RESET_VECTOR1_VADDR (XCHAL_RESET_VECTOR1_VADDR) #ifdef CONFIG_VECTORS_OFFSET #define VECBASE_VADDR (KERNELOFFSET - CONFIG_VECTORS_OFFSET) -- cgit v1.2.3 From 6af4ab570db3dc71e877271a17e5e2b337e0bdc0 Mon Sep 17 00:00:00 2001 From: Max Filippov Date: Sun, 29 Sep 2019
19:55:09 -0700 Subject: xtensa: move MPU constants from .data to .ref.rodata MPU attribute mapping table is R/O, move it from .data to __REFCONST (as the rest of the _startup code where initialize_cacheattr is used is in the __REF section). This allows executing initialize_cacheattr before the data section of the XIP kernel is relocated to its place. Signed-off-by: Max Filippov --- arch/xtensa/include/asm/initialize_mmu.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'arch/xtensa') diff --git a/arch/xtensa/include/asm/initialize_mmu.h b/arch/xtensa/include/asm/initialize_mmu.h index 3b054d2bede0..e3e1d9a1ef69 100644 --- a/arch/xtensa/include/asm/initialize_mmu.h +++ b/arch/xtensa/include/asm/initialize_mmu.h @@ -23,6 +23,7 @@ #ifndef _XTENSA_INITIALIZE_MMU_H #define _XTENSA_INITIALIZE_MMU_H +#include #include #include @@ -183,7 +184,7 @@ #endif #if XCHAL_HAVE_MPU - .data + __REFCONST .align 4 .Lattribute_table: .long 0x000000, 0x1fff00, 0x1ddf00, 0x1eef00 -- cgit v1.2.3 From 9fab17ca9afe3e21c2268a742103f477316af6ec Mon Sep 17 00:00:00 2001 From: Max Filippov Date: Fri, 27 Sep 2019 17:21:25 -0700 Subject: xtensa: fix section name for start_info .data.init.refok has been removed from the kernel long ago, replaced with __REFDATA. Fix start_info definition. Signed-off-by: Max Filippov --- arch/xtensa/kernel/head.S | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'arch/xtensa') diff --git a/arch/xtensa/kernel/head.S b/arch/xtensa/kernel/head.S index 4ae998b5a348..2cec13a457d7 100644 --- a/arch/xtensa/kernel/head.S +++ b/arch/xtensa/kernel/head.S @@ -355,10 +355,10 @@ ENDPROC(cpu_restart) * DATA section */ - .section ".data.init.refok" - .align 4 + __REFDATA + .align 4 ENTRY(start_info) - .long init_thread_union + KERNEL_STACK_SIZE + .long init_thread_union + KERNEL_STACK_SIZE /* * BSS section -- cgit v1.2.3 From 123b8db839b3695c8296121b9962d1a195417843 Mon Sep 17 00:00:00 2001 From: Max Filippov Date: Fri, 27 Sep 2019 17:30:05 -0700 Subject: xtensa: use correct symbol for the end of .rodata Use correct symbol for the end of .rodata section when dumping virtual memory layout. This fixes odd rodata size with XIP kernel. Signed-off-by: Max Filippov --- arch/xtensa/mm/init.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/xtensa') diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c index d898ed67d890..19c625e6d81f 100644 --- a/arch/xtensa/mm/init.c +++ b/arch/xtensa/mm/init.c @@ -193,8 +193,8 @@ void __init mem_init(void) ((max_low_pfn - min_low_pfn) * PAGE_SIZE) >> 20, (unsigned long)_text, (unsigned long)_etext, (unsigned long)(_etext - _text) >> 10, - (unsigned long)__start_rodata, (unsigned long)_sdata, - (unsigned long)(_sdata - __start_rodata) >> 10, + (unsigned long)__start_rodata, (unsigned long)__end_rodata, + (unsigned long)(__end_rodata - __start_rodata) >> 10, (unsigned long)_sdata, (unsigned long)_edata, (unsigned long)(_edata - _sdata) >> 10, (unsigned long)__init_begin, (unsigned long)__init_end, -- cgit v1.2.3 From 76743c0e0915af6ae1d960c14e8c1dcb3e238f23 Mon Sep 17 00:00:00 2001 From: Max Filippov Date: Tue, 1 Oct 2019 00:25:30 -0700 Subject: xtensa: move kernel memory layout to platform options Currently kernel memory layout settings are split between "Processor type and features" and "Platform options" menus. Consolidate them under "Platform options". 
Signed-off-by: Max Filippov --- arch/xtensa/Kconfig | 348 ++++++++++++++++++++++++++-------------------------- 1 file changed, 175 insertions(+), 173 deletions(-) (limited to 'arch/xtensa') diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig index a8e7beb6b7b5..ea9f63c7672d 100644 --- a/arch/xtensa/Kconfig +++ b/arch/xtensa/Kconfig @@ -213,151 +213,6 @@ config HOTPLUG_CPU Say N if you want to disable CPU hotplug. -config INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX - bool "Initialize Xtensa MMU inside the Linux kernel code" - depends on !XTENSA_VARIANT_FSF && !XTENSA_VARIANT_DC232B - default y if XTENSA_VARIANT_DC233C || XTENSA_VARIANT_CUSTOM - help - Earlier version initialized the MMU in the exception vector - before jumping to _startup in head.S and had an advantage that - it was possible to place a software breakpoint at 'reset' and - then enter your normal kernel breakpoints once the MMU was mapped - to the kernel mappings (0XC0000000). - - This unfortunately won't work for U-Boot and likely also wont - work for using KEXEC to have a hot kernel ready for doing a - KDUMP. - - So now the MMU is initialized in head.S but it's necessary to - use hardware breakpoints (gdb 'hbreak' cmd) to break at _startup. - xt-gdb can't place a Software Breakpoint in the 0XD region prior - to mapping the MMU and after mapping even if the area of low memory - was mapped gdb wouldn't remove the breakpoint on hitting it as the - PC wouldn't match. Since Hardware Breakpoints are recommended for - Linux configurations it seems reasonable to just assume they exist - and leave this older mechanism for unfortunate souls that choose - not to follow Tensilica's recommendation. - - Selecting this will cause U-Boot to set the KERNEL Load and Entry - address at 0x00003000 instead of the mapped std of 0xD0003000. - - If in doubt, say Y. - -config MEMMAP_CACHEATTR - hex "Cache attributes for the memory address space" - depends on !MMU - default 0x22222222 - help - These cache attributes are set up for noMMU systems. Each hex digit - specifies cache attributes for the corresponding 512MB memory - region: bits 0..3 -- for addresses 0x00000000..0x1fffffff, - bits 4..7 -- for addresses 0x20000000..0x3fffffff, and so on. - - Cache attribute values are specific for the MMU type. - For region protection MMUs: - 1: WT cached, - 2: cache bypass, - 4: WB cached, - f: illegal. - For ful MMU: - bit 0: executable, - bit 1: writable, - bits 2..3: - 0: cache bypass, - 1: WB cache, - 2: WT cache, - 3: special (c and e are illegal, f is reserved). - For MPU: - 0: illegal, - 1: WB cache, - 2: WB, no-write-allocate cache, - 3: WT cache, - 4: cache bypass. - -config KSEG_PADDR - hex "Physical address of the KSEG mapping" - depends on INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX && MMU - default 0x00000000 - help - This is the physical address where KSEG is mapped. Please refer to - the chosen KSEG layout help for the required address alignment. - Unpacked kernel image (including vectors) must be located completely - within KSEG. - Physical memory below this address is not available to linux. - - If unsure, leave the default value here. - -config KERNEL_LOAD_ADDRESS - hex "Kernel load address" - default 0x60003000 if !MMU - default 0x00003000 if MMU && INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX - default 0xd0003000 if MMU && !INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX - help - This is the address where the kernel is loaded. - It is virtual address for MMUv2 configurations and physical address - for all other configurations. 
- - If unsure, leave the default value here. - -config VECTORS_OFFSET - hex "Kernel vectors offset" - default 0x00003000 - help - This is the offset of the kernel image from the relocatable vectors - base. - - If unsure, leave the default value here. - -choice - prompt "KSEG layout" - depends on MMU - default XTENSA_KSEG_MMU_V2 - -config XTENSA_KSEG_MMU_V2 - bool "MMUv2: 128MB cached + 128MB uncached" - help - MMUv2 compatible kernel memory map: TLB way 5 maps 128MB starting - at KSEG_PADDR to 0xd0000000 with cache and to 0xd8000000 - without cache. - KSEG_PADDR must be aligned to 128MB. - -config XTENSA_KSEG_256M - bool "256MB cached + 256MB uncached" - depends on INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX - help - TLB way 6 maps 256MB starting at KSEG_PADDR to 0xb0000000 - with cache and to 0xc0000000 without cache. - KSEG_PADDR must be aligned to 256MB. - -config XTENSA_KSEG_512M - bool "512MB cached + 512MB uncached" - depends on INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX - help - TLB way 6 maps 512MB starting at KSEG_PADDR to 0xa0000000 - with cache and to 0xc0000000 without cache. - KSEG_PADDR must be aligned to 256MB. - -endchoice - -config HIGHMEM - bool "High Memory Support" - depends on MMU - help - Linux can use the full amount of RAM in the system by - default. However, the default MMUv2 setup only maps the - lowermost 128 MB of memory linearly to the areas starting - at 0xd0000000 (cached) and 0xd8000000 (uncached). - When there are more than 128 MB memory in the system not - all of it can be "permanently mapped" by the kernel. - The physical memory that's not permanently mapped is called - "high memory". - - If you are compiling a kernel which will never run on a - machine with more than 128 MB total physical RAM, answer - N here. - - If unsure, say Y. - config FAST_SYSCALL_XTENSA bool "Enable fast atomic syscalls" default n @@ -561,34 +416,6 @@ config SIMDISK1_FILENAME Another simulated disk in a host file for a buildroot-independent storage. -config FORCE_MAX_ZONEORDER - int "Maximum zone order" - default "11" - help - The kernel memory allocator divides physically contiguous memory - blocks into "zones", where each zone is a power of two number of - pages. This option selects the largest power of two that the kernel - keeps in the memory allocator. If you need to allocate very large - blocks of physically contiguous memory, then you may need to - increase this value. - - This config option is actually maximum order plus one. For example, - a value of 11 means that the largest free memory block is 2^10 pages. - -config PLATFORM_WANT_DEFAULT_MEM - def_bool n - -config DEFAULT_MEM_START - hex - prompt "PAGE_OFFSET/PHYS_OFFSET" if !MMU && PLATFORM_WANT_DEFAULT_MEM - default 0x60000000 if PLATFORM_WANT_DEFAULT_MEM - default 0x00000000 - help - This is the base address used for both PAGE_OFFSET and PHYS_OFFSET - in noMMU configurations. - - If unsure, leave the default value here. - config XTFPGA_LCD bool "Enable XTFPGA LCD driver" depends on XTENSA_PLATFORM_XTFPGA @@ -619,6 +446,181 @@ config XTFPGA_LCD_8BIT_ACCESS only be used with 8-bit interface. Please consult prototyping user guide for your board for the correct interface width. 
+comment "Kernel memory layout" + +config INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX + bool "Initialize Xtensa MMU inside the Linux kernel code" + depends on !XTENSA_VARIANT_FSF && !XTENSA_VARIANT_DC232B + default y if XTENSA_VARIANT_DC233C || XTENSA_VARIANT_CUSTOM + help + Earlier version initialized the MMU in the exception vector + before jumping to _startup in head.S and had an advantage that + it was possible to place a software breakpoint at 'reset' and + then enter your normal kernel breakpoints once the MMU was mapped + to the kernel mappings (0XC0000000). + + This unfortunately won't work for U-Boot and likely also wont + work for using KEXEC to have a hot kernel ready for doing a + KDUMP. + + So now the MMU is initialized in head.S but it's necessary to + use hardware breakpoints (gdb 'hbreak' cmd) to break at _startup. + xt-gdb can't place a Software Breakpoint in the 0XD region prior + to mapping the MMU and after mapping even if the area of low memory + was mapped gdb wouldn't remove the breakpoint on hitting it as the + PC wouldn't match. Since Hardware Breakpoints are recommended for + Linux configurations it seems reasonable to just assume they exist + and leave this older mechanism for unfortunate souls that choose + not to follow Tensilica's recommendation. + + Selecting this will cause U-Boot to set the KERNEL Load and Entry + address at 0x00003000 instead of the mapped std of 0xD0003000. + + If in doubt, say Y. + +config MEMMAP_CACHEATTR + hex "Cache attributes for the memory address space" + depends on !MMU + default 0x22222222 + help + These cache attributes are set up for noMMU systems. Each hex digit + specifies cache attributes for the corresponding 512MB memory + region: bits 0..3 -- for addresses 0x00000000..0x1fffffff, + bits 4..7 -- for addresses 0x20000000..0x3fffffff, and so on. + + Cache attribute values are specific for the MMU type. + For region protection MMUs: + 1: WT cached, + 2: cache bypass, + 4: WB cached, + f: illegal. + For ful MMU: + bit 0: executable, + bit 1: writable, + bits 2..3: + 0: cache bypass, + 1: WB cache, + 2: WT cache, + 3: special (c and e are illegal, f is reserved). + For MPU: + 0: illegal, + 1: WB cache, + 2: WB, no-write-allocate cache, + 3: WT cache, + 4: cache bypass. + +config KSEG_PADDR + hex "Physical address of the KSEG mapping" + depends on INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX && MMU + default 0x00000000 + help + This is the physical address where KSEG is mapped. Please refer to + the chosen KSEG layout help for the required address alignment. + Unpacked kernel image (including vectors) must be located completely + within KSEG. + Physical memory below this address is not available to linux. + + If unsure, leave the default value here. + +config KERNEL_LOAD_ADDRESS + hex "Kernel load address" + default 0x60003000 if !MMU + default 0x00003000 if MMU && INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX + default 0xd0003000 if MMU && !INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX + help + This is the address where the kernel is loaded. + It is virtual address for MMUv2 configurations and physical address + for all other configurations. + + If unsure, leave the default value here. + +config VECTORS_OFFSET + hex "Kernel vectors offset" + default 0x00003000 + help + This is the offset of the kernel image from the relocatable vectors + base. + + If unsure, leave the default value here. 
+ +config PLATFORM_WANT_DEFAULT_MEM + def_bool n + +config DEFAULT_MEM_START + hex + prompt "PAGE_OFFSET/PHYS_OFFSET" if !MMU && PLATFORM_WANT_DEFAULT_MEM + default 0x60000000 if PLATFORM_WANT_DEFAULT_MEM + default 0x00000000 + help + This is the base address used for both PAGE_OFFSET and PHYS_OFFSET + in noMMU configurations. + + If unsure, leave the default value here. + +choice + prompt "KSEG layout" + depends on MMU + default XTENSA_KSEG_MMU_V2 + +config XTENSA_KSEG_MMU_V2 + bool "MMUv2: 128MB cached + 128MB uncached" + help + MMUv2 compatible kernel memory map: TLB way 5 maps 128MB starting + at KSEG_PADDR to 0xd0000000 with cache and to 0xd8000000 + without cache. + KSEG_PADDR must be aligned to 128MB. + +config XTENSA_KSEG_256M + bool "256MB cached + 256MB uncached" + depends on INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX + help + TLB way 6 maps 256MB starting at KSEG_PADDR to 0xb0000000 + with cache and to 0xc0000000 without cache. + KSEG_PADDR must be aligned to 256MB. + +config XTENSA_KSEG_512M + bool "512MB cached + 512MB uncached" + depends on INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX + help + TLB way 6 maps 512MB starting at KSEG_PADDR to 0xa0000000 + with cache and to 0xc0000000 without cache. + KSEG_PADDR must be aligned to 256MB. + +endchoice + +config HIGHMEM + bool "High Memory Support" + depends on MMU + help + Linux can use the full amount of RAM in the system by + default. However, the default MMUv2 setup only maps the + lowermost 128 MB of memory linearly to the areas starting + at 0xd0000000 (cached) and 0xd8000000 (uncached). + When there are more than 128 MB memory in the system not + all of it can be "permanently mapped" by the kernel. + The physical memory that's not permanently mapped is called + "high memory". + + If you are compiling a kernel which will never run on a + machine with more than 128 MB total physical RAM, answer + N here. + + If unsure, say Y. + +config FORCE_MAX_ZONEORDER + int "Maximum zone order" + default "11" + help + The kernel memory allocator divides physically contiguous memory + blocks into "zones", where each zone is a power of two number of + pages. This option selects the largest power of two that the kernel + keeps in the memory allocator. If you need to allocate very large + blocks of physically contiguous memory, then you may need to + increase this value. + + This config option is actually maximum order plus one. For example, + a value of 11 means that the largest free memory block is 2^10 pages. + endmenu menu "Power management options" -- cgit v1.2.3 From 7af710d988775aadf440222ecbe0c10eecf3eb54 Mon Sep 17 00:00:00 2001 From: Max Filippov Date: Tue, 3 Jan 2017 17:57:51 -0800 Subject: xtensa: add XIP kernel support XIP (eXecute In Place) kernel image is the image that can be run directly from ROM, using RAM only for writable data. XIP xtensa kernel differs from regular xtensa kernel in the following ways: - it has exception/IRQ vectors merged into text section. No vectors relocation takes place at kernel startup. - .data/.bss location must be specified in the kernel configuration, its content is copied there in the _startup function. - .init.text is merged with the rest of text and is executed from ROM. - when MMU is used the virtual address where the kernel will be mapped must be specified in the kernel configuration. It may be in the KSEG or in the KIO, __pa macro is adjusted to be able to handle both. 
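To make the last point concrete, here is a simplified sketch (an illustration only, not code from this patch; the helper name is made up for the example) of the virtual-to-physical conversion. It mirrors the ___pa() change to asm/page.h below and assumes the usual XCHAL_KSEG_*/XCHAL_KIO_* layout macros:

static inline unsigned long xip_va_to_pa(unsigned long va)
{
	unsigned long off = va - XCHAL_KSEG_CACHED_VADDR;

	/* writable data lives in KSEG and is backed by RAM */
	if (off < XCHAL_KSEG_SIZE)
		return off + PHYS_OFFSET;

	/* kernel text may live in KIO, backed by flash at XCHAL_KIO_PADDR */
	off -= XCHAL_KSEG_SIZE;
	if (off >= XCHAL_KIO_SIZE)
		off -= XCHAL_KIO_SIZE;	/* fold the bypass (uncached) alias */
	return off + XCHAL_KIO_PADDR;
}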
Signed-off-by: Max Filippov --- arch/xtensa/Kconfig | 48 ++++++++++++- arch/xtensa/Makefile | 3 +- arch/xtensa/boot/Makefile | 5 ++ arch/xtensa/configs/xip_kc705_defconfig | 119 ++++++++++++++++++++++++++++++++ arch/xtensa/include/asm/cache.h | 6 ++ arch/xtensa/include/asm/page.h | 11 +++ arch/xtensa/include/asm/vectors.h | 4 ++ arch/xtensa/kernel/head.S | 7 ++ arch/xtensa/kernel/setup.c | 7 ++ arch/xtensa/kernel/vmlinux.lds.S | 52 +++++++++++++- 10 files changed, 257 insertions(+), 5 deletions(-) create mode 100644 arch/xtensa/configs/xip_kc705_defconfig (limited to 'arch/xtensa') diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig index ea9f63c7672d..bf492f9e1f75 100644 --- a/arch/xtensa/Kconfig +++ b/arch/xtensa/Kconfig @@ -19,8 +19,8 @@ config XTENSA select GENERIC_PCI_IOMAP select GENERIC_SCHED_CLOCK select GENERIC_STRNCPY_FROM_USER if KASAN - select HAVE_ARCH_JUMP_LABEL - select HAVE_ARCH_KASAN if MMU + select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL + select HAVE_ARCH_KASAN if MMU && !XIP_KERNEL select HAVE_ARCH_TRACEHOOK select HAVE_DEBUG_KMEMLEAK select HAVE_DMA_CONTIGUOUS @@ -299,6 +299,9 @@ config XTENSA_CALIBRATE_CCOUNT config SERIAL_CONSOLE def_bool n +config PLATFORM_HAVE_XIP + def_bool n + menu "Platform options" choice @@ -325,6 +328,7 @@ config XTENSA_PLATFORM_XTFPGA select PLATFORM_WANT_DEFAULT_MEM if !MMU select SERIAL_CONSOLE select XTENSA_CALIBRATE_CCOUNT + select PLATFORM_HAVE_XIP help XTFPGA is the name of Tensilica board family (LX60, LX110, LX200, ML605). This hardware is capable of running a full Linux distribution. @@ -478,6 +482,27 @@ config INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX If in doubt, say Y. +config XIP_KERNEL + bool "Kernel Execute-In-Place from ROM" + depends on PLATFORM_HAVE_XIP + help + Execute-In-Place allows the kernel to run from non-volatile storage + directly addressable by the CPU, such as NOR flash. This saves RAM + space since the text section of the kernel is not loaded from flash + to RAM. Read-write sections, such as the data section and stack, + are still copied to RAM. The XIP kernel is not compressed since + it has to run directly from flash, so it will take more space to + store it. The flash address used to link the kernel object files, + and for storing it, is configuration dependent. Therefore, if you + say Y here, you must know the proper physical address where to + store the kernel image depending on your own flash memory usage. + + Also note that the make target becomes "make xipImage" rather than + "make Image" or "make uImage". The final kernel binary to put in + ROM memory will be arch/xtensa/boot/xipImage. + + If unsure, say N. + config MEMMAP_CACHEATTR hex "Cache attributes for the memory address space" depends on !MMU @@ -522,6 +547,16 @@ config KSEG_PADDR If unsure, leave the default value here. +config KERNEL_VIRTUAL_ADDRESS + hex "Kernel virtual address" + depends on MMU && XIP_KERNEL + default 0xd0003000 + help + This is the virtual address where the XIP kernel is mapped. + XIP kernel may be mapped into KSEG or KIO region, virtual address + provided here must match kernel load address provided in + KERNEL_LOAD_ADDRESS. + config KERNEL_LOAD_ADDRESS hex "Kernel load address" default 0x60003000 if !MMU @@ -537,12 +572,21 @@ config KERNEL_LOAD_ADDRESS config VECTORS_OFFSET hex "Kernel vectors offset" default 0x00003000 + depends on !XIP_KERNEL help This is the offset of the kernel image from the relocatable vectors base. If unsure, leave the default value here. 
+config XIP_DATA_ADDR + hex "XIP kernel data virtual address" + depends on XIP_KERNEL + default 0x00000000 + help + This is the virtual address where XIP kernel data is copied. + It must be within KSEG if MMU is used. + config PLATFORM_WANT_DEFAULT_MEM def_bool n diff --git a/arch/xtensa/Makefile b/arch/xtensa/Makefile index 1542018c9e57..67a7d151d1e7 100644 --- a/arch/xtensa/Makefile +++ b/arch/xtensa/Makefile @@ -87,7 +87,7 @@ drivers-$(CONFIG_OPROFILE) += arch/xtensa/oprofile/ boot := arch/xtensa/boot -all Image zImage uImage: vmlinux +all Image zImage uImage xipImage: vmlinux $(Q)$(MAKE) $(build)=$(boot) $@ archheaders: @@ -97,4 +97,5 @@ define archhelp @echo '* Image - Kernel ELF image with reset vector' @echo '* zImage - Compressed kernel image (arch/xtensa/boot/images/zImage.*)' @echo '* uImage - U-Boot wrapped image' + @echo ' xipImage - XIP image' endef diff --git a/arch/xtensa/boot/Makefile b/arch/xtensa/boot/Makefile index 294846117fc2..efb91bfda2b4 100644 --- a/arch/xtensa/boot/Makefile +++ b/arch/xtensa/boot/Makefile @@ -29,6 +29,7 @@ all: $(boot-y) Image: boot-elf zImage: boot-redboot uImage: $(obj)/uImage +xipImage: $(obj)/xipImage boot-elf boot-redboot: $(addprefix $(obj)/,$(subdir-y)) $(Q)$(MAKE) $(build)=$(obj)/$@ $(MAKECMDGOALS) @@ -50,3 +51,7 @@ UIMAGE_COMPRESSION = gzip $(obj)/uImage: vmlinux.bin.gz FORCE $(call if_changed,uimage) $(Q)$(kecho) ' Kernel: $@ is ready' + +$(obj)/xipImage: vmlinux FORCE + $(call if_changed,objcopy) + $(Q)$(kecho) ' Kernel: $@ is ready' diff --git a/arch/xtensa/configs/xip_kc705_defconfig b/arch/xtensa/configs/xip_kc705_defconfig new file mode 100644 index 000000000000..f9e85c082afc --- /dev/null +++ b/arch/xtensa/configs/xip_kc705_defconfig @@ -0,0 +1,119 @@ +CONFIG_SYSVIPC=y +CONFIG_POSIX_MQUEUE=y +CONFIG_NO_HZ_IDLE=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_PREEMPT=y +CONFIG_IRQ_TIME_ACCOUNTING=y +CONFIG_BSD_PROCESS_ACCT=y +CONFIG_MEMCG=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CGROUP_DEVICE=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_DEBUG=y +CONFIG_NAMESPACES=y +CONFIG_SCHED_AUTOGROUP=y +CONFIG_RELAY=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_EXPERT=y +CONFIG_KALLSYMS_ALL=y +CONFIG_PROFILING=y +CONFIG_XTENSA_VARIANT_DC233C=y +CONFIG_XTENSA_UNALIGNED_USER=y +CONFIG_XIP_KERNEL=y +CONFIG_XIP_DATA_ADDR=0xd0000000 +CONFIG_KERNEL_VIRTUAL_ADDRESS=0xe6000000 +CONFIG_KERNEL_LOAD_ADDRESS=0xf6000000 +CONFIG_XTENSA_KSEG_512M=y +CONFIG_HIGHMEM=y +CONFIG_XTENSA_PLATFORM_XTFPGA=y +CONFIG_CMDLINE_BOOL=y +CONFIG_CMDLINE="earlycon=uart8250,mmio32native,0xfd050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug memmap=0x38000000@0" +CONFIG_USE_OF=y +CONFIG_BUILTIN_DTB_SOURCE="kc705" +# CONFIG_PARSE_BOOTPARAM is not set +CONFIG_OPROFILE=y +# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +# CONFIG_COMPACTION is not set +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_UNIX=y +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +CONFIG_IP_PNP_BOOTP=y +CONFIG_IP_PNP_RARP=y +# CONFIG_IPV6 is not set +CONFIG_NETFILTER=y +# CONFIG_WIRELESS is not set +CONFIG_UEVENT_HELPER=y +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y +# CONFIG_STANDALONE is not set +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_RAM=y +CONFIG_SCSI=y +CONFIG_BLK_DEV_SD=y +CONFIG_NETDEVICES=y +# CONFIG_NET_VENDOR_ARC is not set +# CONFIG_NET_VENDOR_AURORA is not set +# CONFIG_NET_VENDOR_BROADCOM is not set +# CONFIG_NET_VENDOR_INTEL is not set +# CONFIG_NET_VENDOR_MARVELL is not set +# CONFIG_NET_VENDOR_MICREL is not set +# CONFIG_NET_VENDOR_NATSEMI is not set +# CONFIG_NET_VENDOR_SAMSUNG is not set 
+# CONFIG_NET_VENDOR_SEEQ is not set +# CONFIG_NET_VENDOR_SMSC is not set +# CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_VIA is not set +# CONFIG_NET_VENDOR_WIZNET is not set +CONFIG_MARVELL_PHY=y +# CONFIG_WLAN is not set +# CONFIG_INPUT_KEYBOARD is not set +# CONFIG_INPUT_MOUSE is not set +# CONFIG_SERIO is not set +CONFIG_DEVKMEM=y +CONFIG_SERIAL_8250=y +# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_OF_PLATFORM=y +# CONFIG_HWMON is not set +CONFIG_WATCHDOG=y +CONFIG_WATCHDOG_NOWAYOUT=y +CONFIG_SOFT_WATCHDOG=y +# CONFIG_VGA_CONSOLE is not set +# CONFIG_USB_SUPPORT is not set +# CONFIG_IOMMU_SUPPORT is not set +CONFIG_EXT3_FS=y +CONFIG_FANOTIFY=y +CONFIG_VFAT_FS=y +CONFIG_PROC_KCORE=y +CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_NFS_FS=y +CONFIG_NFS_V4=y +CONFIG_NFS_SWAP=y +CONFIG_ROOT_NFS=y +CONFIG_SUNRPC_DEBUG=y +CONFIG_NLS_CODEPAGE_437=y +CONFIG_NLS_ISO8859_1=y +CONFIG_CRYPTO_ECHAINIV=y +CONFIG_CRYPTO_DEFLATE=y +CONFIG_CRYPTO_LZO=y +CONFIG_CRYPTO_ANSI_CPRNG=y +CONFIG_PRINTK_TIME=y +CONFIG_DYNAMIC_DEBUG=y +CONFIG_DEBUG_INFO=y +CONFIG_MAGIC_SYSRQ=y +CONFIG_DETECT_HUNG_TASK=y +# CONFIG_SCHED_DEBUG is not set +CONFIG_SCHEDSTATS=y +CONFIG_DEBUG_RT_MUTEXES=y +CONFIG_DEBUG_SPINLOCK=y +CONFIG_DEBUG_MUTEXES=y +CONFIG_DEBUG_ATOMIC_SLEEP=y +CONFIG_STACKTRACE=y +CONFIG_RCU_TRACE=y +# CONFIG_FTRACE is not set +# CONFIG_S32C1I_SELFTEST is not set diff --git a/arch/xtensa/include/asm/cache.h b/arch/xtensa/include/asm/cache.h index b21fd133ff62..54e147ac26bf 100644 --- a/arch/xtensa/include/asm/cache.h +++ b/arch/xtensa/include/asm/cache.h @@ -31,4 +31,10 @@ #define ARCH_DMA_MINALIGN L1_CACHE_BYTES +/* + * R/O after init is actually writable, it cannot go to .rodata + * according to vmlinux linker script. 
+ */ +#define __ro_after_init __read_mostly + #endif /* _XTENSA_CACHE_H */ diff --git a/arch/xtensa/include/asm/page.h b/arch/xtensa/include/asm/page.h index 09c56cba442e..f4771c29c7e9 100644 --- a/arch/xtensa/include/asm/page.h +++ b/arch/xtensa/include/asm/page.h @@ -169,7 +169,18 @@ static inline unsigned long ___pa(unsigned long va) if (off >= XCHAL_KSEG_SIZE) off -= XCHAL_KSEG_SIZE; +#ifndef CONFIG_XIP_KERNEL return off + PHYS_OFFSET; +#else + if (off < XCHAL_KSEG_SIZE) + return off + PHYS_OFFSET; + + off -= XCHAL_KSEG_SIZE; + if (off >= XCHAL_KIO_SIZE) + off -= XCHAL_KIO_SIZE; + + return off + XCHAL_KIO_PADDR; +#endif } #define __pa(x) ___pa((unsigned long)(x)) #else diff --git a/arch/xtensa/include/asm/vectors.h b/arch/xtensa/include/asm/vectors.h index 4220c6dac44f..fd99b25037a7 100644 --- a/arch/xtensa/include/asm/vectors.h +++ b/arch/xtensa/include/asm/vectors.h @@ -22,9 +22,13 @@ #include #if defined(CONFIG_MMU) && XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY +#ifdef CONFIG_KERNEL_VIRTUAL_ADDRESS +#define KERNELOFFSET CONFIG_KERNEL_VIRTUAL_ADDRESS +#else #define KERNELOFFSET (CONFIG_KERNEL_LOAD_ADDRESS + \ XCHAL_KSEG_CACHED_VADDR - \ XCHAL_KSEG_PADDR) +#endif #else #define KERNELOFFSET CONFIG_KERNEL_LOAD_ADDRESS #endif diff --git a/arch/xtensa/kernel/head.S b/arch/xtensa/kernel/head.S index 2cec13a457d7..e0c1fac0910f 100644 --- a/arch/xtensa/kernel/head.S +++ b/arch/xtensa/kernel/head.S @@ -260,6 +260,13 @@ ENTRY(_startup) ___invalidate_icache_all a2 a3 isync +#ifdef CONFIG_XIP_KERNEL + /* Setup bootstrap CPU stack in XIP kernel */ + + movi a1, start_info + l32i a1, a1, 0 +#endif + movi a6, 0 xsr a6, excsave1 diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c index e0e1e1892b86..0f93b67c7a5a 100644 --- a/arch/xtensa/kernel/setup.c +++ b/arch/xtensa/kernel/setup.c @@ -308,6 +308,10 @@ extern char _Level6InterruptVector_text_end; extern char _SecondaryResetVector_text_start; extern char _SecondaryResetVector_text_end; #endif +#ifdef CONFIG_XIP_KERNEL +extern char _xip_start[]; +extern char _xip_end[]; +#endif static inline int __init_memblock mem_reserve(unsigned long start, unsigned long end) @@ -339,6 +343,9 @@ void __init setup_arch(char **cmdline_p) #endif mem_reserve(__pa(_stext), __pa(_end)); +#ifdef CONFIG_XIP_KERNEL + mem_reserve(__pa(_xip_start), __pa(_xip_end)); +#endif #ifdef CONFIG_VECTORS_OFFSET mem_reserve(__pa(&_WindowVectors_text_start), diff --git a/arch/xtensa/kernel/vmlinux.lds.S b/arch/xtensa/kernel/vmlinux.lds.S index 943f10639a93..01e3112cdb27 100644 --- a/arch/xtensa/kernel/vmlinux.lds.S +++ b/arch/xtensa/kernel/vmlinux.lds.S @@ -134,6 +134,9 @@ SECTIONS NOTES /* Data section */ +#ifdef CONFIG_XIP_KERNEL + INIT_TEXT_SECTION(PAGE_SIZE) +#else _sdata = .; RW_DATA_SECTION(XCHAL_ICACHE_LINESIZE, PAGE_SIZE, THREAD_SIZE) _edata = .; @@ -147,6 +150,11 @@ SECTIONS .init.data : { INIT_DATA + } +#endif + + .init.rodata : + { . = ALIGN(0x4); __tagtable_begin = .; *(.taglist) @@ -187,12 +195,16 @@ SECTIONS RELOCATE_ENTRY(_DebugInterruptVector_text, .DebugInterruptVector.text); #endif +#ifdef CONFIG_XIP_KERNEL + RELOCATE_ENTRY(_xip_data, .data); + RELOCATE_ENTRY(_xip_init_data, .init.data); +#else #if defined(CONFIG_SMP) RELOCATE_ENTRY(_SecondaryResetVector_text, .SecondaryResetVector.text); +#endif #endif - __boot_reloc_table_end = ABSOLUTE(.) ; INIT_SETUP(XCHAL_ICACHE_LINESIZE) @@ -278,7 +290,7 @@ SECTIONS . 
= (LOADADDR( .DoubleExceptionVector.text ) + SIZEOF( .DoubleExceptionVector.text ) + 3) & ~ 3; #endif -#if defined(CONFIG_SMP) +#if !defined(CONFIG_XIP_KERNEL) && defined(CONFIG_SMP) SECTION_VECTOR (_SecondaryResetVector_text, .SecondaryResetVector.text, @@ -291,12 +303,48 @@ SECTIONS . = ALIGN(PAGE_SIZE); +#ifndef CONFIG_XIP_KERNEL __init_end = .; BSS_SECTION(0, 8192, 0) +#endif _end = .; +#ifdef CONFIG_XIP_KERNEL + . = CONFIG_XIP_DATA_ADDR; + + _xip_start = .; + +#undef LOAD_OFFSET +#define LOAD_OFFSET \ + (CONFIG_XIP_DATA_ADDR - (LOADADDR(.dummy) + SIZEOF(.dummy) + 3) & ~ 3) + + _xip_data_start = .; + _sdata = .; + RW_DATA_SECTION(XCHAL_ICACHE_LINESIZE, PAGE_SIZE, THREAD_SIZE) + _edata = .; + _xip_data_end = .; + + /* Initialization data: */ + + STRUCT_ALIGN(); + + _xip_init_data_start = .; + __init_begin = .; + .init.data : + { + INIT_DATA + } + _xip_init_data_end = .; + __init_end = .; + BSS_SECTION(0, 8192, 0) + + _xip_end = .; + +#undef LOAD_OFFSET +#endif + DWARF_DEBUG .xt.prop 0 : { KEEP(*(.xt.prop .xt.prop.* .gnu.linkonce.prop.*)) } -- cgit v1.2.3 From f5fae6790fd3199e45ead10f7004311abdf539e5 Mon Sep 17 00:00:00 2001 From: Max Filippov Date: Mon, 14 Oct 2019 11:33:44 -0700 Subject: xtensa: merge .fixup with .text Section .fixup contains pieces of code; merge it with the rest of the .text section. Signed-off-by: Max Filippov --- arch/xtensa/kernel/vmlinux.lds.S | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) (limited to 'arch/xtensa') diff --git a/arch/xtensa/kernel/vmlinux.lds.S b/arch/xtensa/kernel/vmlinux.lds.S index 01e3112cdb27..c64abc15d38f 100644 --- a/arch/xtensa/kernel/vmlinux.lds.S +++ b/arch/xtensa/kernel/vmlinux.lds.S @@ -117,7 +117,7 @@ SECTIONS SCHED_TEXT CPUIDLE_TEXT LOCK_TEXT - + *(.fixup) } _etext = .; PROVIDE (etext = .); @@ -126,10 +126,6 @@ SECTIONS RODATA - /* Relocation table */ - - .fixup : { *(.fixup) } - EXCEPTION_TABLE(16) NOTES /* Data section */ -- cgit v1.2.3 From cbc6e28703c44a321e9d8a8894ec11bc6e7e473d Mon Sep 17 00:00:00 2001 From: Max Filippov Date: Tue, 15 Oct 2019 14:03:03 -0700 Subject: xtensa: use "m" constraint instead of "r" in uaccess.h assembly Use the "m" constraint instead of "r" for the address, as "m" allows the compiler to access adjacent locations using base + offset, while "r" requires updating the base register every time. Use "%[mem] * 0 + v" to replace the offset part of the %[mem] expansion with v. It is impossible to change address alignment through the offset part on xtensa, so just ignore the offset in alignment checks.
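As a self-contained illustration of the resulting pattern (an assumed example, not a hunk from this patch; the helper name is invented), a load through an "m" operand lets the compiler emit the base + offset form itself:

static inline unsigned int example_get(const unsigned int *p)
{
	unsigned int v;

	/* %[mem] expands to "base, offset"; with the old "r" operand
	 * the full address first had to be materialized in a register */
	__asm__("l32i %[v], %[mem]"
		: [v] "=a" (v)
		: [mem] "m" (*p));
	return v;
}

Two such loads from p[0] and p[1] can then share a single base register.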
Signed-off-by: Max Filippov --- arch/xtensa/include/asm/uaccess.h | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) (limited to 'arch/xtensa') diff --git a/arch/xtensa/include/asm/uaccess.h b/arch/xtensa/include/asm/uaccess.h index 3f80386f1883..47b7702aaa40 100644 --- a/arch/xtensa/include/asm/uaccess.h +++ b/arch/xtensa/include/asm/uaccess.h @@ -132,13 +132,13 @@ do { \ #define __check_align_1 "" #define __check_align_2 \ - " _bbci.l %[addr], 0, 1f \n" \ + " _bbci.l %[mem] * 0, 1f \n" \ " movi %[err], %[efault] \n" \ " _j 2f \n" #define __check_align_4 \ - " _bbsi.l %[addr], 0, 0f \n" \ - " _bbci.l %[addr], 1, 1f \n" \ + " _bbsi.l %[mem] * 0, 0f \n" \ + " _bbci.l %[mem] * 0 + 1, 1f \n" \ "0: movi %[err], %[efault] \n" \ " _j 2f \n" @@ -154,7 +154,7 @@ do { \ #define __put_user_asm(x_, addr_, err_, align, insn, cb)\ __asm__ __volatile__( \ __check_align_##align \ - "1: "insn" %[x], %[addr], 0 \n" \ + "1: "insn" %[x], %[mem] \n" \ "2: \n" \ " .section .fixup,\"ax\" \n" \ " .align 4 \n" \ @@ -167,8 +167,8 @@ __asm__ __volatile__( \ " .section __ex_table,\"a\" \n" \ " .long 1b, 5b \n" \ " .previous" \ - :[err] "+r"(err_), [tmp] "=r"(cb) \ - :[x] "r"(x_), [addr] "r"(addr_), [efault] "i"(-EFAULT)) + :[err] "+r"(err_), [tmp] "=r"(cb), [mem] "=m"(*(addr_)) \ + :[x] "r"(x_), [efault] "i"(-EFAULT)) #define __get_user_nocheck(x, ptr, size) \ ({ \ @@ -222,7 +222,7 @@ do { \ u32 __x = 0; \ __asm__ __volatile__( \ __check_align_##align \ - "1: "insn" %[x], %[addr], 0 \n" \ + "1: "insn" %[x], %[mem] \n" \ "2: \n" \ " .section .fixup,\"ax\" \n" \ " .align 4 \n" \ @@ -236,7 +236,7 @@ do { \ " .long 1b, 5b \n" \ " .previous" \ :[err] "+r"(err_), [tmp] "=r"(cb), [x] "+r"(__x) \ - :[addr] "r"(addr_), [efault] "i"(-EFAULT)); \ + :[mem] "m"(*(addr_)), [efault] "i"(-EFAULT)); \ (x_) = (__force __typeof__(*(addr_)))__x; \ } while (0) -- cgit v1.2.3 From b387dc044efaa07cd8a47316c83fe2a5c08f9650 Mon Sep 17 00:00:00 2001 From: Max Filippov Date: Tue, 15 Oct 2019 22:04:13 -0700 Subject: xtensa: use macros to generate *_bit and test_and_*_bit functions Parameterize macros with function name, opcode and inversion pattern. This reduces code duplication removing 2/3 of definitions. 
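As a sanity check, expanding BIT_OP(clear, "and", ~) for the S32C1I case reproduces the removed open-coded clear_bit() exactly, with the inversion argument landing in front of mask in the input constraint:

static inline void clear_bit(unsigned int bit, volatile unsigned long *p)
{
	unsigned long tmp, value;
	unsigned long mask = 1UL << (bit & 31);

	p += bit >> 5;

	__asm__ __volatile__(
		"1:     l32i    %1, %3, 0\n"
		"       wsr     %1, scompare1\n"
		"       and     %0, %1, %2\n"
		"       s32c1i  %0, %3, 0\n"
		"       bne     %0, %1, 1b\n"
		: "=&a" (tmp), "=&a" (value)
		: "a" (~mask), "a" (p)
		: "memory");
}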
Signed-off-by: Max Filippov --- arch/xtensa/include/asm/bitops.h | 321 +++++++++++---------------------------- 1 file changed, 92 insertions(+), 229 deletions(-) (limited to 'arch/xtensa') diff --git a/arch/xtensa/include/asm/bitops.h b/arch/xtensa/include/asm/bitops.h index be8b2be5a98b..bfaad56870f6 100644 --- a/arch/xtensa/include/asm/bitops.h +++ b/arch/xtensa/include/asm/bitops.h @@ -98,247 +98,110 @@ static inline unsigned long __fls(unsigned long word) #if XCHAL_HAVE_EXCLUSIVE -static inline void set_bit(unsigned int bit, volatile unsigned long *p) -{ - unsigned long tmp; - unsigned long mask = 1UL << (bit & 31); - - p += bit >> 5; - - __asm__ __volatile__( - "1: l32ex %0, %2\n" - " or %0, %0, %1\n" - " s32ex %0, %2\n" - " getex %0\n" - " beqz %0, 1b\n" - : "=&a" (tmp) - : "a" (mask), "a" (p) - : "memory"); -} - -static inline void clear_bit(unsigned int bit, volatile unsigned long *p) -{ - unsigned long tmp; - unsigned long mask = 1UL << (bit & 31); - - p += bit >> 5; - - __asm__ __volatile__( - "1: l32ex %0, %2\n" - " and %0, %0, %1\n" - " s32ex %0, %2\n" - " getex %0\n" - " beqz %0, 1b\n" - : "=&a" (tmp) - : "a" (~mask), "a" (p) - : "memory"); -} - -static inline void change_bit(unsigned int bit, volatile unsigned long *p) -{ - unsigned long tmp; - unsigned long mask = 1UL << (bit & 31); - - p += bit >> 5; - - __asm__ __volatile__( - "1: l32ex %0, %2\n" - " xor %0, %0, %1\n" - " s32ex %0, %2\n" - " getex %0\n" - " beqz %0, 1b\n" - : "=&a" (tmp) - : "a" (mask), "a" (p) - : "memory"); -} - -static inline int -test_and_set_bit(unsigned int bit, volatile unsigned long *p) -{ - unsigned long tmp, value; - unsigned long mask = 1UL << (bit & 31); - - p += bit >> 5; - - __asm__ __volatile__( - "1: l32ex %1, %3\n" - " or %0, %1, %2\n" - " s32ex %0, %3\n" - " getex %0\n" - " beqz %0, 1b\n" - : "=&a" (tmp), "=&a" (value) - : "a" (mask), "a" (p) - : "memory"); - - return value & mask; -} - -static inline int -test_and_clear_bit(unsigned int bit, volatile unsigned long *p) -{ - unsigned long tmp, value; - unsigned long mask = 1UL << (bit & 31); - - p += bit >> 5; - - __asm__ __volatile__( - "1: l32ex %1, %3\n" - " and %0, %1, %2\n" - " s32ex %0, %3\n" - " getex %0\n" - " beqz %0, 1b\n" - : "=&a" (tmp), "=&a" (value) - : "a" (~mask), "a" (p) - : "memory"); - - return value & mask; -} - -static inline int -test_and_change_bit(unsigned int bit, volatile unsigned long *p) -{ - unsigned long tmp, value; - unsigned long mask = 1UL << (bit & 31); - - p += bit >> 5; - - __asm__ __volatile__( - "1: l32ex %1, %3\n" - " xor %0, %1, %2\n" - " s32ex %0, %3\n" - " getex %0\n" - " beqz %0, 1b\n" - : "=&a" (tmp), "=&a" (value) - : "a" (mask), "a" (p) - : "memory"); - - return value & mask; +#define BIT_OP(op, insn, inv) \ +static inline void op##_bit(unsigned int bit, volatile unsigned long *p)\ +{ \ + unsigned long tmp; \ + unsigned long mask = 1UL << (bit & 31); \ + \ + p += bit >> 5; \ + \ + __asm__ __volatile__( \ + "1: l32ex %0, %2\n" \ + " "insn" %0, %0, %1\n" \ + " s32ex %0, %2\n" \ + " getex %0\n" \ + " beqz %0, 1b\n" \ + : "=&a" (tmp) \ + : "a" (inv mask), "a" (p) \ + : "memory"); \ +} + +#define TEST_AND_BIT_OP(op, insn, inv) \ +static inline int \ +test_and_##op##_bit(unsigned int bit, volatile unsigned long *p) \ +{ \ + unsigned long tmp, value; \ + unsigned long mask = 1UL << (bit & 31); \ + \ + p += bit >> 5; \ + \ + __asm__ __volatile__( \ + "1: l32ex %1, %3\n" \ + " "insn" %0, %1, %2\n" \ + " s32ex %0, %3\n" \ + " getex %0\n" \ + " beqz %0, 1b\n" \ + : "=&a" (tmp), "=&a" (value) \ + : "a" 
(inv mask), "a" (p) \ + : "memory"); \ + \ + return value & mask; \ } #elif XCHAL_HAVE_S32C1I -static inline void set_bit(unsigned int bit, volatile unsigned long *p) -{ - unsigned long tmp, value; - unsigned long mask = 1UL << (bit & 31); - - p += bit >> 5; - - __asm__ __volatile__( - "1: l32i %1, %3, 0\n" - " wsr %1, scompare1\n" - " or %0, %1, %2\n" - " s32c1i %0, %3, 0\n" - " bne %0, %1, 1b\n" - : "=&a" (tmp), "=&a" (value) - : "a" (mask), "a" (p) - : "memory"); -} - -static inline void clear_bit(unsigned int bit, volatile unsigned long *p) -{ - unsigned long tmp, value; - unsigned long mask = 1UL << (bit & 31); - - p += bit >> 5; - - __asm__ __volatile__( - "1: l32i %1, %3, 0\n" - " wsr %1, scompare1\n" - " and %0, %1, %2\n" - " s32c1i %0, %3, 0\n" - " bne %0, %1, 1b\n" - : "=&a" (tmp), "=&a" (value) - : "a" (~mask), "a" (p) - : "memory"); +#define BIT_OP(op, insn, inv) \ +static inline void op##_bit(unsigned int bit, volatile unsigned long *p)\ +{ \ + unsigned long tmp, value; \ + unsigned long mask = 1UL << (bit & 31); \ + \ + p += bit >> 5; \ + \ + __asm__ __volatile__( \ + "1: l32i %1, %3, 0\n" \ + " wsr %1, scompare1\n" \ + " "insn" %0, %1, %2\n" \ + " s32c1i %0, %3, 0\n" \ + " bne %0, %1, 1b\n" \ + : "=&a" (tmp), "=&a" (value) \ + : "a" (inv mask), "a" (p) \ + : "memory"); \ +} + +#define TEST_AND_BIT_OP(op, insn, inv) \ +static inline int \ +test_and_##op##_bit(unsigned int bit, volatile unsigned long *p) \ +{ \ + unsigned long tmp, value; \ + unsigned long mask = 1UL << (bit & 31); \ + \ + p += bit >> 5; \ + \ + __asm__ __volatile__( \ + "1: l32i %1, %3, 0\n" \ + " wsr %1, scompare1\n" \ + " "insn" %0, %1, %2\n" \ + " s32c1i %0, %3, 0\n" \ + " bne %0, %1, 1b\n" \ + : "=&a" (tmp), "=&a" (value) \ + : "a" (inv mask), "a" (p) \ + : "memory"); \ + \ + return tmp & mask; \ } -static inline void change_bit(unsigned int bit, volatile unsigned long *p) -{ - unsigned long tmp, value; - unsigned long mask = 1UL << (bit & 31); - - p += bit >> 5; - - __asm__ __volatile__( - "1: l32i %1, %3, 0\n" - " wsr %1, scompare1\n" - " xor %0, %1, %2\n" - " s32c1i %0, %3, 0\n" - " bne %0, %1, 1b\n" - : "=&a" (tmp), "=&a" (value) - : "a" (mask), "a" (p) - : "memory"); -} +#else -static inline int -test_and_set_bit(unsigned int bit, volatile unsigned long *p) -{ - unsigned long tmp, value; - unsigned long mask = 1UL << (bit & 31); - - p += bit >> 5; - - __asm__ __volatile__( - "1: l32i %1, %3, 0\n" - " wsr %1, scompare1\n" - " or %0, %1, %2\n" - " s32c1i %0, %3, 0\n" - " bne %0, %1, 1b\n" - : "=&a" (tmp), "=&a" (value) - : "a" (mask), "a" (p) - : "memory"); - - return tmp & mask; -} +#define BIT_OP(op, insn, inv) +#define TEST_AND_BIT_OP(op, insn, inv) -static inline int -test_and_clear_bit(unsigned int bit, volatile unsigned long *p) -{ - unsigned long tmp, value; - unsigned long mask = 1UL << (bit & 31); - - p += bit >> 5; - - __asm__ __volatile__( - "1: l32i %1, %3, 0\n" - " wsr %1, scompare1\n" - " and %0, %1, %2\n" - " s32c1i %0, %3, 0\n" - " bne %0, %1, 1b\n" - : "=&a" (tmp), "=&a" (value) - : "a" (~mask), "a" (p) - : "memory"); - - return tmp & mask; -} +#include -static inline int -test_and_change_bit(unsigned int bit, volatile unsigned long *p) -{ - unsigned long tmp, value; - unsigned long mask = 1UL << (bit & 31); - - p += bit >> 5; - - __asm__ __volatile__( - "1: l32i %1, %3, 0\n" - " wsr %1, scompare1\n" - " xor %0, %1, %2\n" - " s32c1i %0, %3, 0\n" - " bne %0, %1, 1b\n" - : "=&a" (tmp), "=&a" (value) - : "a" (mask), "a" (p) - : "memory"); - - return tmp & mask; -} +#endif /* 
XCHAL_HAVE_S32C1I */ -#else +#define BIT_OPS(op, insn, inv) \ + BIT_OP(op, insn, inv) \ + TEST_AND_BIT_OP(op, insn, inv) -#include +BIT_OPS(set, "or", ) +BIT_OPS(clear, "and", ~) +BIT_OPS(change, "xor", ) -#endif /* XCHAL_HAVE_S32C1I */ +#undef BIT_OPS +#undef BIT_OP +#undef TEST_AND_BIT_OP #include #include -- cgit v1.2.3 From e444917019258ef3bd564d0b4a432add2c26a2ae Mon Sep 17 00:00:00 2001 From: Max Filippov Date: Tue, 15 Oct 2019 22:14:15 -0700 Subject: xtensa: use named assembly arguments in bitops.h Numeric assembly arguments are hard to understand and assembly code that uses them is hard to modify. Use named arguments in BIT_OP and TEST_AND_BIT_OP macros. Signed-off-by: Max Filippov --- arch/xtensa/include/asm/bitops.h | 56 ++++++++++++++++++++-------------------- 1 file changed, 28 insertions(+), 28 deletions(-) (limited to 'arch/xtensa') diff --git a/arch/xtensa/include/asm/bitops.h b/arch/xtensa/include/asm/bitops.h index bfaad56870f6..5a35d026c1c3 100644 --- a/arch/xtensa/include/asm/bitops.h +++ b/arch/xtensa/include/asm/bitops.h @@ -107,13 +107,13 @@ static inline void op##_bit(unsigned int bit, volatile unsigned long *p)\ p += bit >> 5; \ \ __asm__ __volatile__( \ - "1: l32ex %0, %2\n" \ - " "insn" %0, %0, %1\n" \ - " s32ex %0, %2\n" \ - " getex %0\n" \ - " beqz %0, 1b\n" \ - : "=&a" (tmp) \ - : "a" (inv mask), "a" (p) \ + "1: l32ex %[tmp], %[addr]\n" \ + " "insn" %[tmp], %[tmp], %[mask]\n" \ + " s32ex %[tmp], %[addr]\n" \ + " getex %[tmp]\n" \ + " beqz %[tmp], 1b\n" \ + : [tmp] "=&a" (tmp) \ + : [mask] "a" (inv mask), [addr] "a" (p) \ : "memory"); \ } @@ -127,13 +127,13 @@ test_and_##op##_bit(unsigned int bit, volatile unsigned long *p) \ p += bit >> 5; \ \ __asm__ __volatile__( \ - "1: l32ex %1, %3\n" \ - " "insn" %0, %1, %2\n" \ - " s32ex %0, %3\n" \ - " getex %0\n" \ - " beqz %0, 1b\n" \ - : "=&a" (tmp), "=&a" (value) \ - : "a" (inv mask), "a" (p) \ + "1: l32ex %[value], %[addr]\n" \ + " "insn" %[tmp], %[value], %[mask]\n" \ + " s32ex %[tmp], %[addr]\n" \ + " getex %[tmp]\n" \ + " beqz %[tmp], 1b\n" \ + : [tmp] "=&a" (tmp), [value] "=&a" (value) \ + : [mask] "a" (inv mask), [addr] "a" (p) \ : "memory"); \ \ return value & mask; \ @@ -150,13 +150,13 @@ static inline void op##_bit(unsigned int bit, volatile unsigned long *p)\ p += bit >> 5; \ \ __asm__ __volatile__( \ - "1: l32i %1, %3, 0\n" \ - " wsr %1, scompare1\n" \ - " "insn" %0, %1, %2\n" \ - " s32c1i %0, %3, 0\n" \ - " bne %0, %1, 1b\n" \ - : "=&a" (tmp), "=&a" (value) \ - : "a" (inv mask), "a" (p) \ + "1: l32i %[value], %[addr], 0\n" \ + " wsr %[value], scompare1\n" \ + " "insn" %[tmp], %[value], %[mask]\n" \ + " s32c1i %[tmp], %[addr], 0\n" \ + " bne %[tmp], %[value], 1b\n" \ + : [tmp] "=&a" (tmp), [value] "=&a" (value) \ + : [mask] "a" (inv mask), [addr] "a" (p) \ : "memory"); \ } @@ -170,13 +170,13 @@ test_and_##op##_bit(unsigned int bit, volatile unsigned long *p) \ p += bit >> 5; \ \ __asm__ __volatile__( \ - "1: l32i %1, %3, 0\n" \ - " wsr %1, scompare1\n" \ - " "insn" %0, %1, %2\n" \ - " s32c1i %0, %3, 0\n" \ - " bne %0, %1, 1b\n" \ - : "=&a" (tmp), "=&a" (value) \ - : "a" (inv mask), "a" (p) \ + "1: l32i %[value], %[addr], 0\n" \ + " wsr %[value], scompare1\n" \ + " "insn" %[tmp], %[value], %[mask]\n" \ + " s32c1i %[tmp], %[addr], 0\n" \ + " bne %[tmp], %[value], 1b\n" \ + : [tmp] "=&a" (tmp), [value] "=&a" (value) \ + : [mask] "a" (inv mask), [addr] "a" (p) \ : "memory"); \ \ return tmp & mask; \ -- cgit v1.2.3 From 5bf67094a3a2d99d5f96db30be286f6c41988177 Mon Sep 17 00:00:00 2001 From: Max Filippov Date: 
Tue, 15 Oct 2019 22:17:33 -0700 Subject: xtensa: use "m" constraint instead of "a" in bitops.h assembly Use "m" constraint instead of "r" for the address, as "m" allows compiler to access adjacent locations using base + offset, while "r" requires updating the base register every time. Signed-off-by: Max Filippov --- arch/xtensa/include/asm/bitops.h | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) (limited to 'arch/xtensa') diff --git a/arch/xtensa/include/asm/bitops.h b/arch/xtensa/include/asm/bitops.h index 5a35d026c1c3..3f71d364ba90 100644 --- a/arch/xtensa/include/asm/bitops.h +++ b/arch/xtensa/include/asm/bitops.h @@ -150,13 +150,14 @@ static inline void op##_bit(unsigned int bit, volatile unsigned long *p)\ p += bit >> 5; \ \ __asm__ __volatile__( \ - "1: l32i %[value], %[addr], 0\n" \ + "1: l32i %[value], %[mem]\n" \ " wsr %[value], scompare1\n" \ " "insn" %[tmp], %[value], %[mask]\n" \ - " s32c1i %[tmp], %[addr], 0\n" \ + " s32c1i %[tmp], %[mem]\n" \ " bne %[tmp], %[value], 1b\n" \ - : [tmp] "=&a" (tmp), [value] "=&a" (value) \ - : [mask] "a" (inv mask), [addr] "a" (p) \ + : [tmp] "=&a" (tmp), [value] "=&a" (value), \ + [mem] "+m" (*p) \ + : [mask] "a" (inv mask) \ : "memory"); \ } @@ -170,13 +171,14 @@ test_and_##op##_bit(unsigned int bit, volatile unsigned long *p) \ p += bit >> 5; \ \ __asm__ __volatile__( \ - "1: l32i %[value], %[addr], 0\n" \ + "1: l32i %[value], %[mem]\n" \ " wsr %[value], scompare1\n" \ " "insn" %[tmp], %[value], %[mask]\n" \ - " s32c1i %[tmp], %[addr], 0\n" \ + " s32c1i %[tmp], %[mem]\n" \ " bne %[tmp], %[value], 1b\n" \ - : [tmp] "=&a" (tmp), [value] "=&a" (value) \ - : [mask] "a" (inv mask), [addr] "a" (p) \ + : [tmp] "=&a" (tmp), [value] "=&a" (value), \ + [mem] "+m" (*p) \ + : [mask] "a" (inv mask) \ : "memory"); \ \ return tmp & mask; \ -- cgit v1.2.3 From 643d6976ff0b950fc6e5d65adfda497ba792b149 Mon Sep 17 00:00:00 2001 From: Max Filippov Date: Wed, 16 Oct 2019 00:33:10 -0700 Subject: xtensa: use named assembly arguments in atomic.h Numeric assembly arguments are hard to understand and assembly code that uses them is hard to modify. Use named arguments in ATOMIC_OP, ATOMIC_OP_RETURN and ATOMIC_FETCH_OP macros. 
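The difference is easiest to see side by side (a generic illustration, not taken from the patch):

	/* positional: %0/%1/%2 must be counted against the operand lists */
	__asm__("add %0, %1, %2" : "=a" (res) : "a" (x), "a" (y));

	/* named: every reference documents itself and survives reordering */
	__asm__("add %[res], %[x], %[y]"
		: [res] "=a" (res)
		: [x] "a" (x), [y] "a" (y));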
Signed-off-by: Max Filippov --- arch/xtensa/include/asm/atomic.h | 120 +++++++++++++++++++-------------------- 1 file changed, 60 insertions(+), 60 deletions(-) (limited to 'arch/xtensa') diff --git a/arch/xtensa/include/asm/atomic.h b/arch/xtensa/include/asm/atomic.h index 7b00d26f472e..6bc5309a2ce8 100644 --- a/arch/xtensa/include/asm/atomic.h +++ b/arch/xtensa/include/asm/atomic.h @@ -64,13 +64,13 @@ static inline void atomic_##op(int i, atomic_t *v) \ int result; \ \ __asm__ __volatile__( \ - "1: l32ex %1, %3\n" \ - " " #op " %0, %1, %2\n" \ - " s32ex %0, %3\n" \ - " getex %0\n" \ - " beqz %0, 1b\n" \ - : "=&a" (result), "=&a" (tmp) \ - : "a" (i), "a" (v) \ + "1: l32ex %[tmp], %[addr]\n" \ + " " #op " %[result], %[tmp], %[i]\n" \ + " s32ex %[result], %[addr]\n" \ + " getex %[result]\n" \ + " beqz %[result], 1b\n" \ + : [result] "=&a" (result), [tmp] "=&a" (tmp) \ + : [i] "a" (i), [addr] "a" (v) \ : "memory" \ ); \ } \ @@ -82,14 +82,14 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \ int result; \ \ __asm__ __volatile__( \ - "1: l32ex %1, %3\n" \ - " " #op " %0, %1, %2\n" \ - " s32ex %0, %3\n" \ - " getex %0\n" \ - " beqz %0, 1b\n" \ - " " #op " %0, %1, %2\n" \ - : "=&a" (result), "=&a" (tmp) \ - : "a" (i), "a" (v) \ + "1: l32ex %[tmp], %[addr]\n" \ + " " #op " %[result], %[tmp], %[i]\n" \ + " s32ex %[result], %[addr]\n" \ + " getex %[result]\n" \ + " beqz %[result], 1b\n" \ + " " #op " %[result], %[tmp], %[i]\n" \ + : [result] "=&a" (result), [tmp] "=&a" (tmp) \ + : [i] "a" (i), [addr] "a" (v) \ : "memory" \ ); \ \ @@ -103,13 +103,13 @@ static inline int atomic_fetch_##op(int i, atomic_t *v) \ int result; \ \ __asm__ __volatile__( \ - "1: l32ex %1, %3\n" \ - " " #op " %0, %1, %2\n" \ - " s32ex %0, %3\n" \ - " getex %0\n" \ - " beqz %0, 1b\n" \ - : "=&a" (result), "=&a" (tmp) \ - : "a" (i), "a" (v) \ + "1: l32ex %[tmp], %[addr]\n" \ + " " #op " %[result], %[tmp], %[i]\n" \ + " s32ex %[result], %[addr]\n" \ + " getex %[result]\n" \ + " beqz %[result], 1b\n" \ + : [result] "=&a" (result), [tmp] "=&a" (tmp) \ + : [i] "a" (i), [addr] "a" (v) \ : "memory" \ ); \ \ @@ -124,13 +124,13 @@ static inline void atomic_##op(int i, atomic_t * v) \ int result; \ \ __asm__ __volatile__( \ - "1: l32i %1, %3, 0\n" \ - " wsr %1, scompare1\n" \ - " " #op " %0, %1, %2\n" \ - " s32c1i %0, %3, 0\n" \ - " bne %0, %1, 1b\n" \ - : "=&a" (result), "=&a" (tmp) \ - : "a" (i), "a" (v) \ + "1: l32i %[tmp], %[addr], 0\n" \ + " wsr %[tmp], scompare1\n" \ + " " #op " %[result], %[tmp], %[i]\n" \ + " s32c1i %[result], %[addr], 0\n" \ + " bne %[result], %[tmp], 1b\n" \ + : [result] "=&a" (result), [tmp] "=&a" (tmp) \ + : [i] "a" (i), [addr] "a" (v) \ : "memory" \ ); \ } \ @@ -142,14 +142,14 @@ static inline int atomic_##op##_return(int i, atomic_t * v) \ int result; \ \ __asm__ __volatile__( \ - "1: l32i %1, %3, 0\n" \ - " wsr %1, scompare1\n" \ - " " #op " %0, %1, %2\n" \ - " s32c1i %0, %3, 0\n" \ - " bne %0, %1, 1b\n" \ - " " #op " %0, %0, %2\n" \ - : "=&a" (result), "=&a" (tmp) \ - : "a" (i), "a" (v) \ + "1: l32i %[tmp], %[addr], 0\n" \ + " wsr %[tmp], scompare1\n" \ + " " #op " %[result], %[tmp], %[i]\n" \ + " s32c1i %[result], %[addr], 0\n" \ + " bne %[result], %[tmp], 1b\n" \ + " " #op " %[result], %[result], %[i]\n" \ + : [result] "=&a" (result), [tmp] "=&a" (tmp) \ + : [i] "a" (i), [addr] "a" (v) \ : "memory" \ ); \ \ @@ -163,13 +163,13 @@ static inline int atomic_fetch_##op(int i, atomic_t * v) \ int result; \ \ __asm__ __volatile__( \ - "1: l32i %1, %3, 0\n" \ - " wsr %1, scompare1\n" \ - " " #op 
" %0, %1, %2\n" \ - " s32c1i %0, %3, 0\n" \ - " bne %0, %1, 1b\n" \ - : "=&a" (result), "=&a" (tmp) \ - : "a" (i), "a" (v) \ + "1: l32i %[tmp], %[addr], 0\n" \ + " wsr %[tmp], scompare1\n" \ + " " #op " %[result], %[tmp], %[i]\n" \ + " s32c1i %[result], %[addr], 0\n" \ + " bne %[result], %[tmp], 1b\n" \ + : [result] "=&a" (result), [tmp] "=&a" (tmp) \ + : [i] "a" (i), [addr] "a" (v) \ : "memory" \ ); \ \ @@ -184,14 +184,14 @@ static inline void atomic_##op(int i, atomic_t * v) \ unsigned int vval; \ \ __asm__ __volatile__( \ - " rsil a15, "__stringify(TOPLEVEL)"\n"\ - " l32i %0, %2, 0\n" \ - " " #op " %0, %0, %1\n" \ - " s32i %0, %2, 0\n" \ + " rsil a15, "__stringify(TOPLEVEL)"\n" \ + " l32i %[result], %[addr], 0\n" \ + " " #op " %[result], %[result], %[i]\n" \ + " s32i %[result], %[addr], 0\n" \ " wsr a15, ps\n" \ " rsync\n" \ - : "=&a" (vval) \ - : "a" (i), "a" (v) \ + : [result] "=&a" (vval) \ + : [i] "a" (i), [addr] "a" (v) \ : "a15", "memory" \ ); \ } \ @@ -203,13 +203,13 @@ static inline int atomic_##op##_return(int i, atomic_t * v) \ \ __asm__ __volatile__( \ " rsil a15,"__stringify(TOPLEVEL)"\n" \ - " l32i %0, %2, 0\n" \ - " " #op " %0, %0, %1\n" \ - " s32i %0, %2, 0\n" \ + " l32i %[result], %[addr], 0\n" \ + " " #op " %[result], %[result], %[i]\n" \ + " s32i %[result], %[addr], 0\n" \ " wsr a15, ps\n" \ " rsync\n" \ - : "=&a" (vval) \ - : "a" (i), "a" (v) \ + : [result] "=&a" (vval) \ + : [i] "a" (i), [addr] "a" (v) \ : "a15", "memory" \ ); \ \ @@ -223,13 +223,13 @@ static inline int atomic_fetch_##op(int i, atomic_t * v) \ \ __asm__ __volatile__( \ " rsil a15,"__stringify(TOPLEVEL)"\n" \ - " l32i %0, %3, 0\n" \ - " " #op " %1, %0, %2\n" \ - " s32i %1, %3, 0\n" \ + " l32i %[result], %[addr], 0\n" \ + " " #op " %[tmp], %[result], %[i]\n" \ + " s32i %[tmp], %[addr], 0\n" \ " wsr a15, ps\n" \ " rsync\n" \ - : "=&a" (vval), "=&a" (tmp) \ - : "a" (i), "a" (v) \ + : [result] "=&a" (vval), [tmp] "=&a" (tmp) \ + : [i] "a" (i), [addr] "a" (v) \ : "a15", "memory" \ ); \ \ -- cgit v1.2.3 From 13e28135d6fb4906af9cd1d54f22172ad5e4a0dd Mon Sep 17 00:00:00 2001 From: Max Filippov Date: Wed, 16 Oct 2019 00:49:54 -0700 Subject: xtensa: use "m" constraint instead of "a" in atomic.h assembly Use "m" constraint instead of "r" for the address, as "m" allows compiler to access adjacent locations using base + offset, while "r" requires updating the base register every time. 
Signed-off-by: Max Filippov --- arch/xtensa/include/asm/atomic.h | 52 +++++++++++++++++++++------------------- 1 file changed, 28 insertions(+), 24 deletions(-) (limited to 'arch/xtensa') diff --git a/arch/xtensa/include/asm/atomic.h b/arch/xtensa/include/asm/atomic.h index 6bc5309a2ce8..3e7c6134ed32 100644 --- a/arch/xtensa/include/asm/atomic.h +++ b/arch/xtensa/include/asm/atomic.h @@ -124,13 +124,14 @@ static inline void atomic_##op(int i, atomic_t * v) \ int result; \ \ __asm__ __volatile__( \ - "1: l32i %[tmp], %[addr], 0\n" \ + "1: l32i %[tmp], %[mem]\n" \ " wsr %[tmp], scompare1\n" \ " " #op " %[result], %[tmp], %[i]\n" \ - " s32c1i %[result], %[addr], 0\n" \ + " s32c1i %[result], %[mem]\n" \ " bne %[result], %[tmp], 1b\n" \ - : [result] "=&a" (result), [tmp] "=&a" (tmp) \ - : [i] "a" (i), [addr] "a" (v) \ + : [result] "=&a" (result), [tmp] "=&a" (tmp), \ + [mem] "+m" (*v) \ + : [i] "a" (i) \ : "memory" \ ); \ } \ @@ -142,14 +143,15 @@ static inline int atomic_##op##_return(int i, atomic_t * v) \ int result; \ \ __asm__ __volatile__( \ - "1: l32i %[tmp], %[addr], 0\n" \ + "1: l32i %[tmp], %[mem]\n" \ " wsr %[tmp], scompare1\n" \ " " #op " %[result], %[tmp], %[i]\n" \ - " s32c1i %[result], %[addr], 0\n" \ + " s32c1i %[result], %[mem]\n" \ " bne %[result], %[tmp], 1b\n" \ " " #op " %[result], %[result], %[i]\n" \ - : [result] "=&a" (result), [tmp] "=&a" (tmp) \ - : [i] "a" (i), [addr] "a" (v) \ + : [result] "=&a" (result), [tmp] "=&a" (tmp), \ + [mem] "+m" (*v) \ + : [i] "a" (i) \ : "memory" \ ); \ \ @@ -163,13 +165,14 @@ static inline int atomic_fetch_##op(int i, atomic_t * v) \ int result; \ \ __asm__ __volatile__( \ - "1: l32i %[tmp], %[addr], 0\n" \ + "1: l32i %[tmp], %[mem]\n" \ " wsr %[tmp], scompare1\n" \ " " #op " %[result], %[tmp], %[i]\n" \ - " s32c1i %[result], %[addr], 0\n" \ + " s32c1i %[result], %[mem]\n" \ " bne %[result], %[tmp], 1b\n" \ - : [result] "=&a" (result), [tmp] "=&a" (tmp) \ - : [i] "a" (i), [addr] "a" (v) \ + : [result] "=&a" (result), [tmp] "=&a" (tmp), \ + [mem] "+m" (*v) \ + : [i] "a" (i) \ : "memory" \ ); \ \ @@ -185,13 +188,13 @@ static inline void atomic_##op(int i, atomic_t * v) \ \ __asm__ __volatile__( \ " rsil a15, "__stringify(TOPLEVEL)"\n" \ - " l32i %[result], %[addr], 0\n" \ + " l32i %[result], %[mem]\n" \ " " #op " %[result], %[result], %[i]\n" \ - " s32i %[result], %[addr], 0\n" \ + " s32i %[result], %[mem]\n" \ " wsr a15, ps\n" \ " rsync\n" \ - : [result] "=&a" (vval) \ - : [i] "a" (i), [addr] "a" (v) \ + : [result] "=&a" (vval), [mem] "+m" (*v) \ + : [i] "a" (i) \ : "a15", "memory" \ ); \ } \ @@ -203,13 +206,13 @@ static inline int atomic_##op##_return(int i, atomic_t * v) \ \ __asm__ __volatile__( \ " rsil a15,"__stringify(TOPLEVEL)"\n" \ - " l32i %[result], %[addr], 0\n" \ + " l32i %[result], %[mem]\n" \ " " #op " %[result], %[result], %[i]\n" \ - " s32i %[result], %[addr], 0\n" \ + " s32i %[result], %[mem]\n" \ " wsr a15, ps\n" \ " rsync\n" \ - : [result] "=&a" (vval) \ - : [i] "a" (i), [addr] "a" (v) \ + : [result] "=&a" (vval), [mem] "+m" (*v) \ + : [i] "a" (i) \ : "a15", "memory" \ ); \ \ @@ -223,13 +226,14 @@ static inline int atomic_fetch_##op(int i, atomic_t * v) \ \ __asm__ __volatile__( \ " rsil a15,"__stringify(TOPLEVEL)"\n" \ - " l32i %[result], %[addr], 0\n" \ + " l32i %[result], %[mem]\n" \ " " #op " %[tmp], %[result], %[i]\n" \ - " s32i %[tmp], %[addr], 0\n" \ + " s32i %[tmp], %[mem]\n" \ " wsr a15, ps\n" \ " rsync\n" \ - : [result] "=&a" (vval), [tmp] "=&a" (tmp) \ - : [i] "a" (i), [addr] "a" (v) \ + : [result] "=&a" 
(vval), [tmp] "=&a" (tmp), \ + [mem] "+m" (*v) \ + : [i] "a" (i) \ : "a15", "memory" \ ); \ \ -- cgit v1.2.3 From 812e708a4c2d29664a009805671d98cbe7c756b1 Mon Sep 17 00:00:00 2001 From: Max Filippov Date: Wed, 16 Oct 2019 01:52:38 -0700 Subject: xtensa: use named assembly arguments in cmpxchg.h Numeric assembly arguments are hard to understand and assembly code that uses them is hard to modify. Use named arguments in __cmpxchg_u32 and xchg_u32. Signed-off-by: Max Filippov --- arch/xtensa/include/asm/cmpxchg.h | 70 +++++++++++++++++++-------------------- 1 file changed, 35 insertions(+), 35 deletions(-) (limited to 'arch/xtensa') diff --git a/arch/xtensa/include/asm/cmpxchg.h b/arch/xtensa/include/asm/cmpxchg.h index 7ccc5cbf441b..0d4fc56337c8 100644 --- a/arch/xtensa/include/asm/cmpxchg.h +++ b/arch/xtensa/include/asm/cmpxchg.h @@ -27,25 +27,25 @@ __cmpxchg_u32(volatile int *p, int old, int new) unsigned long tmp, result; __asm__ __volatile__( - "1: l32ex %0, %3\n" - " bne %0, %4, 2f\n" - " mov %1, %2\n" - " s32ex %1, %3\n" - " getex %1\n" - " beqz %1, 1b\n" + "1: l32ex %[result], %[addr]\n" + " bne %[result], %[cmp], 2f\n" + " mov %[tmp], %[new]\n" + " s32ex %[tmp], %[addr]\n" + " getex %[tmp]\n" + " beqz %[tmp], 1b\n" "2:\n" - : "=&a" (result), "=&a" (tmp) - : "a" (new), "a" (p), "a" (old) + : [result] "=&a" (result), [tmp] "=&a" (tmp) + : [new] "a" (new), [addr] "a" (p), [cmp] "a" (old) : "memory" ); return result; #elif XCHAL_HAVE_S32C1I __asm__ __volatile__( - " wsr %2, scompare1\n" - " s32c1i %0, %1, 0\n" - : "+a" (new) - : "a" (p), "a" (old) + " wsr %[cmp], scompare1\n" + " s32c1i %[new], %[addr], 0\n" + : [new] "+a" (new) + : [addr] "a" (p), [cmp] "a" (old) : "memory" ); @@ -53,14 +53,14 @@ __cmpxchg_u32(volatile int *p, int old, int new) #else __asm__ __volatile__( " rsil a15, "__stringify(TOPLEVEL)"\n" - " l32i %0, %1, 0\n" - " bne %0, %2, 1f\n" - " s32i %3, %1, 0\n" + " l32i %[old], %[addr], 0\n" + " bne %[old], %[cmp], 1f\n" + " s32i %[new], %[addr], 0\n" "1:\n" " wsr a15, ps\n" " rsync\n" - : "=&a" (old) - : "a" (p), "a" (old), "r" (new) + : [old] "=&a" (old) + : [addr] "a" (p), [cmp] "a" (old), [new] "r" (new) : "a15", "memory"); return old; #endif @@ -129,13 +129,13 @@ static inline unsigned long xchg_u32(volatile int * m, unsigned long val) unsigned long tmp, result; __asm__ __volatile__( - "1: l32ex %0, %3\n" - " mov %1, %2\n" - " s32ex %1, %3\n" - " getex %1\n" - " beqz %1, 1b\n" - : "=&a" (result), "=&a" (tmp) - : "a" (val), "a" (m) + "1: l32ex %[result], %[addr]\n" + " mov %[tmp], %[val]\n" + " s32ex %[tmp], %[addr]\n" + " getex %[tmp]\n" + " beqz %[tmp], 1b\n" + : [result] "=&a" (result), [tmp] "=&a" (tmp) + : [val] "a" (val), [addr] "a" (m) : "memory" ); @@ -143,13 +143,13 @@ static inline unsigned long xchg_u32(volatile int * m, unsigned long val) #elif XCHAL_HAVE_S32C1I unsigned long tmp, result; __asm__ __volatile__( - "1: l32i %1, %2, 0\n" - " mov %0, %3\n" - " wsr %1, scompare1\n" - " s32c1i %0, %2, 0\n" - " bne %0, %1, 1b\n" - : "=&a" (result), "=&a" (tmp) - : "a" (m), "a" (val) + "1: l32i %[tmp], %[addr], 0\n" + " mov %[result], %[val]\n" + " wsr %[tmp], scompare1\n" + " s32c1i %[result], %[addr], 0\n" + " bne %[result], %[tmp], 1b\n" + : [result] "=&a" (result), [tmp] "=&a" (tmp) + : [addr] "a" (m), [val] "a" (val) : "memory" ); return result; @@ -157,12 +157,12 @@ static inline unsigned long xchg_u32(volatile int * m, unsigned long val) unsigned long tmp; __asm__ __volatile__( " rsil a15, "__stringify(TOPLEVEL)"\n" - " l32i %0, %1, 0\n" - " s32i %2, %1, 0\n" 
+ " l32i %[tmp], %[addr], 0\n" + " s32i %[val], %[addr], 0\n" " wsr a15, ps\n" " rsync\n" - : "=&a" (tmp) - : "a" (m), "a" (val) + : [tmp] "=&a" (tmp) + : [addr] "a" (m), [val] "a" (val) : "a15", "memory"); return tmp; #endif -- cgit v1.2.3 From cf3b3baa712517c4972339b150f79fa88099e5db Mon Sep 17 00:00:00 2001 From: Max Filippov Date: Wed, 16 Oct 2019 00:49:54 -0700 Subject: xtensa: use "m" constraint instead of "a" in cmpxchg.h assembly Use "m" constraint instead of "r" for the address, as "m" allows compiler to access adjacent locations using base + offset, while "r" requires updating the base register every time. Signed-off-by: Max Filippov --- arch/xtensa/include/asm/cmpxchg.h | 31 ++++++++++++++++--------------- 1 file changed, 16 insertions(+), 15 deletions(-) (limited to 'arch/xtensa') diff --git a/arch/xtensa/include/asm/cmpxchg.h b/arch/xtensa/include/asm/cmpxchg.h index 0d4fc56337c8..a175f8aec3fb 100644 --- a/arch/xtensa/include/asm/cmpxchg.h +++ b/arch/xtensa/include/asm/cmpxchg.h @@ -43,9 +43,9 @@ __cmpxchg_u32(volatile int *p, int old, int new) #elif XCHAL_HAVE_S32C1I __asm__ __volatile__( " wsr %[cmp], scompare1\n" - " s32c1i %[new], %[addr], 0\n" - : [new] "+a" (new) - : [addr] "a" (p), [cmp] "a" (old) + " s32c1i %[new], %[mem]\n" + : [new] "+a" (new), [mem] "+m" (*p) + : [cmp] "a" (old) : "memory" ); @@ -53,14 +53,14 @@ __cmpxchg_u32(volatile int *p, int old, int new) #else __asm__ __volatile__( " rsil a15, "__stringify(TOPLEVEL)"\n" - " l32i %[old], %[addr], 0\n" + " l32i %[old], %[mem]\n" " bne %[old], %[cmp], 1f\n" - " s32i %[new], %[addr], 0\n" + " s32i %[new], %[mem]\n" "1:\n" " wsr a15, ps\n" " rsync\n" - : [old] "=&a" (old) - : [addr] "a" (p), [cmp] "a" (old), [new] "r" (new) + : [old] "=&a" (old), [mem] "+m" (*p) + : [cmp] "a" (old), [new] "r" (new) : "a15", "memory"); return old; #endif @@ -143,13 +143,14 @@ static inline unsigned long xchg_u32(volatile int * m, unsigned long val) #elif XCHAL_HAVE_S32C1I unsigned long tmp, result; __asm__ __volatile__( - "1: l32i %[tmp], %[addr], 0\n" + "1: l32i %[tmp], %[mem]\n" " mov %[result], %[val]\n" " wsr %[tmp], scompare1\n" - " s32c1i %[result], %[addr], 0\n" + " s32c1i %[result], %[mem]\n" " bne %[result], %[tmp], 1b\n" - : [result] "=&a" (result), [tmp] "=&a" (tmp) - : [addr] "a" (m), [val] "a" (val) + : [result] "=&a" (result), [tmp] "=&a" (tmp), + [mem] "+m" (*m) + : [val] "a" (val) : "memory" ); return result; @@ -157,12 +158,12 @@ static inline unsigned long xchg_u32(volatile int * m, unsigned long val) unsigned long tmp; __asm__ __volatile__( " rsil a15, "__stringify(TOPLEVEL)"\n" - " l32i %[tmp], %[addr], 0\n" - " s32i %[val], %[addr], 0\n" + " l32i %[tmp], %[mem]\n" + " s32i %[val], %[mem]\n" " wsr a15, ps\n" " rsync\n" - : [tmp] "=&a" (tmp) - : [addr] "a" (m), [val] "a" (val) + : [tmp] "=&a" (tmp), [mem] "+m" (*m) + : [val] "a" (val) : "a15", "memory"); return tmp; #endif -- cgit v1.2.3 From 5eff6ca2e39662114675e7cca6a01e15d6c0b5d1 Mon Sep 17 00:00:00 2001 From: Max Filippov Date: Wed, 16 Oct 2019 00:49:54 -0700 Subject: xtensa: use "m" constraint instead of "r" in futex.h assembly Use "m" constraint instead of "r" for the address, as "m" allows compiler to access adjacent locations using base + offset, while "r" requires updating the base register every time. 
Signed-off-by: Max Filippov --- arch/xtensa/include/asm/futex.h | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'arch/xtensa') diff --git a/arch/xtensa/include/asm/futex.h b/arch/xtensa/include/asm/futex.h index 0c4457ca0a85..964611083224 100644 --- a/arch/xtensa/include/asm/futex.h +++ b/arch/xtensa/include/asm/futex.h @@ -43,10 +43,10 @@ #elif XCHAL_HAVE_S32C1I #define __futex_atomic_op(insn, ret, old, uaddr, arg) \ __asm__ __volatile( \ - "1: l32i %[oldval], %[addr], 0\n" \ + "1: l32i %[oldval], %[mem]\n" \ insn "\n" \ " wsr %[oldval], scompare1\n" \ - "2: s32c1i %[newval], %[addr], 0\n" \ + "2: s32c1i %[newval], %[mem]\n" \ " bne %[newval], %[oldval], 1b\n" \ " movi %[newval], 0\n" \ "3:\n" \ @@ -60,9 +60,9 @@ " .section __ex_table,\"a\"\n" \ " .long 1b, 5b, 2b, 5b\n" \ " .previous\n" \ - : [oldval] "=&r" (old), [newval] "=&r" (ret) \ - : [addr] "r" (uaddr), [oparg] "r" (arg), \ - [fault] "I" (-EFAULT) \ + : [oldval] "=&r" (old), [newval] "=&r" (ret), \ + [mem] "+m" (*(uaddr)) \ + : [oparg] "r" (arg), [fault] "I" (-EFAULT) \ : "memory") #endif -- cgit v1.2.3 From c5fccebc138b1f5a5b57acecdbf1530e41ebea17 Mon Sep 17 00:00:00 2001 From: Max Filippov Date: Tue, 12 Nov 2019 08:47:48 -0800 Subject: xtensa: improve stack dumping Calculate printable stack size and use print_hex_dump instead of opencoding it. Drop extra newline output in show_trace as its output format does not depend on CONFIG_KALLSYMS. Reviewed-by: Petr Mladek Signed-off-by: Max Filippov --- arch/xtensa/kernel/traps.c | 27 +++++++++++---------------- 1 file changed, 11 insertions(+), 16 deletions(-) (limited to 'arch/xtensa') diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c index 4a6c495ce9b6..be26ec6c0e0e 100644 --- a/arch/xtensa/kernel/traps.c +++ b/arch/xtensa/kernel/traps.c @@ -491,32 +491,27 @@ void show_trace(struct task_struct *task, unsigned long *sp) pr_info("Call Trace:\n"); walk_stackframe(sp, show_trace_cb, NULL); -#ifndef CONFIG_KALLSYMS - pr_cont("\n"); -#endif } -static int kstack_depth_to_print = 24; +#define STACK_DUMP_ENTRY_SIZE 4 +#define STACK_DUMP_LINE_SIZE 32 +static size_t kstack_depth_to_print = 24; void show_stack(struct task_struct *task, unsigned long *sp) { - int i = 0; - unsigned long *stack; + size_t len; if (!sp) sp = stack_pointer(task); - stack = sp; - pr_info("Stack:\n"); + len = min((-(size_t)sp) & (THREAD_SIZE - STACK_DUMP_ENTRY_SIZE), + kstack_depth_to_print * STACK_DUMP_ENTRY_SIZE); - for (i = 0; i < kstack_depth_to_print; i++) { - if (kstack_end(sp)) - break; - pr_cont(" %08lx", *sp++); - if (i % 8 == 7) - pr_cont("\n"); - } - show_trace(task, stack); + pr_info("Stack:\n"); + print_hex_dump(KERN_INFO, " ", DUMP_PREFIX_NONE, + STACK_DUMP_LINE_SIZE, STACK_DUMP_ENTRY_SIZE, + sp, len, false); + show_trace(task, sp); } DEFINE_SPINLOCK(die_lock); -- cgit v1.2.3 From 8951eb1530ddf83dbb815d38e97afddc6a0d1140 Mon Sep 17 00:00:00 2001 From: Max Filippov Date: Tue, 12 Nov 2019 08:43:25 -0800 Subject: xtensa: make stack dump size configurable Introduce Kconfig symbol PRINT_STACK_DEPTH and use it to initialize kstack_depth_to_print. 
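How the two stack-dump changes interact can be checked outside the kernel: the dump length is the smaller of the configured depth and the space left up to the stack's top. A standalone sketch (THREAD_SIZE assumed to be 8 KiB, the non-KASAN value of 1 << KERNEL_STACK_SHIFT; the stack pointer is hypothetical):

	#include <stdio.h>
	#include <stddef.h>

	#define THREAD_SIZE		8192
	#define STACK_DUMP_ENTRY_SIZE	4

	int main(void)
	{
		size_t depth = 64;		/* CONFIG_PRINT_STACK_DEPTH default */
		size_t sp = 0xd0001f40;		/* hypothetical kernel stack pointer */

		/* bytes from sp up to the THREAD_SIZE-aligned stack top,
		 * rounded down to whole 4-byte entries */
		size_t to_top = (-sp) & (THREAD_SIZE - STACK_DUMP_ENTRY_SIZE);
		size_t want = depth * STACK_DUMP_ENTRY_SIZE;
		size_t len = to_top < want ? to_top : want;

		printf("dump %zu of %zu requested bytes\n", len, want); /* 192 of 256 */
		return 0;
	}
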
Reviewed-by: Petr Mladek Signed-off-by: Max Filippov --- arch/xtensa/Kconfig.debug | 7 +++++++ arch/xtensa/kernel/traps.c | 2 +- 2 files changed, 8 insertions(+), 1 deletion(-) (limited to 'arch/xtensa') diff --git a/arch/xtensa/Kconfig.debug b/arch/xtensa/Kconfig.debug index 39de98e20018..83cc8d12fa0e 100644 --- a/arch/xtensa/Kconfig.debug +++ b/arch/xtensa/Kconfig.debug @@ -31,3 +31,10 @@ config S32C1I_SELFTEST It is easy to make wrong hardware configuration, this test should catch it early. Say 'N' on stable hardware. + +config PRINT_STACK_DEPTH + int "Stack depth to print" if DEBUG_KERNEL + default 64 + help + This option allows you to set the stack depth that the kernel + prints in stack traces. diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c index be26ec6c0e0e..87bd68dd7687 100644 --- a/arch/xtensa/kernel/traps.c +++ b/arch/xtensa/kernel/traps.c @@ -495,7 +495,7 @@ void show_trace(struct task_struct *task, unsigned long *sp) #define STACK_DUMP_ENTRY_SIZE 4 #define STACK_DUMP_LINE_SIZE 32 -static size_t kstack_depth_to_print = 24; +static size_t kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH; void show_stack(struct task_struct *task, unsigned long *sp) { -- cgit v1.2.3 From f0d1eab8c2e1f9240cf4ae4753d7947c65e60bd7 Mon Sep 17 00:00:00 2001 From: Mike Rapoport Date: Tue, 5 Nov 2019 16:33:19 +0200 Subject: xtensa: mm: fix PMD folding implementation There was a definition of pmd_offset() in arch/xtensa/include/asm/pgtable.h that shadowed the generic implementation defined in include/asm-generic/pgtable-nopmd.h. As the result, xtensa had shortcuts in page table traversal in several places instead of doing level unfolding. Remove local override for pmd_offset() and add page table unfolding where necessary. Signed-off-by: Mike Rapoport Message-Id: <1572964400-16542-2-git-send-email-rppt@kernel.org> Signed-off-by: Max Filippov --- arch/xtensa/include/asm/pgtable.h | 3 --- arch/xtensa/mm/fault.c | 10 ++++++++-- arch/xtensa/mm/kasan_init.c | 6 ++++-- arch/xtensa/mm/mmu.c | 3 ++- arch/xtensa/mm/tlb.c | 6 +++++- 5 files changed, 19 insertions(+), 9 deletions(-) (limited to 'arch/xtensa') diff --git a/arch/xtensa/include/asm/pgtable.h b/arch/xtensa/include/asm/pgtable.h index 3f7fe5a8c286..af72f02004f1 100644 --- a/arch/xtensa/include/asm/pgtable.h +++ b/arch/xtensa/include/asm/pgtable.h @@ -371,9 +371,6 @@ ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep) #define pgd_index(address) ((address) >> PGDIR_SHIFT) -/* Find an entry in the second-level page table.. */ -#define pmd_offset(dir,address) ((pmd_t*)(dir)) - /* Find an entry in the third-level page table.. 
*/ #define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) #define pte_offset_kernel(dir,addr) \ diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c index f81b1478da61..68a041402025 100644 --- a/arch/xtensa/mm/fault.c +++ b/arch/xtensa/mm/fault.c @@ -197,6 +197,7 @@ vmalloc_fault: struct mm_struct *act_mm = current->active_mm; int index = pgd_index(address); pgd_t *pgd, *pgd_k; + pud_t *pud, *pud_k; pmd_t *pmd, *pmd_k; pte_t *pte_k; @@ -211,8 +212,13 @@ vmalloc_fault: pgd_val(*pgd) = pgd_val(*pgd_k); - pmd = pmd_offset(pgd, address); - pmd_k = pmd_offset(pgd_k, address); + pud = pud_offset(pgd, address); + pud_k = pud_offset(pgd_k, address); + if (!pud_present(*pud) || !pud_present(*pud_k)) + goto bad_page_fault; + + pmd = pmd_offset(pud, address); + pmd_k = pmd_offset(pud_k, address); if (!pmd_present(*pmd) || !pmd_present(*pmd_k)) goto bad_page_fault; diff --git a/arch/xtensa/mm/kasan_init.c b/arch/xtensa/mm/kasan_init.c index af7152560bc3..ace98bdf5191 100644 --- a/arch/xtensa/mm/kasan_init.c +++ b/arch/xtensa/mm/kasan_init.c @@ -20,7 +20,8 @@ void __init kasan_early_init(void) { unsigned long vaddr = KASAN_SHADOW_START; pgd_t *pgd = pgd_offset_k(vaddr); - pmd_t *pmd = pmd_offset(pgd, vaddr); + pud_t *pud = pud_offset(pgd, vaddr); + pmd_t *pmd = pmd_offset(pud, vaddr); int i; for (i = 0; i < PTRS_PER_PTE; ++i) @@ -42,7 +43,8 @@ static void __init populate(void *start, void *end) unsigned long i, j; unsigned long vaddr = (unsigned long)start; pgd_t *pgd = pgd_offset_k(vaddr); - pmd_t *pmd = pmd_offset(pgd, vaddr); + pud_t *pud = pud_offset(pgd, vaddr); + pmd_t *pmd = pmd_offset(pud, vaddr); pte_t *pte = memblock_alloc(n_pages * sizeof(pte_t), PAGE_SIZE); if (!pte) diff --git a/arch/xtensa/mm/mmu.c b/arch/xtensa/mm/mmu.c index 03678c4afc39..018dda2c6a91 100644 --- a/arch/xtensa/mm/mmu.c +++ b/arch/xtensa/mm/mmu.c @@ -22,7 +22,8 @@ static void * __init init_pmd(unsigned long vaddr, unsigned long n_pages) { pgd_t *pgd = pgd_offset_k(vaddr); - pmd_t *pmd = pmd_offset(pgd, vaddr); + pud_t *pud = pud_offset(pgd, vaddr); + pmd_t *pmd = pmd_offset(pud, vaddr); pte_t *pte; unsigned long i; diff --git a/arch/xtensa/mm/tlb.c b/arch/xtensa/mm/tlb.c index 59153d0aa890..164a2ca07a1f 100644 --- a/arch/xtensa/mm/tlb.c +++ b/arch/xtensa/mm/tlb.c @@ -169,6 +169,7 @@ static unsigned get_pte_for_vaddr(unsigned vaddr) struct task_struct *task = get_current(); struct mm_struct *mm = task->mm; pgd_t *pgd; + pud_t *pud; pmd_t *pmd; pte_t *pte; @@ -177,7 +178,10 @@ static unsigned get_pte_for_vaddr(unsigned vaddr) pgd = pgd_offset(mm, vaddr); if (pgd_none_or_clear_bad(pgd)) return 0; - pmd = pmd_offset(pgd, vaddr); + pud = pud_offset(pgd, vaddr); + if (pud_none_or_clear_bad(pud)) + return 0; + pmd = pmd_offset(pud, vaddr); if (pmd_none_or_clear_bad(pmd)) return 0; pte = pte_offset_map(pmd, vaddr); -- cgit v1.2.3 From f5ee2567921dec4f489c16d4fe22c3a7222d0ce6 Mon Sep 17 00:00:00 2001 From: Mike Rapoport Date: Tue, 5 Nov 2019 16:33:20 +0200 Subject: xtensa: get rid of __ARCH_USE_5LEVEL_HACK xtensa has 2-level page tables and already uses pgtable-nopmd for page table folding. Add walks of p4d level where appropriate and drop usage of __ARCH_USE_5LEVEL_HACK. 
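What makes the extra steps free on two-level xtensa is the generic folding machinery. Simplified from include/asm-generic/pgtable-nop4d.h (paraphrased, details elided), a folded level's helpers reduce to a type cast and constants, so the compiler removes the added calls entirely:

	typedef struct { pgd_t pgd; } p4d_t;

	/* a folded p4d entry is the pgd entry seen through a new type */
	static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
	{
		return (p4d_t *)pgd;
	}

	/* a folded level can never be absent or bad */
	static inline int p4d_none(p4d_t p4d) { return 0; }
	static inline int p4d_bad(p4d_t p4d) { return 0; }

Architectures with a real p4d level supply non-trivial versions, so the same walk is correct everywhere.
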
Signed-off-by: Mike Rapoport Message-Id: <1572964400-16542-3-git-send-email-rppt@kernel.org> Signed-off-by: Max Filippov [fix up arch/xtensa/include/asm/fixmap.h and arch/xtensa/mm/tlb.c] --- arch/xtensa/include/asm/fixmap.h | 8 +++++--- arch/xtensa/include/asm/pgtable.h | 1 - arch/xtensa/mm/fault.c | 10 ++++++++-- arch/xtensa/mm/kasan_init.c | 6 ++++-- arch/xtensa/mm/mmu.c | 3 ++- arch/xtensa/mm/tlb.c | 6 +++++- 6 files changed, 24 insertions(+), 10 deletions(-) (limited to 'arch/xtensa') diff --git a/arch/xtensa/include/asm/fixmap.h b/arch/xtensa/include/asm/fixmap.h index 7e25c1b50ac0..cfb8696917e9 100644 --- a/arch/xtensa/include/asm/fixmap.h +++ b/arch/xtensa/include/asm/fixmap.h @@ -78,8 +78,10 @@ static inline unsigned long virt_to_fix(const unsigned long vaddr) #define kmap_get_fixmap_pte(vaddr) \ pte_offset_kernel( \ - pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)), \ - (vaddr) \ - ) + pmd_offset(pud_offset(p4d_offset(pgd_offset_k(vaddr), \ + (vaddr)), \ + (vaddr)), \ + (vaddr)), \ + (vaddr)) #endif diff --git a/arch/xtensa/include/asm/pgtable.h b/arch/xtensa/include/asm/pgtable.h index af72f02004f1..27ac17c9da09 100644 --- a/arch/xtensa/include/asm/pgtable.h +++ b/arch/xtensa/include/asm/pgtable.h @@ -8,7 +8,6 @@ #ifndef _XTENSA_PGTABLE_H #define _XTENSA_PGTABLE_H -#define __ARCH_USE_5LEVEL_HACK #include #include #include diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c index 68a041402025..bee30a77cd70 100644 --- a/arch/xtensa/mm/fault.c +++ b/arch/xtensa/mm/fault.c @@ -197,6 +197,7 @@ vmalloc_fault: struct mm_struct *act_mm = current->active_mm; int index = pgd_index(address); pgd_t *pgd, *pgd_k; + p4d_t *p4d, *p4d_k; pud_t *pud, *pud_k; pmd_t *pmd, *pmd_k; pte_t *pte_k; @@ -212,8 +213,13 @@ vmalloc_fault: pgd_val(*pgd) = pgd_val(*pgd_k); - pud = pud_offset(pgd, address); - pud_k = pud_offset(pgd_k, address); + p4d = p4d_offset(pgd, address); + p4d_k = p4d_offset(pgd_k, address); + if (!p4d_present(*p4d) || !p4d_present(*p4d_k)) + goto bad_page_fault; + + pud = pud_offset(p4d, address); + pud_k = pud_offset(p4d_k, address); if (!pud_present(*pud) || !pud_present(*pud_k)) goto bad_page_fault; diff --git a/arch/xtensa/mm/kasan_init.c b/arch/xtensa/mm/kasan_init.c index ace98bdf5191..9c957791bb33 100644 --- a/arch/xtensa/mm/kasan_init.c +++ b/arch/xtensa/mm/kasan_init.c @@ -20,7 +20,8 @@ void __init kasan_early_init(void) { unsigned long vaddr = KASAN_SHADOW_START; pgd_t *pgd = pgd_offset_k(vaddr); - pud_t *pud = pud_offset(pgd, vaddr); + p4d_t *p4d = p4d_offset(pgd, vaddr); + pud_t *pud = pud_offset(p4d, vaddr); pmd_t *pmd = pmd_offset(pud, vaddr); int i; @@ -43,7 +44,8 @@ static void __init populate(void *start, void *end) unsigned long i, j; unsigned long vaddr = (unsigned long)start; pgd_t *pgd = pgd_offset_k(vaddr); - pud_t *pud = pud_offset(pgd, vaddr); + p4d_t *p4d = p4d_offset(pgd, vaddr); + pud_t *pud = pud_offset(p4d, vaddr); pmd_t *pmd = pmd_offset(pud, vaddr); pte_t *pte = memblock_alloc(n_pages * sizeof(pte_t), PAGE_SIZE); diff --git a/arch/xtensa/mm/mmu.c b/arch/xtensa/mm/mmu.c index 018dda2c6a91..37e478a27877 100644 --- a/arch/xtensa/mm/mmu.c +++ b/arch/xtensa/mm/mmu.c @@ -22,7 +22,8 @@ static void * __init init_pmd(unsigned long vaddr, unsigned long n_pages) { pgd_t *pgd = pgd_offset_k(vaddr); - pud_t *pud = pud_offset(pgd, vaddr); + p4d_t *p4d = p4d_offset(pgd, vaddr); + pud_t *pud = pud_offset(p4d, vaddr); pmd_t *pmd = pmd_offset(pud, vaddr); pte_t *pte; unsigned long i; diff --git a/arch/xtensa/mm/tlb.c b/arch/xtensa/mm/tlb.c index 
164a2ca07a1f..ec8220973252 100644 --- a/arch/xtensa/mm/tlb.c +++ b/arch/xtensa/mm/tlb.c @@ -169,6 +169,7 @@ static unsigned get_pte_for_vaddr(unsigned vaddr) struct task_struct *task = get_current(); struct mm_struct *mm = task->mm; pgd_t *pgd; + p4d_t *p4d; pud_t *pud; pmd_t *pmd; pte_t *pte; @@ -178,7 +179,10 @@ static unsigned get_pte_for_vaddr(unsigned vaddr) pgd = pgd_offset(mm, vaddr); if (pgd_none_or_clear_bad(pgd)) return 0; - pud = pud_offset(pgd, vaddr); + p4d = p4d_offset(pgd, vaddr); + if (p4d_none_or_clear_bad(p4d)) + return 0; + pud = pud_offset(p4d, vaddr); if (pud_none_or_clear_bad(pud)) return 0; pmd = pmd_offset(pud, vaddr); -- cgit v1.2.3 From 36de10c4788efc6efe6ff9aa10d38cb7eea4c818 Mon Sep 17 00:00:00 2001 From: Max Filippov Date: Wed, 13 Nov 2019 13:18:31 -0800 Subject: xtensa: fix TLB sanity checker Virtual and translated addresses retrieved by the xtensa TLB sanity checker must be consistent, i.e. correspond to the same state of the checked TLB entry. KASAN shadow memory is mapped dynamically using auto-refill TLB entries and thus may change TLB state between the virtual and translated address retrieval, resulting in false TLB insanity report. Move read_xtlb_translation close to read_xtlb_virtual to make sure that read values are consistent. Cc: stable@vger.kernel.org Fixes: a99e07ee5e88 ("xtensa: check TLB sanity on return to userspace") Signed-off-by: Max Filippov --- arch/xtensa/mm/tlb.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/xtensa') diff --git a/arch/xtensa/mm/tlb.c b/arch/xtensa/mm/tlb.c index ec8220973252..f436cf2efd8b 100644 --- a/arch/xtensa/mm/tlb.c +++ b/arch/xtensa/mm/tlb.c @@ -224,6 +224,8 @@ static int check_tlb_entry(unsigned w, unsigned e, bool dtlb) unsigned tlbidx = w | (e << PAGE_SHIFT); unsigned r0 = dtlb ? read_dtlb_virtual(tlbidx) : read_itlb_virtual(tlbidx); + unsigned r1 = dtlb ? + read_dtlb_translation(tlbidx) : read_itlb_translation(tlbidx); unsigned vpn = (r0 & PAGE_MASK) | (e << PAGE_SHIFT); unsigned pte = get_pte_for_vaddr(vpn); unsigned mm_asid = (get_rasid_register() >> 8) & ASID_MASK; @@ -239,8 +241,6 @@ static int check_tlb_entry(unsigned w, unsigned e, bool dtlb) } if (tlb_asid == mm_asid) { - unsigned r1 = dtlb ? read_dtlb_translation(tlbidx) : - read_itlb_translation(tlbidx); if ((pte ^ r1) & PAGE_MASK) { pr_err("%cTLB: way: %u, entry: %u, mapping: %08x->%08x, PTE: %08x\n", dtlb ? 'D' : 'I', w, e, r0, r1, pte); -- cgit v1.2.3 From e64681b487c897ec871465083bf0874087d47b66 Mon Sep 17 00:00:00 2001 From: Max Filippov Date: Wed, 13 Nov 2019 16:06:42 -0800 Subject: xtensa: use MEMBLOCK_ALLOC_ANYWHERE for KASAN shadow map KASAN shadow map doesn't need to be accessible through the linear kernel mapping, allocate its pages with MEMBLOCK_ALLOC_ANYWHERE so that high memory can be used. This frees up to ~100MB of low memory on xtensa configurations with KASAN and high memory. 
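The shadow can live anywhere because KASAN never reaches it through the linear (lowmem) mapping: shadow addresses come from a fixed transform and are resolved through the page tables populated here. In essence (generic KASAN code, with KASAN_SHADOW_OFFSET chosen per architecture):

	/* every 8 bytes of memory map to 1 shadow byte */
	static inline void *kasan_mem_to_shadow(const void *addr)
	{
		return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
			+ KASAN_SHADOW_OFFSET;
	}

Any physical page that can be mapped at the computed shadow address will do, highmem included.
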
Cc: stable@vger.kernel.org # v5.1+ Fixes: f240ec09bb8a ("memblock: replace memblock_alloc_base(ANYWHERE) with memblock_phys_alloc") Reviewed-by: Mike Rapoport Signed-off-by: Max Filippov --- arch/xtensa/mm/kasan_init.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'arch/xtensa') diff --git a/arch/xtensa/mm/kasan_init.c b/arch/xtensa/mm/kasan_init.c index 9c957791bb33..e3baa21ff24c 100644 --- a/arch/xtensa/mm/kasan_init.c +++ b/arch/xtensa/mm/kasan_init.c @@ -60,7 +60,9 @@ static void __init populate(void *start, void *end) for (k = 0; k < PTRS_PER_PTE; ++k, ++j) { phys_addr_t phys = - memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE); + memblock_phys_alloc_range(PAGE_SIZE, PAGE_SIZE, + 0, + MEMBLOCK_ALLOC_ANYWHERE); if (!phys) panic("Failed to allocate page table page\n"); -- cgit v1.2.3 From 8b5d7e5242de1db7c08335a512ad260fb3cd0b39 Mon Sep 17 00:00:00 2001 From: Valentin Schneider Date: Mon, 23 Sep 2019 15:36:20 +0100 Subject: xtensa: entry: Remove unneeded need_resched() loop Since the enabling and disabling of IRQs within preempt_schedule_irq() is contained in a need_resched() loop, we don't need the outer arch code loop. Acked-by: Max Filippov Signed-off-by: Valentin Schneider Message-Id: <20190923143620.29334-10-valentin.schneider@arm.com> Signed-off-by: Max Filippov --- arch/xtensa/kernel/entry.S | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/xtensa') diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S index 9e3676879168..2ca209e71565 100644 --- a/arch/xtensa/kernel/entry.S +++ b/arch/xtensa/kernel/entry.S @@ -529,7 +529,7 @@ common_exception_return: l32i a4, a2, TI_PRE_COUNT bnez a4, 4f call4 preempt_schedule_irq - j 1b + j 4f #endif #if XTENSA_FAKE_NMI -- cgit v1.2.3 From d80a505348472093cce6698b837c2b83b5e90390 Mon Sep 17 00:00:00 2001 From: Max Filippov Date: Mon, 25 Nov 2019 12:57:02 -0800 Subject: xtensa: drop unneeded headers from coprocessor.S A bunch of irrelevant headers is included into coprocessor.S. Remove them and add necessary asm/regs.h. Signed-off-by: Max Filippov --- arch/xtensa/kernel/coprocessor.S | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) (limited to 'arch/xtensa') diff --git a/arch/xtensa/kernel/coprocessor.S b/arch/xtensa/kernel/coprocessor.S index 80828b95a51f..bb8e499b9900 100644 --- a/arch/xtensa/kernel/coprocessor.S +++ b/arch/xtensa/kernel/coprocessor.S @@ -15,17 +15,9 @@ #include #include #include -#include #include -#include -#include -#include -#include #include -#include -#include -#include -#include +#include #if XTENSA_HAVE_COPROCESSORS -- cgit v1.2.3 From c2d9aa3b6e56de56c7f1ed9026ca6ec7cfbeef19 Mon Sep 17 00:00:00 2001 From: Max Filippov Date: Thu, 14 Nov 2019 15:05:40 -0800 Subject: xtensa: fix syscall_set_return_value syscall return value is in the register a2, not a0. Cc: stable@vger.kernel.org # v5.0+ Fixes: 9f24f3c1067c ("xtensa: implement tracehook functions and enable HAVE_ARCH_TRACEHOOK") Signed-off-by: Max Filippov --- arch/xtensa/include/asm/syscall.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/xtensa') diff --git a/arch/xtensa/include/asm/syscall.h b/arch/xtensa/include/asm/syscall.h index 359ab40e935a..c90fb944f9d8 100644 --- a/arch/xtensa/include/asm/syscall.h +++ b/arch/xtensa/include/asm/syscall.h @@ -51,7 +51,7 @@ static inline void syscall_set_return_value(struct task_struct *task, struct pt_regs *regs, int error, long val) { - regs->areg[0] = (long) error ? error : val; + regs->areg[2] = (long) error ? 
error : val; } #define SYSCALL_MAX_ARGS 6 -- cgit v1.2.3 From ba9c1d65991a8be2e160447dd06eb803cbb9ba61 Mon Sep 17 00:00:00 2001 From: Max Filippov Date: Mon, 25 Nov 2019 11:51:45 -0800 Subject: xtensa: rearrange syscall tracing system_call saves and restores syscall number across system call to make clone and execv entry and exit tracing match. This complicates things when syscall code may be changed by ptrace. Preserve syscall code in copy_thread and start_thread directly instead of doing tricks in system_call. Signed-off-by: Max Filippov --- arch/xtensa/include/asm/processor.h | 3 ++- arch/xtensa/kernel/entry.S | 6 ------ arch/xtensa/kernel/process.c | 2 ++ 3 files changed, 4 insertions(+), 7 deletions(-) (limited to 'arch/xtensa') diff --git a/arch/xtensa/include/asm/processor.h b/arch/xtensa/include/asm/processor.h index 7495520d7a3e..6fa903daf2a2 100644 --- a/arch/xtensa/include/asm/processor.h +++ b/arch/xtensa/include/asm/processor.h @@ -195,6 +195,7 @@ struct thread_struct { /* Clearing a0 terminates the backtrace. */ #define start_thread(regs, new_pc, new_sp) \ do { \ + unsigned long syscall = (regs)->syscall; \ memset((regs), 0, sizeof(*(regs))); \ (regs)->pc = (new_pc); \ (regs)->ps = USER_PS_VALUE; \ @@ -204,7 +205,7 @@ struct thread_struct { (regs)->depc = 0; \ (regs)->windowbase = 0; \ (regs)->windowstart = 1; \ - (regs)->syscall = NO_SYSCALL; \ + (regs)->syscall = syscall; \ } while (0) /* Forward declaration */ diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S index 2ca209e71565..59af494d9940 100644 --- a/arch/xtensa/kernel/entry.S +++ b/arch/xtensa/kernel/entry.S @@ -1895,8 +1895,6 @@ ENTRY(system_call) l32i a7, a2, PT_SYSCALL 1: - s32i a7, a1, 4 - /* syscall = sys_call_table[syscall_nr] */ movi a4, sys_call_table @@ -1930,12 +1928,8 @@ ENTRY(system_call) abi_ret(4) 1: - l32i a4, a1, 4 - l32i a3, a2, PT_SYSCALL - s32i a4, a2, PT_SYSCALL mov a6, a2 call4 do_syscall_trace_leave - s32i a3, a2, PT_SYSCALL abi_ret(4) ENDPROC(system_call) diff --git a/arch/xtensa/kernel/process.c b/arch/xtensa/kernel/process.c index db278a9e80c7..9e1c49134c07 100644 --- a/arch/xtensa/kernel/process.c +++ b/arch/xtensa/kernel/process.c @@ -264,6 +264,8 @@ int copy_thread(unsigned long clone_flags, unsigned long usp_thread_fn, ®s->areg[XCHAL_NUM_AREGS - len/4], len); } + childregs->syscall = regs->syscall; + /* The thread pointer is passed in the '4th argument' (= a5) */ if (clone_flags & CLONE_SETTLS) childregs->threadptr = childregs->areg[5]; -- cgit v1.2.3 From 02ce94c229251555ac726ecfebe3458ef5905fa9 Mon Sep 17 00:00:00 2001 From: Max Filippov Date: Fri, 29 Nov 2019 14:54:06 -0800 Subject: xtensa: fix system_call interaction with ptrace Don't overwrite return value if system call was cancelled at entry by ptrace. Return status code from do_syscall_trace_enter so that pt_regs::syscall doesn't need to be changed to skip syscall. 
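In C terms the entry path now behaves like the sketch below (a paraphrase of the assembly, not actual kernel code; the TIF_SYSCALL_* gating that decides whether the trace hooks run at all is omitted, and argument registers follow the xtensa syscall ABI):

	typedef long (*syscall_fn)(long, long, long, long, long, long);

	/* a2 (areg[2]) carries the syscall number in and the return value out */
	static void system_call(struct pt_regs *regs)
	{
		syscall_fn fn;

		if (!do_syscall_trace_enter(regs))
			return;	/* skipped: areg[2] already holds -ENOSYS or
				 * the value a tracer installed */

		fn = sys_call_table[regs->syscall];
		regs->areg[2] = fn(regs->areg[6], regs->areg[3], regs->areg[4],
				   regs->areg[5], regs->areg[8], regs->areg[9]);
		do_syscall_trace_leave(regs);
	}
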
Signed-off-by: Max Filippov
---
 arch/xtensa/kernel/entry.S  |  4 ++--
 arch/xtensa/kernel/ptrace.c | 18 ++++++++++++++++--
 2 files changed, 18 insertions(+), 4 deletions(-)
(limited to 'arch/xtensa')

diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S
index 59af494d9940..138469e26560 100644
--- a/arch/xtensa/kernel/entry.S
+++ b/arch/xtensa/kernel/entry.S
@@ -1892,6 +1892,7 @@ ENTRY(system_call)
 	mov	a6, a2
 	call4	do_syscall_trace_enter
+	beqz	a6, .Lsyscall_exit
 	l32i	a7, a2, PT_SYSCALL
 1:
@@ -1904,8 +1905,6 @@ ENTRY(system_call)
 	addx4	a4, a7, a4
 	l32i	a4, a4, 0
-	movi	a5, sys_ni_syscall;
-	beq	a4, a5, 1f
 	/* Load args: arg0 - arg5 are passed via regs. */
@@ -1925,6 +1924,7 @@ ENTRY(system_call)
 	s32i	a6, a2, PT_AREG2
 	bnez	a3, 1f
+.Lsyscall_exit:
 	abi_ret(4)
 1:
diff --git a/arch/xtensa/kernel/ptrace.c b/arch/xtensa/kernel/ptrace.c
index b964f0b2d886..145742d70a9f 100644
--- a/arch/xtensa/kernel/ptrace.c
+++ b/arch/xtensa/kernel/ptrace.c
@@ -542,14 +542,28 @@ long arch_ptrace(struct task_struct *child, long request,
 	return ret;
 }

-void do_syscall_trace_enter(struct pt_regs *regs)
+void do_syscall_trace_leave(struct pt_regs *regs);
+int do_syscall_trace_enter(struct pt_regs *regs)
 {
+	if (regs->syscall == NO_SYSCALL)
+		regs->areg[2] = -ENOSYS;
+
 	if (test_thread_flag(TIF_SYSCALL_TRACE) &&
-	    tracehook_report_syscall_entry(regs))
+	    tracehook_report_syscall_entry(regs)) {
+		regs->areg[2] = -ENOSYS;
 		regs->syscall = NO_SYSCALL;
+		return 0;
+	}
+
+	if (regs->syscall == NO_SYSCALL) {
+		do_syscall_trace_leave(regs);
+		return 0;
+	}

 	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
 		trace_sys_enter(regs, syscall_get_nr(current, regs));
+
+	return 1;
 }

 void do_syscall_trace_leave(struct pt_regs *regs)
-- cgit v1.2.3

From 9d9043f6a81713248d82d88983c06b1eaedda287 Mon Sep 17 00:00:00 2001
From: Max Filippov
Date: Fri, 29 Nov 2019 01:25:20 -0800
Subject: xtensa: clean up system_call/xtensa_rt_sigreturn interaction

system_call assembly code always pushes pointer to struct pt_regs as
the last additional parameter for all system calls. The only user of
this feature is xtensa_rt_sigreturn. Avoid this special case. Define
xtensa_rt_sigreturn as accepting no arguments. Use current_pt_regs to
get pointer to struct pt_regs in xtensa_rt_sigreturn. Don't pass
additional parameter from system_call code.
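The explicit parameter was redundant because the saved user register frame is always recoverable from the current task. Generically (in essence, from include/linux/ptrace.h):

	#define current_pt_regs()	task_pt_regs(current)

On xtensa this resolves to the pt_regs area at the top of the task's kernel stack, i.e. the same pointer system_call used to push.
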
Signed-off-by: Max Filippov --- arch/xtensa/include/asm/syscall.h | 2 +- arch/xtensa/kernel/entry.S | 10 +++------- arch/xtensa/kernel/signal.c | 4 ++-- 3 files changed, 6 insertions(+), 10 deletions(-) (limited to 'arch/xtensa') diff --git a/arch/xtensa/include/asm/syscall.h b/arch/xtensa/include/asm/syscall.h index c90fb944f9d8..f9a671cbf933 100644 --- a/arch/xtensa/include/asm/syscall.h +++ b/arch/xtensa/include/asm/syscall.h @@ -79,7 +79,7 @@ static inline void syscall_set_arguments(struct task_struct *task, regs->areg[reg[i]] = args[i]; } -asmlinkage long xtensa_rt_sigreturn(struct pt_regs*); +asmlinkage long xtensa_rt_sigreturn(void); asmlinkage long xtensa_shmat(int, char __user *, int); asmlinkage long xtensa_fadvise64_64(int, int, unsigned long long, unsigned long long); diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S index 138469e26560..be897803834a 100644 --- a/arch/xtensa/kernel/entry.S +++ b/arch/xtensa/kernel/entry.S @@ -1876,8 +1876,7 @@ ENDPROC(fast_store_prohibited) ENTRY(system_call) - /* reserve 4 bytes on stack for function parameter */ - abi_entry(4) + abi_entry_default /* regs->syscall = regs->areg[2] */ @@ -1915,9 +1914,6 @@ ENTRY(system_call) l32i a10, a2, PT_AREG8 l32i a11, a2, PT_AREG9 - /* Pass one additional argument to the syscall: pt_regs (on stack) */ - s32i a2, a1, 0 - callx4 a4 1: /* regs->areg[2] = return_value */ @@ -1925,12 +1921,12 @@ ENTRY(system_call) s32i a6, a2, PT_AREG2 bnez a3, 1f .Lsyscall_exit: - abi_ret(4) + abi_ret_default 1: mov a6, a2 call4 do_syscall_trace_leave - abi_ret(4) + abi_ret_default ENDPROC(system_call) diff --git a/arch/xtensa/kernel/signal.c b/arch/xtensa/kernel/signal.c index dae83cddd6ca..76cee341507b 100644 --- a/arch/xtensa/kernel/signal.c +++ b/arch/xtensa/kernel/signal.c @@ -236,9 +236,9 @@ restore_sigcontext(struct pt_regs *regs, struct rt_sigframe __user *frame) * Do a signal return; undo the signal stack. */ -asmlinkage long xtensa_rt_sigreturn(long a0, long a1, long a2, long a3, - long a4, long a5, struct pt_regs *regs) +asmlinkage long xtensa_rt_sigreturn(void) { + struct pt_regs *regs = current_pt_regs(); struct rt_sigframe __user *frame; sigset_t set; int ret; -- cgit v1.2.3