Diffstat (limited to 'arch/sparc')
34 files changed, 131 insertions, 445 deletions
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index efeff2c896a5..a6ca135442f9 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig @@ -23,6 +23,7 @@ config SPARC select HAVE_OPROFILE select HAVE_ARCH_KGDB if !SMP || SPARC64 select HAVE_ARCH_TRACEHOOK + select HAVE_ARCH_SECCOMP if SPARC64 select HAVE_EXIT_THREAD select HAVE_PCI select SYSCTL_EXCEPTION_TRACE @@ -43,12 +44,14 @@ config SPARC select GENERIC_STRNLEN_USER select MODULES_USE_ELF_RELA select PCI_SYSCALL if PCI + select PCI_MSI_ARCH_FALLBACKS if PCI_MSI select ODD_RT_SIGACTION select OLD_SIGSUSPEND select CPU_NO_EFFICIENT_FFS select LOCKDEP_SMALL if LOCKDEP select NEED_DMA_MAP_STATE select NEED_SG_DMA_LENGTH + select SET_FS config SPARC32 def_bool !64BIT @@ -226,23 +229,6 @@ config EARLYFB help Say Y here to enable a faster early framebuffer boot console. -config SECCOMP - bool "Enable seccomp to safely compute untrusted bytecode" - depends on SPARC64 && PROC_FS - default y - help - This kernel feature is useful for number crunching applications - that may need to compute untrusted bytecode during their - execution. By using pipes or other transports made available to - the process as file descriptors supporting the read/write - syscalls, it's possible to isolate those applications in - their own address space using seccomp. Once seccomp is - enabled via /proc/<pid>/seccomp, it cannot be disabled - and the task is only allowed to execute a few safe syscalls - defined by each seccomp mode. - - If unsure, say Y. Only embedded should say N here. - config HOTPLUG_CPU bool "Support for hot-pluggable CPUs" depends on SPARC64 && SMP diff --git a/arch/sparc/include/asm/checksum.h b/arch/sparc/include/asm/checksum.h index a6256cb6fc5c..f2ac13323b6d 100644 --- a/arch/sparc/include/asm/checksum.h +++ b/arch/sparc/include/asm/checksum.h @@ -1,7 +1,9 @@ /* SPDX-License-Identifier: GPL-2.0 */ #ifndef ___ASM_SPARC_CHECKSUM_H #define ___ASM_SPARC_CHECKSUM_H +#define _HAVE_ARCH_CSUM_AND_COPY #define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER +#define HAVE_CSUM_COPY_USER #if defined(__sparc__) && defined(__arch64__) #include <asm/checksum_64.h> #else diff --git a/arch/sparc/include/asm/checksum_32.h b/arch/sparc/include/asm/checksum_32.h index 479a0b812af5..ce11e0ad80c7 100644 --- a/arch/sparc/include/asm/checksum_32.h +++ b/arch/sparc/include/asm/checksum_32.h @@ -42,7 +42,7 @@ __wsum csum_partial(const void *buff, int len, __wsum sum); unsigned int __csum_partial_copy_sparc_generic (const unsigned char *, unsigned char *); static inline __wsum -csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum) +csum_partial_copy_nocheck(const void *src, void *dst, int len) { register unsigned int ret asm("o0") = (unsigned int)src; register char *d asm("o1") = dst; @@ -50,9 +50,9 @@ csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum) __asm__ __volatile__ ( "call __csum_partial_copy_sparc_generic\n\t" - " mov %6, %%g7\n" + " mov -1, %%g7\n" : "=&r" (ret), "=&r" (d), "=&r" (l) - : "0" (ret), "1" (d), "2" (l), "r" (sum) + : "0" (ret), "1" (d), "2" (l) : "o2", "o3", "o4", "o5", "o7", "g2", "g3", "g4", "g5", "g7", "memory", "cc"); @@ -60,65 +60,19 @@ csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum) } static inline __wsum -csum_and_copy_from_user(const void __user *src, void *dst, int len, - __wsum sum, int *err) - { - register unsigned long ret asm("o0") = (unsigned long)src; - register char *d asm("o1") = dst; - register int l asm("g1") = len; - register __wsum s asm("g7") = sum; - - if 
(unlikely(!access_ok(src, len))) { - if (len) - *err = -EFAULT; - return sum; - } - - __asm__ __volatile__ ( - ".section __ex_table,#alloc\n\t" - ".align 4\n\t" - ".word 1f,2\n\t" - ".previous\n" - "1:\n\t" - "call __csum_partial_copy_sparc_generic\n\t" - " st %8, [%%sp + 64]\n" - : "=&r" (ret), "=&r" (d), "=&r" (l), "=&r" (s) - : "0" (ret), "1" (d), "2" (l), "3" (s), "r" (err) - : "o2", "o3", "o4", "o5", "o7", "g2", "g3", "g4", "g5", - "cc", "memory"); - return (__force __wsum)ret; +csum_and_copy_from_user(const void __user *src, void *dst, int len) +{ + if (unlikely(!access_ok(src, len))) + return 0; + return csum_partial_copy_nocheck((__force void *)src, dst, len); } -#define HAVE_CSUM_COPY_USER - static inline __wsum -csum_and_copy_to_user(const void *src, void __user *dst, int len, - __wsum sum, int *err) +csum_and_copy_to_user(const void *src, void __user *dst, int len) { - if (!access_ok(dst, len)) { - *err = -EFAULT; - return sum; - } else { - register unsigned long ret asm("o0") = (unsigned long)src; - register char __user *d asm("o1") = dst; - register int l asm("g1") = len; - register __wsum s asm("g7") = sum; - - __asm__ __volatile__ ( - ".section __ex_table,#alloc\n\t" - ".align 4\n\t" - ".word 1f,1\n\t" - ".previous\n" - "1:\n\t" - "call __csum_partial_copy_sparc_generic\n\t" - " st %8, [%%sp + 64]\n" - : "=&r" (ret), "=&r" (d), "=&r" (l), "=&r" (s) - : "0" (ret), "1" (d), "2" (l), "3" (s), "r" (err) - : "o2", "o3", "o4", "o5", "o7", - "g2", "g3", "g4", "g5", - "cc", "memory"); - return (__force __wsum)ret; - } + if (!access_ok(dst, len)) + return 0; + return csum_partial_copy_nocheck(src, (__force void *)dst, len); } /* ihl is always 5 or greater, almost always is 5, and iph is word aligned diff --git a/arch/sparc/include/asm/checksum_64.h b/arch/sparc/include/asm/checksum_64.h index 0fa4433f5662..d6b59461e064 100644 --- a/arch/sparc/include/asm/checksum_64.h +++ b/arch/sparc/include/asm/checksum_64.h @@ -38,42 +38,9 @@ __wsum csum_partial(const void * buff, int len, __wsum sum); * here even more important to align src and dst on a 32-bit (or even * better 64-bit) boundary */ -__wsum csum_partial_copy_nocheck(const void *src, void *dst, - int len, __wsum sum); - -long __csum_partial_copy_from_user(const void __user *src, - void *dst, int len, - __wsum sum); - -static inline __wsum -csum_and_copy_from_user(const void __user *src, - void *dst, int len, - __wsum sum, int *err) -{ - long ret = __csum_partial_copy_from_user(src, dst, len, sum); - if (ret < 0) - *err = -EFAULT; - return (__force __wsum) ret; -} - -/* - * Copy and checksum to user - */ -#define HAVE_CSUM_COPY_USER -long __csum_partial_copy_to_user(const void *src, - void __user *dst, int len, - __wsum sum); - -static inline __wsum -csum_and_copy_to_user(const void *src, - void __user *dst, int len, - __wsum sum, int *err) -{ - long ret = __csum_partial_copy_to_user(src, dst, len, sum); - if (ret < 0) - *err = -EFAULT; - return (__force __wsum) ret; -} +__wsum csum_partial_copy_nocheck(const void *src, void *dst, int len); +__wsum csum_and_copy_from_user(const void __user *src, void *dst, int len); +__wsum csum_and_copy_to_user(const void *src, void __user *dst, int len); /* ihl is always 5 or greater, almost always is 5, and iph is word aligned * the majority of the time. 
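A note on the checksum hunks above: this follows the tree-wide calling-convention change in which csum_partial_copy_nocheck(), csum_and_copy_from_user() and csum_and_copy_to_user() lose their seed and error-pointer arguments. A fault (or a failed access_ok()) is now reported by returning 0, and to keep 0 unambiguous the copy routines seed the running sum with a nonzero value — visible above as the new "mov -1, %%g7" in checksum_32.h and as "mov 1, %o3" in the csum_copy.S hunk further down. A minimal C sketch of the same convention, modelled on the generic fallback in include/net/checksum.h (the names are real; the body is a paraphrase, not the sparc code):

	static __always_inline __wsum
	csum_and_copy_from_user(const void __user *src, void *dst, int len)
	{
		if (copy_from_user(dst, src, len))
			return 0;			/* fault: 0 is never a valid result */
		return csum_partial(dst, len, ~0U);	/* nonzero seed keeps the sum nonzero */
	}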
diff --git a/arch/sparc/include/asm/compat.h b/arch/sparc/include/asm/compat.h index 40a267b3bd52..b85842cda99f 100644 --- a/arch/sparc/include/asm/compat.h +++ b/arch/sparc/include/asm/compat.h @@ -21,8 +21,7 @@ typedef s16 compat_nlink_t; typedef u16 compat_ipc_pid_t; typedef u32 compat_caddr_t; typedef __kernel_fsid_t compat_fsid_t; -typedef s64 compat_s64; -typedef u64 compat_u64; + struct compat_stat { compat_dev_t st_dev; compat_ino_t st_ino; diff --git a/arch/sparc/include/asm/io_32.h b/arch/sparc/include/asm/io_32.h index 9a52d9506f80..549f0a72280d 100644 --- a/arch/sparc/include/asm/io_32.h +++ b/arch/sparc/include/asm/io_32.h @@ -11,6 +11,13 @@ #define memcpy_fromio(d,s,sz) _memcpy_fromio(d,s,sz) #define memcpy_toio(d,s,sz) _memcpy_toio(d,s,sz) +/* + * Bus number may be embedded in the higher bits of the physical address. + * This is why we have no bus number argument to ioremap(). + */ +void __iomem *ioremap(phys_addr_t offset, size_t size); +void iounmap(volatile void __iomem *addr); + #include <asm-generic/io.h> static inline void _memset_io(volatile void __iomem *dst, @@ -121,14 +128,6 @@ static inline void sbus_memcpy_toio(volatile void __iomem *dst, } } -#ifdef __KERNEL__ - -/* - * Bus number may be embedded in the higher bits of the physical address. - * This is why we have no bus number argument to ioremap(). - */ -void __iomem *ioremap(phys_addr_t offset, size_t size); -void iounmap(volatile void __iomem *addr); /* Create a virtual mapping cookie for an IO port range */ void __iomem *ioport_map(unsigned long port, unsigned int nr); void ioport_unmap(void __iomem *); @@ -148,8 +147,6 @@ static inline int sbus_can_burst64(void) struct device; void sbus_set_sbus64(struct device *, int); -#endif - #define __ARCH_HAS_NO_PAGE_ZERO_MAPPED 1 diff --git a/arch/sparc/kernel/auxio_64.c b/arch/sparc/kernel/auxio_64.c index 4843f48bfe85..774a82b0c649 100644 --- a/arch/sparc/kernel/auxio_64.c +++ b/arch/sparc/kernel/auxio_64.c @@ -87,7 +87,6 @@ void auxio_set_lte(int on) __auxio_sbus_set_lte(on); break; case AUXIO_TYPE_EBUS: - /* FALL-THROUGH */ default: break; } diff --git a/arch/sparc/kernel/central.c b/arch/sparc/kernel/central.c index bfae98ab8638..23f8838dd96e 100644 --- a/arch/sparc/kernel/central.c +++ b/arch/sparc/kernel/central.c @@ -55,7 +55,7 @@ static int clock_board_calc_nslots(struct clock_board *p) else return 5; } - /* Fallthrough */ + fallthrough; default: return 4; } diff --git a/arch/sparc/kernel/iommu-common.c b/arch/sparc/kernel/iommu-common.c index 59cb16691322..23ca75f09277 100644 --- a/arch/sparc/kernel/iommu-common.c +++ b/arch/sparc/kernel/iommu-common.c @@ -166,13 +166,6 @@ unsigned long iommu_tbl_range_alloc(struct device *dev, } } - if (dev) - boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1, - 1 << iommu->table_shift); - else - boundary_size = ALIGN(1ULL << 32, 1 << iommu->table_shift); - - boundary_size = boundary_size >> iommu->table_shift; /* * if the skip_span_boundary_check had been set during init, we set * things up so that iommu_is_span_boundary() merely checks if the @@ -181,6 +174,9 @@ unsigned long iommu_tbl_range_alloc(struct device *dev, if ((iommu->flags & IOMMU_NO_SPAN_BOUND) != 0) { shift = 0; boundary_size = iommu->poolsize * iommu->nr_pools; + } else { + boundary_size = dma_get_seg_boundary_nr_pages(dev, + iommu->table_shift); } n = iommu_area_alloc(iommu->map, limit, start, npages, shift, boundary_size, align_mask); diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c index 4ae7388b1bff..a034f571d869 100644 --- 
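The seg_boundary hunks in iommu-common.c above and iommu.c just below replace open-coded ALIGN()-and-shift arithmetic with dma_get_seg_boundary_nr_pages(). For reference, the helper introduced by the same series looks roughly like this (per include/linux/dma-mapping.h; the !dev branch covers the default that iommu-common.c used to spell as ALIGN(1ULL << 32, ...)):

	static inline unsigned long dma_get_seg_boundary_nr_pages(struct device *dev,
			unsigned int page_shift)
	{
		if (!dev)
			return (U32_MAX >> page_shift) + 1;
		return (dma_get_seg_boundary(dev) >> page_shift) + 1;
	}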
a/arch/sparc/kernel/iommu.c +++ b/arch/sparc/kernel/iommu.c @@ -10,7 +10,7 @@ #include <linux/slab.h> #include <linux/delay.h> #include <linux/device.h> -#include <linux/dma-mapping.h> +#include <linux/dma-map-ops.h> #include <linux/errno.h> #include <linux/iommu-helper.h> #include <linux/bitmap.h> @@ -472,8 +472,7 @@ static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist, outs->dma_length = 0; max_seg_size = dma_get_max_seg_size(dev); - seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1, - IO_PAGE_SIZE) >> IO_PAGE_SHIFT; + seg_boundary_size = dma_get_seg_boundary_nr_pages(dev, IO_PAGE_SHIFT); base_shift = iommu->tbl.table_map_base >> IO_PAGE_SHIFT; for_each_sg(sglist, s, nelems, i) { unsigned long paddr, npages, entry, out_entry = 0, slen; diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c index d6874c9b639f..8e1d72a16759 100644 --- a/arch/sparc/kernel/ioport.c +++ b/arch/sparc/kernel/ioport.c @@ -38,7 +38,7 @@ #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/scatterlist.h> -#include <linux/dma-noncoherent.h> +#include <linux/dma-map-ops.h> #include <linux/of_device.h> #include <asm/io.h> diff --git a/arch/sparc/kernel/kgdb_32.c b/arch/sparc/kernel/kgdb_32.c index 7580775a14b9..58ad3f7de1fb 100644 --- a/arch/sparc/kernel/kgdb_32.c +++ b/arch/sparc/kernel/kgdb_32.c @@ -122,7 +122,7 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code, linux_regs->pc = addr; linux_regs->npc = addr + 4; } - /* fall through */ + fallthrough; case 'D': case 'k': diff --git a/arch/sparc/kernel/kgdb_64.c b/arch/sparc/kernel/kgdb_64.c index 5d6c2d287e85..177746ae2c81 100644 --- a/arch/sparc/kernel/kgdb_64.c +++ b/arch/sparc/kernel/kgdb_64.c @@ -148,7 +148,7 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code, linux_regs->tpc = addr; linux_regs->tnpc = addr + 4; } - /* fall through */ + fallthrough; case 'D': case 'k': diff --git a/arch/sparc/kernel/kprobes.c b/arch/sparc/kernel/kprobes.c index dfbca2470536..217c21a6986a 100644 --- a/arch/sparc/kernel/kprobes.c +++ b/arch/sparc/kernel/kprobes.c @@ -453,6 +453,7 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs) { ri->ret_addr = (kprobe_opcode_t *)(regs->u_regs[UREG_RETPC] + 8); + ri->fp = NULL; /* Replace the return addr with trampoline addr */ regs->u_regs[UREG_RETPC] = @@ -465,58 +466,12 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, static int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) { - struct kretprobe_instance *ri = NULL; - struct hlist_head *head, empty_rp; - struct hlist_node *tmp; - unsigned long flags, orig_ret_address = 0; - unsigned long trampoline_address =(unsigned long)&kretprobe_trampoline; + unsigned long orig_ret_address = 0; - INIT_HLIST_HEAD(&empty_rp); - kretprobe_hash_lock(current, &head, &flags); - - /* - * It is possible to have multiple instances associated with a given - * task either because an multiple functions in the call path - * have a return probe installed on them, and/or more than one return - * return probe was registered for a target function. 
- * - * We can handle this because: - * - instances are always inserted at the head of the list - * - when multiple return probes are registered for the same - * function, the first instance's ret_addr will point to the - * real return address, and all the rest will point to - * kretprobe_trampoline - */ - hlist_for_each_entry_safe(ri, tmp, head, hlist) { - if (ri->task != current) - /* another task is sharing our hash bucket */ - continue; - - if (ri->rp && ri->rp->handler) - ri->rp->handler(ri, regs); - - orig_ret_address = (unsigned long)ri->ret_addr; - recycle_rp_inst(ri, &empty_rp); - - if (orig_ret_address != trampoline_address) - /* - * This is the real return address. Any other - * instances associated with this task are for - * other calls deeper on the call stack - */ - break; - } - - kretprobe_assert(ri, orig_ret_address, trampoline_address); + orig_ret_address = __kretprobe_trampoline_handler(regs, &kretprobe_trampoline, NULL); regs->tpc = orig_ret_address; regs->tnpc = orig_ret_address + 4; - kretprobe_hash_unlock(current, &flags); - - hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) { - hlist_del(&ri->hlist); - kfree(ri); - } /* * By returning a non-zero value, we are telling * kprobe_handler() that we don't want the post_handler diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c index 14b93c5564e3..9de57e88f7a1 100644 --- a/arch/sparc/kernel/pci_sun4v.c +++ b/arch/sparc/kernel/pci_sun4v.c @@ -16,6 +16,7 @@ #include <linux/export.h> #include <linux/log2.h> #include <linux/of_device.h> +#include <linux/dma-map-ops.h> #include <asm/iommu-common.h> #include <asm/iommu.h> @@ -508,8 +509,7 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist, iommu_batch_start(dev, prot, ~0UL); max_seg_size = dma_get_max_seg_size(dev); - seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1, - IO_PAGE_SIZE) >> IO_PAGE_SHIFT; + seg_boundary_size = dma_get_seg_boundary_nr_pages(dev, IO_PAGE_SHIFT); mask = *dev->dma_mask; if (!iommu_use_atu(iommu, mask)) diff --git a/arch/sparc/kernel/pcr.c b/arch/sparc/kernel/pcr.c index c0886b400dad..2a12c86af956 100644 --- a/arch/sparc/kernel/pcr.c +++ b/arch/sparc/kernel/pcr.c @@ -359,7 +359,7 @@ int __init pcr_arch_init(void) * counter overflow interrupt so we can't make use of * their hardware currently. 
*/ - /* fallthrough */ + fallthrough; default: err = -ENODEV; goto out_unregister; diff --git a/arch/sparc/kernel/process.c b/arch/sparc/kernel/process.c index 5234b5ccc0b9..0442ab00518d 100644 --- a/arch/sparc/kernel/process.c +++ b/arch/sparc/kernel/process.c @@ -25,7 +25,7 @@ asmlinkage long sparc_fork(struct pt_regs *regs) .stack = regs->u_regs[UREG_FP], }; - ret = _do_fork(&args); + ret = kernel_clone(&args); /* If we get an error and potentially restart the system * call, we're screwed because copy_thread() clobbered @@ -50,7 +50,7 @@ asmlinkage long sparc_vfork(struct pt_regs *regs) .stack = regs->u_regs[UREG_FP], }; - ret = _do_fork(&args); + ret = kernel_clone(&args); /* If we get an error and potentially restart the system * call, we're screwed because copy_thread() clobbered @@ -96,7 +96,7 @@ asmlinkage long sparc_clone(struct pt_regs *regs) else args.stack = regs->u_regs[UREG_FP]; - ret = _do_fork(&args); + ret = kernel_clone(&args); /* If we get an error and potentially restart the system * call, we're screwed because copy_thread() clobbered diff --git a/arch/sparc/kernel/prom_32.c b/arch/sparc/kernel/prom_32.c index da8902295c8c..3df960c137f7 100644 --- a/arch/sparc/kernel/prom_32.c +++ b/arch/sparc/kernel/prom_32.c @@ -224,7 +224,7 @@ void __init of_console_init(void) case PROMDEV_TTYB: skip = 1; - /* FALLTHRU */ + fallthrough; case PROMDEV_TTYA: type = "serial"; diff --git a/arch/sparc/kernel/signal32.c b/arch/sparc/kernel/signal32.c index e2c6f0abda00..e9695a06492f 100644 --- a/arch/sparc/kernel/signal32.c +++ b/arch/sparc/kernel/signal32.c @@ -646,7 +646,7 @@ static inline void syscall_restart32(unsigned long orig_i0, struct pt_regs *regs case ERESTARTSYS: if (!(sa->sa_flags & SA_RESTART)) goto no_system_call_restart; - /* fallthrough */ + fallthrough; case ERESTARTNOINTR: regs->u_regs[UREG_I0] = orig_i0; regs->tpc -= 4; @@ -686,7 +686,7 @@ void do_signal32(struct pt_regs * regs) regs->tpc -= 4; regs->tnpc -= 4; pt_regs_clear_syscall(regs); - /* fall through */ + fallthrough; case ERESTART_RESTARTBLOCK: regs->u_regs[UREG_G1] = __NR_restart_syscall; regs->tpc -= 4; diff --git a/arch/sparc/kernel/signal_32.c b/arch/sparc/kernel/signal_32.c index f1f8c8ebe641..d0e0025ee3ba 100644 --- a/arch/sparc/kernel/signal_32.c +++ b/arch/sparc/kernel/signal_32.c @@ -440,7 +440,7 @@ static inline void syscall_restart(unsigned long orig_i0, struct pt_regs *regs, case ERESTARTSYS: if (!(sa->sa_flags & SA_RESTART)) goto no_system_call_restart; - /* fallthrough */ + fallthrough; case ERESTARTNOINTR: regs->u_regs[UREG_I0] = orig_i0; regs->pc -= 4; @@ -506,7 +506,7 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0) regs->pc -= 4; regs->npc -= 4; pt_regs_clear_syscall(regs); - /* fall through */ + fallthrough; case ERESTART_RESTARTBLOCK: regs->u_regs[UREG_G1] = __NR_restart_syscall; regs->pc -= 4; diff --git a/arch/sparc/kernel/signal_64.c b/arch/sparc/kernel/signal_64.c index 6937339a272c..255264bcb46a 100644 --- a/arch/sparc/kernel/signal_64.c +++ b/arch/sparc/kernel/signal_64.c @@ -461,7 +461,7 @@ static inline void syscall_restart(unsigned long orig_i0, struct pt_regs *regs, case ERESTARTSYS: if (!(sa->sa_flags & SA_RESTART)) goto no_system_call_restart; - /* fallthrough */ + fallthrough; case ERESTARTNOINTR: regs->u_regs[UREG_I0] = orig_i0; regs->tpc -= 4; @@ -532,7 +532,7 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0) regs->tpc -= 4; regs->tnpc -= 4; pt_regs_clear_syscall(regs); - /* fall through */ + fallthrough; case ERESTART_RESTARTBLOCK: 
regs->u_regs[UREG_G1] = __NR_restart_syscall; regs->tpc -= 4; diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c index e286e2badc8a..e38d8bf454e8 100644 --- a/arch/sparc/kernel/smp_64.c +++ b/arch/sparc/kernel/smp_64.c @@ -1039,38 +1039,9 @@ void smp_fetch_global_pmu(void) * are flush_tlb_*() routines, and these run after flush_cache_*() * which performs the flushw. * - * The SMP TLB coherency scheme we use works as follows: - * - * 1) mm->cpu_vm_mask is a bit mask of which cpus an address - * space has (potentially) executed on, this is the heuristic - * we use to avoid doing cross calls. - * - * Also, for flushing from kswapd and also for clones, we - * use cpu_vm_mask as the list of cpus to make run the TLB. - * - * 2) TLB context numbers are shared globally across all processors - * in the system, this allows us to play several games to avoid - * cross calls. - * - * One invariant is that when a cpu switches to a process, and - * that processes tsk->active_mm->cpu_vm_mask does not have the - * current cpu's bit set, that tlb context is flushed locally. - * - * If the address space is non-shared (ie. mm->count == 1) we avoid - * cross calls when we want to flush the currently running process's - * tlb state. This is done by clearing all cpu bits except the current - * processor's in current->mm->cpu_vm_mask and performing the - * flush locally only. This will force any subsequent cpus which run - * this task to flush the context from the local tlb if the process - * migrates to another cpu (again). - * - * 3) For shared address spaces (threads) and swapping we bite the - * bullet for most cases and perform the cross call (but only to - * the cpus listed in cpu_vm_mask). - * - * The performance gain from "optimizing" away the cross call for threads is - * questionable (in theory the big win for threads is the massive sharing of - * address space state across processors). + * mm->cpu_vm_mask is a bit mask of which cpus an address + * space has (potentially) executed on, this is the heuristic + * we use to limit cross calls. 
*/ /* This currently is only used by the hugetlb arch pre-fault @@ -1080,18 +1051,13 @@ void smp_fetch_global_pmu(void) void smp_flush_tlb_mm(struct mm_struct *mm) { u32 ctx = CTX_HWBITS(mm->context); - int cpu = get_cpu(); - if (atomic_read(&mm->mm_users) == 1) { - cpumask_copy(mm_cpumask(mm), cpumask_of(cpu)); - goto local_flush_and_out; - } + get_cpu(); smp_cross_call_masked(&xcall_flush_tlb_mm, ctx, 0, 0, mm_cpumask(mm)); -local_flush_and_out: __flush_tlb_mm(ctx, SECONDARY_CONTEXT); put_cpu(); @@ -1114,17 +1080,15 @@ void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long { u32 ctx = CTX_HWBITS(mm->context); struct tlb_pending_info info; - int cpu = get_cpu(); + + get_cpu(); info.ctx = ctx; info.nr = nr; info.vaddrs = vaddrs; - if (mm == current->mm && atomic_read(&mm->mm_users) == 1) - cpumask_copy(mm_cpumask(mm), cpumask_of(cpu)); - else - smp_call_function_many(mm_cpumask(mm), tlb_pending_func, - &info, 1); + smp_call_function_many(mm_cpumask(mm), tlb_pending_func, + &info, 1); __flush_tlb_pending(ctx, nr, vaddrs); @@ -1134,14 +1098,13 @@ void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr) { unsigned long context = CTX_HWBITS(mm->context); - int cpu = get_cpu(); - if (mm == current->mm && atomic_read(&mm->mm_users) == 1) - cpumask_copy(mm_cpumask(mm), cpumask_of(cpu)); - else - smp_cross_call_masked(&xcall_flush_tlb_page, - context, vaddr, 0, - mm_cpumask(mm)); + get_cpu(); + + smp_cross_call_masked(&xcall_flush_tlb_page, + context, vaddr, 0, + mm_cpumask(mm)); + __flush_tlb_page(context, vaddr); put_cpu(); diff --git a/arch/sparc/kernel/syscalls/syscall.tbl b/arch/sparc/kernel/syscalls/syscall.tbl index 4af114e84f20..78160260991b 100644 --- a/arch/sparc/kernel/syscalls/syscall.tbl +++ b/arch/sparc/kernel/syscalls/syscall.tbl @@ -38,7 +38,7 @@ 23 64 setuid sys_setuid 24 32 getuid sys_getuid16 24 64 getuid sys_getuid -25 common vmsplice sys_vmsplice compat_sys_vmsplice +25 common vmsplice sys_vmsplice 26 common ptrace sys_ptrace compat_sys_ptrace 27 common alarm sys_alarm 28 common sigaltstack sys_sigaltstack compat_sys_sigaltstack @@ -149,8 +149,8 @@ 117 common getrusage sys_getrusage compat_sys_getrusage 118 common getsockopt sys_getsockopt sys_getsockopt 119 common getcwd sys_getcwd -120 common readv sys_readv compat_sys_readv -121 common writev sys_writev compat_sys_writev +120 common readv sys_readv +121 common writev sys_writev 122 common settimeofday sys_settimeofday compat_sys_settimeofday 123 32 fchown sys_fchown16 123 64 fchown sys_fchown @@ -201,7 +201,7 @@ 164 64 utrap_install sys_utrap_install 165 common quotactl sys_quotactl 166 common set_tid_address sys_set_tid_address -167 common mount sys_mount compat_sys_mount +167 common mount sys_mount 168 common ustat sys_ustat compat_sys_ustat 169 common setxattr sys_setxattr 170 common lsetxattr sys_lsetxattr @@ -406,8 +406,8 @@ 335 common syncfs sys_syncfs 336 common sendmmsg sys_sendmmsg compat_sys_sendmmsg 337 common setns sys_setns -338 common process_vm_readv sys_process_vm_readv compat_sys_process_vm_readv -339 common process_vm_writev sys_process_vm_writev compat_sys_process_vm_writev +338 common process_vm_readv sys_process_vm_readv +339 common process_vm_writev sys_process_vm_writev 340 32 kern_features sys_ni_syscall sys_kern_features 340 64 kern_features sys_kern_features 341 common kcmp sys_kcmp @@ -485,3 +485,4 @@ 437 common openat2 sys_openat2 438 common pidfd_getfd sys_pidfd_getfd 439 common 
faccessat2 sys_faccessat2 +440 common process_madvise sys_process_madvise diff --git a/arch/sparc/kernel/vmlinux.lds.S b/arch/sparc/kernel/vmlinux.lds.S index f99e99e58075..d55ae65a07ad 100644 --- a/arch/sparc/kernel/vmlinux.lds.S +++ b/arch/sparc/kernel/vmlinux.lds.S @@ -187,6 +187,7 @@ SECTIONS STABS_DEBUG DWARF_DEBUG + ELF_DETAILS DISCARDS } diff --git a/arch/sparc/lib/checksum_32.S b/arch/sparc/lib/checksum_32.S index 6a5469c97246..7488d130faf7 100644 --- a/arch/sparc/lib/checksum_32.S +++ b/arch/sparc/lib/checksum_32.S @@ -144,44 +144,21 @@ cpte: bne csum_partial_end_cruft ! yep, handle it cpout: retl ! get outta here mov %o2, %o0 ! return computed csum - .globl __csum_partial_copy_start, __csum_partial_copy_end -__csum_partial_copy_start: - /* Work around cpp -rob */ #define ALLOC #alloc #define EXECINSTR #execinstr -#define EX(x,y,a,b) \ -98: x,y; \ - .section .fixup,ALLOC,EXECINSTR; \ - .align 4; \ -99: ba 30f; \ - a, b, %o3; \ - .section __ex_table,ALLOC; \ - .align 4; \ - .word 98b, 99b; \ - .text; \ - .align 4 - -#define EX2(x,y) \ -98: x,y; \ - .section __ex_table,ALLOC; \ - .align 4; \ - .word 98b, 30f; \ - .text; \ - .align 4 - -#define EX3(x,y) \ +#define EX(x,y) \ 98: x,y; \ .section __ex_table,ALLOC; \ .align 4; \ - .word 98b, 96f; \ + .word 98b, cc_fault; \ .text; \ .align 4 -#define EXT(start,end,handler) \ +#define EXT(start,end) \ .section __ex_table,ALLOC; \ .align 4; \ - .word start, 0, end, handler; \ + .word start, 0, end, cc_fault; \ .text; \ .align 4 @@ -252,21 +229,21 @@ __csum_partial_copy_start: cc_end_cruft: be 1f andcc %o3, 4, %g0 - EX(ldd [%o0 + 0x00], %g2, and %o3, 0xf) + EX(ldd [%o0 + 0x00], %g2) add %o1, 8, %o1 addcc %g2, %g7, %g7 add %o0, 8, %o0 addxcc %g3, %g7, %g7 - EX2(st %g2, [%o1 - 0x08]) + EX(st %g2, [%o1 - 0x08]) addx %g0, %g7, %g7 andcc %o3, 4, %g0 - EX2(st %g3, [%o1 - 0x04]) + EX(st %g3, [%o1 - 0x04]) 1: be 1f andcc %o3, 3, %o3 - EX(ld [%o0 + 0x00], %g2, add %o3, 4) + EX(ld [%o0 + 0x00], %g2) add %o1, 4, %o1 addcc %g2, %g7, %g7 - EX2(st %g2, [%o1 - 0x04]) + EX(st %g2, [%o1 - 0x04]) addx %g0, %g7, %g7 andcc %o3, 3, %g0 add %o0, 4, %o0 @@ -276,14 +253,14 @@ cc_end_cruft: subcc %o3, 2, %o3 b 4f or %g0, %g0, %o4 -2: EX(lduh [%o0 + 0x00], %o4, add %o3, 2) +2: EX(lduh [%o0 + 0x00], %o4) add %o0, 2, %o0 - EX2(sth %o4, [%o1 + 0x00]) + EX(sth %o4, [%o1 + 0x00]) be 6f add %o1, 2, %o1 sll %o4, 16, %o4 -4: EX(ldub [%o0 + 0x00], %o5, add %g0, 1) - EX2(stb %o5, [%o1 + 0x00]) +4: EX(ldub [%o0 + 0x00], %o5) + EX(stb %o5, [%o1 + 0x00]) sll %o5, 8, %o5 or %o5, %o4, %o4 6: addcc %o4, %g7, %g7 @@ -306,9 +283,9 @@ cc_dword_align: andcc %o0, 0x2, %g0 be 1f andcc %o0, 0x4, %g0 - EX(lduh [%o0 + 0x00], %g4, add %g1, 0) + EX(lduh [%o0 + 0x00], %g4) sub %g1, 2, %g1 - EX2(sth %g4, [%o1 + 0x00]) + EX(sth %g4, [%o1 + 0x00]) add %o0, 2, %o0 sll %g4, 16, %g4 addcc %g4, %g7, %g7 @@ -322,9 +299,9 @@ cc_dword_align: or %g3, %g7, %g7 1: be 3f andcc %g1, 0xffffff80, %g0 - EX(ld [%o0 + 0x00], %g4, add %g1, 0) + EX(ld [%o0 + 0x00], %g4) sub %g1, 4, %g1 - EX2(st %g4, [%o1 + 0x00]) + EX(st %g4, [%o1 + 0x00]) add %o0, 4, %o0 addcc %g4, %g7, %g7 add %o1, 4, %o1 @@ -354,7 +331,7 @@ __csum_partial_copy_sparc_generic: CSUMCOPY_BIGCHUNK(%o0,%o1,%g7,0x20,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3) CSUMCOPY_BIGCHUNK(%o0,%o1,%g7,0x40,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3) CSUMCOPY_BIGCHUNK(%o0,%o1,%g7,0x60,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3) -10: EXT(5b, 10b, 20f) ! note for exception handling +10: EXT(5b, 10b) ! note for exception handling sub %g1, 128, %g1 ! detract from length addx %g0, %g7, %g7 ! 
add in last carry bit andcc %g1, 0xffffff80, %g0 ! more to csum? @@ -379,7 +356,7 @@ cctbl: CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x68,%g2,%g3,%g4,%g5) CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x28,%g2,%g3,%g4,%g5) CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x18,%g2,%g3,%g4,%g5) CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x08,%g2,%g3,%g4,%g5) -12: EXT(cctbl, 12b, 22f) ! note for exception table handling +12: EXT(cctbl, 12b) ! note for exception table handling addx %g0, %g7, %g7 andcc %o3, 0xf, %g0 ! check for low bits set ccte: bne cc_end_cruft ! something left, handle it out of band @@ -390,7 +367,7 @@ ccdbl: CSUMCOPY_BIGCHUNK_ALIGNED(%o0,%o1,%g7,0x00,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o CSUMCOPY_BIGCHUNK_ALIGNED(%o0,%o1,%g7,0x20,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3) CSUMCOPY_BIGCHUNK_ALIGNED(%o0,%o1,%g7,0x40,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3) CSUMCOPY_BIGCHUNK_ALIGNED(%o0,%o1,%g7,0x60,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3) -11: EXT(ccdbl, 11b, 21f) ! note for exception table handling +11: EXT(ccdbl, 11b) ! note for exception table handling sub %g1, 128, %g1 ! detract from length addx %g0, %g7, %g7 ! add in last carry bit andcc %g1, 0xffffff80, %g0 ! more to csum? @@ -407,9 +384,9 @@ ccslow: cmp %g1, 0 be,a 1f srl %g1, 1, %g4 sub %g1, 1, %g1 - EX(ldub [%o0], %g5, add %g1, 1) + EX(ldub [%o0], %g5) add %o0, 1, %o0 - EX2(stb %g5, [%o1]) + EX(stb %g5, [%o1]) srl %g1, 1, %g4 add %o1, 1, %o1 1: cmp %g4, 0 @@ -418,34 +395,34 @@ ccslow: cmp %g1, 0 andcc %o0, 2, %g0 be,a 1f srl %g4, 1, %g4 - EX(lduh [%o0], %o4, add %g1, 0) + EX(lduh [%o0], %o4) sub %g1, 2, %g1 srl %o4, 8, %g2 sub %g4, 1, %g4 - EX2(stb %g2, [%o1]) + EX(stb %g2, [%o1]) add %o4, %g5, %g5 - EX2(stb %o4, [%o1 + 1]) + EX(stb %o4, [%o1 + 1]) add %o0, 2, %o0 srl %g4, 1, %g4 add %o1, 2, %o1 1: cmp %g4, 0 be,a 2f andcc %g1, 2, %g0 - EX3(ld [%o0], %o4) + EX(ld [%o0], %o4) 5: srl %o4, 24, %g2 srl %o4, 16, %g3 - EX2(stb %g2, [%o1]) + EX(stb %g2, [%o1]) srl %o4, 8, %g2 - EX2(stb %g3, [%o1 + 1]) + EX(stb %g3, [%o1 + 1]) add %o0, 4, %o0 - EX2(stb %g2, [%o1 + 2]) + EX(stb %g2, [%o1 + 2]) addcc %o4, %g5, %g5 - EX2(stb %o4, [%o1 + 3]) + EX(stb %o4, [%o1 + 3]) addx %g5, %g0, %g5 ! I am now to lazy to optimize this (question it add %o1, 4, %o1 ! is worthy). Maybe some day - with the sll/srl subcc %g4, 1, %g4 ! tricks bne,a 5b - EX3(ld [%o0], %o4) + EX(ld [%o0], %o4) sll %g5, 16, %g2 srl %g5, 16, %g5 srl %g2, 16, %g2 @@ -453,19 +430,19 @@ ccslow: cmp %g1, 0 add %g2, %g5, %g5 2: be,a 3f andcc %g1, 1, %g0 - EX(lduh [%o0], %o4, and %g1, 3) + EX(lduh [%o0], %o4) andcc %g1, 1, %g0 srl %o4, 8, %g2 add %o0, 2, %o0 - EX2(stb %g2, [%o1]) + EX(stb %g2, [%o1]) add %g5, %o4, %g5 - EX2(stb %o4, [%o1 + 1]) + EX(stb %o4, [%o1 + 1]) add %o1, 2, %o1 3: be,a 1f sll %g5, 16, %o4 - EX(ldub [%o0], %g2, add %g0, 1) + EX(ldub [%o0], %g2) sll %g2, 8, %o4 - EX2(stb %g2, [%o1]) + EX(stb %g2, [%o1]) add %g5, %o4, %g5 sll %g5, 16, %o4 1: addcc %o4, %g5, %g5 @@ -481,113 +458,10 @@ ccslow: cmp %g1, 0 4: addcc %g7, %g5, %g7 retl addx %g0, %g7, %o0 -__csum_partial_copy_end: /* We do these strange calculations for the csum_*_from_user case only, ie. * we only bother with faults on loads... */ -/* o2 = ((g2%20)&3)*8 - * o3 = g1 - (g2/20)*32 - o2 */ -20: - cmp %g2, 20 - blu,a 1f - and %g2, 3, %o2 - sub %g1, 32, %g1 - b 20b - sub %g2, 20, %g2 -1: - sll %o2, 3, %o2 - b 31f - sub %g1, %o2, %o3 - -/* o2 = (!(g2 & 15) ? 
0 : (((g2 & 15) + 1) & ~1)*8) - * o3 = g1 - (g2/16)*32 - o2 */ -21: - andcc %g2, 15, %o3 - srl %g2, 4, %g2 - be,a 1f - clr %o2 - add %o3, 1, %o3 - and %o3, 14, %o3 - sll %o3, 3, %o2 -1: - sll %g2, 5, %g2 - sub %g1, %g2, %o3 - b 31f - sub %o3, %o2, %o3 - -/* o0 += (g2/10)*16 - 0x70 - * 01 += (g2/10)*16 - 0x70 - * o2 = (g2 % 10) ? 8 : 0 - * o3 += 0x70 - (g2/10)*16 - o2 */ -22: - cmp %g2, 10 - blu,a 1f - sub %o0, 0x70, %o0 - add %o0, 16, %o0 - add %o1, 16, %o1 - sub %o3, 16, %o3 - b 22b - sub %g2, 10, %g2 -1: - sub %o1, 0x70, %o1 - add %o3, 0x70, %o3 - clr %o2 - tst %g2 - bne,a 1f - mov 8, %o2 -1: - b 31f - sub %o3, %o2, %o3 -96: - and %g1, 3, %g1 - sll %g4, 2, %g4 - add %g1, %g4, %o3 -30: -/* %o1 is dst - * %o3 is # bytes to zero out - * %o4 is faulting address - * %o5 is %pc where fault occurred */ - clr %o2 -31: -/* %o0 is src - * %o1 is dst - * %o2 is # of bytes to copy from src to dst - * %o3 is # bytes to zero out - * %o4 is faulting address - * %o5 is %pc where fault occurred */ - save %sp, -104, %sp - mov %i5, %o0 - mov %i7, %o1 - mov %i4, %o2 - call lookup_fault - mov %g7, %i4 - cmp %o0, 2 - bne 1f - add %g0, -EFAULT, %i5 - tst %i2 - be 2f - mov %i0, %o1 - mov %i1, %o0 -5: - call memcpy - mov %i2, %o2 - tst %o0 - bne,a 2f - add %i3, %i2, %i3 - add %i1, %i2, %i1 -2: - mov %i1, %o0 -6: - call __bzero - mov %i3, %o1 -1: - ld [%sp + 168], %o2 ! struct_ptr of parent - st %i5, [%o2] +cc_fault: ret - restore - - .section __ex_table,#alloc - .align 4 - .word 5b,2 - .word 6b,2 + clr %o0 diff --git a/arch/sparc/lib/csum_copy.S b/arch/sparc/lib/csum_copy.S index 26c644ba3ecb..0c0268e77155 100644 --- a/arch/sparc/lib/csum_copy.S +++ b/arch/sparc/lib/csum_copy.S @@ -68,9 +68,10 @@ .globl FUNC_NAME .type FUNC_NAME,#function EXPORT_SYMBOL(FUNC_NAME) -FUNC_NAME: /* %o0=src, %o1=dst, %o2=len, %o3=sum */ +FUNC_NAME: /* %o0=src, %o1=dst, %o2=len */ LOAD(prefetch, %o0 + 0x000, #n_reads) xor %o0, %o1, %g1 + mov 1, %o3 clr %o4 andcc %g1, 0x3, %g0 bne,pn %icc, 95f diff --git a/arch/sparc/lib/csum_copy_from_user.S b/arch/sparc/lib/csum_copy_from_user.S index d20b9594f0c7..b0ba8d4dd439 100644 --- a/arch/sparc/lib/csum_copy_from_user.S +++ b/arch/sparc/lib/csum_copy_from_user.S @@ -9,14 +9,14 @@ .section .fixup, "ax"; \ .align 4; \ 99: retl; \ - mov -1, %o0; \ + mov 0, %o0; \ .section __ex_table,"a";\ .align 4; \ .word 98b, 99b; \ .text; \ .align 4; -#define FUNC_NAME __csum_partial_copy_from_user +#define FUNC_NAME csum_and_copy_from_user #define LOAD(type,addr,dest) type##a [addr] %asi, dest #include "csum_copy.S" diff --git a/arch/sparc/lib/csum_copy_to_user.S b/arch/sparc/lib/csum_copy_to_user.S index d71c0c81e8ab..91ba36dbf7d2 100644 --- a/arch/sparc/lib/csum_copy_to_user.S +++ b/arch/sparc/lib/csum_copy_to_user.S @@ -9,14 +9,14 @@ .section .fixup,"ax"; \ .align 4; \ 99: retl; \ - mov -1, %o0; \ + mov 0, %o0; \ .section __ex_table,"a";\ .align 4; \ .word 98b, 99b; \ .text; \ .align 4; -#define FUNC_NAME __csum_partial_copy_to_user +#define FUNC_NAME csum_and_copy_to_user #define STORE(type,src,addr) type##a src, [addr] %asi #include "csum_copy.S" diff --git a/arch/sparc/math-emu/math_32.c b/arch/sparc/math-emu/math_32.c index 72e560ef4a09..d5beec856146 100644 --- a/arch/sparc/math-emu/math_32.c +++ b/arch/sparc/math-emu/math_32.c @@ -359,7 +359,7 @@ static int do_one_mathemu(u32 insn, unsigned long *pfsr, unsigned long *fregs) *pfsr |= (6 << 14); return 0; /* simulate invalid_fp_register exception */ } - /* fall through */ + fallthrough; case 2: if (freg & 1) { /* doublewords must have bit 5 zeroed */ 
*pfsr |= (6 << 14); @@ -380,7 +380,7 @@ static int do_one_mathemu(u32 insn, unsigned long *pfsr, unsigned long *fregs) *pfsr |= (6 << 14); return 0; /* simulate invalid_fp_register exception */ } - /* fall through */ + fallthrough; case 2: if (freg & 1) { /* doublewords must have bit 5 zeroed */ *pfsr |= (6 << 14); @@ -408,13 +408,13 @@ static int do_one_mathemu(u32 insn, unsigned long *pfsr, unsigned long *fregs) *pfsr |= (6 << 14); return 0; /* simulate invalid_fp_register exception */ } - /* fall through */ + fallthrough; case 2: if (freg & 1) { /* doublewords must have bit 5 zeroed */ *pfsr |= (6 << 14); return 0; } - /* fall through */ + fallthrough; case 1: rd = (void *)&fregs[freg]; break; diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c index 8071bfd72349..40ce087dfecf 100644 --- a/arch/sparc/mm/fault_32.c +++ b/arch/sparc/mm/fault_32.c @@ -288,8 +288,6 @@ no_context: if (fixup > 10) { extern const unsigned int __memset_start[]; extern const unsigned int __memset_end[]; - extern const unsigned int __csum_partial_copy_start[]; - extern const unsigned int __csum_partial_copy_end[]; #ifdef DEBUG_EXCEPTIONS printk("Exception: PC<%08lx> faddr<%08lx>\n", @@ -298,9 +296,7 @@ no_context: regs->pc, fixup, g2); #endif if ((regs->pc >= (unsigned long)__memset_start && - regs->pc < (unsigned long)__memset_end) || - (regs->pc >= (unsigned long)__csum_partial_copy_start && - regs->pc < (unsigned long)__csum_partial_copy_end)) { + regs->pc < (unsigned long)__memset_end)) { regs->u_regs[UREG_I4] = address; regs->u_regs[UREG_I5] = regs->pc; } diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c index fad6d3129904..96edf64d4fb3 100644 --- a/arch/sparc/mm/init_64.c +++ b/arch/sparc/mm/init_64.c @@ -1192,18 +1192,14 @@ int of_node_to_nid(struct device_node *dp) static void __init add_node_ranges(void) { - struct memblock_region *reg; + phys_addr_t start, end; unsigned long prev_max; + u64 i; memblock_resized: prev_max = memblock.memory.max; - for_each_memblock(memory, reg) { - unsigned long size = reg->size; - unsigned long start, end; - - start = reg->base; - end = start + size; + for_each_mem_range(i, &start, &end) { while (start < end) { unsigned long this_end; int nid; @@ -1211,7 +1207,7 @@ memblock_resized: this_end = memblock_nid_range(start, end, &nid); numadbg("Setting memblock NUMA node nid[%d] " - "start[%lx] end[%lx]\n", + "start[%llx] end[%lx]\n", nid, start, this_end); memblock_set_node(start, this_end - start, diff --git a/arch/sparc/mm/io-unit.c b/arch/sparc/mm/io-unit.c index 430a47a1b6ae..bf3e6d2fe5d9 100644 --- a/arch/sparc/mm/io-unit.c +++ b/arch/sparc/mm/io-unit.c @@ -11,7 +11,7 @@ #include <linux/spinlock.h> #include <linux/mm.h> #include <linux/bitops.h> -#include <linux/dma-mapping.h> +#include <linux/dma-map-ops.h> #include <linux/of.h> #include <linux/of_device.h> diff --git a/arch/sparc/mm/iommu.c b/arch/sparc/mm/iommu.c index 3a388b1c5d4b..0c0342e5b10d 100644 --- a/arch/sparc/mm/iommu.c +++ b/arch/sparc/mm/iommu.c @@ -12,7 +12,7 @@ #include <linux/init.h> #include <linux/mm.h> #include <linux/slab.h> -#include <linux/dma-mapping.h> +#include <linux/dma-map-ops.h> #include <linux/of.h> #include <linux/of_device.h> diff --git a/arch/sparc/net/bpf_jit_comp_32.c b/arch/sparc/net/bpf_jit_comp_32.c index c8eabb973b86..b1dbf2fa8c0a 100644 --- a/arch/sparc/net/bpf_jit_comp_32.c +++ b/arch/sparc/net/bpf_jit_comp_32.c @@ -491,7 +491,7 @@ void bpf_jit_compile(struct bpf_prog *fp) } else { emit_loadimm(K, r_A); } - /* Fallthrough */ + fallthrough; case BPF_RET 
| BPF_A: if (seen_or_pass0) { if (i != flen - 1) {
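All of the comment-to-keyword conversions in this commit (central.c, kgdb_32/64.c, pcr.c, prom_32.c, the signal code, math_32.c and the BPF JIT hunk above) switch to the fallthrough pseudo-keyword. For reference, it expands roughly as follows (include/linux/compiler_attributes.h, added around v5.4):

	#if __has_attribute(__fallthrough__)
	# define fallthrough	__attribute__((__fallthrough__))
	#else
	# define fallthrough	do {} while (0)	/* fallthrough */
	#endif

Unlike a /* fall through */ comment, which only some compilers could recognize (clang never honoured the comments), the attribute survives preprocessing, so -Wimplicit-fallthrough can be enforced consistently.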
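On the syscall-table hunks above that drop compat entry points (vmsplice, readv/writev, mount, process_vm_readv/process_vm_writev): the separate compat_sys_* wrappers became unnecessary because the native handlers now detect 32-bit callers themselves when importing user data. A hedged sketch of the pattern — copy_one_iovec() is a hypothetical helper for illustration, but in_compat_syscall(), compat_ptr() and struct compat_iovec are the real interfaces involved:

	static int copy_one_iovec(struct iovec *kvec, const void __user *uvec)
	{
		if (in_compat_syscall()) {
			struct compat_iovec civ;

			/* 32-bit caller: read the narrow layout and widen it */
			if (copy_from_user(&civ, uvec, sizeof(civ)))
				return -EFAULT;
			kvec->iov_base = compat_ptr(civ.iov_base);
			kvec->iov_len = civ.iov_len;
			return 0;
		}
		/* native caller: layouts already match */
		return copy_from_user(kvec, uvec, sizeof(*kvec)) ? -EFAULT : 0;
	}

In the actual tree this logic lives in the common iovec import code (import_iovec() and friends), which is why the compat column for these table entries can simply be dropped.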