author     Linus Torvalds  2017-09-10 09:57:23 -0700
committer  Linus Torvalds  2017-09-10 09:57:23 -0700
commit     d719518d9ce9132bad8a06e8029aeead328f66a3 (patch)
tree       f5a143973255868f68d3b71fb5bd4715326f0e52 /arch
parent     4c2b5e0fab602b468ce4d8c4d78c99f2caa79a3e (diff)
parent     b6fe1089667a7afcc2cf92cdaec590c7b8381715 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc-next
Pull sparc updates from David Miller:
1) Use register window state adjustment instructions when available,
from Anthony Yznaga.
2) Add VCC console concentrator driver, from Jag Raman.
3) Add 16GB hugepage support, from Nitin Gupta.
4) Support cpu 'poke' hypercall, from Vijay Kumar.
5) Add M7/M8 optimized memcpy/memset/copy_{to,from}_user, from Babu
Moger.
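
For the 16 GB hugepage support in (3), a hypothetical userspace smoke test, not part of this merge, could exercise the new size once a page has been reserved (e.g. via /sys/kernel/mm/hugepages/hugepages-16777216kB/nr_hugepages on hardware this series supports). The shift value 34 matches the HPAGE_16GB_SHIFT constant added in the page_64.h hunk below; MAP_HUGE_SHIFT is the standard mmap() huge-page size encoding:

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>

#ifndef MAP_HUGE_SHIFT
#define MAP_HUGE_SHIFT 26		/* mmap huge-page size encoding, from <linux/mman.h> */
#endif

int main(void)
{
	size_t len = 1UL << 34;		/* 16 GB, i.e. HPAGE_16GB_SHIFT */
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB |
		       (34U << MAP_HUGE_SHIFT), -1, 0);

	if (p == MAP_FAILED) {
		perror("mmap(16G hugepage)");
		return 1;
	}
	*(volatile char *)p = 1;	/* touch the mapping to fault the page in */
	munmap(p, len);
	return 0;
}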
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc-next: (33 commits)
sparc64: Handle additional cases of no fault loads
sparc64: speed up etrap/rtrap on NG2 and later processors
sparc64: vcc: make ktermios const
sparc: leon: grpci1: constify of_device_id
sparc: leon: grpci2: constify of_device_id
sparc64: vcc: Check for IS_ERR() instead of NULL
sparc64: Cleanup hugepage table walk functions
sparc64: Add 16GB hugepage support
sparc64: Support huge PUD case in get_user_pages
sparc64: vcc: Add install & cleanup TTY operations
sparc64: vcc: Add break_ctl TTY operation
sparc64: vcc: Add chars_in_buffer TTY operation
sparc64: vcc: Add write & write_room TTY operations
sparc64: vcc: Add hangup TTY operation
sparc64: vcc: Add open & close TTY operations
sparc64: vcc: Enable LDC event processing engine
sparc64: vcc: Add RX & TX timer for delayed LDC operation
sparc64: vcc: Create sysfs attribute group
sparc64: vcc: Enable VCC port probe and removal
sparc64: vcc: TTY driver initialization and cleanup
...
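
The first entry above, "sparc64: Handle additional cases of no fault loads", adds is_no_fault_exception() to traps_64.c (hunk included below). As a reading aid, the ASI classification it performs is sketched here in isolation; the masks, the %asi extraction from TSTATE, and ASI_PNF (0x82) are taken directly from that hunk, and the 0xf2 mask deliberately also matches the secondary and little-endian no-fault ASI variants:

#include <stdbool.h>
#include <stdint.h>

#define ASI_PNF	0x82	/* primary address space, no-fault */

static bool is_no_fault_access(uint32_t insn, uint64_t tstate)
{
	uint8_t asi;

	if ((insn & 0xc0800000) != 0xc0800000)
		return false;		/* not a load/store alternate (op=3, op3[4]=1) */

	if (insn & 0x2000)		/* i=1: immediate-offset form */
		asi = tstate >> 24;	/* ASI comes from the saved %asi in TSTATE */
	else
		asi = insn >> 5;	/* ASI is encoded in the instruction */

	return (asi & 0xf2) == ASI_PNF;	/* ASI_PNF/_SNF and their _L variants */
}

The real function goes on to dispatch floating-point accesses through handle_ldf_stq(), rejects stores, and emulates integer no-fault loads via handle_ld_nf().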
Diffstat (limited to 'arch')
37 files changed, 2326 insertions, 297 deletions
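
A condensed C view of how the cpu 'poke' support in (4) changes reschedule delivery. This is not new code, just the arch/sparc/kernel/smp_64.c hunk below with its error handling folded into one condition; cpu_poke, send_cpu_poke() and send_cpu_ipi() are all helpers the patch itself introduces:

void smp_send_reschedule(int cpu)
{
	if (cpu == smp_processor_id()) {
		WARN_ON_ONCE(preemptible());
		set_softint(1 << PIL_SMP_RECEIVE_SIGNAL);
		return;
	}

	/* A poke can only resume a cpu idling in sun4v_cpu_yield(). */
	if (cpu_poke && idle_cpu(cpu) && send_cpu_poke(cpu) == HV_EOK)
		return;

	/* Poke unsupported, target busy, or poke failed: cross-call IPI. */
	send_cpu_ipi(cpu);
}

The design point is that poking an idle vcpu costs a single hypercall, while the IPI path goes through xcall_deliver(); the woken cpu then calls scheduler_poke() from arch_cpu_idle() (see the process_64.c hunk) to raise the softint it would otherwise have received from the IPI.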
diff --git a/arch/sparc/configs/sparc64_defconfig b/arch/sparc/configs/sparc64_defconfig index ca8609d7292f..4d4e1cc6402f 100644 --- a/arch/sparc/configs/sparc64_defconfig +++ b/arch/sparc/configs/sparc64_defconfig @@ -238,3 +238,4 @@ CONFIG_CRYPTO_TWOFISH=m # CONFIG_CRYPTO_ANSI_CPRNG is not set CONFIG_CRC16=m CONFIG_LIBCRC32C=m +CONFIG_VCC=m diff --git a/arch/sparc/include/asm/hugetlb.h b/arch/sparc/include/asm/hugetlb.h index d1f837dc77a4..0ca7caab1b06 100644 --- a/arch/sparc/include/asm/hugetlb.h +++ b/arch/sparc/include/asm/hugetlb.h @@ -4,6 +4,13 @@ #include <asm/page.h> #include <asm-generic/hugetlb.h> +#ifdef CONFIG_HUGETLB_PAGE +struct pud_huge_patch_entry { + unsigned int addr; + unsigned int insn; +}; +extern struct pud_huge_patch_entry __pud_huge_patch, __pud_huge_patch_end; +#endif void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte); diff --git a/arch/sparc/include/asm/hypervisor.h b/arch/sparc/include/asm/hypervisor.h index 73cb8978df58..3dc9215d0357 100644 --- a/arch/sparc/include/asm/hypervisor.h +++ b/arch/sparc/include/asm/hypervisor.h @@ -298,6 +298,24 @@ unsigned long sun4v_cpu_stop(unsigned long cpuid); unsigned long sun4v_cpu_yield(void); #endif +/* cpu_poke() + * TRAP: HV_FAST_TRAP + * FUNCTION: HV_FAST_CPU_POKE + * RET0: status + * ERRORS: ENOCPU cpuid refers to a CPU that does not exist + * EINVAL cpuid is current CPU + * + * Poke CPU cpuid. If the target CPU is currently suspended having + * invoked the cpu-yield service, that vCPU will be resumed. + * Poke interrupts may only be sent to valid, non-local CPUs. + * It is not legal to poke the current vCPU. + */ +#define HV_FAST_CPU_POKE 0x13 + +#ifndef __ASSEMBLY__ +unsigned long sun4v_cpu_poke(unsigned long cpuid); +#endif + /* cpu_qconf() * TRAP: HV_FAST_TRAP * FUNCTION: HV_FAST_CPU_QCONF diff --git a/arch/sparc/include/asm/page_64.h b/arch/sparc/include/asm/page_64.h index 5961b2d8398a..8ee1f97589a1 100644 --- a/arch/sparc/include/asm/page_64.h +++ b/arch/sparc/include/asm/page_64.h @@ -17,6 +17,7 @@ #define HPAGE_SHIFT 23 #define REAL_HPAGE_SHIFT 22 +#define HPAGE_16GB_SHIFT 34 #define HPAGE_2GB_SHIFT 31 #define HPAGE_256MB_SHIFT 28 #define HPAGE_64K_SHIFT 16 @@ -28,7 +29,7 @@ #define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) #define HAVE_ARCH_HUGETLB_UNMAPPED_AREA #define REAL_HPAGE_PER_HPAGE (_AC(1,UL) << (HPAGE_SHIFT - REAL_HPAGE_SHIFT)) -#define HUGE_MAX_HSTATE 4 +#define HUGE_MAX_HSTATE 5 #endif #ifndef __ASSEMBLY__ diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h index 6fbd931f0570..4fefe3762083 100644 --- a/arch/sparc/include/asm/pgtable_64.h +++ b/arch/sparc/include/asm/pgtable_64.h @@ -414,6 +414,11 @@ static inline bool is_hugetlb_pmd(pmd_t pmd) return !!(pmd_val(pmd) & _PAGE_PMD_HUGE); } +static inline bool is_hugetlb_pud(pud_t pud) +{ + return !!(pud_val(pud) & _PAGE_PUD_HUGE); +} + #ifdef CONFIG_TRANSPARENT_HUGEPAGE static inline pmd_t pmd_mkhuge(pmd_t pmd) { @@ -687,6 +692,8 @@ static inline unsigned long pmd_write(pmd_t pmd) return pte_write(pte); } +#define pud_write(pud) pte_write(__pte(pud_val(pud))) + #ifdef CONFIG_TRANSPARENT_HUGEPAGE static inline unsigned long pmd_dirty(pmd_t pmd) { @@ -823,9 +830,18 @@ static inline unsigned long __pmd_page(pmd_t pmd) return ((unsigned long) __va(pfn << PAGE_SHIFT)); } + +static inline unsigned long pud_page_vaddr(pud_t pud) +{ + pte_t pte = __pte(pud_val(pud)); + unsigned long pfn; + + pfn = pte_pfn(pte); + + return ((unsigned long) __va(pfn << PAGE_SHIFT)); +} + #define 
pmd_page(pmd) virt_to_page((void *)__pmd_page(pmd)) -#define pud_page_vaddr(pud) \ - ((unsigned long) __va(pud_val(pud))) #define pud_page(pud) virt_to_page((void *)pud_page_vaddr(pud)) #define pmd_clear(pmdp) (pmd_val(*(pmdp)) = 0UL) #define pud_present(pud) (pud_val(pud) != 0U) diff --git a/arch/sparc/include/asm/smp_64.h b/arch/sparc/include/asm/smp_64.h index ce2233f7e662..a75089285db8 100644 --- a/arch/sparc/include/asm/smp_64.h +++ b/arch/sparc/include/asm/smp_64.h @@ -33,6 +33,9 @@ DECLARE_PER_CPU(cpumask_t, cpu_sibling_map); extern cpumask_t cpu_core_map[NR_CPUS]; +void smp_init_cpu_poke(void); +void scheduler_poke(void); + void arch_send_call_function_single_ipi(int cpu); void arch_send_call_function_ipi_mask(const struct cpumask *mask); @@ -74,6 +77,8 @@ void __cpu_die(unsigned int cpu); #define smp_fetch_global_regs() do { } while (0) #define smp_fetch_global_pmu() do { } while (0) #define smp_fill_in_cpu_possible_map() do { } while (0) +#define smp_init_cpu_poke() do { } while (0) +#define scheduler_poke() do { } while (0) #endif /* !(CONFIG_SMP) */ diff --git a/arch/sparc/include/asm/trap_block.h b/arch/sparc/include/asm/trap_block.h index ff05992dae7a..dfc538609eb2 100644 --- a/arch/sparc/include/asm/trap_block.h +++ b/arch/sparc/include/asm/trap_block.h @@ -73,6 +73,8 @@ struct sun4v_1insn_patch_entry { }; extern struct sun4v_1insn_patch_entry __sun4v_1insn_patch, __sun4v_1insn_patch_end; +extern struct sun4v_1insn_patch_entry __fast_win_ctrl_1insn_patch, + __fast_win_ctrl_1insn_patch_end; struct sun4v_2insn_patch_entry { unsigned int addr; diff --git a/arch/sparc/include/asm/tsb.h b/arch/sparc/include/asm/tsb.h index 32258e08da03..acf55063aa3d 100644 --- a/arch/sparc/include/asm/tsb.h +++ b/arch/sparc/include/asm/tsb.h @@ -195,6 +195,41 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end; nop; \ 699: + /* PUD has been loaded into REG1, interpret the value, seeing + * if it is a HUGE PUD or a normal one. If it is not valid + * then jump to FAIL_LABEL. If it is a HUGE PUD, and it + * translates to a valid PTE, branch to PTE_LABEL. + * + * We have to propagate bits [32:22] from the virtual address + * to resolve at 4M granularity. + */ +#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) +#define USER_PGTABLE_CHECK_PUD_HUGE(VADDR, REG1, REG2, FAIL_LABEL, PTE_LABEL) \ +700: ba 700f; \ + nop; \ + .section .pud_huge_patch, "ax"; \ + .word 700b; \ + nop; \ + .previous; \ + brz,pn REG1, FAIL_LABEL; \ + sethi %uhi(_PAGE_PUD_HUGE), REG2; \ + sllx REG2, 32, REG2; \ + andcc REG1, REG2, %g0; \ + be,pt %xcc, 700f; \ + sethi %hi(0x1ffc0000), REG2; \ + sllx REG2, 1, REG2; \ + brgez,pn REG1, FAIL_LABEL; \ + andn REG1, REG2, REG1; \ + and VADDR, REG2, REG2; \ + brlz,pt REG1, PTE_LABEL; \ + or REG1, REG2, REG1; \ +700: +#else +#define USER_PGTABLE_CHECK_PUD_HUGE(VADDR, REG1, REG2, FAIL_LABEL, PTE_LABEL) \ + brz,pn REG1, FAIL_LABEL; \ + nop; +#endif + /* PMD has been loaded into REG1, interpret the value, seeing * if it is a HUGE PMD or a normal one. If it is not valid * then jump to FAIL_LABEL. 
If it is a HUGE PMD, and it @@ -242,6 +277,7 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end; srlx REG2, 64 - PAGE_SHIFT, REG2; \ andn REG2, 0x7, REG2; \ ldxa [REG1 + REG2] ASI_PHYS_USE_EC, REG1; \ + USER_PGTABLE_CHECK_PUD_HUGE(VADDR, REG1, REG2, FAIL_LABEL, 800f) \ brz,pn REG1, FAIL_LABEL; \ sllx VADDR, 64 - (PMD_SHIFT + PMD_BITS), REG2; \ srlx REG2, 64 - PAGE_SHIFT, REG2; \ diff --git a/arch/sparc/include/asm/vio.h b/arch/sparc/include/asm/vio.h index d1c47e9f0090..f3d4ac232690 100644 --- a/arch/sparc/include/asm/vio.h +++ b/arch/sparc/include/asm/vio.h @@ -52,6 +52,7 @@ struct vio_ver_info { #define VDEV_NETWORK_SWITCH 0x02 #define VDEV_DISK 0x03 #define VDEV_DISK_SERVER 0x04 +#define VDEV_CONSOLE_CON 0x05 u8 resv1[3]; u64 resv2[5]; @@ -282,6 +283,14 @@ struct vio_dring_state { struct ldc_trans_cookie cookies[VIO_MAX_RING_COOKIES]; }; +#define VIO_TAG_SIZE ((int)sizeof(struct vio_msg_tag)) +#define VIO_VCC_MTU_SIZE (LDC_PACKET_SIZE - VIO_TAG_SIZE) + +struct vio_vcc { + struct vio_msg_tag tag; + char data[VIO_VCC_MTU_SIZE]; +}; + static inline void *vio_dring_cur(struct vio_dring_state *dr) { return dr->base + (dr->entry_size * dr->prod); diff --git a/arch/sparc/kernel/etrap_64.S b/arch/sparc/kernel/etrap_64.S index 1276ca2567ba..5c237467d156 100644 --- a/arch/sparc/kernel/etrap_64.S +++ b/arch/sparc/kernel/etrap_64.S @@ -38,7 +38,11 @@ etrap_syscall: TRAP_LOAD_THREAD_REG(%g6, %g1) or %g1, %g3, %g1 bne,pn %xcc, 1f sub %sp, STACKFRAME_SZ+TRACEREG_SZ-STACK_BIAS, %g2 - wrpr %g0, 7, %cleanwin +661: wrpr %g0, 7, %cleanwin + .section .fast_win_ctrl_1insn_patch, "ax" + .word 661b + .word 0x85880000 ! allclean + .previous sethi %hi(TASK_REGOFF), %g2 sethi %hi(TSTATE_PEF), %g3 @@ -88,16 +92,30 @@ etrap_save: save %g2, -STACK_BIAS, %sp bne,pn %xcc, 3f mov PRIMARY_CONTEXT, %l4 - rdpr %canrestore, %g3 +661: rdpr %canrestore, %g3 + .section .fast_win_ctrl_1insn_patch, "ax" + .word 661b + nop + .previous + rdpr %wstate, %g2 - wrpr %g0, 0, %canrestore +661: wrpr %g0, 0, %canrestore + .section .fast_win_ctrl_1insn_patch, "ax" + .word 661b + nop + .previous sll %g2, 3, %g2 /* Set TI_SYS_FPDEPTH to 1 and clear TI_SYS_NOERROR. */ mov 1, %l5 sth %l5, [%l6 + TI_SYS_NOERROR] - wrpr %g3, 0, %otherwin +661: wrpr %g3, 0, %otherwin + .section .fast_win_ctrl_1insn_patch, "ax" + .word 661b + .word 0x87880000 ! otherw + .previous + wrpr %g2, 0, %wstate sethi %hi(sparc64_kern_pri_context), %g2 ldx [%g2 + %lo(sparc64_kern_pri_context)], %g3 diff --git a/arch/sparc/kernel/head_64.S b/arch/sparc/kernel/head_64.S index 78e0211753d2..4de9fbd1a177 100644 --- a/arch/sparc/kernel/head_64.S +++ b/arch/sparc/kernel/head_64.S @@ -603,10 +603,10 @@ niagara_tlb_fixup: be,pt %xcc, niagara4_patch nop cmp %g1, SUN4V_CHIP_SPARC_M7 - be,pt %xcc, niagara4_patch + be,pt %xcc, sparc_m7_patch nop cmp %g1, SUN4V_CHIP_SPARC_M8 - be,pt %xcc, niagara4_patch + be,pt %xcc, sparc_m7_patch nop cmp %g1, SUN4V_CHIP_SPARC_SN be,pt %xcc, niagara4_patch @@ -621,6 +621,18 @@ niagara_tlb_fixup: ba,a,pt %xcc, 80f nop + +sparc_m7_patch: + call m7_patch_copyops + nop + call m7_patch_bzero + nop + call m7_patch_pageops + nop + + ba,a,pt %xcc, 80f + nop + niagara4_patch: call niagara4_patch_copyops nop @@ -881,7 +893,6 @@ sparc64_boot_end: #include "misctrap.S" #include "syscalls.S" #include "helpers.S" -#include "hvcalls.S" #include "sun4v_tlb_miss.S" #include "sun4v_ivec.S" #include "ktlb.S" @@ -926,6 +937,7 @@ swapper_4m_tsb: ! 
0x0000000000428000 +#include "hvcalls.S" #include "systbls_64.S" .data diff --git a/arch/sparc/kernel/hvapi.c b/arch/sparc/kernel/hvapi.c index 267731234ce8..d41ce33d87d6 100644 --- a/arch/sparc/kernel/hvapi.c +++ b/arch/sparc/kernel/hvapi.c @@ -189,7 +189,7 @@ void __init sun4v_hvapi_init(void) group = HV_GRP_CORE; major = 1; - minor = 1; + minor = 6; if (sun4v_hvapi_register(group, major, &minor)) goto bad; diff --git a/arch/sparc/kernel/hvcalls.S b/arch/sparc/kernel/hvcalls.S index 4116ee5c7791..e57007ff7f8f 100644 --- a/arch/sparc/kernel/hvcalls.S +++ b/arch/sparc/kernel/hvcalls.S @@ -106,6 +106,17 @@ ENTRY(sun4v_cpu_yield) nop ENDPROC(sun4v_cpu_yield) + /* %o0: cpuid + * + * returns %o0: status + */ +ENTRY(sun4v_cpu_poke) + mov HV_FAST_CPU_POKE, %o5 + ta HV_FAST_TRAP + retl + nop +ENDPROC(sun4v_cpu_poke) + /* %o0: type * %o1: queue paddr * %o2: num queue entries diff --git a/arch/sparc/kernel/ldc.c b/arch/sparc/kernel/ldc.c index 840e0b21bfe3..acffbc894ab0 100644 --- a/arch/sparc/kernel/ldc.c +++ b/arch/sparc/kernel/ldc.c @@ -1480,6 +1480,7 @@ int ldc_rx_reset(struct ldc_channel *lp) { return __set_rx_head(lp, lp->rx_tail); } +EXPORT_SYMBOL(ldc_rx_reset); void __ldc_print(struct ldc_channel *lp, const char *caller) { @@ -1493,6 +1494,7 @@ void __ldc_print(struct ldc_channel *lp, const char *caller) lp->tx_head, lp->tx_tail, lp->tx_num_entries, lp->rcv_nxt, lp->snd_nxt); } +EXPORT_SYMBOL(__ldc_print); static int write_raw(struct ldc_channel *lp, const void *buf, unsigned int size) { diff --git a/arch/sparc/kernel/leon_pci_grpci1.c b/arch/sparc/kernel/leon_pci_grpci1.c index 1e77128a8f88..83ba5005d44c 100644 --- a/arch/sparc/kernel/leon_pci_grpci1.c +++ b/arch/sparc/kernel/leon_pci_grpci1.c @@ -695,7 +695,7 @@ err1: return err; } -static struct of_device_id grpci1_of_match[] = { +static const struct of_device_id grpci1_of_match[] __initconst = { { .name = "GAISLER_PCIFBRG", }, diff --git a/arch/sparc/kernel/leon_pci_grpci2.c b/arch/sparc/kernel/leon_pci_grpci2.c index f727c4de1316..ff0e5c90310f 100644 --- a/arch/sparc/kernel/leon_pci_grpci2.c +++ b/arch/sparc/kernel/leon_pci_grpci2.c @@ -886,7 +886,7 @@ err1: return err; } -static struct of_device_id grpci2_of_match[] = { +static const struct of_device_id grpci2_of_match[] __initconst = { { .name = "GAISLER_GRPCI2", }, diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c index b96104da5bd6..44e5da405f96 100644 --- a/arch/sparc/kernel/process_64.c +++ b/arch/sparc/kernel/process_64.c @@ -77,8 +77,13 @@ void arch_cpu_idle(void) : "=&r" (pstate) : "i" (PSTATE_IE)); - if (!need_resched() && !cpu_is_offline(smp_processor_id())) + if (!need_resched() && !cpu_is_offline(smp_processor_id())) { sun4v_cpu_yield(); + /* If resumed by cpu_poke then we need to explicitly + * call scheduler_ipi(). + */ + scheduler_poke(); + } /* Re-enable interrupts. */ __asm__ __volatile__( diff --git a/arch/sparc/kernel/rtrap_64.S b/arch/sparc/kernel/rtrap_64.S index 709a82ebd294..dff86fad0a1f 100644 --- a/arch/sparc/kernel/rtrap_64.S +++ b/arch/sparc/kernel/rtrap_64.S @@ -224,10 +224,19 @@ rt_continue: ldx [%sp + PTREGS_OFF + PT_V9_G1], %g1 rdpr %otherwin, %l2 srl %l1, 3, %l1 - wrpr %l2, %g0, %canrestore +661: wrpr %l2, %g0, %canrestore + .section .fast_win_ctrl_1insn_patch, "ax" + .word 661b + .word 0x89880000 ! 
normalw + .previous + wrpr %l1, %g0, %wstate brnz,pt %l2, user_rtt_restore - wrpr %g0, %g0, %otherwin +661: wrpr %g0, %g0, %otherwin + .section .fast_win_ctrl_1insn_patch, "ax" + .word 661b + nop + .previous ldx [%g6 + TI_FLAGS], %g3 wr %g0, ASI_AIUP, %asi diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c index 150ee7d4b059..db4c4d7e28a0 100644 --- a/arch/sparc/kernel/setup_64.c +++ b/arch/sparc/kernel/setup_64.c @@ -300,6 +300,11 @@ static void __init sun4v_patch(void) break; } + if (sun4v_chip_type != SUN4V_CHIP_NIAGARA1) { + sun4v_patch_1insn_range(&__fast_win_ctrl_1insn_patch, + &__fast_win_ctrl_1insn_patch_end); + } + sun4v_hvapi_init(); } @@ -363,6 +368,7 @@ void __init start_early_boot(void) check_if_starfire(); per_cpu_patch(); sun4v_patch(); + smp_init_cpu_poke(); cpu = hard_smp_processor_id(); if (cpu >= NR_CPUS) { diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c index 3218bc43302e..4898329970c5 100644 --- a/arch/sparc/kernel/smp_64.c +++ b/arch/sparc/kernel/smp_64.c @@ -74,6 +74,9 @@ EXPORT_SYMBOL(cpu_core_sib_cache_map); static cpumask_t smp_commenced_mask; +static DEFINE_PER_CPU(bool, poke); +static bool cpu_poke; + void smp_info(struct seq_file *m) { int i; @@ -1439,15 +1442,86 @@ void __init smp_cpus_done(unsigned int max_cpus) { } +static void send_cpu_ipi(int cpu) +{ + xcall_deliver((u64) &xcall_receive_signal, + 0, 0, cpumask_of(cpu)); +} + +void scheduler_poke(void) +{ + if (!cpu_poke) + return; + + if (!__this_cpu_read(poke)) + return; + + __this_cpu_write(poke, false); + set_softint(1 << PIL_SMP_RECEIVE_SIGNAL); +} + +static unsigned long send_cpu_poke(int cpu) +{ + unsigned long hv_err; + + per_cpu(poke, cpu) = true; + hv_err = sun4v_cpu_poke(cpu); + if (hv_err != HV_EOK) { + per_cpu(poke, cpu) = false; + pr_err_ratelimited("%s: sun4v_cpu_poke() fails err=%lu\n", + __func__, hv_err); + } + + return hv_err; +} + void smp_send_reschedule(int cpu) { if (cpu == smp_processor_id()) { WARN_ON_ONCE(preemptible()); set_softint(1 << PIL_SMP_RECEIVE_SIGNAL); - } else { - xcall_deliver((u64) &xcall_receive_signal, - 0, 0, cpumask_of(cpu)); + return; + } + + /* Use cpu poke to resume idle cpu if supported. */ + if (cpu_poke && idle_cpu(cpu)) { + unsigned long ret; + + ret = send_cpu_poke(cpu); + if (ret == HV_EOK) + return; } + + /* Use IPI in following cases: + * - cpu poke not supported + * - cpu not idle + * - send_cpu_poke() returns with error + */ + send_cpu_ipi(cpu); +} + +void smp_init_cpu_poke(void) +{ + unsigned long major; + unsigned long minor; + int ret; + + if (tlb_type != hypervisor) + return; + + ret = sun4v_hvapi_get(HV_GRP_CORE, &major, &minor); + if (ret) { + pr_debug("HV_GRP_CORE is not registered\n"); + return; + } + + if (major == 1 && minor >= 6) { + /* CPU POKE is registered. */ + cpu_poke = true; + return; + } + + pr_debug("CPU_POKE not supported\n"); } void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs) diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c index ad31af1dd726..c74f2dffcc13 100644 --- a/arch/sparc/kernel/traps_64.c +++ b/arch/sparc/kernel/traps_64.c @@ -265,6 +265,45 @@ void sun4v_insn_access_exception_tl1(struct pt_regs *regs, unsigned long addr, u sun4v_insn_access_exception(regs, addr, type_ctx); } +bool is_no_fault_exception(struct pt_regs *regs) +{ + unsigned char asi; + u32 insn; + + if (get_user(insn, (u32 __user *)regs->tpc) == -EFAULT) + return false; + + /* + * Must do a little instruction decoding here in order to + * decide on a course of action. 
The bits of interest are: + * insn[31:30] = op, where 3 indicates the load/store group + * insn[24:19] = op3, which identifies individual opcodes + * insn[13] indicates an immediate offset + * op3[4]=1 identifies alternate space instructions + * op3[5:4]=3 identifies floating point instructions + * op3[2]=1 identifies stores + * See "Opcode Maps" in the appendix of any Sparc V9 + * architecture spec for full details. + */ + if ((insn & 0xc0800000) == 0xc0800000) { /* op=3, op3[4]=1 */ + if (insn & 0x2000) /* immediate offset */ + asi = (regs->tstate >> 24); /* saved %asi */ + else + asi = (insn >> 5); /* immediate asi */ + if ((asi & 0xf2) == ASI_PNF) { + if (insn & 0x1000000) { /* op3[5:4]=3 */ + handle_ldf_stq(insn, regs); + return true; + } else if (insn & 0x200000) { /* op3[2], stores */ + return false; + } + handle_ld_nf(insn, regs); + return true; + } + } + return false; +} + void spitfire_data_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar) { enum ctx_state prev_state = exception_enter(); @@ -296,6 +335,9 @@ void spitfire_data_access_exception(struct pt_regs *regs, unsigned long sfsr, un die_if_kernel("Dax", regs); } + if (is_no_fault_exception(regs)) + return; + info.si_signo = SIGSEGV; info.si_errno = 0; info.si_code = SEGV_MAPERR; @@ -352,6 +394,9 @@ void sun4v_data_access_exception(struct pt_regs *regs, unsigned long addr, unsig regs->tpc &= 0xffffffff; regs->tnpc &= 0xffffffff; } + if (is_no_fault_exception(regs)) + return; + info.si_signo = SIGSEGV; info.si_errno = 0; info.si_code = SEGV_MAPERR; @@ -2575,6 +2620,9 @@ void mem_address_unaligned(struct pt_regs *regs, unsigned long sfar, unsigned lo kernel_unaligned_trap(regs, *((unsigned int *)regs->tpc)); goto out; } + if (is_no_fault_exception(regs)) + return; + info.si_signo = SIGBUS; info.si_errno = 0; info.si_code = BUS_ADRALN; @@ -2597,6 +2645,9 @@ void sun4v_do_mna(struct pt_regs *regs, unsigned long addr, unsigned long type_c kernel_unaligned_trap(regs, *((unsigned int *)regs->tpc)); return; } + if (is_no_fault_exception(regs)) + return; + info.si_signo = SIGBUS; info.si_errno = 0; info.si_code = BUS_ADRALN; diff --git a/arch/sparc/kernel/tsb.S b/arch/sparc/kernel/tsb.S index db872dbfafe9..f74115364b1e 100644 --- a/arch/sparc/kernel/tsb.S +++ b/arch/sparc/kernel/tsb.S @@ -117,7 +117,7 @@ tsb_miss_page_table_walk_sun4v_fastpath: /* Valid PTE is now in %g5. 
*/ #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) - sethi %uhi(_PAGE_PMD_HUGE), %g7 + sethi %uhi(_PAGE_PMD_HUGE | _PAGE_PUD_HUGE), %g7 sllx %g7, 32, %g7 andcc %g5, %g7, %g0 diff --git a/arch/sparc/kernel/vio.c b/arch/sparc/kernel/vio.c index 1c8763c9c52b..da1ac3f22b24 100644 --- a/arch/sparc/kernel/vio.c +++ b/arch/sparc/kernel/vio.c @@ -246,6 +246,7 @@ u64 vio_vdev_node(struct mdesc_handle *hp, struct vio_dev *vdev) return node; } +EXPORT_SYMBOL(vio_vdev_node); static void vio_fill_channel_info(struct mdesc_handle *hp, u64 mp, struct vio_dev *vdev) diff --git a/arch/sparc/kernel/viohs.c b/arch/sparc/kernel/viohs.c index d4f13c037a40..dcd278f29573 100644 --- a/arch/sparc/kernel/viohs.c +++ b/arch/sparc/kernel/viohs.c @@ -814,15 +814,21 @@ int vio_driver_init(struct vio_driver_state *vio, struct vio_dev *vdev, case VDEV_NETWORK_SWITCH: case VDEV_DISK: case VDEV_DISK_SERVER: + case VDEV_CONSOLE_CON: break; default: return -EINVAL; } - if (!ops || !ops->send_attr || !ops->handle_attr || - !ops->handshake_complete) - return -EINVAL; + if (dev_class == VDEV_NETWORK || + dev_class == VDEV_NETWORK_SWITCH || + dev_class == VDEV_DISK || + dev_class == VDEV_DISK_SERVER) { + if (!ops || !ops->send_attr || !ops->handle_attr || + !ops->handshake_complete) + return -EINVAL; + } if (!ver_table || ver_table_size < 0) return -EINVAL; diff --git a/arch/sparc/kernel/vmlinux.lds.S b/arch/sparc/kernel/vmlinux.lds.S index 03b3d65d1266..d78847d56a4b 100644 --- a/arch/sparc/kernel/vmlinux.lds.S +++ b/arch/sparc/kernel/vmlinux.lds.S @@ -154,6 +154,16 @@ SECTIONS *(.get_tick_patch) __get_tick_patch_end = .; } + .pud_huge_patch : { + __pud_huge_patch = .; + *(.pud_huge_patch) + __pud_huge_patch_end = .; + } + .fast_win_ctrl_1insn_patch : { + __fast_win_ctrl_1insn_patch = .; + *(.fast_win_ctrl_1insn_patch) + __fast_win_ctrl_1insn_patch_end = .; + } PERCPU_SECTION(SMP_CACHE_BYTES) #ifdef CONFIG_JUMP_LABEL diff --git a/arch/sparc/lib/M7copy_from_user.S b/arch/sparc/lib/M7copy_from_user.S new file mode 100644 index 000000000000..66464b3e3649 --- /dev/null +++ b/arch/sparc/lib/M7copy_from_user.S @@ -0,0 +1,40 @@ +/* + * M7copy_from_user.S: SPARC M7 optimized copy from userspace. + * + * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. + */ + + +#define EX_LD(x, y) \ +98: x; \ + .section __ex_table,"a"; \ + .align 4; \ + .word 98b, y; \ + .text; \ + .align 4; + +#define EX_LD_FP(x, y) \ +98: x; \ + .section __ex_table,"a"; \ + .align 4; \ + .word 98b, y##_fp; \ + .text; \ + .align 4; + +#ifndef ASI_AIUS +#define ASI_AIUS 0x11 +#endif + +#define FUNC_NAME M7copy_from_user +#define LOAD(type,addr,dest) type##a [addr] %asi, dest +#define EX_RETVAL(x) 0 + +#ifdef __KERNEL__ +#define PREAMBLE \ + rd %asi, %g1; \ + cmp %g1, ASI_AIUS; \ + bne,pn %icc, raw_copy_in_user; \ + nop +#endif + +#include "M7memcpy.S" diff --git a/arch/sparc/lib/M7copy_to_user.S b/arch/sparc/lib/M7copy_to_user.S new file mode 100644 index 000000000000..a60ac467f808 --- /dev/null +++ b/arch/sparc/lib/M7copy_to_user.S @@ -0,0 +1,51 @@ +/* + * M7copy_to_user.S: SPARC M7 optimized copy to userspace. + * + * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. 
+ */ + + +#define EX_ST(x, y) \ +98: x; \ + .section __ex_table,"a"; \ + .align 4; \ + .word 98b, y; \ + .text; \ + .align 4; + +#define EX_ST_FP(x, y) \ +98: x; \ + .section __ex_table,"a"; \ + .align 4; \ + .word 98b, y##_fp; \ + .text; \ + .align 4; + + +#ifndef ASI_AIUS +#define ASI_AIUS 0x11 +#endif + +#ifndef ASI_BLK_INIT_QUAD_LDD_AIUS +#define ASI_BLK_INIT_QUAD_LDD_AIUS 0x23 +#endif + +#define FUNC_NAME M7copy_to_user +#define STORE(type,src,addr) type##a src, [addr] %asi +#define STORE_ASI ASI_BLK_INIT_QUAD_LDD_AIUS +#define STORE_MRU_ASI ASI_ST_BLKINIT_MRU_S +#define EX_RETVAL(x) 0 + +#ifdef __KERNEL__ + /* Writing to %asi is _expensive_ so we hardcode it. + * Reading %asi to check for KERNEL_DS is comparatively + * cheap. + */ +#define PREAMBLE \ + rd %asi, %g1; \ + cmp %g1, ASI_AIUS; \ + bne,pn %icc, raw_copy_in_user; \ + nop +#endif + +#include "M7memcpy.S" diff --git a/arch/sparc/lib/M7memcpy.S b/arch/sparc/lib/M7memcpy.S new file mode 100644 index 000000000000..cbd42ea7c3f7 --- /dev/null +++ b/arch/sparc/lib/M7memcpy.S @@ -0,0 +1,923 @@ +/* + * M7memcpy: Optimized SPARC M7 memcpy + * + * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. + */ + + .file "M7memcpy.S" + +/* + * memcpy(s1, s2, len) + * + * Copy s2 to s1, always copy n bytes. + * Note: this C code does not work for overlapped copies. + * + * Fast assembler language version of the following C-program for memcpy + * which represents the `standard' for the C-library. + * + * void * + * memcpy(void *s, const void *s0, size_t n) + * { + * if (n != 0) { + * char *s1 = s; + * const char *s2 = s0; + * do { + * *s1++ = *s2++; + * } while (--n != 0); + * } + * return (s); + * } + * + * + * SPARC T7/M7 Flow : + * + * if (count < SMALL_MAX) { + * if count < SHORTCOPY (SHORTCOPY=3) + * copy bytes; exit with dst addr + * if src & dst aligned on word boundary but not long word boundary, + * copy with ldw/stw; branch to finish_up + * if src & dst aligned on long word boundary + * copy with ldx/stx; branch to finish_up + * if src & dst not aligned and length <= SHORTCHECK (SHORTCHECK=14) + * copy bytes; exit with dst addr + * move enough bytes to get src to word boundary + * if dst now on word boundary + * move_words: + * copy words; branch to finish_up + * if dst now on half word boundary + * load words, shift half words, store words; branch to finish_up + * if dst on byte 1 + * load words, shift 3 bytes, store words; branch to finish_up + * if dst on byte 3 + * load words, shift 1 byte, store words; branch to finish_up + * finish_up: + * copy bytes; exit with dst addr + * } else { More than SMALL_MAX bytes + * move bytes until dst is on long word boundary + * if( src is on long word boundary ) { + * if (count < MED_MAX) { + * finish_long: src/dst aligned on 8 bytes + * copy with ldx/stx in 8-way unrolled loop; + * copy final 0-63 bytes; exit with dst addr + * } else { src/dst aligned; count > MED_MAX + * align dst on 64 byte boundary; for main data movement: + * prefetch src data to L2 cache; let HW prefetch move data to L1 cache + * Use BIS (block initializing store) to avoid copying store cache + * lines from memory. But pre-store first element of each cache line + * ST_CHUNK lines in advance of the rest of that cache line. That + * gives time for replacement cache lines to be written back without + * excess STQ and Miss Buffer filling. Repeat until near the end, + * then finish up storing before going to finish_long. 
+ * } + * } else { src/dst not aligned on 8 bytes + * if src is word aligned and count < MED_WMAX + * move words in 8-way unrolled loop + * move final 0-31 bytes; exit with dst addr + * if count < MED_UMAX + * use alignaddr/faligndata combined with ldd/std in 8-way + * unrolled loop to move data. + * go to unalign_done + * else + * setup alignaddr for faligndata instructions + * align dst on 64 byte boundary; prefetch src data to L1 cache + * loadx8, falign, block-store, prefetch loop + * (only use block-init-store when src/dst on 8 byte boundaries.) + * unalign_done: + * move remaining bytes for unaligned cases. exit with dst addr. + * } + * + */ + +#include <asm/visasm.h> +#include <asm/asi.h> + +#if !defined(EX_LD) && !defined(EX_ST) +#define NON_USER_COPY +#endif + +#ifndef EX_LD +#define EX_LD(x,y) x +#endif +#ifndef EX_LD_FP +#define EX_LD_FP(x,y) x +#endif + +#ifndef EX_ST +#define EX_ST(x,y) x +#endif +#ifndef EX_ST_FP +#define EX_ST_FP(x,y) x +#endif + +#ifndef EX_RETVAL +#define EX_RETVAL(x) x +#endif + +#ifndef LOAD +#define LOAD(type,addr,dest) type [addr], dest +#endif + +#ifndef STORE +#define STORE(type,src,addr) type src, [addr] +#endif + +/* + * ASI_BLK_INIT_QUAD_LDD_P/ASI_BLK_INIT_QUAD_LDD_S marks the cache + * line as "least recently used" which means if many threads are + * active, it has a high probability of being pushed out of the cache + * between the first initializing store and the final stores. + * Thus, we use ASI_ST_BLKINIT_MRU_P/ASI_ST_BLKINIT_MRU_S which + * marks the cache line as "most recently used" for all + * but the last cache line + */ +#ifndef STORE_ASI +#ifndef SIMULATE_NIAGARA_ON_NON_NIAGARA +#define STORE_ASI ASI_BLK_INIT_QUAD_LDD_P +#else +#define STORE_ASI 0x80 /* ASI_P */ +#endif +#endif + +#ifndef STORE_MRU_ASI +#ifndef SIMULATE_NIAGARA_ON_NON_NIAGARA +#define STORE_MRU_ASI ASI_ST_BLKINIT_MRU_P +#else +#define STORE_MRU_ASI 0x80 /* ASI_P */ +#endif +#endif + +#ifndef STORE_INIT +#define STORE_INIT(src,addr) stxa src, [addr] STORE_ASI +#endif + +#ifndef STORE_INIT_MRU +#define STORE_INIT_MRU(src,addr) stxa src, [addr] STORE_MRU_ASI +#endif + +#ifndef FUNC_NAME +#define FUNC_NAME M7memcpy +#endif + +#ifndef PREAMBLE +#define PREAMBLE +#endif + +#define BLOCK_SIZE 64 +#define SHORTCOPY 3 +#define SHORTCHECK 14 +#define SHORT_LONG 64 /* max copy for short longword-aligned case */ + /* must be at least 64 */ +#define SMALL_MAX 128 +#define MED_UMAX 1024 /* max copy for medium un-aligned case */ +#define MED_WMAX 1024 /* max copy for medium word-aligned case */ +#define MED_MAX 1024 /* max copy for medium longword-aligned case */ +#define ST_CHUNK 24 /* ST_CHUNK - block of values for BIS Store */ +#define ALIGN_PRE 24 /* distance for aligned prefetch loop */ + + .register %g2,#scratch + + .section ".text" + .global FUNC_NAME + .type FUNC_NAME, #function + .align 16 +FUNC_NAME: + srlx %o2, 31, %g2 + cmp %g2, 0 + tne %xcc, 5 + PREAMBLE + mov %o0, %g1 ! save %o0 + brz,pn %o2, .Lsmallx + cmp %o2, 3 + ble,pn %icc, .Ltiny_cp + cmp %o2, 19 + ble,pn %icc, .Lsmall_cp + or %o0, %o1, %g2 + cmp %o2, SMALL_MAX + bl,pn %icc, .Lmedium_cp + nop + +.Lmedium: + neg %o0, %o5 + andcc %o5, 7, %o5 ! bytes till DST 8 byte aligned + brz,pt %o5, .Ldst_aligned_on_8 + + ! %o5 has the bytes to be written in partial store. + sub %o2, %o5, %o2 + sub %o1, %o0, %o1 ! %o1 gets the difference +7: ! dst aligning loop + add %o1, %o0, %o4 + EX_LD(LOAD(ldub, %o4, %o4), memcpy_retl_o2_plus_o5) ! 
load one byte + subcc %o5, 1, %o5 + EX_ST(STORE(stb, %o4, %o0), memcpy_retl_o2_plus_o5_plus_1) + bgu,pt %xcc, 7b + add %o0, 1, %o0 ! advance dst + add %o1, %o0, %o1 ! restore %o1 +.Ldst_aligned_on_8: + andcc %o1, 7, %o5 + brnz,pt %o5, .Lsrc_dst_unaligned_on_8 + nop + +.Lsrc_dst_aligned_on_8: + ! check if we are copying MED_MAX or more bytes + set MED_MAX, %o3 + cmp %o2, %o3 ! limit to store buffer size + bgu,pn %xcc, .Llarge_align8_copy + nop + +/* + * Special case for handling when src and dest are both long word aligned + * and total data to move is less than MED_MAX bytes + */ +.Lmedlong: + subcc %o2, 63, %o2 ! adjust length to allow cc test + ble,pn %xcc, .Lmedl63 ! skip big loop if less than 64 bytes + nop +.Lmedl64: + EX_LD(LOAD(ldx, %o1, %o4), memcpy_retl_o2_plus_63) ! load + subcc %o2, 64, %o2 ! decrement length count + EX_ST(STORE(stx, %o4, %o0), memcpy_retl_o2_plus_63_64) ! and store + EX_LD(LOAD(ldx, %o1+8, %o3), memcpy_retl_o2_plus_63_56) ! a block of 64 + EX_ST(STORE(stx, %o3, %o0+8), memcpy_retl_o2_plus_63_56) + EX_LD(LOAD(ldx, %o1+16, %o4), memcpy_retl_o2_plus_63_48) + EX_ST(STORE(stx, %o4, %o0+16), memcpy_retl_o2_plus_63_48) + EX_LD(LOAD(ldx, %o1+24, %o3), memcpy_retl_o2_plus_63_40) + EX_ST(STORE(stx, %o3, %o0+24), memcpy_retl_o2_plus_63_40) + EX_LD(LOAD(ldx, %o1+32, %o4), memcpy_retl_o2_plus_63_32)! load and store + EX_ST(STORE(stx, %o4, %o0+32), memcpy_retl_o2_plus_63_32) + EX_LD(LOAD(ldx, %o1+40, %o3), memcpy_retl_o2_plus_63_24)! a block of 64 + add %o1, 64, %o1 ! increase src ptr by 64 + EX_ST(STORE(stx, %o3, %o0+40), memcpy_retl_o2_plus_63_24) + EX_LD(LOAD(ldx, %o1-16, %o4), memcpy_retl_o2_plus_63_16) + add %o0, 64, %o0 ! increase dst ptr by 64 + EX_ST(STORE(stx, %o4, %o0-16), memcpy_retl_o2_plus_63_16) + EX_LD(LOAD(ldx, %o1-8, %o3), memcpy_retl_o2_plus_63_8) + bgu,pt %xcc, .Lmedl64 ! repeat if at least 64 bytes left + EX_ST(STORE(stx, %o3, %o0-8), memcpy_retl_o2_plus_63_8) +.Lmedl63: + addcc %o2, 32, %o2 ! adjust remaining count + ble,pt %xcc, .Lmedl31 ! to skip if 31 or fewer bytes left + nop + EX_LD(LOAD(ldx, %o1, %o4), memcpy_retl_o2_plus_31) ! load + sub %o2, 32, %o2 ! decrement length count + EX_ST(STORE(stx, %o4, %o0), memcpy_retl_o2_plus_31_32) ! and store + EX_LD(LOAD(ldx, %o1+8, %o3), memcpy_retl_o2_plus_31_24) ! a block of 32 + add %o1, 32, %o1 ! increase src ptr by 32 + EX_ST(STORE(stx, %o3, %o0+8), memcpy_retl_o2_plus_31_24) + EX_LD(LOAD(ldx, %o1-16, %o4), memcpy_retl_o2_plus_31_16) + add %o0, 32, %o0 ! increase dst ptr by 32 + EX_ST(STORE(stx, %o4, %o0-16), memcpy_retl_o2_plus_31_16) + EX_LD(LOAD(ldx, %o1-8, %o3), memcpy_retl_o2_plus_31_8) + EX_ST(STORE(stx, %o3, %o0-8), memcpy_retl_o2_plus_31_8) +.Lmedl31: + addcc %o2, 16, %o2 ! adjust remaining count + ble,pt %xcc, .Lmedl15 ! skip if 15 or fewer bytes left + nop ! + EX_LD(LOAD(ldx, %o1, %o4), memcpy_retl_o2_plus_15) + add %o1, 16, %o1 ! increase src ptr by 16 + EX_ST(STORE(stx, %o4, %o0), memcpy_retl_o2_plus_15) + sub %o2, 16, %o2 ! decrease count by 16 + EX_LD(LOAD(ldx, %o1-8, %o3), memcpy_retl_o2_plus_15_8) + add %o0, 16, %o0 ! increase dst ptr by 16 + EX_ST(STORE(stx, %o3, %o0-8), memcpy_retl_o2_plus_15_8) +.Lmedl15: + addcc %o2, 15, %o2 ! restore count + bz,pt %xcc, .Lsmallx ! exit if finished + cmp %o2, 8 + blt,pt %xcc, .Lmedw7 ! skip if 7 or fewer bytes left + tst %o2 + EX_LD(LOAD(ldx, %o1, %o4), memcpy_retl_o2) ! load 8 bytes + add %o1, 8, %o1 ! increase src ptr by 8 + add %o0, 8, %o0 ! increase dst ptr by 8 + subcc %o2, 8, %o2 ! 
decrease count by 8 + bnz,pn %xcc, .Lmedw7 + EX_ST(STORE(stx, %o4, %o0-8), memcpy_retl_o2_plus_8) ! and store 8 + retl + mov EX_RETVAL(%g1), %o0 ! restore %o0 + + .align 16 +.Lsrc_dst_unaligned_on_8: + ! DST is 8-byte aligned, src is not +2: + andcc %o1, 0x3, %o5 ! test word alignment + bnz,pt %xcc, .Lunalignsetup ! branch to skip if not word aligned + nop + +/* + * Handle all cases where src and dest are aligned on word + * boundaries. Use unrolled loops for better performance. + * This option wins over standard large data move when + * source and destination is in cache for.Lmedium + * to short data moves. + */ + set MED_WMAX, %o3 + cmp %o2, %o3 ! limit to store buffer size + bge,pt %xcc, .Lunalignrejoin ! otherwise rejoin main loop + nop + + subcc %o2, 31, %o2 ! adjust length to allow cc test + ! for end of loop + ble,pt %xcc, .Lmedw31 ! skip big loop if less than 16 +.Lmedw32: + EX_LD(LOAD(ld, %o1, %o4), memcpy_retl_o2_plus_31)! move a block of 32 + sllx %o4, 32, %o5 + EX_LD(LOAD(ld, %o1+4, %o4), memcpy_retl_o2_plus_31) + or %o4, %o5, %o5 + EX_ST(STORE(stx, %o5, %o0), memcpy_retl_o2_plus_31) + subcc %o2, 32, %o2 ! decrement length count + EX_LD(LOAD(ld, %o1+8, %o4), memcpy_retl_o2_plus_31_24) + sllx %o4, 32, %o5 + EX_LD(LOAD(ld, %o1+12, %o4), memcpy_retl_o2_plus_31_24) + or %o4, %o5, %o5 + EX_ST(STORE(stx, %o5, %o0+8), memcpy_retl_o2_plus_31_24) + add %o1, 32, %o1 ! increase src ptr by 32 + EX_LD(LOAD(ld, %o1-16, %o4), memcpy_retl_o2_plus_31_16) + sllx %o4, 32, %o5 + EX_LD(LOAD(ld, %o1-12, %o4), memcpy_retl_o2_plus_31_16) + or %o4, %o5, %o5 + EX_ST(STORE(stx, %o5, %o0+16), memcpy_retl_o2_plus_31_16) + add %o0, 32, %o0 ! increase dst ptr by 32 + EX_LD(LOAD(ld, %o1-8, %o4), memcpy_retl_o2_plus_31_8) + sllx %o4, 32, %o5 + EX_LD(LOAD(ld, %o1-4, %o4), memcpy_retl_o2_plus_31_8) + or %o4, %o5, %o5 + bgu,pt %xcc, .Lmedw32 ! repeat if at least 32 bytes left + EX_ST(STORE(stx, %o5, %o0-8), memcpy_retl_o2_plus_31_8) +.Lmedw31: + addcc %o2, 31, %o2 ! restore count + + bz,pt %xcc, .Lsmallx ! exit if finished + nop + cmp %o2, 16 + blt,pt %xcc, .Lmedw15 + nop + EX_LD(LOAD(ld, %o1, %o4), memcpy_retl_o2)! move a block of 16 bytes + sllx %o4, 32, %o5 + subcc %o2, 16, %o2 ! decrement length count + EX_LD(LOAD(ld, %o1+4, %o4), memcpy_retl_o2_plus_16) + or %o4, %o5, %o5 + EX_ST(STORE(stx, %o5, %o0), memcpy_retl_o2_plus_16) + add %o1, 16, %o1 ! increase src ptr by 16 + EX_LD(LOAD(ld, %o1-8, %o4), memcpy_retl_o2_plus_8) + add %o0, 16, %o0 ! increase dst ptr by 16 + sllx %o4, 32, %o5 + EX_LD(LOAD(ld, %o1-4, %o4), memcpy_retl_o2_plus_8) + or %o4, %o5, %o5 + EX_ST(STORE(stx, %o5, %o0-8), memcpy_retl_o2_plus_8) +.Lmedw15: + bz,pt %xcc, .Lsmallx ! exit if finished + cmp %o2, 8 + blt,pn %xcc, .Lmedw7 ! skip if 7 or fewer bytes left + tst %o2 + EX_LD(LOAD(ld, %o1, %o4), memcpy_retl_o2) ! load 4 bytes + subcc %o2, 8, %o2 ! decrease count by 8 + EX_ST(STORE(stw, %o4, %o0), memcpy_retl_o2_plus_8)! and store 4 bytes + add %o1, 8, %o1 ! increase src ptr by 8 + EX_LD(LOAD(ld, %o1-4, %o3), memcpy_retl_o2_plus_4) ! load 4 bytes + add %o0, 8, %o0 ! increase dst ptr by 8 + EX_ST(STORE(stw, %o3, %o0-4), memcpy_retl_o2_plus_4)! and store 4 bytes + bz,pt %xcc, .Lsmallx ! exit if finished +.Lmedw7: ! count is ge 1, less than 8 + cmp %o2, 4 ! check for 4 bytes left + blt,pn %xcc, .Lsmallleft3 ! skip if 3 or fewer bytes left + nop ! + EX_LD(LOAD(ld, %o1, %o4), memcpy_retl_o2) ! load 4 bytes + add %o1, 4, %o1 ! increase src ptr by 4 + add %o0, 4, %o0 ! increase dst ptr by 4 + subcc %o2, 4, %o2 ! 
decrease count by 4 + bnz .Lsmallleft3 + EX_ST(STORE(stw, %o4, %o0-4), memcpy_retl_o2_plus_4)! and store 4 bytes + retl + mov EX_RETVAL(%g1), %o0 + + .align 16 +.Llarge_align8_copy: ! Src and dst share 8 byte alignment + ! align dst to 64 byte boundary + andcc %o0, 0x3f, %o3 ! %o3 == 0 means dst is 64 byte aligned + brz,pn %o3, .Laligned_to_64 + andcc %o0, 8, %o3 ! odd long words to move? + brz,pt %o3, .Laligned_to_16 + nop + EX_LD(LOAD(ldx, %o1, %o4), memcpy_retl_o2) + sub %o2, 8, %o2 + add %o1, 8, %o1 ! increment src ptr + add %o0, 8, %o0 ! increment dst ptr + EX_ST(STORE(stx, %o4, %o0-8), memcpy_retl_o2_plus_8) +.Laligned_to_16: + andcc %o0, 16, %o3 ! pair of long words to move? + brz,pt %o3, .Laligned_to_32 + nop + EX_LD(LOAD(ldx, %o1, %o4), memcpy_retl_o2) + sub %o2, 16, %o2 + EX_ST(STORE(stx, %o4, %o0), memcpy_retl_o2_plus_16) + add %o1, 16, %o1 ! increment src ptr + EX_LD(LOAD(ldx, %o1-8, %o4), memcpy_retl_o2_plus_8) + add %o0, 16, %o0 ! increment dst ptr + EX_ST(STORE(stx, %o4, %o0-8), memcpy_retl_o2_plus_8) +.Laligned_to_32: + andcc %o0, 32, %o3 ! four long words to move? + brz,pt %o3, .Laligned_to_64 + nop + EX_LD(LOAD(ldx, %o1, %o4), memcpy_retl_o2) + sub %o2, 32, %o2 + EX_ST(STORE(stx, %o4, %o0), memcpy_retl_o2_plus_32) + EX_LD(LOAD(ldx, %o1+8, %o4), memcpy_retl_o2_plus_24) + EX_ST(STORE(stx, %o4, %o0+8), memcpy_retl_o2_plus_24) + EX_LD(LOAD(ldx, %o1+16, %o4), memcpy_retl_o2_plus_16) + EX_ST(STORE(stx, %o4, %o0+16), memcpy_retl_o2_plus_16) + add %o1, 32, %o1 ! increment src ptr + EX_LD(LOAD(ldx, %o1-8, %o4), memcpy_retl_o2_plus_8) + add %o0, 32, %o0 ! increment dst ptr + EX_ST(STORE(stx, %o4, %o0-8), memcpy_retl_o2_plus_8) +.Laligned_to_64: +! +! Using block init store (BIS) instructions to avoid fetching cache +! lines from memory. Use ST_CHUNK stores to first element of each cache +! line (similar to prefetching) to avoid overfilling STQ or miss buffers. +! Gives existing cache lines time to be moved out of L1/L2/L3 cache. +! Initial stores using MRU version of BIS to keep cache line in +! cache until we are ready to store final element of cache line. +! Then store last element using the LRU version of BIS. +! + andn %o2, 0x3f, %o5 ! %o5 is multiple of block size + and %o2, 0x3f, %o2 ! residue bytes in %o2 +! +! We use STORE_MRU_ASI for the first seven stores to each cache line +! followed by STORE_ASI (mark as LRU) for the last store. That +! mixed approach reduces the probability that the cache line is removed +! before we finish setting it, while minimizing the effects on +! other cached values during a large memcpy +! +! ST_CHUNK batches up initial BIS operations for several cache lines +! to allow multiple requests to not be blocked by overflowing the +! the store miss buffer. Then the matching stores for all those +! BIS operations are executed. +! + + sub %o0, 8, %o0 ! adjust %o0 for ASI alignment +.Lalign_loop: + cmp %o5, ST_CHUNK*64 + blu,pt %xcc, .Lalign_loop_fin + mov ST_CHUNK,%o3 +.Lalign_loop_start: + prefetch [%o1 + (ALIGN_PRE * BLOCK_SIZE)], 21 + subcc %o3, 1, %o3 + EX_LD(LOAD(ldx, %o1, %o4), memcpy_retl_o2_plus_o5) + add %o1, 64, %o1 + add %o0, 8, %o0 + EX_ST(STORE_INIT_MRU(%o4, %o0), memcpy_retl_o2_plus_o5) + bgu %xcc,.Lalign_loop_start + add %o0, 56, %o0 + + mov ST_CHUNK,%o3 + sllx %o3, 6, %o4 ! ST_CHUNK*64 + sub %o1, %o4, %o1 ! reset %o1 + sub %o0, %o4, %o0 ! 
reset %o0 + +.Lalign_loop_rest: + EX_LD(LOAD(ldx, %o1+8, %o4), memcpy_retl_o2_plus_o5) + add %o0, 16, %o0 + EX_ST(STORE_INIT_MRU(%o4, %o0), memcpy_retl_o2_plus_o5) + EX_LD(LOAD(ldx, %o1+16, %o4), memcpy_retl_o2_plus_o5) + add %o0, 8, %o0 + EX_ST(STORE_INIT_MRU(%o4, %o0), memcpy_retl_o2_plus_o5) + subcc %o3, 1, %o3 + EX_LD(LOAD(ldx, %o1+24, %o4), memcpy_retl_o2_plus_o5) + add %o0, 8, %o0 + EX_ST(STORE_INIT_MRU(%o4, %o0), memcpy_retl_o2_plus_o5) + EX_LD(LOAD(ldx, %o1+32, %o4), memcpy_retl_o2_plus_o5) + add %o0, 8, %o0 + EX_ST(STORE_INIT_MRU(%o4, %o0), memcpy_retl_o2_plus_o5) + EX_LD(LOAD(ldx, %o1+40, %o4), memcpy_retl_o2_plus_o5) + add %o0, 8, %o0 + EX_ST(STORE_INIT_MRU(%o4, %o0), memcpy_retl_o2_plus_o5) + EX_LD(LOAD(ldx, %o1+48, %o4), memcpy_retl_o2_plus_o5) + add %o1, 64, %o1 + add %o0, 8, %o0 + EX_ST(STORE_INIT_MRU(%o4, %o0), memcpy_retl_o2_plus_o5) + add %o0, 8, %o0 + EX_LD(LOAD(ldx, %o1-8, %o4), memcpy_retl_o2_plus_o5) + sub %o5, 64, %o5 + bgu %xcc,.Lalign_loop_rest + ! mark cache line as LRU + EX_ST(STORE_INIT(%o4, %o0), memcpy_retl_o2_plus_o5_plus_64) + + cmp %o5, ST_CHUNK*64 + bgu,pt %xcc, .Lalign_loop_start + mov ST_CHUNK,%o3 + + cmp %o5, 0 + beq .Lalign_done + nop +.Lalign_loop_fin: + EX_LD(LOAD(ldx, %o1, %o4), memcpy_retl_o2_plus_o5) + EX_ST(STORE(stx, %o4, %o0+8), memcpy_retl_o2_plus_o5) + EX_LD(LOAD(ldx, %o1+8, %o4), memcpy_retl_o2_plus_o5) + EX_ST(STORE(stx, %o4, %o0+8+8), memcpy_retl_o2_plus_o5) + EX_LD(LOAD(ldx, %o1+16, %o4), memcpy_retl_o2_plus_o5) + EX_ST(STORE(stx, %o4, %o0+8+16), memcpy_retl_o2_plus_o5) + subcc %o5, 64, %o5 + EX_LD(LOAD(ldx, %o1+24, %o4), memcpy_retl_o2_plus_o5_64) + EX_ST(STORE(stx, %o4, %o0+8+24), memcpy_retl_o2_plus_o5_64) + EX_LD(LOAD(ldx, %o1+32, %o4), memcpy_retl_o2_plus_o5_64) + EX_ST(STORE(stx, %o4, %o0+8+32), memcpy_retl_o2_plus_o5_64) + EX_LD(LOAD(ldx, %o1+40, %o4), memcpy_retl_o2_plus_o5_64) + EX_ST(STORE(stx, %o4, %o0+8+40), memcpy_retl_o2_plus_o5_64) + EX_LD(LOAD(ldx, %o1+48, %o4), memcpy_retl_o2_plus_o5_64) + add %o1, 64, %o1 + EX_ST(STORE(stx, %o4, %o0+8+48), memcpy_retl_o2_plus_o5_64) + add %o0, 64, %o0 + EX_LD(LOAD(ldx, %o1-8, %o4), memcpy_retl_o2_plus_o5_64) + bgu %xcc,.Lalign_loop_fin + EX_ST(STORE(stx, %o4, %o0), memcpy_retl_o2_plus_o5_64) + +.Lalign_done: + add %o0, 8, %o0 ! restore %o0 from ASI alignment + membar #StoreStore + sub %o2, 63, %o2 ! adjust length to allow cc test + ba .Lmedl63 ! in .Lmedl63 + nop + + .align 16 + ! Dst is on 8 byte boundary; src is not; remaining count > SMALL_MAX +.Lunalignsetup: +.Lunalignrejoin: + mov %g1, %o3 ! save %g1 as VISEntryHalf clobbers it +#ifdef NON_USER_COPY + VISEntryHalfFast(.Lmedium_vis_entry_fail_cp) +#else + VISEntryHalf +#endif + mov %o3, %g1 ! restore %g1 + + set MED_UMAX, %o3 + cmp %o2, %o3 ! check for.Lmedium unaligned limit + bge,pt %xcc,.Lunalign_large + prefetch [%o1 + (4 * BLOCK_SIZE)], 20 + andn %o2, 0x3f, %o5 ! %o5 is multiple of block size + and %o2, 0x3f, %o2 ! residue bytes in %o2 + cmp %o2, 8 ! Insure we do not load beyond + bgt .Lunalign_adjust ! end of source buffer + andn %o1, 0x7, %o4 ! %o4 has long word aligned src address + add %o2, 64, %o2 ! adjust to leave loop + sub %o5, 64, %o5 ! early if necessary +.Lunalign_adjust: + alignaddr %o1, %g0, %g0 ! generate %gsr + add %o1, %o5, %o1 ! 
advance %o1 to after blocks + EX_LD_FP(LOAD(ldd, %o4, %f0), memcpy_retl_o2_plus_o5) +.Lunalign_loop: + EX_LD_FP(LOAD(ldd, %o4+8, %f2), memcpy_retl_o2_plus_o5) + faligndata %f0, %f2, %f16 + EX_LD_FP(LOAD(ldd, %o4+16, %f4), memcpy_retl_o2_plus_o5) + subcc %o5, BLOCK_SIZE, %o5 + EX_ST_FP(STORE(std, %f16, %o0), memcpy_retl_o2_plus_o5_plus_64) + faligndata %f2, %f4, %f18 + EX_LD_FP(LOAD(ldd, %o4+24, %f6), memcpy_retl_o2_plus_o5_plus_56) + EX_ST_FP(STORE(std, %f18, %o0+8), memcpy_retl_o2_plus_o5_plus_56) + faligndata %f4, %f6, %f20 + EX_LD_FP(LOAD(ldd, %o4+32, %f8), memcpy_retl_o2_plus_o5_plus_48) + EX_ST_FP(STORE(std, %f20, %o0+16), memcpy_retl_o2_plus_o5_plus_48) + faligndata %f6, %f8, %f22 + EX_LD_FP(LOAD(ldd, %o4+40, %f10), memcpy_retl_o2_plus_o5_plus_40) + EX_ST_FP(STORE(std, %f22, %o0+24), memcpy_retl_o2_plus_o5_plus_40) + faligndata %f8, %f10, %f24 + EX_LD_FP(LOAD(ldd, %o4+48, %f12), memcpy_retl_o2_plus_o5_plus_32) + EX_ST_FP(STORE(std, %f24, %o0+32), memcpy_retl_o2_plus_o5_plus_32) + faligndata %f10, %f12, %f26 + EX_LD_FP(LOAD(ldd, %o4+56, %f14), memcpy_retl_o2_plus_o5_plus_24) + add %o4, BLOCK_SIZE, %o4 + EX_ST_FP(STORE(std, %f26, %o0+40), memcpy_retl_o2_plus_o5_plus_24) + faligndata %f12, %f14, %f28 + EX_LD_FP(LOAD(ldd, %o4, %f0), memcpy_retl_o2_plus_o5_plus_16) + EX_ST_FP(STORE(std, %f28, %o0+48), memcpy_retl_o2_plus_o5_plus_16) + faligndata %f14, %f0, %f30 + EX_ST_FP(STORE(std, %f30, %o0+56), memcpy_retl_o2_plus_o5_plus_8) + add %o0, BLOCK_SIZE, %o0 + bgu,pt %xcc, .Lunalign_loop + prefetch [%o4 + (5 * BLOCK_SIZE)], 20 + ba .Lunalign_done + nop + +.Lunalign_large: + andcc %o0, 0x3f, %o3 ! is dst 64-byte block aligned? + bz %xcc, .Lunalignsrc + sub %o3, 64, %o3 ! %o3 will be multiple of 8 + neg %o3 ! bytes until dest is 64 byte aligned + sub %o2, %o3, %o2 ! update cnt with bytes to be moved + ! Move bytes according to source alignment + andcc %o1, 0x1, %o5 + bnz %xcc, .Lunalignbyte ! check for byte alignment + nop + andcc %o1, 2, %o5 ! check for half word alignment + bnz %xcc, .Lunalignhalf + nop + ! Src is word aligned +.Lunalignword: + EX_LD_FP(LOAD(ld, %o1, %o4), memcpy_retl_o2_plus_o3) ! load 4 bytes + add %o1, 8, %o1 ! increase src ptr by 8 + EX_ST_FP(STORE(stw, %o4, %o0), memcpy_retl_o2_plus_o3) ! and store 4 + subcc %o3, 8, %o3 ! decrease count by 8 + EX_LD_FP(LOAD(ld, %o1-4, %o4), memcpy_retl_o2_plus_o3_plus_4)! load 4 + add %o0, 8, %o0 ! increase dst ptr by 8 + bnz %xcc, .Lunalignword + EX_ST_FP(STORE(stw, %o4, %o0-4), memcpy_retl_o2_plus_o3_plus_4) + ba .Lunalignsrc + nop + + ! Src is half-word aligned +.Lunalignhalf: + EX_LD_FP(LOAD(lduh, %o1, %o4), memcpy_retl_o2_plus_o3) ! load 2 bytes + sllx %o4, 32, %o5 ! shift left + EX_LD_FP(LOAD(lduw, %o1+2, %o4), memcpy_retl_o2_plus_o3) + or %o4, %o5, %o5 + sllx %o5, 16, %o5 + EX_LD_FP(LOAD(lduh, %o1+6, %o4), memcpy_retl_o2_plus_o3) + or %o4, %o5, %o5 + EX_ST_FP(STORE(stx, %o5, %o0), memcpy_retl_o2_plus_o3) + add %o1, 8, %o1 + subcc %o3, 8, %o3 + bnz %xcc, .Lunalignhalf + add %o0, 8, %o0 + ba .Lunalignsrc + nop + + ! Src is Byte aligned +.Lunalignbyte: + sub %o0, %o1, %o0 ! 
share pointer advance +.Lunalignbyte_loop: + EX_LD_FP(LOAD(ldub, %o1, %o4), memcpy_retl_o2_plus_o3) + sllx %o4, 56, %o5 + EX_LD_FP(LOAD(lduh, %o1+1, %o4), memcpy_retl_o2_plus_o3) + sllx %o4, 40, %o4 + or %o4, %o5, %o5 + EX_LD_FP(LOAD(lduh, %o1+3, %o4), memcpy_retl_o2_plus_o3) + sllx %o4, 24, %o4 + or %o4, %o5, %o5 + EX_LD_FP(LOAD(lduh, %o1+5, %o4), memcpy_retl_o2_plus_o3) + sllx %o4, 8, %o4 + or %o4, %o5, %o5 + EX_LD_FP(LOAD(ldub, %o1+7, %o4), memcpy_retl_o2_plus_o3) + or %o4, %o5, %o5 + add %o0, %o1, %o0 + EX_ST_FP(STORE(stx, %o5, %o0), memcpy_retl_o2_plus_o3) + sub %o0, %o1, %o0 + subcc %o3, 8, %o3 + bnz %xcc, .Lunalignbyte_loop + add %o1, 8, %o1 + add %o0,%o1, %o0 ! restore pointer + + ! Destination is now block (64 byte aligned) +.Lunalignsrc: + andn %o2, 0x3f, %o5 ! %o5 is multiple of block size + and %o2, 0x3f, %o2 ! residue bytes in %o2 + add %o2, 64, %o2 ! Insure we do not load beyond + sub %o5, 64, %o5 ! end of source buffer + + andn %o1, 0x7, %o4 ! %o4 has long word aligned src address + alignaddr %o1, %g0, %g0 ! generate %gsr + add %o1, %o5, %o1 ! advance %o1 to after blocks + + EX_LD_FP(LOAD(ldd, %o4, %f14), memcpy_retl_o2_plus_o5) + add %o4, 8, %o4 +.Lunalign_sloop: + EX_LD_FP(LOAD(ldd, %o4, %f16), memcpy_retl_o2_plus_o5) + faligndata %f14, %f16, %f0 + EX_LD_FP(LOAD(ldd, %o4+8, %f18), memcpy_retl_o2_plus_o5) + faligndata %f16, %f18, %f2 + EX_LD_FP(LOAD(ldd, %o4+16, %f20), memcpy_retl_o2_plus_o5) + faligndata %f18, %f20, %f4 + EX_ST_FP(STORE(std, %f0, %o0), memcpy_retl_o2_plus_o5) + subcc %o5, 64, %o5 + EX_LD_FP(LOAD(ldd, %o4+24, %f22), memcpy_retl_o2_plus_o5_plus_56) + faligndata %f20, %f22, %f6 + EX_ST_FP(STORE(std, %f2, %o0+8), memcpy_retl_o2_plus_o5_plus_56) + EX_LD_FP(LOAD(ldd, %o4+32, %f24), memcpy_retl_o2_plus_o5_plus_48) + faligndata %f22, %f24, %f8 + EX_ST_FP(STORE(std, %f4, %o0+16), memcpy_retl_o2_plus_o5_plus_48) + EX_LD_FP(LOAD(ldd, %o4+40, %f26), memcpy_retl_o2_plus_o5_plus_40) + faligndata %f24, %f26, %f10 + EX_ST_FP(STORE(std, %f6, %o0+24), memcpy_retl_o2_plus_o5_plus_40) + EX_LD_FP(LOAD(ldd, %o4+48, %f28), memcpy_retl_o2_plus_o5_plus_40) + faligndata %f26, %f28, %f12 + EX_ST_FP(STORE(std, %f8, %o0+32), memcpy_retl_o2_plus_o5_plus_40) + add %o4, 64, %o4 + EX_LD_FP(LOAD(ldd, %o4-8, %f30), memcpy_retl_o2_plus_o5_plus_40) + faligndata %f28, %f30, %f14 + EX_ST_FP(STORE(std, %f10, %o0+40), memcpy_retl_o2_plus_o5_plus_40) + EX_ST_FP(STORE(std, %f12, %o0+48), memcpy_retl_o2_plus_o5_plus_40) + add %o0, 64, %o0 + EX_ST_FP(STORE(std, %f14, %o0-8), memcpy_retl_o2_plus_o5_plus_40) + fsrc2 %f30, %f14 + bgu,pt %xcc, .Lunalign_sloop + prefetch [%o4 + (8 * BLOCK_SIZE)], 20 + +.Lunalign_done: + ! Handle trailing bytes, 64 to 127 + ! Dest long word aligned, Src not long word aligned + cmp %o2, 15 + bleu %xcc, .Lunalign_short + + andn %o2, 0x7, %o5 ! %o5 is multiple of 8 + and %o2, 0x7, %o2 ! residue bytes in %o2 + add %o2, 8, %o2 + sub %o5, 8, %o5 ! insure we do not load past end of src + andn %o1, 0x7, %o4 ! %o4 has long word aligned src address + add %o1, %o5, %o1 ! advance %o1 to after multiple of 8 + EX_LD_FP(LOAD(ldd, %o4, %f0), memcpy_retl_o2_plus_o5)! 
fetch partialword +.Lunalign_by8: + EX_LD_FP(LOAD(ldd, %o4+8, %f2), memcpy_retl_o2_plus_o5) + add %o4, 8, %o4 + faligndata %f0, %f2, %f16 + subcc %o5, 8, %o5 + EX_ST_FP(STORE(std, %f16, %o0), memcpy_retl_o2_plus_o5) + fsrc2 %f2, %f0 + bgu,pt %xcc, .Lunalign_by8 + add %o0, 8, %o0 + +.Lunalign_short: +#ifdef NON_USER_COPY + VISExitHalfFast +#else + VISExitHalf +#endif + ba .Lsmallrest + nop + +/* + * This is a special case of nested memcpy. This can happen when kernel + * calls unaligned memcpy back to back without saving FP registers. We need + * traps(context switch) to save/restore FP registers. If the kernel calls + * memcpy without this trap sequence we will hit FP corruption. Let's use + * the normal integer load/store method in this case. + */ + +#ifdef NON_USER_COPY +.Lmedium_vis_entry_fail_cp: + or %o0, %o1, %g2 +#endif +.Lmedium_cp: + LOAD(prefetch, %o1 + 0x40, #n_reads_strong) + andcc %g2, 0x7, %g0 + bne,pn %xcc, .Lmedium_unaligned_cp + nop + +.Lmedium_noprefetch_cp: + andncc %o2, 0x20 - 1, %o5 + be,pn %xcc, 2f + sub %o2, %o5, %o2 +1: EX_LD(LOAD(ldx, %o1 + 0x00, %o3), memcpy_retl_o2_plus_o5) + EX_LD(LOAD(ldx, %o1 + 0x08, %g2), memcpy_retl_o2_plus_o5) + EX_LD(LOAD(ldx, %o1 + 0x10, %g7), memcpy_retl_o2_plus_o5) + EX_LD(LOAD(ldx, %o1 + 0x18, %o4), memcpy_retl_o2_plus_o5) + add %o1, 0x20, %o1 + subcc %o5, 0x20, %o5 + EX_ST(STORE(stx, %o3, %o0 + 0x00), memcpy_retl_o2_plus_o5_plus_32) + EX_ST(STORE(stx, %g2, %o0 + 0x08), memcpy_retl_o2_plus_o5_plus_24) + EX_ST(STORE(stx, %g7, %o0 + 0x10), memcpy_retl_o2_plus_o5_plus_24) + EX_ST(STORE(stx, %o4, %o0 + 0x18), memcpy_retl_o2_plus_o5_plus_8) + bne,pt %xcc, 1b + add %o0, 0x20, %o0 +2: andcc %o2, 0x18, %o5 + be,pt %xcc, 3f + sub %o2, %o5, %o2 +1: EX_LD(LOAD(ldx, %o1 + 0x00, %o3), memcpy_retl_o2_plus_o5) + add %o1, 0x08, %o1 + add %o0, 0x08, %o0 + subcc %o5, 0x08, %o5 + bne,pt %xcc, 1b + EX_ST(STORE(stx, %o3, %o0 - 0x08), memcpy_retl_o2_plus_o5_plus_8) +3: brz,pt %o2, .Lexit_cp + cmp %o2, 0x04 + bl,pn %xcc, .Ltiny_cp + nop + EX_LD(LOAD(lduw, %o1 + 0x00, %o3), memcpy_retl_o2) + add %o1, 0x04, %o1 + add %o0, 0x04, %o0 + subcc %o2, 0x04, %o2 + bne,pn %xcc, .Ltiny_cp + EX_ST(STORE(stw, %o3, %o0 - 0x04), memcpy_retl_o2_plus_4) + ba,a,pt %xcc, .Lexit_cp + +.Lmedium_unaligned_cp: + /* First get dest 8 byte aligned. 
*/ + sub %g0, %o0, %o3 + and %o3, 0x7, %o3 + brz,pt %o3, 2f + sub %o2, %o3, %o2 + +1: EX_LD(LOAD(ldub, %o1 + 0x00, %g2), memcpy_retl_o2_plus_g1) + add %o1, 1, %o1 + subcc %o3, 1, %o3 + add %o0, 1, %o0 + bne,pt %xcc, 1b + EX_ST(STORE(stb, %g2, %o0 - 0x01), memcpy_retl_o2_plus_g1_plus_1) +2: + and %o1, 0x7, %o3 + brz,pn %o3, .Lmedium_noprefetch_cp + sll %o3, 3, %o3 + mov 64, %g2 + sub %g2, %o3, %g2 + andn %o1, 0x7, %o1 + EX_LD(LOAD(ldx, %o1 + 0x00, %o4), memcpy_retl_o2) + sllx %o4, %o3, %o4 + andn %o2, 0x08 - 1, %o5 + sub %o2, %o5, %o2 + +1: EX_LD(LOAD(ldx, %o1 + 0x08, %g3), memcpy_retl_o2_plus_o5) + add %o1, 0x08, %o1 + subcc %o5, 0x08, %o5 + srlx %g3, %g2, %g7 + or %g7, %o4, %g7 + EX_ST(STORE(stx, %g7, %o0 + 0x00), memcpy_retl_o2_plus_o5_plus_8) + add %o0, 0x08, %o0 + bne,pt %xcc, 1b + sllx %g3, %o3, %o4 + srl %o3, 3, %o3 + add %o1, %o3, %o1 + brz,pn %o2, .Lexit_cp + nop + ba,pt %xcc, .Lsmall_unaligned_cp + +.Ltiny_cp: + EX_LD(LOAD(ldub, %o1 + 0x00, %o3), memcpy_retl_o2) + subcc %o2, 1, %o2 + be,pn %xcc, .Lexit_cp + EX_ST(STORE(stb, %o3, %o0 + 0x00), memcpy_retl_o2_plus_1) + EX_LD(LOAD(ldub, %o1 + 0x01, %o3), memcpy_retl_o2) + subcc %o2, 1, %o2 + be,pn %xcc, .Lexit_cp + EX_ST(STORE(stb, %o3, %o0 + 0x01), memcpy_retl_o2_plus_1) + EX_LD(LOAD(ldub, %o1 + 0x02, %o3), memcpy_retl_o2) + ba,pt %xcc, .Lexit_cp + EX_ST(STORE(stb, %o3, %o0 + 0x02), memcpy_retl_o2) + +.Lsmall_cp: + andcc %g2, 0x3, %g0 + bne,pn %xcc, .Lsmall_unaligned_cp + andn %o2, 0x4 - 1, %o5 + sub %o2, %o5, %o2 +1: + EX_LD(LOAD(lduw, %o1 + 0x00, %o3), memcpy_retl_o2_plus_o5) + add %o1, 0x04, %o1 + subcc %o5, 0x04, %o5 + add %o0, 0x04, %o0 + bne,pt %xcc, 1b + EX_ST(STORE(stw, %o3, %o0 - 0x04), memcpy_retl_o2_plus_o5_plus_4) + brz,pt %o2, .Lexit_cp + nop + ba,a,pt %xcc, .Ltiny_cp + +.Lsmall_unaligned_cp: +1: EX_LD(LOAD(ldub, %o1 + 0x00, %o3), memcpy_retl_o2) + add %o1, 1, %o1 + add %o0, 1, %o0 + subcc %o2, 1, %o2 + bne,pt %xcc, 1b + EX_ST(STORE(stb, %o3, %o0 - 0x01), memcpy_retl_o2_plus_1) + ba,a,pt %xcc, .Lexit_cp + +.Lsmallrest: + tst %o2 + bz,pt %xcc, .Lsmallx + cmp %o2, 4 + blt,pn %xcc, .Lsmallleft3 + nop + sub %o2, 3, %o2 +.Lsmallnotalign4: + EX_LD(LOAD(ldub, %o1, %o3), memcpy_retl_o2_plus_3)! read byte + subcc %o2, 4, %o2 ! reduce count by 4 + EX_ST(STORE(stb, %o3, %o0), memcpy_retl_o2_plus_7)! write byte & repeat + EX_LD(LOAD(ldub, %o1+1, %o3), memcpy_retl_o2_plus_6)! for total of 4 + add %o1, 4, %o1 ! advance SRC by 4 + EX_ST(STORE(stb, %o3, %o0+1), memcpy_retl_o2_plus_6) + EX_LD(LOAD(ldub, %o1-2, %o3), memcpy_retl_o2_plus_5) + add %o0, 4, %o0 ! advance DST by 4 + EX_ST(STORE(stb, %o3, %o0-2), memcpy_retl_o2_plus_5) + EX_LD(LOAD(ldub, %o1-1, %o3), memcpy_retl_o2_plus_4) + bgu,pt %xcc, .Lsmallnotalign4 ! loop til 3 or fewer bytes remain + EX_ST(STORE(stb, %o3, %o0-1), memcpy_retl_o2_plus_4) + addcc %o2, 3, %o2 ! restore count + bz,pt %xcc, .Lsmallx +.Lsmallleft3: ! 1, 2, or 3 bytes remain + subcc %o2, 1, %o2 + EX_LD(LOAD(ldub, %o1, %o3), memcpy_retl_o2_plus_1) ! load one byte + bz,pt %xcc, .Lsmallx + EX_ST(STORE(stb, %o3, %o0), memcpy_retl_o2_plus_1) ! store one byte + EX_LD(LOAD(ldub, %o1+1, %o3), memcpy_retl_o2) ! load second byte + subcc %o2, 1, %o2 + bz,pt %xcc, .Lsmallx + EX_ST(STORE(stb, %o3, %o0+1), memcpy_retl_o2_plus_1)! store second byte + EX_LD(LOAD(ldub, %o1+2, %o3), memcpy_retl_o2) ! load third byte + EX_ST(STORE(stb, %o3, %o0+2), memcpy_retl_o2) ! store third byte +.Lsmallx: + retl + mov EX_RETVAL(%g1), %o0 +.Lsmallfin: + tst %o2 + bnz,pn %xcc, .Lsmallleft3 + nop + retl + mov EX_RETVAL(%g1), %o0 ! 
restore %o0
+.Lexit_cp:
+ retl
+ mov EX_RETVAL(%g1), %o0
+ .size FUNC_NAME, .-FUNC_NAME
diff --git a/arch/sparc/lib/M7memset.S b/arch/sparc/lib/M7memset.S
new file mode 100644
index 000000000000..62ea91b3a6b8
--- /dev/null
+++ b/arch/sparc/lib/M7memset.S
@@ -0,0 +1,352 @@
+/*
+ * M7memset.S: SPARC M7 optimized memset.
+ *
+ * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ */
+
+/*
+ * M7memset.S: M7 optimized memset.
+ *
+ * char *memset(sp, c, n)
+ *
+ * Set an array of n chars starting at sp to the character c.
+ * Return sp.
+ *
+ * Fast assembler language version of the following C-program for memset
+ * which represents the `standard' for the C-library.
+ *
+ * void *
+ * memset(void *sp1, int c, size_t n)
+ * {
+ * if (n != 0) {
+ * char *sp = sp1;
+ * do {
+ * *sp++ = (char)c;
+ * } while (--n != 0);
+ * }
+ * return (sp1);
+ * }
+ *
+ * The algorithm is as follows:
+ *
+ * For small stores of 7 or fewer bytes, the bytes are stored one at a time.
+ *
+ * For stores of less than 32 bytes, align the address on a 4 byte boundary.
+ * Then store as many 4-byte chunks as possible, followed by trailing bytes.
+ *
+ * For sizes of 32 bytes or more, align the address on an 8 byte boundary.
+ * if (count >= 64) {
+ * store 8-byte chunks to align the address on a 64 byte boundary
+ * if (value to be set is zero && count >= MIN_ZERO) {
+ * Using BIS stores, set the first long word of each
+ * 64-byte cache line to zero which will also clear the
+ * other seven long words of the cache line.
+ * }
+ * else if (count >= MIN_LOOP) {
+ * Using BIS stores, set the first long word of each of
+ * ST_CHUNK cache lines (64 bytes each) before the main
+ * loop is entered.
+ * In the main loop, continue pre-setting the first long
+ * word of each cache line ST_CHUNK lines in advance while
+ * setting the other seven long words (56 bytes) of each
+ * cache line until fewer than ST_CHUNK*64 bytes remain.
+ * Then set the remaining seven long words of each cache
+ * line that has already had its first long word set.
+ * }
+ * store remaining data in 64-byte chunks until less than
+ * 64 bytes remain.
+ * }
+ * Store as many 8-byte chunks as possible, followed by trailing bytes.
+ *
+ * BIS = Block Init Store
+ * Doing the advance store of the first element of the cache line
+ * initiates the displacement of a cache line while only using a single
+ * instruction in the pipeline. That avoids various pipeline delays,
+ * such as filling the miss buffer. The performance effect is
+ * similar to prefetching for normal stores.
+ * The special case for zero fills runs faster and uses fewer instruction
+ * cycles than the normal memset loop.
+ *
+ * We only use BIS for memsets of more than MIN_LOOP bytes because a sequence
+ * of BIS stores must be followed by a membar #StoreStore. The benefit of
+ * the BIS store must be balanced against the cost of the membar operation.
+ */
+
+/*
+ * ASI_STBI_P marks the cache line as "least recently used"
+ * which means if many threads are active, it has a high chance
+ * of being pushed out of the cache between the first initializing
+ * store and the final stores.
+ * Thus, we use ASI_STBIMRU_P which marks the cache line as
+ * "most recently used" for all but the last store to the cache line.
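+ * The last store to each line is done with ASI_STBI_P, so the
+ * completed line ages normally again.
+ *
+ * For the zero-fill path this means one initializing store per 64-byte
+ * line is enough; as a C model of .wrzero_loop below (a sketch only,
+ * p is an illustrative name):
+ *
+ *	for (p = dst; p < dst + len; p += 64)
+ *		*(u64 *)p = 0;
+ *
+ * where each store is a BIS store that clears the other 56 bytes of
+ * its cache line as a side effect.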
+ */ + +#include <asm/asi.h> +#include <asm/page.h> + +#define ASI_STBI_P ASI_BLK_INIT_QUAD_LDD_P +#define ASI_STBIMRU_P ASI_ST_BLKINIT_MRU_P + + +#define ST_CHUNK 24 /* multiple of 4 due to loop unrolling */ +#define MIN_LOOP 16320 +#define MIN_ZERO 512 + + .section ".text" + .align 32 + +/* + * Define clear_page(dest) as memset(dest, 0, PAGE_SIZE) + * (can create a more optimized version later.) + */ + .globl M7clear_page + .globl M7clear_user_page +M7clear_page: /* clear_page(dest) */ +M7clear_user_page: + set PAGE_SIZE, %o1 + /* fall through into bzero code */ + + .size M7clear_page,.-M7clear_page + .size M7clear_user_page,.-M7clear_user_page + +/* + * Define bzero(dest, n) as memset(dest, 0, n) + * (can create a more optimized version later.) + */ + .globl M7bzero +M7bzero: /* bzero(dest, size) */ + mov %o1, %o2 + mov 0, %o1 + /* fall through into memset code */ + + .size M7bzero,.-M7bzero + + .global M7memset + .type M7memset, #function + .register %g3, #scratch +M7memset: + mov %o0, %o5 ! copy sp1 before using it + cmp %o2, 7 ! if small counts, just write bytes + bleu,pn %xcc, .wrchar + and %o1, 0xff, %o1 ! o1 is (char)c + + sll %o1, 8, %o3 + or %o1, %o3, %o1 ! now o1 has 2 bytes of c + sll %o1, 16, %o3 + cmp %o2, 32 + blu,pn %xcc, .wdalign + or %o1, %o3, %o1 ! now o1 has 4 bytes of c + + sllx %o1, 32, %o3 + or %o1, %o3, %o1 ! now o1 has 8 bytes of c + +.dbalign: + andcc %o5, 7, %o3 ! is sp1 aligned on a 8 byte bound? + bz,pt %xcc, .blkalign ! already long word aligned + sub %o3, 8, %o3 ! -(bytes till long word aligned) + + add %o2, %o3, %o2 ! update o2 with new count + ! Set -(%o3) bytes till sp1 long word aligned +1: stb %o1, [%o5] ! there is at least 1 byte to set + inccc %o3 ! byte clearing loop + bl,pt %xcc, 1b + inc %o5 + + ! Now sp1 is long word aligned (sp1 is found in %o5) +.blkalign: + cmp %o2, 64 ! check if there are 64 bytes to set + blu,pn %xcc, .wrshort + mov %o2, %o3 + + andcc %o5, 63, %o3 ! is sp1 block aligned? + bz,pt %xcc, .blkwr ! now block aligned + sub %o3, 64, %o3 ! o3 is -(bytes till block aligned) + add %o2, %o3, %o2 ! o2 is the remainder + + ! Store -(%o3) bytes till dst is block (64 byte) aligned. + ! Use long word stores. + ! Recall that dst is already long word aligned +1: + addcc %o3, 8, %o3 + stx %o1, [%o5] + bl,pt %xcc, 1b + add %o5, 8, %o5 + + ! Now sp1 is block aligned +.blkwr: + andn %o2, 63, %o4 ! calculate size of blocks in bytes + brz,pn %o1, .wrzero ! special case if c == 0 + and %o2, 63, %o3 ! %o3 = bytes left after blk stores. + + set MIN_LOOP, %g1 + cmp %o4, %g1 ! check there are enough bytes to set + blu,pn %xcc, .short_set ! to justify cost of membar + ! must be > pre-cleared lines + nop + + ! initial cache-clearing stores + ! get store pipeline moving + rd %asi, %g3 ! save %asi to be restored later + wr %g0, ASI_STBIMRU_P, %asi + + ! Primary memset loop for large memsets +.wr_loop: + sub %o5, 8, %o5 ! adjust %o5 for ASI store alignment + mov ST_CHUNK, %g1 +.wr_loop_start: + stxa %o1, [%o5+8]%asi + subcc %g1, 4, %g1 + stxa %o1, [%o5+8+64]%asi + add %o5, 256, %o5 + stxa %o1, [%o5+8-128]%asi + bgu %xcc, .wr_loop_start + stxa %o1, [%o5+8-64]%asi + + sub %o5, ST_CHUNK*64, %o5 ! reset %o5 + mov ST_CHUNK, %g1 + +.wr_loop_rest: + stxa %o1, [%o5+8+8]%asi + sub %o4, 64, %o4 + stxa %o1, [%o5+16+8]%asi + subcc %g1, 1, %g1 + stxa %o1, [%o5+24+8]%asi + stxa %o1, [%o5+32+8]%asi + stxa %o1, [%o5+40+8]%asi + add %o5, 64, %o5 + stxa %o1, [%o5-8]%asi + bgu %xcc, .wr_loop_rest + stxa %o1, [%o5]ASI_STBI_P + + ! 
If more than ST_CHUNK*64 bytes remain to set, continue
+ ! setting the first long word of each cache line in advance
+ ! to keep the store pipeline moving.
+
+ cmp %o4, ST_CHUNK*64
+ bge,pt %xcc, .wr_loop_start
+ mov ST_CHUNK, %g1
+
+ brz,a,pn %o4, .asi_done
+ add %o5, 8, %o5 ! restore %o5 offset
+
+.wr_loop_small:
+ stxa %o1, [%o5+8]%asi
+ stxa %o1, [%o5+8+8]%asi
+ stxa %o1, [%o5+16+8]%asi
+ stxa %o1, [%o5+24+8]%asi
+ stxa %o1, [%o5+32+8]%asi
+ subcc %o4, 64, %o4
+ stxa %o1, [%o5+40+8]%asi
+ add %o5, 64, %o5
+ stxa %o1, [%o5-8]%asi
+ bgu,pt %xcc, .wr_loop_small
+ stxa %o1, [%o5]ASI_STBI_P
+
+ ba .asi_done
+ add %o5, 8, %o5 ! restore %o5 offset
+
+ ! Special case loop for zero fill memsets
+ ! For each 64 byte cache line, single STBI to first element
+ ! clears line
+.wrzero:
+ cmp %o4, MIN_ZERO ! check if enough bytes to set
+ ! to pay %asi + membar cost
+ blu %xcc, .short_set
+ nop
+ sub %o4, 256, %o4
+
+.wrzero_loop:
+ mov 64, %g3
+ stxa %o1, [%o5]ASI_STBI_P
+ subcc %o4, 256, %o4
+ stxa %o1, [%o5+%g3]ASI_STBI_P
+ add %o5, 256, %o5
+ sub %g3, 192, %g3
+ stxa %o1, [%o5+%g3]ASI_STBI_P
+ add %g3, 64, %g3
+ bge,pt %xcc, .wrzero_loop
+ stxa %o1, [%o5+%g3]ASI_STBI_P
+ add %o4, 256, %o4
+
+ brz,pn %o4, .bsi_done
+ nop
+
+.wrzero_small:
+ stxa %o1, [%o5]ASI_STBI_P
+ subcc %o4, 64, %o4
+ bgu,pt %xcc, .wrzero_small
+ add %o5, 64, %o5
+ ba,a .bsi_done
+
+.asi_done:
+ wr %g3, 0x0, %asi ! restore saved %asi
+.bsi_done:
+ membar #StoreStore ! required by use of Block Store Init
+
+.short_set:
+ cmp %o4, 64 ! check if 64 bytes to set
+ blu %xcc, 5f
+ nop
+4: ! set final blocks of 64 bytes
+ stx %o1, [%o5]
+ stx %o1, [%o5+8]
+ stx %o1, [%o5+16]
+ stx %o1, [%o5+24]
+ subcc %o4, 64, %o4
+ stx %o1, [%o5+32]
+ stx %o1, [%o5+40]
+ add %o5, 64, %o5
+ stx %o1, [%o5-16]
+ bgu,pt %xcc, 4b
+ stx %o1, [%o5-8]
+
+5:
+ ! Set the remaining long words
+.wrshort:
+ subcc %o3, 8, %o3 ! Can we store any long words?
+ blu,pn %xcc, .wrchars
+ and %o2, 7, %o2 ! calc bytes left after long words
+6:
+ subcc %o3, 8, %o3
+ stx %o1, [%o5] ! store the long words
+ bgeu,pt %xcc, 6b
+ add %o5, 8, %o5
+
+.wrchars: ! check for extra chars
+ brnz %o2, .wrfin
+ nop
+ retl
+ nop
+
+.wdalign:
+ andcc %o5, 3, %o3 ! is sp1 aligned on a word boundary?
+ bz,pn %xcc, .wrword
+ andn %o2, 3, %o3 ! create word sized count in %o3
+
+ dec %o2 ! decrement count
+ stb %o1, [%o5] ! clear a byte
+ b .wdalign
+ inc %o5 ! next byte
+
+.wrword:
+ subcc %o3, 4, %o3
+ st %o1, [%o5] ! 4-byte writing loop
+ bnz,pt %xcc, .wrword
+ add %o5, 4, %o5
+
+ and %o2, 3, %o2 ! leftover count, if any
+
+.wrchar:
+ ! Set the remaining bytes, if any
+ brz %o2, .exit
+ nop
+.wrfin:
+ deccc %o2
+ stb %o1, [%o5]
+ bgu,pt %xcc, .wrfin
+ inc %o5
+.exit:
+ retl ! %o0 was preserved
+ nop
+
+ .size M7memset,.-M7memset
diff --git a/arch/sparc/lib/M7patch.S b/arch/sparc/lib/M7patch.S
new file mode 100644
index 000000000000..9000b7bc5f2b
--- /dev/null
+++ b/arch/sparc/lib/M7patch.S
@@ -0,0 +1,51 @@
+/*
+ * M7patch.S: Patch generic routines with M7 variant.
+ *
+ * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
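+ *
+ * Each NG_DO_PATCH(OLD, NEW) below overwrites the first two
+ * instructions of the generic routine OLD with "ba,pt %xcc, NEW; nop".
+ * The constant 0x10680000 is that branch with a zero displacement; the
+ * 19-bit word displacement is computed and OR'd in at patch time.
+ * As a C model (a sketch, assuming NEW lies within the reach of a
+ * disp19 branch):
+ *
+ *	insn = 0x10680000 | (((NEW - OLD) >> 2) & 0x7ffff);
+ *	*(u32 *)OLD = insn;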
+ */ + +#include <linux/linkage.h> + +#define BRANCH_ALWAYS 0x10680000 +#define NOP 0x01000000 +#define NG_DO_PATCH(OLD, NEW) \ + sethi %hi(NEW), %g1; \ + or %g1, %lo(NEW), %g1; \ + sethi %hi(OLD), %g2; \ + or %g2, %lo(OLD), %g2; \ + sub %g1, %g2, %g1; \ + sethi %hi(BRANCH_ALWAYS), %g3; \ + sll %g1, 11, %g1; \ + srl %g1, 11 + 2, %g1; \ + or %g3, %lo(BRANCH_ALWAYS), %g3; \ + or %g3, %g1, %g3; \ + stw %g3, [%g2]; \ + sethi %hi(NOP), %g3; \ + or %g3, %lo(NOP), %g3; \ + stw %g3, [%g2 + 0x4]; \ + flush %g2; + +ENTRY(m7_patch_copyops) + NG_DO_PATCH(memcpy, M7memcpy) + NG_DO_PATCH(raw_copy_from_user, M7copy_from_user) + NG_DO_PATCH(raw_copy_to_user, M7copy_to_user) + retl + nop +ENDPROC(m7_patch_copyops) + +ENTRY(m7_patch_bzero) + NG_DO_PATCH(memset, M7memset) + NG_DO_PATCH(__bzero, M7bzero) + NG_DO_PATCH(__clear_user, NGclear_user) + NG_DO_PATCH(tsb_init, NGtsb_init) + retl + nop +ENDPROC(m7_patch_bzero) + +ENTRY(m7_patch_pageops) + NG_DO_PATCH(copy_user_page, NG4copy_user_page) + NG_DO_PATCH(_clear_page, M7clear_page) + NG_DO_PATCH(clear_user_page, M7clear_user_page) + retl + nop +ENDPROC(m7_patch_pageops) diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile index 07c03e72d812..a1a2d39ec96e 100644 --- a/arch/sparc/lib/Makefile +++ b/arch/sparc/lib/Makefile @@ -36,6 +36,11 @@ lib-$(CONFIG_SPARC64) += NG2patch.o lib-$(CONFIG_SPARC64) += NG4memcpy.o NG4copy_from_user.o NG4copy_to_user.o lib-$(CONFIG_SPARC64) += NG4patch.o NG4copy_page.o NG4clear_page.o NG4memset.o +lib-$(CONFIG_SPARC64) += Memcpy_utils.o + +lib-$(CONFIG_SPARC64) += M7memcpy.o M7copy_from_user.o M7copy_to_user.o +lib-$(CONFIG_SPARC64) += M7patch.o M7memset.o + lib-$(CONFIG_SPARC64) += GENmemcpy.o GENcopy_from_user.o GENcopy_to_user.o lib-$(CONFIG_SPARC64) += GENpatch.o GENpage.o GENbzero.o diff --git a/arch/sparc/lib/Memcpy_utils.S b/arch/sparc/lib/Memcpy_utils.S new file mode 100644 index 000000000000..64fbac28b3db --- /dev/null +++ b/arch/sparc/lib/Memcpy_utils.S @@ -0,0 +1,345 @@ +#ifndef __ASM_MEMCPY_UTILS +#define __ASM_MEMCPY_UTILS + +#include <linux/linkage.h> +#include <asm/asi.h> +#include <asm/visasm.h> + +ENTRY(__restore_asi_fp) + VISExitHalf + retl + wr %g0, ASI_AIUS, %asi +ENDPROC(__restore_asi_fp) + +ENTRY(__restore_asi) + retl + wr %g0, ASI_AIUS, %asi +ENDPROC(__restore_asi) + +ENTRY(memcpy_retl_o2) + ba,pt %xcc, __restore_asi + mov %o2, %o0 +ENDPROC(memcpy_retl_o2) +ENTRY(memcpy_retl_o2_plus_1) + ba,pt %xcc, __restore_asi + add %o2, 1, %o0 +ENDPROC(memcpy_retl_o2_plus_1) +ENTRY(memcpy_retl_o2_plus_3) + ba,pt %xcc, __restore_asi + add %o2, 3, %o0 +ENDPROC(memcpy_retl_o2_plus_3) +ENTRY(memcpy_retl_o2_plus_4) + ba,pt %xcc, __restore_asi + add %o2, 4, %o0 +ENDPROC(memcpy_retl_o2_plus_4) +ENTRY(memcpy_retl_o2_plus_5) + ba,pt %xcc, __restore_asi + add %o2, 5, %o0 +ENDPROC(memcpy_retl_o2_plus_5) +ENTRY(memcpy_retl_o2_plus_6) + ba,pt %xcc, __restore_asi + add %o2, 6, %o0 +ENDPROC(memcpy_retl_o2_plus_6) +ENTRY(memcpy_retl_o2_plus_7) + ba,pt %xcc, __restore_asi + add %o2, 7, %o0 +ENDPROC(memcpy_retl_o2_plus_7) +ENTRY(memcpy_retl_o2_plus_8) + ba,pt %xcc, __restore_asi + add %o2, 8, %o0 +ENDPROC(memcpy_retl_o2_plus_8) +ENTRY(memcpy_retl_o2_plus_15) + ba,pt %xcc, __restore_asi + add %o2, 15, %o0 +ENDPROC(memcpy_retl_o2_plus_15) +ENTRY(memcpy_retl_o2_plus_15_8) + add %o2, 15, %o2 + ba,pt %xcc, __restore_asi + add %o2, 8, %o0 +ENDPROC(memcpy_retl_o2_plus_15_8) +ENTRY(memcpy_retl_o2_plus_16) + ba,pt %xcc, __restore_asi + add %o2, 16, %o0 +ENDPROC(memcpy_retl_o2_plus_16) +ENTRY(memcpy_retl_o2_plus_24) + ba,pt %xcc, 
__restore_asi + add %o2, 24, %o0 +ENDPROC(memcpy_retl_o2_plus_24) +ENTRY(memcpy_retl_o2_plus_31) + ba,pt %xcc, __restore_asi + add %o2, 31, %o0 +ENDPROC(memcpy_retl_o2_plus_31) +ENTRY(memcpy_retl_o2_plus_32) + ba,pt %xcc, __restore_asi + add %o2, 32, %o0 +ENDPROC(memcpy_retl_o2_plus_32) +ENTRY(memcpy_retl_o2_plus_31_32) + add %o2, 31, %o2 + ba,pt %xcc, __restore_asi + add %o2, 32, %o0 +ENDPROC(memcpy_retl_o2_plus_31_32) +ENTRY(memcpy_retl_o2_plus_31_24) + add %o2, 31, %o2 + ba,pt %xcc, __restore_asi + add %o2, 24, %o0 +ENDPROC(memcpy_retl_o2_plus_31_24) +ENTRY(memcpy_retl_o2_plus_31_16) + add %o2, 31, %o2 + ba,pt %xcc, __restore_asi + add %o2, 16, %o0 +ENDPROC(memcpy_retl_o2_plus_31_16) +ENTRY(memcpy_retl_o2_plus_31_8) + add %o2, 31, %o2 + ba,pt %xcc, __restore_asi + add %o2, 8, %o0 +ENDPROC(memcpy_retl_o2_plus_31_8) +ENTRY(memcpy_retl_o2_plus_63) + ba,pt %xcc, __restore_asi + add %o2, 63, %o0 +ENDPROC(memcpy_retl_o2_plus_63) +ENTRY(memcpy_retl_o2_plus_63_64) + add %o2, 63, %o2 + ba,pt %xcc, __restore_asi + add %o2, 64, %o0 +ENDPROC(memcpy_retl_o2_plus_63_64) +ENTRY(memcpy_retl_o2_plus_63_56) + add %o2, 63, %o2 + ba,pt %xcc, __restore_asi + add %o2, 56, %o0 +ENDPROC(memcpy_retl_o2_plus_63_56) +ENTRY(memcpy_retl_o2_plus_63_48) + add %o2, 63, %o2 + ba,pt %xcc, __restore_asi + add %o2, 48, %o0 +ENDPROC(memcpy_retl_o2_plus_63_48) +ENTRY(memcpy_retl_o2_plus_63_40) + add %o2, 63, %o2 + ba,pt %xcc, __restore_asi + add %o2, 40, %o0 +ENDPROC(memcpy_retl_o2_plus_63_40) +ENTRY(memcpy_retl_o2_plus_63_32) + add %o2, 63, %o2 + ba,pt %xcc, __restore_asi + add %o2, 32, %o0 +ENDPROC(memcpy_retl_o2_plus_63_32) +ENTRY(memcpy_retl_o2_plus_63_24) + add %o2, 63, %o2 + ba,pt %xcc, __restore_asi + add %o2, 24, %o0 +ENDPROC(memcpy_retl_o2_plus_63_24) +ENTRY(memcpy_retl_o2_plus_63_16) + add %o2, 63, %o2 + ba,pt %xcc, __restore_asi + add %o2, 16, %o0 +ENDPROC(memcpy_retl_o2_plus_63_16) +ENTRY(memcpy_retl_o2_plus_63_8) + add %o2, 63, %o2 + ba,pt %xcc, __restore_asi + add %o2, 8, %o0 +ENDPROC(memcpy_retl_o2_plus_63_8) +ENTRY(memcpy_retl_o2_plus_o5) + ba,pt %xcc, __restore_asi + add %o2, %o5, %o0 +ENDPROC(memcpy_retl_o2_plus_o5) +ENTRY(memcpy_retl_o2_plus_o5_plus_1) + add %o5, 1, %o5 + ba,pt %xcc, __restore_asi + add %o2, %o5, %o0 +ENDPROC(memcpy_retl_o2_plus_o5_plus_1) +ENTRY(memcpy_retl_o2_plus_o5_plus_4) + add %o5, 4, %o5 + ba,pt %xcc, __restore_asi + add %o2, %o5, %o0 +ENDPROC(memcpy_retl_o2_plus_o5_plus_4) +ENTRY(memcpy_retl_o2_plus_o5_plus_8) + add %o5, 8, %o5 + ba,pt %xcc, __restore_asi + add %o2, %o5, %o0 +ENDPROC(memcpy_retl_o2_plus_o5_plus_8) +ENTRY(memcpy_retl_o2_plus_o5_plus_16) + add %o5, 16, %o5 + ba,pt %xcc, __restore_asi + add %o2, %o5, %o0 +ENDPROC(memcpy_retl_o2_plus_o5_plus_16) +ENTRY(memcpy_retl_o2_plus_o5_plus_24) + add %o5, 24, %o5 + ba,pt %xcc, __restore_asi + add %o2, %o5, %o0 +ENDPROC(memcpy_retl_o2_plus_o5_plus_24) +ENTRY(memcpy_retl_o2_plus_o5_plus_32) + add %o5, 32, %o5 + ba,pt %xcc, __restore_asi + add %o2, %o5, %o0 +ENDPROC(memcpy_retl_o2_plus_o5_plus_32) +ENTRY(memcpy_retl_o2_plus_o5_64) + add %o5, 32, %o5 + ba,pt %xcc, __restore_asi + add %o2, %o5, %o0 +ENDPROC(memcpy_retl_o2_plus_o5_64) +ENTRY(memcpy_retl_o2_plus_g1) + ba,pt %xcc, __restore_asi + add %o2, %g1, %o0 +ENDPROC(memcpy_retl_o2_plus_g1) +ENTRY(memcpy_retl_o2_plus_g1_plus_1) + add %g1, 1, %g1 + ba,pt %xcc, __restore_asi + add %o2, %g1, %o0 +ENDPROC(memcpy_retl_o2_plus_g1_plus_1) +ENTRY(memcpy_retl_o2_plus_g1_plus_8) + add %g1, 8, %g1 + ba,pt %xcc, __restore_asi + add %o2, %g1, %o0 +ENDPROC(memcpy_retl_o2_plus_g1_plus_8) 
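+
+/*
+ * A note on the pattern used by every stub in this file: each one is an
+ * exception-table landing pad for a faulting user access in the M7 copy
+ * routines.  The stub name encodes how the number of bytes still left
+ * to copy is rebuilt from the state live at the faulting instruction.
+ * Schematically (a sketch; the addend is the register and/or constant
+ * named in the stub):
+ *
+ *	%o0 = %o2 + <register and/or constant>	! bytes not copied
+ *
+ * after restoring %asi to ASI_AIUS (and, for the _fp variants, the FP
+ * state), which is exactly what copy_{from,to}_user must return.
+ */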
+ENTRY(memcpy_retl_o2_plus_o4) + ba,pt %xcc, __restore_asi + add %o2, %o4, %o0 +ENDPROC(memcpy_retl_o2_plus_o4) +ENTRY(memcpy_retl_o2_plus_o4_plus_8) + add %o4, 8, %o4 + ba,pt %xcc, __restore_asi + add %o2, %o4, %o0 +ENDPROC(memcpy_retl_o2_plus_o4_plus_8) +ENTRY(memcpy_retl_o2_plus_o4_plus_16) + add %o4, 16, %o4 + ba,pt %xcc, __restore_asi + add %o2, %o4, %o0 +ENDPROC(memcpy_retl_o2_plus_o4_plus_16) +ENTRY(memcpy_retl_o2_plus_o4_plus_24) + add %o4, 24, %o4 + ba,pt %xcc, __restore_asi + add %o2, %o4, %o0 +ENDPROC(memcpy_retl_o2_plus_o4_plus_24) +ENTRY(memcpy_retl_o2_plus_o4_plus_32) + add %o4, 32, %o4 + ba,pt %xcc, __restore_asi + add %o2, %o4, %o0 +ENDPROC(memcpy_retl_o2_plus_o4_plus_32) +ENTRY(memcpy_retl_o2_plus_o4_plus_40) + add %o4, 40, %o4 + ba,pt %xcc, __restore_asi + add %o2, %o4, %o0 +ENDPROC(memcpy_retl_o2_plus_o4_plus_40) +ENTRY(memcpy_retl_o2_plus_o4_plus_48) + add %o4, 48, %o4 + ba,pt %xcc, __restore_asi + add %o2, %o4, %o0 +ENDPROC(memcpy_retl_o2_plus_o4_plus_48) +ENTRY(memcpy_retl_o2_plus_o4_plus_56) + add %o4, 56, %o4 + ba,pt %xcc, __restore_asi + add %o2, %o4, %o0 +ENDPROC(memcpy_retl_o2_plus_o4_plus_56) +ENTRY(memcpy_retl_o2_plus_o4_plus_64) + add %o4, 64, %o4 + ba,pt %xcc, __restore_asi + add %o2, %o4, %o0 +ENDPROC(memcpy_retl_o2_plus_o4_plus_64) +ENTRY(memcpy_retl_o2_plus_o5_plus_64) + add %o5, 64, %o5 + ba,pt %xcc, __restore_asi + add %o2, %o5, %o0 +ENDPROC(memcpy_retl_o2_plus_o5_plus_64) +ENTRY(memcpy_retl_o2_plus_o3_fp) + ba,pt %xcc, __restore_asi_fp + add %o2, %o3, %o0 +ENDPROC(memcpy_retl_o2_plus_o3_fp) +ENTRY(memcpy_retl_o2_plus_o3_plus_1_fp) + add %o3, 1, %o3 + ba,pt %xcc, __restore_asi_fp + add %o2, %o3, %o0 +ENDPROC(memcpy_retl_o2_plus_o3_plus_1_fp) +ENTRY(memcpy_retl_o2_plus_o3_plus_4_fp) + add %o3, 4, %o3 + ba,pt %xcc, __restore_asi_fp + add %o2, %o3, %o0 +ENDPROC(memcpy_retl_o2_plus_o3_plus_4_fp) +ENTRY(memcpy_retl_o2_plus_o4_fp) + ba,pt %xcc, __restore_asi_fp + add %o2, %o4, %o0 +ENDPROC(memcpy_retl_o2_plus_o4_fp) +ENTRY(memcpy_retl_o2_plus_o4_plus_8_fp) + add %o4, 8, %o4 + ba,pt %xcc, __restore_asi_fp + add %o2, %o4, %o0 +ENDPROC(memcpy_retl_o2_plus_o4_plus_8_fp) +ENTRY(memcpy_retl_o2_plus_o4_plus_16_fp) + add %o4, 16, %o4 + ba,pt %xcc, __restore_asi_fp + add %o2, %o4, %o0 +ENDPROC(memcpy_retl_o2_plus_o4_plus_16_fp) +ENTRY(memcpy_retl_o2_plus_o4_plus_24_fp) + add %o4, 24, %o4 + ba,pt %xcc, __restore_asi_fp + add %o2, %o4, %o0 +ENDPROC(memcpy_retl_o2_plus_o4_plus_24_fp) +ENTRY(memcpy_retl_o2_plus_o4_plus_32_fp) + add %o4, 32, %o4 + ba,pt %xcc, __restore_asi_fp + add %o2, %o4, %o0 +ENDPROC(memcpy_retl_o2_plus_o4_plus_32_fp) +ENTRY(memcpy_retl_o2_plus_o4_plus_40_fp) + add %o4, 40, %o4 + ba,pt %xcc, __restore_asi_fp + add %o2, %o4, %o0 +ENDPROC(memcpy_retl_o2_plus_o4_plus_40_fp) +ENTRY(memcpy_retl_o2_plus_o4_plus_48_fp) + add %o4, 48, %o4 + ba,pt %xcc, __restore_asi_fp + add %o2, %o4, %o0 +ENDPROC(memcpy_retl_o2_plus_o4_plus_48_fp) +ENTRY(memcpy_retl_o2_plus_o4_plus_56_fp) + add %o4, 56, %o4 + ba,pt %xcc, __restore_asi_fp + add %o2, %o4, %o0 +ENDPROC(memcpy_retl_o2_plus_o4_plus_56_fp) +ENTRY(memcpy_retl_o2_plus_o4_plus_64_fp) + add %o4, 64, %o4 + ba,pt %xcc, __restore_asi_fp + add %o2, %o4, %o0 +ENDPROC(memcpy_retl_o2_plus_o4_plus_64_fp) +ENTRY(memcpy_retl_o2_plus_o5_fp) + ba,pt %xcc, __restore_asi_fp + add %o2, %o5, %o0 +ENDPROC(memcpy_retl_o2_plus_o5_fp) +ENTRY(memcpy_retl_o2_plus_o5_plus_64_fp) + add %o5, 64, %o5 + ba,pt %xcc, __restore_asi_fp + add %o2, %o5, %o0 +ENDPROC(memcpy_retl_o2_plus_o5_plus_64_fp) +ENTRY(memcpy_retl_o2_plus_o5_plus_56_fp) + add %o5, 
56, %o5 + ba,pt %xcc, __restore_asi_fp + add %o2, %o5, %o0 +ENDPROC(memcpy_retl_o2_plus_o5_plus_56_fp) +ENTRY(memcpy_retl_o2_plus_o5_plus_48_fp) + add %o5, 48, %o5 + ba,pt %xcc, __restore_asi_fp + add %o2, %o5, %o0 +ENDPROC(memcpy_retl_o2_plus_o5_plus_48_fp) +ENTRY(memcpy_retl_o2_plus_o5_plus_40_fp) + add %o5, 40, %o5 + ba,pt %xcc, __restore_asi_fp + add %o2, %o5, %o0 +ENDPROC(memcpy_retl_o2_plus_o5_plus_40_fp) +ENTRY(memcpy_retl_o2_plus_o5_plus_32_fp) + add %o5, 32, %o5 + ba,pt %xcc, __restore_asi_fp + add %o2, %o5, %o0 +ENDPROC(memcpy_retl_o2_plus_o5_plus_32_fp) +ENTRY(memcpy_retl_o2_plus_o5_plus_24_fp) + add %o5, 24, %o5 + ba,pt %xcc, __restore_asi_fp + add %o2, %o5, %o0 +ENDPROC(memcpy_retl_o2_plus_o5_plus_24_fp) +ENTRY(memcpy_retl_o2_plus_o5_plus_16_fp) + add %o5, 16, %o5 + ba,pt %xcc, __restore_asi_fp + add %o2, %o5, %o0 +ENDPROC(memcpy_retl_o2_plus_o5_plus_16_fp) +ENTRY(memcpy_retl_o2_plus_o5_plus_8_fp) + add %o5, 8, %o5 + ba,pt %xcc, __restore_asi_fp + add %o2, %o5, %o0 +ENDPROC(memcpy_retl_o2_plus_o5_plus_8_fp) + +#endif diff --git a/arch/sparc/lib/NG4memcpy.S b/arch/sparc/lib/NG4memcpy.S index 78ea962edcbe..b5dacd1d2078 100644 --- a/arch/sparc/lib/NG4memcpy.S +++ b/arch/sparc/lib/NG4memcpy.S @@ -94,155 +94,6 @@ .text #ifndef EX_RETVAL #define EX_RETVAL(x) x -__restore_asi_fp: - VISExitHalf -__restore_asi: - retl - wr %g0, ASI_AIUS, %asi - -ENTRY(NG4_retl_o2) - ba,pt %xcc, __restore_asi - mov %o2, %o0 -ENDPROC(NG4_retl_o2) -ENTRY(NG4_retl_o2_plus_1) - ba,pt %xcc, __restore_asi - add %o2, 1, %o0 -ENDPROC(NG4_retl_o2_plus_1) -ENTRY(NG4_retl_o2_plus_4) - ba,pt %xcc, __restore_asi - add %o2, 4, %o0 -ENDPROC(NG4_retl_o2_plus_4) -ENTRY(NG4_retl_o2_plus_o5) - ba,pt %xcc, __restore_asi - add %o2, %o5, %o0 -ENDPROC(NG4_retl_o2_plus_o5) -ENTRY(NG4_retl_o2_plus_o5_plus_4) - add %o5, 4, %o5 - ba,pt %xcc, __restore_asi - add %o2, %o5, %o0 -ENDPROC(NG4_retl_o2_plus_o5_plus_4) -ENTRY(NG4_retl_o2_plus_o5_plus_8) - add %o5, 8, %o5 - ba,pt %xcc, __restore_asi - add %o2, %o5, %o0 -ENDPROC(NG4_retl_o2_plus_o5_plus_8) -ENTRY(NG4_retl_o2_plus_o5_plus_16) - add %o5, 16, %o5 - ba,pt %xcc, __restore_asi - add %o2, %o5, %o0 -ENDPROC(NG4_retl_o2_plus_o5_plus_16) -ENTRY(NG4_retl_o2_plus_o5_plus_24) - add %o5, 24, %o5 - ba,pt %xcc, __restore_asi - add %o2, %o5, %o0 -ENDPROC(NG4_retl_o2_plus_o5_plus_24) -ENTRY(NG4_retl_o2_plus_o5_plus_32) - add %o5, 32, %o5 - ba,pt %xcc, __restore_asi - add %o2, %o5, %o0 -ENDPROC(NG4_retl_o2_plus_o5_plus_32) -ENTRY(NG4_retl_o2_plus_g1) - ba,pt %xcc, __restore_asi - add %o2, %g1, %o0 -ENDPROC(NG4_retl_o2_plus_g1) -ENTRY(NG4_retl_o2_plus_g1_plus_1) - add %g1, 1, %g1 - ba,pt %xcc, __restore_asi - add %o2, %g1, %o0 -ENDPROC(NG4_retl_o2_plus_g1_plus_1) -ENTRY(NG4_retl_o2_plus_g1_plus_8) - add %g1, 8, %g1 - ba,pt %xcc, __restore_asi - add %o2, %g1, %o0 -ENDPROC(NG4_retl_o2_plus_g1_plus_8) -ENTRY(NG4_retl_o2_plus_o4) - ba,pt %xcc, __restore_asi - add %o2, %o4, %o0 -ENDPROC(NG4_retl_o2_plus_o4) -ENTRY(NG4_retl_o2_plus_o4_plus_8) - add %o4, 8, %o4 - ba,pt %xcc, __restore_asi - add %o2, %o4, %o0 -ENDPROC(NG4_retl_o2_plus_o4_plus_8) -ENTRY(NG4_retl_o2_plus_o4_plus_16) - add %o4, 16, %o4 - ba,pt %xcc, __restore_asi - add %o2, %o4, %o0 -ENDPROC(NG4_retl_o2_plus_o4_plus_16) -ENTRY(NG4_retl_o2_plus_o4_plus_24) - add %o4, 24, %o4 - ba,pt %xcc, __restore_asi - add %o2, %o4, %o0 -ENDPROC(NG4_retl_o2_plus_o4_plus_24) -ENTRY(NG4_retl_o2_plus_o4_plus_32) - add %o4, 32, %o4 - ba,pt %xcc, __restore_asi - add %o2, %o4, %o0 -ENDPROC(NG4_retl_o2_plus_o4_plus_32) -ENTRY(NG4_retl_o2_plus_o4_plus_40) - 
add %o4, 40, %o4 - ba,pt %xcc, __restore_asi - add %o2, %o4, %o0 -ENDPROC(NG4_retl_o2_plus_o4_plus_40) -ENTRY(NG4_retl_o2_plus_o4_plus_48) - add %o4, 48, %o4 - ba,pt %xcc, __restore_asi - add %o2, %o4, %o0 -ENDPROC(NG4_retl_o2_plus_o4_plus_48) -ENTRY(NG4_retl_o2_plus_o4_plus_56) - add %o4, 56, %o4 - ba,pt %xcc, __restore_asi - add %o2, %o4, %o0 -ENDPROC(NG4_retl_o2_plus_o4_plus_56) -ENTRY(NG4_retl_o2_plus_o4_plus_64) - add %o4, 64, %o4 - ba,pt %xcc, __restore_asi - add %o2, %o4, %o0 -ENDPROC(NG4_retl_o2_plus_o4_plus_64) -ENTRY(NG4_retl_o2_plus_o4_fp) - ba,pt %xcc, __restore_asi_fp - add %o2, %o4, %o0 -ENDPROC(NG4_retl_o2_plus_o4_fp) -ENTRY(NG4_retl_o2_plus_o4_plus_8_fp) - add %o4, 8, %o4 - ba,pt %xcc, __restore_asi_fp - add %o2, %o4, %o0 -ENDPROC(NG4_retl_o2_plus_o4_plus_8_fp) -ENTRY(NG4_retl_o2_plus_o4_plus_16_fp) - add %o4, 16, %o4 - ba,pt %xcc, __restore_asi_fp - add %o2, %o4, %o0 -ENDPROC(NG4_retl_o2_plus_o4_plus_16_fp) -ENTRY(NG4_retl_o2_plus_o4_plus_24_fp) - add %o4, 24, %o4 - ba,pt %xcc, __restore_asi_fp - add %o2, %o4, %o0 -ENDPROC(NG4_retl_o2_plus_o4_plus_24_fp) -ENTRY(NG4_retl_o2_plus_o4_plus_32_fp) - add %o4, 32, %o4 - ba,pt %xcc, __restore_asi_fp - add %o2, %o4, %o0 -ENDPROC(NG4_retl_o2_plus_o4_plus_32_fp) -ENTRY(NG4_retl_o2_plus_o4_plus_40_fp) - add %o4, 40, %o4 - ba,pt %xcc, __restore_asi_fp - add %o2, %o4, %o0 -ENDPROC(NG4_retl_o2_plus_o4_plus_40_fp) -ENTRY(NG4_retl_o2_plus_o4_plus_48_fp) - add %o4, 48, %o4 - ba,pt %xcc, __restore_asi_fp - add %o2, %o4, %o0 -ENDPROC(NG4_retl_o2_plus_o4_plus_48_fp) -ENTRY(NG4_retl_o2_plus_o4_plus_56_fp) - add %o4, 56, %o4 - ba,pt %xcc, __restore_asi_fp - add %o2, %o4, %o0 -ENDPROC(NG4_retl_o2_plus_o4_plus_56_fp) -ENTRY(NG4_retl_o2_plus_o4_plus_64_fp) - add %o4, 64, %o4 - ba,pt %xcc, __restore_asi_fp - add %o2, %o4, %o0 -ENDPROC(NG4_retl_o2_plus_o4_plus_64_fp) #endif .align 64 @@ -275,12 +126,12 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ sub %o2, %g1, %o2 -1: EX_LD(LOAD(ldub, %o1 + 0x00, %g2), NG4_retl_o2_plus_g1) +1: EX_LD(LOAD(ldub, %o1 + 0x00, %g2), memcpy_retl_o2_plus_g1) add %o1, 1, %o1 subcc %g1, 1, %g1 add %o0, 1, %o0 bne,pt %icc, 1b - EX_ST(STORE(stb, %g2, %o0 - 0x01), NG4_retl_o2_plus_g1_plus_1) + EX_ST(STORE(stb, %g2, %o0 - 0x01), memcpy_retl_o2_plus_g1_plus_1) 51: LOAD(prefetch, %o1 + 0x040, #n_reads_strong) LOAD(prefetch, %o1 + 0x080, #n_reads_strong) @@ -305,43 +156,43 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ brz,pt %g1, .Llarge_aligned sub %o2, %g1, %o2 -1: EX_LD(LOAD(ldx, %o1 + 0x00, %g2), NG4_retl_o2_plus_g1) +1: EX_LD(LOAD(ldx, %o1 + 0x00, %g2), memcpy_retl_o2_plus_g1) add %o1, 8, %o1 subcc %g1, 8, %g1 add %o0, 8, %o0 bne,pt %icc, 1b - EX_ST(STORE(stx, %g2, %o0 - 0x08), NG4_retl_o2_plus_g1_plus_8) + EX_ST(STORE(stx, %g2, %o0 - 0x08), memcpy_retl_o2_plus_g1_plus_8) .Llarge_aligned: /* len >= 0x80 && src 8-byte aligned && dest 8-byte aligned */ andn %o2, 0x3f, %o4 sub %o2, %o4, %o2 -1: EX_LD(LOAD(ldx, %o1 + 0x00, %g1), NG4_retl_o2_plus_o4) +1: EX_LD(LOAD(ldx, %o1 + 0x00, %g1), memcpy_retl_o2_plus_o4) add %o1, 0x40, %o1 - EX_LD(LOAD(ldx, %o1 - 0x38, %g2), NG4_retl_o2_plus_o4) + EX_LD(LOAD(ldx, %o1 - 0x38, %g2), memcpy_retl_o2_plus_o4) subcc %o4, 0x40, %o4 - EX_LD(LOAD(ldx, %o1 - 0x30, %g3), NG4_retl_o2_plus_o4_plus_64) - EX_LD(LOAD(ldx, %o1 - 0x28, GLOBAL_SPARE), NG4_retl_o2_plus_o4_plus_64) - EX_LD(LOAD(ldx, %o1 - 0x20, %o5), NG4_retl_o2_plus_o4_plus_64) - EX_ST(STORE_INIT(%g1, %o0), NG4_retl_o2_plus_o4_plus_64) + EX_LD(LOAD(ldx, %o1 - 0x30, %g3), memcpy_retl_o2_plus_o4_plus_64) + EX_LD(LOAD(ldx, %o1 - 0x28, 
GLOBAL_SPARE), memcpy_retl_o2_plus_o4_plus_64) + EX_LD(LOAD(ldx, %o1 - 0x20, %o5), memcpy_retl_o2_plus_o4_plus_64) + EX_ST(STORE_INIT(%g1, %o0), memcpy_retl_o2_plus_o4_plus_64) add %o0, 0x08, %o0 - EX_ST(STORE_INIT(%g2, %o0), NG4_retl_o2_plus_o4_plus_56) + EX_ST(STORE_INIT(%g2, %o0), memcpy_retl_o2_plus_o4_plus_56) add %o0, 0x08, %o0 - EX_LD(LOAD(ldx, %o1 - 0x18, %g2), NG4_retl_o2_plus_o4_plus_48) - EX_ST(STORE_INIT(%g3, %o0), NG4_retl_o2_plus_o4_plus_48) + EX_LD(LOAD(ldx, %o1 - 0x18, %g2), memcpy_retl_o2_plus_o4_plus_48) + EX_ST(STORE_INIT(%g3, %o0), memcpy_retl_o2_plus_o4_plus_48) add %o0, 0x08, %o0 - EX_LD(LOAD(ldx, %o1 - 0x10, %g3), NG4_retl_o2_plus_o4_plus_40) - EX_ST(STORE_INIT(GLOBAL_SPARE, %o0), NG4_retl_o2_plus_o4_plus_40) + EX_LD(LOAD(ldx, %o1 - 0x10, %g3), memcpy_retl_o2_plus_o4_plus_40) + EX_ST(STORE_INIT(GLOBAL_SPARE, %o0), memcpy_retl_o2_plus_o4_plus_40) add %o0, 0x08, %o0 - EX_LD(LOAD(ldx, %o1 - 0x08, GLOBAL_SPARE), NG4_retl_o2_plus_o4_plus_32) - EX_ST(STORE_INIT(%o5, %o0), NG4_retl_o2_plus_o4_plus_32) + EX_LD(LOAD(ldx, %o1 - 0x08, GLOBAL_SPARE), memcpy_retl_o2_plus_o4_plus_32) + EX_ST(STORE_INIT(%o5, %o0), memcpy_retl_o2_plus_o4_plus_32) add %o0, 0x08, %o0 - EX_ST(STORE_INIT(%g2, %o0), NG4_retl_o2_plus_o4_plus_24) + EX_ST(STORE_INIT(%g2, %o0), memcpy_retl_o2_plus_o4_plus_24) add %o0, 0x08, %o0 - EX_ST(STORE_INIT(%g3, %o0), NG4_retl_o2_plus_o4_plus_16) + EX_ST(STORE_INIT(%g3, %o0), memcpy_retl_o2_plus_o4_plus_16) add %o0, 0x08, %o0 - EX_ST(STORE_INIT(GLOBAL_SPARE, %o0), NG4_retl_o2_plus_o4_plus_8) + EX_ST(STORE_INIT(GLOBAL_SPARE, %o0), memcpy_retl_o2_plus_o4_plus_8) add %o0, 0x08, %o0 bne,pt %icc, 1b LOAD(prefetch, %o1 + 0x200, #n_reads_strong) @@ -367,17 +218,17 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ sub %o2, %o4, %o2 alignaddr %o1, %g0, %g1 add %o1, %o4, %o1 - EX_LD_FP(LOAD(ldd, %g1 + 0x00, %f0), NG4_retl_o2_plus_o4) -1: EX_LD_FP(LOAD(ldd, %g1 + 0x08, %f2), NG4_retl_o2_plus_o4) + EX_LD_FP(LOAD(ldd, %g1 + 0x00, %f0), memcpy_retl_o2_plus_o4) +1: EX_LD_FP(LOAD(ldd, %g1 + 0x08, %f2), memcpy_retl_o2_plus_o4) subcc %o4, 0x40, %o4 - EX_LD_FP(LOAD(ldd, %g1 + 0x10, %f4), NG4_retl_o2_plus_o4_plus_64) - EX_LD_FP(LOAD(ldd, %g1 + 0x18, %f6), NG4_retl_o2_plus_o4_plus_64) - EX_LD_FP(LOAD(ldd, %g1 + 0x20, %f8), NG4_retl_o2_plus_o4_plus_64) - EX_LD_FP(LOAD(ldd, %g1 + 0x28, %f10), NG4_retl_o2_plus_o4_plus_64) - EX_LD_FP(LOAD(ldd, %g1 + 0x30, %f12), NG4_retl_o2_plus_o4_plus_64) - EX_LD_FP(LOAD(ldd, %g1 + 0x38, %f14), NG4_retl_o2_plus_o4_plus_64) + EX_LD_FP(LOAD(ldd, %g1 + 0x10, %f4), memcpy_retl_o2_plus_o4_plus_64) + EX_LD_FP(LOAD(ldd, %g1 + 0x18, %f6), memcpy_retl_o2_plus_o4_plus_64) + EX_LD_FP(LOAD(ldd, %g1 + 0x20, %f8), memcpy_retl_o2_plus_o4_plus_64) + EX_LD_FP(LOAD(ldd, %g1 + 0x28, %f10), memcpy_retl_o2_plus_o4_plus_64) + EX_LD_FP(LOAD(ldd, %g1 + 0x30, %f12), memcpy_retl_o2_plus_o4_plus_64) + EX_LD_FP(LOAD(ldd, %g1 + 0x38, %f14), memcpy_retl_o2_plus_o4_plus_64) faligndata %f0, %f2, %f16 - EX_LD_FP(LOAD(ldd, %g1 + 0x40, %f0), NG4_retl_o2_plus_o4_plus_64) + EX_LD_FP(LOAD(ldd, %g1 + 0x40, %f0), memcpy_retl_o2_plus_o4_plus_64) faligndata %f2, %f4, %f18 add %g1, 0x40, %g1 faligndata %f4, %f6, %f20 @@ -386,14 +237,14 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ faligndata %f10, %f12, %f26 faligndata %f12, %f14, %f28 faligndata %f14, %f0, %f30 - EX_ST_FP(STORE(std, %f16, %o0 + 0x00), NG4_retl_o2_plus_o4_plus_64) - EX_ST_FP(STORE(std, %f18, %o0 + 0x08), NG4_retl_o2_plus_o4_plus_56) - EX_ST_FP(STORE(std, %f20, %o0 + 0x10), NG4_retl_o2_plus_o4_plus_48) - EX_ST_FP(STORE(std, %f22, %o0 + 
0x18), NG4_retl_o2_plus_o4_plus_40) - EX_ST_FP(STORE(std, %f24, %o0 + 0x20), NG4_retl_o2_plus_o4_plus_32) - EX_ST_FP(STORE(std, %f26, %o0 + 0x28), NG4_retl_o2_plus_o4_plus_24) - EX_ST_FP(STORE(std, %f28, %o0 + 0x30), NG4_retl_o2_plus_o4_plus_16) - EX_ST_FP(STORE(std, %f30, %o0 + 0x38), NG4_retl_o2_plus_o4_plus_8) + EX_ST_FP(STORE(std, %f16, %o0 + 0x00), memcpy_retl_o2_plus_o4_plus_64) + EX_ST_FP(STORE(std, %f18, %o0 + 0x08), memcpy_retl_o2_plus_o4_plus_56) + EX_ST_FP(STORE(std, %f20, %o0 + 0x10), memcpy_retl_o2_plus_o4_plus_48) + EX_ST_FP(STORE(std, %f22, %o0 + 0x18), memcpy_retl_o2_plus_o4_plus_40) + EX_ST_FP(STORE(std, %f24, %o0 + 0x20), memcpy_retl_o2_plus_o4_plus_32) + EX_ST_FP(STORE(std, %f26, %o0 + 0x28), memcpy_retl_o2_plus_o4_plus_24) + EX_ST_FP(STORE(std, %f28, %o0 + 0x30), memcpy_retl_o2_plus_o4_plus_16) + EX_ST_FP(STORE(std, %f30, %o0 + 0x38), memcpy_retl_o2_plus_o4_plus_8) add %o0, 0x40, %o0 bne,pt %icc, 1b LOAD(prefetch, %g1 + 0x200, #n_reads_strong) @@ -421,38 +272,38 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ andncc %o2, 0x20 - 1, %o5 be,pn %icc, 2f sub %o2, %o5, %o2 -1: EX_LD(LOAD(ldx, %o1 + 0x00, %g1), NG4_retl_o2_plus_o5) - EX_LD(LOAD(ldx, %o1 + 0x08, %g2), NG4_retl_o2_plus_o5) - EX_LD(LOAD(ldx, %o1 + 0x10, GLOBAL_SPARE), NG4_retl_o2_plus_o5) - EX_LD(LOAD(ldx, %o1 + 0x18, %o4), NG4_retl_o2_plus_o5) +1: EX_LD(LOAD(ldx, %o1 + 0x00, %g1), memcpy_retl_o2_plus_o5) + EX_LD(LOAD(ldx, %o1 + 0x08, %g2), memcpy_retl_o2_plus_o5) + EX_LD(LOAD(ldx, %o1 + 0x10, GLOBAL_SPARE), memcpy_retl_o2_plus_o5) + EX_LD(LOAD(ldx, %o1 + 0x18, %o4), memcpy_retl_o2_plus_o5) add %o1, 0x20, %o1 subcc %o5, 0x20, %o5 - EX_ST(STORE(stx, %g1, %o0 + 0x00), NG4_retl_o2_plus_o5_plus_32) - EX_ST(STORE(stx, %g2, %o0 + 0x08), NG4_retl_o2_plus_o5_plus_24) - EX_ST(STORE(stx, GLOBAL_SPARE, %o0 + 0x10), NG4_retl_o2_plus_o5_plus_24) - EX_ST(STORE(stx, %o4, %o0 + 0x18), NG4_retl_o2_plus_o5_plus_8) + EX_ST(STORE(stx, %g1, %o0 + 0x00), memcpy_retl_o2_plus_o5_plus_32) + EX_ST(STORE(stx, %g2, %o0 + 0x08), memcpy_retl_o2_plus_o5_plus_24) + EX_ST(STORE(stx, GLOBAL_SPARE, %o0 + 0x10), memcpy_retl_o2_plus_o5_plus_24) + EX_ST(STORE(stx, %o4, %o0 + 0x18), memcpy_retl_o2_plus_o5_plus_8) bne,pt %icc, 1b add %o0, 0x20, %o0 2: andcc %o2, 0x18, %o5 be,pt %icc, 3f sub %o2, %o5, %o2 -1: EX_LD(LOAD(ldx, %o1 + 0x00, %g1), NG4_retl_o2_plus_o5) +1: EX_LD(LOAD(ldx, %o1 + 0x00, %g1), memcpy_retl_o2_plus_o5) add %o1, 0x08, %o1 add %o0, 0x08, %o0 subcc %o5, 0x08, %o5 bne,pt %icc, 1b - EX_ST(STORE(stx, %g1, %o0 - 0x08), NG4_retl_o2_plus_o5_plus_8) + EX_ST(STORE(stx, %g1, %o0 - 0x08), memcpy_retl_o2_plus_o5_plus_8) 3: brz,pt %o2, .Lexit cmp %o2, 0x04 bl,pn %icc, .Ltiny nop - EX_LD(LOAD(lduw, %o1 + 0x00, %g1), NG4_retl_o2) + EX_LD(LOAD(lduw, %o1 + 0x00, %g1), memcpy_retl_o2) add %o1, 0x04, %o1 add %o0, 0x04, %o0 subcc %o2, 0x04, %o2 bne,pn %icc, .Ltiny - EX_ST(STORE(stw, %g1, %o0 - 0x04), NG4_retl_o2_plus_4) + EX_ST(STORE(stw, %g1, %o0 - 0x04), memcpy_retl_o2_plus_4) ba,a,pt %icc, .Lexit .Lmedium_unaligned: /* First get dest 8 byte aligned. 
*/
@@ -461,12 +312,12 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
 brz,pt %g1, 2f
 sub %o2, %g1, %o2
-1: EX_LD(LOAD(ldub, %o1 + 0x00, %g2), NG4_retl_o2_plus_g1)
+1: EX_LD(LOAD(ldub, %o1 + 0x00, %g2), memcpy_retl_o2_plus_g1)
 add %o1, 1, %o1
 subcc %g1, 1, %g1
 add %o0, 1, %o0
 bne,pt %icc, 1b
- EX_ST(STORE(stb, %g2, %o0 - 0x01), NG4_retl_o2_plus_g1_plus_1)
+ EX_ST(STORE(stb, %g2, %o0 - 0x01), memcpy_retl_o2_plus_g1_plus_1)
2:
 and %o1, 0x7, %g1
 brz,pn %g1, .Lmedium_noprefetch
@@ -474,16 +325,16 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
 mov 64, %g2
 sub %g2, %g1, %g2
 andn %o1, 0x7, %o1
- EX_LD(LOAD(ldx, %o1 + 0x00, %o4), NG4_retl_o2)
+ EX_LD(LOAD(ldx, %o1 + 0x00, %o4), memcpy_retl_o2)
 sllx %o4, %g1, %o4
 andn %o2, 0x08 - 1, %o5
 sub %o2, %o5, %o2
-1: EX_LD(LOAD(ldx, %o1 + 0x08, %g3), NG4_retl_o2_plus_o5)
+1: EX_LD(LOAD(ldx, %o1 + 0x08, %g3), memcpy_retl_o2_plus_o5)
 add %o1, 0x08, %o1
 subcc %o5, 0x08, %o5
 srlx %g3, %g2, GLOBAL_SPARE
 or GLOBAL_SPARE, %o4, GLOBAL_SPARE
- EX_ST(STORE(stx, GLOBAL_SPARE, %o0 + 0x00), NG4_retl_o2_plus_o5_plus_8)
+ EX_ST(STORE(stx, GLOBAL_SPARE, %o0 + 0x00), memcpy_retl_o2_plus_o5_plus_8)
 add %o0, 0x08, %o0
 bne,pt %icc, 1b
 sllx %g3, %g1, %o4
@@ -494,17 +345,17 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
 ba,pt %icc, .Lsmall_unaligned
 
.Ltiny:
- EX_LD(LOAD(ldub, %o1 + 0x00, %g1), NG4_retl_o2)
+ EX_LD(LOAD(ldub, %o1 + 0x00, %g1), memcpy_retl_o2)
 subcc %o2, 1, %o2
 be,pn %icc, .Lexit
- EX_ST(STORE(stb, %g1, %o0 + 0x00), NG4_retl_o2_plus_1)
- EX_LD(LOAD(ldub, %o1 + 0x01, %g1), NG4_retl_o2)
+ EX_ST(STORE(stb, %g1, %o0 + 0x00), memcpy_retl_o2_plus_1)
+ EX_LD(LOAD(ldub, %o1 + 0x01, %g1), memcpy_retl_o2)
 subcc %o2, 1, %o2
 be,pn %icc, .Lexit
- EX_ST(STORE(stb, %g1, %o0 + 0x01), NG4_retl_o2_plus_1)
- EX_LD(LOAD(ldub, %o1 + 0x02, %g1), NG4_retl_o2)
+ EX_ST(STORE(stb, %g1, %o0 + 0x01), memcpy_retl_o2_plus_1)
+ EX_LD(LOAD(ldub, %o1 + 0x02, %g1), memcpy_retl_o2)
 ba,pt %icc, .Lexit
- EX_ST(STORE(stb, %g1, %o0 + 0x02), NG4_retl_o2)
+ EX_ST(STORE(stb, %g1, %o0 + 0x02), memcpy_retl_o2)
 
.Lsmall:
 andcc %g2, 0x3, %g0
@@ -512,23 +363,23 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
 andn %o2, 0x4 - 1, %o5
 sub %o2, %o5, %o2
1:
- EX_LD(LOAD(lduw, %o1 + 0x00, %g1), NG4_retl_o2_plus_o5)
+ EX_LD(LOAD(lduw, %o1 + 0x00, %g1), memcpy_retl_o2_plus_o5)
 add %o1, 0x04, %o1
 subcc %o5, 0x04, %o5
 add %o0, 0x04, %o0
 bne,pt %icc, 1b
- EX_ST(STORE(stw, %g1, %o0 - 0x04), NG4_retl_o2_plus_o5_plus_4)
+ EX_ST(STORE(stw, %g1, %o0 - 0x04), memcpy_retl_o2_plus_o5_plus_4)
 brz,pt %o2, .Lexit
 nop
 ba,a,pt %icc, .Ltiny
 
.Lsmall_unaligned:
-1: EX_LD(LOAD(ldub, %o1 + 0x00, %g1), NG4_retl_o2)
+1: EX_LD(LOAD(ldub, %o1 + 0x00, %g1), memcpy_retl_o2)
 add %o1, 1, %o1
 add %o0, 1, %o0
 subcc %o2, 1, %o2
 bne,pt %icc, 1b
- EX_ST(STORE(stb, %g1, %o0 - 0x01), NG4_retl_o2_plus_1)
+ EX_ST(STORE(stb, %g1, %o0 - 0x01), memcpy_retl_o2_plus_1)
 ba,a,pt %icc, .Lexit
 nop
 .size FUNC_NAME, .-FUNC_NAME
diff --git a/arch/sparc/lib/U3memcpy.S b/arch/sparc/lib/U3memcpy.S
index 5a8cb37f0a3b..f9b42b3c63b0 100644
--- a/arch/sparc/lib/U3memcpy.S
+++ b/arch/sparc/lib/U3memcpy.S
@@ -168,18 +168,25 @@ ENDPROC(U3_retl_o2_and_7_plus_GS_plus_8)
 FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
 srlx %o2, 31, %g2
 cmp %g2, 0
+
+ /* software trap 5 "Range Check" if len >= 0x80000000 */
 tne %xcc, 5
 PREAMBLE
 mov %o0, %o4
+
+ /* if len == 0 */
 cmp %o2, 0
- be,pn %XCC, 85f
+ be,pn %XCC, end_return
 or %o0, %o1, %o3
+
+ /* if len < 16 */
 cmp %o2, 16
- blu,a,pn %XCC, 80f
+ blu,a,pn %XCC, less_than_16
 or %o3, %o2, %o3
+ /* if len < 192 */
 cmp %o2, (3 * 64)
- blu,pt %XCC, 70f
+
blu,pt %XCC, less_than_192 andcc %o3, 0x7, %g0 /* Clobbers o5/g1/g2/g3/g7/icc/xcc. We must preserve @@ -362,7 +369,7 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ cmp %o2, 0 add %o1, %g1, %o1 VISExitHalf - be,pn %XCC, 85f + be,pn %XCC, end_return sub %o0, %o1, %o3 andcc %g1, 0x7, %g0 @@ -392,14 +399,15 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ sub %o2, 2, %o2 1: andcc %o2, 0x1, %g0 - be,pt %icc, 85f + be,pt %icc, end_return nop EX_LD(LOAD(ldub, %o1, %o5), U3_retl_o2) - ba,pt %xcc, 85f + ba,pt %xcc, end_return EX_ST(STORE(stb, %o5, %o1 + %o3), U3_retl_o2) .align 64 -70: /* 16 < len <= 64 */ + /* 16 <= len < 192 */ +less_than_192: bne,pn %XCC, 75f sub %o0, %o1, %o3 @@ -429,7 +437,7 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ EX_ST(STORE(stw, %o5, %o1 + %o3), U3_retl_o2_plus_4) add %o1, 0x4, %o1 1: cmp %o2, 0 - be,pt %XCC, 85f + be,pt %XCC, end_return nop ba,pt %xcc, 90f nop @@ -475,13 +483,14 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ srl %g1, 3, %g1 andcc %o2, 0x7, %o2 - be,pn %icc, 85f + be,pn %icc, end_return add %o1, %g1, %o1 ba,pt %xcc, 90f sub %o0, %o1, %o3 .align 64 -80: /* 0 < len <= 16 */ + /* 0 < len < 16 */ +less_than_16: andcc %o3, 0x3, %g0 bne,pn %XCC, 90f sub %o0, %o1, %o3 @@ -493,7 +502,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ bgu,pt %XCC, 1b add %o1, 4, %o1 -85: retl +end_return: + retl mov EX_RETVAL(%o4), %o0 .align 32 diff --git a/arch/sparc/mm/gup.c b/arch/sparc/mm/gup.c index f80cfc64c55b..d809099ffd47 100644 --- a/arch/sparc/mm/gup.c +++ b/arch/sparc/mm/gup.c @@ -103,6 +103,45 @@ static int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr, return 1; } +static int gup_huge_pud(pud_t *pudp, pud_t pud, unsigned long addr, + unsigned long end, int write, struct page **pages, + int *nr) +{ + struct page *head, *page; + int refs; + + if (!(pud_val(pud) & _PAGE_VALID)) + return 0; + + if (write && !pud_write(pud)) + return 0; + + refs = 0; + page = pud_page(pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT); + head = compound_head(page); + do { + VM_BUG_ON(compound_head(page) != head); + pages[*nr] = page; + (*nr)++; + page++; + refs++; + } while (addr += PAGE_SIZE, addr != end); + + if (!page_cache_add_speculative(head, refs)) { + *nr -= refs; + return 0; + } + + if (unlikely(pud_val(pud) != pud_val(*pudp))) { + *nr -= refs; + while (refs--) + put_page(head); + return 0; + } + + return 1; +} + static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) { @@ -141,7 +180,11 @@ static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end, next = pud_addr_end(addr, end); if (pud_none(pud)) return 0; - if (!gup_pmd_range(pud, addr, next, write, pages, nr)) + if (unlikely(pud_large(pud))) { + if (!gup_huge_pud(pudp, pud, addr, next, + write, pages, nr)) + return 0; + } else if (!gup_pmd_range(pud, addr, next, write, pages, nr)) return 0; } while (pudp++, addr = next, addr != end); diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c index 28ee8d8ffa07..bcd8cdbc377f 100644 --- a/arch/sparc/mm/hugetlbpage.c +++ b/arch/sparc/mm/hugetlbpage.c @@ -143,6 +143,10 @@ static pte_t sun4v_hugepage_shift_to_tte(pte_t entry, unsigned int shift) pte_val(entry) = pte_val(entry) & ~_PAGE_SZALL_4V; switch (shift) { + case HPAGE_16GB_SHIFT: + hugepage_size = _PAGE_SZ16GB_4V; + pte_val(entry) |= _PAGE_PUD_HUGE; + break; case HPAGE_2GB_SHIFT: hugepage_size = _PAGE_SZ2GB_4V; pte_val(entry) |= _PAGE_PMD_HUGE; @@ -187,6 +191,9 @@ static unsigned int sun4v_huge_tte_to_shift(pte_t entry) unsigned int shift; 
switch (tte_szbits) { + case _PAGE_SZ16GB_4V: + shift = HPAGE_16GB_SHIFT; + break; case _PAGE_SZ2GB_4V: shift = HPAGE_2GB_SHIFT; break; @@ -259,22 +266,19 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, pgd_t *pgd; pud_t *pud; pmd_t *pmd; - pte_t *pte = NULL; pgd = pgd_offset(mm, addr); pud = pud_alloc(mm, pgd, addr); - if (pud) { - pmd = pmd_alloc(mm, pud, addr); - if (!pmd) - return NULL; - - if (sz >= PMD_SIZE) - pte = (pte_t *)pmd; - else - pte = pte_alloc_map(mm, pmd, addr); - } - - return pte; + if (!pud) + return NULL; + if (sz >= PUD_SIZE) + return (pte_t *)pud; + pmd = pmd_alloc(mm, pud, addr); + if (!pmd) + return NULL; + if (sz >= PMD_SIZE) + return (pte_t *)pmd; + return pte_alloc_map(mm, pmd, addr); } pte_t *huge_pte_offset(struct mm_struct *mm, @@ -283,34 +287,40 @@ pte_t *huge_pte_offset(struct mm_struct *mm, pgd_t *pgd; pud_t *pud; pmd_t *pmd; - pte_t *pte = NULL; pgd = pgd_offset(mm, addr); - if (!pgd_none(*pgd)) { - pud = pud_offset(pgd, addr); - if (!pud_none(*pud)) { - pmd = pmd_offset(pud, addr); - if (!pmd_none(*pmd)) { - if (is_hugetlb_pmd(*pmd)) - pte = (pte_t *)pmd; - else - pte = pte_offset_map(pmd, addr); - } - } - } - - return pte; + if (pgd_none(*pgd)) + return NULL; + pud = pud_offset(pgd, addr); + if (pud_none(*pud)) + return NULL; + if (is_hugetlb_pud(*pud)) + return (pte_t *)pud; + pmd = pmd_offset(pud, addr); + if (pmd_none(*pmd)) + return NULL; + if (is_hugetlb_pmd(*pmd)) + return (pte_t *)pmd; + return pte_offset_map(pmd, addr); } void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t entry) { - unsigned int i, nptes, orig_shift, shift; - unsigned long size; + unsigned int nptes, orig_shift, shift; + unsigned long i, size; pte_t orig; size = huge_tte_to_size(entry); - shift = size >= HPAGE_SIZE ? PMD_SHIFT : PAGE_SHIFT; + + shift = PAGE_SHIFT; + if (size >= PUD_SIZE) + shift = PUD_SHIFT; + else if (size >= PMD_SIZE) + shift = PMD_SHIFT; + else + shift = PAGE_SHIFT; + nptes = size >> shift; if (!pte_present(*ptep) && pte_present(entry)) @@ -333,19 +343,23 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { - unsigned int i, nptes, hugepage_shift; + unsigned int i, nptes, orig_shift, shift; unsigned long size; pte_t entry; entry = *ptep; size = huge_tte_to_size(entry); - if (size >= HPAGE_SIZE) - nptes = size >> PMD_SHIFT; + + shift = PAGE_SHIFT; + if (size >= PUD_SIZE) + shift = PUD_SHIFT; + else if (size >= PMD_SIZE) + shift = PMD_SHIFT; else - nptes = size >> PAGE_SHIFT; + shift = PAGE_SHIFT; - hugepage_shift = pte_none(entry) ? PAGE_SHIFT : - huge_tte_to_shift(entry); + nptes = size >> shift; + orig_shift = pte_none(entry) ? 
PAGE_SHIFT : huge_tte_to_shift(entry); if (pte_present(entry)) mm->context.hugetlb_pte_count -= nptes; @@ -354,11 +368,11 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, for (i = 0; i < nptes; i++) ptep[i] = __pte(0UL); - maybe_tlb_batch_add(mm, addr, ptep, entry, 0, hugepage_shift); + maybe_tlb_batch_add(mm, addr, ptep, entry, 0, orig_shift); /* An HPAGE_SIZE'ed page is composed of two REAL_HPAGE_SIZE'ed pages */ if (size == HPAGE_SIZE) maybe_tlb_batch_add(mm, addr + REAL_HPAGE_SIZE, ptep, entry, 0, - hugepage_shift); + orig_shift); return entry; } @@ -371,7 +385,8 @@ int pmd_huge(pmd_t pmd) int pud_huge(pud_t pud) { - return 0; + return !pud_none(pud) && + (pud_val(pud) & (_PAGE_VALID|_PAGE_PUD_HUGE)) != _PAGE_VALID; } static void hugetlb_free_pte_range(struct mmu_gather *tlb, pmd_t *pmd, @@ -435,8 +450,11 @@ static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd, next = pud_addr_end(addr, end); if (pud_none_or_clear_bad(pud)) continue; - hugetlb_free_pmd_range(tlb, pud, addr, next, floor, - ceiling); + if (is_hugetlb_pud(*pud)) + pud_clear(pud); + else + hugetlb_free_pmd_range(tlb, pud, addr, next, floor, + ceiling); } while (pud++, addr = next, addr != end); start &= PGDIR_MASK; diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c index afa0099f3748..b2ba410b26f4 100644 --- a/arch/sparc/mm/init_64.c +++ b/arch/sparc/mm/init_64.c @@ -348,6 +348,18 @@ static int __init hugetlbpage_init(void) arch_initcall(hugetlbpage_init); +static void __init pud_huge_patch(void) +{ + struct pud_huge_patch_entry *p; + unsigned long addr; + + p = &__pud_huge_patch; + addr = p->addr; + *(unsigned int *)addr = p->insn; + + __asm__ __volatile__("flush %0" : : "r" (addr)); +} + static int __init setup_hugepagesz(char *string) { unsigned long long hugepage_size; @@ -360,6 +372,11 @@ static int __init setup_hugepagesz(char *string) hugepage_shift = ilog2(hugepage_size); switch (hugepage_shift) { + case HPAGE_16GB_SHIFT: + hv_pgsz_mask = HV_PGSZ_MASK_16GB; + hv_pgsz_idx = HV_PGSZ_IDX_16GB; + pud_huge_patch(); + break; case HPAGE_2GB_SHIFT: hv_pgsz_mask = HV_PGSZ_MASK_2GB; hv_pgsz_idx = HV_PGSZ_IDX_2GB; @@ -400,6 +417,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t * { struct mm_struct *mm; unsigned long flags; + bool is_huge_tsb; pte_t pte = *ptep; if (tlb_type != hypervisor) { @@ -417,15 +435,37 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t * spin_lock_irqsave(&mm->context.lock, flags); + is_huge_tsb = false; #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) - if ((mm->context.hugetlb_pte_count || mm->context.thp_pte_count) && - is_hugetlb_pmd(__pmd(pte_val(pte)))) { - /* We are fabricating 8MB pages using 4MB real hw pages. */ - pte_val(pte) |= (address & (1UL << REAL_HPAGE_SHIFT)); - __update_mmu_tsb_insert(mm, MM_TSB_HUGE, REAL_HPAGE_SHIFT, - address, pte_val(pte)); - } else + if (mm->context.hugetlb_pte_count || mm->context.thp_pte_count) { + unsigned long hugepage_size = PAGE_SIZE; + + if (is_vm_hugetlb_page(vma)) + hugepage_size = huge_page_size(hstate_vma(vma)); + + if (hugepage_size >= PUD_SIZE) { + unsigned long mask = 0x1ffc00000UL; + + /* Transfer bits [32:22] from address to resolve + * at 4M granularity. + */ + pte_val(pte) &= ~mask; + pte_val(pte) |= (address & mask); + } else if (hugepage_size >= PMD_SIZE) { + /* We are fabricating 8MB pages using 4MB + * real hw pages. 
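+ * Bit 22 (REAL_HPAGE_SHIFT) of the faulting
+ * address selects which 4MB half this TSB
+ * entry must map, so it is transferred into
+ * the TTE as well.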
+ */
+ pte_val(pte) |= (address & (1UL << REAL_HPAGE_SHIFT));
+ }
+
+ if (hugepage_size >= PMD_SIZE) {
+ __update_mmu_tsb_insert(mm, MM_TSB_HUGE,
+ REAL_HPAGE_SHIFT, address, pte_val(pte));
+ is_huge_tsb = true;
+ }
+ }
 #endif
+ if (!is_huge_tsb)
 __update_mmu_tsb_insert(mm, MM_TSB_BASE, PAGE_SHIFT, address, pte_val(pte));
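
For the PUD-sized (16GB) case above, the huge TSB still holds 4MB-granular entries, so update_mmu_cache() copies bits [32:22] of the faulting virtual address into the low physical-address bits of the TTE (which are zero for a 16GB-aligned page) before inserting it. A minimal C model of that bit transfer (a sketch; the helper name is illustrative, not a kernel API):

	/* Mirror the mask arithmetic in update_mmu_cache() above. */
	static unsigned long pud_huge_tsb_tte(unsigned long tte,
					      unsigned long address)
	{
		unsigned long mask = 0x1ffc00000UL;	/* bits [32:22] */

		return (tte & ~mask) | (address & mask);
	}

For example, a fault at offset 0x12c00000 into such a page lies in 4MB slice number 75 (0x12c00000 >> 22 = 75), and it is exactly those eleven bits that end up in the TTE, so each 4MB slice of the 16GB page resolves to the right physical 4MB chunk.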