author     Linus Torvalds  2017-07-02 11:53:44 -0700
committer  Linus Torvalds  2017-07-02 11:53:44 -0700
commit     79c496816963aa0561868b43c2c950dfeb282639 (patch)
tree       e127b465e24c9ba27d996fcb7b70aa67f60ffb3e /arch
parent     3a61a54cd72c93afa3b7246e3ed06f26ed37fde7 (diff)
parent     854236363370995a609a10b03e35fd3dc5e9e4a1 (diff)
Merge branch 'upstream' of git://git.linux-mips.org/pub/scm/ralf/upstream-linus
Pull MIPS fixes from Ralf Baechle:
"Here's a final round of fixes for 4.12:
- Fix misordered instructions in assembly code making kernel startup
  via UHI unreliable.
- Fix the zero-accumulator special case of MADDF and MSUBF emulation
  (a worked sketch follows the shortlog below).
- Fix alignment issue in address calculation in pm-cps on 64 bit.
- Fix IRQ tracing & lockdep when rescheduling.
- Systems with MAARs require post-DMA cache flushes (see the sketch
  after the diff).
The reordering fix and the MADDF/MSUBF fix have sat in linux-next for
a number of days. The others haven't propagated from my pull tree to
linux-next yet but all have survived manual testing and Imagination's
automated test system and there are no pending bug reports"
* 'upstream' of git://git.linux-mips.org/pub/scm/ralf/upstream-linus:
MIPS: Avoid accidental raw backtrace
MIPS: Perform post-DMA cache flushes on systems with MAARs
MIPS: Fix IRQ tracing & lockdep when rescheduling
MIPS: pm-cps: Drop manual cache-line alignment of ready_count
MIPS: math-emu: Handle zero accumulator case in MADDF and MSUBF separately
MIPS: head: Reorder instructions missing a delay slot
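For context on the MADDF/MSUBF change in the dp_maddf.c and sp_maddf.c hunks below: MADDF.fmt computes fd = fd + (fs x ft) with a single rounding (MSUBF.fmt subtracts the product), so when the accumulator is zero the result is just the correctly rounded product, and the emulation can now return that early instead of falling into the addition path. The following is a minimal user-space sketch of that semantics, using C99's fma() as a stand-in for the emulated operation; it is illustrative only and is not the kernel's math-emu code.

#include <math.h>
#include <stdio.h>

/*
 * Illustrative model of MADDF.D: result = z + (x * y), computed as a
 * fused multiply-add with a single rounding step.  With a zero
 * accumulator z the result reduces to the correctly rounded product,
 * which is the case the emulation now handles separately.
 */
static double maddf_d(double z, double x, double y)
{
        return fma(x, y, z);
}

int main(void)
{
        printf("%g\n", maddf_d(0.0, 1.5, 2.0));   /* 3: zero accumulator */
        printf("%g\n", maddf_d(10.0, 1.5, 2.0));  /* 13: general case */
        return 0;
}

Build with e.g. cc -std=c99 maddf.c -lm.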
Diffstat (limited to 'arch')
-rw-r--r--  arch/mips/kernel/entry.S       |  3
-rw-r--r--  arch/mips/kernel/head.S        |  2
-rw-r--r--  arch/mips/kernel/pm-cps.c      |  9
-rw-r--r--  arch/mips/kernel/traps.c       |  2
-rw-r--r--  arch/mips/math-emu/dp_maddf.c  |  5
-rw-r--r--  arch/mips/math-emu/sp_maddf.c  |  5
-rw-r--r--  arch/mips/mm/dma-default.c     | 23
7 files changed, 33 insertions(+), 16 deletions(-)
diff --git a/arch/mips/kernel/entry.S b/arch/mips/kernel/entry.S
index 8d83fc2a96b7..38a302919e6b 100644
--- a/arch/mips/kernel/entry.S
+++ b/arch/mips/kernel/entry.S
@@ -11,6 +11,7 @@
 #include <asm/asm.h>
 #include <asm/asmmacro.h>
 #include <asm/compiler.h>
+#include <asm/irqflags.h>
 #include <asm/regdef.h>
 #include <asm/mipsregs.h>
 #include <asm/stackframe.h>
@@ -119,6 +120,7 @@ work_pending:
 	andi	t0, a2, _TIF_NEED_RESCHED # a2 is preloaded with TI_FLAGS
 	beqz	t0, work_notifysig
 work_resched:
+	TRACE_IRQS_OFF
 	jal	schedule

 	local_irq_disable		# make sure need_resched and
@@ -155,6 +157,7 @@ syscall_exit_work:
 	beqz	t0, work_pending	# trace bit set?
 	local_irq_enable		# could let syscall_trace_leave()
 					# call schedule() instead
+	TRACE_IRQS_ON
 	move	a0, sp
 	jal	syscall_trace_leave
 	b	resume_userspace
diff --git a/arch/mips/kernel/head.S b/arch/mips/kernel/head.S
index cf052204eb0a..d1bb506adc10 100644
--- a/arch/mips/kernel/head.S
+++ b/arch/mips/kernel/head.S
@@ -106,8 +106,8 @@ NESTED(kernel_entry, 16, sp)			# kernel entry point
 	beq	t0, t1, dtb_found
 #endif
 	li	t1, -2
-	beq	a0, t1, dtb_found
 	move	t2, a1
+	beq	a0, t1, dtb_found

 	li	t2, 0
 dtb_found:
diff --git a/arch/mips/kernel/pm-cps.c b/arch/mips/kernel/pm-cps.c
index 5f928c34c148..d99416094ba9 100644
--- a/arch/mips/kernel/pm-cps.c
+++ b/arch/mips/kernel/pm-cps.c
@@ -56,7 +56,6 @@ DECLARE_BITMAP(state_support, CPS_PM_STATE_COUNT);
  * state. Actually per-core rather than per-CPU.
  */
 static DEFINE_PER_CPU_ALIGNED(u32*, ready_count);
-static DEFINE_PER_CPU_ALIGNED(void*, ready_count_alloc);

 /* Indicates online CPUs coupled with the current CPU */
 static DEFINE_PER_CPU_ALIGNED(cpumask_t, online_coupled);
@@ -642,7 +641,6 @@ static int cps_pm_online_cpu(unsigned int cpu)
 {
 	enum cps_pm_state state;
 	unsigned core = cpu_data[cpu].core;
-	unsigned dlinesz = cpu_data[cpu].dcache.linesz;
 	void *entry_fn, *core_rc;

 	for (state = CPS_PM_NC_WAIT; state < CPS_PM_STATE_COUNT; state++) {
@@ -662,16 +660,11 @@ static int cps_pm_online_cpu(unsigned int cpu)
 	}

 	if (!per_cpu(ready_count, core)) {
-		core_rc = kmalloc(dlinesz * 2, GFP_KERNEL);
+		core_rc = kmalloc(sizeof(u32), GFP_KERNEL);
 		if (!core_rc) {
 			pr_err("Failed allocate core %u ready_count\n", core);
 			return -ENOMEM;
 		}
-		per_cpu(ready_count_alloc, core) = core_rc;
-
-		/* Ensure ready_count is aligned to a cacheline boundary */
-		core_rc += dlinesz - 1;
-		core_rc = (void *)((unsigned long)core_rc & ~(dlinesz - 1));
 		per_cpu(ready_count, core) = core_rc;
 	}

diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 9681b5877140..38dfa27730ff 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -201,6 +201,8 @@ void show_stack(struct task_struct *task, unsigned long *sp)
 {
 	struct pt_regs regs;
 	mm_segment_t old_fs = get_fs();
+
+	regs.cp0_status = KSU_KERNEL;
 	if (sp) {
 		regs.regs[29] = (unsigned long)sp;
 		regs.regs[31] = 0;
diff --git a/arch/mips/math-emu/dp_maddf.c b/arch/mips/math-emu/dp_maddf.c
index 4a2d03c72959..caa62f20a888 100644
--- a/arch/mips/math-emu/dp_maddf.c
+++ b/arch/mips/math-emu/dp_maddf.c
@@ -54,7 +54,7 @@ static union ieee754dp _dp_maddf(union ieee754dp z, union ieee754dp x,
 		return ieee754dp_nanxcpt(z);
 	case IEEE754_CLASS_DNORM:
 		DPDNORMZ;
-	/* QNAN is handled separately below */
+	/* QNAN and ZERO cases are handled separately below */
 	}

 	switch (CLPAIR(xc, yc)) {
@@ -210,6 +210,9 @@ static union ieee754dp _dp_maddf(union ieee754dp z, union ieee754dp x,
 	}
 	assert(rm & (DP_HIDDEN_BIT << 3));

+	if (zc == IEEE754_CLASS_ZERO)
+		return ieee754dp_format(rs, re, rm);
+
 	/* And now the addition */
 	assert(zm & DP_HIDDEN_BIT);

diff --git a/arch/mips/math-emu/sp_maddf.c b/arch/mips/math-emu/sp_maddf.c
index a8cd8b4f235e..c91d5e5d9b5f 100644
--- a/arch/mips/math-emu/sp_maddf.c
+++ b/arch/mips/math-emu/sp_maddf.c
@@ -54,7 +54,7 @@ static union ieee754sp _sp_maddf(union ieee754sp z, union ieee754sp x,
 		return ieee754sp_nanxcpt(z);
 	case IEEE754_CLASS_DNORM:
 		SPDNORMZ;
-	/* QNAN is handled separately below */
+	/* QNAN and ZERO cases are handled separately below */
 	}

 	switch (CLPAIR(xc, yc)) {
@@ -203,6 +203,9 @@ static union ieee754sp _sp_maddf(union ieee754sp z, union ieee754sp x,
 	}
 	assert(rm & (SP_HIDDEN_BIT << 3));

+	if (zc == IEEE754_CLASS_ZERO)
+		return ieee754sp_format(rs, re, rm);
+
 	/* And now the addition */
 	assert(zm & SP_HIDDEN_BIT);

diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
index fe8df14b6169..e08598c70b3e 100644
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -68,12 +68,25 @@ static inline struct page *dma_addr_to_page(struct device *dev,
  * systems and only the R10000 and R12000 are used in such systems, the
  * SGI IP28 Indigo² rsp. SGI IP32 aka O2.
  */
-static inline int cpu_needs_post_dma_flush(struct device *dev)
+static inline bool cpu_needs_post_dma_flush(struct device *dev)
 {
-	return !plat_device_is_coherent(dev) &&
-	       (boot_cpu_type() == CPU_R10000 ||
-		boot_cpu_type() == CPU_R12000 ||
-		boot_cpu_type() == CPU_BMIPS5000);
+	if (plat_device_is_coherent(dev))
+		return false;
+
+	switch (boot_cpu_type()) {
+	case CPU_R10000:
+	case CPU_R12000:
+	case CPU_BMIPS5000:
+		return true;
+
+	default:
+		/*
+		 * Presence of MAARs suggests that the CPU supports
+		 * speculatively prefetching data, and therefore requires
+		 * the post-DMA flush/invalidate.
+		 */
+		return cpu_has_maar;
+	}
 }

 static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
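As a footnote on the dma-default.c hunk above: the predicate now treats any CPU that implements MAARs as one that may speculatively fill cache lines, so a non-coherent device's buffer must have its cache lines invalidated after the device writes memory. Below is a small, self-contained user-space model of that decision logic only; plat_coherent, cpu and has_maar are stand-ins for the kernel's plat_device_is_coherent(), boot_cpu_type() and cpu_has_maar, and nothing here is the actual kernel API.

#include <stdbool.h>
#include <stdio.h>

enum cpu_type { CPU_OTHER, CPU_R10000, CPU_R12000, CPU_BMIPS5000 };

/*
 * Model of cpu_needs_post_dma_flush(): flush only when the device is
 * not hardware-coherent and the CPU can speculatively fetch data.
 */
static bool needs_post_dma_flush(bool plat_coherent, enum cpu_type cpu,
                                 bool has_maar)
{
        if (plat_coherent)
                return false;           /* hardware keeps caches coherent */

        switch (cpu) {
        case CPU_R10000:
        case CPU_R12000:
        case CPU_BMIPS5000:
                return true;            /* known speculative CPUs */
        default:
                return has_maar;        /* MAARs imply speculative prefetch */
        }
}

int main(void)
{
        /*
         * A MAAR-capable CPU behind a non-coherent bus must invalidate
         * the DMA buffer's cache lines after a device-to-memory transfer.
         */
        printf("%d\n", needs_post_dma_flush(false, CPU_OTHER, true));   /* 1 */
        printf("%d\n", needs_post_dma_flush(true, CPU_OTHER, true));    /* 0 */
        return 0;
}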