author     Eugeniy Paltsev                            2019-01-16 14:29:50 +0300
committer  Vineet Gupta                               2019-02-21 11:03:15 -0800
commit     252f6e8eae909bc075a1b1e3b9efb095ae4c0b56   (patch)
tree       71cd3352389d5a4fe7b95832f6911a8e7450fc46   /arch
parent     f17b5f06cb92ef2250513a1e154c47b78df07d40   (diff)
ARCv2: Enable unaligned access in early ASM code
Enabling unaligned access is currently done in arc_init_IRQ(), which might be
too late given that gcc 7.3.1 onwards (ARC GNU 2018.03) generates unaligned
memory accesses by default.
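As a purely illustrative C sketch (not part of the original message), this is
the kind of source where a newer ARC gcc may emit a hardware unaligned access
that would fault while STATUS32.AD is still at its reset value of 0:

	/*
	 * Hypothetical example, not from the commit: with ARC gcc 7.3.1+
	 * (ARC GNU 2018.03) the compiler assumes unaligned loads/stores are
	 * legal, so it may read this packed field with a single 32-bit load.
	 * If STATUS32.AD is still 0 (the reset default), that load faults.
	 */
	struct __attribute__((packed)) hdr {
		char tag;
		int  len;		/* at offset 1, not 4-byte aligned */
	};

	int read_len(const struct hdr *h)
	{
		return h->len;		/* may compile to an unaligned 32-bit load */
	}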
Cc: stable@vger.kernel.org #4.4+
Signed-off-by: Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>
Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
[vgupta: rewrote changelog]
Diffstat (limited to 'arch')
-rw-r--r--   arch/arc/kernel/head.S | 10 ++++++++++
1 file changed, 10 insertions(+), 0 deletions(-)
diff --git a/arch/arc/kernel/head.S b/arch/arc/kernel/head.S
index 8b90d25a15cc..26e33a8b2d18 100644
--- a/arch/arc/kernel/head.S
+++ b/arch/arc/kernel/head.S
@@ -17,6 +17,7 @@
 #include <asm/entry.h>
 #include <asm/arcregs.h>
 #include <asm/cache.h>
+#include <asm/irqflags.h>
 
 .macro CPU_EARLY_SETUP
@@ -47,6 +48,15 @@
 	sr	r5, [ARC_REG_DC_CTRL]
 
 1:
+
+#ifdef CONFIG_ISA_ARCV2
+	; Unaligned access is disabled at reset, so re-enable early as
+	; gcc 7.3.1 (ARC GNU 2018.03) onwards generates unaligned access
+	; by default
+	lr	r5, [status32]
+	bset	r5, r5, STATUS_AD_BIT
+	kflag	r5
+#endif
 .endm
 
 	.section .init.text, "ax",@progbits
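For context, here is a rough C-level sketch of what the added assembly does
(and of the later enable in arc_init_IRQ() that this patch front-loads). The
helper and symbol names (read_aux_reg(), ARC_REG_STATUS32, STATUS_AD_MASK) are
assumed from the ARC port rather than quoted from it; treat this as an
illustration, not kernel code. STATUS32 cannot be written with a plain SR,
which is why both this sketch and the assembly above use KFLAG:

	#include <linux/init.h>
	#include <asm/arcregs.h>	/* read_aux_reg(), ARC_REG_STATUS32: assumed names */
	#include <asm/irqflags.h>	/* STATUS_AD_MASK: assumed name */

	static void __init enable_unaligned_access(void)
	{
		/*
		 * Read STATUS32, set AD (alignment check disable) and write it
		 * back. STATUS32 is not writable via a normal SR; the KFLAG
		 * instruction must be used, hence the "kflag r5" above.
		 */
		unsigned int st32 = read_aux_reg(ARC_REG_STATUS32);

		st32 |= STATUS_AD_MASK;
		asm volatile("kflag %0" : : "r" (st32));
	}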