Diffstat (limited to 'arch/arm64/include/asm')
-rw-r--r--  arch/arm64/include/asm/archrandom.h | 112
-rw-r--r--  arch/arm64/include/asm/asm-extable.h | 79
-rw-r--r--  arch/arm64/include/asm/asm-uaccess.h | 12
-rw-r--r--  arch/arm64/include/asm/asm_pointer_auth.h | 4
-rw-r--r--  arch/arm64/include/asm/assembler.h | 35
-rw-r--r--  arch/arm64/include/asm/barrier.h | 12
-rw-r--r--  arch/arm64/include/asm/cache.h | 41
-rw-r--r--  arch/arm64/include/asm/cacheflush.h | 7
-rw-r--r--  arch/arm64/include/asm/cpu.h | 1
-rw-r--r--  arch/arm64/include/asm/cpu_ops.h | 9
-rw-r--r--  arch/arm64/include/asm/cpufeature.h | 7
-rw-r--r--  arch/arm64/include/asm/cpuidle.h | 15
-rw-r--r--  arch/arm64/include/asm/efi.h | 7
-rw-r--r--  arch/arm64/include/asm/el2_setup.h | 60
-rw-r--r--  arch/arm64/include/asm/fixmap.h | 4
-rw-r--r--  arch/arm64/include/asm/hwcap.h | 3
-rw-r--r--  arch/arm64/include/asm/insn.h | 3
-rw-r--r--  arch/arm64/include/asm/io.h | 65
-rw-r--r--  arch/arm64/include/asm/kernel-pgtable.h | 18
-rw-r--r--  arch/arm64/include/asm/kexec.h | 18
-rw-r--r--  arch/arm64/include/asm/kvm_asm.h | 16
-rw-r--r--  arch/arm64/include/asm/kvm_emulate.h | 11
-rw-r--r--  arch/arm64/include/asm/kvm_host.h | 205
-rw-r--r--  arch/arm64/include/asm/memory.h | 17
-rw-r--r--  arch/arm64/include/asm/mmu_context.h | 16
-rw-r--r--  arch/arm64/include/asm/pci.h | 18
-rw-r--r--  arch/arm64/include/asm/pgtable-hwdef.h | 3
-rw-r--r--  arch/arm64/include/asm/pgtable.h | 16
-rw-r--r--  arch/arm64/include/asm/processor.h | 3
-rw-r--r--  arch/arm64/include/asm/stacktrace.h | 62
-rw-r--r--  arch/arm64/include/asm/stacktrace/common.h | 199
-rw-r--r--  arch/arm64/include/asm/stacktrace/nvhe.h | 55
-rw-r--r--  arch/arm64/include/asm/sysreg.h | 128
-rw-r--r--  arch/arm64/include/asm/uaccess.h | 94
-rw-r--r--  arch/arm64/include/asm/virt.h | 11
35 files changed, 772 insertions, 594 deletions
diff --git a/arch/arm64/include/asm/archrandom.h b/arch/arm64/include/asm/archrandom.h
index 3a6b6d38c5b8..109e2a4454be 100644
--- a/arch/arm64/include/asm/archrandom.h
+++ b/arch/arm64/include/asm/archrandom.h
@@ -2,8 +2,6 @@
#ifndef _ASM_ARCHRANDOM_H
#define _ASM_ARCHRANDOM_H
-#ifdef CONFIG_ARCH_RANDOM
-
#include <linux/arm-smccc.h>
#include <linux/bug.h>
#include <linux/kernel.h>
@@ -60,7 +58,7 @@ static inline bool __arm64_rndrrs(unsigned long *v)
return ok;
}
-static inline bool __must_check arch_get_random_long(unsigned long *v)
+static inline size_t __must_check arch_get_random_longs(unsigned long *v, size_t max_longs)
{
/*
* Only support the generic interface after we have detected
@@ -68,27 +66,15 @@ static inline bool __must_check arch_get_random_long(unsigned long *v)
* cpufeature code and with potential scheduling between CPUs
* with and without the feature.
*/
- if (cpus_have_const_cap(ARM64_HAS_RNG) && __arm64_rndr(v))
- return true;
- return false;
-}
-
-static inline bool __must_check arch_get_random_int(unsigned int *v)
-{
- if (cpus_have_const_cap(ARM64_HAS_RNG)) {
- unsigned long val;
-
- if (__arm64_rndr(&val)) {
- *v = val;
- return true;
- }
- }
- return false;
+ if (max_longs && cpus_have_const_cap(ARM64_HAS_RNG) && __arm64_rndr(v))
+ return 1;
+ return 0;
}
-static inline bool __must_check arch_get_random_seed_long(unsigned long *v)
+static inline size_t __must_check arch_get_random_seed_longs(unsigned long *v, size_t max_longs)
{
- struct arm_smccc_res res;
+ if (!max_longs)
+ return 0;
/*
* We prefer the SMCCC call, since its semantics (return actual
@@ -97,10 +83,23 @@ static inline bool __must_check arch_get_random_seed_long(unsigned long *v)
* (the output of a pseudo RNG freshly seeded by a TRNG).
*/
if (smccc_trng_available) {
- arm_smccc_1_1_invoke(ARM_SMCCC_TRNG_RND64, 64, &res);
+ struct arm_smccc_res res;
+
+ max_longs = min_t(size_t, 3, max_longs);
+ arm_smccc_1_1_invoke(ARM_SMCCC_TRNG_RND64, max_longs * 64, &res);
if ((int)res.a0 >= 0) {
- *v = res.a3;
- return true;
+ switch (max_longs) {
+ case 3:
+ *v++ = res.a1;
+ fallthrough;
+ case 2:
+ *v++ = res.a2;
+ fallthrough;
+ case 1:
+ *v++ = res.a3;
+ break;
+ }
+ return max_longs;
}
}
@@ -110,32 +109,9 @@ static inline bool __must_check arch_get_random_seed_long(unsigned long *v)
* enough to implement this API if no other entropy source exists.
*/
if (cpus_have_const_cap(ARM64_HAS_RNG) && __arm64_rndrrs(v))
- return true;
+ return 1;
- return false;
-}
-
-static inline bool __must_check arch_get_random_seed_int(unsigned int *v)
-{
- struct arm_smccc_res res;
- unsigned long val;
-
- if (smccc_trng_available) {
- arm_smccc_1_1_invoke(ARM_SMCCC_TRNG_RND64, 32, &res);
- if ((int)res.a0 >= 0) {
- *v = res.a3 & GENMASK(31, 0);
- return true;
- }
- }
-
- if (cpus_have_const_cap(ARM64_HAS_RNG)) {
- if (__arm64_rndrrs(&val)) {
- *v = val;
- return true;
- }
- }
-
- return false;
+ return 0;
}
static inline bool __init __early_cpu_has_rndr(void)
@@ -145,34 +121,40 @@ static inline bool __init __early_cpu_has_rndr(void)
return (ftr >> ID_AA64ISAR0_EL1_RNDR_SHIFT) & 0xf;
}
-static inline bool __init __must_check
-arch_get_random_seed_long_early(unsigned long *v)
+static inline size_t __init __must_check
+arch_get_random_seed_longs_early(unsigned long *v, size_t max_longs)
{
WARN_ON(system_state != SYSTEM_BOOTING);
+ if (!max_longs)
+ return 0;
+
if (smccc_trng_available) {
struct arm_smccc_res res;
- arm_smccc_1_1_invoke(ARM_SMCCC_TRNG_RND64, 64, &res);
+ max_longs = min_t(size_t, 3, max_longs);
+ arm_smccc_1_1_invoke(ARM_SMCCC_TRNG_RND64, max_longs * 64, &res);
if ((int)res.a0 >= 0) {
- *v = res.a3;
- return true;
+ switch (max_longs) {
+ case 3:
+ *v++ = res.a1;
+ fallthrough;
+ case 2:
+ *v++ = res.a2;
+ fallthrough;
+ case 1:
+ *v++ = res.a3;
+ break;
+ }
+ return max_longs;
}
}
if (__early_cpu_has_rndr() && __arm64_rndr(v))
- return true;
-
- return false;
-}
-#define arch_get_random_seed_long_early arch_get_random_seed_long_early
+ return 1;
-#else /* !CONFIG_ARCH_RANDOM */
-
-static inline bool __init smccc_probe_trng(void)
-{
- return false;
+ return 0;
}
+#define arch_get_random_seed_longs_early arch_get_random_seed_longs_early
-#endif /* CONFIG_ARCH_RANDOM */
#endif /* _ASM_ARCHRANDOM_H */
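
The net effect of this hunk: arm64 now implements the batched arch_get_random_longs()/arch_get_random_seed_longs() interface, returning how many longs were produced (up to max_longs, and at most 3 per SMCCC TRNG call) rather than a bool. A minimal caller sketch, purely illustrative (fill_seed_buf() is a hypothetical helper, not part of this diff):

#include <linux/types.h>
#include <asm/archrandom.h>

/* Hypothetical helper: gather up to nlongs of seed material. */
static size_t fill_seed_buf(unsigned long *buf, size_t nlongs)
{
        size_t filled = 0;

        while (filled < nlongs) {
                size_t got = arch_get_random_seed_longs(buf + filled,
                                                        nlongs - filled);

                if (!got)
                        break;  /* no (more) entropy available right now */
                filled += got;
        }

        return filled;
}
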
diff --git a/arch/arm64/include/asm/asm-extable.h b/arch/arm64/include/asm/asm-extable.h
index c39f2437e08e..980d1dd8e1a3 100644
--- a/arch/arm64/include/asm/asm-extable.h
+++ b/arch/arm64/include/asm/asm-extable.h
@@ -2,12 +2,27 @@
#ifndef __ASM_ASM_EXTABLE_H
#define __ASM_ASM_EXTABLE_H
+#include <linux/bits.h>
+#include <asm/gpr-num.h>
+
#define EX_TYPE_NONE 0
-#define EX_TYPE_FIXUP 1
-#define EX_TYPE_BPF 2
-#define EX_TYPE_UACCESS_ERR_ZERO 3
+#define EX_TYPE_BPF 1
+#define EX_TYPE_UACCESS_ERR_ZERO 2
+#define EX_TYPE_KACCESS_ERR_ZERO 3
#define EX_TYPE_LOAD_UNALIGNED_ZEROPAD 4
+/* Data fields for EX_TYPE_UACCESS_ERR_ZERO */
+#define EX_DATA_REG_ERR_SHIFT 0
+#define EX_DATA_REG_ERR GENMASK(4, 0)
+#define EX_DATA_REG_ZERO_SHIFT 5
+#define EX_DATA_REG_ZERO GENMASK(9, 5)
+
+/* Data fields for EX_TYPE_LOAD_UNALIGNED_ZEROPAD */
+#define EX_DATA_REG_DATA_SHIFT 0
+#define EX_DATA_REG_DATA GENMASK(4, 0)
+#define EX_DATA_REG_ADDR_SHIFT 5
+#define EX_DATA_REG_ADDR GENMASK(9, 5)
+
#ifdef __ASSEMBLY__
#define __ASM_EXTABLE_RAW(insn, fixup, type, data) \
@@ -19,31 +34,45 @@
.short (data); \
.popsection;
+#define EX_DATA_REG(reg, gpr) \
+ (.L__gpr_num_##gpr << EX_DATA_REG_##reg##_SHIFT)
+
+#define _ASM_EXTABLE_UACCESS_ERR_ZERO(insn, fixup, err, zero) \
+ __ASM_EXTABLE_RAW(insn, fixup, \
+ EX_TYPE_UACCESS_ERR_ZERO, \
+ ( \
+ EX_DATA_REG(ERR, err) | \
+ EX_DATA_REG(ZERO, zero) \
+ ))
+
+#define _ASM_EXTABLE_UACCESS_ERR(insn, fixup, err) \
+ _ASM_EXTABLE_UACCESS_ERR_ZERO(insn, fixup, err, wzr)
+
+#define _ASM_EXTABLE_UACCESS(insn, fixup) \
+ _ASM_EXTABLE_UACCESS_ERR_ZERO(insn, fixup, wzr, wzr)
+
/*
- * Create an exception table entry for `insn`, which will branch to `fixup`
+ * Create an exception table entry for uaccess `insn`, which will branch to `fixup`
* when an unhandled fault is taken.
*/
- .macro _asm_extable, insn, fixup
- __ASM_EXTABLE_RAW(\insn, \fixup, EX_TYPE_FIXUP, 0)
+ .macro _asm_extable_uaccess, insn, fixup
+ _ASM_EXTABLE_UACCESS(\insn, \fixup)
.endm
/*
* Create an exception table entry for `insn` if `fixup` is provided. Otherwise
* do nothing.
*/
- .macro _cond_extable, insn, fixup
- .ifnc \fixup,
- _asm_extable \insn, \fixup
+ .macro _cond_uaccess_extable, insn, fixup
+ .ifnc \fixup,
+ _asm_extable_uaccess \insn, \fixup
.endif
.endm
#else /* __ASSEMBLY__ */
-#include <linux/bits.h>
#include <linux/stringify.h>
-#include <asm/gpr-num.h>
-
#define __ASM_EXTABLE_RAW(insn, fixup, type, data) \
".pushsection __ex_table, \"a\"\n" \
".align 2\n" \
@@ -53,14 +82,6 @@
".short (" data ")\n" \
".popsection\n"
-#define _ASM_EXTABLE(insn, fixup) \
- __ASM_EXTABLE_RAW(#insn, #fixup, __stringify(EX_TYPE_FIXUP), "0")
-
-#define EX_DATA_REG_ERR_SHIFT 0
-#define EX_DATA_REG_ERR GENMASK(4, 0)
-#define EX_DATA_REG_ZERO_SHIFT 5
-#define EX_DATA_REG_ZERO GENMASK(9, 5)
-
#define EX_DATA_REG(reg, gpr) \
"((.L__gpr_num_" #gpr ") << " __stringify(EX_DATA_REG_##reg##_SHIFT) ")"
@@ -73,13 +94,23 @@
EX_DATA_REG(ZERO, zero) \
")")
+#define _ASM_EXTABLE_KACCESS_ERR_ZERO(insn, fixup, err, zero) \
+ __DEFINE_ASM_GPR_NUMS \
+ __ASM_EXTABLE_RAW(#insn, #fixup, \
+ __stringify(EX_TYPE_KACCESS_ERR_ZERO), \
+ "(" \
+ EX_DATA_REG(ERR, err) " | " \
+ EX_DATA_REG(ZERO, zero) \
+ ")")
+
#define _ASM_EXTABLE_UACCESS_ERR(insn, fixup, err) \
_ASM_EXTABLE_UACCESS_ERR_ZERO(insn, fixup, err, wzr)
-#define EX_DATA_REG_DATA_SHIFT 0
-#define EX_DATA_REG_DATA GENMASK(4, 0)
-#define EX_DATA_REG_ADDR_SHIFT 5
-#define EX_DATA_REG_ADDR GENMASK(9, 5)
+#define _ASM_EXTABLE_UACCESS(insn, fixup) \
+ _ASM_EXTABLE_UACCESS_ERR_ZERO(insn, fixup, wzr, wzr)
+
+#define _ASM_EXTABLE_KACCESS_ERR(insn, fixup, err) \
+ _ASM_EXTABLE_KACCESS_ERR_ZERO(insn, fixup, err, wzr)
#define _ASM_EXTABLE_LOAD_UNALIGNED_ZEROPAD(insn, fixup, data, addr) \
__DEFINE_ASM_GPR_NUMS \
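
With EX_TYPE_FIXUP gone, every exception table entry now names an error/zero register pair, and the new _ASM_EXTABLE_KACCESS_ERR_ZERO covers faultable kernel-address accesses. A hedged sketch of how such an entry is consumed from C; the helper and its name are hypothetical, and it assumes the usual fixup behaviour (the err register is written with -EFAULT and the destination register is zeroed on a fault):

#include <linux/types.h>
#include <linux/errno.h>
#include <asm/asm-extable.h>

/* Hypothetical: a faultable 64-bit kernel load using the new KACCESS type. */
static inline int read_kernel_u64(const u64 *addr, u64 *out)
{
        u64 val;
        int err = 0;

        asm volatile(
        "1:     ldr     %1, [%2]\n"
        "2:\n"
        _ASM_EXTABLE_KACCESS_ERR_ZERO(1b, 2b, %w0, %w1)
        : "+r" (err), "=&r" (val)
        : "r" (addr));

        *out = val;
        return err;
}
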
diff --git a/arch/arm64/include/asm/asm-uaccess.h b/arch/arm64/include/asm/asm-uaccess.h
index 0557af834e03..75b211c98dea 100644
--- a/arch/arm64/include/asm/asm-uaccess.h
+++ b/arch/arm64/include/asm/asm-uaccess.h
@@ -61,7 +61,7 @@ alternative_else_nop_endif
#define USER(l, x...) \
9999: x; \
- _asm_extable 9999b, l
+ _asm_extable_uaccess 9999b, l
/*
* Generate the assembly for LDTR/STTR with exception table entries.
@@ -73,8 +73,8 @@ alternative_else_nop_endif
8889: ldtr \reg2, [\addr, #8];
add \addr, \addr, \post_inc;
- _asm_extable 8888b,\l;
- _asm_extable 8889b,\l;
+ _asm_extable_uaccess 8888b, \l;
+ _asm_extable_uaccess 8889b, \l;
.endm
.macro user_stp l, reg1, reg2, addr, post_inc
@@ -82,14 +82,14 @@ alternative_else_nop_endif
8889: sttr \reg2, [\addr, #8];
add \addr, \addr, \post_inc;
- _asm_extable 8888b,\l;
- _asm_extable 8889b,\l;
+ _asm_extable_uaccess 8888b,\l;
+ _asm_extable_uaccess 8889b,\l;
.endm
.macro user_ldst l, inst, reg, addr, post_inc
8888: \inst \reg, [\addr];
add \addr, \addr, \post_inc;
- _asm_extable 8888b,\l;
+ _asm_extable_uaccess 8888b, \l;
.endm
#endif
diff --git a/arch/arm64/include/asm/asm_pointer_auth.h b/arch/arm64/include/asm/asm_pointer_auth.h
index ead62f7dd269..13ecc79854ee 100644
--- a/arch/arm64/include/asm/asm_pointer_auth.h
+++ b/arch/arm64/include/asm/asm_pointer_auth.h
@@ -59,9 +59,9 @@ alternative_else_nop_endif
.macro __ptrauth_keys_init_cpu tsk, tmp1, tmp2, tmp3
mrs \tmp1, id_aa64isar1_el1
- ubfx \tmp1, \tmp1, #ID_AA64ISAR1_APA_SHIFT, #8
+ ubfx \tmp1, \tmp1, #ID_AA64ISAR1_EL1_APA_SHIFT, #8
mrs_s \tmp2, SYS_ID_AA64ISAR2_EL1
- ubfx \tmp2, \tmp2, #ID_AA64ISAR2_APA3_SHIFT, #4
+ ubfx \tmp2, \tmp2, #ID_AA64ISAR2_EL1_APA3_SHIFT, #4
orr \tmp1, \tmp1, \tmp2
cbz \tmp1, .Lno_addr_auth\@
mov_q \tmp1, (SCTLR_ELx_ENIA | SCTLR_ELx_ENIB | \
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 8c5a61aeaf8e..5846145be523 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -360,6 +360,20 @@ alternative_cb_end
.endm
/*
+ * idmap_get_t0sz - get the T0SZ value needed to cover the ID map
+ *
+ * Calculate the maximum allowed value for TCR_EL1.T0SZ so that the
+ * entire ID map region can be mapped. As T0SZ == (64 - #bits used),
+ * this number conveniently equals the number of leading zeroes in
+ * the physical address of _end.
+ */
+ .macro idmap_get_t0sz, reg
+ adrp \reg, _end
+ orr \reg, \reg, #(1 << VA_BITS_MIN) - 1
+ clz \reg, \reg
+ .endm
+
+/*
* tcr_compute_pa_size - set TCR.(I)PS to the highest supported
* ID_AA64MMFR0_EL1.PARange value
*
@@ -423,7 +437,7 @@ alternative_endif
b.lo .Ldcache_op\@
dsb \domain
- _cond_extable .Ldcache_op\@, \fixup
+ _cond_uaccess_extable .Ldcache_op\@, \fixup
.endm
/*
@@ -462,7 +476,19 @@ alternative_endif
dsb ish
isb
- _cond_extable .Licache_op\@, \fixup
+ _cond_uaccess_extable .Licache_op\@, \fixup
+ .endm
+
+/*
+ * load_ttbr1 - install @pgtbl as a TTBR1 page table
+ * pgtbl preserved
+ * tmp1/tmp2 clobbered, either may overlap with pgtbl
+ */
+ .macro load_ttbr1, pgtbl, tmp1, tmp2
+ phys_to_ttbr \tmp1, \pgtbl
+ offset_ttbr1 \tmp1, \tmp2
+ msr ttbr1_el1, \tmp1
+ isb
.endm
/*
@@ -478,10 +504,7 @@ alternative_endif
isb
tlbi vmalle1
dsb nsh
- phys_to_ttbr \tmp, \page_table
- offset_ttbr1 \tmp, \tmp2
- msr ttbr1_el1, \tmp
- isb
+ load_ttbr1 \page_table, \tmp, \tmp2
.endm
/*
diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
index 9f3e2c3d2ca0..2cfc4245d2e2 100644
--- a/arch/arm64/include/asm/barrier.h
+++ b/arch/arm64/include/asm/barrier.h
@@ -50,13 +50,13 @@
#define pmr_sync() do {} while (0)
#endif
-#define mb() dsb(sy)
-#define rmb() dsb(ld)
-#define wmb() dsb(st)
+#define __mb() dsb(sy)
+#define __rmb() dsb(ld)
+#define __wmb() dsb(st)
-#define dma_mb() dmb(osh)
-#define dma_rmb() dmb(oshld)
-#define dma_wmb() dmb(oshst)
+#define __dma_mb() dmb(osh)
+#define __dma_rmb() dmb(oshld)
+#define __dma_wmb() dmb(oshst)
#define io_stop_wc() dgh()
diff --git a/arch/arm64/include/asm/cache.h b/arch/arm64/include/asm/cache.h
index 7c2181c72116..ca9b487112cc 100644
--- a/arch/arm64/include/asm/cache.h
+++ b/arch/arm64/include/asm/cache.h
@@ -5,34 +5,9 @@
#ifndef __ASM_CACHE_H
#define __ASM_CACHE_H
-#include <asm/cputype.h>
-#include <asm/mte-def.h>
-
-#define CTR_L1IP_SHIFT 14
-#define CTR_L1IP_MASK 3
-#define CTR_DMINLINE_SHIFT 16
-#define CTR_IMINLINE_SHIFT 0
-#define CTR_IMINLINE_MASK 0xf
-#define CTR_ERG_SHIFT 20
-#define CTR_CWG_SHIFT 24
-#define CTR_CWG_MASK 15
-#define CTR_IDC_SHIFT 28
-#define CTR_DIC_SHIFT 29
-
-#define CTR_CACHE_MINLINE_MASK \
- (0xf << CTR_DMINLINE_SHIFT | CTR_IMINLINE_MASK << CTR_IMINLINE_SHIFT)
-
-#define CTR_L1IP(ctr) (((ctr) >> CTR_L1IP_SHIFT) & CTR_L1IP_MASK)
-
-#define ICACHE_POLICY_VPIPT 0
-#define ICACHE_POLICY_RESERVED 1
-#define ICACHE_POLICY_VIPT 2
-#define ICACHE_POLICY_PIPT 3
-
#define L1_CACHE_SHIFT (6)
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
-
#define CLIDR_LOUU_SHIFT 27
#define CLIDR_LOC_SHIFT 24
#define CLIDR_LOUIS_SHIFT 21
@@ -55,6 +30,10 @@
#include <linux/bitops.h>
#include <linux/kasan-enabled.h>
+#include <asm/cputype.h>
+#include <asm/mte-def.h>
+#include <asm/sysreg.h>
+
#ifdef CONFIG_KASAN_SW_TAGS
#define ARCH_SLAB_MINALIGN (1ULL << KASAN_SHADOW_SCALE_SHIFT)
#elif defined(CONFIG_KASAN_HW_TAGS)
@@ -66,6 +45,12 @@ static inline unsigned int arch_slab_minalign(void)
#define arch_slab_minalign() arch_slab_minalign()
#endif
+#define CTR_CACHE_MINLINE_MASK \
+ (0xf << CTR_EL0_DMINLINE_SHIFT | \
+ CTR_EL0_IMINLINE_MASK << CTR_EL0_IMINLINE_SHIFT)
+
+#define CTR_L1IP(ctr) SYS_FIELD_GET(CTR_EL0, L1Ip, ctr)
+
#define ICACHEF_ALIASING 0
#define ICACHEF_VPIPT 1
extern unsigned long __icache_flags;
@@ -86,7 +71,7 @@ static __always_inline int icache_is_vpipt(void)
static inline u32 cache_type_cwg(void)
{
- return (read_cpuid_cachetype() >> CTR_CWG_SHIFT) & CTR_CWG_MASK;
+ return (read_cpuid_cachetype() >> CTR_EL0_CWG_SHIFT) & CTR_EL0_CWG_MASK;
}
#define __read_mostly __section(".data..read_mostly")
@@ -120,12 +105,12 @@ static inline u32 __attribute_const__ read_cpuid_effective_cachetype(void)
{
u32 ctr = read_cpuid_cachetype();
- if (!(ctr & BIT(CTR_IDC_SHIFT))) {
+ if (!(ctr & BIT(CTR_EL0_IDC_SHIFT))) {
u64 clidr = read_sysreg(clidr_el1);
if (CLIDR_LOC(clidr) == 0 ||
(CLIDR_LOUIS(clidr) == 0 && CLIDR_LOUU(clidr) == 0))
- ctr |= BIT(CTR_IDC_SHIFT);
+ ctr |= BIT(CTR_EL0_IDC_SHIFT);
}
return ctr;
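
Only the spellings change in this hunk (the CTR_* fields now come from the generated CTR_EL0 definitions in sysreg), not the semantics: CTR_EL0.CWG still encodes log2 of the writeback granule in 4-byte words. A hypothetical helper showing how the field is interpreted:

#include <asm/cache.h>

/* Hypothetical: writeback granule in bytes; 0 if the CPU reports none. */
static inline unsigned int cwg_bytes(void)
{
        u32 cwg = cache_type_cwg();

        return cwg ? 4U << cwg : 0;
}
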
diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
index 5a228e203ef9..37185e978aeb 100644
--- a/arch/arm64/include/asm/cacheflush.h
+++ b/arch/arm64/include/asm/cacheflush.h
@@ -105,13 +105,6 @@ static inline void flush_icache_range(unsigned long start, unsigned long end)
#define flush_icache_range flush_icache_range
/*
- * Cache maintenance functions used by the DMA API. No to be used directly.
- */
-extern void __dma_map_area(const void *, size_t, int);
-extern void __dma_unmap_area(const void *, size_t, int);
-extern void __dma_flush_area(const void *, size_t);
-
-/*
* Copy user data from/to a page which is mapped into a different
* processes address space. Really, we want to allow our "user
* space" model to handle this.
diff --git a/arch/arm64/include/asm/cpu.h b/arch/arm64/include/asm/cpu.h
index 115cdec1ae87..fd7a92219eea 100644
--- a/arch/arm64/include/asm/cpu.h
+++ b/arch/arm64/include/asm/cpu.h
@@ -46,6 +46,7 @@ struct cpuinfo_arm64 {
u64 reg_midr;
u64 reg_revidr;
u64 reg_gmid;
+ u64 reg_smidr;
u64 reg_id_aa64dfr0;
u64 reg_id_aa64dfr1;
diff --git a/arch/arm64/include/asm/cpu_ops.h b/arch/arm64/include/asm/cpu_ops.h
index e95c4df83911..a444c8915e88 100644
--- a/arch/arm64/include/asm/cpu_ops.h
+++ b/arch/arm64/include/asm/cpu_ops.h
@@ -31,11 +31,6 @@
* @cpu_die: Makes a cpu leave the kernel. Must not fail. Called from the
* cpu being killed.
* @cpu_kill: Ensures a cpu has left the kernel. Called from another cpu.
- * @cpu_init_idle: Reads any data necessary to initialize CPU idle states for
- * a proposed logical id.
- * @cpu_suspend: Suspends a cpu and saves the required context. May fail owing
- * to wrong parameters or error conditions. Called from the
- * CPU being suspended. Must be called with IRQs disabled.
*/
struct cpu_operations {
const char *name;
@@ -49,10 +44,6 @@ struct cpu_operations {
void (*cpu_die)(unsigned int cpu);
int (*cpu_kill)(unsigned int cpu);
#endif
-#ifdef CONFIG_CPU_IDLE
- int (*cpu_init_idle)(unsigned int);
- int (*cpu_suspend)(unsigned long);
-#endif
};
int __init init_cpu_ops(int cpu);
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 14a8f3d93add..fd7d75a275f6 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -11,7 +11,7 @@
#include <asm/hwcap.h>
#include <asm/sysreg.h>
-#define MAX_CPU_FEATURES 64
+#define MAX_CPU_FEATURES 128
#define cpu_feature(x) KERNEL_HWCAP_ ## x
#ifndef __ASSEMBLY__
@@ -673,7 +673,7 @@ static inline bool supports_clearbhb(int scope)
isar2 = read_sanitised_ftr_reg(SYS_ID_AA64ISAR2_EL1);
return cpuid_feature_extract_unsigned_field(isar2,
- ID_AA64ISAR2_CLEARBHB_SHIFT);
+ ID_AA64ISAR2_EL1_BC_SHIFT);
}
const struct cpumask *system_32bit_el0_cpumask(void);
@@ -908,7 +908,10 @@ static inline unsigned int get_vmid_bits(u64 mmfr1)
}
extern struct arm64_ftr_override id_aa64mmfr1_override;
+extern struct arm64_ftr_override id_aa64pfr0_override;
extern struct arm64_ftr_override id_aa64pfr1_override;
+extern struct arm64_ftr_override id_aa64zfr0_override;
+extern struct arm64_ftr_override id_aa64smfr0_override;
extern struct arm64_ftr_override id_aa64isar1_override;
extern struct arm64_ftr_override id_aa64isar2_override;
diff --git a/arch/arm64/include/asm/cpuidle.h b/arch/arm64/include/asm/cpuidle.h
index 14a19d1141bd..2047713e097d 100644
--- a/arch/arm64/include/asm/cpuidle.h
+++ b/arch/arm64/include/asm/cpuidle.h
@@ -4,21 +4,6 @@
#include <asm/proc-fns.h>
-#ifdef CONFIG_CPU_IDLE
-extern int arm_cpuidle_init(unsigned int cpu);
-extern int arm_cpuidle_suspend(int index);
-#else
-static inline int arm_cpuidle_init(unsigned int cpu)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int arm_cpuidle_suspend(int index)
-{
- return -EOPNOTSUPP;
-}
-#endif
-
#ifdef CONFIG_ARM64_PSEUDO_NMI
#include <asm/arch_gicv3.h>
diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h
index ad55079abe47..439e2bc5d5d8 100644
--- a/arch/arm64/include/asm/efi.h
+++ b/arch/arm64/include/asm/efi.h
@@ -27,12 +27,9 @@ int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md);
__efi_fpsimd_begin(); \
})
+#undef arch_efi_call_virt
#define arch_efi_call_virt(p, f, args...) \
-({ \
- efi_##f##_t *__f; \
- __f = p->f; \
- __efi_rt_asm_wrapper(__f, #f, args); \
-})
+ __efi_rt_asm_wrapper((p)->f, #f, args)
#define arch_efi_call_virt_teardown() \
({ \
diff --git a/arch/arm64/include/asm/el2_setup.h b/arch/arm64/include/asm/el2_setup.h
index 34ceff08cac4..2630faa5bc08 100644
--- a/arch/arm64/include/asm/el2_setup.h
+++ b/arch/arm64/include/asm/el2_setup.h
@@ -129,64 +129,6 @@
msr cptr_el2, x0 // Disable copro. traps to EL2
.endm
-/* SVE register access */
-.macro __init_el2_nvhe_sve
- mrs x1, id_aa64pfr0_el1
- ubfx x1, x1, #ID_AA64PFR0_SVE_SHIFT, #4
- cbz x1, .Lskip_sve_\@
-
- bic x0, x0, #CPTR_EL2_TZ // Also disable SVE traps
- msr cptr_el2, x0 // Disable copro. traps to EL2
- isb
- mov x1, #ZCR_ELx_LEN_MASK // SVE: Enable full vector
- msr_s SYS_ZCR_EL2, x1 // length for EL1.
-.Lskip_sve_\@:
-.endm
-
-/* SME register access and priority mapping */
-.macro __init_el2_nvhe_sme
- mrs x1, id_aa64pfr1_el1
- ubfx x1, x1, #ID_AA64PFR1_SME_SHIFT, #4
- cbz x1, .Lskip_sme_\@
-
- bic x0, x0, #CPTR_EL2_TSM // Also disable SME traps
- msr cptr_el2, x0 // Disable copro. traps to EL2
- isb
-
- mrs x1, sctlr_el2
- orr x1, x1, #SCTLR_ELx_ENTP2 // Disable TPIDR2 traps
- msr sctlr_el2, x1
- isb
-
- mov x1, #0 // SMCR controls
-
- mrs_s x2, SYS_ID_AA64SMFR0_EL1
- ubfx x2, x2, #ID_AA64SMFR0_FA64_SHIFT, #1 // Full FP in SM?
- cbz x2, .Lskip_sme_fa64_\@
-
- orr x1, x1, SMCR_ELx_FA64_MASK
-.Lskip_sme_fa64_\@:
-
- orr x1, x1, #SMCR_ELx_LEN_MASK // Enable full SME vector
- msr_s SYS_SMCR_EL2, x1 // length for EL1.
-
- mrs_s x1, SYS_SMIDR_EL1 // Priority mapping supported?
- ubfx x1, x1, #SMIDR_EL1_SMPS_SHIFT, #1
- cbz x1, .Lskip_sme_\@
-
- msr_s SYS_SMPRIMAP_EL2, xzr // Make all priorities equal
-
- mrs x1, id_aa64mmfr1_el1 // HCRX_EL2 present?
- ubfx x1, x1, #ID_AA64MMFR1_HCX_SHIFT, #4
- cbz x1, .Lskip_sme_\@
-
- mrs_s x1, SYS_HCRX_EL2
- orr x1, x1, #HCRX_EL2_SMPME_MASK // Enable priority mapping
- msr_s SYS_HCRX_EL2, x1
-
-.Lskip_sme_\@:
-.endm
-
/* Disable any fine grained traps */
.macro __init_el2_fgt
mrs x1, id_aa64mmfr0_el1
@@ -250,8 +192,6 @@
__init_el2_hstr
__init_el2_nvhe_idregs
__init_el2_nvhe_cptr
- __init_el2_nvhe_sve
- __init_el2_nvhe_sme
__init_el2_fgt
__init_el2_nvhe_prepare_eret
.endm
diff --git a/arch/arm64/include/asm/fixmap.h b/arch/arm64/include/asm/fixmap.h
index daff882883f9..71ed5fdf718b 100644
--- a/arch/arm64/include/asm/fixmap.h
+++ b/arch/arm64/include/asm/fixmap.h
@@ -62,10 +62,12 @@ enum fixed_addresses {
#endif /* CONFIG_ACPI_APEI_GHES */
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+#ifdef CONFIG_RELOCATABLE
+ FIX_ENTRY_TRAMP_TEXT4, /* one extra slot for the data page */
+#endif
FIX_ENTRY_TRAMP_TEXT3,
FIX_ENTRY_TRAMP_TEXT2,
FIX_ENTRY_TRAMP_TEXT1,
- FIX_ENTRY_TRAMP_DATA,
#define TRAMP_VALIAS (__fix_to_virt(FIX_ENTRY_TRAMP_TEXT1))
#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
__end_of_permanent_fixed_addresses,
diff --git a/arch/arm64/include/asm/hwcap.h b/arch/arm64/include/asm/hwcap.h
index aa443d8f8cfb..cef4ae7a3d8b 100644
--- a/arch/arm64/include/asm/hwcap.h
+++ b/arch/arm64/include/asm/hwcap.h
@@ -85,7 +85,7 @@
#define KERNEL_HWCAP_PACA __khwcap_feature(PACA)
#define KERNEL_HWCAP_PACG __khwcap_feature(PACG)
-#define __khwcap2_feature(x) (const_ilog2(HWCAP2_ ## x) + 32)
+#define __khwcap2_feature(x) (const_ilog2(HWCAP2_ ## x) + 64)
#define KERNEL_HWCAP_DCPODP __khwcap2_feature(DCPODP)
#define KERNEL_HWCAP_SVE2 __khwcap2_feature(SVE2)
#define KERNEL_HWCAP_SVEAES __khwcap2_feature(SVEAES)
@@ -118,6 +118,7 @@
#define KERNEL_HWCAP_SME_F32F32 __khwcap2_feature(SME_F32F32)
#define KERNEL_HWCAP_SME_FA64 __khwcap2_feature(SME_FA64)
#define KERNEL_HWCAP_WFXT __khwcap2_feature(WFXT)
+#define KERNEL_HWCAP_EBF16 __khwcap2_feature(EBF16)
/*
* This yields a mask that user programs can use to figure out what
diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h
index 6aa2dc836db1..834bff720582 100644
--- a/arch/arm64/include/asm/insn.h
+++ b/arch/arm64/include/asm/insn.h
@@ -510,6 +510,9 @@ u32 aarch64_insn_gen_load_store_imm(enum aarch64_insn_register reg,
unsigned int imm,
enum aarch64_insn_size_type size,
enum aarch64_insn_ldst_type type);
+u32 aarch64_insn_gen_load_literal(unsigned long pc, unsigned long addr,
+ enum aarch64_insn_register reg,
+ bool is64bit);
u32 aarch64_insn_gen_load_store_pair(enum aarch64_insn_register reg1,
enum aarch64_insn_register reg2,
enum aarch64_insn_register base,
diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
index 3995652daf81..877495a0fd0c 100644
--- a/arch/arm64/include/asm/io.h
+++ b/arch/arm64/include/asm/io.h
@@ -91,7 +91,7 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
}
/* IO barriers */
-#define __iormb(v) \
+#define __io_ar(v) \
({ \
unsigned long tmp; \
\
@@ -108,39 +108,14 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
: "memory"); \
})
-#define __io_par(v) __iormb(v)
-#define __iowmb() dma_wmb()
-#define __iomb() dma_mb()
-
-/*
- * Relaxed I/O memory access primitives. These follow the Device memory
- * ordering rules but do not guarantee any ordering relative to Normal memory
- * accesses.
- */
-#define readb_relaxed(c) ({ u8 __r = __raw_readb(c); __r; })
-#define readw_relaxed(c) ({ u16 __r = le16_to_cpu((__force __le16)__raw_readw(c)); __r; })
-#define readl_relaxed(c) ({ u32 __r = le32_to_cpu((__force __le32)__raw_readl(c)); __r; })
-#define readq_relaxed(c) ({ u64 __r = le64_to_cpu((__force __le64)__raw_readq(c)); __r; })
-
-#define writeb_relaxed(v,c) ((void)__raw_writeb((v),(c)))
-#define writew_relaxed(v,c) ((void)__raw_writew((__force u16)cpu_to_le16(v),(c)))
-#define writel_relaxed(v,c) ((void)__raw_writel((__force u32)cpu_to_le32(v),(c)))
-#define writeq_relaxed(v,c) ((void)__raw_writeq((__force u64)cpu_to_le64(v),(c)))
-
-/*
- * I/O memory access primitives. Reads are ordered relative to any
- * following Normal memory access. Writes are ordered relative to any prior
- * Normal memory access.
- */
-#define readb(c) ({ u8 __v = readb_relaxed(c); __iormb(__v); __v; })
-#define readw(c) ({ u16 __v = readw_relaxed(c); __iormb(__v); __v; })
-#define readl(c) ({ u32 __v = readl_relaxed(c); __iormb(__v); __v; })
-#define readq(c) ({ u64 __v = readq_relaxed(c); __iormb(__v); __v; })
+#define __io_bw() dma_wmb()
+#define __io_br(v)
+#define __io_aw(v)
-#define writeb(v,c) ({ __iowmb(); writeb_relaxed((v),(c)); })
-#define writew(v,c) ({ __iowmb(); writew_relaxed((v),(c)); })
-#define writel(v,c) ({ __iowmb(); writel_relaxed((v),(c)); })
-#define writeq(v,c) ({ __iowmb(); writeq_relaxed((v),(c)); })
+/* arm64-specific, don't use in portable drivers */
+#define __iormb(v) __io_ar(v)
+#define __iowmb() __io_bw()
+#define __iomb() dma_mb()
/*
* I/O port access primitives.
@@ -163,13 +138,16 @@ extern void __memset_io(volatile void __iomem *, int, size_t);
/*
* I/O memory mapping functions.
*/
-extern void __iomem *__ioremap(phys_addr_t phys_addr, size_t size, pgprot_t prot);
-extern void iounmap(volatile void __iomem *addr);
-extern void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size);
-#define ioremap(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE))
-#define ioremap_wc(addr, size) __ioremap((addr), (size), __pgprot(PROT_NORMAL_NC))
-#define ioremap_np(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRnE))
+bool ioremap_allowed(phys_addr_t phys_addr, size_t size, unsigned long prot);
+#define ioremap_allowed ioremap_allowed
+
+#define _PAGE_IOREMAP PROT_DEVICE_nGnRE
+
+#define ioremap_wc(addr, size) \
+ ioremap_prot((addr), (size), PROT_NORMAL_NC)
+#define ioremap_np(addr, size) \
+ ioremap_prot((addr), (size), PROT_DEVICE_nGnRnE)
/*
* io{read,write}{16,32,64}be() macros
@@ -184,6 +162,15 @@ extern void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size);
#include <asm-generic/io.h>
+#define ioremap_cache ioremap_cache
+static inline void __iomem *ioremap_cache(phys_addr_t addr, size_t size)
+{
+ if (pfn_is_map_memory(__phys_to_pfn(addr)))
+ return (void __iomem *)__phys_to_virt(addr);
+
+ return ioremap_prot(addr, size, PROT_NORMAL);
+}
+
/*
* More restrictive address range checking than the default implementation
* (PHYS_OFFSET and PHYS_MASK taken into account).
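
The readX()/writeX() families and ioremap() itself disappear from this header because asm-generic/io.h now builds them from the __io_* barriers and _PAGE_IOREMAP defined above, while ioremap_cache() simply reuses the linear map for pfns that are part of memory. Driver-facing usage is unchanged; an illustrative, hypothetical example:

#include <linux/errno.h>
#include <linux/io.h>
#include <linux/sizes.h>

/* Hypothetical: map a device register block and read one register. */
static int example_read_reg(phys_addr_t phys_base, u32 *val)
{
        void __iomem *regs = ioremap(phys_base, SZ_4K); /* Device-nGnRE */

        if (!regs)
                return -ENOMEM;

        *val = readl(regs);     /* now provided by asm-generic/io.h */
        iounmap(regs);
        return 0;
}
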
diff --git a/arch/arm64/include/asm/kernel-pgtable.h b/arch/arm64/include/asm/kernel-pgtable.h
index 96dc0f7da258..02e59fa8f293 100644
--- a/arch/arm64/include/asm/kernel-pgtable.h
+++ b/arch/arm64/include/asm/kernel-pgtable.h
@@ -8,6 +8,7 @@
#ifndef __ASM_KERNEL_PGTABLE_H
#define __ASM_KERNEL_PGTABLE_H
+#include <asm/boot.h>
#include <asm/pgtable-hwdef.h>
#include <asm/sparsemem.h>
@@ -35,10 +36,8 @@
*/
#if ARM64_KERNEL_USES_PMD_MAPS
#define SWAPPER_PGTABLE_LEVELS (CONFIG_PGTABLE_LEVELS - 1)
-#define IDMAP_PGTABLE_LEVELS (ARM64_HW_PGTABLE_LEVELS(PHYS_MASK_SHIFT) - 1)
#else
#define SWAPPER_PGTABLE_LEVELS (CONFIG_PGTABLE_LEVELS)
-#define IDMAP_PGTABLE_LEVELS (ARM64_HW_PGTABLE_LEVELS(PHYS_MASK_SHIFT))
#endif
@@ -87,7 +86,14 @@
+ EARLY_PUDS((vstart), (vend)) /* each PUD needs a next level page table */ \
+ EARLY_PMDS((vstart), (vend))) /* each PMD needs a next level page table */
#define INIT_DIR_SIZE (PAGE_SIZE * EARLY_PAGES(KIMAGE_VADDR, _end))
-#define IDMAP_DIR_SIZE (IDMAP_PGTABLE_LEVELS * PAGE_SIZE)
+
+/* the initial ID map may need two extra pages if it needs to be extended */
+#if VA_BITS < 48
+#define INIT_IDMAP_DIR_SIZE ((INIT_IDMAP_DIR_PAGES + 2) * PAGE_SIZE)
+#else
+#define INIT_IDMAP_DIR_SIZE (INIT_IDMAP_DIR_PAGES * PAGE_SIZE)
+#endif
+#define INIT_IDMAP_DIR_PAGES EARLY_PAGES(KIMAGE_VADDR, _end + MAX_FDT_SIZE + SWAPPER_BLOCK_SIZE)
/* Initial memory map size */
#if ARM64_KERNEL_USES_PMD_MAPS
@@ -107,9 +113,11 @@
#define SWAPPER_PMD_FLAGS (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)
#if ARM64_KERNEL_USES_PMD_MAPS
-#define SWAPPER_MM_MMUFLAGS (PMD_ATTRINDX(MT_NORMAL) | SWAPPER_PMD_FLAGS)
+#define SWAPPER_RW_MMUFLAGS (PMD_ATTRINDX(MT_NORMAL) | SWAPPER_PMD_FLAGS)
+#define SWAPPER_RX_MMUFLAGS (SWAPPER_RW_MMUFLAGS | PMD_SECT_RDONLY)
#else
-#define SWAPPER_MM_MMUFLAGS (PTE_ATTRINDX(MT_NORMAL) | SWAPPER_PTE_FLAGS)
+#define SWAPPER_RW_MMUFLAGS (PTE_ATTRINDX(MT_NORMAL) | SWAPPER_PTE_FLAGS)
+#define SWAPPER_RX_MMUFLAGS (SWAPPER_RW_MMUFLAGS | PTE_RDONLY)
#endif
/*
diff --git a/arch/arm64/include/asm/kexec.h b/arch/arm64/include/asm/kexec.h
index 9839bfc163d7..559bfae26715 100644
--- a/arch/arm64/include/asm/kexec.h
+++ b/arch/arm64/include/asm/kexec.h
@@ -84,16 +84,30 @@ static inline void crash_setup_regs(struct pt_regs *newregs,
extern bool crash_is_nosave(unsigned long pfn);
extern void crash_prepare_suspend(void);
extern void crash_post_resume(void);
+
+void crash_free_reserved_phys_range(unsigned long begin, unsigned long end);
+#define crash_free_reserved_phys_range crash_free_reserved_phys_range
#else
static inline bool crash_is_nosave(unsigned long pfn) {return false; }
static inline void crash_prepare_suspend(void) {}
static inline void crash_post_resume(void) {}
#endif
+struct kimage;
+
#if defined(CONFIG_KEXEC_CORE)
void cpu_soft_restart(unsigned long el2_switch, unsigned long entry,
unsigned long arg0, unsigned long arg1,
unsigned long arg2);
+
+int machine_kexec_post_load(struct kimage *image);
+#define machine_kexec_post_load machine_kexec_post_load
+
+void arch_kexec_protect_crashkres(void);
+#define arch_kexec_protect_crashkres arch_kexec_protect_crashkres
+
+void arch_kexec_unprotect_crashkres(void);
+#define arch_kexec_unprotect_crashkres arch_kexec_unprotect_crashkres
#endif
#define ARCH_HAS_KIMAGE_ARCH
@@ -113,9 +127,9 @@ struct kimage_arch {
#ifdef CONFIG_KEXEC_FILE
extern const struct kexec_file_ops kexec_image_ops;
-struct kimage;
+int arch_kimage_file_post_load_cleanup(struct kimage *image);
+#define arch_kimage_file_post_load_cleanup arch_kimage_file_post_load_cleanup
-extern int arch_kimage_file_post_load_cleanup(struct kimage *image);
extern int load_other_segments(struct kimage *image,
unsigned long kernel_load_addr, unsigned long kernel_size,
char *initrd, unsigned long initrd_len,
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index 2e277f2ed671..53035763e48e 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -176,6 +176,22 @@ struct kvm_nvhe_init_params {
unsigned long vtcr;
};
+/*
+ * Used by the host in EL1 to dump the nVHE hypervisor backtrace on
+ * hyp_panic() in non-protected mode.
+ *
+ * @stack_base: hyp VA of the hyp_stack base.
+ * @overflow_stack_base: hyp VA of the hyp_overflow_stack base.
+ * @fp: hyp FP where the backtrace begins.
+ * @pc: hyp PC where the backtrace begins.
+ */
+struct kvm_nvhe_stacktrace_info {
+ unsigned long stack_base;
+ unsigned long overflow_stack_base;
+ unsigned long fp;
+ unsigned long pc;
+};
+
/* Translate a kernel address @ptr into its equivalent linear mapping */
#define kvm_ksym_ref(ptr) \
({ \
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index 0e66edd3aff2..9bdba47f7e14 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -473,9 +473,18 @@ static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
static __always_inline void kvm_incr_pc(struct kvm_vcpu *vcpu)
{
- vcpu->arch.flags |= KVM_ARM64_INCREMENT_PC;
+ WARN_ON(vcpu_get_flag(vcpu, PENDING_EXCEPTION));
+ vcpu_set_flag(vcpu, INCREMENT_PC);
}
+#define kvm_pend_exception(v, e) \
+ do { \
+ WARN_ON(vcpu_get_flag((v), INCREMENT_PC)); \
+ vcpu_set_flag((v), PENDING_EXCEPTION); \
+ vcpu_set_flag((v), e); \
+ } while (0)
+
+
static inline bool vcpu_has_feature(struct kvm_vcpu *vcpu, int feature)
{
return test_bit(feature, vcpu->arch.features);
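
kvm_pend_exception() pairs the PENDING_EXCEPTION flag with the target exception encoding and asserts that no PC increment is queued at the same time. A hypothetical call, mirroring the intended use in the exception-injection code (EXCEPT_AA64_EL1_SYNC is one of the encodings defined in kvm_host.h below):

/* Hypothetical: queue a synchronous exception for the guest's EL1. */
static void example_inject_sync(struct kvm_vcpu *vcpu)
{
        kvm_pend_exception(vcpu, EXCEPT_AA64_EL1_SYNC);
}
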
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index de32152cea04..f38ef299f13b 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -325,8 +325,30 @@ struct kvm_vcpu_arch {
/* Exception Information */
struct kvm_vcpu_fault_info fault;
- /* Miscellaneous vcpu state flags */
- u64 flags;
+ /* Ownership of the FP regs */
+ enum {
+ FP_STATE_FREE,
+ FP_STATE_HOST_OWNED,
+ FP_STATE_GUEST_OWNED,
+ } fp_state;
+
+ /* Configuration flags, set once and for all before the vcpu can run */
+ u8 cflags;
+
+ /* Input flags to the hypervisor code, potentially cleared after use */
+ u8 iflags;
+
+ /* State flags for kernel bookkeeping, unused by the hypervisor code */
+ u8 sflags;
+
+ /*
+ * Don't run the guest (internal implementation need).
+ *
+ * Contrary to the flags above, this is set/cleared outside of
+ * a vcpu context, and thus cannot be mixed with the flags
+ * themselves (or the flag accesses need to be made atomic).
+ */
+ bool pause;
/*
* We maintain more than a single set of debug registers to support
@@ -376,9 +398,6 @@ struct kvm_vcpu_arch {
/* vcpu power state */
struct kvm_mp_state mp_state;
- /* Don't run the guest (internal implementation need) */
- bool pause;
-
/* Cache some mmu pages needed inside spinlock regions */
struct kvm_mmu_memory_cache mmu_page_cache;
@@ -392,10 +411,6 @@ struct kvm_vcpu_arch {
/* Additional reset state */
struct vcpu_reset_state reset_state;
- /* True when deferrable sysregs are loaded on the physical CPU,
- * see kvm_vcpu_load_sysregs_vhe and kvm_vcpu_put_sysregs_vhe. */
- bool sysregs_loaded_on_cpu;
-
/* Guest PV state */
struct {
u64 last_steal;
@@ -403,6 +418,124 @@ struct kvm_vcpu_arch {
} steal;
};
+/*
+ * Each 'flag' is composed of a comma-separated triplet:
+ *
+ * - the flag-set it belongs to in the vcpu->arch structure
+ * - the value for that flag
+ * - the mask for that flag
+ *
+ * __vcpu_single_flag() builds such a triplet for a single-bit flag.
+ * unpack_vcpu_flag() extracts the flag value from the triplet for
+ * direct use outside of the flag accessors.
+ */
+#define __vcpu_single_flag(_set, _f) _set, (_f), (_f)
+
+#define __unpack_flag(_set, _f, _m) _f
+#define unpack_vcpu_flag(...) __unpack_flag(__VA_ARGS__)
+
+#define __build_check_flag(v, flagset, f, m) \
+ do { \
+ typeof(v->arch.flagset) *_fset; \
+ \
+ /* Check that the flags fit in the mask */ \
+ BUILD_BUG_ON(HWEIGHT(m) != HWEIGHT((f) | (m))); \
+ /* Check that the flags fit in the type */ \
+ BUILD_BUG_ON((sizeof(*_fset) * 8) <= __fls(m)); \
+ } while (0)
+
+#define __vcpu_get_flag(v, flagset, f, m) \
+ ({ \
+ __build_check_flag(v, flagset, f, m); \
+ \
+ v->arch.flagset & (m); \
+ })
+
+#define __vcpu_set_flag(v, flagset, f, m) \
+ do { \
+ typeof(v->arch.flagset) *fset; \
+ \
+ __build_check_flag(v, flagset, f, m); \
+ \
+ fset = &v->arch.flagset; \
+ if (HWEIGHT(m) > 1) \
+ *fset &= ~(m); \
+ *fset |= (f); \
+ } while (0)
+
+#define __vcpu_clear_flag(v, flagset, f, m) \
+ do { \
+ typeof(v->arch.flagset) *fset; \
+ \
+ __build_check_flag(v, flagset, f, m); \
+ \
+ fset = &v->arch.flagset; \
+ *fset &= ~(m); \
+ } while (0)
+
+#define vcpu_get_flag(v, ...) __vcpu_get_flag((v), __VA_ARGS__)
+#define vcpu_set_flag(v, ...) __vcpu_set_flag((v), __VA_ARGS__)
+#define vcpu_clear_flag(v, ...) __vcpu_clear_flag((v), __VA_ARGS__)
+
+/* SVE exposed to guest */
+#define GUEST_HAS_SVE __vcpu_single_flag(cflags, BIT(0))
+/* SVE config completed */
+#define VCPU_SVE_FINALIZED __vcpu_single_flag(cflags, BIT(1))
+/* PTRAUTH exposed to guest */
+#define GUEST_HAS_PTRAUTH __vcpu_single_flag(cflags, BIT(2))
+
+/* Exception pending */
+#define PENDING_EXCEPTION __vcpu_single_flag(iflags, BIT(0))
+/*
+ * PC increment. Overlaps with EXCEPT_MASK on purpose so that it can't
+ * be set together with an exception...
+ */
+#define INCREMENT_PC __vcpu_single_flag(iflags, BIT(1))
+/* Target EL/MODE (not a single flag, but let's abuse the macro) */
+#define EXCEPT_MASK __vcpu_single_flag(iflags, GENMASK(3, 1))
+
+/* Helpers to encode exceptions with minimum fuss */
+#define __EXCEPT_MASK_VAL unpack_vcpu_flag(EXCEPT_MASK)
+#define __EXCEPT_SHIFT __builtin_ctzl(__EXCEPT_MASK_VAL)
+#define __vcpu_except_flags(_f) iflags, (_f << __EXCEPT_SHIFT), __EXCEPT_MASK_VAL
+
+/*
+ * When PENDING_EXCEPTION is set, EXCEPT_MASK can take the following
+ * values:
+ *
+ * For AArch32 EL1:
+ */
+#define EXCEPT_AA32_UND __vcpu_except_flags(0)
+#define EXCEPT_AA32_IABT __vcpu_except_flags(1)
+#define EXCEPT_AA32_DABT __vcpu_except_flags(2)
+/* For AArch64: */
+#define EXCEPT_AA64_EL1_SYNC __vcpu_except_flags(0)
+#define EXCEPT_AA64_EL1_IRQ __vcpu_except_flags(1)
+#define EXCEPT_AA64_EL1_FIQ __vcpu_except_flags(2)
+#define EXCEPT_AA64_EL1_SERR __vcpu_except_flags(3)
+/* For AArch64 with NV (one day): */
+#define EXCEPT_AA64_EL2_SYNC __vcpu_except_flags(4)
+#define EXCEPT_AA64_EL2_IRQ __vcpu_except_flags(5)
+#define EXCEPT_AA64_EL2_FIQ __vcpu_except_flags(6)
+#define EXCEPT_AA64_EL2_SERR __vcpu_except_flags(7)
+/* Guest debug is live */
+#define DEBUG_DIRTY __vcpu_single_flag(iflags, BIT(4))
+/* Save SPE context if active */
+#define DEBUG_STATE_SAVE_SPE __vcpu_single_flag(iflags, BIT(5))
+/* Save TRBE context if active */
+#define DEBUG_STATE_SAVE_TRBE __vcpu_single_flag(iflags, BIT(6))
+
+/* SVE enabled for host EL0 */
+#define HOST_SVE_ENABLED __vcpu_single_flag(sflags, BIT(0))
+/* SME enabled for EL0 */
+#define HOST_SME_ENABLED __vcpu_single_flag(sflags, BIT(1))
+/* Physical CPU not in supported_cpus */
+#define ON_UNSUPPORTED_CPU __vcpu_single_flag(sflags, BIT(2))
+/* WFIT instruction trapped */
+#define IN_WFIT __vcpu_single_flag(sflags, BIT(3))
+/* vcpu system registers loaded on physical CPU */
+#define SYSREGS_ON_CPU __vcpu_single_flag(sflags, BIT(4))
+
/* Pointer to the vcpu's SVE FFR for sve_{save,load}_state() */
#define vcpu_sve_pffr(vcpu) (kern_hyp_va((vcpu)->arch.sve_state) + \
sve_ffr_offset((vcpu)->arch.sve_max_vl))
@@ -423,70 +556,31 @@ struct kvm_vcpu_arch {
__size_ret; \
})
-/* vcpu_arch flags field values: */
-#define KVM_ARM64_DEBUG_DIRTY (1 << 0)
-#define KVM_ARM64_FP_ENABLED (1 << 1) /* guest FP regs loaded */
-#define KVM_ARM64_FP_HOST (1 << 2) /* host FP regs loaded */
-#define KVM_ARM64_HOST_SVE_ENABLED (1 << 4) /* SVE enabled for EL0 */
-#define KVM_ARM64_GUEST_HAS_SVE (1 << 5) /* SVE exposed to guest */
-#define KVM_ARM64_VCPU_SVE_FINALIZED (1 << 6) /* SVE config completed */
-#define KVM_ARM64_GUEST_HAS_PTRAUTH (1 << 7) /* PTRAUTH exposed to guest */
-#define KVM_ARM64_PENDING_EXCEPTION (1 << 8) /* Exception pending */
-/*
- * Overlaps with KVM_ARM64_EXCEPT_MASK on purpose so that it can't be
- * set together with an exception...
- */
-#define KVM_ARM64_INCREMENT_PC (1 << 9) /* Increment PC */
-#define KVM_ARM64_EXCEPT_MASK (7 << 9) /* Target EL/MODE */
-/*
- * When KVM_ARM64_PENDING_EXCEPTION is set, KVM_ARM64_EXCEPT_MASK can
- * take the following values:
- *
- * For AArch32 EL1:
- */
-#define KVM_ARM64_EXCEPT_AA32_UND (0 << 9)
-#define KVM_ARM64_EXCEPT_AA32_IABT (1 << 9)
-#define KVM_ARM64_EXCEPT_AA32_DABT (2 << 9)
-/* For AArch64: */
-#define KVM_ARM64_EXCEPT_AA64_ELx_SYNC (0 << 9)
-#define KVM_ARM64_EXCEPT_AA64_ELx_IRQ (1 << 9)
-#define KVM_ARM64_EXCEPT_AA64_ELx_FIQ (2 << 9)
-#define KVM_ARM64_EXCEPT_AA64_ELx_SERR (3 << 9)
-#define KVM_ARM64_EXCEPT_AA64_EL1 (0 << 11)
-#define KVM_ARM64_EXCEPT_AA64_EL2 (1 << 11)
-
-#define KVM_ARM64_DEBUG_STATE_SAVE_SPE (1 << 12) /* Save SPE context if active */
-#define KVM_ARM64_DEBUG_STATE_SAVE_TRBE (1 << 13) /* Save TRBE context if active */
-#define KVM_ARM64_FP_FOREIGN_FPSTATE (1 << 14)
-#define KVM_ARM64_ON_UNSUPPORTED_CPU (1 << 15) /* Physical CPU not in supported_cpus */
-#define KVM_ARM64_HOST_SME_ENABLED (1 << 16) /* SME enabled for EL0 */
-#define KVM_ARM64_WFIT (1 << 17) /* WFIT instruction trapped */
-
#define KVM_GUESTDBG_VALID_MASK (KVM_GUESTDBG_ENABLE | \
KVM_GUESTDBG_USE_SW_BP | \
KVM_GUESTDBG_USE_HW | \
KVM_GUESTDBG_SINGLESTEP)
#define vcpu_has_sve(vcpu) (system_supports_sve() && \
- ((vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_SVE))
+ vcpu_get_flag(vcpu, GUEST_HAS_SVE))
#ifdef CONFIG_ARM64_PTR_AUTH
#define vcpu_has_ptrauth(vcpu) \
((cpus_have_final_cap(ARM64_HAS_ADDRESS_AUTH) || \
cpus_have_final_cap(ARM64_HAS_GENERIC_AUTH)) && \
- (vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_PTRAUTH)
+ vcpu_get_flag(vcpu, GUEST_HAS_PTRAUTH))
#else
#define vcpu_has_ptrauth(vcpu) false
#endif
#define vcpu_on_unsupported_cpu(vcpu) \
- ((vcpu)->arch.flags & KVM_ARM64_ON_UNSUPPORTED_CPU)
+ vcpu_get_flag(vcpu, ON_UNSUPPORTED_CPU)
#define vcpu_set_on_unsupported_cpu(vcpu) \
- ((vcpu)->arch.flags |= KVM_ARM64_ON_UNSUPPORTED_CPU)
+ vcpu_set_flag(vcpu, ON_UNSUPPORTED_CPU)
#define vcpu_clear_on_unsupported_cpu(vcpu) \
- ((vcpu)->arch.flags &= ~KVM_ARM64_ON_UNSUPPORTED_CPU)
+ vcpu_clear_flag(vcpu, ON_UNSUPPORTED_CPU)
#define vcpu_gp_regs(v) (&(v)->arch.ctxt.regs)
@@ -620,8 +714,6 @@ int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu);
int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);
-int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
-int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
struct kvm_vcpu_events *events);
@@ -831,8 +923,7 @@ void kvm_init_protected_traps(struct kvm_vcpu *vcpu);
int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature);
bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu);
-#define kvm_arm_vcpu_sve_finalized(vcpu) \
- ((vcpu)->arch.flags & KVM_ARM64_VCPU_SVE_FINALIZED)
+#define kvm_arm_vcpu_sve_finalized(vcpu) vcpu_get_flag(vcpu, VCPU_SVE_FINALIZED)
#define kvm_has_mte(kvm) \
(system_supports_mte() && \
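
The single 64-bit flags word is replaced by three small, purpose-specific sets (cflags/iflags/sflags) plus an explicit fp_state and pause field, and the accessors take the (set, value, mask) triplets built by __vcpu_single_flag(). A hedged sketch of the accessor usage; the helper is illustrative only, not a call site from this series:

/* Hypothetical: typical flag manipulation with the new accessors. */
static void example_flags(struct kvm_vcpu *vcpu)
{
        /* Configuration flag, set once before the vcpu first runs. */
        vcpu_set_flag(vcpu, GUEST_HAS_SVE);

        /* Input flag to the hypervisor, cleared once it has been consumed. */
        if (vcpu_get_flag(vcpu, INCREMENT_PC))
                vcpu_clear_flag(vcpu, INCREMENT_PC);
}
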
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index ce8614fa376a..9dd08cd339c3 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -114,6 +114,14 @@
#define OVERFLOW_STACK_SIZE SZ_4K
/*
+ * With the minimum frame size of [x29, x30], exactly half the combined
+ * sizes of the hyp and overflow stacks is the maximum size needed to
+ * save the unwound stacktrace; plus an additional entry to delimit the
+ * end.
+ */
+#define NVHE_STACKTRACE_SIZE ((OVERFLOW_STACK_SIZE + PAGE_SIZE) / 2 + sizeof(long))
+
+/*
* Alignment of kernel segments (e.g. .text, .data).
*
* 4 KB granule: 16 level 3 entries, with contiguous bit
@@ -174,7 +182,11 @@
#include <linux/types.h>
#include <asm/bug.h>
+#if VA_BITS > 48
extern u64 vabits_actual;
+#else
+#define vabits_actual ((u64)VA_BITS)
+#endif
extern s64 memstart_addr;
/* PHYS_OFFSET - the physical address of the start of memory. */
@@ -351,6 +363,11 @@ static inline void *phys_to_virt(phys_addr_t x)
})
void dump_mem_limit(void);
+
+static inline bool defer_reserve_crashkernel(void)
+{
+ return IS_ENABLED(CONFIG_ZONE_DMA) || IS_ENABLED(CONFIG_ZONE_DMA32);
+}
#endif /* !ASSEMBLY */
/*
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index 6770667b34a3..c7ccd82db1d2 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -60,8 +60,7 @@ static inline void cpu_switch_mm(pgd_t *pgd, struct mm_struct *mm)
* TCR_T0SZ(VA_BITS), unless system RAM is positioned very high in
* physical memory, in which case it will be smaller.
*/
-extern u64 idmap_t0sz;
-extern u64 idmap_ptrs_per_pgd;
+extern int idmap_t0sz;
/*
* Ensure TCR.T0SZ is set to the provided value.
@@ -106,13 +105,18 @@ static inline void cpu_uninstall_idmap(void)
cpu_switch_mm(mm->pgd, mm);
}
-static inline void cpu_install_idmap(void)
+static inline void __cpu_install_idmap(pgd_t *idmap)
{
cpu_set_reserved_ttbr0();
local_flush_tlb_all();
cpu_set_idmap_tcr_t0sz();
- cpu_switch_mm(lm_alias(idmap_pg_dir), &init_mm);
+ cpu_switch_mm(lm_alias(idmap), &init_mm);
+}
+
+static inline void cpu_install_idmap(void)
+{
+ __cpu_install_idmap(idmap_pg_dir);
}
/*
@@ -143,7 +147,7 @@ static inline void cpu_install_ttbr0(phys_addr_t ttbr0, unsigned long t0sz)
* Atomically replaces the active TTBR1_EL1 PGD with a new VA-compatible PGD,
* avoiding the possibility of conflicting TLB entries being allocated.
*/
-static inline void __nocfi cpu_replace_ttbr1(pgd_t *pgdp)
+static inline void __nocfi cpu_replace_ttbr1(pgd_t *pgdp, pgd_t *idmap)
{
typedef void (ttbr_replace_func)(phys_addr_t);
extern ttbr_replace_func idmap_cpu_replace_ttbr1;
@@ -166,7 +170,7 @@ static inline void __nocfi cpu_replace_ttbr1(pgd_t *pgdp)
replace_phys = (void *)__pa_symbol(function_nocfi(idmap_cpu_replace_ttbr1));
- cpu_install_idmap();
+ __cpu_install_idmap(idmap);
replace_phys(ttbr1);
cpu_uninstall_idmap();
}
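
cpu_replace_ttbr1() now takes the ID map to install while TTBR1 is being switched, instead of hard-coding idmap_pg_dir. A hypothetical caller sketch (the real call sites are updated elsewhere in the series):

#include <linux/mm.h>
#include <asm/mmu_context.h>

/* Hypothetical: switch TTBR1 to the swapper page tables via the ID map. */
static void example_install_swapper(void)
{
        cpu_replace_ttbr1(lm_alias(swapper_pg_dir), idmap_pg_dir);
}
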
diff --git a/arch/arm64/include/asm/pci.h b/arch/arm64/include/asm/pci.h
index b33ca260e3c9..016eb6b46dc0 100644
--- a/arch/arm64/include/asm/pci.h
+++ b/arch/arm64/include/asm/pci.h
@@ -9,7 +9,6 @@
#include <asm/io.h>
#define PCIBIOS_MIN_IO 0x1000
-#define PCIBIOS_MIN_MEM 0
/*
* Set to 1 if the kernel should re-assign all PCI bus numbers
@@ -18,21 +17,8 @@
(pci_has_flag(PCI_REASSIGN_ALL_BUS))
#define arch_can_pci_mmap_wc() 1
-#define ARCH_GENERIC_PCI_MMAP_RESOURCE 1
-extern int isa_dma_bridge_buggy;
-
-#ifdef CONFIG_PCI
-static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
-{
- /* no legacy IRQ on arm64 */
- return -ENODEV;
-}
-
-static inline int pci_proc_domain(struct pci_bus *bus)
-{
- return 1;
-}
-#endif /* CONFIG_PCI */
+/* Generic PCI */
+#include <asm-generic/pci.h>
#endif /* __ASM_PCI_H */
diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index dd3d12bce07b..5ab8d163198f 100644
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -281,10 +281,9 @@
*/
#ifdef CONFIG_ARM64_PA_BITS_52
/*
- * This should be GENMASK_ULL(47, 2).
* TTBR_ELx[1] is RES0 in this configuration.
*/
-#define TTBR_BADDR_MASK_52 (((UL(1) << 46) - 1) << 2)
+#define TTBR_BADDR_MASK_52 GENMASK_ULL(47, 2)
#endif
#ifdef CONFIG_ARM64_VA_BITS_52
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 0b6632f18364..b5df82aa99e6 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -45,6 +45,12 @@
__flush_tlb_range(vma, addr, end, PUD_SIZE, false, 1)
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+static inline bool arch_thp_swp_supported(void)
+{
+ return !system_supports_mte();
+}
+#define arch_thp_swp_supported arch_thp_swp_supported
+
/*
* Outside of a few very special situations (e.g. hibernation), we always
* use broadcast TLB invalidation instructions, therefore a spurious page
@@ -427,6 +433,16 @@ static inline pte_t pte_swp_clear_exclusive(pte_t pte)
return clear_pte_bit(pte, __pgprot(PTE_SWP_EXCLUSIVE));
}
+/*
+ * Select all bits except the pfn
+ */
+static inline pgprot_t pte_pgprot(pte_t pte)
+{
+ unsigned long pfn = pte_pfn(pte);
+
+ return __pgprot(pte_val(pfn_pte(pfn, __pgprot(0))) ^ pte_val(pte));
+}
+
#ifdef CONFIG_NUMA_BALANCING
/*
* See the comment in include/linux/pgtable.h
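
pte_pgprot() isolates every bit of a PTE except the pfn, which makes it easy to reapply the attributes of an existing mapping to a different page. A short, hypothetical use:

/* Hypothetical: build a PTE for new_pfn with the attributes of pte. */
static inline pte_t example_retarget_pte(pte_t pte, unsigned long new_pfn)
{
        return pfn_pte(new_pfn, pte_pgprot(pte));
}
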
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index 9e58749db21d..86eb0bfe3b38 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -272,8 +272,9 @@ void tls_preserve_current_state(void);
static inline void start_thread_common(struct pt_regs *regs, unsigned long pc)
{
+ s32 previous_syscall = regs->syscallno;
memset(regs, 0, sizeof(*regs));
- forget_syscall(regs);
+ regs->syscallno = previous_syscall;
regs->pc = pc;
if (system_uses_irq_prio_masking())
diff --git a/arch/arm64/include/asm/stacktrace.h b/arch/arm64/include/asm/stacktrace.h
index aec9315bf156..6ebdcdff77f5 100644
--- a/arch/arm64/include/asm/stacktrace.h
+++ b/arch/arm64/include/asm/stacktrace.h
@@ -8,52 +8,20 @@
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
-#include <linux/types.h>
#include <linux/llist.h>
#include <asm/memory.h>
+#include <asm/pointer_auth.h>
#include <asm/ptrace.h>
#include <asm/sdei.h>
-enum stack_type {
- STACK_TYPE_UNKNOWN,
- STACK_TYPE_TASK,
- STACK_TYPE_IRQ,
- STACK_TYPE_OVERFLOW,
- STACK_TYPE_SDEI_NORMAL,
- STACK_TYPE_SDEI_CRITICAL,
- __NR_STACK_TYPES
-};
-
-struct stack_info {
- unsigned long low;
- unsigned long high;
- enum stack_type type;
-};
+#include <asm/stacktrace/common.h>
extern void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
const char *loglvl);
DECLARE_PER_CPU(unsigned long *, irq_stack_ptr);
-static inline bool on_stack(unsigned long sp, unsigned long size,
- unsigned long low, unsigned long high,
- enum stack_type type, struct stack_info *info)
-{
- if (!low)
- return false;
-
- if (sp < low || sp + size < sp || sp + size > high)
- return false;
-
- if (info) {
- info->low = low;
- info->high = high;
- info->type = type;
- }
- return true;
-}
-
static inline bool on_irq_stack(unsigned long sp, unsigned long size,
struct stack_info *info)
{
@@ -89,30 +57,4 @@ static inline bool on_overflow_stack(unsigned long sp, unsigned long size,
struct stack_info *info) { return false; }
#endif
-
-/*
- * We can only safely access per-cpu stacks from current in a non-preemptible
- * context.
- */
-static inline bool on_accessible_stack(const struct task_struct *tsk,
- unsigned long sp, unsigned long size,
- struct stack_info *info)
-{
- if (info)
- info->type = STACK_TYPE_UNKNOWN;
-
- if (on_task_stack(tsk, sp, size, info))
- return true;
- if (tsk != current || preemptible())
- return false;
- if (on_irq_stack(sp, size, info))
- return true;
- if (on_overflow_stack(sp, size, info))
- return true;
- if (on_sdei_stack(sp, size, info))
- return true;
-
- return false;
-}
-
#endif /* __ASM_STACKTRACE_H */
diff --git a/arch/arm64/include/asm/stacktrace/common.h b/arch/arm64/include/asm/stacktrace/common.h
new file mode 100644
index 000000000000..f58eb944c46f
--- /dev/null
+++ b/arch/arm64/include/asm/stacktrace/common.h
@@ -0,0 +1,199 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Common arm64 stack unwinder code.
+ *
+ * To implement a new arm64 stack unwinder:
+ * 1) Include this header
+ *
+ * 2) Call into unwind_next_common() from your top level unwind
+ * function, passing it the validation and translation callbacks
+ * (though the latter can be NULL if no translation is required).
+ *
+ * See: arch/arm64/kernel/stacktrace.c for the reference implementation.
+ *
+ * Copyright (C) 2012 ARM Ltd.
+ */
+#ifndef __ASM_STACKTRACE_COMMON_H
+#define __ASM_STACKTRACE_COMMON_H
+
+#include <linux/bitmap.h>
+#include <linux/bitops.h>
+#include <linux/kprobes.h>
+#include <linux/types.h>
+
+enum stack_type {
+ STACK_TYPE_UNKNOWN,
+ STACK_TYPE_TASK,
+ STACK_TYPE_IRQ,
+ STACK_TYPE_OVERFLOW,
+ STACK_TYPE_SDEI_NORMAL,
+ STACK_TYPE_SDEI_CRITICAL,
+ STACK_TYPE_HYP,
+ __NR_STACK_TYPES
+};
+
+struct stack_info {
+ unsigned long low;
+ unsigned long high;
+ enum stack_type type;
+};
+
+/*
+ * A snapshot of a frame record or fp/lr register values, along with some
+ * accounting information necessary for robust unwinding.
+ *
+ * @fp: The fp value in the frame record (or the real fp)
+ * @pc: The lr value in the frame record (or the real lr)
+ *
+ * @stacks_done: Stacks which have been entirely unwound, for which it is no
+ * longer valid to unwind to.
+ *
+ * @prev_fp: The fp that pointed to this frame record, or a synthetic value
+ * of 0. This is used to ensure that within a stack, each
+ * subsequent frame record is at an increasing address.
+ * @prev_type: The type of stack this frame record was on, or a synthetic
+ * value of STACK_TYPE_UNKNOWN. This is used to detect a
+ * transition from one stack to another.
+ *
+ * @kr_cur: When KRETPROBES is selected, holds the kretprobe instance
+ * associated with the most recently encountered replacement lr
+ * value.
+ *
+ * @task: The task being unwound.
+ */
+struct unwind_state {
+ unsigned long fp;
+ unsigned long pc;
+ DECLARE_BITMAP(stacks_done, __NR_STACK_TYPES);
+ unsigned long prev_fp;
+ enum stack_type prev_type;
+#ifdef CONFIG_KRETPROBES
+ struct llist_node *kr_cur;
+#endif
+ struct task_struct *task;
+};
+
+static inline bool on_stack(unsigned long sp, unsigned long size,
+ unsigned long low, unsigned long high,
+ enum stack_type type, struct stack_info *info)
+{
+ if (!low)
+ return false;
+
+ if (sp < low || sp + size < sp || sp + size > high)
+ return false;
+
+ if (info) {
+ info->low = low;
+ info->high = high;
+ info->type = type;
+ }
+ return true;
+}
+
+static inline void unwind_init_common(struct unwind_state *state,
+ struct task_struct *task)
+{
+ state->task = task;
+#ifdef CONFIG_KRETPROBES
+ state->kr_cur = NULL;
+#endif
+
+ /*
+ * Prime the first unwind.
+ *
+ * In unwind_next() we'll check that the FP points to a valid stack,
+ * which can't be STACK_TYPE_UNKNOWN, and the first unwind will be
+ * treated as a transition to whichever stack that happens to be. The
+ * prev_fp value won't be used, but we set it to 0 such that it is
+ * definitely not an accessible stack address.
+ */
+ bitmap_zero(state->stacks_done, __NR_STACK_TYPES);
+ state->prev_fp = 0;
+ state->prev_type = STACK_TYPE_UNKNOWN;
+}
+
+/*
+ * stack_trace_translate_fp_fn() - Translates a non-kernel frame pointer to
+ * a kernel address.
+ *
+ * @fp: the frame pointer to be updated to its kernel address.
+ * @type: the stack type associated with frame pointer @fp
+ *
+ * Returns true on success and @fp is updated to the corresponding
+ * kernel virtual address; otherwise returns false.
+ */
+typedef bool (*stack_trace_translate_fp_fn)(unsigned long *fp,
+ enum stack_type type);
+
+/*
+ * on_accessible_stack_fn() - Check whether a stack range is on any
+ * of the possible stacks.
+ *
+ * @tsk: task whose stack is being unwound
+ * @sp: stack address being checked
+ * @size: size of the stack range being checked
+ * @info: stack unwinding context
+ */
+typedef bool (*on_accessible_stack_fn)(const struct task_struct *tsk,
+ unsigned long sp, unsigned long size,
+ struct stack_info *info);
+
+static inline int unwind_next_common(struct unwind_state *state,
+ struct stack_info *info,
+ on_accessible_stack_fn accessible,
+ stack_trace_translate_fp_fn translate_fp)
+{
+ unsigned long fp = state->fp, kern_fp = fp;
+ struct task_struct *tsk = state->task;
+
+ if (fp & 0x7)
+ return -EINVAL;
+
+ if (!accessible(tsk, fp, 16, info))
+ return -EINVAL;
+
+ if (test_bit(info->type, state->stacks_done))
+ return -EINVAL;
+
+ /*
+ * If fp is not from the current address space perform the necessary
+ * translation before dereferencing it to get the next fp.
+ */
+ if (translate_fp && !translate_fp(&kern_fp, info->type))
+ return -EINVAL;
+
+ /*
+ * As stacks grow downward, any valid record on the same stack must be
+ * at a strictly higher address than the prior record.
+ *
+ * Stacks can nest in several valid orders, e.g.
+ *
+ * TASK -> IRQ -> OVERFLOW -> SDEI_NORMAL
+ * TASK -> SDEI_NORMAL -> SDEI_CRITICAL -> OVERFLOW
+ * HYP -> OVERFLOW
+ *
+ * ... but the nesting itself is strict. Once we transition from one
+ * stack to another, it's never valid to unwind back to that first
+ * stack.
+ */
+ if (info->type == state->prev_type) {
+ if (fp <= state->prev_fp)
+ return -EINVAL;
+ } else {
+ __set_bit(state->prev_type, state->stacks_done);
+ }
+
+ /*
+ * Record this frame record's values and location. The prev_fp and
+ * prev_type are only meaningful to the next unwind_next() invocation.
+ */
+ state->fp = READ_ONCE(*(unsigned long *)(kern_fp));
+ state->pc = READ_ONCE(*(unsigned long *)(kern_fp + 8));
+ state->prev_fp = fp;
+ state->prev_type = info->type;
+
+ return 0;
+}
+
+#endif /* __ASM_STACKTRACE_COMMON_H */
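Putting the pieces together, a rough sketch of the calling protocol (not the actual arm64 unwinder): prime the state with unwind_init_common(), seed fp/pc, then step with unwind_next_common() until it fails or the consumer is done. demo_on_accessible_stack() is the hypothetical callback sketched earlier, and passing a NULL translate_fp means frame pointers are dereferenced as-is:

	static void demo_unwind(struct task_struct *tsk, unsigned long fp,
				unsigned long pc,
				bool (*consume_fn)(unsigned long pc))
	{
		struct unwind_state state;
		struct stack_info info;

		unwind_init_common(&state, tsk);
		state.fp = fp;
		state.pc = pc;

		while (consume_fn(state.pc)) {
			if (unwind_next_common(&state, &info,
					       demo_on_accessible_stack, NULL))
				break;
		}
	}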
diff --git a/arch/arm64/include/asm/stacktrace/nvhe.h b/arch/arm64/include/asm/stacktrace/nvhe.h
new file mode 100644
index 000000000000..d5527b600390
--- /dev/null
+++ b/arch/arm64/include/asm/stacktrace/nvhe.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * KVM nVHE hypervisor stack tracing support.
+ *
+ * The unwinder implementation depends on the nVHE mode:
+ *
+ * 1) Non-protected nVHE mode - the host can directly access the
+ * HYP stack pages and unwind the HYP stack in EL1. This saves having
+ * to allocate shared buffers for the host to read the unwound
+ * stacktrace.
+ *
+ * 2) pKVM (protected nVHE) mode - the host cannot directly access
+ * the HYP memory. The stack is unwound in EL2 and dumped to a shared
+ * buffer where the host can read and print the stacktrace.
+ *
+ * Copyright (C) 2022 Google LLC
+ */
+#ifndef __ASM_STACKTRACE_NVHE_H
+#define __ASM_STACKTRACE_NVHE_H
+
+#include <asm/stacktrace/common.h>
+
+/*
+ * kvm_nvhe_unwind_init - Start an unwind from the given nVHE HYP fp and pc
+ *
+ * @state : unwind_state to initialize
+ * @fp : frame pointer at which to start the unwinding.
+ * @pc : program counter at which to start the unwinding.
+ */
+static inline void kvm_nvhe_unwind_init(struct unwind_state *state,
+ unsigned long fp,
+ unsigned long pc)
+{
+ unwind_init_common(state, NULL);
+
+ state->fp = fp;
+ state->pc = pc;
+}
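For example, a host-side dumper could seed an unwind from a saved hypervisor frame pointer and PC; hyp_fp/hyp_pc and the function name below are illustrative placeholders for values captured at panic time, not the actual dump path:

	static void demo_dump_hyp_backtrace(unsigned long hyp_fp,
					    unsigned long hyp_pc)
	{
		struct unwind_state state;

		/* hyp_fp/hyp_pc: HYP fp/pc captured at panic time. */
		kvm_nvhe_unwind_init(&state, hyp_fp, hyp_pc);

		/* ... then step through the HYP stack with the common unwinder. */
	}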
+
+#ifndef __KVM_NVHE_HYPERVISOR__
+/*
+ * Conventional (non-protected) nVHE HYP stack unwinder
+ *
+ * In non-protected mode, the unwinding is done from kernel proper context
+ * (by the host in EL1).
+ */
+
+DECLARE_KVM_NVHE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack);
+DECLARE_KVM_NVHE_PER_CPU(struct kvm_nvhe_stacktrace_info, kvm_stacktrace_info);
+DECLARE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
+
+void kvm_nvhe_dump_backtrace(unsigned long hyp_offset);
+
+#endif /* __KVM_NVHE_HYPERVISOR__ */
+#endif /* __ASM_STACKTRACE_NVHE_H */
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 42ff95dba6da..7c71358d44c4 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -192,8 +192,6 @@
#define SYS_ID_AA64PFR0_EL1 sys_reg(3, 0, 0, 4, 0)
#define SYS_ID_AA64PFR1_EL1 sys_reg(3, 0, 0, 4, 1)
-#define SYS_ID_AA64ZFR0_EL1 sys_reg(3, 0, 0, 4, 4)
-#define SYS_ID_AA64SMFR0_EL1 sys_reg(3, 0, 0, 4, 5)
#define SYS_ID_AA64DFR0_EL1 sys_reg(3, 0, 0, 5, 0)
#define SYS_ID_AA64DFR1_EL1 sys_reg(3, 0, 0, 5, 1)
@@ -201,9 +199,6 @@
#define SYS_ID_AA64AFR0_EL1 sys_reg(3, 0, 0, 5, 4)
#define SYS_ID_AA64AFR1_EL1 sys_reg(3, 0, 0, 5, 5)
-#define SYS_ID_AA64ISAR1_EL1 sys_reg(3, 0, 0, 6, 1)
-#define SYS_ID_AA64ISAR2_EL1 sys_reg(3, 0, 0, 6, 2)
-
#define SYS_ID_AA64MMFR0_EL1 sys_reg(3, 0, 0, 7, 0)
#define SYS_ID_AA64MMFR1_EL1 sys_reg(3, 0, 0, 7, 1)
#define SYS_ID_AA64MMFR2_EL1 sys_reg(3, 0, 0, 7, 2)
@@ -410,12 +405,6 @@
#define SYS_MAIR_EL1 sys_reg(3, 0, 10, 2, 0)
#define SYS_AMAIR_EL1 sys_reg(3, 0, 10, 3, 0)
-#define SYS_LORSA_EL1 sys_reg(3, 0, 10, 4, 0)
-#define SYS_LOREA_EL1 sys_reg(3, 0, 10, 4, 1)
-#define SYS_LORN_EL1 sys_reg(3, 0, 10, 4, 2)
-#define SYS_LORC_EL1 sys_reg(3, 0, 10, 4, 3)
-#define SYS_LORID_EL1 sys_reg(3, 0, 10, 4, 7)
-
#define SYS_VBAR_EL1 sys_reg(3, 0, 12, 0, 0)
#define SYS_DISR_EL1 sys_reg(3, 0, 12, 1, 1)
@@ -454,16 +443,12 @@
#define SYS_CNTKCTL_EL1 sys_reg(3, 0, 14, 1, 0)
#define SYS_CCSIDR_EL1 sys_reg(3, 1, 0, 0, 0)
-#define SYS_GMID_EL1 sys_reg(3, 1, 0, 0, 4)
#define SYS_AIDR_EL1 sys_reg(3, 1, 0, 0, 7)
#define SMIDR_EL1_IMPLEMENTER_SHIFT 24
#define SMIDR_EL1_SMPS_SHIFT 15
#define SMIDR_EL1_AFFINITY_SHIFT 0
-#define SYS_CTR_EL0 sys_reg(3, 3, 0, 0, 1)
-#define SYS_DCZID_EL0 sys_reg(3, 3, 0, 0, 7)
-
#define SYS_RNDR_EL0 sys_reg(3, 3, 2, 4, 0)
#define SYS_RNDRRS_EL0 sys_reg(3, 3, 2, 4, 1)
@@ -704,66 +689,6 @@
/* Position the attr at the correct index */
#define MAIR_ATTRIDX(attr, idx) ((attr) << ((idx) * 8))
-/* id_aa64isar1 */
-#define ID_AA64ISAR1_I8MM_SHIFT 52
-#define ID_AA64ISAR1_DGH_SHIFT 48
-#define ID_AA64ISAR1_BF16_SHIFT 44
-#define ID_AA64ISAR1_SPECRES_SHIFT 40
-#define ID_AA64ISAR1_SB_SHIFT 36
-#define ID_AA64ISAR1_FRINTTS_SHIFT 32
-#define ID_AA64ISAR1_GPI_SHIFT 28
-#define ID_AA64ISAR1_GPA_SHIFT 24
-#define ID_AA64ISAR1_LRCPC_SHIFT 20
-#define ID_AA64ISAR1_FCMA_SHIFT 16
-#define ID_AA64ISAR1_JSCVT_SHIFT 12
-#define ID_AA64ISAR1_API_SHIFT 8
-#define ID_AA64ISAR1_APA_SHIFT 4
-#define ID_AA64ISAR1_DPB_SHIFT 0
-
-#define ID_AA64ISAR1_APA_NI 0x0
-#define ID_AA64ISAR1_APA_ARCHITECTED 0x1
-#define ID_AA64ISAR1_APA_ARCH_EPAC 0x2
-#define ID_AA64ISAR1_APA_ARCH_EPAC2 0x3
-#define ID_AA64ISAR1_APA_ARCH_EPAC2_FPAC 0x4
-#define ID_AA64ISAR1_APA_ARCH_EPAC2_FPAC_CMB 0x5
-#define ID_AA64ISAR1_API_NI 0x0
-#define ID_AA64ISAR1_API_IMP_DEF 0x1
-#define ID_AA64ISAR1_API_IMP_DEF_EPAC 0x2
-#define ID_AA64ISAR1_API_IMP_DEF_EPAC2 0x3
-#define ID_AA64ISAR1_API_IMP_DEF_EPAC2_FPAC 0x4
-#define ID_AA64ISAR1_API_IMP_DEF_EPAC2_FPAC_CMB 0x5
-#define ID_AA64ISAR1_GPA_NI 0x0
-#define ID_AA64ISAR1_GPA_ARCHITECTED 0x1
-#define ID_AA64ISAR1_GPI_NI 0x0
-#define ID_AA64ISAR1_GPI_IMP_DEF 0x1
-
-/* id_aa64isar2 */
-#define ID_AA64ISAR2_CLEARBHB_SHIFT 28
-#define ID_AA64ISAR2_APA3_SHIFT 12
-#define ID_AA64ISAR2_GPA3_SHIFT 8
-#define ID_AA64ISAR2_RPRES_SHIFT 4
-#define ID_AA64ISAR2_WFXT_SHIFT 0
-
-#define ID_AA64ISAR2_RPRES_8BIT 0x0
-#define ID_AA64ISAR2_RPRES_12BIT 0x1
-/*
- * Value 0x1 has been removed from the architecture, and is
- * reserved, but has not yet been removed from the ARM ARM
- * as of ARM DDI 0487G.b.
- */
-#define ID_AA64ISAR2_WFXT_NI 0x0
-#define ID_AA64ISAR2_WFXT_SUPPORTED 0x2
-
-#define ID_AA64ISAR2_APA3_NI 0x0
-#define ID_AA64ISAR2_APA3_ARCHITECTED 0x1
-#define ID_AA64ISAR2_APA3_ARCH_EPAC 0x2
-#define ID_AA64ISAR2_APA3_ARCH_EPAC2 0x3
-#define ID_AA64ISAR2_APA3_ARCH_EPAC2_FPAC 0x4
-#define ID_AA64ISAR2_APA3_ARCH_EPAC2_FPAC_CMB 0x5
-
-#define ID_AA64ISAR2_GPA3_NI 0x0
-#define ID_AA64ISAR2_GPA3_ARCHITECTED 0x1
-
/* id_aa64pfr0 */
#define ID_AA64PFR0_CSV3_SHIFT 60
#define ID_AA64PFR0_CSV2_SHIFT 56
@@ -811,45 +736,6 @@
#define ID_AA64PFR1_MTE 0x2
#define ID_AA64PFR1_MTE_ASYMM 0x3
-/* id_aa64zfr0 */
-#define ID_AA64ZFR0_F64MM_SHIFT 56
-#define ID_AA64ZFR0_F32MM_SHIFT 52
-#define ID_AA64ZFR0_I8MM_SHIFT 44
-#define ID_AA64ZFR0_SM4_SHIFT 40
-#define ID_AA64ZFR0_SHA3_SHIFT 32
-#define ID_AA64ZFR0_BF16_SHIFT 20
-#define ID_AA64ZFR0_BITPERM_SHIFT 16
-#define ID_AA64ZFR0_AES_SHIFT 4
-#define ID_AA64ZFR0_SVEVER_SHIFT 0
-
-#define ID_AA64ZFR0_F64MM 0x1
-#define ID_AA64ZFR0_F32MM 0x1
-#define ID_AA64ZFR0_I8MM 0x1
-#define ID_AA64ZFR0_BF16 0x1
-#define ID_AA64ZFR0_SM4 0x1
-#define ID_AA64ZFR0_SHA3 0x1
-#define ID_AA64ZFR0_BITPERM 0x1
-#define ID_AA64ZFR0_AES 0x1
-#define ID_AA64ZFR0_AES_PMULL 0x2
-#define ID_AA64ZFR0_SVEVER_SVE2 0x1
-
-/* id_aa64smfr0 */
-#define ID_AA64SMFR0_FA64_SHIFT 63
-#define ID_AA64SMFR0_I16I64_SHIFT 52
-#define ID_AA64SMFR0_F64F64_SHIFT 48
-#define ID_AA64SMFR0_I8I32_SHIFT 36
-#define ID_AA64SMFR0_F16F32_SHIFT 35
-#define ID_AA64SMFR0_B16F32_SHIFT 34
-#define ID_AA64SMFR0_F32F32_SHIFT 32
-
-#define ID_AA64SMFR0_FA64 0x1
-#define ID_AA64SMFR0_I16I64 0xf
-#define ID_AA64SMFR0_F64F64 0x1
-#define ID_AA64SMFR0_I8I32 0xf
-#define ID_AA64SMFR0_F16F32 0x1
-#define ID_AA64SMFR0_B16F32 0x1
-#define ID_AA64SMFR0_F32F32 0x1
-
/* id_aa64mmfr0 */
#define ID_AA64MMFR0_ECV_SHIFT 60
#define ID_AA64MMFR0_FGT_SHIFT 56
@@ -902,6 +788,7 @@
/* id_aa64mmfr1 */
#define ID_AA64MMFR1_ECBHB_SHIFT 60
+#define ID_AA64MMFR1_TIDCP1_SHIFT 52
#define ID_AA64MMFR1_HCX_SHIFT 40
#define ID_AA64MMFR1_AFP_SHIFT 44
#define ID_AA64MMFR1_ETS_SHIFT 36
@@ -918,6 +805,9 @@
#define ID_AA64MMFR1_VMIDBITS_8 0
#define ID_AA64MMFR1_VMIDBITS_16 2
+#define ID_AA64MMFR1_TIDCP1_NI 0
+#define ID_AA64MMFR1_TIDCP1_IMP 1
+
/* id_aa64mmfr2 */
#define ID_AA64MMFR2_E0PD_SHIFT 60
#define ID_AA64MMFR2_EVT_SHIFT 56
@@ -1084,9 +974,6 @@
#define MVFR2_FPMISC_SHIFT 4
#define MVFR2_SIMDMISC_SHIFT 0
-#define DCZID_DZP_SHIFT 4
-#define DCZID_BS_SHIFT 0
-
#define CPACR_EL1_FPEN_EL1EN (BIT(20)) /* enable EL1 access */
#define CPACR_EL1_FPEN_EL0EN (BIT(21)) /* enable EL0 access, if EL1EN set */
@@ -1121,8 +1008,8 @@
#define SYS_RGSR_EL1_SEED_MASK 0xffffUL
/* GMID_EL1 field definitions */
-#define SYS_GMID_EL1_BS_SHIFT 0
-#define SYS_GMID_EL1_BS_SIZE 4
+#define GMID_EL1_BS_SHIFT 0
+#define GMID_EL1_BS_SIZE 4
/* TFSR{,E0}_EL1 bit definitions */
#define SYS_TFSR_EL1_TF0_SHIFT 0
@@ -1324,6 +1211,9 @@
#endif
+#define SYS_FIELD_GET(reg, field, val) \
+ FIELD_GET(reg##_##field##_MASK, val)
+
#define SYS_FIELD_PREP(reg, field, val) \
FIELD_PREP(reg##_##field##_MASK, val)
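As a hedged example of the new accessor: SYS_FIELD_GET() pastes the register and field names onto _MASK and hands the result to FIELD_GET(). The snippet assumes the generated ID_AA64ISAR1_EL1_APA_MASK definition from the converted sysreg descriptions is available:

	static inline unsigned int demo_read_apa(void)
	{
		u64 isar1 = read_sysreg_s(SYS_ID_AA64ISAR1_EL1);

		/* Expands to FIELD_GET(ID_AA64ISAR1_EL1_APA_MASK, isar1). */
		return SYS_FIELD_GET(ID_AA64ISAR1_EL1, APA, isar1);
	}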
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 63f9c828f1a7..2fc9f0861769 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -232,34 +232,34 @@ static inline void __user *__uaccess_mask_ptr(const void __user *ptr)
* The "__xxx_error" versions set the third argument to -EFAULT if an error
* occurs, and leave it unchanged on success.
*/
-#define __get_mem_asm(load, reg, x, addr, err) \
+#define __get_mem_asm(load, reg, x, addr, err, type) \
asm volatile( \
"1: " load " " reg "1, [%2]\n" \
"2:\n" \
- _ASM_EXTABLE_UACCESS_ERR_ZERO(1b, 2b, %w0, %w1) \
+ _ASM_EXTABLE_##type##ACCESS_ERR_ZERO(1b, 2b, %w0, %w1) \
: "+r" (err), "=&r" (x) \
: "r" (addr))
-#define __raw_get_mem(ldr, x, ptr, err) \
-do { \
- unsigned long __gu_val; \
- switch (sizeof(*(ptr))) { \
- case 1: \
- __get_mem_asm(ldr "b", "%w", __gu_val, (ptr), (err)); \
- break; \
- case 2: \
- __get_mem_asm(ldr "h", "%w", __gu_val, (ptr), (err)); \
- break; \
- case 4: \
- __get_mem_asm(ldr, "%w", __gu_val, (ptr), (err)); \
- break; \
- case 8: \
- __get_mem_asm(ldr, "%x", __gu_val, (ptr), (err)); \
- break; \
- default: \
- BUILD_BUG(); \
- } \
- (x) = (__force __typeof__(*(ptr)))__gu_val; \
+#define __raw_get_mem(ldr, x, ptr, err, type) \
+do { \
+ unsigned long __gu_val; \
+ switch (sizeof(*(ptr))) { \
+ case 1: \
+ __get_mem_asm(ldr "b", "%w", __gu_val, (ptr), (err), type); \
+ break; \
+ case 2: \
+ __get_mem_asm(ldr "h", "%w", __gu_val, (ptr), (err), type); \
+ break; \
+ case 4: \
+ __get_mem_asm(ldr, "%w", __gu_val, (ptr), (err), type); \
+ break; \
+ case 8: \
+ __get_mem_asm(ldr, "%x", __gu_val, (ptr), (err), type); \
+ break; \
+ default: \
+ BUILD_BUG(); \
+ } \
+ (x) = (__force __typeof__(*(ptr)))__gu_val; \
} while (0)
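The new `type` argument is pasted into the extable macro name, selecting the user (U) or kernel (K) fixup variant. As a sketch of the two call paths at the API level (the demo function and pointer names are illustrative):

	static int demo_reads(int __user *uptr)
	{
		static int kernel_val = 42;
		int uval, kval;

		/* User access: resolves to __raw_get_mem("ldtr", ..., U). */
		if (get_user(uval, uptr))
			return -EFAULT;

		/* Kernel access: resolves to __raw_get_mem("ldr", ..., K). */
		if (get_kernel_nofault(kval, &kernel_val))
			return -EFAULT;

		return uval + kval;
	}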
/*
@@ -274,7 +274,7 @@ do { \
__chk_user_ptr(ptr); \
\
uaccess_ttbr0_enable(); \
- __raw_get_mem("ldtr", __rgu_val, __rgu_ptr, err); \
+ __raw_get_mem("ldtr", __rgu_val, __rgu_ptr, err, U); \
uaccess_ttbr0_disable(); \
\
(x) = __rgu_val; \
@@ -314,40 +314,40 @@ do { \
\
__uaccess_enable_tco_async(); \
__raw_get_mem("ldr", *((type *)(__gkn_dst)), \
- (__force type *)(__gkn_src), __gkn_err); \
+ (__force type *)(__gkn_src), __gkn_err, K); \
__uaccess_disable_tco_async(); \
\
if (unlikely(__gkn_err)) \
goto err_label; \
} while (0)
-#define __put_mem_asm(store, reg, x, addr, err) \
+#define __put_mem_asm(store, reg, x, addr, err, type) \
asm volatile( \
"1: " store " " reg "1, [%2]\n" \
"2:\n" \
- _ASM_EXTABLE_UACCESS_ERR(1b, 2b, %w0) \
+ _ASM_EXTABLE_##type##ACCESS_ERR(1b, 2b, %w0) \
: "+r" (err) \
: "r" (x), "r" (addr))
-#define __raw_put_mem(str, x, ptr, err) \
-do { \
- __typeof__(*(ptr)) __pu_val = (x); \
- switch (sizeof(*(ptr))) { \
- case 1: \
- __put_mem_asm(str "b", "%w", __pu_val, (ptr), (err)); \
- break; \
- case 2: \
- __put_mem_asm(str "h", "%w", __pu_val, (ptr), (err)); \
- break; \
- case 4: \
- __put_mem_asm(str, "%w", __pu_val, (ptr), (err)); \
- break; \
- case 8: \
- __put_mem_asm(str, "%x", __pu_val, (ptr), (err)); \
- break; \
- default: \
- BUILD_BUG(); \
- } \
+#define __raw_put_mem(str, x, ptr, err, type) \
+do { \
+ __typeof__(*(ptr)) __pu_val = (x); \
+ switch (sizeof(*(ptr))) { \
+ case 1: \
+ __put_mem_asm(str "b", "%w", __pu_val, (ptr), (err), type); \
+ break; \
+ case 2: \
+ __put_mem_asm(str "h", "%w", __pu_val, (ptr), (err), type); \
+ break; \
+ case 4: \
+ __put_mem_asm(str, "%w", __pu_val, (ptr), (err), type); \
+ break; \
+ case 8: \
+ __put_mem_asm(str, "%x", __pu_val, (ptr), (err), type); \
+ break; \
+ default: \
+ BUILD_BUG(); \
+ } \
} while (0)
/*
@@ -362,7 +362,7 @@ do { \
__chk_user_ptr(__rpu_ptr); \
\
uaccess_ttbr0_enable(); \
- __raw_put_mem("sttr", __rpu_val, __rpu_ptr, err); \
+ __raw_put_mem("sttr", __rpu_val, __rpu_ptr, err, U); \
uaccess_ttbr0_disable(); \
} while (0)
@@ -400,7 +400,7 @@ do { \
\
__uaccess_enable_tco_async(); \
__raw_put_mem("str", *((type *)(__pkn_src)), \
- (__force type *)(__pkn_dst), __pkn_err); \
+ (__force type *)(__pkn_dst), __pkn_err, K); \
__uaccess_disable_tco_async(); \
\
if (unlikely(__pkn_err)) \
diff --git a/arch/arm64/include/asm/virt.h b/arch/arm64/include/asm/virt.h
index 0e80db4327b6..4eb601e7de50 100644
--- a/arch/arm64/include/asm/virt.h
+++ b/arch/arm64/include/asm/virt.h
@@ -36,9 +36,9 @@
#define HVC_RESET_VECTORS 2
/*
- * HVC_VHE_RESTART - Upgrade the CPU from EL1 to EL2, if possible
+ * HVC_FINALISE_EL2 - Upgrade the CPU from EL1 to EL2, if possible
*/
-#define HVC_VHE_RESTART 3
+#define HVC_FINALISE_EL2 3
/* Max number of HYP stub hypercalls */
#define HVC_STUB_HCALL_NR 4
@@ -49,6 +49,13 @@
#define BOOT_CPU_MODE_EL1 (0xe11)
#define BOOT_CPU_MODE_EL2 (0xe12)
+/*
+ * Flags returned together with the boot mode, but not preserved in
+ * __boot_cpu_mode. Used by the idreg override code to work out the
+ * boot state.
+ */
+#define BOOT_CPU_FLAG_E2H BIT_ULL(32)
+
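A hedged sketch of how such a flag could be consumed; boot_status stands in for whatever value the caller received alongside the boot mode, and is not the actual variable used by the idreg override code:

	static void demo_report_boot_mode(u64 boot_status)
	{
		/* boot_status: boot mode plus flags, as returned to the caller. */
		if (boot_status & BOOT_CPU_FLAG_E2H)
			pr_info("booted at EL2 with HCR_EL2.E2H set\n");
	}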
#ifndef __ASSEMBLY__
#include <asm/ptrace.h>