Diffstat (limited to 'arch/powerpc/include/asm')
22 files changed, 251 insertions, 175 deletions
diff --git a/arch/powerpc/include/asm/Kbuild b/arch/powerpc/include/asm/Kbuild
index 64870c7be4a3..148bee20e7e2 100644
--- a/arch/powerpc/include/asm/Kbuild
+++ b/arch/powerpc/include/asm/Kbuild
@@ -4,6 +4,7 @@ generated-y += syscall_table_64.h
 generated-y += syscall_table_c32.h
 generated-y += syscall_table_spu.h
 generic-y += div64.h
+generic-y += dma-mapping.h
 generic-y += export.h
 generic-y += irq_regs.h
 generic-y += local64.h
@@ -11,3 +12,4 @@ generic-y += mcs_spinlock.h
 generic-y += preempt.h
 generic-y += vtime.h
 generic-y += msi.h
+generic-y += early_ioremap.h
diff --git a/arch/powerpc/include/asm/book3s/64/pgalloc.h b/arch/powerpc/include/asm/book3s/64/pgalloc.h
index d5a44912902f..f6968c811026 100644
--- a/arch/powerpc/include/asm/book3s/64/pgalloc.h
+++ b/arch/powerpc/include/asm/book3s/64/pgalloc.h
@@ -122,11 +122,6 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
 static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
				   unsigned long address)
 {
-	/*
-	 * By now all the pud entries should be none entries. So go
-	 * ahead and flush the page walk cache
-	 */
-	flush_tlb_pgtable(tlb, address);
	pgtable_free_tlb(tlb, pud, PUD_INDEX);
 }
@@ -143,11 +138,6 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
 static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
				   unsigned long address)
 {
-	/*
-	 * By now all the pud entries should be none entries. So go
-	 * ahead and flush the page walk cache
-	 */
-	flush_tlb_pgtable(tlb, address);
	return pgtable_free_tlb(tlb, pmd, PMD_INDEX);
 }
@@ -166,11 +156,6 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
 static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
				   unsigned long address)
 {
-	/*
-	 * By now all the pud entries should be none entries. So go
-	 * ahead and flush the page walk cache
-	 */
-	flush_tlb_pgtable(tlb, address);
	pgtable_free_tlb(tlb, table, PTE_INDEX);
 }
diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush.h b/arch/powerpc/include/asm/book3s/64/tlbflush.h
index 7aa8195b6cff..dcb5c3839d2f 100644
--- a/arch/powerpc/include/asm/book3s/64/tlbflush.h
+++ b/arch/powerpc/include/asm/book3s/64/tlbflush.h
@@ -147,22 +147,6 @@ static inline void flush_tlb_fix_spurious_fault(struct vm_area_struct *vma,
	flush_tlb_page(vma, address);
 }
 
-/*
- * flush the page walk cache for the address
- */
-static inline void flush_tlb_pgtable(struct mmu_gather *tlb, unsigned long address)
-{
-	/*
-	 * Flush the page table walk cache on freeing a page table. We already
-	 * have marked the upper/higher level page table entry none by now.
-	 * So it is safe to flush PWC here.
-	 */
-	if (!radix_enabled())
-		return;
-
-	radix__flush_tlb_pwc(tlb, address);
-}
-
 extern bool tlbie_capable;
 extern bool tlbie_enabled;
diff --git a/arch/powerpc/include/asm/bug.h b/arch/powerpc/include/asm/bug.h
index f47e6ff6554d..338f36cd9934 100644
--- a/arch/powerpc/include/asm/bug.h
+++ b/arch/powerpc/include/asm/bug.h
@@ -49,6 +49,15 @@ ".previous\n"
 #endif
 
+#define BUG_ENTRY(insn, flags, ...)			\
+	__asm__ __volatile__(				\
+		"1:	" insn "\n"			\
+		_EMIT_BUG_ENTRY				\
+		: : "i" (__FILE__), "i" (__LINE__),	\
+		    "i" (flags),			\
+		    "i" (sizeof(struct bug_entry)),	\
+		    ##__VA_ARGS__)
+
 /*
  * BUG_ON() and WARN_ON() do their best to cooperate with compile-time
  * optimisations. However depending on the complexity of the condition
@@ -56,11 +65,7 @@ */
 #define BUG() do {						\
-	__asm__ __volatile__(					\
-		"1:	twi 31,0,0\n"				\
-		_EMIT_BUG_ENTRY					\
-		: : "i" (__FILE__), "i" (__LINE__),		\
-		    "i" (0), "i" (sizeof(struct bug_entry)));	\
+	BUG_ENTRY("twi 31, 0, 0", 0);				\
	unreachable();						\
 } while (0)
 
 #define BUG_ON(x) do {						\
	if (__builtin_constant_p(x)) {				\
@@ -69,23 +74,11 @@
		if (x)						\
			BUG();					\
	} else {						\
-		__asm__ __volatile__(				\
-		"1:	"PPC_TLNEI"	%4,0\n"			\
-		_EMIT_BUG_ENTRY					\
-		: : "i" (__FILE__), "i" (__LINE__), "i" (0),	\
-		    "i" (sizeof(struct bug_entry)),		\
-		    "r" ((__force long)(x)));			\
+		BUG_ENTRY(PPC_TLNEI " %4, 0", 0, "r" ((__force long)(x))); \
	}							\
 } while (0)
 
-#define __WARN_FLAGS(flags) do {				\
-	__asm__ __volatile__(					\
-		"1:	twi 31,0,0\n"				\
-		_EMIT_BUG_ENTRY					\
-		: : "i" (__FILE__), "i" (__LINE__),		\
-		    "i" (BUGFLAG_WARNING|(flags)),		\
-		    "i" (sizeof(struct bug_entry)));		\
-} while (0)
+#define __WARN_FLAGS(flags) BUG_ENTRY("twi 31, 0, 0", BUGFLAG_WARNING | (flags))
 
 #define WARN_ON(x) ({						\
	int __ret_warn_on = !!(x);				\
@@ -93,13 +86,9 @@
		if (__ret_warn_on)				\
			__WARN();				\
	} else {						\
-		__asm__ __volatile__(				\
-		"1:	"PPC_TLNEI"	%4,0\n"			\
-		_EMIT_BUG_ENTRY					\
-		: : "i" (__FILE__), "i" (__LINE__),		\
-		    "i" (BUGFLAG_WARNING|BUGFLAG_TAINT(TAINT_WARN)),\
-		    "i" (sizeof(struct bug_entry)),		\
-		    "r" (__ret_warn_on));			\
+		BUG_ENTRY(PPC_TLNEI " %4, 0",			\
+			  BUGFLAG_WARNING | BUGFLAG_TAINT(TAINT_WARN),	\
+			  "r" (__ret_warn_on));			\
	}							\
	unlikely(__ret_warn_on);				\
 })
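Reader's note: the BUG_ENTRY() refactor is behaviour-preserving. As a point of comparison, here is roughly what the new WARN_ON() else-branch expands back into, assuming the _EMIT_BUG_ENTRY string defined earlier in this header (this is just the pre-patch asm, reproduced for illustration):

	/* BUG_ENTRY(PPC_TLNEI " %4, 0",
	 *           BUGFLAG_WARNING | BUGFLAG_TAINT(TAINT_WARN),
	 *           "r" (__ret_warn_on))
	 * expands to approximately: */
	__asm__ __volatile__(
		"1:	" PPC_TLNEI " %4, 0\n"	/* trap if __ret_warn_on != 0 */
		_EMIT_BUG_ENTRY			/* record file/line/flags */
		: : "i" (__FILE__), "i" (__LINE__),
		    "i" (BUGFLAG_WARNING | BUGFLAG_TAINT(TAINT_WARN)),
		    "i" (sizeof(struct bug_entry)),
		    "r" (__ret_warn_on));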
diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
index 45e3137ccd71..72b81015cebe 100644
--- a/arch/powerpc/include/asm/cache.h
+++ b/arch/powerpc/include/asm/cache.h
@@ -55,42 +55,48 @@ struct ppc64_caches {
 
 extern struct ppc64_caches ppc64_caches;
 
-static inline u32 l1_cache_shift(void)
+static inline u32 l1_dcache_shift(void)
 {
	return ppc64_caches.l1d.log_block_size;
 }
 
-static inline u32 l1_cache_bytes(void)
+static inline u32 l1_dcache_bytes(void)
 {
	return ppc64_caches.l1d.block_size;
 }
+
+static inline u32 l1_icache_shift(void)
+{
+	return ppc64_caches.l1i.log_block_size;
+}
+
+static inline u32 l1_icache_bytes(void)
+{
+	return ppc64_caches.l1i.block_size;
+}
 #else
-static inline u32 l1_cache_shift(void)
+static inline u32 l1_dcache_shift(void)
 {
	return L1_CACHE_SHIFT;
 }
 
-static inline u32 l1_cache_bytes(void)
+static inline u32 l1_dcache_bytes(void)
 {
	return L1_CACHE_BYTES;
 }
+
+static inline u32 l1_icache_shift(void)
+{
+	return L1_CACHE_SHIFT;
+}
+
+static inline u32 l1_icache_bytes(void)
+{
+	return L1_CACHE_BYTES;
+}
+
 #endif
-#endif /* ! __ASSEMBLY__ */
-
-#if defined(__ASSEMBLY__)
-/*
- * For a snooping icache, we still need a dummy icbi to purge all the
- * prefetched instructions from the ifetch buffers. We also need a sync
- * before the icbi to order the actual stores to memory that might
- * have modified instructions with the icbi.
- */
-#define PURGE_PREFETCHED_INS	\
-	sync;			\
-	icbi	0,r3;		\
-	sync;			\
-	isync
-#else
 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
 
 #ifdef CONFIG_PPC_BOOK3S_32
@@ -124,6 +130,17 @@ static inline void dcbst(void *addr)
 {
	__asm__ __volatile__ ("dcbst 0, %0" : : "r"(addr) : "memory");
 }
+
+static inline void icbi(void *addr)
+{
+	asm volatile ("icbi 0, %0" : : "r"(addr) : "memory");
+}
+
+static inline void iccci(void *addr)
+{
+	asm volatile ("iccci 0, %0" : : "r"(addr) : "memory");
+}
+
 #endif /* !__ASSEMBLY__ */
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_CACHE_H */
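The split into l1_dcache_*() and l1_icache_*() helpers matters on cores where the two L1 caches have different block sizes. A sketch of the kind of loop the new helpers enable, modeled on the usual powerpc flush_icache_range() sequence — the function name is illustrative, not part of this patch:

	/* Illustrative only: write back modified d-cache blocks, then
	 * invalidate the matching i-cache blocks, using the per-cache
	 * geometry helpers and the icbi() accessor added above.
	 */
	static void example_flush_icache_range(unsigned long start, unsigned long stop)
	{
		unsigned long bytes = l1_dcache_bytes();
		unsigned long addr;

		for (addr = start & ~(bytes - 1); addr < stop; addr += bytes)
			dcbst((void *)addr);
		mb();	/* order the dcbst stores before the icbi */

		bytes = l1_icache_bytes();
		for (addr = start & ~(bytes - 1); addr < stop; addr += bytes)
			icbi((void *)addr);
		mb();
		isync();
	}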
diff --git a/arch/powerpc/include/asm/cacheflush.h b/arch/powerpc/include/asm/cacheflush.h
index eef388f2659f..4a1c9f0200e1 100644
--- a/arch/powerpc/include/asm/cacheflush.h
+++ b/arch/powerpc/include/asm/cacheflush.h
@@ -42,29 +42,25 @@ extern void flush_dcache_page(struct page *page);
 #define flush_dcache_mmap_lock(mapping)		do { } while (0)
 #define flush_dcache_mmap_unlock(mapping)	do { } while (0)
 
-extern void flush_icache_range(unsigned long, unsigned long);
+void flush_icache_range(unsigned long start, unsigned long stop);
 extern void flush_icache_user_range(struct vm_area_struct *vma,
				    struct page *page, unsigned long addr,
				    int len);
-extern void __flush_dcache_icache(void *page_va);
 extern void flush_dcache_icache_page(struct page *page);
-#if defined(CONFIG_PPC32) && !defined(CONFIG_BOOKE)
-extern void __flush_dcache_icache_phys(unsigned long physaddr);
-#else
-static inline void __flush_dcache_icache_phys(unsigned long physaddr)
-{
-	BUG();
-}
-#endif
-
-/*
- * Write any modified data cache blocks out to memory and invalidate them.
- * Does not invalidate the corresponding instruction cache blocks.
+void __flush_dcache_icache(void *page);
+
+/**
+ * flush_dcache_range(): Write any modified data cache blocks out to memory and
+ * invalidate them. Does not invalidate the corresponding instruction cache
+ * blocks.
+ *
+ * @start: the start address
+ * @stop: the stop address (exclusive)
  */
 static inline void flush_dcache_range(unsigned long start, unsigned long stop)
 {
-	unsigned long shift = l1_cache_shift();
-	unsigned long bytes = l1_cache_bytes();
+	unsigned long shift = l1_dcache_shift();
+	unsigned long bytes = l1_dcache_bytes();
	void *addr = (void *)(start & ~(bytes - 1));
	unsigned long size = stop - (unsigned long)addr + (bytes - 1);
	unsigned long i;
@@ -89,8 +85,8 @@ static inline void flush_dcache_range(unsigned long start, unsigned long stop)
  */
 static inline void clean_dcache_range(unsigned long start, unsigned long stop)
 {
-	unsigned long shift = l1_cache_shift();
-	unsigned long bytes = l1_cache_bytes();
+	unsigned long shift = l1_dcache_shift();
+	unsigned long bytes = l1_dcache_bytes();
	void *addr = (void *)(start & ~(bytes - 1));
	unsigned long size = stop - (unsigned long)addr + (bytes - 1);
	unsigned long i;
@@ -108,8 +104,8 @@ static inline void clean_dcache_range(unsigned long start, unsigned long stop)
 static inline void invalidate_dcache_range(unsigned long start,
					   unsigned long stop)
 {
-	unsigned long shift = l1_cache_shift();
-	unsigned long bytes = l1_cache_bytes();
+	unsigned long shift = l1_dcache_shift();
+	unsigned long bytes = l1_dcache_bytes();
	void *addr = (void *)(start & ~(bytes - 1));
	unsigned long size = stop - (unsigned long)addr + (bytes - 1);
	unsigned long i;
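The rounding arithmetic shared by these three helpers is easy to misread, so a worked example: with 128-byte cache blocks (shift = 7) and a request covering [0x1234, 0x1300):

	addr = 0x1234 & ~0x7f         = 0x1200  /* round down to a block boundary */
	size = 0x1300 - 0x1200 + 0x7f = 0x17f
	size >> shift                 = 2       /* blocks 0x1200 and 0x1280 */

so every byte up to (but excluding) the stop address falls inside a touched block, even when neither endpoint is block-aligned.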
diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
deleted file mode 100644
index 565d6f74b189..000000000000
--- a/arch/powerpc/include/asm/dma-mapping.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (C) 2004 IBM
- */
-#ifndef _ASM_DMA_MAPPING_H
-#define _ASM_DMA_MAPPING_H
-
-static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
-{
-	/* We don't handle the NULL dev case for ISA for now. We could
-	 * do it via an out of line call but it is not needed for now. The
-	 * only ISA DMA device we support is the floppy and we have a hack
-	 * in the floppy driver directly to get a device for us.
-	 */
-	return NULL;
-}
-
-#endif /* _ASM_DMA_MAPPING_H */
diff --git a/arch/powerpc/include/asm/fixmap.h b/arch/powerpc/include/asm/fixmap.h
index 0cfc365d814b..2ef155a3c821 100644
--- a/arch/powerpc/include/asm/fixmap.h
+++ b/arch/powerpc/include/asm/fixmap.h
@@ -15,6 +15,7 @@
 #define _ASM_FIXMAP_H
 
 #ifndef __ASSEMBLY__
+#include <linux/sizes.h>
 #include <asm/page.h>
 #include <asm/pgtable.h>
 #ifdef CONFIG_HIGHMEM
@@ -63,7 +64,22 @@ enum fixed_addresses {
	FIX_IMMR_BASE = __ALIGN_MASK(FIX_IMMR_START, FIX_IMMR_SIZE - 1) - 1 +
		       FIX_IMMR_SIZE,
 #endif
+#ifdef CONFIG_PPC_83xx
+	/* For IMMR we need an aligned 2M area */
+#define FIX_IMMR_SIZE	(SZ_2M / PAGE_SIZE)
+	FIX_IMMR_START,
+	FIX_IMMR_BASE = __ALIGN_MASK(FIX_IMMR_START, FIX_IMMR_SIZE - 1) - 1 +
+		       FIX_IMMR_SIZE,
+#endif
	/* FIX_PCIE_MCFG, */
+	__end_of_permanent_fixed_addresses,
+
+#define NR_FIX_BTMAPS		(SZ_256K / PAGE_SIZE)
+#define FIX_BTMAPS_SLOTS	16
+#define TOTAL_FIX_BTMAPS	(NR_FIX_BTMAPS * FIX_BTMAPS_SLOTS)
+
+	FIX_BTMAP_END = __end_of_permanent_fixed_addresses,
+	FIX_BTMAP_BEGIN = FIX_BTMAP_END + TOTAL_FIX_BTMAPS - 1,
	__end_of_fixed_addresses
 };
@@ -71,14 +87,22 @@ enum fixed_addresses {
 #define FIXADDR_START		(FIXADDR_TOP - __FIXADDR_SIZE)
 
 #define FIXMAP_PAGE_NOCACHE	PAGE_KERNEL_NCG
+#define FIXMAP_PAGE_IO		PAGE_KERNEL_NCG
 
 #include <asm-generic/fixmap.h>
 
 static inline void __set_fixmap(enum fixed_addresses idx,
				phys_addr_t phys, pgprot_t flags)
 {
-	map_kernel_page(fix_to_virt(idx), phys, flags);
+	if (__builtin_constant_p(idx))
+		BUILD_BUG_ON(idx >= __end_of_fixed_addresses);
+	else if (WARN_ON(idx >= __end_of_fixed_addresses))
+		return;
+
+	map_kernel_page(__fix_to_virt(idx), phys, flags);
 }
 
+#define __early_set_fixmap	__set_fixmap
+
 #endif /* !__ASSEMBLY__ */
 #endif
diff --git a/arch/powerpc/include/asm/hw_breakpoint.h b/arch/powerpc/include/asm/hw_breakpoint.h
index 67e2da195eae..27ac6f5d2891 100644
--- a/arch/powerpc/include/asm/hw_breakpoint.h
+++ b/arch/powerpc/include/asm/hw_breakpoint.h
@@ -14,6 +14,7 @@ struct arch_hw_breakpoint {
	unsigned long	address;
	u16		type;
	u16		len; /* length of the target data symbol */
+	u16		hw_len; /* length programmed in hw */
 };
 
 /* Note: Don't change the first 6 bits below as they are in the same order
@@ -33,6 +34,11 @@
 #define HW_BRK_TYPE_PRIV_ALL	(HW_BRK_TYPE_USER | HW_BRK_TYPE_KERNEL | \
				 HW_BRK_TYPE_HYP)
 
+#define HW_BREAKPOINT_ALIGN 0x7
+
+#define DABR_MAX_LEN	8
+#define DAWR_MAX_LEN	512
+
 #ifdef CONFIG_HAVE_HW_BREAKPOINT
 #include <linux/kdebug.h>
 #include <asm/reg.h>
@@ -44,8 +50,6 @@ struct pmu;
 struct perf_sample_data;
 struct task_struct;
 
-#define HW_BREAKPOINT_ALIGN 0x7
-
 extern int hw_breakpoint_slots(int type);
 extern int arch_bp_generic_fields(int type, int *gen_bp_type);
 extern int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw);
@@ -70,6 +74,7 @@ static inline void hw_breakpoint_disable(void)
	brk.address = 0;
	brk.type = 0;
	brk.len = 0;
+	brk.hw_len = 0;
	if (ppc_breakpoint_available())
		__set_breakpoint(&brk);
 }
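The new FIX_BTMAP_* slots back the generic early_ioremap() machinery (note the matching generic-y += early_ioremap.h in the Kbuild hunk above), giving boot code MMIO access before the normal ioremap() path is up. A hedged usage sketch — the physical address and register offset below are made up for illustration:

	void __iomem *regs;

	/* Early boot: the mapping lands in one of the FIX_BTMAPS slots
	 * defined above, via __early_set_fixmap()/__set_fixmap().
	 */
	regs = early_ioremap(0xf0000000, SZ_4K);	/* hypothetical MMIO base */
	if (regs) {
		pr_debug("id: %x\n", in_be32(regs + 0x10));	/* hypothetical offset */
		early_iounmap(regs, SZ_4K);
	}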
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index 32a18f2f49bc..e3a905e3d573 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -226,8 +226,8 @@ static inline bool arch_irqs_disabled(void)
 #endif /* CONFIG_PPC_BOOK3S */
 
 #ifdef CONFIG_PPC_BOOK3E
-#define __hard_irq_enable()	asm volatile("wrteei 1" : : : "memory")
-#define __hard_irq_disable()	asm volatile("wrteei 0" : : : "memory")
+#define __hard_irq_enable()	wrtee(MSR_EE)
+#define __hard_irq_disable()	wrtee(0)
 #else
 #define __hard_irq_enable()	__mtmsrd(MSR_EE|MSR_RI, 1)
 #define __hard_irq_disable()	__mtmsrd(MSR_RI, 1)
@@ -280,8 +280,6 @@ extern void force_external_irq_replay(void);
 
 #else /* CONFIG_PPC64 */
 
-#define SET_MSR_EE(x)	mtmsr(x)
-
 static inline unsigned long arch_local_save_flags(void)
 {
	return mfmsr();
@@ -289,47 +287,44 @@ static inline unsigned long arch_local_save_flags(void)
 
 static inline void arch_local_irq_restore(unsigned long flags)
 {
-#if defined(CONFIG_BOOKE)
-	asm volatile("wrtee %0" : : "r" (flags) : "memory");
-#else
-	mtmsr(flags);
-#endif
+	if (IS_ENABLED(CONFIG_BOOKE))
+		wrtee(flags);
+	else
+		mtmsr(flags);
 }
 
 static inline unsigned long arch_local_irq_save(void)
 {
	unsigned long flags = arch_local_save_flags();
-#ifdef CONFIG_BOOKE
-	asm volatile("wrteei 0" : : : "memory");
-#elif defined(CONFIG_PPC_8xx)
-	wrtspr(SPRN_EID);
-#else
-	SET_MSR_EE(flags & ~MSR_EE);
-#endif
+
+	if (IS_ENABLED(CONFIG_BOOKE))
+		wrtee(0);
+	else if (IS_ENABLED(CONFIG_PPC_8xx))
+		wrtspr(SPRN_EID);
+	else
+		mtmsr(flags & ~MSR_EE);
+
	return flags;
 }
 
 static inline void arch_local_irq_disable(void)
 {
-#ifdef CONFIG_BOOKE
-	asm volatile("wrteei 0" : : : "memory");
-#elif defined(CONFIG_PPC_8xx)
-	wrtspr(SPRN_EID);
-#else
-	arch_local_irq_save();
-#endif
+	if (IS_ENABLED(CONFIG_BOOKE))
+		wrtee(0);
+	else if (IS_ENABLED(CONFIG_PPC_8xx))
+		wrtspr(SPRN_EID);
+	else
+		mtmsr(mfmsr() & ~MSR_EE);
 }
 
 static inline void arch_local_irq_enable(void)
 {
-#ifdef CONFIG_BOOKE
-	asm volatile("wrteei 1" : : : "memory");
-#elif defined(CONFIG_PPC_8xx)
-	wrtspr(SPRN_EIE);
-#else
-	unsigned long msr = mfmsr();
-	SET_MSR_EE(msr | MSR_EE);
-#endif
+	if (IS_ENABLED(CONFIG_BOOKE))
+		wrtee(MSR_EE);
+	else if (IS_ENABLED(CONFIG_PPC_8xx))
+		wrtspr(SPRN_EIE);
+	else
+		mtmsr(mfmsr() | MSR_EE);
 }
 
 static inline bool arch_irqs_disabled_flags(unsigned long flags)
diff --git a/arch/powerpc/include/asm/nohash/32/kup-8xx.h b/arch/powerpc/include/asm/nohash/32/kup-8xx.h
index 1c3133b5f86a..1006a427e99c 100644
--- a/arch/powerpc/include/asm/nohash/32/kup-8xx.h
+++ b/arch/powerpc/include/asm/nohash/32/kup-8xx.h
@@ -3,6 +3,7 @@
 #define _ASM_POWERPC_KUP_8XX_H_
 
 #include <asm/bug.h>
+#include <asm/mmu.h>
 
 #ifdef CONFIG_PPC_KUAP
diff --git a/arch/powerpc/include/asm/nohash/mmu-book3e.h b/arch/powerpc/include/asm/nohash/mmu-book3e.h
index 4c9777d256fb..b41004664312 100644
--- a/arch/powerpc/include/asm/nohash/mmu-book3e.h
+++ b/arch/powerpc/include/asm/nohash/mmu-book3e.h
@@ -75,7 +75,6 @@
 #define MAS2_E			0x00000001
 #define MAS2_WIMGE_MASK		0x0000001f
 #define MAS2_EPN_MASK(size)		(~0 << (size + 10))
-#define MAS2_VAL(addr, size, flags)	((addr) & MAS2_EPN_MASK(size) | (flags))
 
 #define MAS3_RPN		0xFFFFF000
 #define MAS3_U0			0x00000200
@@ -221,6 +220,16 @@
 #define TLBILX_T_CLASS2			6
 #define TLBILX_T_CLASS3			7
 
+/*
+ * The mapping only needs to be cache-coherent on SMP, except on
+ * Freescale e500mc derivatives where it's also needed for coherent DMA.
+ */
+#if defined(CONFIG_SMP) || defined(CONFIG_PPC_E500MC)
+#define MAS2_M_IF_NEEDED	MAS2_M
+#else
+#define MAS2_M_IF_NEEDED	0
+#endif
+
 #ifndef __ASSEMBLY__
 #include <asm/bug.h>
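The #ifdef-to-IS_ENABLED() conversion in hw_irq.h means every branch is now parsed and type-checked in every configuration, while constant folding still reduces each helper to a single instruction. Illustratively, here is what the compiler effectively sees on a CONFIG_BOOKE=y, CONFIG_PPC_8xx=n build (wrtee() itself is added in the reg.h hunk below):

	static inline void arch_local_irq_disable(void)
	{
		if (1)				/* IS_ENABLED(CONFIG_BOOKE) */
			wrtee(0);		/* constant arg -> single "wrteei 0" */
		else if (0)			/* IS_ENABLED(CONFIG_PPC_8xx): dead code */
			wrtspr(SPRN_EID);
		else
			mtmsr(mfmsr() & ~MSR_EE);
	}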
diff --git a/arch/powerpc/include/asm/opal-api.h b/arch/powerpc/include/asm/opal-api.h
index 378e3997845a..c1f25a760eb1 100644
--- a/arch/powerpc/include/asm/opal-api.h
+++ b/arch/powerpc/include/asm/opal-api.h
@@ -211,7 +211,10 @@
 #define OPAL_MPIPL_UPDATE			173
 #define OPAL_MPIPL_REGISTER_TAG			174
 #define OPAL_MPIPL_QUERY_TAG			175
-#define OPAL_LAST				175
+#define OPAL_SECVAR_GET				176
+#define OPAL_SECVAR_GET_NEXT			177
+#define OPAL_SECVAR_ENQUEUE_UPDATE		178
+#define OPAL_LAST				178
 
 #define QUIESCE_HOLD			1 /* Spin all calls at entry */
 #define QUIESCE_REJECT			2 /* Fail all calls with OPAL_BUSY */
diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h
index a0cf8fba4d12..9986ac34b8e2 100644
--- a/arch/powerpc/include/asm/opal.h
+++ b/arch/powerpc/include/asm/opal.h
@@ -298,6 +298,13 @@ int opal_sensor_group_clear(u32 group_hndl, int token);
 int opal_sensor_group_enable(u32 group_hndl, int token, bool enable);
 int opal_nx_coproc_init(uint32_t chip_id, uint32_t ct);
 
+int opal_secvar_get(const char *key, uint64_t key_len, u8 *data,
+		    uint64_t *data_size);
+int opal_secvar_get_next(const char *key, uint64_t *key_len,
+			 uint64_t key_buf_size);
+int opal_secvar_enqueue_update(const char *key, uint64_t key_len, u8 *data,
+			       uint64_t data_size);
+
 s64 opal_mpipl_update(enum opal_mpipl_ops op, u64 src, u64 dest, u64 size);
 s64 opal_mpipl_register_tag(enum opal_mpipl_tags tag, u64 addr);
 s64 opal_mpipl_query_tag(enum opal_mpipl_tags tag, u64 *addr);
diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
index f6c562acc3f8..7f1fd41e3065 100644
--- a/arch/powerpc/include/asm/page.h
+++ b/arch/powerpc/include/asm/page.h
@@ -325,6 +325,13 @@ void arch_free_page(struct page *page, int order);
 
 struct vm_area_struct;
 
+extern unsigned long kernstart_virt_addr;
+
+static inline unsigned long kaslr_offset(void)
+{
+	return kernstart_virt_addr - KERNELBASE;
+}
+
 #include <asm-generic/memory_model.h>
 #endif /* __ASSEMBLY__ */
 #include <asm/slice.h>
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index 4053b2ab427c..0e4ec8cc37b7 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -157,13 +157,9 @@ static inline bool pgd_is_leaf(pgd_t pgd)
 #define is_ioremap_addr is_ioremap_addr
 static inline bool is_ioremap_addr(const void *x)
 {
-#ifdef CONFIG_MMU
	unsigned long addr = (unsigned long)x;
 
	return addr >= IOREMAP_BASE && addr < IOREMAP_END;
-#else
-	return false;
-#endif
 }
 #endif /* CONFIG_PPC64 */
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index 75c7e95a321b..1aa46dff0957 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -25,9 +25,7 @@
 #include <asm/reg_fsl_emb.h>
 #endif
 
-#ifdef CONFIG_PPC_8xx
 #include <asm/reg_8xx.h>
-#endif /* CONFIG_PPC_8xx */
 
 #define MSR_SF_LG	63              /* Enable 64 bit mode */
 #define MSR_ISF_LG	61              /* Interrupt 64b mode valid on 630 */
@@ -1382,6 +1380,14 @@ static inline void mtmsr_isync(unsigned long val)
 #define wrtspr(rn)	asm volatile("mtspr " __stringify(rn) ",0" : \
				     : : "memory")
 
+static inline void wrtee(unsigned long val)
+{
+	if (__builtin_constant_p(val))
+		asm volatile("wrteei %0" : : "i" ((val & MSR_EE) ? 1 : 0) : "memory");
+	else
+		asm volatile("wrtee %0" : : "r" (val) : "memory");
+}
+
 extern unsigned long msr_check_and_set(unsigned long bits);
 extern bool strict_msr_control;
 extern void __msr_check_and_clear(unsigned long bits);
@@ -1396,19 +1402,9 @@ static inline void msr_check_and_clear(unsigned long bits)
 #define mftb()		({unsigned long rval;				\
			asm volatile(					\
				"90:	mfspr %0, %2;\n"		\
-				"97:	cmpwi %0,0;\n"			\
-				"	beq- 90b;\n"			\
-				"99:\n"					\
-				".section __ftr_fixup,\"a\"\n"		\
-				".align 3\n"				\
-				"98:\n"					\
-				"	.8byte %1\n"			\
-				"	.8byte %1\n"			\
-				"	.8byte 97b-98b\n"		\
-				"	.8byte 99b-98b\n"		\
-				"	.8byte 0\n"			\
-				"	.8byte 0\n"			\
-				".previous"				\
+				ASM_FTR_IFSET(				\
+					"97:	cmpwi %0,0;\n"		\
+					"	beq- 90b;\n", "", %1)	\
				: "=r" (rval)				\
				: "i" (CPU_FTR_CELL_TB_BUG), "i" (SPRN_TBRL) : "cr0"); \
			rval;})
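kaslr_offset() is simply the delta between the randomized runtime base and the link-time KERNELBASE: if the kernel was relocated to KERNELBASE + 0x01a00000, that value is the offset. A sketch of the typical consumer (the reporting format here is illustrative):

	/* Illustrative: report the randomization, as an oops/panic
	 * notifier might, so crash dumps can be re-symbolized.
	 */
	if (kaslr_offset() > 0)
		pr_emerg("Kernel Offset: 0x%lx from 0x%lx\n",
			 kaslr_offset(), KERNELBASE);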
diff --git a/arch/powerpc/include/asm/reg_8xx.h b/arch/powerpc/include/asm/reg_8xx.h
index 7192eece6c3e..07df35ee8cbc 100644
--- a/arch/powerpc/include/asm/reg_8xx.h
+++ b/arch/powerpc/include/asm/reg_8xx.h
@@ -5,8 +5,6 @@
 #ifndef _ASM_POWERPC_REG_8xx_H
 #define _ASM_POWERPC_REG_8xx_H
 
-#include <asm/mmu.h>
-
 /* Cache control on the MPC8xx is provided through some additional
  * special purpose registers.
  */
@@ -38,7 +36,9 @@
 #define SPRN_CMPF	153
 #define SPRN_LCTRL1	156
 #define SPRN_LCTRL2	157
+#ifdef CONFIG_PPC_8xx
 #define SPRN_ICTRL	158
+#endif
 #define SPRN_BAR	159
 
 /* Commands. Only the first few are available to the instruction cache.
diff --git a/arch/powerpc/include/asm/sections.h b/arch/powerpc/include/asm/sections.h
index 5a9b6eb651b6..d19871763ed4 100644
--- a/arch/powerpc/include/asm/sections.h
+++ b/arch/powerpc/include/asm/sections.h
@@ -5,8 +5,22 @@
 #include <linux/elf.h>
 #include <linux/uaccess.h>
 
+#define arch_is_kernel_initmem_freed arch_is_kernel_initmem_freed
+
 #include <asm-generic/sections.h>
 
+extern bool init_mem_is_free;
+
+static inline int arch_is_kernel_initmem_freed(unsigned long addr)
+{
+	if (!init_mem_is_free)
+		return 0;
+
+	return addr >= (unsigned long)__init_begin &&
+		addr < (unsigned long)__init_end;
+}
+
 extern char __head_end[];
 
 #ifdef __powerpc64__
diff --git a/arch/powerpc/include/asm/secure_boot.h b/arch/powerpc/include/asm/secure_boot.h
new file mode 100644
index 000000000000..a2ff556916c6
--- /dev/null
+++ b/arch/powerpc/include/asm/secure_boot.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Secure boot definitions
+ *
+ * Copyright (C) 2019 IBM Corporation
+ * Author: Nayna Jain
+ */
+#ifndef _ASM_POWER_SECURE_BOOT_H
+#define _ASM_POWER_SECURE_BOOT_H
+
+#ifdef CONFIG_PPC_SECURE_BOOT
+
+bool is_ppc_secureboot_enabled(void);
+bool is_ppc_trustedboot_enabled(void);
+
+#else
+
+static inline bool is_ppc_secureboot_enabled(void)
+{
+	return false;
+}
+
+static inline bool is_ppc_trustedboot_enabled(void)
+{
+	return false;
+}
+
+#endif
+#endif
diff --git a/arch/powerpc/include/asm/security_features.h b/arch/powerpc/include/asm/security_features.h
index ccf44c135389..7c05e95a5c44 100644
--- a/arch/powerpc/include/asm/security_features.h
+++ b/arch/powerpc/include/asm/security_features.h
@@ -9,7 +9,7 @@
 #define _ASM_POWERPC_SECURITY_FEATURES_H
 
 
-extern unsigned long powerpc_security_features;
+extern u64 powerpc_security_features;
 extern bool rfi_flush;
 
 /* These are bit flags */
@@ -24,17 +24,17 @@ void setup_stf_barrier(void);
 void do_stf_barrier_fixups(enum stf_barrier_type types);
 void setup_count_cache_flush(void);
 
-static inline void security_ftr_set(unsigned long feature)
+static inline void security_ftr_set(u64 feature)
 {
	powerpc_security_features |= feature;
 }
 
-static inline void security_ftr_clear(unsigned long feature)
+static inline void security_ftr_clear(u64 feature)
 {
	powerpc_security_features &= ~feature;
 }
 
-static inline bool security_ftr_enabled(unsigned long feature)
+static inline bool security_ftr_enabled(u64 feature)
 {
	return !!(powerpc_security_features & feature);
 }
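arch_is_kernel_initmem_freed() lets generic code (the hook is picked up through asm-generic/sections.h) refuse addresses that pointed into __init memory once that memory has been freed and possibly reused. A sketch of the kind of check it enables — the helper name is illustrative:

	static bool example_text_addr_is_live(unsigned long addr)
	{
		/* Freed __init text may have been reallocated: reject it */
		if (arch_is_kernel_initmem_freed(addr))
			return false;

		return core_kernel_text(addr);
	}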
diff --git a/arch/powerpc/include/asm/secvar.h b/arch/powerpc/include/asm/secvar.h
new file mode 100644
index 000000000000..4cc35b58b986
--- /dev/null
+++ b/arch/powerpc/include/asm/secvar.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019 IBM Corporation
+ * Author: Nayna Jain
+ *
+ * PowerPC secure variable operations.
+ */
+#ifndef SECVAR_OPS_H
+#define SECVAR_OPS_H
+
+#include <linux/types.h>
+#include <linux/errno.h>
+
+extern const struct secvar_operations *secvar_ops;
+
+struct secvar_operations {
+	int (*get)(const char *key, uint64_t key_len, u8 *data,
+		   uint64_t *data_size);
+	int (*get_next)(const char *key, uint64_t *key_len,
+			uint64_t keybufsize);
+	int (*set)(const char *key, uint64_t key_len, u8 *data,
+		   uint64_t data_size);
+};
+
+#ifdef CONFIG_PPC_SECURE_BOOT
+
+extern void set_secvar_ops(const struct secvar_operations *ops);
+
+#else
+
+static inline void set_secvar_ops(const struct secvar_operations *ops) { }
+
+#endif
+
+#endif
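The intended wiring, sketched: platform code implements the three operations (for powernv, presumably via the opal_secvar_* calls declared in opal.h above) and registers the table once at boot; consumers then go through the secvar_ops pointer. The static function names below are illustrative, and the OPAL-to-errno return-code conversion a real backend would do is omitted:

	/* Hypothetical platform glue tying the OPAL call to the ops table */
	static int example_get(const char *key, uint64_t key_len, u8 *data,
			       uint64_t *data_size)
	{
		return opal_secvar_get(key, key_len, data, data_size);
	}

	static const struct secvar_operations example_secvar_ops = {
		.get = example_get,
		/* .get_next and .set follow the same pattern */
	};

	static int __init example_secvar_init(void)
	{
		set_secvar_ops(&example_secvar_ops);
		return 0;
	}
	machine_device_initcall(powernv, example_secvar_init);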