author    Will Deacon  2021-06-24 14:04:33 +0100
committer Will Deacon  2021-06-24 14:04:33 +0100
commit    81ad4bb1fe91d28d793d801e462a284c7f82cc40
tree      e837110a2618a5f0f35724bdff01b9c4af701ee6 (/arch/arm64/kernel)
parent    078834caafbfc0fcbe5a380ff3102ed6bb5d7012
parent    52218fcd61cb42bde0d301db4acb3ffdf3463cc7
Merge branch 'for-next/mm' into for-next/core
Lots of cleanup to our various page-table definitions, but also some
non-critical fixes and removal of some unnecessary memory types. The
most interesting change here is the reduction of ARCH_DMA_MINALIGN back
to 64 bytes, since we're not aware of any machines that need a higher
value with the way the code is structured (only needed for non-coherent
DMA).
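
To make the non-coherent DMA connection concrete: cache maintenance (clean/invalidate) operates on whole cache lines, so a buffer handed to the streaming DMA API must not share a cache line with unrelated data, and kmalloc() guarantees ARCH_DMA_MINALIGN alignment for exactly this reason. Below is a minimal, hypothetical driver-style sketch (the device and function names are made up; the DMA calls are the standard kernel API):

#include <linux/dma-mapping.h>
#include <linux/slab.h>

/* Hypothetical receive path: DMA a device buffer into kernel memory. */
static int foo_rx_one(struct device *dev, size_t len)
{
	dma_addr_t dma;
	/*
	 * kmalloc() returns ARCH_DMA_MINALIGN-aligned memory, so on a
	 * non-coherent system the buffer occupies its own cache line(s)
	 * and can be safely invalidated around the transfer without
	 * corrupting neighbouring allocations.
	 */
	void *buf = kmalloc(len, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;

	dma = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, dma)) {
		kfree(buf);
		return -ENOMEM;
	}

	/* ... program the device to DMA into 'dma' and wait ... */

	dma_unmap_single(dev, dma, len, DMA_FROM_DEVICE);
	kfree(buf);
	return 0;
}
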
* for-next/mm:
arm64: tlb: fix the TTL value of tlb_get_level
arm64/mm: Rename ARM64_SWAPPER_USES_SECTION_MAPS
arm64: head: fix code comments in set_cpu_boot_mode_flag
arm64: mm: drop unused __pa(__idmap_text_start)
arm64: mm: fix the count comments in compute_indices
arm64/mm: Fix ttbr0 values stored in struct thread_info for software-pan
arm64: mm: Pass original fault address to handle_mm_fault()
arm64/mm: Drop SECTION_[SHIFT|SIZE|MASK]
arm64/mm: Use CONT_PMD_SHIFT for ARM64_MEMSTART_SHIFT
arm64/mm: Drop SWAPPER_INIT_MAP_SIZE
arm64: mm: decode xFSC in mem_abort_decode()
arm64: mm: Add is_el1_data_abort() helper
arm64: cache: Lower ARCH_DMA_MINALIGN to 64 (L1_CACHE_BYTES)
arm64: mm: Remove unused support for Normal-WT memory type
arm64: acpi: Map EFI_MEMORY_WT memory as Normal-NC
arm64: mm: Remove unused support for Device-GRE memory type
arm64: mm: Use better bitmap_zalloc()
arm64/mm: Make vmemmap_free() available only with CONFIG_MEMORY_HOTPLUG
arm64/mm: Remove [PUD|PMD]_TABLE_BIT from [pud|pmd]_bad()
arm64/mm: Validate CONFIG_PGTABLE_LEVELS
Diffstat (limited to 'arch/arm64/kernel')
 arch/arm64/kernel/acpi.c  | 22 +++++++++++++++++-----
 arch/arm64/kernel/head.S  |  5 ++---
 arch/arm64/kernel/setup.c |  2 +-
 3 files changed, 20 insertions(+), 9 deletions(-)
diff --git a/arch/arm64/kernel/acpi.c b/arch/arm64/kernel/acpi.c
index cada0b816c8a..f3851724fe35 100644
--- a/arch/arm64/kernel/acpi.c
+++ b/arch/arm64/kernel/acpi.c
@@ -239,6 +239,18 @@ done:
 	}
 }
 
+static pgprot_t __acpi_get_writethrough_mem_attribute(void)
+{
+	/*
+	 * Although UEFI specifies the use of Normal Write-through for
+	 * EFI_MEMORY_WT, it is seldom used in practice and not implemented
+	 * by most (all?) CPUs. Rather than allocate a MAIR just for this
+	 * purpose, emit a warning and use Normal Non-cacheable instead.
+	 */
+	pr_warn_once("No MAIR allocation for EFI_MEMORY_WT; treating as Normal Non-cacheable\n");
+	return __pgprot(PROT_NORMAL_NC);
+}
+
 pgprot_t __acpi_get_mem_attribute(phys_addr_t addr)
 {
 	/*
@@ -246,7 +258,7 @@ pgprot_t __acpi_get_mem_attribute(phys_addr_t addr)
 	 * types" of UEFI 2.5 section 2.3.6.1, each EFI memory type is
 	 * mapped to a corresponding MAIR attribute encoding.
 	 * The EFI memory attribute advises all possible capabilities
-	 * of a memory region. We use the most efficient capability.
+	 * of a memory region.
 	 */
 	u64 attr;
 
@@ -254,10 +266,10 @@ pgprot_t __acpi_get_mem_attribute(phys_addr_t addr)
 	attr = efi_mem_attributes(addr);
 	if (attr & EFI_MEMORY_WB)
 		return PAGE_KERNEL;
-	if (attr & EFI_MEMORY_WT)
-		return __pgprot(PROT_NORMAL_WT);
 	if (attr & EFI_MEMORY_WC)
 		return __pgprot(PROT_NORMAL_NC);
+	if (attr & EFI_MEMORY_WT)
+		return __acpi_get_writethrough_mem_attribute();
 	return __pgprot(PROT_DEVICE_nGnRnE);
 }
 
@@ -340,10 +352,10 @@ void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
 		default:
 			if (region->attribute & EFI_MEMORY_WB)
 				prot = PAGE_KERNEL;
-			else if (region->attribute & EFI_MEMORY_WT)
-				prot = __pgprot(PROT_NORMAL_WT);
 			else if (region->attribute & EFI_MEMORY_WC)
 				prot = __pgprot(PROT_NORMAL_NC);
+			else if (region->attribute & EFI_MEMORY_WT)
+				prot = __acpi_get_writethrough_mem_attribute();
 		}
 	}
 	return __ioremap(phys, size, prot);
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index a6ccd6557d19..c5c994a73a64 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -196,7 +196,7 @@ SYM_CODE_END(preserve_boot_args)
 	and	\iend, \iend, \istart	// iend = (vend >> shift) & (ptrs - 1)
 	mov	\istart, \ptrs
 	mul	\istart, \istart, \count
-	add	\iend, \iend, \istart	// iend += (count - 1) * ptrs
+	add	\iend, \iend, \istart	// iend += count * ptrs
 					// our entries span multiple tables
 
 	lsr	\istart, \vstart, \shift
@@ -354,7 +354,6 @@ SYM_FUNC_START_LOCAL(__create_page_tables)
 #endif
 1:
 	ldr_l	x4, idmap_ptrs_per_pgd
-	mov	x5, x3			// __pa(__idmap_text_start)
 	adr_l	x6, __idmap_text_end	// __pa(__idmap_text_end)
 
 	map_memory x0, x1, x3, x6, x7, x3, x4, x10, x11, x12, x13, x14
@@ -568,7 +567,7 @@ SYM_FUNC_START_LOCAL(set_cpu_boot_mode_flag)
 	cmp	w0, #BOOT_CPU_MODE_EL2
 	b.ne	1f
 	add	x1, x1, #4
-1:	str	w0, [x1]		// This CPU has booted in EL1
+1:	str	w0, [x1]		// Save CPU boot mode
 	dmb	sy
 	dc	ivac, x1		// Invalidate potentially stale cache line
 	ret
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index b7a35a03e9b9..8ed66142b088 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -375,7 +375,7 @@ void __init __no_sanitize_address setup_arch(char **cmdline_p)
 	 * faults in case uaccess_enable() is inadvertently called by the init
 	 * thread.
 	 */
-	init_task.thread_info.ttbr0 = __pa_symbol(reserved_pg_dir);
+	init_task.thread_info.ttbr0 = phys_to_ttbr(__pa_symbol(reserved_pg_dir));
 #endif
 
 	if (boot_args[1] || boot_args[2] || boot_args[3]) {
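
A note on the setup.c hunk above: a TTBR system register is not simply a raw physical address. When CONFIG_ARM64_PA_BITS_52 is enabled, bits [51:48] of the table address are encoded in TTBR bits [5:2], which is why the fix wraps __pa_symbol(reserved_pg_dir) in phys_to_ttbr(). The sketch below is a standalone userspace model of the 52-bit case (the mask and shift mirror the kernel's phys_to_ttbr() and TTBR_BADDR_MASK_52 definitions; the sample address is invented):

#include <stdint.h>
#include <stdio.h>

/* TTBR baddr field occupies register bits [47:2]. */
#define TTBR_BADDR_MASK_52	((((uint64_t)1 << 46) - 1) << 2)

static uint64_t phys_to_ttbr_52(uint64_t pa)
{
	/* Fold PA bits [51:48] down into register bits [5:2] (51 - 46 = 5). */
	return (pa | (pa >> 46)) & TTBR_BADDR_MASK_52;
}

int main(void)
{
	uint64_t pa = 0x000F000012345000ULL;	/* sample PA with bits [51:48] set */

	printf("pa   = 0x%016llx\n", (unsigned long long)pa);
	printf("ttbr = 0x%016llx\n", (unsigned long long)phys_to_ttbr_52(pa));
	return 0;
}

With the sample address, the printed TTBR value is 0x000000001234503c: PA bits [51:48] have been folded down into bits [5:2]. Without CONFIG_ARM64_PA_BITS_52, phys_to_ttbr() is the identity mapping, which is why the old code appeared to work on most configurations.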