Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r--   arch/powerpc/mm/fsl_booke_mmu.c |  2
-rw-r--r--   arch/powerpc/mm/hash_low_64.S   |  4
-rw-r--r--   arch/powerpc/mm/hash_utils_64.c | 12
-rw-r--r--   arch/powerpc/mm/hugetlbpage.c   |  8
-rw-r--r--   arch/powerpc/mm/mem.c           | 14
-rw-r--r--   arch/powerpc/mm/numa.c          | 16
-rw-r--r--   arch/powerpc/mm/pgtable_64.c    | 10
-rw-r--r--   arch/powerpc/mm/slb.c           | 24
-rw-r--r--   arch/powerpc/mm/tlb_low_64e.S   | 10
9 files changed, 52 insertions, 48 deletions
diff --git a/arch/powerpc/mm/fsl_booke_mmu.c b/arch/powerpc/mm/fsl_booke_mmu.c
index 9c90e66cffb6..354ba3c09ef3 100644
--- a/arch/powerpc/mm/fsl_booke_mmu.c
+++ b/arch/powerpc/mm/fsl_booke_mmu.c
@@ -112,7 +112,7 @@ static void settlbcam(int index, unsigned long virt, phys_addr_t phys,
 	tsize = __ilog2(size) - 10;
 
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) || defined(CONFIG_PPC_E500MC)
 	if ((flags & _PAGE_NO_CACHE) == 0)
 		flags |= _PAGE_COHERENT;
 #endif
diff --git a/arch/powerpc/mm/hash_low_64.S b/arch/powerpc/mm/hash_low_64.S
index 463174a4a647..3b49e3295901 100644
--- a/arch/powerpc/mm/hash_low_64.S
+++ b/arch/powerpc/mm/hash_low_64.S
@@ -701,7 +701,7 @@ htab_pte_insert_failure:
 
 #endif /* CONFIG_PPC_64K_PAGES */
 
-#ifdef CONFIG_PPC_HAS_HASH_64K
+#ifdef CONFIG_PPC_64K_PAGES
 
 /*****************************************************************************
  *
@@ -993,7 +993,7 @@ ht64_pte_insert_failure:
 	b	ht64_bail
 
-#endif /* CONFIG_PPC_HAS_HASH_64K */
+#endif /* CONFIG_PPC_64K_PAGES */
 
 
 /*****************************************************************************
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 5ec987f65b2c..aee70171355b 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -640,7 +640,7 @@ extern u32 ht64_call_hpte_updatepp[];
 
 static void __init htab_finish_init(void)
 {
-#ifdef CONFIG_PPC_HAS_HASH_64K
+#ifdef CONFIG_PPC_64K_PAGES
 	patch_branch(ht64_call_hpte_insert1,
 		ppc_function_entry(ppc_md.hpte_insert),
 		BRANCH_SET_LINK);
@@ -653,7 +653,7 @@ static void __init htab_finish_init(void)
 	patch_branch(ht64_call_hpte_updatepp,
 		ppc_function_entry(ppc_md.hpte_updatepp),
 		BRANCH_SET_LINK);
-#endif /* CONFIG_PPC_HAS_HASH_64K */
+#endif /* CONFIG_PPC_64K_PAGES */
 
 	patch_branch(htab_call_hpte_insert1,
 		ppc_function_entry(ppc_md.hpte_insert),
@@ -1151,12 +1151,12 @@ int hash_page_mm(struct mm_struct *mm, unsigned long ea,
 		check_paca_psize(ea, mm, psize, user_region);
 #endif /* CONFIG_PPC_64K_PAGES */
 
-#ifdef CONFIG_PPC_HAS_HASH_64K
+#ifdef CONFIG_PPC_64K_PAGES
 	if (psize == MMU_PAGE_64K)
 		rc = __hash_page_64K(ea, access, vsid, ptep, trap,
 				     flags, ssize);
 	else
-#endif /* CONFIG_PPC_HAS_HASH_64K */
+#endif /* CONFIG_PPC_64K_PAGES */
 	{
 		int spp = subpage_protection(mm, ea);
 		if (access & spp)
@@ -1264,12 +1264,12 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
 		update_flags |= HPTE_LOCAL_UPDATE;
 
 	/* Hash it in */
-#ifdef CONFIG_PPC_HAS_HASH_64K
+#ifdef CONFIG_PPC_64K_PAGES
 	if (mm->context.user_psize == MMU_PAGE_64K)
 		rc = __hash_page_64K(ea, access, vsid, ptep, trap,
 				     update_flags, ssize);
 	else
-#endif /* CONFIG_PPC_HAS_HASH_64K */
+#endif /* CONFIG_PPC_64K_PAGES */
 		rc = __hash_page_4K(ea, access, vsid, ptep, trap, update_flags,
 				    ssize, subpage_protection(mm, ea));
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index bb0bd7025cb8..06c14523b787 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -808,14 +808,6 @@ static int __init add_huge_page_size(unsigned long long size)
 	if ((mmu_psize = shift_to_mmu_psize(shift)) < 0)
 		return -EINVAL;
 
-#ifdef CONFIG_SPU_FS_64K_LS
-	/* Disable support for 64K huge pages when 64K SPU local store
-	 * support is enabled as the current implementation conflicts.
-	 */
-	if (shift == PAGE_SHIFT_64K)
-		return -EINVAL;
-#endif /* CONFIG_SPU_FS_64K_LS */
-
 	BUG_ON(mmu_psize_defs[mmu_psize].shift != shift);
 
 	/* Return if huge page size has already been setup */
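Note: add_huge_page_size() above accepts a huge page size only if it is a power of two whose log2 maps to a supported MMU page size. A stand-alone sketch of that validation pattern follows; the shift_to_mmu_psize() table here is invented purely for illustration, since the kernel's real mmu_psize_defs[] is populated per platform.

	#include <stdio.h>

	/* Invented stand-in for the kernel's mmu_psize_defs[] lookup:
	 * map a page shift to an MMU page-size index, or -1 if the
	 * (hypothetical) MMU has no matching entry. */
	static int shift_to_mmu_psize(unsigned int shift)
	{
		switch (shift) {
		case 12: return 0;	/* 4K  */
		case 16: return 1;	/* 64K */
		case 24: return 2;	/* 16M */
		}
		return -1;
	}

	/* Mirrors the checks in add_huge_page_size(): reject sizes that
	 * are not a power of two or unknown to the MMU table. */
	static int add_huge_page_size(unsigned long long size)
	{
		unsigned int shift;

		if (size == 0)
			return -1;
		shift = __builtin_ctzll(size);
		if (size != (1ULL << shift))	/* not a power of two */
			return -1;		/* -EINVAL in the kernel */
		return shift_to_mmu_psize(shift) < 0 ? -1 : 0;
	}

	int main(void)
	{
		printf("16M -> %d\n", add_huge_page_size(16ULL << 20)); /* 0 */
		printf(" 3M -> %d\n", add_huge_page_size(3ULL << 20));	/* -1 */
		return 0;
	}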
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 0f11819d8f1d..e1fe333da946 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -414,17 +414,17 @@ void flush_dcache_icache_page(struct page *page)
 		return;
 	}
 #endif
-#ifdef CONFIG_BOOKE
-	{
+#if defined(CONFIG_8xx) || defined(CONFIG_PPC64)
+	/* On 8xx there is no need to kmap since highmem is not supported */
+	__flush_dcache_icache(page_address(page));
+#else
+	if (IS_ENABLED(CONFIG_BOOKE) || sizeof(phys_addr_t) > sizeof(void *)) {
 		void *start = kmap_atomic(page);
 		__flush_dcache_icache(start);
 		kunmap_atomic(start);
+	} else {
+		__flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT);
 	}
-#elif defined(CONFIG_8xx) || defined(CONFIG_PPC64)
-	/* On 8xx there is no need to kmap since highmem is not supported */
-	__flush_dcache_icache(page_address(page));
-#else
-	__flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT);
 #endif
 }
 EXPORT_SYMBOL(flush_dcache_icache_page);
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 5e80621d9324..8b9502adaf79 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -225,7 +225,7 @@ static void initialize_distance_lookup_table(int nid,
 	for (i = 0; i < distance_ref_points_depth; i++) {
 		const __be32 *entry;
 
-		entry = &associativity[be32_to_cpu(distance_ref_points[i])];
+		entry = &associativity[be32_to_cpu(distance_ref_points[i]) - 1];
 		distance_lookup_table[nid][i] = of_read_number(entry, 1);
 	}
 }
@@ -248,8 +248,12 @@ static int associativity_to_nid(const __be32 *associativity)
 		nid = -1;
 
 	if (nid > 0 &&
-	    of_read_number(associativity, 1) >= distance_ref_points_depth)
-		initialize_distance_lookup_table(nid, associativity);
+		of_read_number(associativity, 1) >= distance_ref_points_depth) {
+		/*
+		 * Skip the length field and send start of associativity array
+		 */
+		initialize_distance_lookup_table(nid, associativity + 1);
+	}
 
 out:
 	return nid;
@@ -507,6 +511,12 @@ static int of_drconf_to_nid_single(struct of_drconf_cell *drmem,
 
 		if (nid == 0xffff || nid >= MAX_NUMNODES)
 			nid = default_nid;
+
+		if (nid > 0) {
+			index = drmem->aa_index * aa->array_sz;
+			initialize_distance_lookup_table(nid,
+							&aa->arrays[index]);
+		}
 	}
 
 	return nid;
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index 876232d64126..e92cb2146b18 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -149,17 +149,7 @@ int map_kernel_page(unsigned long ea, unsigned long pa, int flags)
 #endif /* !CONFIG_PPC_MMU_NOHASH */
 	}
 
-#ifdef CONFIG_PPC_BOOK3E_64
-	/*
-	 * With hardware tablewalk, a sync is needed to ensure that
-	 * subsequent accesses see the PTE we just wrote. Unlike userspace
-	 * mappings, we can't tolerate spurious faults, so make sure
-	 * the new PTE will be seen the first time.
-	 */
-	mb();
-#else
 	smp_wmb();
-#endif
 	return 0;
 }
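Note: the flush_dcache_icache_page() rewrite in the mem.c hunk above replaces an #ifdef ladder with IS_ENABLED(), which evaluates to a compile-time constant 0 or 1, so both branches are parsed and type-checked while the dead one is optimized away. A minimal sketch of how that works, condensed from the kernel's include/linux/kconfig.h trick (the tristate =m module case is omitted here):

	#include <stdio.h>

	/* Options set in .config become "#define CONFIG_FOO 1". The
	 * placeholder trick turns "defined as 1" into the constant 1
	 * and anything else (including undefined) into 0. */
	#define __ARG_PLACEHOLDER_1 0,
	#define __take_second_arg(__ignored, val, ...) val
	#define __is_defined(x)			___is_defined(x)
	#define ___is_defined(val)		____is_defined(__ARG_PLACEHOLDER_##val)
	#define ____is_defined(arg1_or_junk)	__take_second_arg(arg1_or_junk 1, 0)
	#define IS_ENABLED(option)		__is_defined(option)

	#define CONFIG_BOOKE 1	/* pretend this came from .config */

	int main(void)
	{
		if (IS_ENABLED(CONFIG_BOOKE))
			puts("kmap + flush by virtual address");
		else
			puts("flush by physical address");
		if (IS_ENABLED(CONFIG_NOT_SET))
			puts("never printed; eliminated as dead code");
		return 0;
	}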
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index 6e450ca66526..8a32a2be3c53 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -41,9 +41,9 @@ static void slb_allocate(unsigned long ea)
 	(((ssize) == MMU_SEGSIZE_256M)? ESID_MASK: ESID_MASK_1T)
 
 static inline unsigned long mk_esid_data(unsigned long ea, int ssize,
-					 unsigned long slot)
+					 unsigned long entry)
 {
-	return (ea & slb_esid_mask(ssize)) | SLB_ESID_V | slot;
+	return (ea & slb_esid_mask(ssize)) | SLB_ESID_V | entry;
 }
 
 static inline unsigned long mk_vsid_data(unsigned long ea, int ssize,
@@ -249,11 +249,24 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
 static inline void patch_slb_encoding(unsigned int *insn_addr,
 				      unsigned int immed)
 {
-	int insn = (*insn_addr & 0xffff0000) | immed;
+
+	/*
+	 * This function patches either an li or a cmpldi instruction with
+	 * a new immediate value. This relies on the fact that both li
+	 * (which is actually addi) and cmpldi both take a 16-bit immediate
+	 * value, and it is situated in the same location in the instruction,
+	 * ie. bits 16-31 (Big endian bit order) or the lower 16 bits.
+	 * The signedness of the immediate operand differs between the two
+	 * instructions however this code is only ever patching a small value,
+	 * much less than 1 << 15, so we can get away with it.
+	 * To patch the value we read the existing instruction, clear the
+	 * immediate value, and or in our new value, then write the instruction
+	 * back.
+	 */
+	unsigned int insn = (*insn_addr & 0xffff0000) | immed;
 	patch_instruction(insn_addr, insn);
 }
 
-extern u32 slb_compare_rr_to_size[];
 extern u32 slb_miss_kernel_load_linear[];
 extern u32 slb_miss_kernel_load_io[];
 extern u32 slb_compare_rr_to_size[];
@@ -309,12 +322,11 @@ void slb_initialize(void)
 	lflags = SLB_VSID_KERNEL | linear_llp;
 	vflags = SLB_VSID_KERNEL | vmalloc_llp;
 
-	/* Invalidate the entire SLB (even slot 0) & all the ERATS */
+	/* Invalidate the entire SLB (even entry 0) & all the ERATS */
 	asm volatile("isync":::"memory");
 	asm volatile("slbmte	%0,%0"::"r" (0) : "memory");
 	asm volatile("isync; slbia; isync":::"memory");
 	create_shadowed_slbe(PAGE_OFFSET, mmu_kernel_ssize, lflags, 0);
-
 	create_shadowed_slbe(VMALLOC_START, mmu_kernel_ssize, vflags, 1);
 
 	/* For the boot cpu, we're running on the stack in init_thread_union,
diff --git a/arch/powerpc/mm/tlb_low_64e.S b/arch/powerpc/mm/tlb_low_64e.S
index 765b419883f2..e4185581c5a7 100644
--- a/arch/powerpc/mm/tlb_low_64e.S
+++ b/arch/powerpc/mm/tlb_low_64e.S
@@ -308,11 +308,11 @@ BEGIN_FTR_SECTION		/* CPU_FTR_SMT */
 	 *
 	 * MAS6:IND should be already set based on MAS4
 	 */
-1:	lbarx	r15,0,r11
 	lhz	r10,PACAPACAINDEX(r13)
-	cmpdi	r15,0
-	cmpdi	cr1,r15,1	/* set cr1.eq = 0 for non-recursive */
 	addi	r10,r10,1
+	crclr	cr1*4+eq	/* set cr1.eq = 0 for non-recursive */
+1:	lbarx	r15,0,r11
+	cmpdi	r15,0
 	bne	2f
 	stbcx.	r10,0,r11
 	bne	1b
@@ -320,9 +320,9 @@ BEGIN_FTR_SECTION		/* CPU_FTR_SMT */
 	.subsection 1
 2:	cmpd	cr1,r15,r10	/* recursive lock due to mcheck/crit/etc? */
 	beq	cr1,3b		/* unlock will happen if cr1.eq = 0 */
-	lbz	r15,0(r11)
+10:	lbz	r15,0(r11)
 	cmpdi	r15,0
-	bne	2b
+	bne	10b
 	b	1b
 	.previous
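Note: the comment added to patch_slb_encoding() above describes the whole trick: li and cmpldi both keep their 16-bit immediate in the low half of the instruction word, so runtime patching reduces to a mask-and-or. A stand-alone illustration of that bit manipulation follows; the instruction word is hand-encoded here just for the example, and unlike the kernel's patch_instruction() this does nothing about instruction-cache coherency.

	#include <stdio.h>
	#include <stdint.h>

	/* Same masking as patch_slb_encoding(): clear the 16-bit immediate
	 * field (bits 16-31 in big-endian numbering, i.e. the low 16 bits
	 * of the word) and or in the new value. */
	static uint32_t patch_immediate(uint32_t insn, uint16_t immed)
	{
		return (insn & 0xffff0000u) | immed;
	}

	int main(void)
	{
		uint32_t insn = 0x39400000u;	/* "li r10,0" (addi r10,0,0) */

		insn = patch_immediate(insn, 256);
		printf("0x%08x\n", insn);	/* 0x39400100 == "li r10,256" */
		return 0;
	}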