// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/mm/nommu.c
 *
 * ARM uClinux supporting functions.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/io.h>
#include <linux/memblock.h>
#include <linux/kernel.h>

#include <asm/cacheflush.h>
#include <asm/cp15.h>
#include <asm/sections.h>
#include <asm/page.h>
#include <asm/setup.h>
#include <asm/traps.h>
#include <asm/mach/arch.h>
#include <asm/cputype.h>
#include <asm/mpu.h>
#include <asm/procinfo.h>

#include "mm.h"

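/*
 * Physical base of the exception vectors.  Set up (and reserved) in
 * arm_mm_memblock_reserve() and later handed to early_trap_init().
 */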
unsigned long vectors_base;

#ifdef CONFIG_ARM_MPU
struct mpu_rgn_info mpu_rgn_info;
#endif

#ifdef CONFIG_CPU_CP15
#ifdef CONFIG_CPU_HIGH_VECTOR
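/*
 * High vectors: set SCTLR.V so exceptions are taken from 0xffff0000.
 */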
unsigned long setup_vectors_base(void)
{
	unsigned long reg = get_cr();

	set_cr(reg | CR_V);
	return 0xffff0000;
}
#else /* CONFIG_CPU_HIGH_VECTOR */
/* Write exception base address to VBAR */
static inline void set_vbar(unsigned long val)
{
	asm("mcr p15, 0, %0, c12, c0, 0" : : "r" (val) : "cc");
}

/*
 * Security Extensions field: ID_PFR1 bits[7:4] (the code below also
 * checks bits[23:20]).  Permitted values:
 * 0b0000 - not implemented, 0b0001/0b0010 - implemented
 */
static inline bool security_extensions_enabled(void)
{
	/* Check CPUID Identification Scheme before ID_PFR1 read */
	if ((read_cpuid_id() & 0x000f0000) == 0x000f0000)
		return cpuid_feature_extract(CPUID_EXT_PFR1, 4) ||
			cpuid_feature_extract(CPUID_EXT_PFR1, 20);
	return false;
}

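/*
 * Low vectors: clear SCTLR.V.  With the Security Extensions present the
 * vector base is programmable via VBAR and, when CONFIG_REMAP_VECTORS_TO_RAM
 * is set, can be moved to the start of DRAM; otherwise it stays at 0.
 */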
unsigned long setup_vectors_base(void)
{
	unsigned long base = 0, reg = get_cr();

	set_cr(reg & ~CR_V);
	if (security_extensions_enabled()) {
		if (IS_ENABLED(CONFIG_REMAP_VECTORS_TO_RAM))
			base = CONFIG_DRAM_BASE;
		set_vbar(base);
	} else if (IS_ENABLED(CONFIG_REMAP_VECTORS_TO_RAM)) {
		if (CONFIG_DRAM_BASE != 0)
			pr_err("Security extensions not enabled, vectors cannot be remapped to RAM, vectors base will be 0x00000000\n");
	}

	return base;
}
#endif /* CONFIG_CPU_HIGH_VECTOR */
#endif /* CONFIG_CPU_CP15 */

void __init arm_mm_memblock_reserve(void)
{
#ifndef CONFIG_CPU_V7M
	vectors_base = IS_ENABLED(CONFIG_CPU_CP15) ? setup_vectors_base() : 0;
	/*
	 * Reserve the exception vector page.  On platforms where the
	 * vectors live at the start of DRAM, letting the allocator hand
	 * out that page would make alloc_page() return address 0, which
	 * is then mistaken for an allocation failure even though the
	 * page itself is valid.
	 */
	memblock_reserve(vectors_base, 2 * PAGE_SIZE);
#else /* ifndef CONFIG_CPU_V7M */
	/*
	 * There is no dedicated vector page on V7-M. So nothing needs to be
	 * reserved here.
	 */
#endif
	/*
	 * In any case, always ensure address 0 is never used as many things
	 * get very confused if 0 is returned as a legitimate address.
	 */
	memblock_reserve(0, 1);
}

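/*
 * Pick the lowmem adjustment routine matching the PMSA version reported
 * in ID_MMFR0.
 */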
static void __init adjust_lowmem_bounds_mpu(void)
{
	unsigned long pmsa = read_cpuid_ext(CPUID_EXT_MMFR0) & MMFR0_PMSA;

	switch (pmsa) {
	case MMFR0_PMSAv7:
		pmsav7_adjust_lowmem_bounds();
		break;
	case MMFR0_PMSAv8:
		pmsav8_adjust_lowmem_bounds();
		break;
	default:
		break;
	}
}

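/*
 * Program the MPU regions via the PMSAv7 or PMSAv8 backend, again keyed
 * off ID_MMFR0.
 */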
static void __init mpu_setup(void)
{
	unsigned long pmsa = read_cpuid_ext(CPUID_EXT_MMFR0) & MMFR0_PMSA;

	switch (pmsa) {
	case MMFR0_PMSAv7:
		pmsav7_setup();
		break;
	case MMFR0_PMSAv8:
		pmsav8_setup();
		break;
	default:
		break;
	}
}

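/*
 * Without an MMU there is no highmem: all of DRAM is lowmem, so point
 * high_memory and the memblock allocation limit at the end of DRAM.
 */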
void __init adjust_lowmem_bounds(void)
{
	phys_addr_t end;

	adjust_lowmem_bounds_mpu();
	end = memblock_end_of_DRAM();
	high_memory = __va(end - 1) + 1;
	memblock_set_current_limit(end);
}

/*
 * paging_init() sets up the page tables, initialises the zone memory
 * maps, and sets up the zero page, bad page and bad page tables.
 */
void __init paging_init(const struct machine_desc *mdesc)
{
	early_trap_init((void *)vectors_base);
	mpu_setup();
	bootmem_init();
}

/*
 * We don't need to do anything here for nommu machines.
 */
void setup_mm_for_reboot(void)
{
}

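/*
 * No MMU means no cache aliases to worry about; cleaning/invalidating
 * the D-cache over the page is all that is needed.
 */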
void flush_dcache_page(struct page *page)
{
	__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
}
EXPORT_SYMBOL(flush_dcache_page);

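/*
 * Copy data into a user page and, for executable mappings, make the
 * I/D-caches coherent over the range just written.
 */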
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long uaddr, void *dst, const void *src,
		       unsigned long len)
{
	memcpy(dst, src, len);
	if (vma->vm_flags & VM_EXEC)
		__cpuc_coherent_user_range(uaddr, uaddr + len);
}

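/*
 * ioremap() on !MMU is an identity mapping: the returned cookie is just
 * the physical address, provided it fits below 4GiB.
 */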
void __iomem *__arm_ioremap_pfn(unsigned long pfn, unsigned long offset,
				size_t size, unsigned int mtype)
{
	if (pfn >= (0x100000000ULL >> PAGE_SHIFT))
		return NULL;
	return (void __iomem *) (offset + (pfn << PAGE_SHIFT));
}
EXPORT_SYMBOL(__arm_ioremap_pfn);

void __iomem *__arm_ioremap_caller(phys_addr_t phys_addr, size_t size,
				   unsigned int mtype, void *caller)
{
	return (void __iomem *)phys_addr;
}

void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t, unsigned int, void *);

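/*
 * The ioremap() variants below all go through the identity pass-through
 * above; the memory type argument has no effect without an MMU.
 */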
void __iomem *ioremap(resource_size_t res_cookie, size_t size)
{
	return __arm_ioremap_caller(res_cookie, size, MT_DEVICE,
				    __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap);

void __iomem *ioremap_cache(resource_size_t res_cookie, size_t size)
{
	return __arm_ioremap_caller(res_cookie, size, MT_DEVICE_CACHED,
				    __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);

void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size)
{
	return __arm_ioremap_caller(res_cookie, size, MT_DEVICE_WC,
				    __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_wc);

#ifdef CONFIG_PCI

#include <asm/mach/map.h>

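/*
 * PCI config space is remapped through the arch_ioremap_caller hook,
 * which is expected to be set by the platform when PCI is used on !MMU.
 */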
void __iomem *pci_remap_cfgspace(resource_size_t res_cookie, size_t size)
{
	return arch_ioremap_caller(res_cookie, size, MT_UNCACHED,
				   __builtin_return_address(0));
}
EXPORT_SYMBOL_GPL(pci_remap_cfgspace);
#endif

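/*
 * memremap() of ordinary RAM is likewise a pass-through of the physical
 * address.
 */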
void *arch_memremap_wb(phys_addr_t phys_addr, size_t size)
{
	return (void *)phys_addr;
}

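/*
 * Nothing was actually mapped, so there is nothing to tear down.
 */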
void iounmap(volatile void __iomem *io_addr)
{
}
EXPORT_SYMBOL(iounmap);