path: root/arch/sparc
author     Linus Torvalds   2009-09-15 09:39:44 -0700
committer  Linus Torvalds   2009-09-15 09:39:44 -0700
commit     ada3fa15057205b7d3f727bba5cd26b5912e350f (patch)
tree       60962fc9e4021b92f484d1a58e72cd3906d4f3db /arch/sparc
parent     2f82af08fcc7dc01a7e98a49a5995a77e32a2925 (diff)
parent     5579fd7e6aed8860ea0c8e3f11897493153b10ad (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu: (46 commits)
  powerpc64: convert to dynamic percpu allocator
  sparc64: use embedding percpu first chunk allocator
  percpu: kill lpage first chunk allocator
  x86,percpu: use embedding for 64bit NUMA and page for 32bit NUMA
  percpu: update embedding first chunk allocator to handle sparse units
  percpu: use group information to allocate vmap areas sparsely
  vmalloc: implement pcpu_get_vm_areas()
  vmalloc: separate out insert_vmalloc_vm()
  percpu: add chunk->base_addr
  percpu: add pcpu_unit_offsets[]
  percpu: introduce pcpu_alloc_info and pcpu_group_info
  percpu: move pcpu_lpage_build_unit_map() and pcpul_lpage_dump_cfg() upward
  percpu: add @align to pcpu_fc_alloc_fn_t
  percpu: make @dyn_size mandatory for pcpu_setup_first_chunk()
  percpu: drop @static_size from first chunk allocators
  percpu: generalize first chunk allocator selection
  percpu: build first chunk allocators selectively
  percpu: rename 4k first chunk allocator to page
  percpu: improve boot messages
  percpu: fix pcpu_reclaim() locking
  ...

Fix trivial conflict as per Tejun Heo in kernel/sched.c
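The sparc64 part of this pull replaces the open-coded 4MB-chunk percpu bootstrap with the generic embedding first chunk allocator. Condensed from the smp_64.c hunk below (nothing here beyond what the diff itself adds), the converted setup path reduces to roughly:

	/* Sketch of the new sparc64 setup_per_cpu_areas(), condensed from the
	 * diff below; 4 << 20 is the 4MB allocation atom handed to the
	 * embedding allocator.  Trailing boot-cpu setup is omitted here. */
	void __init setup_per_cpu_areas(void)
	{
		unsigned long delta;
		unsigned int cpu;
		int rc;

		/* The generic allocator carves the first chunk out of bootmem via
		 * the arch callbacks pcpu_alloc_bootmem/pcpu_free_bootmem and
		 * uses pcpu_cpu_distance() for NUMA-aware grouping. */
		rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
					    PERCPU_DYNAMIC_RESERVE, 4 << 20,
					    pcpu_cpu_distance, pcpu_alloc_bootmem,
					    pcpu_free_bootmem);
		if (rc)
			panic("failed to initialize first chunk (%d)", rc);

		/* Per-cpu offsets now come from the generic pcpu_unit_offsets[]
		 * rather than a hand-computed unit size. */
		delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
		for_each_possible_cpu(cpu)
			__per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
	}

As the diff shows, this drops the sparc64-private pcpur_get_page()/pcpu_map_range() page-table construction entirely; the common percpu code now owns first-chunk placement.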
Diffstat (limited to 'arch/sparc')
-rw-r--r--  arch/sparc/Kconfig               |   2
-rw-r--r--  arch/sparc/kernel/smp_64.c       | 132
-rw-r--r--  arch/sparc/kernel/vmlinux.lds.S  |   8
3 files changed, 23 insertions, 119 deletions
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 2bd5c287538a..86b82348b97c 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -99,7 +99,7 @@ config AUDIT_ARCH
config HAVE_SETUP_PER_CPU_AREA
def_bool y if SPARC64
-config HAVE_DYNAMIC_PER_CPU_AREA
+config NEED_PER_CPU_EMBED_FIRST_CHUNK
def_bool y if SPARC64
config GENERIC_HARDIRQS_NO__DO_IRQ
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index 3691907a43b4..ff68373ce6d6 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -1389,8 +1389,8 @@ void smp_send_stop(void)
* RETURNS:
* Pointer to the allocated area on success, NULL on failure.
*/
-static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
- unsigned long align)
+static void * __init pcpu_alloc_bootmem(unsigned int cpu, size_t size,
+ size_t align)
{
const unsigned long goal = __pa(MAX_DMA_ADDRESS);
#ifdef CONFIG_NEED_MULTIPLE_NODES
@@ -1415,127 +1415,35 @@ static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
#endif
}
-static size_t pcpur_size __initdata;
-static void **pcpur_ptrs __initdata;
-
-static struct page * __init pcpur_get_page(unsigned int cpu, int pageno)
+static void __init pcpu_free_bootmem(void *ptr, size_t size)
{
- size_t off = (size_t)pageno << PAGE_SHIFT;
-
- if (off >= pcpur_size)
- return NULL;
-
- return virt_to_page(pcpur_ptrs[cpu] + off);
+ free_bootmem(__pa(ptr), size);
}
-#define PCPU_CHUNK_SIZE (4UL * 1024UL * 1024UL)
-
-static void __init pcpu_map_range(unsigned long start, unsigned long end,
- struct page *page)
+static int pcpu_cpu_distance(unsigned int from, unsigned int to)
{
- unsigned long pfn = page_to_pfn(page);
- unsigned long pte_base;
-
- BUG_ON((pfn<<PAGE_SHIFT)&(PCPU_CHUNK_SIZE - 1UL));
-
- pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4U |
- _PAGE_CP_4U | _PAGE_CV_4U |
- _PAGE_P_4U | _PAGE_W_4U);
- if (tlb_type == hypervisor)
- pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4V |
- _PAGE_CP_4V | _PAGE_CV_4V |
- _PAGE_P_4V | _PAGE_W_4V);
-
- while (start < end) {
- pgd_t *pgd = pgd_offset_k(start);
- unsigned long this_end;
- pud_t *pud;
- pmd_t *pmd;
- pte_t *pte;
-
- pud = pud_offset(pgd, start);
- if (pud_none(*pud)) {
- pmd_t *new;
-
- new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
- pud_populate(&init_mm, pud, new);
- }
-
- pmd = pmd_offset(pud, start);
- if (!pmd_present(*pmd)) {
- pte_t *new;
-
- new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
- pmd_populate_kernel(&init_mm, pmd, new);
- }
-
- pte = pte_offset_kernel(pmd, start);
- this_end = (start + PMD_SIZE) & PMD_MASK;
- if (this_end > end)
- this_end = end;
-
- while (start < this_end) {
- unsigned long paddr = pfn << PAGE_SHIFT;
-
- pte_val(*pte) = (paddr | pte_base);
-
- start += PAGE_SIZE;
- pte++;
- pfn++;
- }
- }
+ if (cpu_to_node(from) == cpu_to_node(to))
+ return LOCAL_DISTANCE;
+ else
+ return REMOTE_DISTANCE;
}
void __init setup_per_cpu_areas(void)
{
- size_t dyn_size, static_size = __per_cpu_end - __per_cpu_start;
- static struct vm_struct vm;
- unsigned long delta, cpu;
- size_t pcpu_unit_size;
- size_t ptrs_size;
-
- pcpur_size = PFN_ALIGN(static_size + PERCPU_MODULE_RESERVE +
- PERCPU_DYNAMIC_RESERVE);
- dyn_size = pcpur_size - static_size - PERCPU_MODULE_RESERVE;
-
+ unsigned long delta;
+ unsigned int cpu;
+ int rc;
- ptrs_size = PFN_ALIGN(nr_cpu_ids * sizeof(pcpur_ptrs[0]));
- pcpur_ptrs = alloc_bootmem(ptrs_size);
-
- for_each_possible_cpu(cpu) {
- pcpur_ptrs[cpu] = pcpu_alloc_bootmem(cpu, PCPU_CHUNK_SIZE,
- PCPU_CHUNK_SIZE);
-
- free_bootmem(__pa(pcpur_ptrs[cpu] + pcpur_size),
- PCPU_CHUNK_SIZE - pcpur_size);
-
- memcpy(pcpur_ptrs[cpu], __per_cpu_load, static_size);
- }
-
- /* allocate address and map */
- vm.flags = VM_ALLOC;
- vm.size = nr_cpu_ids * PCPU_CHUNK_SIZE;
- vm_area_register_early(&vm, PCPU_CHUNK_SIZE);
-
- for_each_possible_cpu(cpu) {
- unsigned long start = (unsigned long) vm.addr;
- unsigned long end;
-
- start += cpu * PCPU_CHUNK_SIZE;
- end = start + PCPU_CHUNK_SIZE;
- pcpu_map_range(start, end, virt_to_page(pcpur_ptrs[cpu]));
- }
-
- pcpu_unit_size = pcpu_setup_first_chunk(pcpur_get_page, static_size,
- PERCPU_MODULE_RESERVE, dyn_size,
- PCPU_CHUNK_SIZE, vm.addr, NULL);
-
- free_bootmem(__pa(pcpur_ptrs), ptrs_size);
+ rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
+ PERCPU_DYNAMIC_RESERVE, 4 << 20,
+ pcpu_cpu_distance, pcpu_alloc_bootmem,
+ pcpu_free_bootmem);
+ if (rc)
+ panic("failed to initialize first chunk (%d)", rc);
delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
- for_each_possible_cpu(cpu) {
- __per_cpu_offset(cpu) = delta + cpu * pcpu_unit_size;
- }
+ for_each_possible_cpu(cpu)
+ __per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
/* Setup %g5 for the boot cpu. */
__local_per_cpu_offset = __per_cpu_offset(smp_processor_id());
diff --git a/arch/sparc/kernel/vmlinux.lds.S b/arch/sparc/kernel/vmlinux.lds.S
index fcbbd000ec08..866390feb683 100644
--- a/arch/sparc/kernel/vmlinux.lds.S
+++ b/arch/sparc/kernel/vmlinux.lds.S
@@ -171,12 +171,8 @@ SECTIONS
}
_end = . ;
- /DISCARD/ : {
- EXIT_TEXT
- EXIT_DATA
- *(.exitcall.exit)
- }
-
STABS_DEBUG
DWARF_DEBUG
+
+ DISCARDS
}
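For context on the vmlinux.lds.S hunk: the removed open-coded /DISCARD/ output section is superseded by the DISCARDS macro, which the script picks up from include/asm-generic/vmlinux.lds.h. As a rough, from-memory approximation (not copied from this tree), that macro expands to something like:

	/DISCARD/ : {
		EXIT_TEXT
		EXIT_DATA
		EXIT_CALL	/* i.e. *(.exitcall.exit) */
		*(.discard)
	}

so individual architecture linker scripts no longer each maintain their own list of sections to throw away.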