author     Linus Torvalds   2013-08-02 14:39:49 -0700
committer  Linus Torvalds   2013-08-02 14:39:49 -0700
commit     e7e2e511ba7d5850a7ecedcb452acf9d25a9a768 (patch)
tree       9dcee89d54bfc0430ef00839aead87be6189a21a /arch/powerpc
parent     6d039f8f037fda35da8124f09c4d2bbe55c9a575 (diff)
parent     fe956a1d4081ce1a959f87df397a15e252201f10 (diff)
Merge branch 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc
Pull powerpc fixes from Ben Herrenschmidt:
"Here is not quite a handful of powerpc fixes for rc3.
The windfarm fix is a regression fix (though not a new one), the PMU
interrupt rename is not a fix per-se but has been submitted a long
time ago and I kept forgetting to put it in (it puts us back in sync
with x86), the other perf bit is just about putting an API/ABI bit
definition in the right place for userspace to consume, and finally,
we have a fix for the VPHN (Virtual Partition Home Node) feature
(notification that the hypervisor is moving nodes around) which could
cause lockups so we may as well fix it now"
* 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc:
powerpc/windfarm: Fix noisy slots-fan on Xserve (rm31)
powerpc: VPHN topology change updates all siblings
powerpc/perf: Export PERF_EVENT_CONFIG_EBB_SHIFT to userspace
powerpc: Rename PMU interrupts from CNT to PMI
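The "API/ABI bit definition" mentioned in the pull text is PERF_EVENT_CONFIG_EBB_SHIFT, which the diff below moves into a uapi header so userspace can set bit 63 of perf_event_attr.config to request an EBB (Event Based Branch) event. Here is a minimal, hypothetical userspace sketch of how a tool might consume that definition once the header is installed; the raw event code placeholder, the perf_event_open() wrapper, and the pinned/exclusive/per-task settings are illustrative assumptions based on the EBB checks in core-book3s.c, not something this commit adds.

/*
 * Hypothetical userspace sketch (not part of this commit): request an EBB
 * event by setting bit 63 of perf_event_attr.config via the newly exported
 * PERF_EVENT_CONFIG_EBB_SHIFT. The raw event code is a placeholder.
 */
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>
#include <asm/perf_event.h>	/* provides PERF_EVENT_CONFIG_EBB_SHIFT */

static int perf_event_open(struct perf_event_attr *attr, pid_t pid,
			   int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int open_ebb_event(uint64_t raw_event_code)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_RAW;
	/* Bit 63 of config marks the event as an EBB event on powerpc. */
	attr.config = raw_event_code | (1ULL << PERF_EVENT_CONFIG_EBB_SHIFT);
	attr.exclude_kernel = 1;
	attr.exclude_hv = 1;
	attr.pinned = 1;	/* assumed: EBB leaders must be pinned ...   */
	attr.exclusive = 1;	/* ... and exclusive, per ebb_event_check() */

	/* Per-task event (pid 0 = this task); EBB events attach to a task. */
	return perf_event_open(&attr, 0, -1, -1, 0);
}

Exporting the shift rather than letting tools hard-code 63 keeps the flag's position defined in one place shared by the kernel (core-book3s.c, power8-pmu.c) and userspace.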
Diffstat (limited to 'arch/powerpc')
-rw-r--r--   arch/powerpc/include/asm/perf_event_server.h |  6
-rw-r--r--   arch/powerpc/include/asm/smp.h               |  4
-rw-r--r--   arch/powerpc/include/uapi/asm/Kbuild         |  1
-rw-r--r--   arch/powerpc/include/uapi/asm/perf_event.h   | 18
-rw-r--r--   arch/powerpc/kernel/irq.c                    |  2
-rw-r--r--   arch/powerpc/mm/numa.c                       | 59
-rw-r--r--   arch/powerpc/perf/core-book3s.c              |  2
-rw-r--r--   arch/powerpc/perf/power8-pmu.c               |  6
8 files changed, 73 insertions(+), 25 deletions(-)
diff --git a/arch/powerpc/include/asm/perf_event_server.h b/arch/powerpc/include/asm/perf_event_server.h
index 2dd7bfc459be..8b2492644754 100644
--- a/arch/powerpc/include/asm/perf_event_server.h
+++ b/arch/powerpc/include/asm/perf_event_server.h
@@ -12,6 +12,7 @@
 #include <linux/types.h>
 #include <asm/hw_irq.h>
 #include <linux/device.h>
+#include <uapi/asm/perf_event.h>
 
 #define MAX_HWEVENTS		8
 #define MAX_EVENT_ALTERNATIVES	8
@@ -69,11 +70,6 @@ struct power_pmu {
 #define PPMU_LIMITED_PMC_REQD	2	/* have to put this on a limited PMC */
 #define PPMU_ONLY_COUNT_RUN	4	/* only counting in run state */
 
-/*
- * We use the event config bit 63 as a flag to request EBB.
- */
-#define EVENT_CONFIG_EBB_SHIFT	63
-
 extern int register_power_pmu(struct power_pmu *);
 
 struct pt_regs;
diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
index ffbaabebcdca..48cfc858abd6 100644
--- a/arch/powerpc/include/asm/smp.h
+++ b/arch/powerpc/include/asm/smp.h
@@ -145,6 +145,10 @@ extern void __cpu_die(unsigned int cpu);
 #define smp_setup_cpu_maps()
 static inline void inhibit_secondary_onlining(void) {}
 static inline void uninhibit_secondary_onlining(void) {}
+static inline const struct cpumask *cpu_sibling_mask(int cpu)
+{
+	return cpumask_of(cpu);
+}
 
 #endif /* CONFIG_SMP */
 
diff --git a/arch/powerpc/include/uapi/asm/Kbuild b/arch/powerpc/include/uapi/asm/Kbuild
index 5182c8622b54..48be855ef37b 100644
--- a/arch/powerpc/include/uapi/asm/Kbuild
+++ b/arch/powerpc/include/uapi/asm/Kbuild
@@ -20,6 +20,7 @@ header-y += mman.h
 header-y += msgbuf.h
 header-y += nvram.h
 header-y += param.h
+header-y += perf_event.h
 header-y += poll.h
 header-y += posix_types.h
 header-y += ps3fb.h
diff --git a/arch/powerpc/include/uapi/asm/perf_event.h b/arch/powerpc/include/uapi/asm/perf_event.h
new file mode 100644
index 000000000000..80a4d40cf5bc
--- /dev/null
+++ b/arch/powerpc/include/uapi/asm/perf_event.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright 2013 Michael Ellerman, IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2 of the
+ * License.
+ */
+
+#ifndef _UAPI_ASM_POWERPC_PERF_EVENT_H
+#define _UAPI_ASM_POWERPC_PERF_EVENT_H
+
+/*
+ * We use bit 63 of perf_event_attr.config as a flag to request EBB.
+ */
+#define PERF_EVENT_CONFIG_EBB_SHIFT	63
+
+#endif /* _UAPI_ASM_POWERPC_PERF_EVENT_H */
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 2e51cde616d2..c69440cef7af 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -362,7 +362,7 @@ int arch_show_interrupts(struct seq_file *p, int prec)
 		seq_printf(p, "%10u ", per_cpu(irq_stat, j).spurious_irqs);
 	seq_printf(p, "  Spurious interrupts\n");
 
-	seq_printf(p, "%*s: ", prec, "CNT");
+	seq_printf(p, "%*s: ", prec, "PMI");
 	for_each_online_cpu(j)
 		seq_printf(p, "%10u ", per_cpu(irq_stat, j).pmu_irqs);
 	seq_printf(p, "  Performance monitoring interrupts\n");
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 08397217e8ac..5850798826cd 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -27,6 +27,7 @@
 #include <linux/seq_file.h>
 #include <linux/uaccess.h>
 #include <linux/slab.h>
+#include <asm/cputhreads.h>
 #include <asm/sparsemem.h>
 #include <asm/prom.h>
 #include <asm/smp.h>
@@ -1318,7 +1319,8 @@ static int update_cpu_associativity_changes_mask(void)
 			}
 		}
 		if (changed) {
-			cpumask_set_cpu(cpu, changes);
+			cpumask_or(changes, changes, cpu_sibling_mask(cpu));
+			cpu = cpu_last_thread_sibling(cpu);
 		}
 	}
 
@@ -1426,7 +1428,7 @@ static int update_cpu_topology(void *data)
 	if (!data)
 		return -EINVAL;
 
-	cpu = get_cpu();
+	cpu = smp_processor_id();
 
 	for (update = data; update; update = update->next) {
 		if (cpu != update->cpu)
@@ -1446,12 +1448,12 @@
  */
 int arch_update_cpu_topology(void)
 {
-	unsigned int cpu, changed = 0;
+	unsigned int cpu, sibling, changed = 0;
 	struct topology_update_data *updates, *ud;
 	unsigned int associativity[VPHN_ASSOC_BUFSIZE] = {0};
 	cpumask_t updated_cpus;
 	struct device *dev;
-	int weight, i = 0;
+	int weight, new_nid, i = 0;
 
 	weight = cpumask_weight(&cpu_associativity_changes_mask);
 	if (!weight)
@@ -1464,19 +1466,46 @@ int arch_update_cpu_topology(void)
 	cpumask_clear(&updated_cpus);
 
 	for_each_cpu(cpu, &cpu_associativity_changes_mask) {
-		ud = &updates[i++];
-		ud->cpu = cpu;
-		vphn_get_associativity(cpu, associativity);
-		ud->new_nid = associativity_to_nid(associativity);
-
-		if (ud->new_nid < 0 || !node_online(ud->new_nid))
-			ud->new_nid = first_online_node;
+		/*
+		 * If siblings aren't flagged for changes, updates list
+		 * will be too short. Skip on this update and set for next
+		 * update.
+		 */
+		if (!cpumask_subset(cpu_sibling_mask(cpu),
+					&cpu_associativity_changes_mask)) {
+			pr_info("Sibling bits not set for associativity "
+					"change, cpu%d\n", cpu);
+			cpumask_or(&cpu_associativity_changes_mask,
+					&cpu_associativity_changes_mask,
+					cpu_sibling_mask(cpu));
+			cpu = cpu_last_thread_sibling(cpu);
+			continue;
+		}
 
-		ud->old_nid = numa_cpu_lookup_table[cpu];
-		cpumask_set_cpu(cpu, &updated_cpus);
+		/* Use associativity from first thread for all siblings */
+		vphn_get_associativity(cpu, associativity);
+		new_nid = associativity_to_nid(associativity);
+		if (new_nid < 0 || !node_online(new_nid))
+			new_nid = first_online_node;
+
+		if (new_nid == numa_cpu_lookup_table[cpu]) {
+			cpumask_andnot(&cpu_associativity_changes_mask,
+					&cpu_associativity_changes_mask,
+					cpu_sibling_mask(cpu));
+			cpu = cpu_last_thread_sibling(cpu);
+			continue;
+		}
 
-		if (i < weight)
-			ud->next = &updates[i];
+		for_each_cpu(sibling, cpu_sibling_mask(cpu)) {
+			ud = &updates[i++];
+			ud->cpu = sibling;
+			ud->new_nid = new_nid;
+			ud->old_nid = numa_cpu_lookup_table[sibling];
+			cpumask_set_cpu(sibling, &updated_cpus);
+			if (i < weight)
+				ud->next = &updates[i];
+		}
+		cpu = cpu_last_thread_sibling(cpu);
 	}
 
 	stop_machine(update_cpu_topology, &updates[0], &updated_cpus);
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index 24a45f91c65f..eeae308cf982 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -484,7 +484,7 @@ static bool is_ebb_event(struct perf_event *event)
 	 * use bit 63 of the event code for something else if they wish.
 	 */
 	return (ppmu->flags & PPMU_EBB) &&
-	       ((event->attr.config >> EVENT_CONFIG_EBB_SHIFT) & 1);
+	       ((event->attr.config >> PERF_EVENT_CONFIG_EBB_SHIFT) & 1);
 }
 
 static int ebb_event_check(struct perf_event *event)
diff --git a/arch/powerpc/perf/power8-pmu.c b/arch/powerpc/perf/power8-pmu.c
index 7466374d2787..2ee4a707f0df 100644
--- a/arch/powerpc/perf/power8-pmu.c
+++ b/arch/powerpc/perf/power8-pmu.c
@@ -118,7 +118,7 @@
 	 (EVENT_UNIT_MASK     << EVENT_UNIT_SHIFT)		|	\
 	 (EVENT_COMBINE_MASK  << EVENT_COMBINE_SHIFT)		|	\
 	 (EVENT_MARKED_MASK   << EVENT_MARKED_SHIFT)		|	\
-	 (EVENT_EBB_MASK      << EVENT_CONFIG_EBB_SHIFT)	|	\
+	 (EVENT_EBB_MASK      << PERF_EVENT_CONFIG_EBB_SHIFT)	|	\
 	 EVENT_PSEL_MASK)
 
 /* MMCRA IFM bits - POWER8 */
@@ -233,10 +233,10 @@ static int power8_get_constraint(u64 event, unsigned long *maskp, unsigned long
 	pmc   = (event >> EVENT_PMC_SHIFT)       & EVENT_PMC_MASK;
 	unit  = (event >> EVENT_UNIT_SHIFT)      & EVENT_UNIT_MASK;
 	cache = (event >> EVENT_CACHE_SEL_SHIFT) & EVENT_CACHE_SEL_MASK;
-	ebb   = (event >> EVENT_CONFIG_EBB_SHIFT) & EVENT_EBB_MASK;
+	ebb   = (event >> PERF_EVENT_CONFIG_EBB_SHIFT) & EVENT_EBB_MASK;
 
 	/* Clear the EBB bit in the event, so event checks work below */
-	event &= ~(EVENT_EBB_MASK << EVENT_CONFIG_EBB_SHIFT);
+	event &= ~(EVENT_EBB_MASK << PERF_EVENT_CONFIG_EBB_SHIFT);
 
 	if (pmc) {
 		if (pmc > 6)
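The numa.c change above is the VPHN fix: when the hypervisor reports an associativity change, all hardware threads of a core are now moved to the new node together, and the loop variable jumps to the last thread sibling so each core is processed exactly once (the added comment in the hunk explains that if siblings are not all flagged, the updates list sized from the cpumask weight would be too short, so the mask is first widened to whole cores). Below is a small standalone sketch of just that iteration pattern; NR_CPUS, THREADS_PER_CORE, the plain bitmask and the first_sibling()/last_sibling() helpers are illustrative stand-ins for the kernel's cpumask_t, cpu_sibling_mask() and cpu_last_thread_sibling(), not kernel code.

/*
 * Simplified model of the sibling-collapse loop in arch/powerpc/mm/numa.c.
 * All names and sizes here are assumptions for illustration only.
 */
#include <stdio.h>

#define NR_CPUS          8
#define THREADS_PER_CORE 2

/* First and last hardware thread of the core that owns this cpu. */
static int first_sibling(int cpu) { return cpu - (cpu % THREADS_PER_CORE); }
static int last_sibling(int cpu)  { return first_sibling(cpu) + THREADS_PER_CORE - 1; }

int main(void)
{
	/* One bit per cpu, set when an associativity change was reported. */
	unsigned int changes_mask = 0x3c;	/* cpus 2-5, i.e. two full cores */

	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		if (!(changes_mask & (1u << cpu)))
			continue;

		/*
		 * Handle the whole core as one unit: pick the node once for
		 * the first thread and apply the same update to every sibling.
		 */
		for (int sibling = first_sibling(cpu); sibling <= last_sibling(cpu); sibling++)
			printf("cpu %d: move to the node chosen for cpu %d\n",
			       sibling, first_sibling(cpu));

		/* Jump past the siblings so the outer loop sees each core once. */
		cpu = last_sibling(cpu);
	}
	return 0;
}

The per-core handling is what keeps sibling threads from landing on different nodes, which the old per-thread update allowed.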