author      Linus Torvalds  2022-01-18 06:36:35 +0200
committer   Linus Torvalds  2022-01-18 06:36:35 +0200
commit      62b488875c0551822ac3b961d04800d4c7a655d9
tree        87ff5f6ab769c57e2052e40e24ff2d3ecf546abf /arch/arc
parent      57d17378a4a042401b0c2fe211e5a0e3a276cb3d
parent      8f67f65d121cc3bbb4ffaae80e880aeb307d49f4
Merge tag 'arc-5.17-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc
Pull ARC fixes from Vineet Gupta:
"Nothing too exciting for now"
* tag 'arc-5.17-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc:
arc: use swap() to make code cleaner
arc: perf: Move static structs to where they're really used
ARC: perf: fix misleading comment about pmu vs counter stop
arc: Replace lkml.org links with lore
ARC: perf: Remove redundant initialization of variable idx
ARC: thread_info.h: correct two typos in a comment
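
The first item above ("arc: use swap() to make code cleaner") replaces an open-coded three-assignment exchange in arch/arc/kernel/unwind.c with the kernel's swap() helper, as the unwind.c hunk further below shows. A minimal, self-contained userspace sketch of the same idea follows; the SWAP macro, struct table_entry, and its field names are illustrative stand-ins for the kernel's swap() macro and struct eh_frame_hdr_table_entry, not the kernel code itself (builds with GCC/Clang, since __typeof__ is a GNU extension):

#include <stdio.h>

/* Illustrative stand-in for the kernel's swap() macro: a typeof-based exchange. */
#define SWAP(a, b) do { __typeof__(a) _tmp = (a); (a) = (b); (b) = _tmp; } while (0)

/* Hypothetical analogue of struct eh_frame_hdr_table_entry. */
struct table_entry {
	unsigned long start;
	unsigned long fde;
};

/* Before the cleanup: an open-coded exchange through a temporary. */
static void swap_entries_open_coded(struct table_entry *e1, struct table_entry *e2)
{
	unsigned long v;

	v = e1->start;
	e1->start = e2->start;
	e2->start = v;
	v = e1->fde;
	e1->fde = e2->fde;
	e2->fde = v;
}

/* After the cleanup: the same exchange expressed with the swap helper. */
static void swap_entries(struct table_entry *e1, struct table_entry *e2)
{
	SWAP(e1->start, e2->start);
	SWAP(e1->fde, e2->fde);
}

int main(void)
{
	struct table_entry a = { 1, 2 }, b = { 3, 4 };

	swap_entries(&a, &b);
	printf("a = {%lu, %lu}, b = {%lu, %lu}\n", a.start, a.fde, b.start, b.fde);

	swap_entries_open_coded(&a, &b);	/* swaps them back */
	printf("a = {%lu, %lu}, b = {%lu, %lu}\n", a.start, a.fde, b.start, b.fde);
	return 0;
}

Both helpers do the same work; the swap()-based version simply removes the temporary and the repeated assignments, which is the whole point of the cleanup.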
Diffstat (limited to 'arch/arc')
-rw-r--r--  arch/arc/include/asm/irqflags-compact.h  |   8
-rw-r--r--  arch/arc/include/asm/perf_event.h        | 162
-rw-r--r--  arch/arc/include/asm/thread_info.h       |   4
-rw-r--r--  arch/arc/kernel/perf_event.c             | 166
-rw-r--r--  arch/arc/kernel/unwind.c                 |  11
-rw-r--r--  arch/arc/mm/dma.c                        |   2
-rw-r--r--  arch/arc/plat-axs10x/axs10x.c            |   2
-rw-r--r--  arch/arc/plat-hsdk/platform.c            |   2
8 files changed, 178 insertions, 179 deletions
diff --git a/arch/arc/include/asm/irqflags-compact.h b/arch/arc/include/asm/irqflags-compact.h
index 863d63ad18d6..0d63e568d64c 100644
--- a/arch/arc/include/asm/irqflags-compact.h
+++ b/arch/arc/include/asm/irqflags-compact.h
@@ -50,8 +50,12 @@
  * are redone after IRQs are re-enabled (and gcc doesn't reuse stale register)
  *
  * Noted at the time of Abilis Timer List corruption
- * Orig Bug + Rejected solution : https://lkml.org/lkml/2013/3/29/67
- * Reasoning : https://lkml.org/lkml/2013/4/8/15
+ *
+ * Orig Bug + Rejected solution:
+ * https://lore.kernel.org/lkml/1364553218-31255-1-git-send-email-vgupta@synopsys.com
+ *
+ * Reasoning:
+ * https://lore.kernel.org/lkml/CA+55aFyFWjpSVQM6M266tKrG_ZXJzZ-nYejpmXYQXbrr42mGPQ@mail.gmail.com
  *
  ******************************************************************/
 
diff --git a/arch/arc/include/asm/perf_event.h b/arch/arc/include/asm/perf_event.h
index e1971d34ef30..4c919c0f4b30 100644
--- a/arch/arc/include/asm/perf_event.h
+++ b/arch/arc/include/asm/perf_event.h
@@ -63,166 +63,4 @@ struct arc_reg_cc_build {
 
 #define PERF_COUNT_ARC_HW_MAX	(PERF_COUNT_HW_MAX + 8)
 
-/*
- * Some ARC pct quirks:
- *
- * PERF_COUNT_HW_STALLED_CYCLES_BACKEND
- * PERF_COUNT_HW_STALLED_CYCLES_FRONTEND
- *	The ARC 700 can either measure stalls per pipeline stage, or all stalls
- *	combined; for now we assign all stalls to STALLED_CYCLES_BACKEND
- *	and all pipeline flushes (e.g. caused by mispredicts, etc.) to
- *	STALLED_CYCLES_FRONTEND.
- *
- *	We could start multiple performance counters and combine everything
- *	afterwards, but that makes it complicated.
- *
- *	Note that I$ cache misses aren't counted by either of the two!
- */
-
-/*
- * ARC PCT has hardware conditions with fixed "names" but variable "indexes"
- * (based on a specific RTL build)
- * Below is the static map between perf generic/arc specific event_id and
- * h/w condition names.
- * At the time of probe, we loop thru each index and find it's name to
- * complete the mapping of perf event_id to h/w index as latter is needed
- * to program the counter really
- */
-static const char * const arc_pmu_ev_hw_map[] = {
-	/* count cycles */
-	[PERF_COUNT_HW_CPU_CYCLES] = "crun",
-	[PERF_COUNT_HW_REF_CPU_CYCLES] = "crun",
-	[PERF_COUNT_HW_BUS_CYCLES] = "crun",
-
-	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = "bflush",
-	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = "bstall",
-
-	/* counts condition */
-	[PERF_COUNT_HW_INSTRUCTIONS] = "iall",
-	/* All jump instructions that are taken */
-	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = "ijmptak",
-#ifdef CONFIG_ISA_ARCV2
-	[PERF_COUNT_HW_BRANCH_MISSES] = "bpmp",
-#else
-	[PERF_COUNT_ARC_BPOK] = "bpok",	/* NP-NT, PT-T, PNT-NT */
-	[PERF_COUNT_HW_BRANCH_MISSES] = "bpfail",	/* NP-T, PT-NT, PNT-T */
-#endif
-	[PERF_COUNT_ARC_LDC] = "imemrdc",	/* Instr: mem read cached */
-	[PERF_COUNT_ARC_STC] = "imemwrc",	/* Instr: mem write cached */
-
-	[PERF_COUNT_ARC_DCLM] = "dclm",	/* D-cache Load Miss */
-	[PERF_COUNT_ARC_DCSM] = "dcsm",	/* D-cache Store Miss */
-	[PERF_COUNT_ARC_ICM] = "icm",	/* I-cache Miss */
-	[PERF_COUNT_ARC_EDTLB] = "edtlb",	/* D-TLB Miss */
-	[PERF_COUNT_ARC_EITLB] = "eitlb",	/* I-TLB Miss */
-
-	[PERF_COUNT_HW_CACHE_REFERENCES] = "imemrdc",	/* Instr: mem read cached */
-	[PERF_COUNT_HW_CACHE_MISSES] = "dclm",	/* D-cache Load Miss */
-};
-
-#define C(_x)			PERF_COUNT_HW_CACHE_##_x
-#define CACHE_OP_UNSUPPORTED	0xffff
-
-static const unsigned int arc_pmu_cache_map[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
-	[C(L1D)] = {
-		[C(OP_READ)] = {
-			[C(RESULT_ACCESS)] = PERF_COUNT_ARC_LDC,
-			[C(RESULT_MISS)] = PERF_COUNT_ARC_DCLM,
-		},
-		[C(OP_WRITE)] = {
-			[C(RESULT_ACCESS)] = PERF_COUNT_ARC_STC,
-			[C(RESULT_MISS)] = PERF_COUNT_ARC_DCSM,
-		},
-		[C(OP_PREFETCH)] = {
-			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
-		},
-	},
-	[C(L1I)] = {
-		[C(OP_READ)] = {
-			[C(RESULT_ACCESS)] = PERF_COUNT_HW_INSTRUCTIONS,
-			[C(RESULT_MISS)] = PERF_COUNT_ARC_ICM,
-		},
-		[C(OP_WRITE)] = {
-			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
-		},
-		[C(OP_PREFETCH)] = {
-			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
-		},
-	},
-	[C(LL)] = {
-		[C(OP_READ)] = {
-			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
-		},
-		[C(OP_WRITE)] = {
-			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
-		},
-		[C(OP_PREFETCH)] = {
-			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
-		},
-	},
-	[C(DTLB)] = {
-		[C(OP_READ)] = {
-			[C(RESULT_ACCESS)] = PERF_COUNT_ARC_LDC,
-			[C(RESULT_MISS)] = PERF_COUNT_ARC_EDTLB,
-		},
-		/* DTLB LD/ST Miss not segregated by h/w*/
-		[C(OP_WRITE)] = {
-			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
-		},
-		[C(OP_PREFETCH)] = {
-			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
-		},
-	},
-	[C(ITLB)] = {
-		[C(OP_READ)] = {
-			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)] = PERF_COUNT_ARC_EITLB,
-		},
-		[C(OP_WRITE)] = {
-			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
-		},
-		[C(OP_PREFETCH)] = {
-			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
-		},
-	},
-	[C(BPU)] = {
-		[C(OP_READ)] = {
-			[C(RESULT_ACCESS)] = PERF_COUNT_HW_BRANCH_INSTRUCTIONS,
-			[C(RESULT_MISS)] = PERF_COUNT_HW_BRANCH_MISSES,
-		},
-		[C(OP_WRITE)] = {
-			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
-		},
-		[C(OP_PREFETCH)] = {
-			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
-		},
-	},
-	[C(NODE)] = {
-		[C(OP_READ)] = {
-			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
-		},
-		[C(OP_WRITE)] = {
-			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
-		},
-		[C(OP_PREFETCH)] = {
-			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
-		},
-	},
-};
-
 #endif /* __ASM_PERF_EVENT_H */
diff --git a/arch/arc/include/asm/thread_info.h b/arch/arc/include/asm/thread_info.h
index c0942c24d401..d36863e34bfc 100644
--- a/arch/arc/include/asm/thread_info.h
+++ b/arch/arc/include/asm/thread_info.h
@@ -99,8 +99,8 @@ static inline __attribute_const__ struct thread_info *current_thread_info(void)
 
 /*
  * _TIF_ALLWORK_MASK includes SYSCALL_TRACE, but we don't need it.
- * SYSCALL_TRACE is anyway seperately/unconditionally tested right after a
- * syscall, so all that reamins to be tested is _TIF_WORK_MASK
+ * SYSCALL_TRACE is anyway separately/unconditionally tested right after a
+ * syscall, so all that remains to be tested is _TIF_WORK_MASK
  */
 
 #endif /* _ASM_THREAD_INFO_H */
diff --git a/arch/arc/kernel/perf_event.c b/arch/arc/kernel/perf_event.c
index 145722f80c9b..adff957962da 100644
--- a/arch/arc/kernel/perf_event.c
+++ b/arch/arc/kernel/perf_event.c
@@ -17,6 +17,168 @@
 /* HW holds 8 symbols + one for null terminator */
 #define ARCPMU_EVENT_NAME_LEN	9
 
+/*
+ * Some ARC pct quirks:
+ *
+ * PERF_COUNT_HW_STALLED_CYCLES_BACKEND
+ * PERF_COUNT_HW_STALLED_CYCLES_FRONTEND
+ *	The ARC 700 can either measure stalls per pipeline stage, or all stalls
+ *	combined; for now we assign all stalls to STALLED_CYCLES_BACKEND
+ *	and all pipeline flushes (e.g. caused by mispredicts, etc.) to
+ *	STALLED_CYCLES_FRONTEND.
+ *
+ *	We could start multiple performance counters and combine everything
+ *	afterwards, but that makes it complicated.
+ *
+ *	Note that I$ cache misses aren't counted by either of the two!
+ */
+
+/*
+ * ARC PCT has hardware conditions with fixed "names" but variable "indexes"
+ * (based on a specific RTL build)
+ * Below is the static map between perf generic/arc specific event_id and
+ * h/w condition names.
+ * At the time of probe, we loop thru each index and find it's name to
+ * complete the mapping of perf event_id to h/w index as latter is needed
+ * to program the counter really
+ */
+static const char * const arc_pmu_ev_hw_map[] = {
+	/* count cycles */
+	[PERF_COUNT_HW_CPU_CYCLES] = "crun",
+	[PERF_COUNT_HW_REF_CPU_CYCLES] = "crun",
+	[PERF_COUNT_HW_BUS_CYCLES] = "crun",
+
+	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = "bflush",
+	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = "bstall",
+
+	/* counts condition */
+	[PERF_COUNT_HW_INSTRUCTIONS] = "iall",
+	/* All jump instructions that are taken */
+	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = "ijmptak",
+#ifdef CONFIG_ISA_ARCV2
+	[PERF_COUNT_HW_BRANCH_MISSES] = "bpmp",
+#else
+	[PERF_COUNT_ARC_BPOK] = "bpok",	/* NP-NT, PT-T, PNT-NT */
+	[PERF_COUNT_HW_BRANCH_MISSES] = "bpfail",	/* NP-T, PT-NT, PNT-T */
+#endif
+	[PERF_COUNT_ARC_LDC] = "imemrdc",	/* Instr: mem read cached */
+	[PERF_COUNT_ARC_STC] = "imemwrc",	/* Instr: mem write cached */
+
+	[PERF_COUNT_ARC_DCLM] = "dclm",	/* D-cache Load Miss */
+	[PERF_COUNT_ARC_DCSM] = "dcsm",	/* D-cache Store Miss */
+	[PERF_COUNT_ARC_ICM] = "icm",	/* I-cache Miss */
+	[PERF_COUNT_ARC_EDTLB] = "edtlb",	/* D-TLB Miss */
+	[PERF_COUNT_ARC_EITLB] = "eitlb",	/* I-TLB Miss */
+
+	[PERF_COUNT_HW_CACHE_REFERENCES] = "imemrdc",	/* Instr: mem read cached */
+	[PERF_COUNT_HW_CACHE_MISSES] = "dclm",	/* D-cache Load Miss */
+};
+
+#define C(_x)			PERF_COUNT_HW_CACHE_##_x
+#define CACHE_OP_UNSUPPORTED	0xffff
+
+static const unsigned int arc_pmu_cache_map[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
+	[C(L1D)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)] = PERF_COUNT_ARC_LDC,
+			[C(RESULT_MISS)] = PERF_COUNT_ARC_DCLM,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)] = PERF_COUNT_ARC_STC,
+			[C(RESULT_MISS)] = PERF_COUNT_ARC_DCSM,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+		},
+	},
+	[C(L1I)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)] = PERF_COUNT_HW_INSTRUCTIONS,
+			[C(RESULT_MISS)] = PERF_COUNT_ARC_ICM,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+		},
+	},
+	[C(LL)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+		},
+	},
+	[C(DTLB)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)] = PERF_COUNT_ARC_LDC,
+			[C(RESULT_MISS)] = PERF_COUNT_ARC_EDTLB,
+		},
+		/* DTLB LD/ST Miss not segregated by h/w*/
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+		},
+	},
+	[C(ITLB)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)] = PERF_COUNT_ARC_EITLB,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+		},
+	},
+	[C(BPU)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)] = PERF_COUNT_HW_BRANCH_INSTRUCTIONS,
+			[C(RESULT_MISS)] = PERF_COUNT_HW_BRANCH_MISSES,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+		},
+	},
+	[C(NODE)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+		},
+	},
+};
+
 enum arc_pmu_attr_groups {
 	ARCPMU_ATTR_GR_EVENTS,
 	ARCPMU_ATTR_GR_FORMATS,
@@ -328,7 +490,7 @@ static void arc_pmu_stop(struct perf_event *event, int flags)
 	}
 
 	if (!(event->hw.state & PERF_HES_STOPPED)) {
-		/* stop ARC pmu here */
+		/* stop hw counter here */
 		write_aux_reg(ARC_REG_PCT_INDEX, idx);
 
 		/* condition code #0 is always "never" */
@@ -361,7 +523,7 @@ static int arc_pmu_add(struct perf_event *event, int flags)
 {
 	struct arc_pmu_cpu *pmu_cpu = this_cpu_ptr(&arc_pmu_cpu);
 	struct hw_perf_event *hwc = &event->hw;
-	int idx = hwc->idx;
+	int idx;
 
 	idx = ffz(pmu_cpu->used_mask[0]);
 	if (idx == arc_pmu->n_counters)
diff --git a/arch/arc/kernel/unwind.c b/arch/arc/kernel/unwind.c
index 9e28058cdba8..200270a94558 100644
--- a/arch/arc/kernel/unwind.c
+++ b/arch/arc/kernel/unwind.c
@@ -245,14 +245,9 @@ static void swap_eh_frame_hdr_table_entries(void *p1, void *p2, int size)
 {
 	struct eh_frame_hdr_table_entry *e1 = p1;
 	struct eh_frame_hdr_table_entry *e2 = p2;
-	unsigned long v;
-
-	v = e1->start;
-	e1->start = e2->start;
-	e2->start = v;
-	v = e1->fde;
-	e1->fde = e2->fde;
-	e2->fde = v;
+
+	swap(e1->start, e2->start);
+	swap(e1->fde, e2->fde);
 }
 
 static void init_unwind_hdr(struct unwind_table *table,
diff --git a/arch/arc/mm/dma.c b/arch/arc/mm/dma.c
index 517988e60cfc..2a7fbbb83b70 100644
--- a/arch/arc/mm/dma.c
+++ b/arch/arc/mm/dma.c
@@ -32,7 +32,7 @@ void arch_dma_prep_coherent(struct page *page, size_t size)
 
 /*
  * Cache operations depending on function and direction argument, inspired by
- * https://lkml.org/lkml/2018/5/18/979
+ * https://lore.kernel.org/lkml/20180518175004.GF17671@n2100.armlinux.org.uk
  * "dma_sync_*_for_cpu and direction=TO_DEVICE (was Re: [PATCH 02/20]
  * dma-mapping: provide a generic dma-noncoherent implementation)"
  *
diff --git a/arch/arc/plat-axs10x/axs10x.c b/arch/arc/plat-axs10x/axs10x.c
index 63ea5a606ecd..b821df7b0089 100644
--- a/arch/arc/plat-axs10x/axs10x.c
+++ b/arch/arc/plat-axs10x/axs10x.c
@@ -50,7 +50,7 @@ static void __init axs10x_enable_gpio_intc_wire(void)
 	 * Current implementation of "irq-dw-apb-ictl" driver doesn't work well
 	 * with stacked INTCs. In particular problem happens if its master INTC
 	 * not yet instantiated. See discussion here -
-	 * https://lkml.org/lkml/2015/3/4/755
+	 * https://lore.kernel.org/lkml/54F6FE2C.7020309@synopsys.com
 	 *
 	 * So setup the first gpio block as a passive pass thru and hide it from
 	 * DT hardware topology - connect MB intc directly to cpu intc
diff --git a/arch/arc/plat-hsdk/platform.c b/arch/arc/plat-hsdk/platform.c
index b3ea1fa11f87..c4a875b22352 100644
--- a/arch/arc/plat-hsdk/platform.c
+++ b/arch/arc/plat-hsdk/platform.c
@@ -52,7 +52,7 @@ static void __init hsdk_enable_gpio_intc_wire(void)
 	 * Current implementation of "irq-dw-apb-ictl" driver doesn't work well
 	 * with stacked INTCs. In particular problem happens if its master INTC
 	 * not yet instantiated. See discussion here -
-	 * https://lkml.org/lkml/2015/3/4/755
+	 * https://lore.kernel.org/lkml/54F6FE2C.7020309@synopsys.com
 	 *
 	 * So setup the first gpio block as a passive pass thru and hide it from
 	 * DT hardware topology - connect intc directly to cpu intc
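
The "Remove redundant initialization of variable idx" change in the arc_pmu_add() hunk above drops the initializer because idx is unconditionally overwritten by ffz() before it is ever read. A small, self-contained sketch of that pattern follows; first_zero_bit() and pick_free_counter() are illustrative stand-ins for the kernel's ffz() and counter-allocation logic, not the actual perf_event.c code:

#include <stdio.h>

/* Illustrative stand-in for the kernel's ffz(): index of the first zero bit. */
static int first_zero_bit(unsigned long mask)
{
	int i;

	for (i = 0; i < (int)(8 * sizeof(mask)); i++)
		if (!(mask & (1UL << i)))
			return i;
	return -1;
}

static int pick_free_counter(unsigned long used_mask)
{
	/*
	 * Before the cleanup this declaration carried an initializer, but the
	 * value was never used: idx is overwritten on the very next statement,
	 * so the initializer is redundant and can simply be dropped.
	 */
	int idx;

	idx = first_zero_bit(used_mask);
	return idx;
}

int main(void)
{
	/* Counters 0 and 1 are busy, so the first free slot is index 2. */
	printf("free counter: %d\n", pick_free_counter(0x3UL));
	return 0;
}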