author     Paolo Bonzini   2021-04-23 07:41:17 -0400
committer  Paolo Bonzini   2021-04-23 07:41:17 -0400
commit     c4f71901d53b6d8a4703389459d9f99fbd80ffd2 (patch)
tree       af8a0c33cec6dfb8a5d5cd7fcef245ab02b12691 /drivers
parent     fd49e8ee70b306a003323a17bbcc0633f322c135 (diff)
parent     9a8aae605b80fc0a830cdce747eed48e11acc067 (diff)
Merge tag 'kvmarm-5.13' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into HEAD
KVM/arm64 updates for Linux 5.13
New features:
- Stage-2 isolation for the host kernel when running in protected mode
- Guest SVE support when running in nVHE mode
- Force W^X hypervisor mappings in nVHE mode
- ITS save/restore for guests using direct injection with GICv4.1
- nVHE panics now produce readable backtraces
- Guest support for PTP using the ptp_kvm driver
- Performance improvements in the S2 fault handler
- Alexandru is now a reviewer (not really a new feature...)
Fixes:
- Proper emulation of the GICR_TYPER register
- Handle the complete set of relocations in the nVHE EL2 object
- Get rid of the oprofile dependency in the PMU code (and of the
oprofile body parts at the same time)
- Debug and SPE fixes
- Fix vcpu reset
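
The PTP support listed above surfaces in the guest as a regular PTP clock device once the ptp_kvm driver binds. As a rough guest-side illustration (not part of this merge), a cross-timestamp can be read through the generic PTP_SYS_OFFSET_PRECISE ioctl; the /dev/ptp0 path is an assumption, since the clock index varies per system:

/* Hypothetical example: query the ptp_kvm clock for a precise
 * host/guest cross-timestamp. /dev/ptp0 is an assumption. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/ptp_clock.h>

int main(void)
{
	struct ptp_sys_offset_precise off = { 0 };
	int fd = open("/dev/ptp0", O_RDONLY);

	if (fd < 0 || ioctl(fd, PTP_SYS_OFFSET_PRECISE, &off) < 0) {
		perror("ptp_kvm");
		return 1;
	}
	/* 'device' carries the host clock reading, 'sys_realtime' the
	 * guest CLOCK_REALTIME value sampled at the same instant. */
	printf("host %lld.%09u guest %lld.%09u\n",
	       (long long)off.device.sec, off.device.nsec,
	       (long long)off.sys_realtime.sec, off.sys_realtime.nsec);
	return 0;
}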
Diffstat (limited to 'drivers')
23 files changed, 1955 insertions, 152 deletions
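
Several of the guest-facing changes below build on the SMCCC vendor-hypercall discovery added in drivers/firmware/smccc/kvm_guest.c: the guest probes the hypervisor UID, fetches a feature bitmap, and only then issues vendor hypercalls. A minimal sketch of that consumer pattern, modelled on what the ptp_kvm path does (illustrative only, and assuming the declarations land in the headers shown):

/*
 * Illustrative only: check the hypervisor service bitmap populated by
 * kvm_init_hyp_services() before issuing the vendor hypercall.
 */
#include <linux/arm-smccc.h>
#include <linux/errno.h>
#include <linux/ptp_kvm.h>
#include <asm/hypervisor.h>

static int example_ptp_hypercall(void)
{
	struct arm_smccc_res res;

	/* Bail out early if the hypervisor does not advertise the service. */
	if (!kvm_arm_hyp_service_available(ARM_SMCCC_KVM_FUNC_PTP))
		return -EOPNOTSUPP;

	arm_smccc_1_1_invoke(ARM_SMCCC_VENDOR_HYP_KVM_PTP_FUNC_ID,
			     KVM_PTP_VIRT_COUNTER, &res);
	if ((long)res.a0 < 0)
		return -EOPNOTSUPP;

	return 0;
}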
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c index d0177824c518..e0f167e5e792 100644 --- a/drivers/clocksource/arm_arch_timer.c +++ b/drivers/clocksource/arm_arch_timer.c @@ -16,6 +16,7 @@ #include <linux/cpu_pm.h> #include <linux/clockchips.h> #include <linux/clocksource.h> +#include <linux/clocksource_ids.h> #include <linux/interrupt.h> #include <linux/of_irq.h> #include <linux/of_address.h> @@ -24,6 +25,8 @@ #include <linux/sched/clock.h> #include <linux/sched_clock.h> #include <linux/acpi.h> +#include <linux/arm-smccc.h> +#include <linux/ptp_kvm.h> #include <asm/arch_timer.h> #include <asm/virt.h> @@ -191,6 +194,7 @@ static u64 arch_counter_read_cc(const struct cyclecounter *cc) static struct clocksource clocksource_counter = { .name = "arch_sys_counter", + .id = CSID_ARM_ARCH_COUNTER, .rating = 400, .read = arch_counter_read, .mask = CLOCKSOURCE_MASK(56), @@ -1657,3 +1661,35 @@ static int __init arch_timer_acpi_init(struct acpi_table_header *table) } TIMER_ACPI_DECLARE(arch_timer, ACPI_SIG_GTDT, arch_timer_acpi_init); #endif + +int kvm_arch_ptp_get_crosststamp(u64 *cycle, struct timespec64 *ts, + struct clocksource **cs) +{ + struct arm_smccc_res hvc_res; + u32 ptp_counter; + ktime_t ktime; + + if (!IS_ENABLED(CONFIG_HAVE_ARM_SMCCC_DISCOVERY)) + return -EOPNOTSUPP; + + if (arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI) + ptp_counter = KVM_PTP_VIRT_COUNTER; + else + ptp_counter = KVM_PTP_PHYS_COUNTER; + + arm_smccc_1_1_invoke(ARM_SMCCC_VENDOR_HYP_KVM_PTP_FUNC_ID, + ptp_counter, &hvc_res); + + if ((int)(hvc_res.a0) < 0) + return -EOPNOTSUPP; + + ktime = (u64)hvc_res.a0 << 32 | hvc_res.a1; + *ts = ktime_to_timespec64(ktime); + if (cycle) + *cycle = (u64)hvc_res.a2 << 32 | hvc_res.a3; + if (cs) + *cs = &clocksource_counter; + + return 0; +} +EXPORT_SYMBOL_GPL(kvm_arch_ptp_get_crosststamp); diff --git a/drivers/firmware/psci/psci.c b/drivers/firmware/psci/psci.c index f5fc429cae3f..69e296f02902 100644 --- a/drivers/firmware/psci/psci.c +++ b/drivers/firmware/psci/psci.c @@ -23,6 +23,7 @@ #include <asm/cpuidle.h> #include <asm/cputype.h> +#include <asm/hypervisor.h> #include <asm/system_misc.h> #include <asm/smp_plat.h> #include <asm/suspend.h> @@ -498,6 +499,7 @@ static int __init psci_probe(void) psci_init_cpu_suspend(); psci_init_system_suspend(); psci_init_system_reset2(); + kvm_init_hyp_services(); } return 0; diff --git a/drivers/firmware/smccc/Makefile b/drivers/firmware/smccc/Makefile index 72ab84042832..40d19144a860 100644 --- a/drivers/firmware/smccc/Makefile +++ b/drivers/firmware/smccc/Makefile @@ -1,4 +1,4 @@ # SPDX-License-Identifier: GPL-2.0 # -obj-$(CONFIG_HAVE_ARM_SMCCC_DISCOVERY) += smccc.o +obj-$(CONFIG_HAVE_ARM_SMCCC_DISCOVERY) += smccc.o kvm_guest.o obj-$(CONFIG_ARM_SMCCC_SOC_ID) += soc_id.o diff --git a/drivers/firmware/smccc/kvm_guest.c b/drivers/firmware/smccc/kvm_guest.c new file mode 100644 index 000000000000..2d3e866decaa --- /dev/null +++ b/drivers/firmware/smccc/kvm_guest.c @@ -0,0 +1,50 @@ +// SPDX-License-Identifier: GPL-2.0 + +#define pr_fmt(fmt) "smccc: KVM: " fmt + +#include <linux/arm-smccc.h> +#include <linux/bitmap.h> +#include <linux/kernel.h> +#include <linux/string.h> + +#include <asm/hypervisor.h> + +static DECLARE_BITMAP(__kvm_arm_hyp_services, ARM_SMCCC_KVM_NUM_FUNCS) __ro_after_init = { }; + +void __init kvm_init_hyp_services(void) +{ + struct arm_smccc_res res; + u32 val[4]; + + if (arm_smccc_1_1_get_conduit() != SMCCC_CONDUIT_HVC) + return; + + arm_smccc_1_1_invoke(ARM_SMCCC_VENDOR_HYP_CALL_UID_FUNC_ID, 
&res); + if (res.a0 != ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_0 || + res.a1 != ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_1 || + res.a2 != ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_2 || + res.a3 != ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_3) + return; + + memset(&res, 0, sizeof(res)); + arm_smccc_1_1_invoke(ARM_SMCCC_VENDOR_HYP_KVM_FEATURES_FUNC_ID, &res); + + val[0] = lower_32_bits(res.a0); + val[1] = lower_32_bits(res.a1); + val[2] = lower_32_bits(res.a2); + val[3] = lower_32_bits(res.a3); + + bitmap_from_arr32(__kvm_arm_hyp_services, val, ARM_SMCCC_KVM_NUM_FUNCS); + + pr_info("hypervisor services detected (0x%08lx 0x%08lx 0x%08lx 0x%08lx)\n", + res.a3, res.a2, res.a1, res.a0); +} + +bool kvm_arm_hyp_service_available(u32 func_id) +{ + if (func_id >= ARM_SMCCC_KVM_NUM_FUNCS) + return false; + + return test_bit(func_id, __kvm_arm_hyp_services); +} +EXPORT_SYMBOL_GPL(kvm_arm_hyp_service_available); diff --git a/drivers/firmware/smccc/smccc.c b/drivers/firmware/smccc/smccc.c index d52bfc5ed5e4..028f81d702cc 100644 --- a/drivers/firmware/smccc/smccc.c +++ b/drivers/firmware/smccc/smccc.c @@ -8,6 +8,7 @@ #include <linux/cache.h> #include <linux/init.h> #include <linux/arm-smccc.h> +#include <linux/kernel.h> #include <asm/archrandom.h> static u32 smccc_version = ARM_SMCCC_VERSION_1_0; diff --git a/drivers/hwtracing/coresight/Kconfig b/drivers/hwtracing/coresight/Kconfig index 7b44ba22cbe1..84530fd80998 100644 --- a/drivers/hwtracing/coresight/Kconfig +++ b/drivers/hwtracing/coresight/Kconfig @@ -97,15 +97,15 @@ config CORESIGHT_SOURCE_ETM3X module will be called coresight-etm3x. config CORESIGHT_SOURCE_ETM4X - tristate "CoreSight Embedded Trace Macrocell 4.x driver" + tristate "CoreSight ETMv4.x / ETE driver" depends on ARM64 select CORESIGHT_LINKS_AND_SINKS select PID_IN_CONTEXTIDR help - This driver provides support for the ETM4.x tracer module, tracing the - instructions that a processor is executing. This is primarily useful - for instruction level tracing. Depending on the implemented version - data tracing may also be available. + This driver provides support for the CoreSight Embedded Trace Macrocell + version 4.x and the Embedded Trace Extensions (ETE). Both are CPU tracer + modules, tracing the instructions that a processor is executing. This is + primarily useful for instruction level tracing. To compile this driver as a module, choose M here: the module will be called coresight-etm4x. @@ -173,4 +173,18 @@ config CORESIGHT_CTI_INTEGRATION_REGS CTI trigger connections between this and other devices.These registers are not used in normal operation and can leave devices in an inconsistent state. + +config CORESIGHT_TRBE + tristate "Trace Buffer Extension (TRBE) driver" + depends on ARM64 && CORESIGHT_SOURCE_ETM4X + help + This driver provides support for percpu Trace Buffer Extension (TRBE). + TRBE always needs to be used along with it's corresponding percpu ETE + component. ETE generates trace data which is then captured with TRBE. + Unlike traditional sink devices, TRBE is a CPU feature accessible via + system registers. But it's explicit dependency with trace unit (ETE) + requires it to be plugged in as a coresight sink device. + + To compile this driver as a module, choose M here: the module will be + called coresight-trbe. 
endif diff --git a/drivers/hwtracing/coresight/Makefile b/drivers/hwtracing/coresight/Makefile index f20e357758d1..d60816509755 100644 --- a/drivers/hwtracing/coresight/Makefile +++ b/drivers/hwtracing/coresight/Makefile @@ -21,5 +21,6 @@ obj-$(CONFIG_CORESIGHT_STM) += coresight-stm.o obj-$(CONFIG_CORESIGHT_CPU_DEBUG) += coresight-cpu-debug.o obj-$(CONFIG_CORESIGHT_CATU) += coresight-catu.o obj-$(CONFIG_CORESIGHT_CTI) += coresight-cti.o +obj-$(CONFIG_CORESIGHT_TRBE) += coresight-trbe.o coresight-cti-y := coresight-cti-core.o coresight-cti-platform.o \ coresight-cti-sysfs.o diff --git a/drivers/hwtracing/coresight/coresight-core.c b/drivers/hwtracing/coresight/coresight-core.c index 0062c8935653..ca75b0b54e1f 100644 --- a/drivers/hwtracing/coresight/coresight-core.c +++ b/drivers/hwtracing/coresight/coresight-core.c @@ -23,6 +23,7 @@ #include "coresight-priv.h" static DEFINE_MUTEX(coresight_mutex); +static DEFINE_PER_CPU(struct coresight_device *, csdev_sink); /** * struct coresight_node - elements of a path, from source to sink @@ -70,6 +71,18 @@ void coresight_remove_cti_ops(void) } EXPORT_SYMBOL_GPL(coresight_remove_cti_ops); +void coresight_set_percpu_sink(int cpu, struct coresight_device *csdev) +{ + per_cpu(csdev_sink, cpu) = csdev; +} +EXPORT_SYMBOL_GPL(coresight_set_percpu_sink); + +struct coresight_device *coresight_get_percpu_sink(int cpu) +{ + return per_cpu(csdev_sink, cpu); +} +EXPORT_SYMBOL_GPL(coresight_get_percpu_sink); + static int coresight_id_match(struct device *dev, void *data) { int trace_id, i_trace_id; @@ -784,6 +797,14 @@ static int _coresight_build_path(struct coresight_device *csdev, if (csdev == sink) goto out; + if (coresight_is_percpu_source(csdev) && coresight_is_percpu_sink(sink) && + sink == per_cpu(csdev_sink, source_ops(csdev)->cpu_id(csdev))) { + if (_coresight_build_path(sink, sink, path) == 0) { + found = true; + goto out; + } + } + /* Not a sink - recursively explore each port found on this element */ for (i = 0; i < csdev->pdata->nr_outport; i++) { struct coresight_device *child_dev; @@ -999,8 +1020,12 @@ coresight_find_default_sink(struct coresight_device *csdev) int depth = 0; /* look for a default sink if we have not found for this device */ - if (!csdev->def_sink) - csdev->def_sink = coresight_find_sink(csdev, &depth); + if (!csdev->def_sink) { + if (coresight_is_percpu_source(csdev)) + csdev->def_sink = per_cpu(csdev_sink, source_ops(csdev)->cpu_id(csdev)); + if (!csdev->def_sink) + csdev->def_sink = coresight_find_sink(csdev, &depth); + } return csdev->def_sink; } diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.c b/drivers/hwtracing/coresight/coresight-etm-perf.c index 0f603b4094f2..f123c26b9f54 100644 --- a/drivers/hwtracing/coresight/coresight-etm-perf.c +++ b/drivers/hwtracing/coresight/coresight-etm-perf.c @@ -24,7 +24,26 @@ static struct pmu etm_pmu; static bool etm_perf_up; -static DEFINE_PER_CPU(struct perf_output_handle, ctx_handle); +/* + * An ETM context for a running event includes the perf aux handle + * and aux_data. For ETM, the aux_data (etm_event_data), consists of + * the trace path and the sink configuration. The event data is accessible + * via perf_get_aux(handle). However, a sink could "end" a perf output + * handle via the IRQ handler. And if the "sink" encounters a failure + * to "begin" another session (e.g due to lack of space in the buffer), + * the handle will be cleared. 
Thus, the event_data may not be accessible + * from the handle when we get to the etm_event_stop(), which is required + * for stopping the trace path. The event_data is guaranteed to stay alive + * until "free_aux()", which cannot happen as long as the event is active on + * the ETM. Thus the event_data for the session must be part of the ETM context + * to make sure we can disable the trace path. + */ +struct etm_ctxt { + struct perf_output_handle handle; + struct etm_event_data *event_data; +}; + +static DEFINE_PER_CPU(struct etm_ctxt, etm_ctxt); static DEFINE_PER_CPU(struct coresight_device *, csdev_src); /* @@ -232,6 +251,25 @@ static void etm_free_aux(void *data) schedule_work(&event_data->work); } +/* + * Check if two given sinks are compatible with each other, + * so that they can use the same sink buffers, when an event + * moves around. + */ +static bool sinks_compatible(struct coresight_device *a, + struct coresight_device *b) +{ + if (!a || !b) + return false; + /* + * If the sinks are of the same subtype and driven + * by the same driver, we can use the same buffer + * on these sinks. + */ + return (a->subtype.sink_subtype == b->subtype.sink_subtype) && + (sink_ops(a) == sink_ops(b)); +} + static void *etm_setup_aux(struct perf_event *event, void **pages, int nr_pages, bool overwrite) { @@ -239,6 +277,7 @@ static void *etm_setup_aux(struct perf_event *event, void **pages, int cpu = event->cpu; cpumask_t *mask; struct coresight_device *sink = NULL; + struct coresight_device *user_sink = NULL, *last_sink = NULL; struct etm_event_data *event_data = NULL; event_data = alloc_event_data(cpu); @@ -249,7 +288,7 @@ static void *etm_setup_aux(struct perf_event *event, void **pages, /* First get the selected sink from user space. */ if (event->attr.config2) { id = (u32)event->attr.config2; - sink = coresight_get_sink_by_id(id); + sink = user_sink = coresight_get_sink_by_id(id); } mask = &event_data->mask; @@ -277,14 +316,33 @@ static void *etm_setup_aux(struct perf_event *event, void **pages, } /* - * No sink provided - look for a default sink for one of the - * devices. At present we only support topology where all CPUs - * use the same sink [N:1], so only need to find one sink. The - * coresight_build_path later will remove any CPU that does not - * attach to the sink, or if we have not found a sink. + * No sink provided - look for a default sink for all the ETMs, + * where this event can be scheduled. + * We allocate the sink specific buffers only once for this + * event. If the ETMs have different default sink devices, we + * can only use a single "type" of sink as the event can carry + * only one sink specific buffer. Thus we have to make sure + * that the sinks are of the same type and driven by the same + * driver, as the one we allocate the buffer for. As such + * we choose the first sink and check if the remaining ETMs + * have a compatible default sink. We don't trace on a CPU + * if the sink is not compatible. 
*/ - if (!sink) + if (!user_sink) { + /* Find the default sink for this ETM */ sink = coresight_find_default_sink(csdev); + if (!sink) { + cpumask_clear_cpu(cpu, mask); + continue; + } + + /* Check if this sink compatible with the last sink */ + if (last_sink && !sinks_compatible(last_sink, sink)) { + cpumask_clear_cpu(cpu, mask); + continue; + } + last_sink = sink; + } /* * Building a path doesn't enable it, it simply builds a @@ -312,7 +370,12 @@ static void *etm_setup_aux(struct perf_event *event, void **pages, if (!sink_ops(sink)->alloc_buffer || !sink_ops(sink)->free_buffer) goto err; - /* Allocate the sink buffer for this session */ + /* + * Allocate the sink buffer for this session. All the sinks + * where this event can be scheduled are ensured to be of the + * same type. Thus the same sink configuration is used by the + * sinks. + */ event_data->snk_config = sink_ops(sink)->alloc_buffer(sink, event, pages, nr_pages, overwrite); @@ -332,13 +395,18 @@ static void etm_event_start(struct perf_event *event, int flags) { int cpu = smp_processor_id(); struct etm_event_data *event_data; - struct perf_output_handle *handle = this_cpu_ptr(&ctx_handle); + struct etm_ctxt *ctxt = this_cpu_ptr(&etm_ctxt); + struct perf_output_handle *handle = &ctxt->handle; struct coresight_device *sink, *csdev = per_cpu(csdev_src, cpu); struct list_head *path; if (!csdev) goto fail; + /* Have we messed up our tracking ? */ + if (WARN_ON(ctxt->event_data)) + goto fail; + /* * Deal with the ring buffer API and get a handle on the * session's information. @@ -374,6 +442,8 @@ static void etm_event_start(struct perf_event *event, int flags) if (source_ops(csdev)->enable(csdev, event, CS_MODE_PERF)) goto fail_disable_path; + /* Save the event_data for this ETM */ + ctxt->event_data = event_data; out: return; @@ -392,13 +462,30 @@ static void etm_event_stop(struct perf_event *event, int mode) int cpu = smp_processor_id(); unsigned long size; struct coresight_device *sink, *csdev = per_cpu(csdev_src, cpu); - struct perf_output_handle *handle = this_cpu_ptr(&ctx_handle); - struct etm_event_data *event_data = perf_get_aux(handle); + struct etm_ctxt *ctxt = this_cpu_ptr(&etm_ctxt); + struct perf_output_handle *handle = &ctxt->handle; + struct etm_event_data *event_data; struct list_head *path; + /* + * If we still have access to the event_data via handle, + * confirm that we haven't messed up the tracking. + */ + if (handle->event && + WARN_ON(perf_get_aux(handle) != ctxt->event_data)) + return; + + event_data = ctxt->event_data; + /* Clear the event_data as this ETM is stopping the trace. */ + ctxt->event_data = NULL; + if (event->hw.state == PERF_HES_STOPPED) return; + /* We must have a valid event_data for a running event */ + if (WARN_ON(!event_data)) + return; + if (!csdev) return; @@ -416,7 +503,13 @@ static void etm_event_stop(struct perf_event *event, int mode) /* tell the core */ event->hw.state = PERF_HES_STOPPED; - if (mode & PERF_EF_UPDATE) { + /* + * If the handle is not bound to an event anymore + * (e.g, the sink driver was unable to restart the + * handle due to lack of buffer space), we don't + * have to do anything here. 
+ */ + if (handle->event && (mode & PERF_EF_UPDATE)) { if (WARN_ON_ONCE(handle->event != event)) return; diff --git a/drivers/hwtracing/coresight/coresight-etm4x-core.c b/drivers/hwtracing/coresight/coresight-etm4x-core.c index 15016f757828..efb84ced83dd 100644 --- a/drivers/hwtracing/coresight/coresight-etm4x-core.c +++ b/drivers/hwtracing/coresight/coresight-etm4x-core.c @@ -31,6 +31,7 @@ #include <linux/pm_runtime.h> #include <linux/property.h> +#include <asm/barrier.h> #include <asm/sections.h> #include <asm/sysreg.h> #include <asm/local.h> @@ -114,30 +115,91 @@ void etm4x_sysreg_write(u64 val, u32 offset, bool _relaxed, bool _64bit) } } -static void etm4_os_unlock_csa(struct etmv4_drvdata *drvdata, struct csdev_access *csa) +static u64 ete_sysreg_read(u32 offset, bool _relaxed, bool _64bit) { - /* Writing 0 to TRCOSLAR unlocks the trace registers */ - etm4x_relaxed_write32(csa, 0x0, TRCOSLAR); - drvdata->os_unlock = true; + u64 res = 0; + + switch (offset) { + ETE_READ_CASES(res) + default : + pr_warn_ratelimited("ete: trying to read unsupported register @%x\n", + offset); + } + + if (!_relaxed) + __iormb(res); /* Imitate the !relaxed I/O helpers */ + + return res; +} + +static void ete_sysreg_write(u64 val, u32 offset, bool _relaxed, bool _64bit) +{ + if (!_relaxed) + __iowmb(); /* Imitate the !relaxed I/O helpers */ + if (!_64bit) + val &= GENMASK(31, 0); + + switch (offset) { + ETE_WRITE_CASES(val) + default : + pr_warn_ratelimited("ete: trying to write to unsupported register @%x\n", + offset); + } +} + +static void etm_detect_os_lock(struct etmv4_drvdata *drvdata, + struct csdev_access *csa) +{ + u32 oslsr = etm4x_relaxed_read32(csa, TRCOSLSR); + + drvdata->os_lock_model = ETM_OSLSR_OSLM(oslsr); +} + +static void etm_write_os_lock(struct etmv4_drvdata *drvdata, + struct csdev_access *csa, u32 val) +{ + val = !!val; + + switch (drvdata->os_lock_model) { + case ETM_OSLOCK_PRESENT: + etm4x_relaxed_write32(csa, val, TRCOSLAR); + break; + case ETM_OSLOCK_PE: + write_sysreg_s(val, SYS_OSLAR_EL1); + break; + default: + pr_warn_once("CPU%d: Unsupported Trace OSLock model: %x\n", + smp_processor_id(), drvdata->os_lock_model); + fallthrough; + case ETM_OSLOCK_NI: + return; + } isb(); } +static inline void etm4_os_unlock_csa(struct etmv4_drvdata *drvdata, + struct csdev_access *csa) +{ + WARN_ON(drvdata->cpu != smp_processor_id()); + + /* Writing 0 to OS Lock unlocks the trace unit registers */ + etm_write_os_lock(drvdata, csa, 0x0); + drvdata->os_unlock = true; +} + static void etm4_os_unlock(struct etmv4_drvdata *drvdata) { if (!WARN_ON(!drvdata->csdev)) etm4_os_unlock_csa(drvdata, &drvdata->csdev->access); - } static void etm4_os_lock(struct etmv4_drvdata *drvdata) { if (WARN_ON(!drvdata->csdev)) return; - - /* Writing 0x1 to TRCOSLAR locks the trace registers */ - etm4x_relaxed_write32(&drvdata->csdev->access, 0x1, TRCOSLAR); + /* Writing 0x1 to OS Lock locks the trace registers */ + etm_write_os_lock(drvdata, &drvdata->csdev->access, 0x1); drvdata->os_unlock = false; - isb(); } static void etm4_cs_lock(struct etmv4_drvdata *drvdata, @@ -371,6 +433,13 @@ static int etm4_enable_hw(struct etmv4_drvdata *drvdata) etm4x_relaxed_write32(csa, trcpdcr | TRCPDCR_PU, TRCPDCR); } + /* + * ETE mandates that the TRCRSR is written to before + * enabling it. 
+ */ + if (etm4x_is_ete(drvdata)) + etm4x_relaxed_write32(csa, TRCRSR_TA, TRCRSR); + /* Enable the trace unit */ etm4x_relaxed_write32(csa, 1, TRCPRGCTLR); @@ -654,6 +723,7 @@ static int etm4_enable(struct coresight_device *csdev, static void etm4_disable_hw(void *info) { u32 control; + u64 trfcr; struct etmv4_drvdata *drvdata = info; struct etmv4_config *config = &drvdata->config; struct coresight_device *csdev = drvdata->csdev; @@ -677,18 +747,32 @@ static void etm4_disable_hw(void *info) control &= ~0x1; /* + * If the CPU supports v8.4 Trace filter Control, + * set the ETM to trace prohibited region. + */ + if (drvdata->trfc) { + trfcr = read_sysreg_s(SYS_TRFCR_EL1); + write_sysreg_s(trfcr & ~(TRFCR_ELx_ExTRE | TRFCR_ELx_E0TRE), + SYS_TRFCR_EL1); + isb(); + } + /* * Make sure everything completes before disabling, as recommended * by section 7.3.77 ("TRCVICTLR, ViewInst Main Control Register, * SSTATUS") of ARM IHI 0064D */ dsb(sy); isb(); + /* Trace synchronization barrier, is a nop if not supported */ + tsb_csync(); etm4x_relaxed_write32(csa, control, TRCPRGCTLR); /* wait for TRCSTATR.PMSTABLE to go to '1' */ if (coresight_timeout(csa, TRCSTATR, TRCSTATR_PMSTABLE_BIT, 1)) dev_err(etm_dev, "timeout while waiting for PM stable Trace Status\n"); + if (drvdata->trfc) + write_sysreg_s(trfcr, SYS_TRFCR_EL1); /* read the status of the single shot comparators */ for (i = 0; i < drvdata->nr_ss_cmp; i++) { @@ -817,13 +901,24 @@ static bool etm4_init_sysreg_access(struct etmv4_drvdata *drvdata, * ETMs implementing sysreg access must implement TRCDEVARCH. */ devarch = read_etm4x_sysreg_const_offset(TRCDEVARCH); - if ((devarch & ETM_DEVARCH_ID_MASK) != ETM_DEVARCH_ETMv4x_ARCH) + switch (devarch & ETM_DEVARCH_ID_MASK) { + case ETM_DEVARCH_ETMv4x_ARCH: + *csa = (struct csdev_access) { + .io_mem = false, + .read = etm4x_sysreg_read, + .write = etm4x_sysreg_write, + }; + break; + case ETM_DEVARCH_ETE_ARCH: + *csa = (struct csdev_access) { + .io_mem = false, + .read = ete_sysreg_read, + .write = ete_sysreg_write, + }; + break; + default: return false; - *csa = (struct csdev_access) { - .io_mem = false, - .read = etm4x_sysreg_read, - .write = etm4x_sysreg_write, - }; + } drvdata->arch = etm_devarch_to_arch(devarch); return true; @@ -873,7 +968,7 @@ static bool etm4_init_csdev_access(struct etmv4_drvdata *drvdata, return false; } -static void cpu_enable_tracing(void) +static void cpu_enable_tracing(struct etmv4_drvdata *drvdata) { u64 dfr0 = read_sysreg(id_aa64dfr0_el1); u64 trfcr; @@ -881,6 +976,7 @@ static void cpu_enable_tracing(void) if (!cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_TRACE_FILT_SHIFT)) return; + drvdata->trfc = true; /* * If the CPU supports v8.4 SelfHosted Tracing, enable * tracing at the kernel EL and EL0, forcing to use the @@ -920,6 +1016,9 @@ static void etm4_init_arch_data(void *info) if (!etm4_init_csdev_access(drvdata, csa)) return; + /* Detect the support for OS Lock before we actually use it */ + etm_detect_os_lock(drvdata, csa); + /* Make sure all registers are accessible */ etm4_os_unlock_csa(drvdata, csa); etm4_cs_unlock(drvdata, csa); @@ -1082,7 +1181,7 @@ static void etm4_init_arch_data(void *info) /* NUMCNTR, bits[30:28] number of counters available for tracing */ drvdata->nr_cntr = BMVAL(etmidr5, 28, 30); etm4_cs_lock(drvdata, csa); - cpu_enable_tracing(); + cpu_enable_tracing(drvdata); } static inline u32 etm4_get_victlr_access_type(struct etmv4_config *config) @@ -1760,6 +1859,8 @@ static int etm4_probe(struct device *dev, void __iomem *base, u32 etm_pid) 
struct etmv4_drvdata *drvdata; struct coresight_desc desc = { 0 }; struct etm4_init_arg init_arg = { 0 }; + u8 major, minor; + char *type_name; drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL); if (!drvdata) @@ -1786,10 +1887,6 @@ static int etm4_probe(struct device *dev, void __iomem *base, u32 etm_pid) if (drvdata->cpu < 0) return drvdata->cpu; - desc.name = devm_kasprintf(dev, GFP_KERNEL, "etm%d", drvdata->cpu); - if (!desc.name) - return -ENOMEM; - init_arg.drvdata = drvdata; init_arg.csa = &desc.access; init_arg.pid = etm_pid; @@ -1806,6 +1903,22 @@ static int etm4_probe(struct device *dev, void __iomem *base, u32 etm_pid) fwnode_property_present(dev_fwnode(dev), "qcom,skip-power-up")) drvdata->skip_power_up = true; + major = ETM_ARCH_MAJOR_VERSION(drvdata->arch); + minor = ETM_ARCH_MINOR_VERSION(drvdata->arch); + + if (etm4x_is_ete(drvdata)) { + type_name = "ete"; + /* ETE v1 has major version == 0b101. Adjust this for logging.*/ + major -= 4; + } else { + type_name = "etm"; + } + + desc.name = devm_kasprintf(dev, GFP_KERNEL, + "%s%d", type_name, drvdata->cpu); + if (!desc.name) + return -ENOMEM; + etm4_init_trace_id(drvdata); etm4_set_default(&drvdata->config); @@ -1833,9 +1946,8 @@ static int etm4_probe(struct device *dev, void __iomem *base, u32 etm_pid) etmdrvdata[drvdata->cpu] = drvdata; - dev_info(&drvdata->csdev->dev, "CPU%d: ETM v%d.%d initialized\n", - drvdata->cpu, ETM_ARCH_MAJOR_VERSION(drvdata->arch), - ETM_ARCH_MINOR_VERSION(drvdata->arch)); + dev_info(&drvdata->csdev->dev, "CPU%d: %s v%d.%d initialized\n", + drvdata->cpu, type_name, major, minor); if (boot_enable) { coresight_enable(drvdata->csdev); @@ -1978,6 +2090,7 @@ static struct amba_driver etm4x_amba_driver = { static const struct of_device_id etm4_sysreg_match[] = { { .compatible = "arm,coresight-etm4x-sysreg" }, + { .compatible = "arm,embedded-trace-extension" }, {} }; diff --git a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c index 0995a10790f4..007bad9e7ad8 100644 --- a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c +++ b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c @@ -2374,12 +2374,20 @@ static inline bool etm4x_register_implemented(struct etmv4_drvdata *drvdata, u32 offset) { switch (offset) { - ETM4x_SYSREG_LIST_CASES + ETM_COMMON_SYSREG_LIST_CASES /* - * Registers accessible via system instructions are always - * implemented. + * Common registers to ETE & ETM4x accessible via system + * instructions are always implemented. */ return true; + + ETM4x_ONLY_SYSREG_LIST_CASES + /* + * We only support etm4x and ete. So if the device is not + * ETE, it must be ETMv4x. + */ + return !etm4x_is_ete(drvdata); + ETM4x_MMAP_LIST_CASES /* * Registers accessible only via memory-mapped registers @@ -2389,8 +2397,13 @@ etm4x_register_implemented(struct etmv4_drvdata *drvdata, u32 offset) * coresight_register() and the csdev is not initialized * until that is done. So rely on the drvdata->base to * detect if we have a memory mapped access. + * Also ETE doesn't implement memory mapped access, thus + * it is sufficient to check that we are using mmio. 
*/ return !!drvdata->base; + + ETE_ONLY_SYSREG_LIST_CASES + return etm4x_is_ete(drvdata); } return false; diff --git a/drivers/hwtracing/coresight/coresight-etm4x.h b/drivers/hwtracing/coresight/coresight-etm4x.h index 0af60571aa23..e5b79bdb9851 100644 --- a/drivers/hwtracing/coresight/coresight-etm4x.h +++ b/drivers/hwtracing/coresight/coresight-etm4x.h @@ -29,6 +29,7 @@ #define TRCAUXCTLR 0x018 #define TRCEVENTCTL0R 0x020 #define TRCEVENTCTL1R 0x024 +#define TRCRSR 0x028 #define TRCSTALLCTLR 0x02C #define TRCTSCTLR 0x030 #define TRCSYNCPR 0x034 @@ -49,6 +50,7 @@ #define TRCSEQRSTEVR 0x118 #define TRCSEQSTR 0x11C #define TRCEXTINSELR 0x120 +#define TRCEXTINSELRn(n) (0x120 + (n * 4)) /* n = 0-3 */ #define TRCCNTRLDVRn(n) (0x140 + (n * 4)) /* n = 0-3 */ #define TRCCNTCTLRn(n) (0x150 + (n * 4)) /* n = 0-3 */ #define TRCCNTVRn(n) (0x160 + (n * 4)) /* n = 0-3 */ @@ -126,6 +128,8 @@ #define TRCCIDR2 0xFF8 #define TRCCIDR3 0xFFC +#define TRCRSR_TA BIT(12) + /* * System instructions to access ETM registers. * See ETMv4.4 spec ARM IHI0064F section 4.3.6 System instructions @@ -160,10 +164,22 @@ #define CASE_NOP(__unused, x) \ case (x): /* fall through */ +#define ETE_ONLY_SYSREG_LIST(op, val) \ + CASE_##op((val), TRCRSR) \ + CASE_##op((val), TRCEXTINSELRn(1)) \ + CASE_##op((val), TRCEXTINSELRn(2)) \ + CASE_##op((val), TRCEXTINSELRn(3)) + /* List of registers accessible via System instructions */ -#define ETM_SYSREG_LIST(op, val) \ - CASE_##op((val), TRCPRGCTLR) \ +#define ETM4x_ONLY_SYSREG_LIST(op, val) \ CASE_##op((val), TRCPROCSELR) \ + CASE_##op((val), TRCVDCTLR) \ + CASE_##op((val), TRCVDSACCTLR) \ + CASE_##op((val), TRCVDARCCTLR) \ + CASE_##op((val), TRCOSLAR) + +#define ETM_COMMON_SYSREG_LIST(op, val) \ + CASE_##op((val), TRCPRGCTLR) \ CASE_##op((val), TRCSTATR) \ CASE_##op((val), TRCCONFIGR) \ CASE_##op((val), TRCAUXCTLR) \ @@ -180,9 +196,6 @@ CASE_##op((val), TRCVIIECTLR) \ CASE_##op((val), TRCVISSCTLR) \ CASE_##op((val), TRCVIPCSSCTLR) \ - CASE_##op((val), TRCVDCTLR) \ - CASE_##op((val), TRCVDSACCTLR) \ - CASE_##op((val), TRCVDARCCTLR) \ CASE_##op((val), TRCSEQEVRn(0)) \ CASE_##op((val), TRCSEQEVRn(1)) \ CASE_##op((val), TRCSEQEVRn(2)) \ @@ -277,7 +290,6 @@ CASE_##op((val), TRCSSPCICRn(5)) \ CASE_##op((val), TRCSSPCICRn(6)) \ CASE_##op((val), TRCSSPCICRn(7)) \ - CASE_##op((val), TRCOSLAR) \ CASE_##op((val), TRCOSLSR) \ CASE_##op((val), TRCACVRn(0)) \ CASE_##op((val), TRCACVRn(1)) \ @@ -369,12 +381,38 @@ CASE_##op((val), TRCPIDR2) \ CASE_##op((val), TRCPIDR3) -#define ETM4x_READ_SYSREG_CASES(res) ETM_SYSREG_LIST(READ, (res)) -#define ETM4x_WRITE_SYSREG_CASES(val) ETM_SYSREG_LIST(WRITE, (val)) +#define ETM4x_READ_SYSREG_CASES(res) \ + ETM_COMMON_SYSREG_LIST(READ, (res)) \ + ETM4x_ONLY_SYSREG_LIST(READ, (res)) + +#define ETM4x_WRITE_SYSREG_CASES(val) \ + ETM_COMMON_SYSREG_LIST(WRITE, (val)) \ + ETM4x_ONLY_SYSREG_LIST(WRITE, (val)) + +#define ETM_COMMON_SYSREG_LIST_CASES \ + ETM_COMMON_SYSREG_LIST(NOP, __unused) + +#define ETM4x_ONLY_SYSREG_LIST_CASES \ + ETM4x_ONLY_SYSREG_LIST(NOP, __unused) + +#define ETM4x_SYSREG_LIST_CASES \ + ETM_COMMON_SYSREG_LIST_CASES \ + ETM4x_ONLY_SYSREG_LIST(NOP, __unused) -#define ETM4x_SYSREG_LIST_CASES ETM_SYSREG_LIST(NOP, __unused) #define ETM4x_MMAP_LIST_CASES ETM_MMAP_LIST(NOP, __unused) +/* ETE only supports system register access */ +#define ETE_READ_CASES(res) \ + ETM_COMMON_SYSREG_LIST(READ, (res)) \ + ETE_ONLY_SYSREG_LIST(READ, (res)) + +#define ETE_WRITE_CASES(val) \ + ETM_COMMON_SYSREG_LIST(WRITE, (val)) \ + ETE_ONLY_SYSREG_LIST(WRITE, (val)) + 
+#define ETE_ONLY_SYSREG_LIST_CASES \ + ETE_ONLY_SYSREG_LIST(NOP, __unused) + #define read_etm4x_sysreg_offset(offset, _64bit) \ ({ \ u64 __val; \ @@ -506,6 +544,20 @@ ETM_MODE_EXCL_USER) /* + * TRCOSLSR.OSLM advertises the OS Lock model. + * OSLM[2:0] = TRCOSLSR[4:3,0] + * + * 0b000 - Trace OS Lock is not implemented. + * 0b010 - Trace OS Lock is implemented. + * 0b100 - Trace OS Lock is not implemented, unit is controlled by PE OS Lock. + */ +#define ETM_OSLOCK_NI 0b000 +#define ETM_OSLOCK_PRESENT 0b010 +#define ETM_OSLOCK_PE 0b100 + +#define ETM_OSLSR_OSLM(oslsr) ((((oslsr) & GENMASK(4, 3)) >> 2) | (oslsr & 0x1)) + +/* * TRCDEVARCH Bit field definitions * Bits[31:21] - ARCHITECT = Always Arm Ltd. * * Bits[31:28] = 0x4 @@ -541,11 +593,14 @@ ((ETM_DEVARCH_MAKE_ARCHID_ARCH_VER(major)) | ETM_DEVARCH_ARCHID_ARCH_PART(0xA13)) #define ETM_DEVARCH_ARCHID_ETMv4x ETM_DEVARCH_MAKE_ARCHID(0x4) +#define ETM_DEVARCH_ARCHID_ETE ETM_DEVARCH_MAKE_ARCHID(0x5) #define ETM_DEVARCH_ID_MASK \ (ETM_DEVARCH_ARCHITECT_MASK | ETM_DEVARCH_ARCHID_MASK | ETM_DEVARCH_PRESENT) #define ETM_DEVARCH_ETMv4x_ARCH \ (ETM_DEVARCH_ARCHITECT_ARM | ETM_DEVARCH_ARCHID_ETMv4x | ETM_DEVARCH_PRESENT) +#define ETM_DEVARCH_ETE_ARCH \ + (ETM_DEVARCH_ARCHITECT_ARM | ETM_DEVARCH_ARCHID_ETE | ETM_DEVARCH_PRESENT) #define TRCSTATR_IDLE_BIT 0 #define TRCSTATR_PMSTABLE_BIT 1 @@ -635,6 +690,8 @@ #define ETM_ARCH_MINOR_VERSION(arch) ((arch) & 0xfU) #define ETM_ARCH_V4 ETM_ARCH_VERSION(4, 0) +#define ETM_ARCH_ETE ETM_ARCH_VERSION(5, 0) + /* Interpretation of resource numbers change at ETM v4.3 architecture */ #define ETM_ARCH_V4_3 ETM_ARCH_VERSION(4, 3) @@ -862,6 +919,7 @@ struct etmv4_save_state { * @nooverflow: Indicate if overflow prevention is supported. * @atbtrig: If the implementation can support ATB triggers * @lpoverride: If the implementation can support low-power state over. + * @trfc: If the implementation supports Arm v8.4 trace filter controls. * @config: structure holding configuration parameters. 
* @save_state: State to be preserved across power loss * @state_needs_restore: True when there is context to restore after PM exit @@ -897,6 +955,7 @@ struct etmv4_drvdata { u8 s_ex_level; u8 ns_ex_level; u8 q_support; + u8 os_lock_model; bool sticky_enable; bool boot_enable; bool os_unlock; @@ -912,6 +971,7 @@ struct etmv4_drvdata { bool nooverflow; bool atbtrig; bool lpoverride; + bool trfc; struct etmv4_config config; struct etmv4_save_state *save_state; bool state_needs_restore; @@ -940,4 +1000,9 @@ void etm4_config_trace_mode(struct etmv4_config *config); u64 etm4x_sysreg_read(u32 offset, bool _relaxed, bool _64bit); void etm4x_sysreg_write(u64 val, u32 offset, bool _relaxed, bool _64bit); + +static inline bool etm4x_is_ete(struct etmv4_drvdata *drvdata) +{ + return drvdata->arch >= ETM_ARCH_ETE; +} #endif diff --git a/drivers/hwtracing/coresight/coresight-platform.c b/drivers/hwtracing/coresight/coresight-platform.c index 3629b7885aca..c594f45319fc 100644 --- a/drivers/hwtracing/coresight/coresight-platform.c +++ b/drivers/hwtracing/coresight/coresight-platform.c @@ -90,6 +90,12 @@ static void of_coresight_get_ports_legacy(const struct device_node *node, struct of_endpoint endpoint; int in = 0, out = 0; + /* + * Avoid warnings in of_graph_get_next_endpoint() + * if the device doesn't have any graph connections + */ + if (!of_graph_is_present(node)) + return; do { ep = of_graph_get_next_endpoint(node, ep); if (!ep) diff --git a/drivers/hwtracing/coresight/coresight-priv.h b/drivers/hwtracing/coresight/coresight-priv.h index f5f654ea2994..ff1dd2092ac5 100644 --- a/drivers/hwtracing/coresight/coresight-priv.h +++ b/drivers/hwtracing/coresight/coresight-priv.h @@ -232,4 +232,7 @@ coresight_find_csdev_by_fwnode(struct fwnode_handle *r_fwnode); void coresight_set_assoc_ectdev_mutex(struct coresight_device *csdev, struct coresight_device *ect_csdev); +void coresight_set_percpu_sink(int cpu, struct coresight_device *csdev); +struct coresight_device *coresight_get_percpu_sink(int cpu); + #endif diff --git a/drivers/hwtracing/coresight/coresight-trbe.c b/drivers/hwtracing/coresight/coresight-trbe.c new file mode 100644 index 000000000000..176868496879 --- /dev/null +++ b/drivers/hwtracing/coresight/coresight-trbe.c @@ -0,0 +1,1157 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * This driver enables Trace Buffer Extension (TRBE) as a per-cpu coresight + * sink device could then pair with an appropriate per-cpu coresight source + * device (ETE) thus generating required trace data. Trace can be enabled + * via the perf framework. + * + * The AUX buffer handling is inspired from Arm SPE PMU driver. + * + * Copyright (C) 2020 ARM Ltd. + * + * Author: Anshuman Khandual <anshuman.khandual@arm.com> + */ +#define DRVNAME "arm_trbe" + +#define pr_fmt(fmt) DRVNAME ": " fmt + +#include <asm/barrier.h> +#include "coresight-trbe.h" + +#define PERF_IDX2OFF(idx, buf) ((idx) % ((buf)->nr_pages << PAGE_SHIFT)) + +/* + * A padding packet that will help the user space tools + * in skipping relevant sections in the captured trace + * data which could not be decoded. TRBE doesn't support + * formatting the trace data, unlike the legacy CoreSight + * sinks and thus we use ETE trace packets to pad the + * sections of the buffer. + */ +#define ETE_IGNORE_PACKET 0x70 + +/* + * Minimum amount of meaningful trace will contain: + * A-Sync, Trace Info, Trace On, Address, Atom. + * This is about 44bytes of ETE trace. 
To be on + * the safer side, we assume 64bytes is the minimum + * space required for a meaningful session, before + * we hit a "WRAP" event. + */ +#define TRBE_TRACE_MIN_BUF_SIZE 64 + +enum trbe_fault_action { + TRBE_FAULT_ACT_WRAP, + TRBE_FAULT_ACT_SPURIOUS, + TRBE_FAULT_ACT_FATAL, +}; + +struct trbe_buf { + /* + * Even though trbe_base represents vmap() + * mapped allocated buffer's start address, + * it's being as unsigned long for various + * arithmetic and comparision operations & + * also to be consistent with trbe_write & + * trbe_limit sibling pointers. + */ + unsigned long trbe_base; + unsigned long trbe_limit; + unsigned long trbe_write; + int nr_pages; + void **pages; + bool snapshot; + struct trbe_cpudata *cpudata; +}; + +struct trbe_cpudata { + bool trbe_flag; + u64 trbe_align; + int cpu; + enum cs_mode mode; + struct trbe_buf *buf; + struct trbe_drvdata *drvdata; +}; + +struct trbe_drvdata { + struct trbe_cpudata __percpu *cpudata; + struct perf_output_handle * __percpu *handle; + struct hlist_node hotplug_node; + int irq; + cpumask_t supported_cpus; + enum cpuhp_state trbe_online; + struct platform_device *pdev; +}; + +static int trbe_alloc_node(struct perf_event *event) +{ + if (event->cpu == -1) + return NUMA_NO_NODE; + return cpu_to_node(event->cpu); +} + +static void trbe_drain_buffer(void) +{ + tsb_csync(); + dsb(nsh); +} + +static void trbe_drain_and_disable_local(void) +{ + u64 trblimitr = read_sysreg_s(SYS_TRBLIMITR_EL1); + + trbe_drain_buffer(); + + /* + * Disable the TRBE without clearing LIMITPTR which + * might be required for fetching the buffer limits. + */ + trblimitr &= ~TRBLIMITR_ENABLE; + write_sysreg_s(trblimitr, SYS_TRBLIMITR_EL1); + isb(); +} + +static void trbe_reset_local(void) +{ + trbe_drain_and_disable_local(); + write_sysreg_s(0, SYS_TRBLIMITR_EL1); + write_sysreg_s(0, SYS_TRBPTR_EL1); + write_sysreg_s(0, SYS_TRBBASER_EL1); + write_sysreg_s(0, SYS_TRBSR_EL1); +} + +static void trbe_stop_and_truncate_event(struct perf_output_handle *handle) +{ + struct trbe_buf *buf = etm_perf_sink_config(handle); + + /* + * We cannot proceed with the buffer collection and we + * do not have any data for the current session. The + * etm_perf driver expects to close out the aux_buffer + * at event_stop(). So disable the TRBE here and leave + * the update_buffer() to return a 0 size. + */ + trbe_drain_and_disable_local(); + perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED); + *this_cpu_ptr(buf->cpudata->drvdata->handle) = NULL; +} + +/* + * TRBE Buffer Management + * + * The TRBE buffer spans from the base pointer till the limit pointer. When enabled, + * it starts writing trace data from the write pointer onward till the limit pointer. + * When the write pointer reaches the address just before the limit pointer, it gets + * wrapped around again to the base pointer. This is called a TRBE wrap event, which + * generates a maintenance interrupt when operated in WRAP or FILL mode. This driver + * uses FILL mode, where the TRBE stops the trace collection at wrap event. The IRQ + * handler updates the AUX buffer and re-enables the TRBE with updated WRITE and + * LIMIT pointers. + * + * Wrap around with an IRQ + * ------ < ------ < ------- < ----- < ----- + * | | + * ------ > ------ > ------- > ----- > ----- + * + * +---------------+-----------------------+ + * | | | + * +---------------+-----------------------+ + * Base Pointer Write Pointer Limit Pointer + * + * The base and limit pointers always needs to be PAGE_SIZE aligned. 
But the write + * pointer can be aligned to the implementation defined TRBE trace buffer alignment + * as captured in trbe_cpudata->trbe_align. + * + * + * head tail wakeup + * +---------------------------------------+----- ~ ~ ------ + * |$$$$$$$|################|$$$$$$$$$$$$$$| | + * +---------------------------------------+----- ~ ~ ------ + * Base Pointer Write Pointer Limit Pointer + * + * The perf_output_handle indices (head, tail, wakeup) are monotonically increasing + * values which tracks all the driver writes and user reads from the perf auxiliary + * buffer. Generally [head..tail] is the area where the driver can write into unless + * the wakeup is behind the tail. Enabled TRBE buffer span needs to be adjusted and + * configured depending on the perf_output_handle indices, so that the driver does + * not override into areas in the perf auxiliary buffer which is being or yet to be + * consumed from the user space. The enabled TRBE buffer area is a moving subset of + * the allocated perf auxiliary buffer. + */ +static void trbe_pad_buf(struct perf_output_handle *handle, int len) +{ + struct trbe_buf *buf = etm_perf_sink_config(handle); + u64 head = PERF_IDX2OFF(handle->head, buf); + + memset((void *)buf->trbe_base + head, ETE_IGNORE_PACKET, len); + if (!buf->snapshot) + perf_aux_output_skip(handle, len); +} + +static unsigned long trbe_snapshot_offset(struct perf_output_handle *handle) +{ + struct trbe_buf *buf = etm_perf_sink_config(handle); + + /* + * The ETE trace has alignment synchronization packets allowing + * the decoder to reset in case of an overflow or corruption. + * So we can use the entire buffer for the snapshot mode. + */ + return buf->nr_pages * PAGE_SIZE; +} + +/* + * TRBE Limit Calculation + * + * The following markers are used to illustrate various TRBE buffer situations. + * + * $$$$ - Data area, unconsumed captured trace data, not to be overridden + * #### - Free area, enabled, trace will be written + * %%%% - Free area, disabled, trace will not be written + * ==== - Free area, padded with ETE_IGNORE_PACKET, trace will be skipped + */ +static unsigned long __trbe_normal_offset(struct perf_output_handle *handle) +{ + struct trbe_buf *buf = etm_perf_sink_config(handle); + struct trbe_cpudata *cpudata = buf->cpudata; + const u64 bufsize = buf->nr_pages * PAGE_SIZE; + u64 limit = bufsize; + u64 head, tail, wakeup; + + head = PERF_IDX2OFF(handle->head, buf); + + /* + * head + * ------->| + * | + * head TRBE align tail + * +----|-------|---------------|-------+ + * |$$$$|=======|###############|$$$$$$$| + * +----|-------|---------------|-------+ + * trbe_base trbe_base + nr_pages + * + * Perf aux buffer output head position can be misaligned depending on + * various factors including user space reads. In case misaligned, head + * needs to be aligned before TRBE can be configured. Pad the alignment + * gap with ETE_IGNORE_PACKET bytes that will be ignored by user tools + * and skip this section thus advancing the head. + */ + if (!IS_ALIGNED(head, cpudata->trbe_align)) { + unsigned long delta = roundup(head, cpudata->trbe_align) - head; + + delta = min(delta, handle->size); + trbe_pad_buf(handle, delta); + head = PERF_IDX2OFF(handle->head, buf); + } + + /* + * head = tail (size = 0) + * +----|-------------------------------+ + * |$$$$|$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ | + * +----|-------------------------------+ + * trbe_base trbe_base + nr_pages + * + * Perf aux buffer does not have any space for the driver to write into. 
+ * Just communicate trace truncation event to the user space by marking + * it with PERF_AUX_FLAG_TRUNCATED. + */ + if (!handle->size) { + perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED); + return 0; + } + + /* Compute the tail and wakeup indices now that we've aligned head */ + tail = PERF_IDX2OFF(handle->head + handle->size, buf); + wakeup = PERF_IDX2OFF(handle->wakeup, buf); + + /* + * Lets calculate the buffer area which TRBE could write into. There + * are three possible scenarios here. Limit needs to be aligned with + * PAGE_SIZE per the TRBE requirement. Always avoid clobbering the + * unconsumed data. + * + * 1) head < tail + * + * head tail + * +----|-----------------------|-------+ + * |$$$$|#######################|$$$$$$$| + * +----|-----------------------|-------+ + * trbe_base limit trbe_base + nr_pages + * + * TRBE could write into [head..tail] area. Unless the tail is right at + * the end of the buffer, neither an wrap around nor an IRQ is expected + * while being enabled. + * + * 2) head == tail + * + * head = tail (size > 0) + * +----|-------------------------------+ + * |%%%%|###############################| + * +----|-------------------------------+ + * trbe_base limit = trbe_base + nr_pages + * + * TRBE should just write into [head..base + nr_pages] area even though + * the entire buffer is empty. Reason being, when the trace reaches the + * end of the buffer, it will just wrap around with an IRQ giving an + * opportunity to reconfigure the buffer. + * + * 3) tail < head + * + * tail head + * +----|-----------------------|-------+ + * |%%%%|$$$$$$$$$$$$$$$$$$$$$$$|#######| + * +----|-----------------------|-------+ + * trbe_base limit = trbe_base + nr_pages + * + * TRBE should just write into [head..base + nr_pages] area even though + * the [trbe_base..tail] is also empty. Reason being, when the trace + * reaches the end of the buffer, it will just wrap around with an IRQ + * giving an opportunity to reconfigure the buffer. + */ + if (head < tail) + limit = round_down(tail, PAGE_SIZE); + + /* + * Wakeup may be arbitrarily far into the future. If it's not in the + * current generation, either we'll wrap before hitting it, or it's + * in the past and has been handled already. + * + * If there's a wakeup before we wrap, arrange to be woken up by the + * page boundary following it. Keep the tail boundary if that's lower. + * + * head wakeup tail + * +----|---------------|-------|-------+ + * |$$$$|###############|%%%%%%%|$$$$$$$| + * +----|---------------|-------|-------+ + * trbe_base limit trbe_base + nr_pages + */ + if (handle->wakeup < (handle->head + handle->size) && head <= wakeup) + limit = min(limit, round_up(wakeup, PAGE_SIZE)); + + /* + * There are two situation when this can happen i.e limit is before + * the head and hence TRBE cannot be configured. + * + * 1) head < tail (aligned down with PAGE_SIZE) and also they are both + * within the same PAGE size range. + * + * PAGE_SIZE + * |----------------------| + * + * limit head tail + * +------------|------|--------|-------+ + * |$$$$$$$$$$$$$$$$$$$|========|$$$$$$$| + * +------------|------|--------|-------+ + * trbe_base trbe_base + nr_pages + * + * 2) head < wakeup (aligned up with PAGE_SIZE) < tail and also both + * head and wakeup are within same PAGE size range. 
+ * + * PAGE_SIZE + * |----------------------| + * + * limit head wakeup tail + * +----|------|-------|--------|-------+ + * |$$$$$$$$$$$|=======|========|$$$$$$$| + * +----|------|-------|--------|-------+ + * trbe_base trbe_base + nr_pages + */ + if (limit > head) + return limit; + + trbe_pad_buf(handle, handle->size); + perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED); + return 0; +} + +static unsigned long trbe_normal_offset(struct perf_output_handle *handle) +{ + struct trbe_buf *buf = perf_get_aux(handle); + u64 limit = __trbe_normal_offset(handle); + u64 head = PERF_IDX2OFF(handle->head, buf); + + /* + * If the head is too close to the limit and we don't + * have space for a meaningful run, we rather pad it + * and start fresh. + */ + if (limit && (limit - head < TRBE_TRACE_MIN_BUF_SIZE)) { + trbe_pad_buf(handle, limit - head); + limit = __trbe_normal_offset(handle); + } + return limit; +} + +static unsigned long compute_trbe_buffer_limit(struct perf_output_handle *handle) +{ + struct trbe_buf *buf = etm_perf_sink_config(handle); + unsigned long offset; + + if (buf->snapshot) + offset = trbe_snapshot_offset(handle); + else + offset = trbe_normal_offset(handle); + return buf->trbe_base + offset; +} + +static void clr_trbe_status(void) +{ + u64 trbsr = read_sysreg_s(SYS_TRBSR_EL1); + + WARN_ON(is_trbe_enabled()); + trbsr &= ~TRBSR_IRQ; + trbsr &= ~TRBSR_TRG; + trbsr &= ~TRBSR_WRAP; + trbsr &= ~(TRBSR_EC_MASK << TRBSR_EC_SHIFT); + trbsr &= ~(TRBSR_BSC_MASK << TRBSR_BSC_SHIFT); + trbsr &= ~TRBSR_STOP; + write_sysreg_s(trbsr, SYS_TRBSR_EL1); +} + +static void set_trbe_limit_pointer_enabled(unsigned long addr) +{ + u64 trblimitr = read_sysreg_s(SYS_TRBLIMITR_EL1); + + WARN_ON(!IS_ALIGNED(addr, (1UL << TRBLIMITR_LIMIT_SHIFT))); + WARN_ON(!IS_ALIGNED(addr, PAGE_SIZE)); + + trblimitr &= ~TRBLIMITR_NVM; + trblimitr &= ~(TRBLIMITR_FILL_MODE_MASK << TRBLIMITR_FILL_MODE_SHIFT); + trblimitr &= ~(TRBLIMITR_TRIG_MODE_MASK << TRBLIMITR_TRIG_MODE_SHIFT); + trblimitr &= ~(TRBLIMITR_LIMIT_MASK << TRBLIMITR_LIMIT_SHIFT); + + /* + * Fill trace buffer mode is used here while configuring the + * TRBE for trace capture. In this particular mode, the trace + * collection is stopped and a maintenance interrupt is raised + * when the current write pointer wraps. This pause in trace + * collection gives the software an opportunity to capture the + * trace data in the interrupt handler, before reconfiguring + * the TRBE. + */ + trblimitr |= (TRBE_FILL_MODE_FILL & TRBLIMITR_FILL_MODE_MASK) << TRBLIMITR_FILL_MODE_SHIFT; + + /* + * Trigger mode is not used here while configuring the TRBE for + * the trace capture. Hence just keep this in the ignore mode. + */ + trblimitr |= (TRBE_TRIG_MODE_IGNORE & TRBLIMITR_TRIG_MODE_MASK) << + TRBLIMITR_TRIG_MODE_SHIFT; + trblimitr |= (addr & PAGE_MASK); + + trblimitr |= TRBLIMITR_ENABLE; + write_sysreg_s(trblimitr, SYS_TRBLIMITR_EL1); + + /* Synchronize the TRBE enable event */ + isb(); +} + +static void trbe_enable_hw(struct trbe_buf *buf) +{ + WARN_ON(buf->trbe_write < buf->trbe_base); + WARN_ON(buf->trbe_write >= buf->trbe_limit); + set_trbe_disabled(); + isb(); + clr_trbe_status(); + set_trbe_base_pointer(buf->trbe_base); + set_trbe_write_pointer(buf->trbe_write); + + /* + * Synchronize all the register updates + * till now before enabling the TRBE. 
+ */ + isb(); + set_trbe_limit_pointer_enabled(buf->trbe_limit); +} + +static enum trbe_fault_action trbe_get_fault_act(u64 trbsr) +{ + int ec = get_trbe_ec(trbsr); + int bsc = get_trbe_bsc(trbsr); + + WARN_ON(is_trbe_running(trbsr)); + if (is_trbe_trg(trbsr) || is_trbe_abort(trbsr)) + return TRBE_FAULT_ACT_FATAL; + + if ((ec == TRBE_EC_STAGE1_ABORT) || (ec == TRBE_EC_STAGE2_ABORT)) + return TRBE_FAULT_ACT_FATAL; + + if (is_trbe_wrap(trbsr) && (ec == TRBE_EC_OTHERS) && (bsc == TRBE_BSC_FILLED)) { + if (get_trbe_write_pointer() == get_trbe_base_pointer()) + return TRBE_FAULT_ACT_WRAP; + } + return TRBE_FAULT_ACT_SPURIOUS; +} + +static void *arm_trbe_alloc_buffer(struct coresight_device *csdev, + struct perf_event *event, void **pages, + int nr_pages, bool snapshot) +{ + struct trbe_buf *buf; + struct page **pglist; + int i; + + /* + * TRBE LIMIT and TRBE WRITE pointers must be page aligned. But with + * just a single page, there would not be any room left while writing + * into a partially filled TRBE buffer after the page size alignment. + * Hence restrict the minimum buffer size as two pages. + */ + if (nr_pages < 2) + return NULL; + + buf = kzalloc_node(sizeof(*buf), GFP_KERNEL, trbe_alloc_node(event)); + if (!buf) + return ERR_PTR(-ENOMEM); + + pglist = kcalloc(nr_pages, sizeof(*pglist), GFP_KERNEL); + if (!pglist) { + kfree(buf); + return ERR_PTR(-ENOMEM); + } + + for (i = 0; i < nr_pages; i++) + pglist[i] = virt_to_page(pages[i]); + + buf->trbe_base = (unsigned long)vmap(pglist, nr_pages, VM_MAP, PAGE_KERNEL); + if (!buf->trbe_base) { + kfree(pglist); + kfree(buf); + return ERR_PTR(-ENOMEM); + } + buf->trbe_limit = buf->trbe_base + nr_pages * PAGE_SIZE; + buf->trbe_write = buf->trbe_base; + buf->snapshot = snapshot; + buf->nr_pages = nr_pages; + buf->pages = pages; + kfree(pglist); + return buf; +} + +static void arm_trbe_free_buffer(void *config) +{ + struct trbe_buf *buf = config; + + vunmap((void *)buf->trbe_base); + kfree(buf); +} + +static unsigned long arm_trbe_update_buffer(struct coresight_device *csdev, + struct perf_output_handle *handle, + void *config) +{ + struct trbe_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); + struct trbe_cpudata *cpudata = dev_get_drvdata(&csdev->dev); + struct trbe_buf *buf = config; + enum trbe_fault_action act; + unsigned long size, offset; + unsigned long write, base, status; + unsigned long flags; + + WARN_ON(buf->cpudata != cpudata); + WARN_ON(cpudata->cpu != smp_processor_id()); + WARN_ON(cpudata->drvdata != drvdata); + if (cpudata->mode != CS_MODE_PERF) + return 0; + + perf_aux_output_flag(handle, PERF_AUX_FLAG_CORESIGHT_FORMAT_RAW); + + /* + * We are about to disable the TRBE. And this could in turn + * fill up the buffer triggering, an IRQ. This could be consumed + * by the PE asynchronously, causing a race here against + * the IRQ handler in closing out the handle. So, let us + * make sure the IRQ can't trigger while we are collecting + * the buffer. We also make sure that a WRAP event is handled + * accordingly. + */ + local_irq_save(flags); + + /* + * If the TRBE was disabled due to lack of space in the AUX buffer or a + * spurious fault, the driver leaves it disabled, truncating the buffer. + * Since the etm_perf driver expects to close out the AUX buffer, the + * driver skips it. Thus, just pass in 0 size here to indicate that the + * buffer was truncated. 
+ */ + if (!is_trbe_enabled()) { + size = 0; + goto done; + } + /* + * perf handle structure needs to be shared with the TRBE IRQ handler for + * capturing trace data and restarting the handle. There is a probability + * of an undefined reference based crash when etm event is being stopped + * while a TRBE IRQ also getting processed. This happens due the release + * of perf handle via perf_aux_output_end() in etm_event_stop(). Stopping + * the TRBE here will ensure that no IRQ could be generated when the perf + * handle gets freed in etm_event_stop(). + */ + trbe_drain_and_disable_local(); + write = get_trbe_write_pointer(); + base = get_trbe_base_pointer(); + + /* Check if there is a pending interrupt and handle it here */ + status = read_sysreg_s(SYS_TRBSR_EL1); + if (is_trbe_irq(status)) { + + /* + * Now that we are handling the IRQ here, clear the IRQ + * from the status, to let the irq handler know that it + * is taken care of. + */ + clr_trbe_irq(); + isb(); + + act = trbe_get_fault_act(status); + /* + * If this was not due to a WRAP event, we have some + * errors and as such buffer is empty. + */ + if (act != TRBE_FAULT_ACT_WRAP) { + size = 0; + goto done; + } + + /* + * Otherwise, the buffer is full and the write pointer + * has reached base. Adjust this back to the Limit pointer + * for correct size. Also, mark the buffer truncated. + */ + write = get_trbe_limit_pointer(); + perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED); + } + + offset = write - base; + if (WARN_ON_ONCE(offset < PERF_IDX2OFF(handle->head, buf))) + size = 0; + else + size = offset - PERF_IDX2OFF(handle->head, buf); + +done: + local_irq_restore(flags); + + if (buf->snapshot) + handle->head += size; + return size; +} + +static int arm_trbe_enable(struct coresight_device *csdev, u32 mode, void *data) +{ + struct trbe_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); + struct trbe_cpudata *cpudata = dev_get_drvdata(&csdev->dev); + struct perf_output_handle *handle = data; + struct trbe_buf *buf = etm_perf_sink_config(handle); + + WARN_ON(cpudata->cpu != smp_processor_id()); + WARN_ON(cpudata->drvdata != drvdata); + if (mode != CS_MODE_PERF) + return -EINVAL; + + *this_cpu_ptr(drvdata->handle) = handle; + cpudata->buf = buf; + cpudata->mode = mode; + buf->cpudata = cpudata; + buf->trbe_limit = compute_trbe_buffer_limit(handle); + buf->trbe_write = buf->trbe_base + PERF_IDX2OFF(handle->head, buf); + if (buf->trbe_limit == buf->trbe_base) { + trbe_stop_and_truncate_event(handle); + return 0; + } + trbe_enable_hw(buf); + return 0; +} + +static int arm_trbe_disable(struct coresight_device *csdev) +{ + struct trbe_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); + struct trbe_cpudata *cpudata = dev_get_drvdata(&csdev->dev); + struct trbe_buf *buf = cpudata->buf; + + WARN_ON(buf->cpudata != cpudata); + WARN_ON(cpudata->cpu != smp_processor_id()); + WARN_ON(cpudata->drvdata != drvdata); + if (cpudata->mode != CS_MODE_PERF) + return -EINVAL; + + trbe_drain_and_disable_local(); + buf->cpudata = NULL; + cpudata->buf = NULL; + cpudata->mode = CS_MODE_DISABLED; + return 0; +} + +static void trbe_handle_spurious(struct perf_output_handle *handle) +{ + struct trbe_buf *buf = etm_perf_sink_config(handle); + + buf->trbe_limit = compute_trbe_buffer_limit(handle); + buf->trbe_write = buf->trbe_base + PERF_IDX2OFF(handle->head, buf); + if (buf->trbe_limit == buf->trbe_base) { + trbe_drain_and_disable_local(); + return; + } + trbe_enable_hw(buf); +} + +static void trbe_handle_overflow(struct perf_output_handle *handle) 
+{ + struct perf_event *event = handle->event; + struct trbe_buf *buf = etm_perf_sink_config(handle); + unsigned long offset, size; + struct etm_event_data *event_data; + + offset = get_trbe_limit_pointer() - get_trbe_base_pointer(); + size = offset - PERF_IDX2OFF(handle->head, buf); + if (buf->snapshot) + handle->head += size; + + /* + * Mark the buffer as truncated, as we have stopped the trace + * collection upon the WRAP event, without stopping the source. + */ + perf_aux_output_flag(handle, PERF_AUX_FLAG_CORESIGHT_FORMAT_RAW | + PERF_AUX_FLAG_TRUNCATED); + perf_aux_output_end(handle, size); + event_data = perf_aux_output_begin(handle, event); + if (!event_data) { + /* + * We are unable to restart the trace collection, + * thus leave the TRBE disabled. The etm-perf driver + * is able to detect this with a disconnected handle + * (handle->event = NULL). + */ + trbe_drain_and_disable_local(); + *this_cpu_ptr(buf->cpudata->drvdata->handle) = NULL; + return; + } + buf->trbe_limit = compute_trbe_buffer_limit(handle); + buf->trbe_write = buf->trbe_base + PERF_IDX2OFF(handle->head, buf); + if (buf->trbe_limit == buf->trbe_base) { + trbe_stop_and_truncate_event(handle); + return; + } + *this_cpu_ptr(buf->cpudata->drvdata->handle) = handle; + trbe_enable_hw(buf); +} + +static bool is_perf_trbe(struct perf_output_handle *handle) +{ + struct trbe_buf *buf = etm_perf_sink_config(handle); + struct trbe_cpudata *cpudata = buf->cpudata; + struct trbe_drvdata *drvdata = cpudata->drvdata; + int cpu = smp_processor_id(); + + WARN_ON(buf->trbe_base != get_trbe_base_pointer()); + WARN_ON(buf->trbe_limit != get_trbe_limit_pointer()); + + if (cpudata->mode != CS_MODE_PERF) + return false; + + if (cpudata->cpu != cpu) + return false; + + if (!cpumask_test_cpu(cpu, &drvdata->supported_cpus)) + return false; + + return true; +} + +static irqreturn_t arm_trbe_irq_handler(int irq, void *dev) +{ + struct perf_output_handle **handle_ptr = dev; + struct perf_output_handle *handle = *handle_ptr; + enum trbe_fault_action act; + u64 status; + + /* + * Ensure the trace is visible to the CPUs and + * any external aborts have been resolved. + */ + trbe_drain_and_disable_local(); + + status = read_sysreg_s(SYS_TRBSR_EL1); + /* + * If the pending IRQ was handled by update_buffer callback + * we have nothing to do here. + */ + if (!is_trbe_irq(status)) + return IRQ_NONE; + + clr_trbe_irq(); + isb(); + + if (WARN_ON_ONCE(!handle) || !perf_get_aux(handle)) + return IRQ_NONE; + + if (!is_perf_trbe(handle)) + return IRQ_NONE; + + /* + * Ensure perf callbacks have completed, which may disable + * the trace buffer in response to a TRUNCATION flag. 
+ */
+	irq_work_run();
+
+	act = trbe_get_fault_act(status);
+	switch (act) {
+	case TRBE_FAULT_ACT_WRAP:
+		trbe_handle_overflow(handle);
+		break;
+	case TRBE_FAULT_ACT_SPURIOUS:
+		trbe_handle_spurious(handle);
+		break;
+	case TRBE_FAULT_ACT_FATAL:
+		trbe_stop_and_truncate_event(handle);
+		break;
+	}
+	return IRQ_HANDLED;
+}
+
+static const struct coresight_ops_sink arm_trbe_sink_ops = {
+	.enable = arm_trbe_enable,
+	.disable = arm_trbe_disable,
+	.alloc_buffer = arm_trbe_alloc_buffer,
+	.free_buffer = arm_trbe_free_buffer,
+	.update_buffer = arm_trbe_update_buffer,
+};
+
+static const struct coresight_ops arm_trbe_cs_ops = {
+	.sink_ops = &arm_trbe_sink_ops,
+};
+
+static ssize_t align_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct trbe_cpudata *cpudata = dev_get_drvdata(dev);
+
+	return sprintf(buf, "%llx\n", cpudata->trbe_align);
+}
+static DEVICE_ATTR_RO(align);
+
+static ssize_t flag_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct trbe_cpudata *cpudata = dev_get_drvdata(dev);
+
+	return sprintf(buf, "%d\n", cpudata->trbe_flag);
+}
+static DEVICE_ATTR_RO(flag);
+
+static struct attribute *arm_trbe_attrs[] = {
+	&dev_attr_align.attr,
+	&dev_attr_flag.attr,
+	NULL,
+};
+
+static const struct attribute_group arm_trbe_group = {
+	.attrs = arm_trbe_attrs,
+};
+
+static const struct attribute_group *arm_trbe_groups[] = {
+	&arm_trbe_group,
+	NULL,
+};
+
+static void arm_trbe_enable_cpu(void *info)
+{
+	struct trbe_drvdata *drvdata = info;
+
+	trbe_reset_local();
+	enable_percpu_irq(drvdata->irq, IRQ_TYPE_NONE);
+}
+
+static void arm_trbe_register_coresight_cpu(struct trbe_drvdata *drvdata, int cpu)
+{
+	struct trbe_cpudata *cpudata = per_cpu_ptr(drvdata->cpudata, cpu);
+	struct coresight_device *trbe_csdev = coresight_get_percpu_sink(cpu);
+	struct coresight_desc desc = { 0 };
+	struct device *dev;
+
+	if (WARN_ON(trbe_csdev))
+		return;
+
+	dev = &cpudata->drvdata->pdev->dev;
+	desc.name = devm_kasprintf(dev, GFP_KERNEL, "trbe%d", cpu);
+	if (!desc.name)
+		goto cpu_clear;
+
+	desc.type = CORESIGHT_DEV_TYPE_SINK;
+	desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_PERCPU_SYSMEM;
+	desc.ops = &arm_trbe_cs_ops;
+	desc.pdata = dev_get_platdata(dev);
+	desc.groups = arm_trbe_groups;
+	desc.dev = dev;
+	trbe_csdev = coresight_register(&desc);
+	if (IS_ERR(trbe_csdev))
+		goto cpu_clear;
+
+	dev_set_drvdata(&trbe_csdev->dev, cpudata);
+	coresight_set_percpu_sink(cpu, trbe_csdev);
+	return;
+cpu_clear:
+	cpumask_clear_cpu(cpu, &drvdata->supported_cpus);
+}
+
+static void arm_trbe_probe_cpu(void *info)
+{
+	struct trbe_drvdata *drvdata = info;
+	int cpu = smp_processor_id();
+	struct trbe_cpudata *cpudata = per_cpu_ptr(drvdata->cpudata, cpu);
+	u64 trbidr;
+
+	if (WARN_ON(!cpudata))
+		goto cpu_clear;
+
+	if (!is_trbe_available()) {
+		pr_err("TRBE is not implemented on cpu %d\n", cpu);
+		goto cpu_clear;
+	}
+
+	trbidr = read_sysreg_s(SYS_TRBIDR_EL1);
+	if (!is_trbe_programmable(trbidr)) {
+		pr_err("TRBE is owned at a higher exception level on cpu %d\n", cpu);
+		goto cpu_clear;
+	}
+
+	cpudata->trbe_align = 1ULL << get_trbe_address_align(trbidr);
+	if (cpudata->trbe_align > SZ_2K) {
+		pr_err("Unsupported alignment on cpu %d\n", cpu);
+		goto cpu_clear;
+	}
+	cpudata->trbe_flag = get_trbe_flag_update(trbidr);
+	cpudata->cpu = cpu;
+	cpudata->drvdata = drvdata;
+	return;
+cpu_clear:
+	cpumask_clear_cpu(cpu, &drvdata->supported_cpus);
+}
+
+static void arm_trbe_remove_coresight_cpu(void *info)
+{
+	int cpu = smp_processor_id();
+	struct
trbe_drvdata *drvdata = info; + struct trbe_cpudata *cpudata = per_cpu_ptr(drvdata->cpudata, cpu); + struct coresight_device *trbe_csdev = coresight_get_percpu_sink(cpu); + + disable_percpu_irq(drvdata->irq); + trbe_reset_local(); + if (trbe_csdev) { + coresight_unregister(trbe_csdev); + cpudata->drvdata = NULL; + coresight_set_percpu_sink(cpu, NULL); + } +} + +static int arm_trbe_probe_coresight(struct trbe_drvdata *drvdata) +{ + int cpu; + + drvdata->cpudata = alloc_percpu(typeof(*drvdata->cpudata)); + if (!drvdata->cpudata) + return -ENOMEM; + + for_each_cpu(cpu, &drvdata->supported_cpus) { + smp_call_function_single(cpu, arm_trbe_probe_cpu, drvdata, 1); + if (cpumask_test_cpu(cpu, &drvdata->supported_cpus)) + arm_trbe_register_coresight_cpu(drvdata, cpu); + if (cpumask_test_cpu(cpu, &drvdata->supported_cpus)) + smp_call_function_single(cpu, arm_trbe_enable_cpu, drvdata, 1); + } + return 0; +} + +static int arm_trbe_remove_coresight(struct trbe_drvdata *drvdata) +{ + int cpu; + + for_each_cpu(cpu, &drvdata->supported_cpus) + smp_call_function_single(cpu, arm_trbe_remove_coresight_cpu, drvdata, 1); + free_percpu(drvdata->cpudata); + return 0; +} + +static int arm_trbe_cpu_startup(unsigned int cpu, struct hlist_node *node) +{ + struct trbe_drvdata *drvdata = hlist_entry_safe(node, struct trbe_drvdata, hotplug_node); + + if (cpumask_test_cpu(cpu, &drvdata->supported_cpus)) { + + /* + * If this CPU was not probed for TRBE, + * initialize it now. + */ + if (!coresight_get_percpu_sink(cpu)) { + arm_trbe_probe_cpu(drvdata); + if (cpumask_test_cpu(cpu, &drvdata->supported_cpus)) + arm_trbe_register_coresight_cpu(drvdata, cpu); + if (cpumask_test_cpu(cpu, &drvdata->supported_cpus)) + arm_trbe_enable_cpu(drvdata); + } else { + arm_trbe_enable_cpu(drvdata); + } + } + return 0; +} + +static int arm_trbe_cpu_teardown(unsigned int cpu, struct hlist_node *node) +{ + struct trbe_drvdata *drvdata = hlist_entry_safe(node, struct trbe_drvdata, hotplug_node); + + if (cpumask_test_cpu(cpu, &drvdata->supported_cpus)) { + disable_percpu_irq(drvdata->irq); + trbe_reset_local(); + } + return 0; +} + +static int arm_trbe_probe_cpuhp(struct trbe_drvdata *drvdata) +{ + enum cpuhp_state trbe_online; + int ret; + + trbe_online = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, DRVNAME, + arm_trbe_cpu_startup, arm_trbe_cpu_teardown); + if (trbe_online < 0) + return trbe_online; + + ret = cpuhp_state_add_instance(trbe_online, &drvdata->hotplug_node); + if (ret) { + cpuhp_remove_multi_state(trbe_online); + return ret; + } + drvdata->trbe_online = trbe_online; + return 0; +} + +static void arm_trbe_remove_cpuhp(struct trbe_drvdata *drvdata) +{ + cpuhp_remove_multi_state(drvdata->trbe_online); +} + +static int arm_trbe_probe_irq(struct platform_device *pdev, + struct trbe_drvdata *drvdata) +{ + int ret; + + drvdata->irq = platform_get_irq(pdev, 0); + if (drvdata->irq < 0) { + pr_err("IRQ not found for the platform device\n"); + return drvdata->irq; + } + + if (!irq_is_percpu(drvdata->irq)) { + pr_err("IRQ is not a PPI\n"); + return -EINVAL; + } + + if (irq_get_percpu_devid_partition(drvdata->irq, &drvdata->supported_cpus)) + return -EINVAL; + + drvdata->handle = alloc_percpu(struct perf_output_handle *); + if (!drvdata->handle) + return -ENOMEM; + + ret = request_percpu_irq(drvdata->irq, arm_trbe_irq_handler, DRVNAME, drvdata->handle); + if (ret) { + free_percpu(drvdata->handle); + return ret; + } + return 0; +} + +static void arm_trbe_remove_irq(struct trbe_drvdata *drvdata) +{ + free_percpu_irq(drvdata->irq, 
drvdata->handle);
+	free_percpu(drvdata->handle);
+}
+
+static int arm_trbe_device_probe(struct platform_device *pdev)
+{
+	struct coresight_platform_data *pdata;
+	struct trbe_drvdata *drvdata;
+	struct device *dev = &pdev->dev;
+	int ret;
+
+	drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
+	if (!drvdata)
+		return -ENOMEM;
+
+	pdata = coresight_get_platform_data(dev);
+	if (IS_ERR(pdata))
+		return PTR_ERR(pdata);
+
+	dev_set_drvdata(dev, drvdata);
+	dev->platform_data = pdata;
+	drvdata->pdev = pdev;
+	ret = arm_trbe_probe_irq(pdev, drvdata);
+	if (ret)
+		return ret;
+
+	ret = arm_trbe_probe_coresight(drvdata);
+	if (ret)
+		goto probe_failed;
+
+	ret = arm_trbe_probe_cpuhp(drvdata);
+	if (ret)
+		goto cpuhp_failed;
+
+	return 0;
+cpuhp_failed:
+	arm_trbe_remove_coresight(drvdata);
+probe_failed:
+	arm_trbe_remove_irq(drvdata);
+	return ret;
+}
+
+static int arm_trbe_device_remove(struct platform_device *pdev)
+{
+	struct trbe_drvdata *drvdata = platform_get_drvdata(pdev);
+
+	arm_trbe_remove_cpuhp(drvdata);
+	arm_trbe_remove_coresight(drvdata);
+	arm_trbe_remove_irq(drvdata);
+	return 0;
+}
+
+static const struct of_device_id arm_trbe_of_match[] = {
+	{ .compatible = "arm,trace-buffer-extension"},
+	{},
+};
+MODULE_DEVICE_TABLE(of, arm_trbe_of_match);
+
+static struct platform_driver arm_trbe_driver = {
+	.driver = {
+		.name = DRVNAME,
+		.of_match_table = of_match_ptr(arm_trbe_of_match),
+		.suppress_bind_attrs = true,
+	},
+	.probe = arm_trbe_device_probe,
+	.remove = arm_trbe_device_remove,
+};
+
+static int __init arm_trbe_init(void)
+{
+	int ret;
+
+	if (arm64_kernel_unmapped_at_el0()) {
+		pr_err("TRBE can't be used when the kernel is unmapped at EL0\n");
+		return -EOPNOTSUPP;
+	}
+
+	ret = platform_driver_register(&arm_trbe_driver);
+	if (!ret)
+		return 0;
+
+	pr_err("Error registering %s platform driver\n", DRVNAME);
+	return ret;
+}
+
+static void __exit arm_trbe_exit(void)
+{
+	platform_driver_unregister(&arm_trbe_driver);
+}
+module_init(arm_trbe_init);
+module_exit(arm_trbe_exit);
+
+MODULE_AUTHOR("Anshuman Khandual <anshuman.khandual@arm.com>");
+MODULE_DESCRIPTION("Arm Trace Buffer Extension (TRBE) driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/hwtracing/coresight/coresight-trbe.h b/drivers/hwtracing/coresight/coresight-trbe.h
new file mode 100644
index 000000000000..abf3e36082f0
--- /dev/null
+++ b/drivers/hwtracing/coresight/coresight-trbe.h
@@ -0,0 +1,152 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This contains all the required hardware-related helper functions for
+ * the Trace Buffer Extension (TRBE) driver in the coresight framework.
+ *
+ * Copyright (C) 2020 ARM Ltd.
+ * + * Author: Anshuman Khandual <anshuman.khandual@arm.com> + */ +#include <linux/coresight.h> +#include <linux/device.h> +#include <linux/irq.h> +#include <linux/kernel.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/smp.h> + +#include "coresight-etm-perf.h" + +static inline bool is_trbe_available(void) +{ + u64 aa64dfr0 = read_sysreg_s(SYS_ID_AA64DFR0_EL1); + unsigned int trbe = cpuid_feature_extract_unsigned_field(aa64dfr0, ID_AA64DFR0_TRBE_SHIFT); + + return trbe >= 0b0001; +} + +static inline bool is_trbe_enabled(void) +{ + u64 trblimitr = read_sysreg_s(SYS_TRBLIMITR_EL1); + + return trblimitr & TRBLIMITR_ENABLE; +} + +#define TRBE_EC_OTHERS 0 +#define TRBE_EC_STAGE1_ABORT 36 +#define TRBE_EC_STAGE2_ABORT 37 + +static inline int get_trbe_ec(u64 trbsr) +{ + return (trbsr >> TRBSR_EC_SHIFT) & TRBSR_EC_MASK; +} + +#define TRBE_BSC_NOT_STOPPED 0 +#define TRBE_BSC_FILLED 1 +#define TRBE_BSC_TRIGGERED 2 + +static inline int get_trbe_bsc(u64 trbsr) +{ + return (trbsr >> TRBSR_BSC_SHIFT) & TRBSR_BSC_MASK; +} + +static inline void clr_trbe_irq(void) +{ + u64 trbsr = read_sysreg_s(SYS_TRBSR_EL1); + + trbsr &= ~TRBSR_IRQ; + write_sysreg_s(trbsr, SYS_TRBSR_EL1); +} + +static inline bool is_trbe_irq(u64 trbsr) +{ + return trbsr & TRBSR_IRQ; +} + +static inline bool is_trbe_trg(u64 trbsr) +{ + return trbsr & TRBSR_TRG; +} + +static inline bool is_trbe_wrap(u64 trbsr) +{ + return trbsr & TRBSR_WRAP; +} + +static inline bool is_trbe_abort(u64 trbsr) +{ + return trbsr & TRBSR_ABORT; +} + +static inline bool is_trbe_running(u64 trbsr) +{ + return !(trbsr & TRBSR_STOP); +} + +#define TRBE_TRIG_MODE_STOP 0 +#define TRBE_TRIG_MODE_IRQ 1 +#define TRBE_TRIG_MODE_IGNORE 3 + +#define TRBE_FILL_MODE_FILL 0 +#define TRBE_FILL_MODE_WRAP 1 +#define TRBE_FILL_MODE_CIRCULAR_BUFFER 3 + +static inline void set_trbe_disabled(void) +{ + u64 trblimitr = read_sysreg_s(SYS_TRBLIMITR_EL1); + + trblimitr &= ~TRBLIMITR_ENABLE; + write_sysreg_s(trblimitr, SYS_TRBLIMITR_EL1); +} + +static inline bool get_trbe_flag_update(u64 trbidr) +{ + return trbidr & TRBIDR_FLAG; +} + +static inline bool is_trbe_programmable(u64 trbidr) +{ + return !(trbidr & TRBIDR_PROG); +} + +static inline int get_trbe_address_align(u64 trbidr) +{ + return (trbidr >> TRBIDR_ALIGN_SHIFT) & TRBIDR_ALIGN_MASK; +} + +static inline unsigned long get_trbe_write_pointer(void) +{ + return read_sysreg_s(SYS_TRBPTR_EL1); +} + +static inline void set_trbe_write_pointer(unsigned long addr) +{ + WARN_ON(is_trbe_enabled()); + write_sysreg_s(addr, SYS_TRBPTR_EL1); +} + +static inline unsigned long get_trbe_limit_pointer(void) +{ + u64 trblimitr = read_sysreg_s(SYS_TRBLIMITR_EL1); + unsigned long addr = trblimitr & (TRBLIMITR_LIMIT_MASK << TRBLIMITR_LIMIT_SHIFT); + + WARN_ON(!IS_ALIGNED(addr, PAGE_SIZE)); + return addr; +} + +static inline unsigned long get_trbe_base_pointer(void) +{ + u64 trbbaser = read_sysreg_s(SYS_TRBBASER_EL1); + unsigned long addr = trbbaser & (TRBBASER_BASE_MASK << TRBBASER_BASE_SHIFT); + + WARN_ON(!IS_ALIGNED(addr, PAGE_SIZE)); + return addr; +} + +static inline void set_trbe_base_pointer(unsigned long addr) +{ + WARN_ON(is_trbe_enabled()); + WARN_ON(!IS_ALIGNED(addr, (1UL << TRBBASER_BASE_SHIFT))); + WARN_ON(!IS_ALIGNED(addr, PAGE_SIZE)); + write_sysreg_s(addr, SYS_TRBBASER_EL1); +} diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index ed46e6057e33..d205fafede37 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -794,8 +794,13 @@ static struct 
its_vpe *its_build_vmapp_cmd(struct its_node *its,
 	its_encode_alloc(cmd, alloc);
 
-	/* We can only signal PTZ when alloc==1. Why do we have two bits? */
-	its_encode_ptz(cmd, alloc);
+	/*
+	 * GICv4.1 provides a way to get the VLPI state, which needs the vPE
+	 * to be unmapped first, and in that case we may remap the vPE back
+	 * while the VPT is not empty. So we can't assume that the VPT is
+	 * empty on map. This is why we never advertise PTZ.
+	 */
+	its_encode_ptz(cmd, false);
 	its_encode_vconf_addr(cmd, vconf_addr);
 	its_encode_vmapp_default_db(cmd, desc->its_vmapp_cmd.vpe->vpe_db_lpi);
 
@@ -4554,6 +4559,15 @@ static void its_vpe_irq_domain_deactivate(struct irq_domain *domain,
 		its_send_vmapp(its, vpe, false);
 	}
+
+	/*
+	 * There may be a direct read to the VPT after unmapping the vPE;
+	 * to guarantee the validity of such a read, we make the VPT memory
+	 * coherent with the CPU caches here.
+	 */
+	if (find_4_1_its() && !atomic_read(&vpe->vmapp_count))
+		gic_flush_dcache_to_poc(page_address(vpe->vpt_page),
+					LPI_PENDBASE_SZ);
 }
 
 static const struct irq_domain_ops its_vpe_domain_ops = {
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index 2d10d84fb79c..d4f7f1f9cc77 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -581,33 +581,6 @@ static const struct attribute_group armpmu_common_attr_group = {
 	.attrs = armpmu_common_attrs,
 };
 
-/* Set at runtime when we know what CPU type we are. */
-static struct arm_pmu *__oprofile_cpu_pmu;
-
-/*
- * Despite the names, these two functions are CPU-specific and are used
- * by the OProfile/perf code.
- */
-const char *perf_pmu_name(void)
-{
-	if (!__oprofile_cpu_pmu)
-		return NULL;
-
-	return __oprofile_cpu_pmu->name;
-}
-EXPORT_SYMBOL_GPL(perf_pmu_name);
-
-int perf_num_counters(void)
-{
-	int max_events = 0;
-
-	if (__oprofile_cpu_pmu != NULL)
-		max_events = __oprofile_cpu_pmu->num_events;
-
-	return max_events;
-}
-EXPORT_SYMBOL_GPL(perf_num_counters);
-
 static int armpmu_count_irq_users(const int irq)
 {
 	int cpu, count = 0;
@@ -979,9 +952,6 @@ int armpmu_register(struct arm_pmu *pmu)
 	if (ret)
 		goto out_destroy;
 
-	if (!__oprofile_cpu_pmu)
-		__oprofile_cpu_pmu = pmu;
-
 	pr_info("enabled with %s PMU driver, %d counters available%s\n",
 		pmu->name, pmu->num_events,
 		has_nmi ? ", using NMIs" : "");
diff --git a/drivers/ptp/Kconfig b/drivers/ptp/Kconfig
index f2edef0df40f..8c20e524e9ad 100644
--- a/drivers/ptp/Kconfig
+++ b/drivers/ptp/Kconfig
@@ -108,7 +108,7 @@ config PTP_1588_CLOCK_PCH
 config PTP_1588_CLOCK_KVM
 	tristate "KVM virtual PTP clock"
 	depends on PTP_1588_CLOCK
-	depends on KVM_GUEST && X86
+	depends on (KVM_GUEST && X86) || (HAVE_ARM_SMCCC_DISCOVERY && ARM_ARCH_TIMER)
 	default y
 	help
 	  This driver adds support for using kvm infrastructure as a PTP
diff --git a/drivers/ptp/Makefile b/drivers/ptp/Makefile
index db5aef3bddc6..8673d1743faa 100644
--- a/drivers/ptp/Makefile
+++ b/drivers/ptp/Makefile
@@ -4,6 +4,8 @@
 #
 
 ptp-y					:= ptp_clock.o ptp_chardev.o ptp_sysfs.o
+ptp_kvm-$(CONFIG_X86)			:= ptp_kvm_x86.o ptp_kvm_common.o
+ptp_kvm-$(CONFIG_HAVE_ARM_SMCCC)	:= ptp_kvm_arm.o ptp_kvm_common.o
 obj-$(CONFIG_PTP_1588_CLOCK)		+= ptp.o
 obj-$(CONFIG_PTP_1588_CLOCK_DTE)	+= ptp_dte.o
 obj-$(CONFIG_PTP_1588_CLOCK_INES)	+= ptp_ines.o
diff --git a/drivers/ptp/ptp_kvm_arm.c b/drivers/ptp/ptp_kvm_arm.c
new file mode 100644
index 000000000000..b7d28c8dfb84
--- /dev/null
+++ b/drivers/ptp/ptp_kvm_arm.c
@@ -0,0 +1,28 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Virtual PTP 1588 clock for use with KVM guests
+ * Copyright (C) 2019 ARM Ltd.
+ * All Rights Reserved + */ + +#include <linux/arm-smccc.h> +#include <linux/ptp_kvm.h> + +#include <asm/arch_timer.h> +#include <asm/hypervisor.h> + +int kvm_arch_ptp_init(void) +{ + int ret; + + ret = kvm_arm_hyp_service_available(ARM_SMCCC_KVM_FUNC_PTP); + if (ret <= 0) + return -EOPNOTSUPP; + + return 0; +} + +int kvm_arch_ptp_get_clock(struct timespec64 *ts) +{ + return kvm_arch_ptp_get_crosststamp(NULL, ts, NULL); +} diff --git a/drivers/ptp/ptp_kvm.c b/drivers/ptp/ptp_kvm_common.c index 658d33fc3195..fcae32f56f25 100644 --- a/drivers/ptp/ptp_kvm.c +++ b/drivers/ptp/ptp_kvm_common.c @@ -8,11 +8,11 @@ #include <linux/err.h> #include <linux/init.h> #include <linux/kernel.h> +#include <linux/slab.h> #include <linux/module.h> +#include <linux/ptp_kvm.h> #include <uapi/linux/kvm_para.h> #include <asm/kvm_para.h> -#include <asm/pvclock.h> -#include <asm/kvmclock.h> #include <uapi/asm/kvm_para.h> #include <linux/ptp_clock_kernel.h> @@ -24,56 +24,29 @@ struct kvm_ptp_clock { static DEFINE_SPINLOCK(kvm_ptp_lock); -static struct pvclock_vsyscall_time_info *hv_clock; - -static struct kvm_clock_pairing clock_pair; -static phys_addr_t clock_pair_gpa; - static int ptp_kvm_get_time_fn(ktime_t *device_time, struct system_counterval_t *system_counter, void *ctx) { - unsigned long ret; + long ret; + u64 cycle; struct timespec64 tspec; - unsigned version; - int cpu; - struct pvclock_vcpu_time_info *src; + struct clocksource *cs; spin_lock(&kvm_ptp_lock); preempt_disable_notrace(); - cpu = smp_processor_id(); - src = &hv_clock[cpu].pvti; - - do { - /* - * We are using a TSC value read in the hosts - * kvm_hc_clock_pairing handling. - * So any changes to tsc_to_system_mul - * and tsc_shift or any other pvclock - * data invalidate that measurement. - */ - version = pvclock_read_begin(src); - - ret = kvm_hypercall2(KVM_HC_CLOCK_PAIRING, - clock_pair_gpa, - KVM_CLOCK_PAIRING_WALLCLOCK); - if (ret != 0) { - pr_err_ratelimited("clock pairing hypercall ret %lu\n", ret); - spin_unlock(&kvm_ptp_lock); - preempt_enable_notrace(); - return -EOPNOTSUPP; - } - - tspec.tv_sec = clock_pair.sec; - tspec.tv_nsec = clock_pair.nsec; - ret = __pvclock_read_cycles(src, clock_pair.tsc); - } while (pvclock_read_retry(src, version)); + ret = kvm_arch_ptp_get_crosststamp(&cycle, &tspec, &cs); + if (ret) { + spin_unlock(&kvm_ptp_lock); + preempt_enable_notrace(); + return ret; + } preempt_enable_notrace(); - system_counter->cycles = ret; - system_counter->cs = &kvm_clock; + system_counter->cycles = cycle; + system_counter->cs = cs; *device_time = timespec64_to_ktime(tspec); @@ -111,22 +84,17 @@ static int ptp_kvm_settime(struct ptp_clock_info *ptp, static int ptp_kvm_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts) { - unsigned long ret; + long ret; struct timespec64 tspec; spin_lock(&kvm_ptp_lock); - ret = kvm_hypercall2(KVM_HC_CLOCK_PAIRING, - clock_pair_gpa, - KVM_CLOCK_PAIRING_WALLCLOCK); - if (ret != 0) { - pr_err_ratelimited("clock offset hypercall ret %lu\n", ret); + ret = kvm_arch_ptp_get_clock(&tspec); + if (ret) { spin_unlock(&kvm_ptp_lock); - return -EOPNOTSUPP; + return ret; } - tspec.tv_sec = clock_pair.sec; - tspec.tv_nsec = clock_pair.nsec; spin_unlock(&kvm_ptp_lock); memcpy(ts, &tspec, sizeof(struct timespec64)); @@ -168,19 +136,12 @@ static int __init ptp_kvm_init(void) { long ret; - if (!kvm_para_available()) - return -ENODEV; - - clock_pair_gpa = slow_virt_to_phys(&clock_pair); - hv_clock = pvclock_get_pvti_cpu0_va(); - - if (!hv_clock) - return -ENODEV; - - ret = 
kvm_hypercall2(KVM_HC_CLOCK_PAIRING, clock_pair_gpa,
-			KVM_CLOCK_PAIRING_WALLCLOCK);
-	if (ret == -KVM_ENOSYS || ret == -KVM_EOPNOTSUPP)
-		return -ENODEV;
+	ret = kvm_arch_ptp_init();
+	if (ret) {
+		if (ret != -EOPNOTSUPP)
+			pr_err("failed to initialize ptp_kvm\n");
+		return ret;
+	}
 
 	kvm_ptp_clock.caps = ptp_kvm_caps;
 
diff --git a/drivers/ptp/ptp_kvm_x86.c b/drivers/ptp/ptp_kvm_x86.c
new file mode 100644
index 000000000000..3dd519dfc473
--- /dev/null
+++ b/drivers/ptp/ptp_kvm_x86.c
@@ -0,0 +1,97 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Virtual PTP 1588 clock for use with KVM guests
+ *
+ * Copyright (C) 2017 Red Hat Inc.
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <asm/pvclock.h>
+#include <asm/kvmclock.h>
+#include <linux/module.h>
+#include <uapi/asm/kvm_para.h>
+#include <uapi/linux/kvm_para.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/ptp_kvm.h>
+
+struct pvclock_vsyscall_time_info *hv_clock;
+
+static phys_addr_t clock_pair_gpa;
+static struct kvm_clock_pairing clock_pair;
+
+int kvm_arch_ptp_init(void)
+{
+	long ret;
+
+	if (!kvm_para_available())
+		return -ENODEV;
+
+	clock_pair_gpa = slow_virt_to_phys(&clock_pair);
+	hv_clock = pvclock_get_pvti_cpu0_va();
+	if (!hv_clock)
+		return -ENODEV;
+
+	ret = kvm_hypercall2(KVM_HC_CLOCK_PAIRING, clock_pair_gpa,
+			     KVM_CLOCK_PAIRING_WALLCLOCK);
+	if (ret == -KVM_ENOSYS || ret == -KVM_EOPNOTSUPP)
+		return -ENODEV;
+
+	return 0;
+}
+
+int kvm_arch_ptp_get_clock(struct timespec64 *ts)
+{
+	long ret;
+
+	ret = kvm_hypercall2(KVM_HC_CLOCK_PAIRING,
+			     clock_pair_gpa,
+			     KVM_CLOCK_PAIRING_WALLCLOCK);
+	if (ret != 0) {
+		pr_err_ratelimited("clock offset hypercall ret %ld\n", ret);
+		return -EOPNOTSUPP;
+	}
+
+	ts->tv_sec = clock_pair.sec;
+	ts->tv_nsec = clock_pair.nsec;
+
+	return 0;
+}
+
+int kvm_arch_ptp_get_crosststamp(u64 *cycle, struct timespec64 *tspec,
+				 struct clocksource **cs)
+{
+	struct pvclock_vcpu_time_info *src;
+	unsigned int version;
+	long ret;
+	int cpu;
+
+	cpu = smp_processor_id();
+	src = &hv_clock[cpu].pvti;
+
+	do {
+		/*
+		 * We are using a TSC value read in the host's
+		 * kvm_hc_clock_pairing handling, so any changes to
+		 * tsc_to_system_mul, tsc_shift or any other pvclock
+		 * data invalidate that measurement.
+		 */
+		version = pvclock_read_begin(src);
+
+		ret = kvm_hypercall2(KVM_HC_CLOCK_PAIRING,
+				     clock_pair_gpa,
+				     KVM_CLOCK_PAIRING_WALLCLOCK);
+		if (ret != 0) {
+			pr_err_ratelimited("clock pairing hypercall ret %ld\n", ret);
+			return -EOPNOTSUPP;
+		}
+		tspec->tv_sec = clock_pair.sec;
+		tspec->tv_nsec = clock_pair.nsec;
+		*cycle = __pvclock_read_cycles(src, clock_pair.tsc);
+	} while (pvclock_read_retry(src, version));
+
+	*cs = &kvm_clock;
+
+	return 0;
+}
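With kvm_arch_ptp_init() and kvm_arch_ptp_get_clock() now provided on both x86 and arm64, the resulting ptp_kvm clock is consumed from the guest like any other PTP clock. A minimal userspace sketch of reading it (the device index is an assumption — check /sys/class/ptp/ptp*/clock_name for the KVM clock, typically reported as "KVM virtual PTP", before assuming /dev/ptp0):

#include <fcntl.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

/* Standard dynamic posix-clock encoding: derive a clockid from an open fd. */
#define CLOCKFD			3
#define FD_TO_CLOCKID(fd)	((~(clockid_t)(fd) << 3) | CLOCKFD)

int main(void)
{
	struct timespec ts;
	int fd = open("/dev/ptp0", O_RDONLY);	/* assumed device node */

	if (fd < 0) {
		perror("open /dev/ptp0");
		return 1;
	}
	if (clock_gettime(FD_TO_CLOCKID(fd), &ts)) {
		perror("clock_gettime");
		close(fd);
		return 1;
	}
	/* For ptp_kvm, this is the host's wall-clock time. */
	printf("host time: %lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);
	close(fd);
	return 0;
}

In practice this is the same mechanism chrony uses with a "refclock PHC /dev/ptp0" line to discipline a guest clock against its host.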