Diffstat (limited to 'sound/soc/sof')
-rw-r--r--  sound/soc/sof/Kconfig | 156
-rw-r--r--  sound/soc/sof/Makefile | 18
-rw-r--r--  sound/soc/sof/control.c | 552
-rw-r--r--  sound/soc/sof/core.c | 506
-rw-r--r--  sound/soc/sof/debug.c | 232
-rw-r--r--  sound/soc/sof/intel/Kconfig | 230
-rw-r--r--  sound/soc/sof/intel/Makefile | 19
-rw-r--r--  sound/soc/sof/intel/apl.c | 113
-rw-r--r--  sound/soc/sof/intel/bdw.c | 713
-rw-r--r--  sound/soc/sof/intel/byt.c | 874
-rw-r--r--  sound/soc/sof/intel/cnl.c | 268
-rw-r--r--  sound/soc/sof/intel/hda-bus.c | 111
-rw-r--r--  sound/soc/sof/intel/hda-codec.c | 171
-rw-r--r--  sound/soc/sof/intel/hda-ctrl.c | 181
-rw-r--r--  sound/soc/sof/intel/hda-dai.c | 356
-rw-r--r--  sound/soc/sof/intel/hda-dsp.c | 471
-rw-r--r--  sound/soc/sof/intel/hda-ipc.c | 455
-rw-r--r--  sound/soc/sof/intel/hda-loader.c | 382
-rw-r--r--  sound/soc/sof/intel/hda-pcm.c | 239
-rw-r--r--  sound/soc/sof/intel/hda-stream.c | 701
-rw-r--r--  sound/soc/sof/intel/hda-trace.c | 94
-rw-r--r--  sound/soc/sof/intel/hda.c | 689
-rw-r--r--  sound/soc/sof/intel/hda.h | 583
-rw-r--r--  sound/soc/sof/intel/intel-ipc.c | 92
-rw-r--r--  sound/soc/sof/intel/shim.h | 185
-rw-r--r--  sound/soc/sof/ipc.c | 842
-rw-r--r--  sound/soc/sof/loader.c | 400
-rw-r--r--  sound/soc/sof/nocodec.c | 109
-rw-r--r--  sound/soc/sof/ops.c | 163
-rw-r--r--  sound/soc/sof/ops.h | 411
-rw-r--r--  sound/soc/sof/pcm.c | 767
-rw-r--r--  sound/soc/sof/pm.c | 388
-rw-r--r--  sound/soc/sof/sof-acpi-dev.c | 312
-rw-r--r--  sound/soc/sof/sof-pci-dev.c | 373
-rw-r--r--  sound/soc/sof/sof-priv.h | 635
-rw-r--r--  sound/soc/sof/topology.c | 3179
-rw-r--r--  sound/soc/sof/trace.c | 297
-rw-r--r--  sound/soc/sof/utils.c | 112
-rw-r--r--  sound/soc/sof/xtensa/Kconfig | 2
-rw-r--r--  sound/soc/sof/xtensa/Makefile | 5
-rw-r--r--  sound/soc/sof/xtensa/core.c | 138
41 files changed, 16524 insertions, 0 deletions
diff --git a/sound/soc/sof/Kconfig b/sound/soc/sof/Kconfig
new file mode 100644
index 000000000000..a1a9ffe605dc
--- /dev/null
+++ b/sound/soc/sof/Kconfig
@@ -0,0 +1,156 @@
+config SND_SOC_SOF_TOPLEVEL
+ bool "Sound Open Firmware Support"
+ help
+ This adds support for Sound Open Firmware (SOF). SOF is a free and
+ generic open source audio DSP firmware for multiple devices.
+ Say Y if you have such a device that is supported by SOF.
+ If unsure select "N".
+
+if SND_SOC_SOF_TOPLEVEL
+
+config SND_SOC_SOF_PCI
+ tristate "SOF PCI enumeration support"
+ depends on PCI
+ select SND_SOC_SOF
+ select SND_SOC_ACPI if ACPI
+ select SND_SOC_SOF_OPTIONS
+ select SND_SOC_SOF_INTEL_PCI if SND_SOC_SOF_INTEL_TOPLEVEL
+ help
+ This adds support for PCI enumeration. This option is
+ required to enable Intel Skylake+ devices.
+ Say Y if you need this option.
+ If unsure select "N".
+
+config SND_SOC_SOF_ACPI
+ tristate "SOF ACPI enumeration support"
+ depends on ACPI || COMPILE_TEST
+ select SND_SOC_SOF
+ select SND_SOC_ACPI if ACPI
+ select SND_SOC_SOF_OPTIONS
+ select SND_SOC_SOF_INTEL_ACPI if SND_SOC_SOF_INTEL_TOPLEVEL
+ select IOSF_MBI if X86
+ help
+ This adds support for ACPI enumeration. This option is required
+ to enable Intel Haswell/Broadwell/Baytrail/Cherrytrail devices.
+ Say Y if you need this option.
+ If unsure select "N".
+
+config SND_SOC_SOF_OPTIONS
+ tristate
+ help
+ This option is not user-selectable but automagically handled by
+ 'select' statements at a higher level
+
+if SND_SOC_SOF_OPTIONS
+
+config SND_SOC_SOF_NOCODEC
+ tristate "SOF nocodec mode Support"
+ help
+ This adds support for a dummy/nocodec machine driver fallback
+ option if no known codec is detected. This is typically only
+ enabled for developers or devices where the sound card is
+ controlled externally.
+ Say Y if you need this nocodec fallback option.
+ If unsure select "N".
+
+config SND_SOC_SOF_STRICT_ABI_CHECKS
+ bool "SOF strict ABI checks"
+ help
+ This option enables strict ABI checks for firmware and topology
+ files.
+ When these files are more recent than the kernel, the kernel
+ will handle the functionality it supports and may report errors
+ during topology creation or run-time usage if new functionality
+ is invoked.
+ This option will stop topology creation and firmware load upfront.
+ It is intended for SOF CI/releases and not for users or distros.
+ Say Y if you want strict ABI checks for an SOF release.
+ If you are not involved in SOF releases and CI development,
+ select "N".
+
+config SND_SOC_SOF_DEBUG
+ bool "SOF debugging features"
+ help
+ This option can be used to enable or disable individual SOF firmware
+ and driver debugging options.
+ Say Y if you are debugging SOF FW or drivers.
+ If unsure select "N".
+
+if SND_SOC_SOF_DEBUG
+
+config SND_SOC_SOF_FORCE_NOCODEC_MODE
+ bool "SOF force nocodec Mode"
+ depends on SND_SOC_SOF_NOCODEC
+ help
+ This forces SOF to use dummy/nocodec as machine driver, even
+ though there is a codec detected on the real platform. This is
+ typically only enabled for developers for debug purposes, before
+ the codec/machine driver is ready, or to exclude the impact of
+ those drivers.
+ Say Y if you need this force nocodec mode option.
+ If unsure select "N".
+
+config SND_SOC_SOF_DEBUG_XRUN_STOP
+ bool "SOF stop on XRUN"
+ help
+ This option forces PCMs to stop on any XRUN event. This is useful to
+ preserve any trace data and pipeline status prior to the XRUN.
+ Say Y if you are debugging SOF FW pipeline XRUNs.
+ If unsure select "N".
+
+config SND_SOC_SOF_DEBUG_VERBOSE_IPC
+ bool "SOF verbose IPC logs"
+ help
+ This option enables more verbose IPC logs, with command types in
+ human-readable form instead of just 32-bit hex dumps. This is useful
+ if you are trying to debug IPC with the DSP firmware.
+ If unsure select "N".
+
+config SND_SOC_SOF_DEBUG_FORCE_IPC_POSITION
+ bool "SOF force to use IPC for position update on SKL+"
+ help
+ This option forces the driver to handle stream position update IPCs
+ and to report period elapsed to ALSA from them, even on platforms
+ (e.g. Intel SKL+) that provide another mechanism (e.g. HDA
+ DPIB/posbuf) for position updates.
+ On platforms (e.g. Intel pre-SKL) where the position update IPC is
+ the only option, this setting has no effect.
+ Select "Y" if you are debugging pointer updates with position IPCs
+ or if DPIB/posbuf support is not yet ready.
+ If unsure select "N".
+
+config SND_SOC_SOF_DEBUG_ENABLE_DEBUGFS_CACHE
+ bool "SOF enable debugfs caching"
+ help
+ This option enables caching of the DSP resources (memory windows,
+ registers, etc.) exposed through debugfs
+ before the audio DSP is suspended. This will increase the suspend
+ latency and therefore should be used for debug purposes only.
+ Say Y if you want to enable caching the memory windows.
+ If unsure, select "N".
+
+endif ## SND_SOC_SOF_DEBUG
+
+endif ## SND_SOC_SOF_OPTIONS
+
+config SND_SOC_SOF
+ tristate
+ select SND_SOC_TOPOLOGY
+ help
+ This option is not user-selectable but automagically handled by
+ 'select' statements at a higher level
+ The selection is made at the top level and does not exactly follow
+ module dependencies but since the module or built-in type is decided
+ at the top level it doesn't matter.
+
+config SND_SOC_SOF_PROBE_WORK_QUEUE
+ bool
+ help
+ This option is not user-selectable but automagically handled by
+ 'select' statements at a higher level
+ When selected, the probe is handled in two steps, for example to
+ avoid lockdeps if request_module is used in the probe.
+
+source "sound/soc/sof/intel/Kconfig"
+source "sound/soc/sof/xtensa/Kconfig"
+
+endif
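
As an aside on the option wiring above: a user only toggles the top-level, bus and *_SUPPORT symbols, and everything else is pulled in through 'select'. A hypothetical .config fragment (illustrative only, not part of this patch) for an Apollolake PCI platform could look like:

CONFIG_SND_SOC_SOF_TOPLEVEL=y
CONFIG_SND_SOC_SOF_PCI=m
CONFIG_SND_SOC_SOF_NOCODEC=m
CONFIG_SND_SOC_SOF_INTEL_TOPLEVEL=y
CONFIG_SND_SOC_SOF_APOLLOLAKE_SUPPORT=y
# selected automatically: SND_SOC_SOF, SND_SOC_SOF_OPTIONS,
# SND_SOC_SOF_INTEL_PCI, SND_SOC_SOF_APOLLOLAKE, SND_SOC_SOF_HDA_COMMON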
diff --git a/sound/soc/sof/Makefile b/sound/soc/sof/Makefile
new file mode 100644
index 000000000000..8f14c9d2950b
--- /dev/null
+++ b/sound/soc/sof/Makefile
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+
+snd-sof-objs := core.o ops.o loader.o ipc.o pcm.o pm.o debug.o topology.o\
+ control.o trace.o utils.o
+
+snd-sof-pci-objs := sof-pci-dev.o
+snd-sof-acpi-objs := sof-acpi-dev.o
+snd-sof-nocodec-objs := nocodec.o
+
+obj-$(CONFIG_SND_SOC_SOF) += snd-sof.o
+obj-$(CONFIG_SND_SOC_SOF_NOCODEC) += snd-sof-nocodec.o
+
+
+obj-$(CONFIG_SND_SOC_SOF_ACPI) += sof-acpi-dev.o
+obj-$(CONFIG_SND_SOC_SOF_PCI) += sof-pci-dev.o
+
+obj-$(CONFIG_SND_SOC_SOF_INTEL_TOPLEVEL) += intel/
+obj-$(CONFIG_SND_SOC_SOF_XTENSA) += xtensa/
diff --git a/sound/soc/sof/control.c b/sound/soc/sof/control.c
new file mode 100644
index 000000000000..11762c4580f1
--- /dev/null
+++ b/sound/soc/sof/control.c
@@ -0,0 +1,552 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018 Intel Corporation. All rights reserved.
+//
+// Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+//
+
+/* Mixer Controls */
+
+#include <linux/pm_runtime.h>
+#include "sof-priv.h"
+
+static inline u32 mixer_to_ipc(unsigned int value, u32 *volume_map, int size)
+{
+ if (value >= size)
+ return volume_map[size - 1];
+
+ return volume_map[value];
+}
+
+static inline u32 ipc_to_mixer(u32 value, u32 *volume_map, int size)
+{
+ int i;
+
+ for (i = 0; i < size; i++) {
+ if (volume_map[i] >= value)
+ return i;
+ }
+
+ return i - 1;
+}
+
+int snd_sof_volume_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct soc_mixer_control *sm =
+ (struct soc_mixer_control *)kcontrol->private_value;
+ struct snd_sof_control *scontrol = sm->dobj.private;
+ struct snd_sof_dev *sdev = scontrol->sdev;
+ struct sof_ipc_ctrl_data *cdata = scontrol->control_data;
+ unsigned int i, channels = scontrol->num_channels;
+ int err, ret;
+
+ ret = pm_runtime_get_sync(sdev->dev);
+ if (ret < 0) {
+ dev_err_ratelimited(sdev->dev,
+ "error: volume get failed to resume %d\n",
+ ret);
+ pm_runtime_put_noidle(sdev->dev);
+ return ret;
+ }
+
+ /* get all the mixer data from DSP */
+ snd_sof_ipc_set_get_comp_data(sdev->ipc, scontrol,
+ SOF_IPC_COMP_GET_VALUE,
+ SOF_CTRL_TYPE_VALUE_CHAN_GET,
+ SOF_CTRL_CMD_VOLUME,
+ false);
+
+ /* read back each channel */
+ for (i = 0; i < channels; i++)
+ ucontrol->value.integer.value[i] =
+ ipc_to_mixer(cdata->chanv[i].value,
+ scontrol->volume_table, sm->max + 1);
+
+ pm_runtime_mark_last_busy(sdev->dev);
+ err = pm_runtime_put_autosuspend(sdev->dev);
+ if (err < 0)
+ dev_err_ratelimited(sdev->dev,
+ "error: volume get failed to idle %d\n",
+ err);
+ return 0;
+}
+
+int snd_sof_volume_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct soc_mixer_control *sm =
+ (struct soc_mixer_control *)kcontrol->private_value;
+ struct snd_sof_control *scontrol = sm->dobj.private;
+ struct snd_sof_dev *sdev = scontrol->sdev;
+ struct sof_ipc_ctrl_data *cdata = scontrol->control_data;
+ unsigned int i, channels = scontrol->num_channels;
+ int ret, err;
+
+ ret = pm_runtime_get_sync(sdev->dev);
+ if (ret < 0) {
+ dev_err_ratelimited(sdev->dev,
+ "error: volume put failed to resume %d\n",
+ ret);
+ pm_runtime_put_noidle(sdev->dev);
+ return ret;
+ }
+
+ /* update each channel */
+ for (i = 0; i < channels; i++) {
+ cdata->chanv[i].value =
+ mixer_to_ipc(ucontrol->value.integer.value[i],
+ scontrol->volume_table, sm->max + 1);
+ cdata->chanv[i].channel = i;
+ }
+
+ /* notify DSP of mixer updates */
+ snd_sof_ipc_set_get_comp_data(sdev->ipc, scontrol,
+ SOF_IPC_COMP_SET_VALUE,
+ SOF_CTRL_TYPE_VALUE_CHAN_GET,
+ SOF_CTRL_CMD_VOLUME,
+ true);
+
+ pm_runtime_mark_last_busy(sdev->dev);
+ err = pm_runtime_put_autosuspend(sdev->dev);
+ if (err < 0)
+ dev_err_ratelimited(sdev->dev,
+ "error: volume put failed to idle %d\n",
+ err);
+ return 0;
+}
+
+int snd_sof_switch_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct soc_mixer_control *sm =
+ (struct soc_mixer_control *)kcontrol->private_value;
+ struct snd_sof_control *scontrol = sm->dobj.private;
+ struct snd_sof_dev *sdev = scontrol->sdev;
+ struct sof_ipc_ctrl_data *cdata = scontrol->control_data;
+ unsigned int i, channels = scontrol->num_channels;
+ int err, ret;
+
+ ret = pm_runtime_get_sync(sdev->dev);
+ if (ret < 0) {
+ dev_err_ratelimited(sdev->dev,
+ "error: switch get failed to resume %d\n",
+ ret);
+ pm_runtime_put_noidle(sdev->dev);
+ return ret;
+ }
+
+ /* get all the mixer data from DSP */
+ snd_sof_ipc_set_get_comp_data(sdev->ipc, scontrol,
+ SOF_IPC_COMP_GET_VALUE,
+ SOF_CTRL_TYPE_VALUE_CHAN_GET,
+ SOF_CTRL_CMD_SWITCH,
+ false);
+
+ /* read back each channel */
+ for (i = 0; i < channels; i++)
+ ucontrol->value.integer.value[i] = cdata->chanv[i].value;
+
+ pm_runtime_mark_last_busy(sdev->dev);
+ err = pm_runtime_put_autosuspend(sdev->dev);
+ if (err < 0)
+ dev_err_ratelimited(sdev->dev,
+ "error: switch get failed to idle %d\n",
+ err);
+ return 0;
+}
+
+int snd_sof_switch_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct soc_mixer_control *sm =
+ (struct soc_mixer_control *)kcontrol->private_value;
+ struct snd_sof_control *scontrol = sm->dobj.private;
+ struct snd_sof_dev *sdev = scontrol->sdev;
+ struct sof_ipc_ctrl_data *cdata = scontrol->control_data;
+ unsigned int i, channels = scontrol->num_channels;
+ int ret, err;
+
+ ret = pm_runtime_get_sync(sdev->dev);
+ if (ret < 0) {
+ dev_err_ratelimited(sdev->dev,
+ "error: switch put failed to resume %d\n",
+ ret);
+ pm_runtime_put_noidle(sdev->dev);
+ return ret;
+ }
+
+ /* update each channel */
+ for (i = 0; i < channels; i++) {
+ cdata->chanv[i].value = ucontrol->value.integer.value[i];
+ cdata->chanv[i].channel = i;
+ }
+
+ /* notify DSP of mixer updates */
+ snd_sof_ipc_set_get_comp_data(sdev->ipc, scontrol,
+ SOF_IPC_COMP_SET_VALUE,
+ SOF_CTRL_TYPE_VALUE_CHAN_GET,
+ SOF_CTRL_CMD_SWITCH,
+ true);
+
+ pm_runtime_mark_last_busy(sdev->dev);
+ err = pm_runtime_put_autosuspend(sdev->dev);
+ if (err < 0)
+ dev_err_ratelimited(sdev->dev,
+ "error: switch put failed to idle %d\n",
+ err);
+ return 0;
+}
+
+int snd_sof_enum_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct soc_enum *se =
+ (struct soc_enum *)kcontrol->private_value;
+ struct snd_sof_control *scontrol = se->dobj.private;
+ struct snd_sof_dev *sdev = scontrol->sdev;
+ struct sof_ipc_ctrl_data *cdata = scontrol->control_data;
+ unsigned int i, channels = scontrol->num_channels;
+ int err, ret;
+
+ ret = pm_runtime_get_sync(sdev->dev);
+ if (ret < 0) {
+ dev_err_ratelimited(sdev->dev,
+ "error: enum get failed to resume %d\n",
+ ret);
+ pm_runtime_put_noidle(sdev->dev);
+ return ret;
+ }
+
+ /* get all the enum data from DSP */
+ snd_sof_ipc_set_get_comp_data(sdev->ipc, scontrol,
+ SOF_IPC_COMP_GET_VALUE,
+ SOF_CTRL_TYPE_VALUE_CHAN_GET,
+ SOF_CTRL_CMD_ENUM,
+ false);
+
+ /* read back each channel */
+ for (i = 0; i < channels; i++)
+ ucontrol->value.enumerated.item[i] = cdata->chanv[i].value;
+
+ pm_runtime_mark_last_busy(sdev->dev);
+ err = pm_runtime_put_autosuspend(sdev->dev);
+ if (err < 0)
+ dev_err_ratelimited(sdev->dev,
+ "error: enum get failed to idle %d\n",
+ err);
+ return 0;
+}
+
+int snd_sof_enum_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct soc_enum *se =
+ (struct soc_enum *)kcontrol->private_value;
+ struct snd_sof_control *scontrol = se->dobj.private;
+ struct snd_sof_dev *sdev = scontrol->sdev;
+ struct sof_ipc_ctrl_data *cdata = scontrol->control_data;
+ unsigned int i, channels = scontrol->num_channels;
+ int ret, err;
+
+ ret = pm_runtime_get_sync(sdev->dev);
+ if (ret < 0) {
+ dev_err_ratelimited(sdev->dev,
+ "error: enum put failed to resume %d\n",
+ ret);
+ pm_runtime_put_noidle(sdev->dev);
+ return ret;
+ }
+
+ /* update each channel */
+ for (i = 0; i < channels; i++) {
+ cdata->chanv[i].value = ucontrol->value.enumerated.item[i];
+ cdata->chanv[i].channel = i;
+ }
+
+ /* notify DSP of enum updates */
+ snd_sof_ipc_set_get_comp_data(sdev->ipc, scontrol,
+ SOF_IPC_COMP_SET_VALUE,
+ SOF_CTRL_TYPE_VALUE_CHAN_GET,
+ SOF_CTRL_CMD_ENUM,
+ true);
+
+ pm_runtime_mark_last_busy(sdev->dev);
+ err = pm_runtime_put_autosuspend(sdev->dev);
+ if (err < 0)
+ dev_err_ratelimited(sdev->dev,
+ "error: enum put failed to idle %d\n",
+ err);
+ return 0;
+}
+
+int snd_sof_bytes_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct soc_bytes_ext *be =
+ (struct soc_bytes_ext *)kcontrol->private_value;
+ struct snd_sof_control *scontrol = be->dobj.private;
+ struct snd_sof_dev *sdev = scontrol->sdev;
+ struct sof_ipc_ctrl_data *cdata = scontrol->control_data;
+ struct sof_abi_hdr *data = cdata->data;
+ size_t size;
+ int ret, err;
+
+ if (be->max > sizeof(ucontrol->value.bytes.data)) {
+ dev_err_ratelimited(sdev->dev,
+ "error: data max %d exceeds ucontrol data array size\n",
+ be->max);
+ return -EINVAL;
+ }
+
+ ret = pm_runtime_get_sync(sdev->dev);
+ if (ret < 0) {
+ dev_err_ratelimited(sdev->dev,
+ "error: bytes get failed to resume %d\n",
+ ret);
+ pm_runtime_put_noidle(sdev->dev);
+ return ret;
+ }
+
+ /* get all the binary data from DSP */
+ snd_sof_ipc_set_get_comp_data(sdev->ipc, scontrol,
+ SOF_IPC_COMP_GET_DATA,
+ SOF_CTRL_TYPE_DATA_GET,
+ scontrol->cmd,
+ false);
+
+ size = data->size + sizeof(*data);
+ if (size > be->max) {
+ dev_err_ratelimited(sdev->dev,
+ "error: DSP sent %zu bytes max is %d\n",
+ size, be->max);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* copy back to kcontrol */
+ memcpy(ucontrol->value.bytes.data, data, size);
+
+out:
+ pm_runtime_mark_last_busy(sdev->dev);
+ err = pm_runtime_put_autosuspend(sdev->dev);
+ if (err < 0)
+ dev_err_ratelimited(sdev->dev,
+ "error: bytes get failed to idle %d\n",
+ err);
+ return ret;
+}
+
+int snd_sof_bytes_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct soc_bytes_ext *be =
+ (struct soc_bytes_ext *)kcontrol->private_value;
+ struct snd_sof_control *scontrol = be->dobj.private;
+ struct snd_sof_dev *sdev = scontrol->sdev;
+ struct sof_ipc_ctrl_data *cdata = scontrol->control_data;
+ struct sof_abi_hdr *data = cdata->data;
+ int ret, err;
+
+ if (be->max > sizeof(ucontrol->value.bytes.data)) {
+ dev_err_ratelimited(sdev->dev,
+ "error: data max %d exceeds ucontrol data array size\n",
+ be->max);
+ return -EINVAL;
+ }
+
+ if (data->size > be->max) {
+ dev_err_ratelimited(sdev->dev,
+ "error: size too big %d bytes max is %d\n",
+ data->size, be->max);
+ return -EINVAL;
+ }
+
+ ret = pm_runtime_get_sync(sdev->dev);
+ if (ret < 0) {
+ dev_err_ratelimited(sdev->dev,
+ "error: bytes put failed to resume %d\n",
+ ret);
+ pm_runtime_put_noidle(sdev->dev);
+ return ret;
+ }
+
+ /* copy from kcontrol */
+ memcpy(data, ucontrol->value.bytes.data, data->size);
+
+ /* notify DSP of byte control updates */
+ snd_sof_ipc_set_get_comp_data(sdev->ipc, scontrol,
+ SOF_IPC_COMP_SET_DATA,
+ SOF_CTRL_TYPE_DATA_SET,
+ scontrol->cmd,
+ true);
+
+ pm_runtime_mark_last_busy(sdev->dev);
+ err = pm_runtime_put_autosuspend(sdev->dev);
+ if (err < 0)
+ dev_err_ratelimited(sdev->dev,
+ "error: bytes put failed to idle %d\n",
+ err);
+ return ret;
+}
+
+int snd_sof_bytes_ext_put(struct snd_kcontrol *kcontrol,
+ const unsigned int __user *binary_data,
+ unsigned int size)
+{
+ struct soc_bytes_ext *be =
+ (struct soc_bytes_ext *)kcontrol->private_value;
+ struct snd_sof_control *scontrol = be->dobj.private;
+ struct snd_sof_dev *sdev = scontrol->sdev;
+ struct sof_ipc_ctrl_data *cdata = scontrol->control_data;
+ struct snd_ctl_tlv header;
+ const struct snd_ctl_tlv __user *tlvd =
+ (const struct snd_ctl_tlv __user *)binary_data;
+ int ret;
+ int err;
+
+ /*
+ * The beginning of bytes data contains a header from where
+ * the length (as bytes) is needed to know the correct copy
+ * length of data from tlvd->tlv.
+ */
+ if (copy_from_user(&header, tlvd, sizeof(const struct snd_ctl_tlv)))
+ return -EFAULT;
+
+ /* be->max is coming from topology */
+ if (header.length > be->max) {
+ dev_err_ratelimited(sdev->dev, "error: Bytes data size %d exceeds max %d.\n",
+ header.length, be->max);
+ return -EINVAL;
+ }
+
+ /* Check that header id matches the command */
+ if (header.numid != scontrol->cmd) {
+ dev_err_ratelimited(sdev->dev,
+ "error: incorrect numid %d\n",
+ header.numid);
+ return -EINVAL;
+ }
+
+ if (copy_from_user(cdata->data, tlvd->tlv, header.length))
+ return -EFAULT;
+
+ if (cdata->data->magic != SOF_ABI_MAGIC) {
+ dev_err_ratelimited(sdev->dev,
+ "error: Wrong ABI magic 0x%08x.\n",
+ cdata->data->magic);
+ return -EINVAL;
+ }
+
+ if (SOF_ABI_VERSION_INCOMPATIBLE(SOF_ABI_VERSION, cdata->data->abi)) {
+ dev_err_ratelimited(sdev->dev, "error: Incompatible ABI version 0x%08x.\n",
+ cdata->data->abi);
+ return -EINVAL;
+ }
+
+ if (cdata->data->size + sizeof(const struct sof_abi_hdr) > be->max) {
+ dev_err_ratelimited(sdev->dev, "error: Mismatch in ABI data size (truncated?).\n");
+ return -EINVAL;
+ }
+
+ ret = pm_runtime_get_sync(sdev->dev);
+ if (ret < 0) {
+ dev_err_ratelimited(sdev->dev,
+ "error: bytes_ext put failed to resume %d\n",
+ ret);
+ pm_runtime_put_noidle(sdev->dev);
+ return ret;
+ }
+
+ /* notify DSP of byte control updates */
+ snd_sof_ipc_set_get_comp_data(sdev->ipc, scontrol,
+ SOF_IPC_COMP_SET_DATA,
+ SOF_CTRL_TYPE_DATA_SET,
+ scontrol->cmd,
+ true);
+
+ pm_runtime_mark_last_busy(sdev->dev);
+ err = pm_runtime_put_autosuspend(sdev->dev);
+ if (err < 0)
+ dev_err_ratelimited(sdev->dev,
+ "error: bytes_ext put failed to idle %d\n",
+ err);
+
+ return ret;
+}
+
+int snd_sof_bytes_ext_get(struct snd_kcontrol *kcontrol,
+ unsigned int __user *binary_data,
+ unsigned int size)
+{
+ struct soc_bytes_ext *be =
+ (struct soc_bytes_ext *)kcontrol->private_value;
+ struct snd_sof_control *scontrol = be->dobj.private;
+ struct snd_sof_dev *sdev = scontrol->sdev;
+ struct sof_ipc_ctrl_data *cdata = scontrol->control_data;
+ struct snd_ctl_tlv header;
+ struct snd_ctl_tlv __user *tlvd =
+ (struct snd_ctl_tlv __user *)binary_data;
+ int data_size;
+ int err;
+ int ret;
+
+ ret = pm_runtime_get_sync(sdev->dev);
+ if (ret < 0) {
+ dev_err_ratelimited(sdev->dev,
+ "error: bytes_ext get failed to resume %d\n",
+ ret);
+ pm_runtime_put_noidle(sdev->dev);
+ return ret;
+ }
+
+ /*
+ * Decrement the limit by ext bytes header size to
+ * ensure the user space buffer is not exceeded.
+ */
+ size -= sizeof(const struct snd_ctl_tlv);
+
+ /* set the ABI header values */
+ cdata->data->magic = SOF_ABI_MAGIC;
+ cdata->data->abi = SOF_ABI_VERSION;
+
+ /* get all the component data from DSP */
+ ret = snd_sof_ipc_set_get_comp_data(sdev->ipc, scontrol,
+ SOF_IPC_COMP_GET_DATA,
+ SOF_CTRL_TYPE_DATA_GET,
+ scontrol->cmd,
+ false);
+
+ /* Prevent read of other kernel data or possibly corrupt response */
+ data_size = cdata->data->size + sizeof(const struct sof_abi_hdr);
+
+ /* check data size doesn't exceed max coming from topology */
+ if (data_size > be->max) {
+ dev_err_ratelimited(sdev->dev, "error: user data size %d exceeds max size %d.\n",
+ data_size, be->max);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ header.numid = scontrol->cmd;
+ header.length = data_size;
+ if (copy_to_user(tlvd, &header, sizeof(const struct snd_ctl_tlv))) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ if (copy_to_user(tlvd->tlv, cdata->data, data_size))
+ ret = -EFAULT;
+
+out:
+ pm_runtime_mark_last_busy(sdev->dev);
+ err = pm_runtime_put_autosuspend(sdev->dev);
+ if (err < 0)
+ dev_err_ratelimited(sdev->dev,
+ "error: bytes_ext get failed to idle %d\n",
+ err);
+ return ret;
+}
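
The volume get/put handlers above translate between ALSA integer control steps and firmware gain words through the per-control volume_table built from topology TLV data: mixer_to_ipc() clamps the step index and looks up the gain, while ipc_to_mixer() returns the first step whose gain covers the reported value. A stand-alone sketch of that round trip, with an invented four-entry table, is shown below:

/*
 * Stand-alone illustration (not driver code) of the volume mapping used
 * by snd_sof_volume_get/put(); the table values below are invented.
 */
#include <stdio.h>

static const unsigned int volume_map[] = { 0x0000, 0x1000, 0x4000, 0x10000 };

static unsigned int mixer_to_ipc(unsigned int value,
				 const unsigned int *map, int size)
{
	/* clamp out-of-range steps to the largest gain */
	return value >= (unsigned int)size ? map[size - 1] : map[value];
}

static unsigned int ipc_to_mixer(unsigned int value,
				 const unsigned int *map, int size)
{
	int i;

	/* return the first step whose gain covers the reported value */
	for (i = 0; i < size; i++)
		if (map[i] >= value)
			return i;
	return i - 1;
}

int main(void)
{
	unsigned int ipc = mixer_to_ipc(2, volume_map, 4);

	printf("step 2 -> ipc 0x%x -> step %u\n",
	       ipc, ipc_to_mixer(ipc, volume_map, 4));
	return 0;
}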
diff --git a/sound/soc/sof/core.c b/sound/soc/sof/core.c
new file mode 100644
index 000000000000..39cbd84ff9c8
--- /dev/null
+++ b/sound/soc/sof/core.c
@@ -0,0 +1,506 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018 Intel Corporation. All rights reserved.
+//
+// Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+//
+
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include <asm/unaligned.h>
+#include <sound/soc.h>
+#include <sound/sof.h>
+#include "sof-priv.h"
+#include "ops.h"
+
+/* SOF defaults if not provided by the platform in ms */
+#define TIMEOUT_DEFAULT_IPC_MS 5
+#define TIMEOUT_DEFAULT_BOOT_MS 100
+
+/*
+ * Generic object lookup APIs.
+ */
+
+struct snd_sof_pcm *snd_sof_find_spcm_name(struct snd_sof_dev *sdev,
+ const char *name)
+{
+ struct snd_sof_pcm *spcm;
+
+ list_for_each_entry(spcm, &sdev->pcm_list, list) {
+ /* match with PCM dai name */
+ if (strcmp(spcm->pcm.dai_name, name) == 0)
+ return spcm;
+
+ /* match with playback caps name if set */
+ if (*spcm->pcm.caps[0].name &&
+ !strcmp(spcm->pcm.caps[0].name, name))
+ return spcm;
+
+ /* match with capture caps name if set */
+ if (*spcm->pcm.caps[1].name &&
+ !strcmp(spcm->pcm.caps[1].name, name))
+ return spcm;
+ }
+
+ return NULL;
+}
+
+struct snd_sof_pcm *snd_sof_find_spcm_comp(struct snd_sof_dev *sdev,
+ unsigned int comp_id,
+ int *direction)
+{
+ struct snd_sof_pcm *spcm;
+
+ list_for_each_entry(spcm, &sdev->pcm_list, list) {
+ if (spcm->stream[SNDRV_PCM_STREAM_PLAYBACK].comp_id == comp_id) {
+ *direction = SNDRV_PCM_STREAM_PLAYBACK;
+ return spcm;
+ }
+ if (spcm->stream[SNDRV_PCM_STREAM_CAPTURE].comp_id == comp_id) {
+ *direction = SNDRV_PCM_STREAM_CAPTURE;
+ return spcm;
+ }
+ }
+
+ return NULL;
+}
+
+struct snd_sof_pcm *snd_sof_find_spcm_pcm_id(struct snd_sof_dev *sdev,
+ unsigned int pcm_id)
+{
+ struct snd_sof_pcm *spcm;
+
+ list_for_each_entry(spcm, &sdev->pcm_list, list) {
+ if (le32_to_cpu(spcm->pcm.pcm_id) == pcm_id)
+ return spcm;
+ }
+
+ return NULL;
+}
+
+struct snd_sof_widget *snd_sof_find_swidget(struct snd_sof_dev *sdev,
+ const char *name)
+{
+ struct snd_sof_widget *swidget;
+
+ list_for_each_entry(swidget, &sdev->widget_list, list) {
+ if (strcmp(name, swidget->widget->name) == 0)
+ return swidget;
+ }
+
+ return NULL;
+}
+
+/* find widget by stream name and direction */
+struct snd_sof_widget *snd_sof_find_swidget_sname(struct snd_sof_dev *sdev,
+ const char *pcm_name, int dir)
+{
+ struct snd_sof_widget *swidget;
+ enum snd_soc_dapm_type type;
+
+ if (dir == SNDRV_PCM_STREAM_PLAYBACK)
+ type = snd_soc_dapm_aif_in;
+ else
+ type = snd_soc_dapm_aif_out;
+
+ list_for_each_entry(swidget, &sdev->widget_list, list) {
+ if (!strcmp(pcm_name, swidget->widget->sname) && swidget->id == type)
+ return swidget;
+ }
+
+ return NULL;
+}
+
+struct snd_sof_dai *snd_sof_find_dai(struct snd_sof_dev *sdev,
+ const char *name)
+{
+ struct snd_sof_dai *dai;
+
+ list_for_each_entry(dai, &sdev->dai_list, list) {
+ if (dai->name && (strcmp(name, dai->name) == 0))
+ return dai;
+ }
+
+ return NULL;
+}
+
+/*
+ * FW Panic/fault handling.
+ */
+
+struct sof_panic_msg {
+ u32 id;
+ const char *msg;
+};
+
+/* standard FW panic types */
+static const struct sof_panic_msg panic_msg[] = {
+ {SOF_IPC_PANIC_MEM, "out of memory"},
+ {SOF_IPC_PANIC_WORK, "work subsystem init failed"},
+ {SOF_IPC_PANIC_IPC, "IPC subsystem init failed"},
+ {SOF_IPC_PANIC_ARCH, "arch init failed"},
+ {SOF_IPC_PANIC_PLATFORM, "platform init failed"},
+ {SOF_IPC_PANIC_TASK, "scheduler init failed"},
+ {SOF_IPC_PANIC_EXCEPTION, "runtime exception"},
+ {SOF_IPC_PANIC_DEADLOCK, "deadlock"},
+ {SOF_IPC_PANIC_STACK, "stack overflow"},
+ {SOF_IPC_PANIC_IDLE, "can't enter idle"},
+ {SOF_IPC_PANIC_WFI, "invalid wait state"},
+ {SOF_IPC_PANIC_ASSERT, "assertion failed"},
+};
+
+/*
+ * helper to be called from .dbg_dump callbacks. No error code is
+ * provided, it's left as an exercise for the caller of .dbg_dump
+ * (typically IPC or loader)
+ */
+void snd_sof_get_status(struct snd_sof_dev *sdev, u32 panic_code,
+ u32 tracep_code, void *oops,
+ struct sof_ipc_panic_info *panic_info,
+ void *stack, size_t stack_words)
+{
+ u32 code;
+ int i;
+
+ /* is firmware dead ? */
+ if ((panic_code & SOF_IPC_PANIC_MAGIC_MASK) != SOF_IPC_PANIC_MAGIC) {
+ dev_err(sdev->dev, "error: unexpected fault 0x%8.8x trace 0x%8.8x\n",
+ panic_code, tracep_code);
+ return; /* no fault ? */
+ }
+
+ code = panic_code & (SOF_IPC_PANIC_MAGIC_MASK | SOF_IPC_PANIC_CODE_MASK);
+
+ for (i = 0; i < ARRAY_SIZE(panic_msg); i++) {
+ if (panic_msg[i].id == code) {
+ dev_err(sdev->dev, "error: %s\n", panic_msg[i].msg);
+ dev_err(sdev->dev, "error: trace point %8.8x\n",
+ tracep_code);
+ goto out;
+ }
+ }
+
+ /* unknown error */
+ dev_err(sdev->dev, "error: unknown reason %8.8x\n", panic_code);
+ dev_err(sdev->dev, "error: trace point %8.8x\n", tracep_code);
+
+out:
+ dev_err(sdev->dev, "error: panic at %s:%d\n",
+ panic_info->filename, panic_info->linenum);
+ sof_oops(sdev, oops);
+ sof_stack(sdev, oops, stack, stack_words);
+}
+EXPORT_SYMBOL(snd_sof_get_status);
+
+/*
+ * Generic buffer page table creation.
+ * Take each physical page address and drop the least significant unused
+ * bits from each (based on PAGE_SIZE). Then pack valid page address bits
+ * into compressed page table.
+ */
+
+int snd_sof_create_page_table(struct snd_sof_dev *sdev,
+ struct snd_dma_buffer *dmab,
+ unsigned char *page_table, size_t size)
+{
+ int i, pages;
+
+ pages = snd_sgbuf_aligned_pages(size);
+
+ dev_dbg(sdev->dev, "generating page table for %p size 0x%zx pages %d\n",
+ dmab->area, size, pages);
+
+ for (i = 0; i < pages; i++) {
+ /*
+ * The number of valid address bits for each page is 20.
+ * idx determines the byte position within page_table
+ * where the current page's address is stored
+ * in the compressed page_table.
+ * This can be calculated by multiplying the page number by 2.5.
+ */
+ u32 idx = (5 * i) >> 1;
+ u32 pfn = snd_sgbuf_get_addr(dmab, i * PAGE_SIZE) >> PAGE_SHIFT;
+ u8 *pg_table;
+
+ dev_vdbg(sdev->dev, "pfn i %i idx %d pfn %x\n", i, idx, pfn);
+
+ pg_table = (u8 *)(page_table + idx);
+
+ /*
+ * pagetable compression:
+ * byte 0 byte 1 byte 2 byte 3 byte 4 byte 5
+ * ___________pfn 0__________ __________pfn 1___________ _pfn 2...
+ * .... .... .... .... .... .... .... .... .... .... ....
+ * It is created by:
+ * 1. set current location to 0, PFN index i to 0
+ * 2. put pfn[i] at current location in Little Endian byte order
+ * 3. calculate an intermediate value as
+ * x = (pfn[i+1] << 4) | (pfn[i] & 0xf)
+ * 4. put x at offset (current location + 2) in LE byte order
+ * 5. increment current location by 5 bytes, increment i by 2
+ * 6. continue to (2)
+ */
+ if (i & 1)
+ put_unaligned_le32((pg_table[0] & 0xf) | pfn << 4,
+ pg_table);
+ else
+ put_unaligned_le32(pfn, pg_table);
+ }
+
+ return pages;
+}
+
+/*
+ * SOF Driver enumeration.
+ */
+static int sof_machine_check(struct snd_sof_dev *sdev)
+{
+ struct snd_sof_pdata *plat_data = sdev->pdata;
+ struct snd_soc_acpi_mach *machine;
+ int ret;
+
+ if (plat_data->machine)
+ return 0;
+
+ if (!IS_ENABLED(CONFIG_SND_SOC_SOF_NOCODEC)) {
+ dev_err(sdev->dev, "error: no matching ASoC machine driver found - aborting probe\n");
+ return -ENODEV;
+ }
+
+ /* fallback to nocodec mode */
+ dev_warn(sdev->dev, "No ASoC machine driver found - using nocodec\n");
+ machine = devm_kzalloc(sdev->dev, sizeof(*machine), GFP_KERNEL);
+ if (!machine)
+ return -ENOMEM;
+
+ ret = sof_nocodec_setup(sdev->dev, plat_data, machine,
+ plat_data->desc, plat_data->desc->ops);
+ if (ret < 0)
+ return ret;
+
+ plat_data->machine = machine;
+
+ return 0;
+}
+
+static int sof_probe_continue(struct snd_sof_dev *sdev)
+{
+ struct snd_sof_pdata *plat_data = sdev->pdata;
+ const char *drv_name;
+ const void *mach;
+ int size;
+ int ret;
+
+ /* probe the DSP hardware */
+ ret = snd_sof_probe(sdev);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: failed to probe DSP %d\n", ret);
+ return ret;
+ }
+
+ /* check machine info */
+ ret = sof_machine_check(sdev);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: failed to get machine info %d\n",
+ ret);
+ goto dbg_err;
+ }
+
+ /* set up platform component driver */
+ snd_sof_new_platform_drv(sdev);
+
+ /* register any debug/trace capabilities */
+ ret = snd_sof_dbg_init(sdev);
+ if (ret < 0) {
+ /*
+ * debugfs issues are suppressed in snd_sof_dbg_init() since
+ * we cannot rely on debugfs; here we only trap errors due to
+ * memory allocation.
+ */
+ dev_err(sdev->dev, "error: failed to init DSP trace/debug %d\n",
+ ret);
+ goto dbg_err;
+ }
+
+ /* init the IPC */
+ sdev->ipc = snd_sof_ipc_init(sdev);
+ if (!sdev->ipc) {
+ dev_err(sdev->dev, "error: failed to init DSP IPC %d\n", ret);
+ goto ipc_err;
+ }
+
+ /* load the firmware */
+ ret = snd_sof_load_firmware(sdev);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: failed to load DSP firmware %d\n",
+ ret);
+ goto fw_load_err;
+ }
+
+ /* boot the firmware */
+ ret = snd_sof_run_firmware(sdev);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: failed to boot DSP firmware %d\n",
+ ret);
+ goto fw_run_err;
+ }
+
+ /* init DMA trace */
+ ret = snd_sof_init_trace(sdev);
+ if (ret < 0) {
+ /* non fatal */
+ dev_warn(sdev->dev,
+ "warning: failed to initialize trace %d\n", ret);
+ }
+
+ /* hereafter all FW boot flows are for PM reasons */
+ sdev->first_boot = false;
+
+ /* now register audio DSP platform driver and dai */
+ ret = devm_snd_soc_register_component(sdev->dev, &sdev->plat_drv,
+ sof_ops(sdev)->drv,
+ sof_ops(sdev)->num_drv);
+ if (ret < 0) {
+ dev_err(sdev->dev,
+ "error: failed to register DSP DAI driver %d\n", ret);
+ goto fw_run_err;
+ }
+
+ drv_name = plat_data->machine->drv_name;
+ mach = (const void *)plat_data->machine;
+ size = sizeof(*plat_data->machine);
+
+ /* register machine driver, pass machine info as pdata */
+ plat_data->pdev_mach =
+ platform_device_register_data(sdev->dev, drv_name,
+ PLATFORM_DEVID_NONE, mach, size);
+
+ if (IS_ERR(plat_data->pdev_mach)) {
+ ret = PTR_ERR(plat_data->pdev_mach);
+ goto comp_err;
+ }
+
+ dev_dbg(sdev->dev, "created machine %s\n",
+ dev_name(&plat_data->pdev_mach->dev));
+
+ if (plat_data->sof_probe_complete)
+ plat_data->sof_probe_complete(sdev->dev);
+
+ return 0;
+
+comp_err:
+ snd_soc_unregister_component(sdev->dev);
+fw_run_err:
+ snd_sof_fw_unload(sdev);
+fw_load_err:
+ snd_sof_ipc_free(sdev);
+ipc_err:
+ snd_sof_free_debug(sdev);
+dbg_err:
+ snd_sof_remove(sdev);
+
+ return ret;
+}
+
+static void sof_probe_work(struct work_struct *work)
+{
+ struct snd_sof_dev *sdev =
+ container_of(work, struct snd_sof_dev, probe_work);
+ int ret;
+
+ ret = sof_probe_continue(sdev);
+ if (ret < 0) {
+ /* errors cannot be propagated, log */
+ dev_err(sdev->dev, "error: %s failed err: %d\n", __func__, ret);
+ }
+}
+
+int snd_sof_device_probe(struct device *dev, struct snd_sof_pdata *plat_data)
+{
+ struct snd_sof_dev *sdev;
+
+ sdev = devm_kzalloc(dev, sizeof(*sdev), GFP_KERNEL);
+ if (!sdev)
+ return -ENOMEM;
+
+ /* initialize sof device */
+ sdev->dev = dev;
+
+ sdev->pdata = plat_data;
+ sdev->first_boot = true;
+ dev_set_drvdata(dev, sdev);
+
+ /* check all mandatory ops */
+ if (!sof_ops(sdev) || !sof_ops(sdev)->probe || !sof_ops(sdev)->run ||
+ !sof_ops(sdev)->block_read || !sof_ops(sdev)->block_write ||
+ !sof_ops(sdev)->send_msg || !sof_ops(sdev)->load_firmware ||
+ !sof_ops(sdev)->ipc_msg_data || !sof_ops(sdev)->ipc_pcm_params)
+ return -EINVAL;
+
+ INIT_LIST_HEAD(&sdev->pcm_list);
+ INIT_LIST_HEAD(&sdev->kcontrol_list);
+ INIT_LIST_HEAD(&sdev->widget_list);
+ INIT_LIST_HEAD(&sdev->dai_list);
+ INIT_LIST_HEAD(&sdev->route_list);
+ spin_lock_init(&sdev->ipc_lock);
+ spin_lock_init(&sdev->hw_lock);
+
+ if (IS_ENABLED(CONFIG_SND_SOC_SOF_PROBE_WORK_QUEUE))
+ INIT_WORK(&sdev->probe_work, sof_probe_work);
+
+ /* set default timeouts if none provided */
+ if (plat_data->desc->ipc_timeout == 0)
+ sdev->ipc_timeout = TIMEOUT_DEFAULT_IPC_MS;
+ else
+ sdev->ipc_timeout = plat_data->desc->ipc_timeout;
+ if (plat_data->desc->boot_timeout == 0)
+ sdev->boot_timeout = TIMEOUT_DEFAULT_BOOT_MS;
+ else
+ sdev->boot_timeout = plat_data->desc->boot_timeout;
+
+ if (IS_ENABLED(CONFIG_SND_SOC_SOF_PROBE_WORK_QUEUE)) {
+ schedule_work(&sdev->probe_work);
+ return 0;
+ }
+
+ return sof_probe_continue(sdev);
+}
+EXPORT_SYMBOL(snd_sof_device_probe);
+
+int snd_sof_device_remove(struct device *dev)
+{
+ struct snd_sof_dev *sdev = dev_get_drvdata(dev);
+ struct snd_sof_pdata *pdata = sdev->pdata;
+
+ if (IS_ENABLED(CONFIG_SND_SOC_SOF_PROBE_WORK_QUEUE))
+ cancel_work_sync(&sdev->probe_work);
+
+ snd_sof_fw_unload(sdev);
+ snd_sof_ipc_free(sdev);
+ snd_sof_free_debug(sdev);
+ snd_sof_free_trace(sdev);
+ snd_sof_remove(sdev);
+
+ /*
+ * Unregister machine driver. This will unbind the snd_card which
+ * will remove the component driver and unload the topology
+ * before freeing the snd_card.
+ */
+ if (!IS_ERR_OR_NULL(pdata->pdev_mach))
+ platform_device_unregister(pdata->pdev_mach);
+
+ /* release firmware */
+ release_firmware(pdata->fw);
+ pdata->fw = NULL;
+
+ return 0;
+}
+EXPORT_SYMBOL(snd_sof_device_remove);
+
+MODULE_AUTHOR("Liam Girdwood");
+MODULE_DESCRIPTION("Sound Open Firmware (SOF) Core");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_ALIAS("platform:sof-audio");
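
snd_sof_create_page_table() above packs 20-bit page frame numbers at 2.5 bytes apiece, exactly as described in its inline comment. A stand-alone sketch of the same packing scheme (invented PFN values, and a generic little-endian store in place of put_unaligned_le32()) is shown here for reference:

/*
 * Illustrative stand-alone sketch (not driver code): pack 20-bit page
 * frame numbers at 2.5 bytes each. PFN values are invented.
 */
#include <stdio.h>
#include <stdint.h>

static void put_le32(uint8_t *p, uint32_t v)
{
	p[0] = v; p[1] = v >> 8; p[2] = v >> 16; p[3] = v >> 24;
}

int main(void)
{
	uint32_t pfn[4] = { 0x12345, 0x6789a, 0xbcdef, 0x01357 };
	uint8_t table[16] = { 0 };
	int i;

	for (i = 0; i < 4; i++) {
		uint8_t *p = table + ((5 * i) >> 1);

		if (i & 1)	/* odd PFN shares a byte with the previous one */
			put_le32(p, (p[0] & 0xf) | (pfn[i] << 4));
		else
			put_le32(p, pfn[i]);
	}

	for (i = 0; i < 10; i++)	/* 4 PFNs occupy 10 bytes */
		printf("%02x ", table[i]);
	printf("\n");
	return 0;
}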
diff --git a/sound/soc/sof/debug.c b/sound/soc/sof/debug.c
new file mode 100644
index 000000000000..55f1d808dba0
--- /dev/null
+++ b/sound/soc/sof/debug.c
@@ -0,0 +1,232 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018 Intel Corporation. All rights reserved.
+//
+// Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+//
+// Generic debug routines used to export DSP MMIO and memories to userspace
+// for firmware debugging.
+//
+
+#include <linux/debugfs.h>
+#include <linux/io.h>
+#include <linux/pm_runtime.h>
+#include "sof-priv.h"
+#include "ops.h"
+
+static ssize_t sof_dfsentry_read(struct file *file, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct snd_sof_dfsentry *dfse = file->private_data;
+ struct snd_sof_dev *sdev = dfse->sdev;
+ loff_t pos = *ppos;
+ size_t size_ret;
+ int skip = 0;
+ int size;
+ u8 *buf;
+
+ size = dfse->size;
+
+ /* validate position & count */
+ if (pos < 0)
+ return -EINVAL;
+ if (pos >= size || !count)
+ return 0;
+ /* find the minimum. min() is not used since it adds sparse warnings */
+ if (count > size - pos)
+ count = size - pos;
+
+ /* align io read start to u32 multiple */
+ pos = ALIGN_DOWN(pos, 4);
+
+ /* intermediate buffer size must be u32 multiple */
+ size = ALIGN(count, 4);
+
+ /* if start position is unaligned, read extra u32 */
+ if (unlikely(pos != *ppos)) {
+ skip = *ppos - pos;
+ if (pos + size + 4 < dfse->size)
+ size += 4;
+ }
+
+ buf = kzalloc(size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ if (dfse->type == SOF_DFSENTRY_TYPE_IOMEM) {
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG_ENABLE_DEBUGFS_CACHE)
+ /*
+ * If the DSP is active: copy from IO.
+ * If the DSP is suspended:
+ * - Copy from IO if the memory is always accessible.
+ * - Otherwise, copy from cached buffer.
+ */
+ if (pm_runtime_active(sdev->dev) ||
+ dfse->access_type == SOF_DEBUGFS_ACCESS_ALWAYS) {
+ memcpy_fromio(buf, dfse->io_mem + pos, size);
+ } else {
+ dev_info(sdev->dev,
+ "Copying cached debugfs data\n");
+ memcpy(buf, dfse->cache_buf + pos, size);
+ }
+#else
+ /* if the DSP is in D3 */
+ if (!pm_runtime_active(sdev->dev) &&
+ dfse->access_type == SOF_DEBUGFS_ACCESS_D0_ONLY) {
+ dev_err(sdev->dev,
+ "error: debugfs entry %s cannot be read in DSP D3\n",
+ dfse->dfsentry->d_name.name);
+ kfree(buf);
+ return -EINVAL;
+ }
+
+ memcpy_fromio(buf, dfse->io_mem + pos, size);
+#endif
+ } else {
+ memcpy(buf, ((u8 *)(dfse->buf) + pos), size);
+ }
+
+ /* copy to userspace */
+ size_ret = copy_to_user(buffer, buf + skip, count);
+
+ kfree(buf);
+
+ /* update count & position if copy succeeded */
+ if (size_ret)
+ return -EFAULT;
+
+ *ppos = pos + count;
+
+ return count;
+}
+
+static const struct file_operations sof_dfs_fops = {
+ .open = simple_open,
+ .read = sof_dfsentry_read,
+ .llseek = default_llseek,
+};
+
+/* create FS entry for debug files that can expose DSP memories, registers */
+int snd_sof_debugfs_io_item(struct snd_sof_dev *sdev,
+ void __iomem *base, size_t size,
+ const char *name,
+ enum sof_debugfs_access_type access_type)
+{
+ struct snd_sof_dfsentry *dfse;
+
+ if (!sdev)
+ return -EINVAL;
+
+ dfse = devm_kzalloc(sdev->dev, sizeof(*dfse), GFP_KERNEL);
+ if (!dfse)
+ return -ENOMEM;
+
+ dfse->type = SOF_DFSENTRY_TYPE_IOMEM;
+ dfse->io_mem = base;
+ dfse->size = size;
+ dfse->sdev = sdev;
+ dfse->access_type = access_type;
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG_ENABLE_DEBUGFS_CACHE)
+ /*
+ * allocate cache buffer that will be used to save the mem window
+ * contents prior to suspend
+ */
+ if (access_type == SOF_DEBUGFS_ACCESS_D0_ONLY) {
+ dfse->cache_buf = devm_kzalloc(sdev->dev, size, GFP_KERNEL);
+ if (!dfse->cache_buf)
+ return -ENOMEM;
+ }
+#endif
+
+ dfse->dfsentry = debugfs_create_file(name, 0444, sdev->debugfs_root,
+ dfse, &sof_dfs_fops);
+ if (!dfse->dfsentry) {
+ /* can't rely on debugfs, only log error and keep going */
+ dev_err(sdev->dev, "error: cannot create debugfs entry %s\n",
+ name);
+ } else {
+ /* add to dfsentry list */
+ list_add(&dfse->list, &sdev->dfsentry_list);
+
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(snd_sof_debugfs_io_item);
+
+/* create FS entry for debug files to expose kernel memory */
+int snd_sof_debugfs_buf_item(struct snd_sof_dev *sdev,
+ void *base, size_t size,
+ const char *name)
+{
+ struct snd_sof_dfsentry *dfse;
+
+ if (!sdev)
+ return -EINVAL;
+
+ dfse = devm_kzalloc(sdev->dev, sizeof(*dfse), GFP_KERNEL);
+ if (!dfse)
+ return -ENOMEM;
+
+ dfse->type = SOF_DFSENTRY_TYPE_BUF;
+ dfse->buf = base;
+ dfse->size = size;
+ dfse->sdev = sdev;
+
+ dfse->dfsentry = debugfs_create_file(name, 0444, sdev->debugfs_root,
+ dfse, &sof_dfs_fops);
+ if (!dfse->dfsentry) {
+ /* can't rely on debugfs, only log error and keep going */
+ dev_err(sdev->dev, "error: cannot create debugfs entry %s\n",
+ name);
+ } else {
+ /* add to dfsentry list */
+ list_add(&dfse->list, &sdev->dfsentry_list);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(snd_sof_debugfs_buf_item);
+
+int snd_sof_dbg_init(struct snd_sof_dev *sdev)
+{
+ const struct snd_sof_dsp_ops *ops = sof_ops(sdev);
+ const struct snd_sof_debugfs_map *map;
+ int i;
+ int err;
+
+ /* use "sof" as top level debugFS dir */
+ sdev->debugfs_root = debugfs_create_dir("sof", NULL);
+ if (IS_ERR_OR_NULL(sdev->debugfs_root)) {
+ dev_err(sdev->dev, "error: failed to create debugfs directory\n");
+ return 0;
+ }
+
+ /* init dfsentry list */
+ INIT_LIST_HEAD(&sdev->dfsentry_list);
+
+ /* create debugFS files for platform specific MMIO/DSP memories */
+ for (i = 0; i < ops->debug_map_count; i++) {
+ map = &ops->debug_map[i];
+
+ err = snd_sof_debugfs_io_item(sdev, sdev->bar[map->bar] +
+ map->offset, map->size,
+ map->name, map->access_type);
+ /* errors are only due to memory allocation, not debugfs */
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(snd_sof_dbg_init);
+
+void snd_sof_free_debug(struct snd_sof_dev *sdev)
+{
+ debugfs_remove_recursive(sdev->debugfs_root);
+}
+EXPORT_SYMBOL_GPL(snd_sof_free_debug);
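
The entries created above appear under the "sof" debugfs directory (typically /sys/kernel/debug/sof/<name>, e.g. the "dsp" window on Apollolake) and can be read as plain binary blobs; the read handler takes care of u32-aligned MMIO access. A hypothetical user-space reader might look like this (path, mount point and read length are assumptions):

/* Hypothetical debugfs reader; the path assumes the default debugfs mount. */
#include <fcntl.h>
#include <stdio.h>
#include <stdint.h>
#include <unistd.h>

int main(void)
{
	uint32_t regs[8];
	ssize_t n;
	int fd, i;

	fd = open("/sys/kernel/debug/sof/dsp", O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	n = read(fd, regs, sizeof(regs));
	for (i = 0; i < n / 4; i++)	/* dump the first few u32 registers */
		printf("0x%03x: 0x%08x\n", i * 4, regs[i]);

	close(fd);
	return 0;
}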
diff --git a/sound/soc/sof/intel/Kconfig b/sound/soc/sof/intel/Kconfig
new file mode 100644
index 000000000000..32ee0fabab92
--- /dev/null
+++ b/sound/soc/sof/intel/Kconfig
@@ -0,0 +1,230 @@
+config SND_SOC_SOF_INTEL_TOPLEVEL
+ bool "SOF support for Intel audio DSPs"
+ depends on X86 || COMPILE_TEST
+ help
+ This adds support for Sound Open Firmware for Intel(R) platforms.
+ Say Y if you have such a device.
+ If unsure select "N".
+
+if SND_SOC_SOF_INTEL_TOPLEVEL
+
+config SND_SOC_SOF_INTEL_ACPI
+ tristate
+ select SND_SOC_SOF_BAYTRAIL if SND_SOC_SOF_BAYTRAIL_SUPPORT
+ select SND_SOC_SOF_BROADWELL if SND_SOC_SOF_BROADWELL_SUPPORT
+ help
+ This option is not user-selectable but automagically handled by
+ 'select' statements at a higher level
+
+config SND_SOC_SOF_INTEL_PCI
+ tristate
+ select SND_SOC_SOF_MERRIFIELD if SND_SOC_SOF_MERRIFIELD_SUPPORT
+ select SND_SOC_SOF_APOLLOLAKE if SND_SOC_SOF_APOLLOLAKE_SUPPORT
+ select SND_SOC_SOF_GEMINILAKE if SND_SOC_SOF_GEMINILAKE_SUPPORT
+ select SND_SOC_SOF_CANNONLAKE if SND_SOC_SOF_CANNONLAKE_SUPPORT
+ select SND_SOC_SOF_COFFEELAKE if SND_SOC_SOF_COFFEELAKE_SUPPORT
+ select SND_SOC_SOF_ICELAKE if SND_SOC_SOF_ICELAKE_SUPPORT
+ help
+ This option is not user-selectable but automagically handled by
+ 'select' statements at a higher level
+
+config SND_SOC_SOF_INTEL_HIFI_EP_IPC
+ tristate
+ help
+ This option is not user-selectable but automagically handled by
+ 'select' statements at a higher level
+
+config SND_SOC_SOF_INTEL_ATOM_HIFI_EP
+ tristate
+ select SND_SOC_INTEL_COMMON
+ select SND_SOC_SOF_INTEL_HIFI_EP_IPC
+ help
+ This option is not user-selectable but automagically handled by
+ 'select' statements at a higher level
+
+config SND_SOC_SOF_INTEL_COMMON
+ tristate
+ select SND_SOC_ACPI_INTEL_MATCH
+ select SND_SOC_SOF_XTENSA
+ select SND_SOC_INTEL_MACH
+ select SND_SOC_ACPI if ACPI
+ help
+ This option is not user-selectable but automagically handled by
+ 'select' statements at a higher level
+
+if SND_SOC_SOF_INTEL_ACPI
+
+config SND_SOC_SOF_BAYTRAIL_SUPPORT
+ bool "SOF support for Baytrail, Braswell and Cherrytrail"
+ help
+ This adds support for Sound Open Firmware for Intel(R) platforms
+ using the Baytrail, Braswell or Cherrytrail processors.
+ Say Y if you have such a device.
+ If unsure select "N".
+
+config SND_SOC_SOF_BAYTRAIL
+ tristate
+ select SND_SOC_SOF_INTEL_ATOM_HIFI_EP
+ help
+ This option is not user-selectable but automagically handled by
+ 'select' statements at a higher level
+
+config SND_SOC_SOF_BROADWELL_SUPPORT
+ bool "SOF support for Broadwell"
+ help
+ This adds support for Sound Open Firmware for Intel(R) platforms
+ using the Broadwell processors.
+ Say Y if you have such a device.
+ If unsure select "N".
+
+config SND_SOC_SOF_BROADWELL
+ tristate
+ select SND_SOC_SOF_INTEL_COMMON
+ select SND_SOC_SOF_INTEL_HIFI_EP_IPC
+ help
+ This option is not user-selectable but automagically handled by
+ 'select' statements at a higher level
+
+endif ## SND_SOC_SOF_INTEL_ACPI
+
+if SND_SOC_SOF_INTEL_PCI
+
+config SND_SOC_SOF_MERRIFIELD_SUPPORT
+ bool "SOF support for Tangier/Merrifield"
+ help
+ This adds support for Sound Open Firmware for Intel(R) platforms
+ using the Tangier/Merrifield processors.
+ Say Y if you have such a device.
+ If unsure select "N".
+
+config SND_SOC_SOF_MERRIFIELD
+ tristate
+ select SND_SOC_SOF_INTEL_ATOM_HIFI_EP
+ help
+ This option is not user-selectable but automagically handled by
+ 'select' statements at a higher level
+
+config SND_SOC_SOF_APOLLOLAKE_SUPPORT
+ bool "SOF support for Apollolake"
+ help
+ This adds support for Sound Open Firmware for Intel(R) platforms
+ using the Apollolake processors.
+ Say Y if you have such a device.
+ If unsure select "N".
+
+config SND_SOC_SOF_APOLLOLAKE
+ tristate
+ select SND_SOC_SOF_HDA_COMMON
+ help
+ This option is not user-selectable but automagically handled by
+ 'select' statements at a higher level
+
+config SND_SOC_SOF_GEMINILAKE_SUPPORT
+ bool "SOF support for GeminiLake"
+ help
+ This adds support for Sound Open Firmware for Intel(R) platforms
+ using the Geminilake processors.
+ Say Y if you have such a device.
+ If unsure select "N".
+
+config SND_SOC_SOF_GEMINILAKE
+ tristate
+ select SND_SOC_SOF_HDA_COMMON
+ help
+ This option is not user-selectable but automagically handled by
+ 'select' statements at a higher level
+
+config SND_SOC_SOF_CANNONLAKE_SUPPORT
+ bool "SOF support for Cannonlake"
+ help
+ This adds support for Sound Open Firmware for Intel(R) platforms
+ using the Cannonlake processors.
+ Say Y if you have such a device.
+ If unsure select "N".
+
+config SND_SOC_SOF_CANNONLAKE
+ tristate
+ select SND_SOC_SOF_HDA_COMMON
+ help
+ This option is not user-selectable but automagically handled by
+ 'select' statements at a higher level
+
+config SND_SOC_SOF_COFFEELAKE_SUPPORT
+ bool "SOF support for CoffeeLake"
+ help
+ This adds support for Sound Open Firmware for Intel(R) platforms
+ using the Coffeelake processors.
+ Say Y if you have such a device.
+ If unsure select "N".
+
+config SND_SOC_SOF_COFFEELAKE
+ tristate
+ select SND_SOC_SOF_HDA_COMMON
+ help
+ This option is not user-selectable but automagically handled by
+ 'select' statements at a higher level
+
+config SND_SOC_SOF_ICELAKE_SUPPORT
+ bool "SOF support for Icelake"
+ help
+ This adds support for Sound Open Firmware for Intel(R) platforms
+ using the Icelake processors.
+ Say Y if you have such a device.
+ If unsure select "N".
+
+config SND_SOC_SOF_ICELAKE
+ tristate
+ select SND_SOC_SOF_HDA_COMMON
+ help
+ This option is not user-selectable but automagically handled by
+ 'select' statements at a higher level
+
+config SND_SOC_SOF_HDA_COMMON
+ tristate
+ select SND_SOC_SOF_INTEL_COMMON
+ select SND_SOC_SOF_HDA_LINK_BASELINE
+ help
+ This option is not user-selectable but automagically handled by
+ 'select' statements at a higher level
+
+if SND_SOC_SOF_HDA_COMMON
+
+config SND_SOC_SOF_HDA_LINK
+ bool "SOF support for HDA Links(HDA/HDMI)"
+ depends on SND_SOC_SOF_NOCODEC=n
+ select SND_SOC_SOF_PROBE_WORK_QUEUE
+ help
+ This adds support for HDA links (HDA/HDMI) with Sound Open Firmware
+ for Intel(R) platforms.
+ Say Y if you want to enable HDA links with SOF.
+ If unsure select "N".
+
+config SND_SOC_SOF_HDA_AUDIO_CODEC
+ bool "SOF support for HDAudio codecs"
+ depends on SND_SOC_SOF_HDA_LINK
+ help
+ This adds support for HDAudio codecs with Sound Open Firmware
+ for Intel(R) platforms.
+ Say Y if you want to enable HDAudio codecs with SOF.
+ If unsure select "N".
+
+endif ## SND_SOC_SOF_HDA_COMMON
+
+config SND_SOC_SOF_HDA_LINK_BASELINE
+ tristate
+ select SND_SOC_SOF_HDA if SND_SOC_SOF_HDA_LINK
+ help
+ This option is not user-selectable but automagically handled by
+ 'select' statements at a higher level
+
+config SND_SOC_SOF_HDA
+ tristate
+ select SND_HDA_EXT_CORE if SND_SOC_SOF_HDA_LINK
+ select SND_SOC_HDAC_HDA if SND_SOC_SOF_HDA_AUDIO_CODEC
+ help
+ This option is not user-selectable but automagically handled by
+ 'select' statements at a higher level
+
+endif ## SND_SOC_SOF_INTEL_PCI
+
+endif ## SND_SOC_SOF_INTEL_TOPLEVEL
diff --git a/sound/soc/sof/intel/Makefile b/sound/soc/sof/intel/Makefile
new file mode 100644
index 000000000000..b8f58e006e29
--- /dev/null
+++ b/sound/soc/sof/intel/Makefile
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+
+snd-sof-intel-byt-objs := byt.o
+snd-sof-intel-bdw-objs := bdw.o
+
+snd-sof-intel-ipc-objs := intel-ipc.o
+
+snd-sof-intel-hda-common-objs := hda.o hda-loader.o hda-stream.o hda-trace.o \
+ hda-dsp.o hda-ipc.o hda-ctrl.o hda-pcm.o \
+ hda-dai.o hda-bus.o \
+ apl.o cnl.o
+
+snd-sof-intel-hda-objs := hda-codec.o
+
+obj-$(CONFIG_SND_SOC_SOF_INTEL_ATOM_HIFI_EP) += snd-sof-intel-byt.o
+obj-$(CONFIG_SND_SOC_SOF_BROADWELL) += snd-sof-intel-bdw.o
+obj-$(CONFIG_SND_SOC_SOF_INTEL_HIFI_EP_IPC) += snd-sof-intel-ipc.o
+obj-$(CONFIG_SND_SOC_SOF_HDA_COMMON) += snd-sof-intel-hda-common.o
+obj-$(CONFIG_SND_SOC_SOF_HDA) += snd-sof-intel-hda.o
diff --git a/sound/soc/sof/intel/apl.c b/sound/soc/sof/intel/apl.c
new file mode 100644
index 000000000000..f215d80dce2c
--- /dev/null
+++ b/sound/soc/sof/intel/apl.c
@@ -0,0 +1,113 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018 Intel Corporation. All rights reserved.
+//
+// Authors: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+// Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
+// Rander Wang <rander.wang@intel.com>
+// Keyon Jie <yang.jie@linux.intel.com>
+//
+
+/*
+ * Hardware interface for audio DSP on Apollolake and GeminiLake
+ */
+
+#include "../sof-priv.h"
+#include "hda.h"
+
+static const struct snd_sof_debugfs_map apl_dsp_debugfs[] = {
+ {"hda", HDA_DSP_HDA_BAR, 0, 0x4000, SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"pp", HDA_DSP_PP_BAR, 0, 0x1000, SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"dsp", HDA_DSP_BAR, 0, 0x10000, SOF_DEBUGFS_ACCESS_ALWAYS},
+};
+
+/* apollolake ops */
+const struct snd_sof_dsp_ops sof_apl_ops = {
+ /* probe and remove */
+ .probe = hda_dsp_probe,
+ .remove = hda_dsp_remove,
+
+ /* Register IO */
+ .write = sof_io_write,
+ .read = sof_io_read,
+ .write64 = sof_io_write64,
+ .read64 = sof_io_read64,
+
+ /* Block IO */
+ .block_read = sof_block_read,
+ .block_write = sof_block_write,
+
+ /* doorbell */
+ .irq_handler = hda_dsp_ipc_irq_handler,
+ .irq_thread = hda_dsp_ipc_irq_thread,
+
+ /* ipc */
+ .send_msg = hda_dsp_ipc_send_msg,
+ .fw_ready = hda_dsp_ipc_fw_ready,
+
+ .ipc_msg_data = hda_ipc_msg_data,
+ .ipc_pcm_params = hda_ipc_pcm_params,
+
+ /* debug */
+ .debug_map = apl_dsp_debugfs,
+ .debug_map_count = ARRAY_SIZE(apl_dsp_debugfs),
+ .dbg_dump = hda_dsp_dump,
+ .ipc_dump = hda_ipc_dump,
+
+ /* stream callbacks */
+ .pcm_open = hda_dsp_pcm_open,
+ .pcm_close = hda_dsp_pcm_close,
+ .pcm_hw_params = hda_dsp_pcm_hw_params,
+ .pcm_trigger = hda_dsp_pcm_trigger,
+ .pcm_pointer = hda_dsp_pcm_pointer,
+
+ /* firmware loading */
+ .load_firmware = snd_sof_load_firmware_raw,
+
+ /* firmware run */
+ .run = hda_dsp_cl_boot_firmware,
+
+ /* pre/post fw run */
+ .pre_fw_run = hda_dsp_pre_fw_run,
+ .post_fw_run = hda_dsp_post_fw_run,
+
+ /* dsp core power up/down */
+ .core_power_up = hda_dsp_enable_core,
+ .core_power_down = hda_dsp_core_reset_power_down,
+
+ /* trace callback */
+ .trace_init = hda_dsp_trace_init,
+ .trace_release = hda_dsp_trace_release,
+ .trace_trigger = hda_dsp_trace_trigger,
+
+ /* DAI drivers */
+ .drv = skl_dai,
+ .num_drv = SOF_SKL_NUM_DAIS,
+
+ /* PM */
+ .suspend = hda_dsp_suspend,
+ .resume = hda_dsp_resume,
+ .runtime_suspend = hda_dsp_runtime_suspend,
+ .runtime_resume = hda_dsp_runtime_resume,
+ .set_hw_params_upon_resume = hda_dsp_set_hw_params_upon_resume,
+};
+EXPORT_SYMBOL(sof_apl_ops);
+
+const struct sof_intel_dsp_desc apl_chip_info = {
+ /* Apollolake */
+ .cores_num = 2,
+ .init_core_mask = 1,
+ .cores_mask = HDA_DSP_CORE_MASK(0) | HDA_DSP_CORE_MASK(1),
+ .ipc_req = HDA_DSP_REG_HIPCI,
+ .ipc_req_mask = HDA_DSP_REG_HIPCI_BUSY,
+ .ipc_ack = HDA_DSP_REG_HIPCIE,
+ .ipc_ack_mask = HDA_DSP_REG_HIPCIE_DONE,
+ .ipc_ctl = HDA_DSP_REG_HIPCCTL,
+ .rom_init_timeout = 150,
+ .ssp_count = APL_SSP_COUNT,
+ .ssp_base_offset = APL_SSP_BASE_OFFSET,
+};
+EXPORT_SYMBOL(apl_chip_info);
diff --git a/sound/soc/sof/intel/bdw.c b/sound/soc/sof/intel/bdw.c
new file mode 100644
index 000000000000..065cb868bdfa
--- /dev/null
+++ b/sound/soc/sof/intel/bdw.c
@@ -0,0 +1,713 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018 Intel Corporation. All rights reserved.
+//
+// Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+//
+
+/*
+ * Hardware interface for audio DSP on Broadwell
+ */
+
+#include <linux/module.h>
+#include <sound/sof.h>
+#include <sound/sof/xtensa.h>
+#include "../ops.h"
+#include "shim.h"
+
+/* BARs */
+#define BDW_DSP_BAR 0
+#define BDW_PCI_BAR 1
+
+/*
+ * Debug
+ */
+
+/* DSP memories for BDW */
+#define IRAM_OFFSET 0xA0000
+#define BDW_IRAM_SIZE (10 * 32 * 1024)
+#define DRAM_OFFSET 0x00000
+#define BDW_DRAM_SIZE (20 * 32 * 1024)
+#define SHIM_OFFSET 0xFB000
+#define SHIM_SIZE 0x100
+#define MBOX_OFFSET 0x9E000
+#define MBOX_SIZE 0x1000
+#define MBOX_DUMP_SIZE 0x30
+#define EXCEPT_OFFSET 0x800
+
+/* DSP peripherals */
+#define DMAC0_OFFSET 0xFE000
+#define DMAC1_OFFSET 0xFF000
+#define DMAC_SIZE 0x420
+#define SSP0_OFFSET 0xFC000
+#define SSP1_OFFSET 0xFD000
+#define SSP_SIZE 0x100
+
+#define BDW_STACK_DUMP_SIZE 32
+
+#define BDW_PANIC_OFFSET(x) ((x) & 0xFFFF)
+
+static const struct snd_sof_debugfs_map bdw_debugfs[] = {
+ {"dmac0", BDW_DSP_BAR, DMAC0_OFFSET, DMAC_SIZE,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"dmac1", BDW_DSP_BAR, DMAC1_OFFSET, DMAC_SIZE,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"ssp0", BDW_DSP_BAR, SSP0_OFFSET, SSP_SIZE,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"ssp1", BDW_DSP_BAR, SSP1_OFFSET, SSP_SIZE,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"iram", BDW_DSP_BAR, IRAM_OFFSET, BDW_IRAM_SIZE,
+ SOF_DEBUGFS_ACCESS_D0_ONLY},
+ {"dram", BDW_DSP_BAR, DRAM_OFFSET, BDW_DRAM_SIZE,
+ SOF_DEBUGFS_ACCESS_D0_ONLY},
+ {"shim", BDW_DSP_BAR, SHIM_OFFSET, SHIM_SIZE,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+};
+
+static void bdw_host_done(struct snd_sof_dev *sdev);
+static void bdw_dsp_done(struct snd_sof_dev *sdev);
+static void bdw_get_reply(struct snd_sof_dev *sdev);
+
+/*
+ * DSP Control.
+ */
+
+static int bdw_run(struct snd_sof_dev *sdev)
+{
+ /* set opportunistic mode on engine 0,1 for all channels */
+ snd_sof_dsp_update_bits(sdev, BDW_DSP_BAR, SHIM_HMDC,
+ SHIM_HMDC_HDDA_E0_ALLCH |
+ SHIM_HMDC_HDDA_E1_ALLCH, 0);
+
+ /* set DSP to RUN */
+ snd_sof_dsp_update_bits_unlocked(sdev, BDW_DSP_BAR, SHIM_CSR,
+ SHIM_CSR_STALL, 0x0);
+
+ /* return init core mask */
+ return 1;
+}
+
+static int bdw_reset(struct snd_sof_dev *sdev)
+{
+ /* put DSP into reset and stall */
+ snd_sof_dsp_update_bits_unlocked(sdev, BDW_DSP_BAR, SHIM_CSR,
+ SHIM_CSR_RST | SHIM_CSR_STALL,
+ SHIM_CSR_RST | SHIM_CSR_STALL);
+
+ /* keep in reset for 10ms */
+ mdelay(10);
+
+ /* take DSP out of reset and keep stalled for FW loading */
+ snd_sof_dsp_update_bits_unlocked(sdev, BDW_DSP_BAR, SHIM_CSR,
+ SHIM_CSR_RST | SHIM_CSR_STALL,
+ SHIM_CSR_STALL);
+
+ return 0;
+}
+
+static int bdw_set_dsp_D0(struct snd_sof_dev *sdev)
+{
+ int tries = 10;
+ u32 reg;
+
+ /* Disable core clock gating (VDRTCTL2.DCLCGE = 0) */
+ snd_sof_dsp_update_bits_unlocked(sdev, BDW_PCI_BAR, PCI_VDRTCTL2,
+ PCI_VDRTCL2_DCLCGE |
+ PCI_VDRTCL2_DTCGE, 0);
+
+ /* Disable D3PG (VDRTCTL0.D3PGD = 1) */
+ snd_sof_dsp_update_bits_unlocked(sdev, BDW_PCI_BAR, PCI_VDRTCTL0,
+ PCI_VDRTCL0_D3PGD, PCI_VDRTCL0_D3PGD);
+
+ /* Set D0 state */
+ snd_sof_dsp_update_bits_unlocked(sdev, BDW_PCI_BAR, PCI_PMCS,
+ PCI_PMCS_PS_MASK, 0);
+
+	/* wait for the DSP device to enter D0 */
+ while (tries--) {
+ reg = readl(sdev->bar[BDW_PCI_BAR] + PCI_PMCS)
+ & PCI_PMCS_PS_MASK;
+ if (reg == 0)
+ goto finish;
+
+ msleep(20);
+ }
+
+ return -ENODEV;
+
+finish:
+ /*
+ * select SSP1 19.2MHz base clock, SSP clock 0,
+ * turn off Low Power Clock
+ */
+ snd_sof_dsp_update_bits_unlocked(sdev, BDW_DSP_BAR, SHIM_CSR,
+ SHIM_CSR_S1IOCS | SHIM_CSR_SBCS1 |
+ SHIM_CSR_LPCS, 0x0);
+
+	/* stall DSP core, set clk to 192/96 MHz */
+ snd_sof_dsp_update_bits_unlocked(sdev, BDW_DSP_BAR,
+ SHIM_CSR, SHIM_CSR_STALL |
+ SHIM_CSR_DCS_MASK,
+ SHIM_CSR_STALL |
+ SHIM_CSR_DCS(4));
+
+ /* Set 24MHz MCLK, prevent local clock gating, enable SSP0 clock */
+ snd_sof_dsp_update_bits_unlocked(sdev, BDW_DSP_BAR, SHIM_CLKCTL,
+ SHIM_CLKCTL_MASK |
+ SHIM_CLKCTL_DCPLCG |
+ SHIM_CLKCTL_SCOE0,
+ SHIM_CLKCTL_MASK |
+ SHIM_CLKCTL_DCPLCG |
+ SHIM_CLKCTL_SCOE0);
+
+ /* Stall and reset core, set CSR */
+ bdw_reset(sdev);
+
+ /* Enable core clock gating (VDRTCTL2.DCLCGE = 1), delay 50 us */
+ snd_sof_dsp_update_bits_unlocked(sdev, BDW_PCI_BAR, PCI_VDRTCTL2,
+ PCI_VDRTCL2_DCLCGE |
+ PCI_VDRTCL2_DTCGE,
+ PCI_VDRTCL2_DCLCGE |
+ PCI_VDRTCL2_DTCGE);
+
+ usleep_range(50, 55);
+
+ /* switch on audio PLL */
+ snd_sof_dsp_update_bits_unlocked(sdev, BDW_PCI_BAR, PCI_VDRTCTL2,
+ PCI_VDRTCL2_APLLSE_MASK, 0);
+
+	/*
+	 * set default power gating control, enable power gating control for
+	 * all blocks. That is, the blocks are power gated and cannot be
+	 * accessed; each block must be enabled before it is accessed.
+	 */
+ snd_sof_dsp_update_bits_unlocked(sdev, BDW_PCI_BAR, PCI_VDRTCTL0,
+ 0xfffffffC, 0x0);
+
+ /* disable DMA finish function for SSP0 & SSP1 */
+ snd_sof_dsp_update_bits_unlocked(sdev, BDW_DSP_BAR, SHIM_CSR2,
+ SHIM_CSR2_SDFD_SSP1,
+ SHIM_CSR2_SDFD_SSP1);
+
+	/* set on-demand mode on engine 0,1 for all channels */
+ snd_sof_dsp_update_bits(sdev, BDW_DSP_BAR, SHIM_HMDC,
+ SHIM_HMDC_HDDA_E0_ALLCH |
+ SHIM_HMDC_HDDA_E1_ALLCH,
+ SHIM_HMDC_HDDA_E0_ALLCH |
+ SHIM_HMDC_HDDA_E1_ALLCH);
+
+ /* Enable Interrupt from both sides */
+ snd_sof_dsp_update_bits(sdev, BDW_DSP_BAR, SHIM_IMRX,
+ (SHIM_IMRX_BUSY | SHIM_IMRX_DONE), 0x0);
+ snd_sof_dsp_update_bits(sdev, BDW_DSP_BAR, SHIM_IMRD,
+ (SHIM_IMRD_DONE | SHIM_IMRD_BUSY |
+ SHIM_IMRD_SSP0 | SHIM_IMRD_DMAC), 0x0);
+
+ /* clear IPC registers */
+ snd_sof_dsp_write(sdev, BDW_DSP_BAR, SHIM_IPCX, 0x0);
+ snd_sof_dsp_write(sdev, BDW_DSP_BAR, SHIM_IPCD, 0x0);
+ snd_sof_dsp_write(sdev, BDW_DSP_BAR, 0x80, 0x6);
+ snd_sof_dsp_write(sdev, BDW_DSP_BAR, 0xe0, 0x300a);
+
+ return 0;
+}
+
+static void bdw_get_registers(struct snd_sof_dev *sdev,
+ struct sof_ipc_dsp_oops_xtensa *xoops,
+ struct sof_ipc_panic_info *panic_info,
+ u32 *stack, size_t stack_words)
+{
+	/* first read registers */
+ sof_mailbox_read(sdev, sdev->dsp_oops_offset, xoops, sizeof(*xoops));
+
+ /* then get panic info */
+ sof_mailbox_read(sdev, sdev->dsp_oops_offset + sizeof(*xoops),
+ panic_info, sizeof(*panic_info));
+
+ /* then get the stack */
+ sof_mailbox_read(sdev, sdev->dsp_oops_offset + sizeof(*xoops) +
+ sizeof(*panic_info), stack,
+ stack_words * sizeof(u32));
+}
+
+static void bdw_dump(struct snd_sof_dev *sdev, u32 flags)
+{
+ struct sof_ipc_dsp_oops_xtensa xoops;
+ struct sof_ipc_panic_info panic_info;
+ u32 stack[BDW_STACK_DUMP_SIZE];
+ u32 status, panic;
+
+ /* now try generic SOF status messages */
+ status = snd_sof_dsp_read(sdev, BDW_DSP_BAR, SHIM_IPCD);
+ panic = snd_sof_dsp_read(sdev, BDW_DSP_BAR, SHIM_IPCX);
+ bdw_get_registers(sdev, &xoops, &panic_info, stack,
+ BDW_STACK_DUMP_SIZE);
+ snd_sof_get_status(sdev, status, panic, &xoops, &panic_info, stack,
+ BDW_STACK_DUMP_SIZE);
+}
+
+/*
+ * IPC Doorbell IRQ handler and thread.
+ */
+
+static irqreturn_t bdw_irq_handler(int irq, void *context)
+{
+ struct snd_sof_dev *sdev = context;
+ u32 isr;
+ int ret = IRQ_NONE;
+
+ /* Interrupt arrived, check src */
+ isr = snd_sof_dsp_read(sdev, BDW_DSP_BAR, SHIM_ISRX);
+ if (isr & (SHIM_ISRX_DONE | SHIM_ISRX_BUSY))
+ ret = IRQ_WAKE_THREAD;
+
+ return ret;
+}
+
+static irqreturn_t bdw_irq_thread(int irq, void *context)
+{
+ struct snd_sof_dev *sdev = context;
+ u32 ipcx, ipcd, imrx;
+
+ imrx = snd_sof_dsp_read64(sdev, BDW_DSP_BAR, SHIM_IMRX);
+ ipcx = snd_sof_dsp_read(sdev, BDW_DSP_BAR, SHIM_IPCX);
+
+ /* reply message from DSP */
+ if (ipcx & SHIM_IPCX_DONE &&
+ !(imrx & SHIM_IMRX_DONE)) {
+ /* Mask Done interrupt before return */
+ snd_sof_dsp_update_bits_unlocked(sdev, BDW_DSP_BAR,
+ SHIM_IMRX, SHIM_IMRX_DONE,
+ SHIM_IMRX_DONE);
+
+		/*
+		 * Handle the immediate reply from the DSP core. If the msg
+		 * is found, the done bit is set in cmd_done, which is called
+		 * at the end of the message processing function; otherwise
+		 * set it here, because cmd_done is only triggered by a
+		 * matching msg.
+		 */
+ bdw_get_reply(sdev);
+ snd_sof_ipc_reply(sdev, ipcx);
+
+ bdw_dsp_done(sdev);
+ }
+
+ ipcd = snd_sof_dsp_read(sdev, BDW_DSP_BAR, SHIM_IPCD);
+
+ /* new message from DSP */
+ if (ipcd & SHIM_IPCD_BUSY &&
+ !(imrx & SHIM_IMRX_BUSY)) {
+ /* Mask Busy interrupt before return */
+ snd_sof_dsp_update_bits_unlocked(sdev, BDW_DSP_BAR,
+ SHIM_IMRX, SHIM_IMRX_BUSY,
+ SHIM_IMRX_BUSY);
+
+ /* Handle messages from DSP Core */
+ if ((ipcd & SOF_IPC_PANIC_MAGIC_MASK) == SOF_IPC_PANIC_MAGIC) {
+ snd_sof_dsp_panic(sdev, BDW_PANIC_OFFSET(ipcx) +
+ MBOX_OFFSET);
+ } else {
+ snd_sof_ipc_msgs_rx(sdev);
+ }
+
+ bdw_host_done(sdev);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * IPC Firmware ready.
+ */
+static void bdw_get_windows(struct snd_sof_dev *sdev)
+{
+ struct sof_ipc_window_elem *elem;
+ u32 outbox_offset = 0;
+ u32 stream_offset = 0;
+ u32 inbox_offset = 0;
+ u32 outbox_size = 0;
+ u32 stream_size = 0;
+ u32 inbox_size = 0;
+ int i;
+
+ if (!sdev->info_window) {
+ dev_err(sdev->dev, "error: have no window info\n");
+ return;
+ }
+
+ for (i = 0; i < sdev->info_window->num_windows; i++) {
+ elem = &sdev->info_window->window[i];
+
+ switch (elem->type) {
+ case SOF_IPC_REGION_UPBOX:
+ inbox_offset = elem->offset + MBOX_OFFSET;
+ inbox_size = elem->size;
+ snd_sof_debugfs_io_item(sdev,
+ sdev->bar[BDW_DSP_BAR] +
+ inbox_offset,
+ elem->size, "inbox",
+ SOF_DEBUGFS_ACCESS_D0_ONLY);
+ break;
+ case SOF_IPC_REGION_DOWNBOX:
+ outbox_offset = elem->offset + MBOX_OFFSET;
+ outbox_size = elem->size;
+ snd_sof_debugfs_io_item(sdev,
+ sdev->bar[BDW_DSP_BAR] +
+ outbox_offset,
+ elem->size, "outbox",
+ SOF_DEBUGFS_ACCESS_D0_ONLY);
+ break;
+ case SOF_IPC_REGION_TRACE:
+ snd_sof_debugfs_io_item(sdev,
+ sdev->bar[BDW_DSP_BAR] +
+ elem->offset +
+ MBOX_OFFSET,
+ elem->size, "etrace",
+ SOF_DEBUGFS_ACCESS_D0_ONLY);
+ break;
+ case SOF_IPC_REGION_DEBUG:
+ snd_sof_debugfs_io_item(sdev,
+ sdev->bar[BDW_DSP_BAR] +
+ elem->offset +
+ MBOX_OFFSET,
+ elem->size, "debug",
+ SOF_DEBUGFS_ACCESS_D0_ONLY);
+ break;
+ case SOF_IPC_REGION_STREAM:
+ stream_offset = elem->offset + MBOX_OFFSET;
+ stream_size = elem->size;
+ snd_sof_debugfs_io_item(sdev,
+ sdev->bar[BDW_DSP_BAR] +
+ stream_offset,
+ elem->size, "stream",
+ SOF_DEBUGFS_ACCESS_D0_ONLY);
+ break;
+ case SOF_IPC_REGION_REGS:
+ snd_sof_debugfs_io_item(sdev,
+ sdev->bar[BDW_DSP_BAR] +
+ elem->offset +
+ MBOX_OFFSET,
+ elem->size, "regs",
+ SOF_DEBUGFS_ACCESS_D0_ONLY);
+ break;
+ case SOF_IPC_REGION_EXCEPTION:
+ sdev->dsp_oops_offset = elem->offset + MBOX_OFFSET;
+ snd_sof_debugfs_io_item(sdev,
+ sdev->bar[BDW_DSP_BAR] +
+ elem->offset +
+ MBOX_OFFSET,
+ elem->size, "exception",
+ SOF_DEBUGFS_ACCESS_D0_ONLY);
+ break;
+ default:
+			dev_err(sdev->dev, "error: invalid window info\n");
+ return;
+ }
+ }
+
+ if (outbox_size == 0 || inbox_size == 0) {
+		dev_err(sdev->dev, "error: invalid mailbox window\n");
+ return;
+ }
+
+ snd_sof_dsp_mailbox_init(sdev, inbox_offset, inbox_size,
+ outbox_offset, outbox_size);
+ sdev->stream_box.offset = stream_offset;
+ sdev->stream_box.size = stream_size;
+
+ dev_dbg(sdev->dev, " mailbox upstream 0x%x - size 0x%x\n",
+ inbox_offset, inbox_size);
+ dev_dbg(sdev->dev, " mailbox downstream 0x%x - size 0x%x\n",
+ outbox_offset, outbox_size);
+ dev_dbg(sdev->dev, " stream region 0x%x - size 0x%x\n",
+ stream_offset, stream_size);
+}
+
+/* check for ABI compatibility and create memory windows on first boot */
+static int bdw_fw_ready(struct snd_sof_dev *sdev, u32 msg_id)
+{
+ struct sof_ipc_fw_ready *fw_ready = &sdev->fw_ready;
+ u32 offset;
+ int ret;
+
+ /* mailbox must be on 4k boundary */
+ offset = MBOX_OFFSET;
+
+	dev_dbg(sdev->dev, "ipc: DSP is ready 0x%8.8x offset 0x%x\n",
+ msg_id, offset);
+
+ /* no need to re-check version/ABI for subsequent boots */
+ if (!sdev->first_boot)
+ return 0;
+
+ /* copy data from the DSP FW ready offset */
+ sof_block_read(sdev, sdev->mmio_bar, offset, fw_ready,
+ sizeof(*fw_ready));
+
+ snd_sof_dsp_mailbox_init(sdev, fw_ready->dspbox_offset,
+ fw_ready->dspbox_size,
+ fw_ready->hostbox_offset,
+ fw_ready->hostbox_size);
+
+ /* make sure ABI version is compatible */
+ ret = snd_sof_ipc_valid(sdev);
+ if (ret < 0)
+ return ret;
+
+ /* now check for extended data */
+ snd_sof_fw_parse_ext_data(sdev, sdev->mmio_bar, MBOX_OFFSET +
+ sizeof(struct sof_ipc_fw_ready));
+
+ bdw_get_windows(sdev);
+
+ return 0;
+}
+
+/*
+ * IPC Mailbox IO
+ */
+
+static int bdw_send_msg(struct snd_sof_dev *sdev, struct snd_sof_ipc_msg *msg)
+{
+ /* send the message */
+ sof_mailbox_write(sdev, sdev->host_box.offset, msg->msg_data,
+ msg->msg_size);
+ snd_sof_dsp_write(sdev, BDW_DSP_BAR, SHIM_IPCX, SHIM_IPCX_BUSY);
+
+ return 0;
+}
+
+static void bdw_get_reply(struct snd_sof_dev *sdev)
+{
+ struct snd_sof_ipc_msg *msg = sdev->msg;
+ struct sof_ipc_reply reply;
+ unsigned long flags;
+ int ret = 0;
+
+	/*
+	 * An unexpected reply IPC can arrive that does not belong to any of
+	 * the IPCs sent by the driver. In that case the driver must ignore
+	 * the IPC.
+	 */
+ if (!msg) {
+ dev_warn(sdev->dev, "unexpected ipc interrupt raised!\n");
+ return;
+ }
+
+ /* get reply */
+ sof_mailbox_read(sdev, sdev->host_box.offset, &reply, sizeof(reply));
+
+ spin_lock_irqsave(&sdev->ipc_lock, flags);
+
+ if (reply.error < 0) {
+ memcpy(msg->reply_data, &reply, sizeof(reply));
+ ret = reply.error;
+ } else {
+ /* reply correct size ? */
+ if (reply.hdr.size != msg->reply_size) {
+ dev_err(sdev->dev, "error: reply expected %zu got %u bytes\n",
+ msg->reply_size, reply.hdr.size);
+ ret = -EINVAL;
+ }
+
+ /* read the message */
+ if (msg->reply_size > 0)
+ sof_mailbox_read(sdev, sdev->host_box.offset,
+ msg->reply_data, msg->reply_size);
+ }
+
+ msg->reply_error = ret;
+
+ spin_unlock_irqrestore(&sdev->ipc_lock, flags);
+}
+
+static void bdw_host_done(struct snd_sof_dev *sdev)
+{
+ /* clear BUSY bit and set DONE bit - accept new messages */
+ snd_sof_dsp_update_bits_unlocked(sdev, BDW_DSP_BAR, SHIM_IPCD,
+ SHIM_IPCD_BUSY | SHIM_IPCD_DONE,
+ SHIM_IPCD_DONE);
+
+ /* unmask busy interrupt */
+ snd_sof_dsp_update_bits_unlocked(sdev, BDW_DSP_BAR, SHIM_IMRX,
+ SHIM_IMRX_BUSY, 0);
+}
+
+static void bdw_dsp_done(struct snd_sof_dev *sdev)
+{
+ /* clear DONE bit - tell DSP we have completed */
+ snd_sof_dsp_update_bits_unlocked(sdev, BDW_DSP_BAR, SHIM_IPCX,
+ SHIM_IPCX_DONE, 0);
+
+ /* unmask Done interrupt */
+ snd_sof_dsp_update_bits_unlocked(sdev, BDW_DSP_BAR, SHIM_IMRX,
+ SHIM_IMRX_DONE, 0);
+}
+
+/*
+ * Probe and remove.
+ */
+static int bdw_probe(struct snd_sof_dev *sdev)
+{
+ struct snd_sof_pdata *pdata = sdev->pdata;
+ const struct sof_dev_desc *desc = pdata->desc;
+ struct platform_device *pdev =
+ container_of(sdev->dev, struct platform_device, dev);
+ struct resource *mmio;
+ u32 base, size;
+ int ret;
+
+ /* LPE base */
+ mmio = platform_get_resource(pdev, IORESOURCE_MEM,
+ desc->resindex_lpe_base);
+ if (mmio) {
+ base = mmio->start;
+ size = resource_size(mmio);
+ } else {
+ dev_err(sdev->dev, "error: failed to get LPE base at idx %d\n",
+ desc->resindex_lpe_base);
+ return -EINVAL;
+ }
+
+ dev_dbg(sdev->dev, "LPE PHY base at 0x%x size 0x%x", base, size);
+ sdev->bar[BDW_DSP_BAR] = devm_ioremap(sdev->dev, base, size);
+ if (!sdev->bar[BDW_DSP_BAR]) {
+ dev_err(sdev->dev,
+ "error: failed to ioremap LPE base 0x%x size 0x%x\n",
+ base, size);
+ return -ENODEV;
+ }
+ dev_dbg(sdev->dev, "LPE VADDR %p\n", sdev->bar[BDW_DSP_BAR]);
+
+ /* TODO: add offsets */
+ sdev->mmio_bar = BDW_DSP_BAR;
+ sdev->mailbox_bar = BDW_DSP_BAR;
+
+ /* PCI base */
+ mmio = platform_get_resource(pdev, IORESOURCE_MEM,
+ desc->resindex_pcicfg_base);
+ if (mmio) {
+ base = mmio->start;
+ size = resource_size(mmio);
+ } else {
+ dev_err(sdev->dev, "error: failed to get PCI base at idx %d\n",
+ desc->resindex_pcicfg_base);
+ return -ENODEV;
+ }
+
+ dev_dbg(sdev->dev, "PCI base at 0x%x size 0x%x", base, size);
+ sdev->bar[BDW_PCI_BAR] = devm_ioremap(sdev->dev, base, size);
+ if (!sdev->bar[BDW_PCI_BAR]) {
+ dev_err(sdev->dev,
+ "error: failed to ioremap PCI base 0x%x size 0x%x\n",
+ base, size);
+ return -ENODEV;
+ }
+ dev_dbg(sdev->dev, "PCI VADDR %p\n", sdev->bar[BDW_PCI_BAR]);
+
+ /* register our IRQ */
+ sdev->ipc_irq = platform_get_irq(pdev, desc->irqindex_host_ipc);
+ if (sdev->ipc_irq < 0) {
+ dev_err(sdev->dev, "error: failed to get IRQ at index %d\n",
+ desc->irqindex_host_ipc);
+ return sdev->ipc_irq;
+ }
+
+ dev_dbg(sdev->dev, "using IRQ %d\n", sdev->ipc_irq);
+ ret = devm_request_threaded_irq(sdev->dev, sdev->ipc_irq,
+ bdw_irq_handler, bdw_irq_thread,
+ IRQF_SHARED, "AudioDSP", sdev);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: failed to register IRQ %d\n",
+ sdev->ipc_irq);
+ return ret;
+ }
+
+ /* enable the DSP SHIM */
+ ret = bdw_set_dsp_D0(sdev);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: failed to set DSP D0\n");
+ return ret;
+ }
+
+ /* DSP DMA can only access low 31 bits of host memory */
+ ret = dma_coerce_mask_and_coherent(sdev->dev, DMA_BIT_MASK(31));
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: failed to set DMA mask %d\n", ret);
+ return ret;
+ }
+
+ /* set default mailbox */
+ snd_sof_dsp_mailbox_init(sdev, MBOX_OFFSET, MBOX_SIZE, 0, 0);
+
+ return ret;
+}
+
+/* Broadwell DAIs */
+static struct snd_soc_dai_driver bdw_dai[] = {
+{
+ .name = "ssp0-port",
+},
+{
+ .name = "ssp1-port",
+},
+};
+
+/* broadwell ops */
+const struct snd_sof_dsp_ops sof_bdw_ops = {
+	/* Device init */
+ .probe = bdw_probe,
+
+ /* DSP Core Control */
+ .run = bdw_run,
+ .reset = bdw_reset,
+
+ /* Register IO */
+ .write = sof_io_write,
+ .read = sof_io_read,
+ .write64 = sof_io_write64,
+ .read64 = sof_io_read64,
+
+ /* Block IO */
+ .block_read = sof_block_read,
+ .block_write = sof_block_write,
+
+ /* ipc */
+ .send_msg = bdw_send_msg,
+ .fw_ready = bdw_fw_ready,
+
+ .ipc_msg_data = intel_ipc_msg_data,
+ .ipc_pcm_params = intel_ipc_pcm_params,
+
+ /* debug */
+ .debug_map = bdw_debugfs,
+ .debug_map_count = ARRAY_SIZE(bdw_debugfs),
+ .dbg_dump = bdw_dump,
+
+ /* stream callbacks */
+ .pcm_open = intel_pcm_open,
+ .pcm_close = intel_pcm_close,
+
+ /* Module loading */
+ .load_module = snd_sof_parse_module_memcpy,
+
+	/* Firmware loading */
+ .load_firmware = snd_sof_load_firmware_memcpy,
+
+ /* DAI drivers */
+ .drv = bdw_dai,
+ .num_drv = ARRAY_SIZE(bdw_dai)
+};
+EXPORT_SYMBOL(sof_bdw_ops);
+
+const struct sof_intel_dsp_desc bdw_chip_info = {
+ .cores_num = 1,
+ .cores_mask = 1,
+};
+EXPORT_SYMBOL(bdw_chip_info);
+
+MODULE_LICENSE("Dual BSD/GPL");
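As a side note on the panic path above: BDW_PANIC_OFFSET() takes the low 16 bits of the IPCX doorbell value and adds MBOX_OFFSET to locate the panic data inside the mailbox window. A small stand-alone sketch of that address calculation; the doorbell value below is hypothetical.

#include <stdint.h>
#include <stdio.h>

#define MBOX_OFFSET		0x9E000
#define BDW_PANIC_OFFSET(x)	((x) & 0xFFFF)

int main(void)
{
	uint32_t ipcx = 0x80000842;	/* hypothetical IPCX doorbell value */
	uint32_t panic_addr = BDW_PANIC_OFFSET(ipcx) + MBOX_OFFSET;

	/* prints 0x9e842: the DSP-side offset of the panic data */
	printf("panic data at DSP offset 0x%x\n", (unsigned int)panic_addr);
	return 0;
}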
diff --git a/sound/soc/sof/intel/byt.c b/sound/soc/sof/intel/byt.c
new file mode 100644
index 000000000000..7bf9143d3106
--- /dev/null
+++ b/sound/soc/sof/intel/byt.c
@@ -0,0 +1,874 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018 Intel Corporation. All rights reserved.
+//
+// Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+//
+
+/*
+ * Hardware interface for audio DSP on Baytrail, Braswell and Cherrytrail.
+ */
+
+#include <linux/module.h>
+#include <sound/sof.h>
+#include <sound/sof/xtensa.h>
+#include "../ops.h"
+#include "shim.h"
+
+/* DSP memories */
+#define IRAM_OFFSET 0x0C0000
+#define IRAM_SIZE (80 * 1024)
+#define DRAM_OFFSET 0x100000
+#define DRAM_SIZE (160 * 1024)
+#define SHIM_OFFSET 0x140000
+#define SHIM_SIZE 0x100
+#define MBOX_OFFSET 0x144000
+#define MBOX_SIZE 0x1000
+#define EXCEPT_OFFSET 0x800
+
+/* DSP peripherals */
+#define DMAC0_OFFSET 0x098000
+#define DMAC1_OFFSET 0x09c000
+#define DMAC2_OFFSET 0x094000
+#define DMAC_SIZE 0x420
+#define SSP0_OFFSET 0x0a0000
+#define SSP1_OFFSET 0x0a1000
+#define SSP2_OFFSET 0x0a2000
+#define SSP3_OFFSET 0x0a4000
+#define SSP4_OFFSET 0x0a5000
+#define SSP5_OFFSET 0x0a6000
+#define SSP_SIZE 0x100
+
+#define BYT_STACK_DUMP_SIZE 32
+
+#define BYT_PCI_BAR_SIZE 0x200000
+
+#define BYT_PANIC_OFFSET(x) (((x) & GENMASK_ULL(47, 32)) >> 32)
+
+/*
+ * Debug
+ */
+
+#define MBOX_DUMP_SIZE 0x30
+
+/* BARs */
+#define BYT_DSP_BAR 0
+#define BYT_PCI_BAR 1
+#define BYT_IMR_BAR 2
+
+static const struct snd_sof_debugfs_map byt_debugfs[] = {
+ {"dmac0", BYT_DSP_BAR, DMAC0_OFFSET, DMAC_SIZE,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"dmac1", BYT_DSP_BAR, DMAC1_OFFSET, DMAC_SIZE,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"ssp0", BYT_DSP_BAR, SSP0_OFFSET, SSP_SIZE,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"ssp1", BYT_DSP_BAR, SSP1_OFFSET, SSP_SIZE,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"ssp2", BYT_DSP_BAR, SSP2_OFFSET, SSP_SIZE,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"iram", BYT_DSP_BAR, IRAM_OFFSET, IRAM_SIZE,
+ SOF_DEBUGFS_ACCESS_D0_ONLY},
+ {"dram", BYT_DSP_BAR, DRAM_OFFSET, DRAM_SIZE,
+ SOF_DEBUGFS_ACCESS_D0_ONLY},
+ {"shim", BYT_DSP_BAR, SHIM_OFFSET, SHIM_SIZE,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+};
+
+static const struct snd_sof_debugfs_map cht_debugfs[] = {
+ {"dmac0", BYT_DSP_BAR, DMAC0_OFFSET, DMAC_SIZE,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"dmac1", BYT_DSP_BAR, DMAC1_OFFSET, DMAC_SIZE,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"dmac2", BYT_DSP_BAR, DMAC2_OFFSET, DMAC_SIZE,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"ssp0", BYT_DSP_BAR, SSP0_OFFSET, SSP_SIZE,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"ssp1", BYT_DSP_BAR, SSP1_OFFSET, SSP_SIZE,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"ssp2", BYT_DSP_BAR, SSP2_OFFSET, SSP_SIZE,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"ssp3", BYT_DSP_BAR, SSP3_OFFSET, SSP_SIZE,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"ssp4", BYT_DSP_BAR, SSP4_OFFSET, SSP_SIZE,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"ssp5", BYT_DSP_BAR, SSP5_OFFSET, SSP_SIZE,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"iram", BYT_DSP_BAR, IRAM_OFFSET, IRAM_SIZE,
+ SOF_DEBUGFS_ACCESS_D0_ONLY},
+ {"dram", BYT_DSP_BAR, DRAM_OFFSET, DRAM_SIZE,
+ SOF_DEBUGFS_ACCESS_D0_ONLY},
+ {"shim", BYT_DSP_BAR, SHIM_OFFSET, SHIM_SIZE,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+};
+
+static void byt_host_done(struct snd_sof_dev *sdev);
+static void byt_dsp_done(struct snd_sof_dev *sdev);
+static void byt_get_reply(struct snd_sof_dev *sdev);
+
+/*
+ * IPC Firmware ready.
+ */
+static void byt_get_windows(struct snd_sof_dev *sdev)
+{
+ struct sof_ipc_window_elem *elem;
+ u32 outbox_offset = 0;
+ u32 stream_offset = 0;
+ u32 inbox_offset = 0;
+ u32 outbox_size = 0;
+ u32 stream_size = 0;
+ u32 inbox_size = 0;
+ int i;
+
+ if (!sdev->info_window) {
+ dev_err(sdev->dev, "error: have no window info\n");
+ return;
+ }
+
+ for (i = 0; i < sdev->info_window->num_windows; i++) {
+ elem = &sdev->info_window->window[i];
+
+ switch (elem->type) {
+ case SOF_IPC_REGION_UPBOX:
+ inbox_offset = elem->offset + MBOX_OFFSET;
+ inbox_size = elem->size;
+ snd_sof_debugfs_io_item(sdev,
+ sdev->bar[BYT_DSP_BAR] +
+ inbox_offset,
+ elem->size, "inbox",
+ SOF_DEBUGFS_ACCESS_D0_ONLY);
+ break;
+ case SOF_IPC_REGION_DOWNBOX:
+ outbox_offset = elem->offset + MBOX_OFFSET;
+ outbox_size = elem->size;
+ snd_sof_debugfs_io_item(sdev,
+ sdev->bar[BYT_DSP_BAR] +
+ outbox_offset,
+ elem->size, "outbox",
+ SOF_DEBUGFS_ACCESS_D0_ONLY);
+ break;
+ case SOF_IPC_REGION_TRACE:
+ snd_sof_debugfs_io_item(sdev,
+ sdev->bar[BYT_DSP_BAR] +
+ elem->offset +
+ MBOX_OFFSET,
+ elem->size, "etrace",
+ SOF_DEBUGFS_ACCESS_D0_ONLY);
+ break;
+ case SOF_IPC_REGION_DEBUG:
+ snd_sof_debugfs_io_item(sdev,
+ sdev->bar[BYT_DSP_BAR] +
+ elem->offset +
+ MBOX_OFFSET,
+ elem->size, "debug",
+ SOF_DEBUGFS_ACCESS_D0_ONLY);
+ break;
+ case SOF_IPC_REGION_STREAM:
+ stream_offset = elem->offset + MBOX_OFFSET;
+ stream_size = elem->size;
+ snd_sof_debugfs_io_item(sdev,
+ sdev->bar[BYT_DSP_BAR] +
+ stream_offset,
+ elem->size, "stream",
+ SOF_DEBUGFS_ACCESS_D0_ONLY);
+ break;
+ case SOF_IPC_REGION_REGS:
+ snd_sof_debugfs_io_item(sdev,
+ sdev->bar[BYT_DSP_BAR] +
+ elem->offset +
+ MBOX_OFFSET,
+ elem->size, "regs",
+ SOF_DEBUGFS_ACCESS_D0_ONLY);
+ break;
+ case SOF_IPC_REGION_EXCEPTION:
+ sdev->dsp_oops_offset = elem->offset + MBOX_OFFSET;
+ snd_sof_debugfs_io_item(sdev,
+ sdev->bar[BYT_DSP_BAR] +
+ elem->offset +
+ MBOX_OFFSET,
+ elem->size, "exception",
+ SOF_DEBUGFS_ACCESS_D0_ONLY);
+ break;
+ default:
+			dev_err(sdev->dev, "error: invalid window info\n");
+ return;
+ }
+ }
+
+ if (outbox_size == 0 || inbox_size == 0) {
+		dev_err(sdev->dev, "error: invalid mailbox window\n");
+ return;
+ }
+
+ snd_sof_dsp_mailbox_init(sdev, inbox_offset, inbox_size,
+ outbox_offset, outbox_size);
+ sdev->stream_box.offset = stream_offset;
+ sdev->stream_box.size = stream_size;
+
+ dev_dbg(sdev->dev, " mailbox upstream 0x%x - size 0x%x\n",
+ inbox_offset, inbox_size);
+ dev_dbg(sdev->dev, " mailbox downstream 0x%x - size 0x%x\n",
+ outbox_offset, outbox_size);
+ dev_dbg(sdev->dev, " stream region 0x%x - size 0x%x\n",
+ stream_offset, stream_size);
+}
+
+/* check for ABI compatibility and create memory windows on first boot */
+static int byt_fw_ready(struct snd_sof_dev *sdev, u32 msg_id)
+{
+ struct sof_ipc_fw_ready *fw_ready = &sdev->fw_ready;
+ u32 offset;
+ int ret;
+
+ /* mailbox must be on 4k boundary */
+ offset = MBOX_OFFSET;
+
+ dev_dbg(sdev->dev, "ipc: DSP is ready 0x%8.8x offset 0x%x\n",
+ msg_id, offset);
+
+ /* no need to re-check version/ABI for subsequent boots */
+ if (!sdev->first_boot)
+ return 0;
+
+ /* copy data from the DSP FW ready offset */
+ sof_block_read(sdev, sdev->mmio_bar, offset, fw_ready,
+ sizeof(*fw_ready));
+
+ snd_sof_dsp_mailbox_init(sdev, fw_ready->dspbox_offset,
+ fw_ready->dspbox_size,
+ fw_ready->hostbox_offset,
+ fw_ready->hostbox_size);
+
+ /* make sure ABI version is compatible */
+ ret = snd_sof_ipc_valid(sdev);
+ if (ret < 0)
+ return ret;
+
+ /* now check for extended data */
+ snd_sof_fw_parse_ext_data(sdev, sdev->mmio_bar, MBOX_OFFSET +
+ sizeof(struct sof_ipc_fw_ready));
+
+ byt_get_windows(sdev);
+
+ return 0;
+}
+
+/*
+ * Debug
+ */
+
+static void byt_get_registers(struct snd_sof_dev *sdev,
+ struct sof_ipc_dsp_oops_xtensa *xoops,
+ struct sof_ipc_panic_info *panic_info,
+ u32 *stack, size_t stack_words)
+{
+	/* first read registers */
+ sof_mailbox_read(sdev, sdev->dsp_oops_offset, xoops, sizeof(*xoops));
+
+ /* then get panic info */
+ sof_mailbox_read(sdev, sdev->dsp_oops_offset + sizeof(*xoops),
+ panic_info, sizeof(*panic_info));
+
+ /* then get the stack */
+ sof_mailbox_read(sdev, sdev->dsp_oops_offset + sizeof(*xoops) +
+ sizeof(*panic_info), stack,
+ stack_words * sizeof(u32));
+}
+
+static void byt_dump(struct snd_sof_dev *sdev, u32 flags)
+{
+ struct sof_ipc_dsp_oops_xtensa xoops;
+ struct sof_ipc_panic_info panic_info;
+ u32 stack[BYT_STACK_DUMP_SIZE];
+ u32 status, panic;
+
+ /* now try generic SOF status messages */
+ status = snd_sof_dsp_read(sdev, BYT_DSP_BAR, SHIM_IPCD);
+ panic = snd_sof_dsp_read(sdev, BYT_DSP_BAR, SHIM_IPCX);
+ byt_get_registers(sdev, &xoops, &panic_info, stack,
+ BYT_STACK_DUMP_SIZE);
+ snd_sof_get_status(sdev, status, panic, &xoops, &panic_info, stack,
+ BYT_STACK_DUMP_SIZE);
+}
+
+/*
+ * IPC Doorbell IRQ handler and thread.
+ */
+
+static irqreturn_t byt_irq_handler(int irq, void *context)
+{
+ struct snd_sof_dev *sdev = context;
+ u64 isr;
+ int ret = IRQ_NONE;
+
+ /* Interrupt arrived, check src */
+ isr = snd_sof_dsp_read64(sdev, BYT_DSP_BAR, SHIM_ISRX);
+ if (isr & (SHIM_ISRX_DONE | SHIM_ISRX_BUSY))
+ ret = IRQ_WAKE_THREAD;
+
+ return ret;
+}
+
+static irqreturn_t byt_irq_thread(int irq, void *context)
+{
+ struct snd_sof_dev *sdev = context;
+ u64 ipcx, ipcd;
+ u64 imrx;
+
+ imrx = snd_sof_dsp_read64(sdev, BYT_DSP_BAR, SHIM_IMRX);
+ ipcx = snd_sof_dsp_read64(sdev, BYT_DSP_BAR, SHIM_IPCX);
+
+ /* reply message from DSP */
+ if (ipcx & SHIM_BYT_IPCX_DONE &&
+ !(imrx & SHIM_IMRX_DONE)) {
+		/* Mask Done interrupt before return */
+ snd_sof_dsp_update_bits64_unlocked(sdev, BYT_DSP_BAR,
+ SHIM_IMRX,
+ SHIM_IMRX_DONE,
+ SHIM_IMRX_DONE);
+		/*
+		 * Handle the immediate reply from the DSP core. If the msg
+		 * is found, the done bit is set in cmd_done, which is called
+		 * at the end of the message processing function; otherwise
+		 * set it here, because cmd_done is only triggered by a
+		 * matching msg.
+		 */
+ byt_get_reply(sdev);
+ snd_sof_ipc_reply(sdev, ipcx);
+
+ byt_dsp_done(sdev);
+ }
+
+ /* new message from DSP */
+ ipcd = snd_sof_dsp_read64(sdev, BYT_DSP_BAR, SHIM_IPCD);
+ if (ipcd & SHIM_BYT_IPCD_BUSY &&
+ !(imrx & SHIM_IMRX_BUSY)) {
+ /* Mask Busy interrupt before return */
+ snd_sof_dsp_update_bits64_unlocked(sdev, BYT_DSP_BAR,
+ SHIM_IMRX,
+ SHIM_IMRX_BUSY,
+ SHIM_IMRX_BUSY);
+
+ /* Handle messages from DSP Core */
+ if ((ipcd & SOF_IPC_PANIC_MAGIC_MASK) == SOF_IPC_PANIC_MAGIC) {
+ snd_sof_dsp_panic(sdev, BYT_PANIC_OFFSET(ipcd) +
+ MBOX_OFFSET);
+ } else {
+ snd_sof_ipc_msgs_rx(sdev);
+ }
+
+ byt_host_done(sdev);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int byt_send_msg(struct snd_sof_dev *sdev, struct snd_sof_ipc_msg *msg)
+{
+ u64 cmd = msg->header;
+
+ /* send the message */
+ sof_mailbox_write(sdev, sdev->host_box.offset, msg->msg_data,
+ msg->msg_size);
+ snd_sof_dsp_write64(sdev, BYT_DSP_BAR, SHIM_IPCX,
+ cmd | SHIM_BYT_IPCX_BUSY);
+
+ return 0;
+}
+
+static void byt_get_reply(struct snd_sof_dev *sdev)
+{
+ struct snd_sof_ipc_msg *msg = sdev->msg;
+ struct sof_ipc_reply reply;
+ unsigned long flags;
+ int ret = 0;
+
+	/*
+	 * An unexpected reply IPC can arrive that does not belong to any of
+	 * the IPCs sent by the driver. In that case the driver must ignore
+	 * the IPC.
+	 */
+ if (!msg) {
+ dev_warn(sdev->dev, "unexpected ipc interrupt raised!\n");
+ return;
+ }
+
+ /* get reply */
+ sof_mailbox_read(sdev, sdev->host_box.offset, &reply, sizeof(reply));
+
+ spin_lock_irqsave(&sdev->ipc_lock, flags);
+
+ if (reply.error < 0) {
+ memcpy(msg->reply_data, &reply, sizeof(reply));
+ ret = reply.error;
+ } else {
+ /* reply correct size ? */
+ if (reply.hdr.size != msg->reply_size) {
+ dev_err(sdev->dev, "error: reply expected %zu got %u bytes\n",
+ msg->reply_size, reply.hdr.size);
+ ret = -EINVAL;
+ }
+
+ /* read the message */
+ if (msg->reply_size > 0)
+ sof_mailbox_read(sdev, sdev->host_box.offset,
+ msg->reply_data, msg->reply_size);
+ }
+
+ msg->reply_error = ret;
+
+ spin_unlock_irqrestore(&sdev->ipc_lock, flags);
+}
+
+static void byt_host_done(struct snd_sof_dev *sdev)
+{
+ /* clear BUSY bit and set DONE bit - accept new messages */
+ snd_sof_dsp_update_bits64_unlocked(sdev, BYT_DSP_BAR, SHIM_IPCD,
+ SHIM_BYT_IPCD_BUSY |
+ SHIM_BYT_IPCD_DONE,
+ SHIM_BYT_IPCD_DONE);
+
+ /* unmask busy interrupt */
+ snd_sof_dsp_update_bits64_unlocked(sdev, BYT_DSP_BAR, SHIM_IMRX,
+ SHIM_IMRX_BUSY, 0);
+}
+
+static void byt_dsp_done(struct snd_sof_dev *sdev)
+{
+ /* clear DONE bit - tell DSP we have completed */
+ snd_sof_dsp_update_bits64_unlocked(sdev, BYT_DSP_BAR, SHIM_IPCX,
+ SHIM_BYT_IPCX_DONE, 0);
+
+ /* unmask Done interrupt */
+ snd_sof_dsp_update_bits64_unlocked(sdev, BYT_DSP_BAR, SHIM_IMRX,
+ SHIM_IMRX_DONE, 0);
+}
+
+/*
+ * DSP control.
+ */
+
+static int byt_run(struct snd_sof_dev *sdev)
+{
+ int tries = 10;
+
+ /* release stall and wait to unstall */
+ snd_sof_dsp_update_bits64(sdev, BYT_DSP_BAR, SHIM_CSR,
+ SHIM_BYT_CSR_STALL, 0x0);
+ while (tries--) {
+ if (!(snd_sof_dsp_read64(sdev, BYT_DSP_BAR, SHIM_CSR) &
+ SHIM_BYT_CSR_PWAITMODE))
+ break;
+ msleep(100);
+ }
+ if (tries < 0) {
+ dev_err(sdev->dev, "error: unable to run DSP firmware\n");
+ byt_dump(sdev, SOF_DBG_REGS | SOF_DBG_MBOX);
+ return -ENODEV;
+ }
+
+ /* return init core mask */
+ return 1;
+}
+
+static int byt_reset(struct snd_sof_dev *sdev)
+{
+ /* put DSP into reset, set reset vector and stall */
+ snd_sof_dsp_update_bits64(sdev, BYT_DSP_BAR, SHIM_CSR,
+ SHIM_BYT_CSR_RST | SHIM_BYT_CSR_VECTOR_SEL |
+ SHIM_BYT_CSR_STALL,
+ SHIM_BYT_CSR_RST | SHIM_BYT_CSR_VECTOR_SEL |
+ SHIM_BYT_CSR_STALL);
+
+ usleep_range(10, 15);
+
+ /* take DSP out of reset and keep stalled for FW loading */
+ snd_sof_dsp_update_bits64(sdev, BYT_DSP_BAR, SHIM_CSR,
+ SHIM_BYT_CSR_RST, 0);
+
+ return 0;
+}
+
+/* Baytrail DAIs */
+static struct snd_soc_dai_driver byt_dai[] = {
+{
+ .name = "ssp0-port",
+},
+{
+ .name = "ssp1-port",
+},
+{
+ .name = "ssp2-port",
+},
+{
+ .name = "ssp3-port",
+},
+{
+ .name = "ssp4-port",
+},
+{
+ .name = "ssp5-port",
+},
+};
+
+/*
+ * Probe and remove.
+ */
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_MERRIFIELD)
+
+static int tangier_pci_probe(struct snd_sof_dev *sdev)
+{
+ struct snd_sof_pdata *pdata = sdev->pdata;
+ const struct sof_dev_desc *desc = pdata->desc;
+ struct pci_dev *pci = to_pci_dev(sdev->dev);
+ u32 base, size;
+ int ret;
+
+ /* DSP DMA can only access low 31 bits of host memory */
+ ret = dma_coerce_mask_and_coherent(&pci->dev, DMA_BIT_MASK(31));
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: failed to set DMA mask %d\n", ret);
+ return ret;
+ }
+
+ /* LPE base */
+ base = pci_resource_start(pci, desc->resindex_lpe_base) - IRAM_OFFSET;
+ size = BYT_PCI_BAR_SIZE;
+
+ dev_dbg(sdev->dev, "LPE PHY base at 0x%x size 0x%x", base, size);
+ sdev->bar[BYT_DSP_BAR] = devm_ioremap(sdev->dev, base, size);
+ if (!sdev->bar[BYT_DSP_BAR]) {
+ dev_err(sdev->dev, "error: failed to ioremap LPE base 0x%x size 0x%x\n",
+ base, size);
+ return -ENODEV;
+ }
+ dev_dbg(sdev->dev, "LPE VADDR %p\n", sdev->bar[BYT_DSP_BAR]);
+
+ /* IMR base - optional */
+ if (desc->resindex_imr_base == -1)
+ goto irq;
+
+ base = pci_resource_start(pci, desc->resindex_imr_base);
+ size = pci_resource_len(pci, desc->resindex_imr_base);
+
+ /* some BIOSes don't map IMR */
+ if (base == 0x55aa55aa || base == 0x0) {
+ dev_info(sdev->dev, "IMR not set by BIOS. Ignoring\n");
+ goto irq;
+ }
+
+ dev_dbg(sdev->dev, "IMR base at 0x%x size 0x%x", base, size);
+ sdev->bar[BYT_IMR_BAR] = devm_ioremap(sdev->dev, base, size);
+ if (!sdev->bar[BYT_IMR_BAR]) {
+ dev_err(sdev->dev, "error: failed to ioremap IMR base 0x%x size 0x%x\n",
+ base, size);
+ return -ENODEV;
+ }
+ dev_dbg(sdev->dev, "IMR VADDR %p\n", sdev->bar[BYT_IMR_BAR]);
+
+irq:
+ /* register our IRQ */
+ sdev->ipc_irq = pci->irq;
+ dev_dbg(sdev->dev, "using IRQ %d\n", sdev->ipc_irq);
+ ret = devm_request_threaded_irq(sdev->dev, sdev->ipc_irq,
+ byt_irq_handler, byt_irq_thread,
+ 0, "AudioDSP", sdev);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: failed to register IRQ %d\n",
+ sdev->ipc_irq);
+ return ret;
+ }
+
+ /* enable Interrupt from both sides */
+ snd_sof_dsp_update_bits64(sdev, BYT_DSP_BAR, SHIM_IMRX, 0x3, 0x0);
+ snd_sof_dsp_update_bits64(sdev, BYT_DSP_BAR, SHIM_IMRD, 0x3, 0x0);
+
+ /* set default mailbox offset for FW ready message */
+ sdev->dsp_box.offset = MBOX_OFFSET;
+
+ return ret;
+}
+
+const struct snd_sof_dsp_ops sof_tng_ops = {
+ /* device init */
+ .probe = tangier_pci_probe,
+
+ /* DSP core boot / reset */
+ .run = byt_run,
+ .reset = byt_reset,
+
+ /* Register IO */
+ .write = sof_io_write,
+ .read = sof_io_read,
+ .write64 = sof_io_write64,
+ .read64 = sof_io_read64,
+
+ /* Block IO */
+ .block_read = sof_block_read,
+ .block_write = sof_block_write,
+
+ /* doorbell */
+ .irq_handler = byt_irq_handler,
+ .irq_thread = byt_irq_thread,
+
+ /* ipc */
+ .send_msg = byt_send_msg,
+ .fw_ready = byt_fw_ready,
+
+ .ipc_msg_data = intel_ipc_msg_data,
+ .ipc_pcm_params = intel_ipc_pcm_params,
+
+ /* debug */
+ .debug_map = byt_debugfs,
+ .debug_map_count = ARRAY_SIZE(byt_debugfs),
+ .dbg_dump = byt_dump,
+
+ /* stream callbacks */
+ .pcm_open = intel_pcm_open,
+ .pcm_close = intel_pcm_close,
+
+ /* module loading */
+ .load_module = snd_sof_parse_module_memcpy,
+
+	/* Firmware loading */
+ .load_firmware = snd_sof_load_firmware_memcpy,
+
+ /* DAI drivers */
+ .drv = byt_dai,
+	.num_drv = 3, /* we have only 3 SSPs on BYT */
+};
+EXPORT_SYMBOL(sof_tng_ops);
+
+const struct sof_intel_dsp_desc tng_chip_info = {
+ .cores_num = 1,
+ .cores_mask = 1,
+};
+EXPORT_SYMBOL(tng_chip_info);
+
+#endif /* CONFIG_SND_SOC_SOF_MERRIFIELD */
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_BAYTRAIL)
+
+static int byt_acpi_probe(struct snd_sof_dev *sdev)
+{
+ struct snd_sof_pdata *pdata = sdev->pdata;
+ const struct sof_dev_desc *desc = pdata->desc;
+ struct platform_device *pdev =
+ container_of(sdev->dev, struct platform_device, dev);
+ struct resource *mmio;
+ u32 base, size;
+ int ret;
+
+ /* DSP DMA can only access low 31 bits of host memory */
+ ret = dma_coerce_mask_and_coherent(sdev->dev, DMA_BIT_MASK(31));
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: failed to set DMA mask %d\n", ret);
+ return ret;
+ }
+
+ /* LPE base */
+ mmio = platform_get_resource(pdev, IORESOURCE_MEM,
+ desc->resindex_lpe_base);
+ if (mmio) {
+ base = mmio->start;
+ size = resource_size(mmio);
+ } else {
+ dev_err(sdev->dev, "error: failed to get LPE base at idx %d\n",
+ desc->resindex_lpe_base);
+ return -EINVAL;
+ }
+
+ dev_dbg(sdev->dev, "LPE PHY base at 0x%x size 0x%x", base, size);
+ sdev->bar[BYT_DSP_BAR] = devm_ioremap(sdev->dev, base, size);
+ if (!sdev->bar[BYT_DSP_BAR]) {
+ dev_err(sdev->dev, "error: failed to ioremap LPE base 0x%x size 0x%x\n",
+ base, size);
+ return -ENODEV;
+ }
+ dev_dbg(sdev->dev, "LPE VADDR %p\n", sdev->bar[BYT_DSP_BAR]);
+
+ /* TODO: add offsets */
+ sdev->mmio_bar = BYT_DSP_BAR;
+ sdev->mailbox_bar = BYT_DSP_BAR;
+
+ /* IMR base - optional */
+ if (desc->resindex_imr_base == -1)
+ goto irq;
+
+ mmio = platform_get_resource(pdev, IORESOURCE_MEM,
+ desc->resindex_imr_base);
+ if (mmio) {
+ base = mmio->start;
+ size = resource_size(mmio);
+ } else {
+ dev_err(sdev->dev, "error: failed to get IMR base at idx %d\n",
+ desc->resindex_imr_base);
+ return -ENODEV;
+ }
+
+ /* some BIOSes don't map IMR */
+ if (base == 0x55aa55aa || base == 0x0) {
+ dev_info(sdev->dev, "IMR not set by BIOS. Ignoring\n");
+ goto irq;
+ }
+
+ dev_dbg(sdev->dev, "IMR base at 0x%x size 0x%x", base, size);
+ sdev->bar[BYT_IMR_BAR] = devm_ioremap(sdev->dev, base, size);
+ if (!sdev->bar[BYT_IMR_BAR]) {
+ dev_err(sdev->dev, "error: failed to ioremap IMR base 0x%x size 0x%x\n",
+ base, size);
+ return -ENODEV;
+ }
+ dev_dbg(sdev->dev, "IMR VADDR %p\n", sdev->bar[BYT_IMR_BAR]);
+
+irq:
+ /* register our IRQ */
+ sdev->ipc_irq = platform_get_irq(pdev, desc->irqindex_host_ipc);
+ if (sdev->ipc_irq < 0) {
+ dev_err(sdev->dev, "error: failed to get IRQ at index %d\n",
+ desc->irqindex_host_ipc);
+ return sdev->ipc_irq;
+ }
+
+ dev_dbg(sdev->dev, "using IRQ %d\n", sdev->ipc_irq);
+ ret = devm_request_threaded_irq(sdev->dev, sdev->ipc_irq,
+ byt_irq_handler, byt_irq_thread,
+ IRQF_SHARED, "AudioDSP", sdev);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: failed to register IRQ %d\n",
+ sdev->ipc_irq);
+ return ret;
+ }
+
+ /* enable Interrupt from both sides */
+ snd_sof_dsp_update_bits64(sdev, BYT_DSP_BAR, SHIM_IMRX, 0x3, 0x0);
+ snd_sof_dsp_update_bits64(sdev, BYT_DSP_BAR, SHIM_IMRD, 0x3, 0x0);
+
+ /* set default mailbox offset for FW ready message */
+ sdev->dsp_box.offset = MBOX_OFFSET;
+
+ return ret;
+}
+
+/* baytrail ops */
+const struct snd_sof_dsp_ops sof_byt_ops = {
+ /* device init */
+ .probe = byt_acpi_probe,
+
+ /* DSP core boot / reset */
+ .run = byt_run,
+ .reset = byt_reset,
+
+ /* Register IO */
+ .write = sof_io_write,
+ .read = sof_io_read,
+ .write64 = sof_io_write64,
+ .read64 = sof_io_read64,
+
+ /* Block IO */
+ .block_read = sof_block_read,
+ .block_write = sof_block_write,
+
+ /* doorbell */
+ .irq_handler = byt_irq_handler,
+ .irq_thread = byt_irq_thread,
+
+ /* ipc */
+ .send_msg = byt_send_msg,
+ .fw_ready = byt_fw_ready,
+
+ .ipc_msg_data = intel_ipc_msg_data,
+ .ipc_pcm_params = intel_ipc_pcm_params,
+
+ /* debug */
+ .debug_map = byt_debugfs,
+ .debug_map_count = ARRAY_SIZE(byt_debugfs),
+ .dbg_dump = byt_dump,
+
+ /* stream callbacks */
+ .pcm_open = intel_pcm_open,
+ .pcm_close = intel_pcm_close,
+
+ /* module loading */
+ .load_module = snd_sof_parse_module_memcpy,
+
+	/* Firmware loading */
+ .load_firmware = snd_sof_load_firmware_memcpy,
+
+ /* DAI drivers */
+ .drv = byt_dai,
+	.num_drv = 3, /* we have only 3 SSPs on BYT */
+};
+EXPORT_SYMBOL(sof_byt_ops);
+
+const struct sof_intel_dsp_desc byt_chip_info = {
+ .cores_num = 1,
+ .cores_mask = 1,
+};
+EXPORT_SYMBOL(byt_chip_info);
+
+/* cherrytrail and braswell ops */
+const struct snd_sof_dsp_ops sof_cht_ops = {
+ /* device init */
+ .probe = byt_acpi_probe,
+
+ /* DSP core boot / reset */
+ .run = byt_run,
+ .reset = byt_reset,
+
+ /* Register IO */
+ .write = sof_io_write,
+ .read = sof_io_read,
+ .write64 = sof_io_write64,
+ .read64 = sof_io_read64,
+
+ /* Block IO */
+ .block_read = sof_block_read,
+ .block_write = sof_block_write,
+
+ /* doorbell */
+ .irq_handler = byt_irq_handler,
+ .irq_thread = byt_irq_thread,
+
+ /* ipc */
+ .send_msg = byt_send_msg,
+ .fw_ready = byt_fw_ready,
+
+ .ipc_msg_data = intel_ipc_msg_data,
+ .ipc_pcm_params = intel_ipc_pcm_params,
+
+ /* debug */
+ .debug_map = cht_debugfs,
+ .debug_map_count = ARRAY_SIZE(cht_debugfs),
+ .dbg_dump = byt_dump,
+
+ /* stream callbacks */
+ .pcm_open = intel_pcm_open,
+ .pcm_close = intel_pcm_close,
+
+ /* module loading */
+ .load_module = snd_sof_parse_module_memcpy,
+
+	/* Firmware loading */
+ .load_firmware = snd_sof_load_firmware_memcpy,
+
+ /* DAI drivers */
+ .drv = byt_dai,
+ /* all 6 SSPs may be available for cherrytrail */
+ .num_drv = ARRAY_SIZE(byt_dai),
+};
+EXPORT_SYMBOL(sof_cht_ops);
+
+const struct sof_intel_dsp_desc cht_chip_info = {
+ .cores_num = 1,
+ .cores_mask = 1,
+};
+EXPORT_SYMBOL(cht_chip_info);
+
+#endif /* CONFIG_SND_SOC_SOF_BAYTRAIL */
+
+MODULE_LICENSE("Dual BSD/GPL");
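For comparison with Broadwell, the Baytrail doorbell registers are 64 bits wide and BYT_PANIC_OFFSET() pulls the panic offset out of bits 47:32 of IPCD before adding MBOX_OFFSET. A stand-alone sketch of that extraction, with the GENMASK_ULL(47, 32) mask written out explicitly and a hypothetical doorbell value:

#include <stdint.h>
#include <stdio.h>

#define MBOX_OFFSET		0x144000
/* explicit form of (GENMASK_ULL(47, 32) & x) >> 32 */
#define BYT_PANIC_OFFSET(x)	(((x) & 0x0000FFFF00000000ULL) >> 32)

int main(void)
{
	uint64_t ipcd = 0x0000084200000000ULL;	/* hypothetical IPCD value */
	uint64_t panic_addr = BYT_PANIC_OFFSET(ipcd) + MBOX_OFFSET;

	/* prints 0x144842: the DSP-side offset of the panic data */
	printf("panic data at DSP offset 0x%llx\n",
	       (unsigned long long)panic_addr);
	return 0;
}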
diff --git a/sound/soc/sof/intel/cnl.c b/sound/soc/sof/intel/cnl.c
new file mode 100644
index 000000000000..08a1a3d3c08d
--- /dev/null
+++ b/sound/soc/sof/intel/cnl.c
@@ -0,0 +1,268 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018 Intel Corporation. All rights reserved.
+//
+// Authors: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+// Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
+// Rander Wang <rander.wang@intel.com>
+// Keyon Jie <yang.jie@linux.intel.com>
+//
+
+/*
+ * Hardware interface for audio DSP on Cannonlake.
+ */
+
+#include "../ops.h"
+#include "hda.h"
+
+static const struct snd_sof_debugfs_map cnl_dsp_debugfs[] = {
+ {"hda", HDA_DSP_HDA_BAR, 0, 0x4000, SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"pp", HDA_DSP_PP_BAR, 0, 0x1000, SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"dsp", HDA_DSP_BAR, 0, 0x10000, SOF_DEBUGFS_ACCESS_ALWAYS},
+};
+
+static void cnl_ipc_host_done(struct snd_sof_dev *sdev);
+static void cnl_ipc_dsp_done(struct snd_sof_dev *sdev);
+
+static irqreturn_t cnl_ipc_irq_thread(int irq, void *context)
+{
+ struct snd_sof_dev *sdev = context;
+ u32 hipci;
+ u32 hipcctl;
+ u32 hipcida;
+ u32 hipctdr;
+ u32 hipctdd;
+ u32 msg;
+ u32 msg_ext;
+ irqreturn_t ret = IRQ_NONE;
+
+ hipcida = snd_sof_dsp_read(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCIDA);
+ hipcctl = snd_sof_dsp_read(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCCTL);
+ hipctdr = snd_sof_dsp_read(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCTDR);
+
+ /* reenable IPC interrupt */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPIC,
+ HDA_DSP_ADSPIC_IPC, HDA_DSP_ADSPIC_IPC);
+
+ /* reply message from DSP */
+ if (hipcida & CNL_DSP_REG_HIPCIDA_DONE &&
+ hipcctl & CNL_DSP_REG_HIPCCTL_DONE) {
+ hipci = snd_sof_dsp_read(sdev, HDA_DSP_BAR,
+ CNL_DSP_REG_HIPCIDR);
+ msg_ext = hipci & CNL_DSP_REG_HIPCIDR_MSG_MASK;
+ msg = hipcida & CNL_DSP_REG_HIPCIDA_MSG_MASK;
+
+ dev_vdbg(sdev->dev,
+ "ipc: firmware response, msg:0x%x, msg_ext:0x%x\n",
+ msg, msg_ext);
+
+ /* mask Done interrupt */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR,
+ CNL_DSP_REG_HIPCCTL,
+ CNL_DSP_REG_HIPCCTL_DONE, 0);
+
+ /* handle immediate reply from DSP core */
+ hda_dsp_ipc_get_reply(sdev);
+ snd_sof_ipc_reply(sdev, msg);
+
+ if (sdev->code_loading) {
+ sdev->code_loading = 0;
+ wake_up(&sdev->waitq);
+ }
+
+ cnl_ipc_dsp_done(sdev);
+
+ ret = IRQ_HANDLED;
+ }
+
+ /* new message from DSP */
+ if (hipctdr & CNL_DSP_REG_HIPCTDR_BUSY) {
+ hipctdd = snd_sof_dsp_read(sdev, HDA_DSP_BAR,
+ CNL_DSP_REG_HIPCTDD);
+ msg = hipctdr & CNL_DSP_REG_HIPCTDR_MSG_MASK;
+ msg_ext = hipctdd & CNL_DSP_REG_HIPCTDD_MSG_MASK;
+
+ dev_vdbg(sdev->dev,
+ "ipc: firmware initiated, msg:0x%x, msg_ext:0x%x\n",
+ msg, msg_ext);
+
+ /* handle messages from DSP */
+ if ((hipctdr & SOF_IPC_PANIC_MAGIC_MASK) ==
+ SOF_IPC_PANIC_MAGIC) {
+ snd_sof_dsp_panic(sdev, HDA_DSP_PANIC_OFFSET(msg_ext));
+ } else {
+ snd_sof_ipc_msgs_rx(sdev);
+ }
+
+		/*
+		 * clear the busy interrupt to tell the DSP controller that
+		 * this interrupt has been accepted, so it is not triggered
+		 * again
+		 */
+ snd_sof_dsp_update_bits_forced(sdev, HDA_DSP_BAR,
+ CNL_DSP_REG_HIPCTDR,
+ CNL_DSP_REG_HIPCTDR_BUSY,
+ CNL_DSP_REG_HIPCTDR_BUSY);
+
+ cnl_ipc_host_done(sdev);
+
+ ret = IRQ_HANDLED;
+ }
+
+ return ret;
+}
+
+static void cnl_ipc_host_done(struct snd_sof_dev *sdev)
+{
+	/*
+	 * set the done bit to acknowledge to the DSP that the msg has been
+	 * processed, and send the reply msg to the DSP
+	 */
+ snd_sof_dsp_update_bits_forced(sdev, HDA_DSP_BAR,
+ CNL_DSP_REG_HIPCTDA,
+ CNL_DSP_REG_HIPCTDA_DONE,
+ CNL_DSP_REG_HIPCTDA_DONE);
+}
+
+static void cnl_ipc_dsp_done(struct snd_sof_dev *sdev)
+{
+	/*
+	 * set DONE bit - tell the DSP we have received and processed its
+	 * reply msg, so it should not send another reply to the host
+	 */
+ snd_sof_dsp_update_bits_forced(sdev, HDA_DSP_BAR,
+ CNL_DSP_REG_HIPCIDA,
+ CNL_DSP_REG_HIPCIDA_DONE,
+ CNL_DSP_REG_HIPCIDA_DONE);
+
+ /* unmask Done interrupt */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR,
+ CNL_DSP_REG_HIPCCTL,
+ CNL_DSP_REG_HIPCCTL_DONE,
+ CNL_DSP_REG_HIPCCTL_DONE);
+}
+
+static int cnl_ipc_send_msg(struct snd_sof_dev *sdev,
+ struct snd_sof_ipc_msg *msg)
+{
+ u32 cmd = msg->header;
+
+ /* send the message */
+ sof_mailbox_write(sdev, sdev->host_box.offset, msg->msg_data,
+ msg->msg_size);
+ snd_sof_dsp_write(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCIDR,
+ cmd | CNL_DSP_REG_HIPCIDR_BUSY);
+
+ return 0;
+}
+
+static void cnl_ipc_dump(struct snd_sof_dev *sdev)
+{
+ u32 hipcctl;
+ u32 hipcida;
+ u32 hipctdr;
+
+ /* read IPC status */
+ hipcida = snd_sof_dsp_read(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCIDA);
+ hipcctl = snd_sof_dsp_read(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCCTL);
+ hipctdr = snd_sof_dsp_read(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCTDR);
+
+ /* dump the IPC regs */
+ /* TODO: parse the raw msg */
+ dev_err(sdev->dev,
+ "error: host status 0x%8.8x dsp status 0x%8.8x mask 0x%8.8x\n",
+ hipcida, hipctdr, hipcctl);
+}
+
+/* cannonlake ops */
+const struct snd_sof_dsp_ops sof_cnl_ops = {
+ /* probe and remove */
+ .probe = hda_dsp_probe,
+ .remove = hda_dsp_remove,
+
+ /* Register IO */
+ .write = sof_io_write,
+ .read = sof_io_read,
+ .write64 = sof_io_write64,
+ .read64 = sof_io_read64,
+
+ /* Block IO */
+ .block_read = sof_block_read,
+ .block_write = sof_block_write,
+
+ /* doorbell */
+ .irq_handler = hda_dsp_ipc_irq_handler,
+ .irq_thread = cnl_ipc_irq_thread,
+
+ /* ipc */
+ .send_msg = cnl_ipc_send_msg,
+ .fw_ready = hda_dsp_ipc_fw_ready,
+
+ .ipc_msg_data = hda_ipc_msg_data,
+ .ipc_pcm_params = hda_ipc_pcm_params,
+
+ /* debug */
+ .debug_map = cnl_dsp_debugfs,
+ .debug_map_count = ARRAY_SIZE(cnl_dsp_debugfs),
+ .dbg_dump = hda_dsp_dump,
+ .ipc_dump = cnl_ipc_dump,
+
+ /* stream callbacks */
+ .pcm_open = hda_dsp_pcm_open,
+ .pcm_close = hda_dsp_pcm_close,
+ .pcm_hw_params = hda_dsp_pcm_hw_params,
+ .pcm_trigger = hda_dsp_pcm_trigger,
+ .pcm_pointer = hda_dsp_pcm_pointer,
+
+ /* firmware loading */
+ .load_firmware = snd_sof_load_firmware_raw,
+
+ /* pre/post fw run */
+ .pre_fw_run = hda_dsp_pre_fw_run,
+ .post_fw_run = hda_dsp_post_fw_run,
+
+ /* dsp core power up/down */
+ .core_power_up = hda_dsp_enable_core,
+ .core_power_down = hda_dsp_core_reset_power_down,
+
+ /* firmware run */
+ .run = hda_dsp_cl_boot_firmware,
+
+ /* trace callback */
+ .trace_init = hda_dsp_trace_init,
+ .trace_release = hda_dsp_trace_release,
+ .trace_trigger = hda_dsp_trace_trigger,
+
+ /* DAI drivers */
+ .drv = skl_dai,
+ .num_drv = SOF_SKL_NUM_DAIS,
+
+ /* PM */
+ .suspend = hda_dsp_suspend,
+ .resume = hda_dsp_resume,
+ .runtime_suspend = hda_dsp_runtime_suspend,
+ .runtime_resume = hda_dsp_runtime_resume,
+ .set_hw_params_upon_resume = hda_dsp_set_hw_params_upon_resume,
+};
+EXPORT_SYMBOL(sof_cnl_ops);
+
+const struct sof_intel_dsp_desc cnl_chip_info = {
+ /* Cannonlake */
+ .cores_num = 4,
+ .init_core_mask = 1,
+ .cores_mask = HDA_DSP_CORE_MASK(0) |
+ HDA_DSP_CORE_MASK(1) |
+ HDA_DSP_CORE_MASK(2) |
+ HDA_DSP_CORE_MASK(3),
+ .ipc_req = CNL_DSP_REG_HIPCIDR,
+ .ipc_req_mask = CNL_DSP_REG_HIPCIDR_BUSY,
+ .ipc_ack = CNL_DSP_REG_HIPCIDA,
+ .ipc_ack_mask = CNL_DSP_REG_HIPCIDA_DONE,
+ .ipc_ctl = CNL_DSP_REG_HIPCCTL,
+ .rom_init_timeout = 300,
+ .ssp_count = CNL_SSP_COUNT,
+ .ssp_base_offset = CNL_SSP_BASE_OFFSET,
+};
+EXPORT_SYMBOL(cnl_chip_info);
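The cores_mask field above is built by OR-ing HDA_DSP_CORE_MASK() per core, which is assumed here to expand to a simple one-bit-per-core bitmap. A short sketch of how such a mask relates to cores_num; the macro definition below is an assumption for illustration, not copied from the driver.

#include <stdio.h>

#define HDA_DSP_CORE_MASK(n)	(1u << (n))	/* assumed definition */

int main(void)
{
	unsigned int cores_num = 4;	/* Cannonlake has four DSP cores */
	unsigned int cores_mask = 0;
	unsigned int i;

	for (i = 0; i < cores_num; i++)
		cores_mask |= HDA_DSP_CORE_MASK(i);

	/* prints 0xf, matching CORE_MASK(0..3) in the descriptor above */
	printf("cores_mask = 0x%x\n", cores_mask);
	return 0;
}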
diff --git a/sound/soc/sof/intel/hda-bus.c b/sound/soc/sof/intel/hda-bus.c
new file mode 100644
index 000000000000..a7e6d8227df6
--- /dev/null
+++ b/sound/soc/sof/intel/hda-bus.c
@@ -0,0 +1,111 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018 Intel Corporation. All rights reserved.
+//
+// Authors: Keyon Jie <yang.jie@linux.intel.com>
+
+#include <linux/io.h>
+#include <sound/hdaudio.h>
+#include "../sof-priv.h"
+#include "hda.h"
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+
+static const struct hdac_bus_ops bus_ops = {
+ .command = snd_hdac_bus_send_cmd,
+ .get_response = snd_hdac_bus_get_response,
+};
+
+#endif
+
+static void sof_hda_writel(u32 value, u32 __iomem *addr)
+{
+ writel(value, addr);
+}
+
+static u32 sof_hda_readl(u32 __iomem *addr)
+{
+ return readl(addr);
+}
+
+static void sof_hda_writew(u16 value, u16 __iomem *addr)
+{
+ writew(value, addr);
+}
+
+static u16 sof_hda_readw(u16 __iomem *addr)
+{
+ return readw(addr);
+}
+
+static void sof_hda_writeb(u8 value, u8 __iomem *addr)
+{
+ writeb(value, addr);
+}
+
+static u8 sof_hda_readb(u8 __iomem *addr)
+{
+ return readb(addr);
+}
+
+static int sof_hda_dma_alloc_pages(struct hdac_bus *bus, int type,
+ size_t size, struct snd_dma_buffer *buf)
+{
+ return snd_dma_alloc_pages(type, bus->dev, size, buf);
+}
+
+static void sof_hda_dma_free_pages(struct hdac_bus *bus,
+ struct snd_dma_buffer *buf)
+{
+ snd_dma_free_pages(buf);
+}
+
+static const struct hdac_io_ops io_ops = {
+ .reg_writel = sof_hda_writel,
+ .reg_readl = sof_hda_readl,
+ .reg_writew = sof_hda_writew,
+ .reg_readw = sof_hda_readw,
+ .reg_writeb = sof_hda_writeb,
+ .reg_readb = sof_hda_readb,
+ .dma_alloc_pages = sof_hda_dma_alloc_pages,
+ .dma_free_pages = sof_hda_dma_free_pages,
+};
+
+/*
+ * This can be used both with and without HDA link support.
+ */
+void sof_hda_bus_init(struct hdac_bus *bus, struct device *dev,
+ const struct hdac_ext_bus_ops *ext_ops)
+{
+ memset(bus, 0, sizeof(*bus));
+ bus->dev = dev;
+
+ bus->io_ops = &io_ops;
+ INIT_LIST_HEAD(&bus->stream_list);
+
+ bus->irq = -1;
+ bus->ext_ops = ext_ops;
+
+ /*
+	 * There is only one HDA bus at the moment; keep the index as 0.
+	 * This needs to be fixed when there is more than one HDA bus.
+ */
+ bus->idx = 0;
+
+ spin_lock_init(&bus->reg_lock);
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+ INIT_LIST_HEAD(&bus->codec_list);
+ INIT_LIST_HEAD(&bus->hlink_list);
+
+ mutex_init(&bus->cmd_mutex);
+ mutex_init(&bus->lock);
+ bus->ops = &bus_ops;
+ INIT_WORK(&bus->unsol_work, snd_hdac_bus_process_unsol_events);
+ bus->cmd_dma_state = true;
+#endif
+}
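The accessor table above plugs thin MMIO wrappers into an ops structure so the hdaudio core never touches the registers directly. A minimal stand-alone sketch of the same function-pointer-table pattern; the struct and function names are illustrative, not the hdaudio API.

#include <stdint.h>
#include <stdio.h>

/* illustrative ops table: one entry per register access width */
struct io_ops {
	void (*write32)(uint32_t val, uint32_t *addr);
	uint32_t (*read32)(const uint32_t *addr);
};

static void plain_write32(uint32_t val, uint32_t *addr) { *addr = val; }
static uint32_t plain_read32(const uint32_t *addr) { return *addr; }

static const struct io_ops ops = {
	.write32 = plain_write32,
	.read32 = plain_read32,
};

int main(void)
{
	uint32_t reg = 0;	/* stand-in for a memory-mapped register */

	ops.write32(0x1234, &reg);
	printf("reg = 0x%x\n", (unsigned int)ops.read32(&reg));
	return 0;
}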
diff --git a/sound/soc/sof/intel/hda-codec.c b/sound/soc/sof/intel/hda-codec.c
new file mode 100644
index 000000000000..b8b37f082309
--- /dev/null
+++ b/sound/soc/sof/intel/hda-codec.c
@@ -0,0 +1,171 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018 Intel Corporation. All rights reserved.
+//
+// Authors: Keyon Jie <yang.jie@linux.intel.com>
+//
+
+#include <linux/module.h>
+#include <sound/hdaudio_ext.h>
+#include <sound/hda_codec.h>
+#include <sound/hda_i915.h>
+#include <sound/sof.h>
+#include "../ops.h"
+#include "hda.h"
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA_AUDIO_CODEC)
+#include "../../codecs/hdac_hda.h"
+#endif /* CONFIG_SND_SOC_SOF_HDA_AUDIO_CODEC */
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA_AUDIO_CODEC)
+#define IDISP_VID_INTEL 0x80860000
+
+/* load the legacy HDA codec driver */
+#ifdef MODULE
+static void hda_codec_load_module(struct hda_codec *codec)
+{
+ char alias[MODULE_NAME_LEN];
+ const char *module = alias;
+
+ snd_hdac_codec_modalias(&codec->core, alias, sizeof(alias));
+ dev_dbg(&codec->core.dev, "loading codec module: %s\n", module);
+ request_module(module);
+}
+#else
+static void hda_codec_load_module(struct hda_codec *codec) {}
+#endif
+
+#endif /* CONFIG_SND_SOC_SOF_HDA_AUDIO_CODEC */
+
+/* probe individual codec */
+static int hda_codec_probe(struct snd_sof_dev *sdev, int address)
+{
+ struct hda_bus *hbus = sof_to_hbus(sdev);
+ struct hdac_device *hdev;
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA_AUDIO_CODEC)
+ struct hdac_hda_priv *hda_priv;
+#endif
+ u32 hda_cmd = (address << 28) | (AC_NODE_ROOT << 20) |
+ (AC_VERB_PARAMETERS << 8) | AC_PAR_VENDOR_ID;
+ u32 resp = -1;
+ int ret;
+
+ mutex_lock(&hbus->core.cmd_mutex);
+ snd_hdac_bus_send_cmd(&hbus->core, hda_cmd);
+ snd_hdac_bus_get_response(&hbus->core, address, &resp);
+ mutex_unlock(&hbus->core.cmd_mutex);
+ if (resp == -1)
+ return -EIO;
+ dev_dbg(sdev->dev, "HDA codec #%d probed OK: response: %x\n",
+ address, resp);
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA_AUDIO_CODEC)
+ /* snd_hdac_ext_bus_device_exit will use kfree to free hdev */
+ hda_priv = kzalloc(sizeof(*hda_priv), GFP_KERNEL);
+ if (!hda_priv)
+ return -ENOMEM;
+
+ hda_priv->codec.bus = hbus;
+ hdev = &hda_priv->codec.core;
+
+ ret = snd_hdac_ext_bus_device_init(&hbus->core, address, hdev);
+ if (ret < 0)
+ return ret;
+
+ /* use legacy bus only for HDA codecs, idisp uses ext bus */
+ if ((resp & 0xFFFF0000) != IDISP_VID_INTEL) {
+ hdev->type = HDA_DEV_LEGACY;
+ hda_codec_load_module(&hda_priv->codec);
+ }
+
+ return 0;
+#else
+ /* snd_hdac_ext_bus_device_exit will use kfree to free hdev */
+ hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
+ if (!hdev)
+ return -ENOMEM;
+
+ ret = snd_hdac_ext_bus_device_init(&hbus->core, address, hdev);
+
+ return ret;
+#endif
+}
+
+/* Codec initialization */
+int hda_codec_probe_bus(struct snd_sof_dev *sdev)
+{
+ struct hdac_bus *bus = sof_to_bus(sdev);
+ int i, ret;
+
+	/* probe codecs in available slots */
+ for (i = 0; i < HDA_MAX_CODECS; i++) {
+ if (!(bus->codec_mask & (1 << i)))
+ continue;
+
+ ret = hda_codec_probe(sdev, i);
+ if (ret < 0) {
+ dev_err(bus->dev, "error: codec #%d probe error, ret: %d\n",
+ i, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(hda_codec_probe_bus);
+
+#if IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI)
+
+void hda_codec_i915_get(struct snd_sof_dev *sdev)
+{
+ struct hdac_bus *bus = sof_to_bus(sdev);
+
+ dev_dbg(bus->dev, "Turning i915 HDAC power on\n");
+ snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, true);
+}
+EXPORT_SYMBOL(hda_codec_i915_get);
+
+void hda_codec_i915_put(struct snd_sof_dev *sdev)
+{
+ struct hdac_bus *bus = sof_to_bus(sdev);
+
+ dev_dbg(bus->dev, "Turning i915 HDAC power off\n");
+ snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, false);
+}
+EXPORT_SYMBOL(hda_codec_i915_put);
+
+int hda_codec_i915_init(struct snd_sof_dev *sdev)
+{
+ struct hdac_bus *bus = sof_to_bus(sdev);
+ int ret;
+
+ /* i915 exposes a HDA codec for HDMI audio */
+ ret = snd_hdac_i915_init(bus);
+ if (ret < 0)
+ return ret;
+
+ hda_codec_i915_get(sdev);
+
+ return 0;
+}
+EXPORT_SYMBOL(hda_codec_i915_init);
+
+int hda_codec_i915_exit(struct snd_sof_dev *sdev)
+{
+ struct hdac_bus *bus = sof_to_bus(sdev);
+ int ret;
+
+ hda_codec_i915_put(sdev);
+
+ ret = snd_hdac_i915_exit(bus);
+
+ return ret;
+}
+EXPORT_SYMBOL(hda_codec_i915_exit);
+
+#endif /* CONFIG_SND_SOC_HDAC_HDMI */
+
+MODULE_LICENSE("Dual BSD/GPL");
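The codec probe above builds a standard HDA verb by packing the codec address into bits 31:28, the node ID into bits 27:20, the verb into bits 19:8 and the parameter into bits 7:0, then reads AC_PAR_VENDOR_ID from the root node. A stand-alone sketch of that encoding; the constant values are reproduced here for illustration.

#include <stdint.h>
#include <stdio.h>

#define AC_NODE_ROOT		0x00
#define AC_VERB_PARAMETERS	0xf00
#define AC_PAR_VENDOR_ID	0x00

static uint32_t hda_make_verb(uint32_t addr, uint32_t nid,
			      uint32_t verb, uint32_t param)
{
	return (addr << 28) | (nid << 20) | (verb << 8) | param;
}

int main(void)
{
	/* query the vendor ID parameter of codec #2's root node */
	uint32_t cmd = hda_make_verb(2, AC_NODE_ROOT,
				     AC_VERB_PARAMETERS, AC_PAR_VENDOR_ID);

	printf("verb = 0x%08x\n", (unsigned int)cmd);	/* prints 0x200f0000 */
	return 0;
}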
diff --git a/sound/soc/sof/intel/hda-ctrl.c b/sound/soc/sof/intel/hda-ctrl.c
new file mode 100644
index 000000000000..2c3645736e1f
--- /dev/null
+++ b/sound/soc/sof/intel/hda-ctrl.c
@@ -0,0 +1,181 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018 Intel Corporation. All rights reserved.
+//
+// Authors: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+// Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
+// Rander Wang <rander.wang@intel.com>
+// Keyon Jie <yang.jie@linux.intel.com>
+//
+
+/*
+ * Hardware interface for generic Intel audio DSP HDA IP
+ */
+
+#include <sound/hdaudio_ext.h>
+#include <sound/hda_register.h>
+#include "../ops.h"
+#include "hda.h"
+
+/*
+ * HDA Operations.
+ */
+
+int hda_dsp_ctrl_link_reset(struct snd_sof_dev *sdev, bool reset)
+{
+ unsigned long timeout;
+ u32 gctl = 0;
+ u32 val;
+
+ /* 0 to enter reset and 1 to exit reset */
+ val = reset ? 0 : SOF_HDA_GCTL_RESET;
+
+ /* enter/exit HDA controller reset */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, SOF_HDA_GCTL,
+ SOF_HDA_GCTL_RESET, val);
+
+ /* wait to enter/exit reset */
+ timeout = jiffies + msecs_to_jiffies(HDA_DSP_CTRL_RESET_TIMEOUT);
+ while (time_before(jiffies, timeout)) {
+ gctl = snd_sof_dsp_read(sdev, HDA_DSP_HDA_BAR, SOF_HDA_GCTL);
+ if ((gctl & SOF_HDA_GCTL_RESET) == val)
+ return 0;
+ usleep_range(500, 1000);
+ }
+
+ /* enter/exit reset failed */
+ dev_err(sdev->dev, "error: failed to %s HDA controller gctl 0x%x\n",
+ reset ? "reset" : "ready", gctl);
+ return -EIO;
+}
+
+int hda_dsp_ctrl_get_caps(struct snd_sof_dev *sdev)
+{
+ struct hdac_bus *bus = sof_to_bus(sdev);
+ u32 cap, offset, feature;
+ int count = 0;
+
+ offset = snd_sof_dsp_read(sdev, HDA_DSP_HDA_BAR, SOF_HDA_LLCH);
+
+ do {
+ cap = snd_sof_dsp_read(sdev, HDA_DSP_HDA_BAR, offset);
+
+ dev_dbg(sdev->dev, "checking for capabilities at offset 0x%x\n",
+ offset & SOF_HDA_CAP_NEXT_MASK);
+
+ feature = (cap & SOF_HDA_CAP_ID_MASK) >> SOF_HDA_CAP_ID_OFF;
+
+ switch (feature) {
+ case SOF_HDA_PP_CAP_ID:
+ dev_dbg(sdev->dev, "found DSP capability at 0x%x\n",
+ offset);
+ bus->ppcap = bus->remap_addr + offset;
+ sdev->bar[HDA_DSP_PP_BAR] = bus->ppcap;
+ break;
+ case SOF_HDA_SPIB_CAP_ID:
+ dev_dbg(sdev->dev, "found SPIB capability at 0x%x\n",
+ offset);
+ bus->spbcap = bus->remap_addr + offset;
+ sdev->bar[HDA_DSP_SPIB_BAR] = bus->spbcap;
+ break;
+ case SOF_HDA_DRSM_CAP_ID:
+ dev_dbg(sdev->dev, "found DRSM capability at 0x%x\n",
+ offset);
+ bus->drsmcap = bus->remap_addr + offset;
+ sdev->bar[HDA_DSP_DRSM_BAR] = bus->drsmcap;
+ break;
+ case SOF_HDA_GTS_CAP_ID:
+ dev_dbg(sdev->dev, "found GTS capability at 0x%x\n",
+ offset);
+ bus->gtscap = bus->remap_addr + offset;
+ break;
+ case SOF_HDA_ML_CAP_ID:
+ dev_dbg(sdev->dev, "found ML capability at 0x%x\n",
+ offset);
+ bus->mlcap = bus->remap_addr + offset;
+ break;
+ default:
+ dev_vdbg(sdev->dev, "found capability %d at 0x%x\n",
+ feature, offset);
+ break;
+ }
+
+ offset = cap & SOF_HDA_CAP_NEXT_MASK;
+ } while (count++ <= SOF_HDA_MAX_CAPS && offset);
+
+ return 0;
+}
+
+void hda_dsp_ctrl_ppcap_enable(struct snd_sof_dev *sdev, bool enable)
+{
+ u32 val = enable ? SOF_HDA_PPCTL_GPROCEN : 0;
+
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_PP_BAR, SOF_HDA_REG_PP_PPCTL,
+ SOF_HDA_PPCTL_GPROCEN, val);
+}
+
+void hda_dsp_ctrl_ppcap_int_enable(struct snd_sof_dev *sdev, bool enable)
+{
+ u32 val = enable ? SOF_HDA_PPCTL_PIE : 0;
+
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_PP_BAR, SOF_HDA_REG_PP_PPCTL,
+ SOF_HDA_PPCTL_PIE, val);
+}
+
+void hda_dsp_ctrl_misc_clock_gating(struct snd_sof_dev *sdev, bool enable)
+{
+ u32 val = enable ? PCI_CGCTL_MISCBDCGE_MASK : 0;
+
+ snd_sof_pci_update_bits(sdev, PCI_CGCTL, PCI_CGCTL_MISCBDCGE_MASK, val);
+}
+
+/*
+ * enable/disable audio dsp clock gating and power gating bits.
+ * This allows the HW to opportunistically power and clock gate
+ * the audio dsp when it is idle
+ */
+int hda_dsp_ctrl_clock_power_gating(struct snd_sof_dev *sdev, bool enable)
+{
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+ struct hdac_bus *bus = sof_to_bus(sdev);
+#endif
+ u32 val;
+
+ /* enable/disable audio dsp clock gating */
+ val = enable ? PCI_CGCTL_ADSPDCGE : 0;
+ snd_sof_pci_update_bits(sdev, PCI_CGCTL, PCI_CGCTL_ADSPDCGE, val);
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+ /* enable/disable L1 support */
+ val = enable ? SOF_HDA_VS_EM2_L1SEN : 0;
+ snd_hdac_chip_updatel(bus, VS_EM2, SOF_HDA_VS_EM2_L1SEN, val);
+#endif
+
+ /* enable/disable audio dsp power gating */
+ val = enable ? 0 : PCI_PGCTL_ADSPPGD;
+ snd_sof_pci_update_bits(sdev, PCI_PGCTL, PCI_PGCTL_ADSPPGD, val);
+
+ return 0;
+}
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+/*
+ * While performing a reset, the controller may not come back up properly,
+ * causing issues, so the recommendation is to set CGCTL.MISCBDCGE to 0,
+ * then perform the reset (init chip), and then set CGCTL.MISCBDCGE to 1 again.
+ */
+int hda_dsp_ctrl_init_chip(struct snd_sof_dev *sdev, bool full_reset)
+{
+ struct hdac_bus *bus = sof_to_bus(sdev);
+ int ret;
+
+ hda_dsp_ctrl_misc_clock_gating(sdev, false);
+ ret = snd_hdac_bus_init_chip(bus, full_reset);
+ hda_dsp_ctrl_misc_clock_gating(sdev, true);
+
+ return ret;
+}
+#endif
diff --git a/sound/soc/sof/intel/hda-dai.c b/sound/soc/sof/intel/hda-dai.c
new file mode 100644
index 000000000000..e1decf25aeac
--- /dev/null
+++ b/sound/soc/sof/intel/hda-dai.c
@@ -0,0 +1,356 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018 Intel Corporation. All rights reserved.
+//
+// Authors: Keyon Jie <yang.jie@linux.intel.com>
+//
+
+#include <sound/pcm_params.h>
+#include <sound/hdaudio_ext.h>
+#include "../sof-priv.h"
+#include "hda.h"
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+
+struct hda_pipe_params {
+ u8 host_dma_id;
+ u8 link_dma_id;
+ u32 ch;
+ u32 s_freq;
+ u32 s_fmt;
+ u8 linktype;
+ snd_pcm_format_t format;
+ int link_index;
+ int stream;
+ unsigned int host_bps;
+ unsigned int link_bps;
+};
+
+/*
+ * Unlike GP DMA, the HDA controller has a set of stream registers to
+ * control the link DMA channels. Each register controls one link DMA
+ * channel and the mapping is fixed. To make sure the FW uses the correct
+ * link DMA channels, the host allocates the stream registers and sends
+ * the corresponding link DMA channel IDs to the FW.
+ *
+ * FIXME: this API is abused in the sense that tx_num and rx_num are
+ * passed as arguments, not returned. We need to find a better way to
+ * retrieve the stream tag allocated for the link DMA
+ */
+static int hda_link_dma_get_channels(struct snd_soc_dai *dai,
+ unsigned int *tx_num,
+ unsigned int *tx_slot,
+ unsigned int *rx_num,
+ unsigned int *rx_slot)
+{
+ struct hdac_bus *bus;
+ struct hdac_ext_stream *stream;
+ struct snd_pcm_substream substream;
+ struct snd_sof_dev *sdev =
+ snd_soc_component_get_drvdata(dai->component);
+
+ bus = sof_to_bus(sdev);
+
+ memset(&substream, 0, sizeof(substream));
+ if (*tx_num == 1) {
+ substream.stream = SNDRV_PCM_STREAM_PLAYBACK;
+ stream = snd_hdac_ext_stream_assign(bus, &substream,
+ HDAC_EXT_STREAM_TYPE_LINK);
+ if (!stream) {
+ dev_err(bus->dev, "error: failed to find a free hda ext stream for playback");
+ return -EBUSY;
+ }
+
+ snd_soc_dai_set_dma_data(dai, &substream, stream);
+ *tx_slot = hdac_stream(stream)->stream_tag - 1;
+
+ dev_dbg(bus->dev, "link dma channel %d for playback", *tx_slot);
+ }
+
+ if (*rx_num == 1) {
+ substream.stream = SNDRV_PCM_STREAM_CAPTURE;
+ stream = snd_hdac_ext_stream_assign(bus, &substream,
+ HDAC_EXT_STREAM_TYPE_LINK);
+ if (!stream) {
+ dev_err(bus->dev, "error: failed to find a free hda ext stream for capture");
+ return -EBUSY;
+ }
+
+ snd_soc_dai_set_dma_data(dai, &substream, stream);
+ *rx_slot = hdac_stream(stream)->stream_tag - 1;
+
+ dev_dbg(bus->dev, "link dma channel %d for capture", *rx_slot);
+ }
+
+ return 0;
+}
+
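+/* decouple the link DMA stream, then program its format and stream ID */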
+static int hda_link_dma_params(struct hdac_ext_stream *stream,
+ struct hda_pipe_params *params)
+{
+ struct hdac_stream *hstream = &stream->hstream;
+ unsigned char stream_tag = hstream->stream_tag;
+ struct hdac_bus *bus = hstream->bus;
+ struct hdac_ext_link *link;
+ unsigned int format_val;
+
+ snd_hdac_ext_stream_decouple(bus, stream, true);
+ snd_hdac_ext_link_stream_reset(stream);
+
+ format_val = snd_hdac_calc_stream_format(params->s_freq, params->ch,
+ params->format,
+ params->link_bps, 0);
+
+ dev_dbg(bus->dev, "format_val=%d, rate=%d, ch=%d, format=%d\n",
+ format_val, params->s_freq, params->ch, params->format);
+
+ snd_hdac_ext_link_stream_setup(stream, format_val);
+
+ if (stream->hstream.direction == SNDRV_PCM_STREAM_PLAYBACK) {
+ list_for_each_entry(link, &bus->hlink_list, list) {
+ if (link->index == params->link_index)
+ snd_hdac_ext_link_set_stream_id(link,
+ stream_tag);
+ }
+ }
+
+ stream->link_prepared = 1;
+
+ return 0;
+}
+
+static int hda_link_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct hdac_stream *hstream = substream->runtime->private_data;
+ struct hdac_bus *bus = hstream->bus;
+ struct hdac_ext_stream *link_dev;
+ struct snd_soc_pcm_runtime *rtd = snd_pcm_substream_chip(substream);
+ struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ struct sof_intel_hda_stream *hda_stream;
+ struct hda_pipe_params p_params = {0};
+ struct hdac_ext_link *link;
+ int stream_tag;
+
+ link_dev = snd_soc_dai_get_dma_data(dai, substream);
+
+ hda_stream = container_of(link_dev, struct sof_intel_hda_stream,
+ hda_stream);
+ hda_stream->hw_params_upon_resume = 0;
+
+ link = snd_hdac_ext_bus_get_link(bus, codec_dai->component->name);
+ if (!link)
+ return -EINVAL;
+
+ stream_tag = hdac_stream(link_dev)->stream_tag;
+
+ /* set the stream tag in the codec dai dma params */
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ snd_soc_dai_set_tdm_slot(codec_dai, stream_tag, 0, 0, 0);
+ else
+ snd_soc_dai_set_tdm_slot(codec_dai, 0, stream_tag, 0, 0);
+
+ p_params.s_fmt = snd_pcm_format_width(params_format(params));
+ p_params.ch = params_channels(params);
+ p_params.s_freq = params_rate(params);
+ p_params.stream = substream->stream;
+ p_params.link_dma_id = stream_tag - 1;
+ p_params.link_index = link->index;
+ p_params.format = params_format(params);
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ p_params.link_bps = codec_dai->driver->playback.sig_bits;
+ else
+ p_params.link_bps = codec_dai->driver->capture.sig_bits;
+
+ return hda_link_dma_params(link_dev, &p_params);
+}
+
+static int hda_link_pcm_prepare(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct hdac_ext_stream *link_dev =
+ snd_soc_dai_get_dma_data(dai, substream);
+ struct sof_intel_hda_stream *hda_stream;
+ struct snd_sof_dev *sdev =
+ snd_soc_component_get_drvdata(dai->component);
+ struct snd_soc_pcm_runtime *rtd = snd_pcm_substream_chip(substream);
+ int stream = substream->stream;
+
+ hda_stream = container_of(link_dev, struct sof_intel_hda_stream,
+ hda_stream);
+
+ /* setup hw_params again only if resuming from system suspend */
+ if (!hda_stream->hw_params_upon_resume)
+ return 0;
+
+ dev_dbg(sdev->dev, "hda: prepare stream dir %d\n", substream->stream);
+
+ return hda_link_hw_params(substream, &rtd->dpcm[stream].hw_params,
+ dai);
+}
+
+static int hda_link_pcm_trigger(struct snd_pcm_substream *substream,
+ int cmd, struct snd_soc_dai *dai)
+{
+ struct hdac_ext_stream *link_dev =
+ snd_soc_dai_get_dma_data(dai, substream);
+ int ret;
+
+ dev_dbg(dai->dev, "In %s cmd=%d\n", __func__, cmd);
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_RESUME:
+ /* set up hw_params */
+ ret = hda_link_pcm_prepare(substream, dai);
+ if (ret < 0) {
+ dev_err(dai->dev,
+ "error: setting up hw_params during resume\n");
+ return ret;
+ }
+
+ /* fallthrough */
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ snd_hdac_ext_link_stream_start(link_dev);
+ break;
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ case SNDRV_PCM_TRIGGER_STOP:
+ snd_hdac_ext_link_stream_clear(link_dev);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/*
+ * FIXME: This API is also abused since it's used for two purposes.
+ * When the substream argument is NULL, this function is used for cleanups
+ * that aren't necessarily required and is called explicitly while handling
+ * ASoC core structures, which is not recommended.
+ * This part will be reworked in follow-up patches.
+ */
+static int hda_link_hw_free(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ const char *name;
+ unsigned int stream_tag;
+ struct hdac_bus *bus;
+ struct hdac_ext_link *link;
+ struct hdac_stream *hstream;
+ struct hdac_ext_stream *stream;
+ struct snd_soc_pcm_runtime *rtd;
+ struct hdac_ext_stream *link_dev;
+ struct snd_pcm_substream pcm_substream;
+
+ memset(&pcm_substream, 0, sizeof(pcm_substream));
+ if (substream) {
+ hstream = substream->runtime->private_data;
+ bus = hstream->bus;
+ rtd = snd_pcm_substream_chip(substream);
+ link_dev = snd_soc_dai_get_dma_data(dai, substream);
+ snd_hdac_ext_stream_decouple(bus, link_dev, false);
+ name = rtd->codec_dai->component->name;
+ link = snd_hdac_ext_bus_get_link(bus, name);
+ if (!link)
+ return -EINVAL;
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ stream_tag = hdac_stream(link_dev)->stream_tag;
+ snd_hdac_ext_link_clear_stream_id(link, stream_tag);
+ }
+
+ link_dev->link_prepared = 0;
+ } else {
+ /* release all hda streams when dai link is unloaded */
+ pcm_substream.stream = SNDRV_PCM_STREAM_PLAYBACK;
+ stream = snd_soc_dai_get_dma_data(dai, &pcm_substream);
+ if (stream) {
+ snd_soc_dai_set_dma_data(dai, &pcm_substream, NULL);
+ snd_hdac_ext_stream_release(stream,
+ HDAC_EXT_STREAM_TYPE_LINK);
+ }
+
+ pcm_substream.stream = SNDRV_PCM_STREAM_CAPTURE;
+ stream = snd_soc_dai_get_dma_data(dai, &pcm_substream);
+ if (stream) {
+ snd_soc_dai_set_dma_data(dai, &pcm_substream, NULL);
+ snd_hdac_ext_stream_release(stream,
+ HDAC_EXT_STREAM_TYPE_LINK);
+ }
+ }
+
+ return 0;
+}
+
+static const struct snd_soc_dai_ops hda_link_dai_ops = {
+ .hw_params = hda_link_hw_params,
+ .hw_free = hda_link_hw_free,
+ .trigger = hda_link_pcm_trigger,
+ .prepare = hda_link_pcm_prepare,
+ .get_channel_map = hda_link_dma_get_channels,
+};
+#endif
+
+/*
+ * common dai driver for skl+ platforms.
+ * some products that use this DAI array only physically have a subset of
+ * the DAIs, but no harm is done here by registering the whole set.
+ */
+struct snd_soc_dai_driver skl_dai[] = {
+{
+ .name = "SSP0 Pin",
+},
+{
+ .name = "SSP1 Pin",
+},
+{
+ .name = "SSP2 Pin",
+},
+{
+ .name = "SSP3 Pin",
+},
+{
+ .name = "SSP4 Pin",
+},
+{
+ .name = "SSP5 Pin",
+},
+{
+ .name = "DMIC01 Pin",
+},
+{
+ .name = "DMIC16k Pin",
+},
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+{
+ .name = "iDisp1 Pin",
+ .ops = &hda_link_dai_ops,
+},
+{
+ .name = "iDisp2 Pin",
+ .ops = &hda_link_dai_ops,
+},
+{
+ .name = "iDisp3 Pin",
+ .ops = &hda_link_dai_ops,
+},
+{
+ .name = "Analog CPU DAI",
+ .ops = &hda_link_dai_ops,
+},
+{
+ .name = "Digital CPU DAI",
+ .ops = &hda_link_dai_ops,
+},
+{
+ .name = "Alt Analog CPU DAI",
+ .ops = &hda_link_dai_ops,
+},
+#endif
+};
diff --git a/sound/soc/sof/intel/hda-dsp.c b/sound/soc/sof/intel/hda-dsp.c
new file mode 100644
index 000000000000..5b73115a0b78
--- /dev/null
+++ b/sound/soc/sof/intel/hda-dsp.c
@@ -0,0 +1,471 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018 Intel Corporation. All rights reserved.
+//
+// Authors: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+// Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
+// Rander Wang <rander.wang@intel.com>
+// Keyon Jie <yang.jie@linux.intel.com>
+//
+
+/*
+ * Hardware interface for generic Intel audio DSP HDA IP
+ */
+
+#include <sound/hdaudio_ext.h>
+#include <sound/hda_register.h>
+#include "../ops.h"
+#include "hda.h"
+
+/*
+ * DSP Core control.
+ */
+
+int hda_dsp_core_reset_enter(struct snd_sof_dev *sdev, unsigned int core_mask)
+{
+ u32 adspcs;
+ u32 reset;
+ int ret;
+
+ /* set reset bits for cores */
+ reset = HDA_DSP_ADSPCS_CRST_MASK(core_mask);
+ snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR,
+ HDA_DSP_REG_ADSPCS,
+ reset, reset);
+
+ /* poll with timeout to check if operation successful */
+ ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
+ HDA_DSP_REG_ADSPCS, adspcs,
+ ((adspcs & reset) == reset),
+ HDA_DSP_REG_POLL_INTERVAL_US,
+ HDA_DSP_RESET_TIMEOUT_US);
+
+ /* has core entered reset ? */
+ adspcs = snd_sof_dsp_read(sdev, HDA_DSP_BAR,
+ HDA_DSP_REG_ADSPCS);
+ if ((adspcs & HDA_DSP_ADSPCS_CRST_MASK(core_mask)) !=
+ HDA_DSP_ADSPCS_CRST_MASK(core_mask)) {
+ dev_err(sdev->dev,
+ "error: reset enter failed: core_mask %x adspcs 0x%x\n",
+ core_mask, adspcs);
+ ret = -EIO;
+ }
+
+ return ret;
+}
+
+int hda_dsp_core_reset_leave(struct snd_sof_dev *sdev, unsigned int core_mask)
+{
+ unsigned int crst;
+ u32 adspcs;
+ int ret;
+
+ /* clear reset bits for cores */
+ snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR,
+ HDA_DSP_REG_ADSPCS,
+ HDA_DSP_ADSPCS_CRST_MASK(core_mask),
+ 0);
+
+ /* poll with timeout to check if operation successful */
+ crst = HDA_DSP_ADSPCS_CRST_MASK(core_mask);
+ ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
+ HDA_DSP_REG_ADSPCS, adspcs,
+ !(adspcs & crst),
+ HDA_DSP_REG_POLL_INTERVAL_US,
+ HDA_DSP_RESET_TIMEOUT_US);
+
+ /* has core left reset ? */
+ adspcs = snd_sof_dsp_read(sdev, HDA_DSP_BAR,
+ HDA_DSP_REG_ADSPCS);
+ if ((adspcs & HDA_DSP_ADSPCS_CRST_MASK(core_mask)) != 0) {
+ dev_err(sdev->dev,
+ "error: reset leave failed: core_mask %x adspcs 0x%x\n",
+ core_mask, adspcs);
+ ret = -EIO;
+ }
+
+ return ret;
+}
+
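+/* stall the DSP core(s) in core_mask and place them in reset */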
+int hda_dsp_core_stall_reset(struct snd_sof_dev *sdev, unsigned int core_mask)
+{
+ /* stall core */
+ snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR,
+ HDA_DSP_REG_ADSPCS,
+ HDA_DSP_ADSPCS_CSTALL_MASK(core_mask),
+ HDA_DSP_ADSPCS_CSTALL_MASK(core_mask));
+
+ /* set reset state */
+ return hda_dsp_core_reset_enter(sdev, core_mask);
+}
+
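+/* take the DSP core(s) in core_mask out of reset and unstall/run them */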
+int hda_dsp_core_run(struct snd_sof_dev *sdev, unsigned int core_mask)
+{
+ int ret;
+
+ /* leave reset state */
+ ret = hda_dsp_core_reset_leave(sdev, core_mask);
+ if (ret < 0)
+ return ret;
+
+ /* run core */
+ dev_dbg(sdev->dev, "unstall/run core: core_mask = %x\n", core_mask);
+ snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR,
+ HDA_DSP_REG_ADSPCS,
+ HDA_DSP_ADSPCS_CSTALL_MASK(core_mask),
+ 0);
+
+ /* is core now running ? */
+ if (!hda_dsp_core_is_enabled(sdev, core_mask)) {
+ hda_dsp_core_stall_reset(sdev, core_mask);
+ dev_err(sdev->dev, "error: DSP start core failed: core_mask %x\n",
+ core_mask);
+ ret = -EIO;
+ }
+
+ return ret;
+}
+
+/*
+ * Power Management.
+ */
+
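+/* power up the DSP core(s) in core_mask and wait for the CPA bits to confirm */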
+int hda_dsp_core_power_up(struct snd_sof_dev *sdev, unsigned int core_mask)
+{
+ unsigned int cpa;
+ u32 adspcs;
+ int ret;
+
+ /* update bits */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPCS,
+ HDA_DSP_ADSPCS_SPA_MASK(core_mask),
+ HDA_DSP_ADSPCS_SPA_MASK(core_mask));
+
+ /* poll with timeout to check if operation successful */
+ cpa = HDA_DSP_ADSPCS_CPA_MASK(core_mask);
+ ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
+ HDA_DSP_REG_ADSPCS, adspcs,
+ (adspcs & cpa) == cpa,
+ HDA_DSP_REG_POLL_INTERVAL_US,
+ HDA_DSP_RESET_TIMEOUT_US);
+ if (ret < 0)
+ dev_err(sdev->dev, "error: timeout on core powerup\n");
+
+ /* did core power up ? */
+ adspcs = snd_sof_dsp_read(sdev, HDA_DSP_BAR,
+ HDA_DSP_REG_ADSPCS);
+ if ((adspcs & HDA_DSP_ADSPCS_CPA_MASK(core_mask)) !=
+ HDA_DSP_ADSPCS_CPA_MASK(core_mask)) {
+ dev_err(sdev->dev,
+ "error: power up core failed core_mask %xadspcs 0x%x\n",
+ core_mask, adspcs);
+ ret = -EIO;
+ }
+
+ return ret;
+}
+
+int hda_dsp_core_power_down(struct snd_sof_dev *sdev, unsigned int core_mask)
+{
+ u32 adspcs;
+
+ /* update bits */
+ snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR,
+ HDA_DSP_REG_ADSPCS,
+ HDA_DSP_ADSPCS_SPA_MASK(core_mask), 0);
+
+ return snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
+ HDA_DSP_REG_ADSPCS, adspcs,
+ !(adspcs & HDA_DSP_ADSPCS_SPA_MASK(core_mask)),
+ HDA_DSP_REG_POLL_INTERVAL_US,
+ HDA_DSP_PD_TIMEOUT * USEC_PER_MSEC);
+}
+
+bool hda_dsp_core_is_enabled(struct snd_sof_dev *sdev,
+ unsigned int core_mask)
+{
+ int val;
+ bool is_enable;
+
+ val = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPCS);
+
+ is_enable = ((val & HDA_DSP_ADSPCS_CPA_MASK(core_mask)) &&
+ (val & HDA_DSP_ADSPCS_SPA_MASK(core_mask)) &&
+ !(val & HDA_DSP_ADSPCS_CRST_MASK(core_mask)) &&
+ !(val & HDA_DSP_ADSPCS_CSTALL_MASK(core_mask)));
+
+ dev_dbg(sdev->dev, "DSP core(s) enabled? %d : core_mask %x\n",
+ is_enable, core_mask);
+
+ return is_enable;
+}
+
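+/* power up and run the DSP core(s) in core_mask if not already enabled */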
+int hda_dsp_enable_core(struct snd_sof_dev *sdev, unsigned int core_mask)
+{
+ int ret;
+
+ /* return if core is already enabled */
+ if (hda_dsp_core_is_enabled(sdev, core_mask))
+ return 0;
+
+ /* power up */
+ ret = hda_dsp_core_power_up(sdev, core_mask);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: dsp core power up failed: core_mask %x\n",
+ core_mask);
+ return ret;
+ }
+
+ return hda_dsp_core_run(sdev, core_mask);
+}
+
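+/* place the DSP core(s) in reset and power them down */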
+int hda_dsp_core_reset_power_down(struct snd_sof_dev *sdev,
+ unsigned int core_mask)
+{
+ int ret;
+
+ /* place core in reset prior to power down */
+ ret = hda_dsp_core_stall_reset(sdev, core_mask);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: dsp core reset failed: core_mask %x\n",
+ core_mask);
+ return ret;
+ }
+
+ /* power down core */
+ ret = hda_dsp_core_power_down(sdev, core_mask);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: dsp core power down fail mask %x: %d\n",
+ core_mask, ret);
+ return ret;
+ }
+
+ /* make sure we are in OFF state */
+ if (hda_dsp_core_is_enabled(sdev, core_mask)) {
+ dev_err(sdev->dev, "error: dsp core disable fail mask %x: %d\n",
+ core_mask, ret);
+ ret = -EIO;
+ }
+
+ return ret;
+}
+
+void hda_dsp_ipc_int_enable(struct snd_sof_dev *sdev)
+{
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+ const struct sof_intel_dsp_desc *chip = hda->desc;
+
+ /* enable IPC DONE and BUSY interrupts */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, chip->ipc_ctl,
+ HDA_DSP_REG_HIPCCTL_DONE | HDA_DSP_REG_HIPCCTL_BUSY,
+ HDA_DSP_REG_HIPCCTL_DONE | HDA_DSP_REG_HIPCCTL_BUSY);
+
+ /* enable IPC interrupt */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPIC,
+ HDA_DSP_ADSPIC_IPC, HDA_DSP_ADSPIC_IPC);
+}
+
+void hda_dsp_ipc_int_disable(struct snd_sof_dev *sdev)
+{
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+ const struct sof_intel_dsp_desc *chip = hda->desc;
+
+ /* disable IPC interrupt */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPIC,
+ HDA_DSP_ADSPIC_IPC, 0);
+
+ /* disable IPC BUSY and DONE interrupt */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, chip->ipc_ctl,
+ HDA_DSP_REG_HIPCCTL_BUSY | HDA_DSP_REG_HIPCCTL_DONE, 0);
+}
+
+static int hda_suspend(struct snd_sof_dev *sdev, int state)
+{
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+ const struct sof_intel_dsp_desc *chip = hda->desc;
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+ struct hdac_bus *bus = sof_to_bus(sdev);
+#endif
+ int ret;
+
+ /* disable IPC interrupts */
+ hda_dsp_ipc_int_disable(sdev);
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+ /* power down all hda link */
+ snd_hdac_ext_bus_link_power_down_all(bus);
+#endif
+
+ /* power down DSP */
+ ret = hda_dsp_core_reset_power_down(sdev, chip->cores_mask);
+ if (ret < 0) {
+ dev_err(sdev->dev,
+ "error: failed to power down core during suspend\n");
+ return ret;
+ }
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+ /* disable ppcap interrupt */
+ snd_hdac_ext_bus_ppcap_int_enable(bus, false);
+ snd_hdac_ext_bus_ppcap_enable(bus, false);
+
+ /* disable hda bus irq and i/o */
+ snd_hdac_bus_stop_chip(bus);
+#else
+ /* disable ppcap interrupt */
+ hda_dsp_ctrl_ppcap_enable(sdev, false);
+ hda_dsp_ctrl_ppcap_int_enable(sdev, false);
+
+ /* disable hda bus irq */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, SOF_HDA_INTCTL,
+ SOF_HDA_INT_CTRL_EN | SOF_HDA_INT_GLOBAL_EN,
+ 0);
+#endif
+
+ /* disable LP retention mode */
+ snd_sof_pci_update_bits(sdev, PCI_PGCTL,
+ PCI_PGCTL_LSRMD_MASK, PCI_PGCTL_LSRMD_MASK);
+
+ /* reset controller */
+ ret = hda_dsp_ctrl_link_reset(sdev, true);
+ if (ret < 0) {
+ dev_err(sdev->dev,
+ "error: failed to reset controller during suspend\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int hda_resume(struct snd_sof_dev *sdev)
+{
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+ struct hdac_bus *bus = sof_to_bus(sdev);
+ struct hdac_ext_link *hlink = NULL;
+#endif
+ int ret;
+
+ /*
+ * clear TCSEL to clear playback on some HD Audio
+ * codecs. PCI TCSEL is defined in the Intel manuals.
+ */
+ snd_sof_pci_update_bits(sdev, PCI_TCSEL, 0x07, 0);
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+ /* reset and start hda controller */
+ ret = hda_dsp_ctrl_init_chip(sdev, true);
+ if (ret < 0) {
+ dev_err(sdev->dev,
+ "error: failed to start controller after resume\n");
+ return ret;
+ }
+
+ hda_dsp_ctrl_misc_clock_gating(sdev, false);
+
+ /* Reset stream-to-link mapping */
+ list_for_each_entry(hlink, &bus->hlink_list, list)
+ bus->io_ops->reg_writel(0, hlink->ml_addr + AZX_REG_ML_LOSIDV);
+
+ hda_dsp_ctrl_misc_clock_gating(sdev, true);
+
+ /* enable ppcap interrupt */
+ snd_hdac_ext_bus_ppcap_enable(bus, true);
+ snd_hdac_ext_bus_ppcap_int_enable(bus, true);
+#else
+
+ hda_dsp_ctrl_misc_clock_gating(sdev, false);
+
+ /* reset controller */
+ ret = hda_dsp_ctrl_link_reset(sdev, true);
+ if (ret < 0) {
+ dev_err(sdev->dev,
+ "error: failed to reset controller during resume\n");
+ return ret;
+ }
+
+ /* take controller out of reset */
+ ret = hda_dsp_ctrl_link_reset(sdev, false);
+ if (ret < 0) {
+ dev_err(sdev->dev,
+ "error: failed to ready controller during resume\n");
+ return ret;
+ }
+
+ /* enable hda bus irq */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, SOF_HDA_INTCTL,
+ SOF_HDA_INT_CTRL_EN | SOF_HDA_INT_GLOBAL_EN,
+ SOF_HDA_INT_CTRL_EN | SOF_HDA_INT_GLOBAL_EN);
+
+ hda_dsp_ctrl_misc_clock_gating(sdev, true);
+
+ /* enable ppcap interrupt */
+ hda_dsp_ctrl_ppcap_enable(sdev, true);
+ hda_dsp_ctrl_ppcap_int_enable(sdev, true);
+#endif
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+ /* turn off the links that were off before suspend */
+ list_for_each_entry(hlink, &bus->hlink_list, list) {
+ if (!hlink->ref_count)
+ snd_hdac_ext_bus_link_power_down(hlink);
+ }
+
+ /* check dma status and clean up CORB/RIRB buffers */
+ if (!bus->cmd_dma_state)
+ snd_hdac_bus_stop_cmd_io(bus);
+#endif
+
+ return 0;
+}
+
+int hda_dsp_resume(struct snd_sof_dev *sdev)
+{
+ /* init hda controller. DSP cores will be powered up during fw boot */
+ return hda_resume(sdev);
+}
+
+int hda_dsp_runtime_resume(struct snd_sof_dev *sdev)
+{
+ /* init hda controller. DSP cores will be powered up during fw boot */
+ return hda_resume(sdev);
+}
+
+int hda_dsp_runtime_suspend(struct snd_sof_dev *sdev, int state)
+{
+ /* stop hda controller and power dsp off */
+ return hda_suspend(sdev, state);
+}
+
+int hda_dsp_suspend(struct snd_sof_dev *sdev, int state)
+{
+ struct hdac_bus *bus = sof_to_bus(sdev);
+ int ret;
+
+ /* stop hda controller and power dsp off */
+ ret = hda_suspend(sdev, state);
+ if (ret < 0) {
+ dev_err(bus->dev, "error: suspending dsp\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+void hda_dsp_set_hw_params_upon_resume(struct snd_sof_dev *sdev)
+{
+ struct hdac_bus *bus = sof_to_bus(sdev);
+ struct sof_intel_hda_stream *hda_stream;
+ struct hdac_ext_stream *stream;
+ struct hdac_stream *s;
+
+ /* set internal flag for BE */
+ list_for_each_entry(s, &bus->stream_list, list) {
+ stream = stream_to_hdac_ext_stream(s);
+ hda_stream = container_of(stream, struct sof_intel_hda_stream,
+ hda_stream);
+ hda_stream->hw_params_upon_resume = 1;
+ }
+}
diff --git a/sound/soc/sof/intel/hda-ipc.c b/sound/soc/sof/intel/hda-ipc.c
new file mode 100644
index 000000000000..73ead7070cde
--- /dev/null
+++ b/sound/soc/sof/intel/hda-ipc.c
@@ -0,0 +1,455 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018 Intel Corporation. All rights reserved.
+//
+// Authors: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+// Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
+// Rander Wang <rander.wang@intel.com>
+// Keyon Jie <yang.jie@linux.intel.com>
+//
+
+/*
+ * Hardware interface for generic Intel audio DSP HDA IP
+ */
+
+#include "../ops.h"
+#include "hda.h"
+
+static void hda_dsp_ipc_host_done(struct snd_sof_dev *sdev)
+{
+ /*
+ * tell DSP cmd is done - clear busy
+ * interrupt and send reply msg to dsp
+ */
+ snd_sof_dsp_update_bits_forced(sdev, HDA_DSP_BAR,
+ HDA_DSP_REG_HIPCT,
+ HDA_DSP_REG_HIPCT_BUSY,
+ HDA_DSP_REG_HIPCT_BUSY);
+
+ /* unmask BUSY interrupt */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR,
+ HDA_DSP_REG_HIPCCTL,
+ HDA_DSP_REG_HIPCCTL_BUSY,
+ HDA_DSP_REG_HIPCCTL_BUSY);
+}
+
+static void hda_dsp_ipc_dsp_done(struct snd_sof_dev *sdev)
+{
+ /*
+ * set DONE bit - tell DSP we have received the reply msg
+ * from DSP, and processed it, don't send more reply to host
+ */
+ snd_sof_dsp_update_bits_forced(sdev, HDA_DSP_BAR,
+ HDA_DSP_REG_HIPCIE,
+ HDA_DSP_REG_HIPCIE_DONE,
+ HDA_DSP_REG_HIPCIE_DONE);
+
+ /* unmask Done interrupt */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR,
+ HDA_DSP_REG_HIPCCTL,
+ HDA_DSP_REG_HIPCCTL_DONE,
+ HDA_DSP_REG_HIPCCTL_DONE);
+}
+
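+/* write the IPC message to the host mailbox and set the HIPCI busy bit to notify the DSP */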
+int hda_dsp_ipc_send_msg(struct snd_sof_dev *sdev, struct snd_sof_ipc_msg *msg)
+{
+ u32 cmd = msg->header;
+
+ /* send IPC message to DSP */
+ sof_mailbox_write(sdev, sdev->host_box.offset, msg->msg_data,
+ msg->msg_size);
+ snd_sof_dsp_write(sdev, HDA_DSP_BAR, HDA_DSP_REG_HIPCI,
+ cmd | HDA_DSP_REG_HIPCI_BUSY);
+
+ return 0;
+}
+
+void hda_dsp_ipc_get_reply(struct snd_sof_dev *sdev)
+{
+ struct snd_sof_ipc_msg *msg = sdev->msg;
+ struct sof_ipc_reply reply;
+ struct sof_ipc_cmd_hdr *hdr;
+ unsigned long flags;
+ int ret = 0;
+
+ /*
+ * Sometimes an unexpected reply IPC arrives that does not belong
+ * to any of the IPCs sent by the driver. In this case, the driver
+ * must ignore the IPC.
+ */
+ if (!msg) {
+ dev_warn(sdev->dev, "unexpected ipc interrupt raised!\n");
+ return;
+ }
+ spin_lock_irqsave(&sdev->ipc_lock, flags);
+
+ hdr = msg->msg_data;
+ if (hdr->cmd == (SOF_IPC_GLB_PM_MSG | SOF_IPC_PM_CTX_SAVE)) {
+ /*
+ * memory windows are powered off before sending IPC reply,
+ * so we can't read the mailbox for CTX_SAVE reply.
+ */
+ reply.error = 0;
+ reply.hdr.cmd = SOF_IPC_GLB_REPLY;
+ reply.hdr.size = sizeof(reply);
+ memcpy(msg->reply_data, &reply, sizeof(reply));
+ goto out;
+ }
+
+ /* get IPC reply from DSP in the mailbox */
+ sof_mailbox_read(sdev, sdev->host_box.offset, &reply,
+ sizeof(reply));
+
+ if (reply.error < 0) {
+ memcpy(msg->reply_data, &reply, sizeof(reply));
+ ret = reply.error;
+ } else {
+ /* reply correct size ? */
+ if (reply.hdr.size != msg->reply_size) {
+ dev_err(sdev->dev, "error: reply expected %zu got %u bytes\n",
+ msg->reply_size, reply.hdr.size);
+ ret = -EINVAL;
+ }
+
+ /* read the message */
+ if (msg->reply_size > 0)
+ sof_mailbox_read(sdev, sdev->host_box.offset,
+ msg->reply_data, msg->reply_size);
+ }
+
+out:
+ msg->reply_error = ret;
+
+ spin_unlock_irqrestore(&sdev->ipc_lock, flags);
+}
+
+static bool hda_dsp_ipc_is_sof(uint32_t msg)
+{
+ return (msg & (HDA_DSP_IPC_PURGE_FW | 0xf << 9)) != msg ||
+ (msg & HDA_DSP_IPC_PURGE_FW) != HDA_DSP_IPC_PURGE_FW;
+}
+
+/* IPC handler thread */
+irqreturn_t hda_dsp_ipc_irq_thread(int irq, void *context)
+{
+ struct snd_sof_dev *sdev = context;
+ irqreturn_t ret = IRQ_NONE;
+ u32 hipci;
+ u32 hipcie;
+ u32 hipct;
+ u32 hipcte;
+ u32 hipcctl;
+ u32 msg;
+ u32 msg_ext;
+
+ /* read IPC status */
+ hipcie = snd_sof_dsp_read(sdev, HDA_DSP_BAR,
+ HDA_DSP_REG_HIPCIE);
+ hipct = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_REG_HIPCT);
+ hipcctl = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_REG_HIPCCTL);
+
+ /* reenable IPC interrupt */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPIC,
+ HDA_DSP_ADSPIC_IPC, HDA_DSP_ADSPIC_IPC);
+
+ /* is this a reply message from the DSP */
+ if (hipcie & HDA_DSP_REG_HIPCIE_DONE &&
+ hipcctl & HDA_DSP_REG_HIPCCTL_DONE) {
+ hipci = snd_sof_dsp_read(sdev, HDA_DSP_BAR,
+ HDA_DSP_REG_HIPCI);
+ msg = hipci & HDA_DSP_REG_HIPCI_MSG_MASK;
+ msg_ext = hipcie & HDA_DSP_REG_HIPCIE_MSG_MASK;
+
+ dev_vdbg(sdev->dev,
+ "ipc: firmware response, msg:0x%x, msg_ext:0x%x\n",
+ msg, msg_ext);
+
+ /* mask Done interrupt */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR,
+ HDA_DSP_REG_HIPCCTL,
+ HDA_DSP_REG_HIPCCTL_DONE, 0);
+
+ /* handle immediate reply from DSP core - ignore ROM messages */
+ if (hda_dsp_ipc_is_sof(msg)) {
+ hda_dsp_ipc_get_reply(sdev);
+ snd_sof_ipc_reply(sdev, msg);
+ }
+
+ /* wake up sleeper if we are loading code */
+ if (sdev->code_loading) {
+ sdev->code_loading = 0;
+ wake_up(&sdev->waitq);
+ }
+
+ /* set the done bit */
+ hda_dsp_ipc_dsp_done(sdev);
+
+ ret = IRQ_HANDLED;
+ }
+
+ /* is this a new message from DSP */
+ if (hipct & HDA_DSP_REG_HIPCT_BUSY &&
+ hipcctl & HDA_DSP_REG_HIPCCTL_BUSY) {
+
+ hipcte = snd_sof_dsp_read(sdev, HDA_DSP_BAR,
+ HDA_DSP_REG_HIPCTE);
+ msg = hipct & HDA_DSP_REG_HIPCT_MSG_MASK;
+ msg_ext = hipcte & HDA_DSP_REG_HIPCTE_MSG_MASK;
+
+ dev_vdbg(sdev->dev,
+ "ipc: firmware initiated, msg:0x%x, msg_ext:0x%x\n",
+ msg, msg_ext);
+
+ /* mask BUSY interrupt */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR,
+ HDA_DSP_REG_HIPCCTL,
+ HDA_DSP_REG_HIPCCTL_BUSY, 0);
+
+ /* handle messages from DSP */
+ if ((hipct & SOF_IPC_PANIC_MAGIC_MASK) == SOF_IPC_PANIC_MAGIC) {
+ /* this is a PANIC message !! */
+ snd_sof_dsp_panic(sdev, HDA_DSP_PANIC_OFFSET(msg_ext));
+ } else {
+ /* normal message - process normally */
+ snd_sof_ipc_msgs_rx(sdev);
+ }
+
+ hda_dsp_ipc_host_done(sdev);
+
+ ret = IRQ_HANDLED;
+ }
+
+ return ret;
+}
+
+/* is this IRQ for ADSP ? - we only care about IPC here */
+irqreturn_t hda_dsp_ipc_irq_handler(int irq, void *context)
+{
+ struct snd_sof_dev *sdev = context;
+ int ret = IRQ_NONE;
+ u32 irq_status;
+
+ spin_lock(&sdev->hw_lock);
+
+ /* store status */
+ irq_status = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPIS);
+ dev_vdbg(sdev->dev, "irq handler: irq_status:0x%x\n", irq_status);
+
+ /* invalid message ? */
+ if (irq_status == 0xffffffff)
+ goto out;
+
+ /* IPC message ? */
+ if (irq_status & HDA_DSP_ADSPIS_IPC) {
+ /* disable IPC interrupt */
+ snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR,
+ HDA_DSP_REG_ADSPIC,
+ HDA_DSP_ADSPIC_IPC, 0);
+ ret = IRQ_WAKE_THREAD;
+ }
+
+out:
+ spin_unlock(&sdev->hw_lock);
+ return ret;
+}
+
+/* IPC Firmware ready */
+
+static void ipc_get_windows(struct snd_sof_dev *sdev)
+{
+ struct sof_ipc_window_elem *elem;
+ u32 outbox_offset = 0;
+ u32 stream_offset = 0;
+ u32 inbox_offset = 0;
+ u32 outbox_size = 0;
+ u32 stream_size = 0;
+ u32 inbox_size = 0;
+ int i;
+
+ if (!sdev->info_window) {
+ dev_err(sdev->dev, "error: have no window info\n");
+ return;
+ }
+
+ for (i = 0; i < sdev->info_window->num_windows; i++) {
+ elem = &sdev->info_window->window[i];
+
+ switch (elem->type) {
+ case SOF_IPC_REGION_UPBOX:
+ inbox_offset =
+ elem->offset + SRAM_WINDOW_OFFSET(elem->id);
+ inbox_size = elem->size;
+ snd_sof_debugfs_io_item(sdev,
+ sdev->bar[HDA_DSP_BAR] +
+ inbox_offset,
+ elem->size, "inbox",
+ SOF_DEBUGFS_ACCESS_D0_ONLY);
+ break;
+ case SOF_IPC_REGION_DOWNBOX:
+ outbox_offset =
+ elem->offset + SRAM_WINDOW_OFFSET(elem->id);
+ outbox_size = elem->size;
+ snd_sof_debugfs_io_item(sdev,
+ sdev->bar[HDA_DSP_BAR] +
+ outbox_offset,
+ elem->size, "outbox",
+ SOF_DEBUGFS_ACCESS_D0_ONLY);
+ break;
+ case SOF_IPC_REGION_TRACE:
+ snd_sof_debugfs_io_item(sdev,
+ sdev->bar[HDA_DSP_BAR] +
+ elem->offset +
+ SRAM_WINDOW_OFFSET
+ (elem->id),
+ elem->size, "etrace",
+ SOF_DEBUGFS_ACCESS_D0_ONLY);
+ break;
+ case SOF_IPC_REGION_DEBUG:
+ snd_sof_debugfs_io_item(sdev,
+ sdev->bar[HDA_DSP_BAR] +
+ elem->offset +
+ SRAM_WINDOW_OFFSET
+ (elem->id),
+ elem->size, "debug",
+ SOF_DEBUGFS_ACCESS_D0_ONLY);
+ break;
+ case SOF_IPC_REGION_STREAM:
+ stream_offset =
+ elem->offset + SRAM_WINDOW_OFFSET(elem->id);
+ stream_size = elem->size;
+ snd_sof_debugfs_io_item(sdev,
+ sdev->bar[HDA_DSP_BAR] +
+ elem->offset +
+ SRAM_WINDOW_OFFSET
+ (elem->id),
+ elem->size, "stream",
+ SOF_DEBUGFS_ACCESS_D0_ONLY);
+ break;
+ case SOF_IPC_REGION_REGS:
+ snd_sof_debugfs_io_item(sdev,
+ sdev->bar[HDA_DSP_BAR] +
+ elem->offset +
+ SRAM_WINDOW_OFFSET
+ (elem->id),
+ elem->size, "regs",
+ SOF_DEBUGFS_ACCESS_D0_ONLY);
+ break;
+ case SOF_IPC_REGION_EXCEPTION:
+ sdev->dsp_oops_offset = elem->offset +
+ SRAM_WINDOW_OFFSET(elem->id);
+ snd_sof_debugfs_io_item(sdev,
+ sdev->bar[HDA_DSP_BAR] +
+ elem->offset +
+ SRAM_WINDOW_OFFSET
+ (elem->id),
+ elem->size, "exception",
+ SOF_DEBUGFS_ACCESS_D0_ONLY);
+ break;
+ default:
+ dev_err(sdev->dev, "error: get illegal window info\n");
+ return;
+ }
+ }
+
+ if (outbox_size == 0 || inbox_size == 0) {
+ dev_err(sdev->dev, "error: get illegal mailbox window\n");
+ return;
+ }
+
+ snd_sof_dsp_mailbox_init(sdev, inbox_offset, inbox_size,
+ outbox_offset, outbox_size);
+ sdev->stream_box.offset = stream_offset;
+ sdev->stream_box.size = stream_size;
+
+ dev_dbg(sdev->dev, " mailbox upstream 0x%x - size 0x%x\n",
+ inbox_offset, inbox_size);
+ dev_dbg(sdev->dev, " mailbox downstream 0x%x - size 0x%x\n",
+ outbox_offset, outbox_size);
+ dev_dbg(sdev->dev, " stream region 0x%x - size 0x%x\n",
+ stream_offset, stream_size);
+}
+
+/* check for ABI compatibility and create memory windows on first boot */
+int hda_dsp_ipc_fw_ready(struct snd_sof_dev *sdev, u32 msg_id)
+{
+ struct sof_ipc_fw_ready *fw_ready = &sdev->fw_ready;
+ u32 offset;
+ int ret;
+
+ /* mailbox must be on 4k boundary */
+ offset = HDA_DSP_MBOX_UPLINK_OFFSET;
+
+ dev_dbg(sdev->dev, "ipc: DSP is ready 0x%8.8x offset 0x%x\n",
+ msg_id, offset);
+
+ /* no need to re-check version/ABI for subsequent boots */
+ if (!sdev->first_boot)
+ return 0;
+
+ /* copy data from the DSP FW ready offset */
+ sof_block_read(sdev, sdev->mmio_bar, offset, fw_ready,
+ sizeof(*fw_ready));
+
+ /* make sure ABI version is compatible */
+ ret = snd_sof_ipc_valid(sdev);
+ if (ret < 0)
+ return ret;
+
+ /* now check for extended data */
+ snd_sof_fw_parse_ext_data(sdev, sdev->mmio_bar,
+ HDA_DSP_MBOX_UPLINK_OFFSET +
+ sizeof(struct sof_ipc_fw_ready));
+
+ ipc_get_windows(sdev);
+
+ return 0;
+}
+
+void hda_ipc_msg_data(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream,
+ void *p, size_t sz)
+{
+ if (!substream || !sdev->stream_box.size) {
+ sof_mailbox_read(sdev, sdev->dsp_box.offset, p, sz);
+ } else {
+ struct hdac_stream *hstream = substream->runtime->private_data;
+ struct sof_intel_hda_stream *hda_stream;
+
+ hda_stream = container_of(hstream,
+ struct sof_intel_hda_stream,
+ hda_stream.hstream);
+
+ /* The stream might already be closed */
+ if (hstream)
+ sof_mailbox_read(sdev, hda_stream->stream.posn_offset,
+ p, sz);
+ }
+}
+
+int hda_ipc_pcm_params(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream,
+ const struct sof_ipc_pcm_params_reply *reply)
+{
+ struct hdac_stream *hstream = substream->runtime->private_data;
+ struct sof_intel_hda_stream *hda_stream;
+ /* validate offset */
+ size_t posn_offset = reply->posn_offset;
+
+ hda_stream = container_of(hstream, struct sof_intel_hda_stream,
+ hda_stream.hstream);
+
+ /* check for unaligned offset or overflow */
+ if (posn_offset > sdev->stream_box.size ||
+ posn_offset % sizeof(struct sof_ipc_stream_posn) != 0)
+ return -EINVAL;
+
+ hda_stream->stream.posn_offset = sdev->stream_box.offset + posn_offset;
+
+ dev_dbg(sdev->dev, "pcm: stream dir %d, posn mailbox offset is %zu",
+ substream->stream, hda_stream->stream.posn_offset);
+
+ return 0;
+}
diff --git a/sound/soc/sof/intel/hda-loader.c b/sound/soc/sof/intel/hda-loader.c
new file mode 100644
index 000000000000..6427f0b3a2f1
--- /dev/null
+++ b/sound/soc/sof/intel/hda-loader.c
@@ -0,0 +1,382 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018 Intel Corporation. All rights reserved.
+//
+// Authors: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+// Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
+// Rander Wang <rander.wang@intel.com>
+// Keyon Jie <yang.jie@linux.intel.com>
+//
+
+/*
+ * Hardware interface for HDA DSP code loader
+ */
+
+#include <linux/firmware.h>
+#include <sound/hdaudio_ext.h>
+#include <sound/sof.h>
+#include "../ops.h"
+#include "hda.h"
+
+#define HDA_FW_BOOT_ATTEMPTS 3
+
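+/*
+ * Prepare a playback stream for code loading: allocate the DMA buffer,
+ * program the stream and enable SPIB. Returns the stream tag on success
+ * or a negative error code.
+ */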
+static int cl_stream_prepare(struct snd_sof_dev *sdev, unsigned int format,
+ unsigned int size, struct snd_dma_buffer *dmab,
+ int direction)
+{
+ struct hdac_ext_stream *dsp_stream;
+ struct hdac_stream *hstream;
+ struct pci_dev *pci = to_pci_dev(sdev->dev);
+ int ret;
+
+ if (direction != SNDRV_PCM_STREAM_PLAYBACK) {
+ dev_err(sdev->dev, "error: code loading DMA is playback only\n");
+ return -EINVAL;
+ }
+
+ dsp_stream = hda_dsp_stream_get(sdev, direction);
+
+ if (!dsp_stream) {
+ dev_err(sdev->dev, "error: no stream available\n");
+ return -ENODEV;
+ }
+ hstream = &dsp_stream->hstream;
+
+ /* allocate DMA buffer */
+ ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV_SG, &pci->dev, size, dmab);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: memory alloc failed: %x\n", ret);
+ goto error;
+ }
+
+ hstream->period_bytes = 0; /* initialize period_bytes */
+ hstream->format_val = format;
+ hstream->bufsize = size;
+
+ ret = hda_dsp_stream_hw_params(sdev, dsp_stream, dmab, NULL);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: hdac prepare failed: %x\n", ret);
+ goto error;
+ }
+
+ hda_dsp_stream_spib_config(sdev, dsp_stream, HDA_DSP_SPIB_ENABLE, size);
+
+ return hstream->stream_tag;
+
+error:
+ hda_dsp_stream_put(sdev, direction, hstream->stream_tag);
+ snd_dma_free_pages(dmab);
+ return ret;
+}
+
+/*
+ * The first boot sequence has some extra steps: core 0 waits for the power
+ * status of core 1, so power up core 1 momentarily as well, keep it in
+ * reset/stall, and then turn it off.
+ */
+static int cl_dsp_init(struct snd_sof_dev *sdev, const void *fwdata,
+ u32 fwsize, int stream_tag)
+{
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+ const struct sof_intel_dsp_desc *chip = hda->desc;
+ unsigned int status;
+ int ret;
+ int i;
+
+ /* step 1: power up corex */
+ ret = hda_dsp_core_power_up(sdev, chip->cores_mask);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: dsp core 0/1 power up failed\n");
+ goto err;
+ }
+
+ /* DSP is powered up, set all SSPs to slave mode */
+ for (i = 0; i < chip->ssp_count; i++) {
+ snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR,
+ chip->ssp_base_offset
+ + i * SSP_DEV_MEM_SIZE
+ + SSP_SSC1_OFFSET,
+ SSP_SET_SLAVE,
+ SSP_SET_SLAVE);
+ }
+
+ /* step 2: purge FW request */
+ snd_sof_dsp_write(sdev, HDA_DSP_BAR, chip->ipc_req,
+ chip->ipc_req_mask | (HDA_DSP_IPC_PURGE_FW |
+ ((stream_tag - 1) << 9)));
+
+ /* step 3: unset core 0 reset state & unstall/run core 0 */
+ ret = hda_dsp_core_run(sdev, HDA_DSP_CORE_MASK(0));
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: dsp core start failed %d\n", ret);
+ ret = -EIO;
+ goto err;
+ }
+
+ /* step 4: wait for IPC DONE bit from ROM */
+ ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
+ chip->ipc_ack, status,
+ ((status & chip->ipc_ack_mask)
+ == chip->ipc_ack_mask),
+ HDA_DSP_REG_POLL_INTERVAL_US,
+ HDA_DSP_INIT_TIMEOUT_US);
+
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: waiting for HIPCIE done\n");
+ goto err;
+ }
+
+ /* step 5: power down corex */
+ ret = hda_dsp_core_power_down(sdev,
+ chip->cores_mask & ~(HDA_DSP_CORE_MASK(0)));
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: dsp core x power down failed\n");
+ goto err;
+ }
+
+ /* step 6: enable IPC interrupts */
+ hda_dsp_ipc_int_enable(sdev);
+
+ /* step 7: wait for ROM init */
+ ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
+ HDA_DSP_SRAM_REG_ROM_STATUS, status,
+ ((status & HDA_DSP_ROM_STS_MASK)
+ == HDA_DSP_ROM_INIT),
+ HDA_DSP_REG_POLL_INTERVAL_US,
+ chip->rom_init_timeout *
+ USEC_PER_MSEC);
+ if (!ret)
+ return 0;
+
+err:
+ hda_dsp_dump(sdev, SOF_DBG_REGS | SOF_DBG_PCI | SOF_DBG_MBOX);
+ hda_dsp_core_reset_power_down(sdev, chip->cores_mask);
+
+ return ret;
+}
+
+static int cl_trigger(struct snd_sof_dev *sdev,
+ struct hdac_ext_stream *stream, int cmd)
+{
+ struct hdac_stream *hstream = &stream->hstream;
+ int sd_offset = SOF_STREAM_SD_OFFSET(hstream);
+
+ /* the code loader is a special case that reuses stream ops */
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ wait_event_timeout(sdev->waitq, !sdev->code_loading,
+ HDA_DSP_CL_TRIGGER_TIMEOUT);
+
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, SOF_HDA_INTCTL,
+ 1 << hstream->index,
+ 1 << hstream->index);
+
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
+ sd_offset,
+ SOF_HDA_SD_CTL_DMA_START |
+ SOF_HDA_CL_DMA_SD_INT_MASK,
+ SOF_HDA_SD_CTL_DMA_START |
+ SOF_HDA_CL_DMA_SD_INT_MASK);
+
+ hstream->running = true;
+ return 0;
+ default:
+ return hda_dsp_stream_trigger(sdev, stream, cmd);
+ }
+}
+
+static struct hdac_ext_stream *get_stream_with_tag(struct snd_sof_dev *sdev,
+ int tag)
+{
+ struct hdac_bus *bus = sof_to_bus(sdev);
+ struct hdac_stream *s;
+
+ /* get stream with tag */
+ list_for_each_entry(s, &bus->stream_list, list) {
+ if (s->direction == SNDRV_PCM_STREAM_PLAYBACK &&
+ s->stream_tag == tag) {
+ return stream_to_hdac_ext_stream(s);
+ }
+ }
+
+ return NULL;
+}
+
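+/* disable SPIB, release the code loader stream and free its DMA buffer */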
+static int cl_cleanup(struct snd_sof_dev *sdev, struct snd_dma_buffer *dmab,
+ struct hdac_ext_stream *stream)
+{
+ struct hdac_stream *hstream = &stream->hstream;
+ int sd_offset = SOF_STREAM_SD_OFFSET(hstream);
+ int ret;
+
+ ret = hda_dsp_stream_spib_config(sdev, stream, HDA_DSP_SPIB_DISABLE, 0);
+
+ hda_dsp_stream_put(sdev, SNDRV_PCM_STREAM_PLAYBACK,
+ hstream->stream_tag);
+ hstream->running = 0;
+ hstream->substream = NULL;
+
+ /* reset BDL address */
+ snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR,
+ sd_offset + SOF_HDA_ADSP_REG_CL_SD_BDLPL, 0);
+ snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR,
+ sd_offset + SOF_HDA_ADSP_REG_CL_SD_BDLPU, 0);
+
+ snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR, sd_offset, 0);
+ snd_dma_free_pages(dmab);
+ dmab->area = NULL;
+ hstream->bufsize = 0;
+ hstream->format_val = 0;
+
+ return ret;
+}
+
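+/* start the code loader DMA and wait for the ROM to report that the FW entered */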
+static int cl_copy_fw(struct snd_sof_dev *sdev, struct hdac_ext_stream *stream)
+{
+ unsigned int reg;
+ int ret, status;
+
+ ret = cl_trigger(sdev, stream, SNDRV_PCM_TRIGGER_START);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: DMA trigger start failed\n");
+ return ret;
+ }
+
+ status = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
+ HDA_DSP_SRAM_REG_ROM_STATUS, reg,
+ ((reg & HDA_DSP_ROM_STS_MASK)
+ == HDA_DSP_ROM_FW_ENTERED),
+ HDA_DSP_REG_POLL_INTERVAL_US,
+ HDA_DSP_BASEFW_TIMEOUT_US);
+
+ ret = cl_trigger(sdev, stream, SNDRV_PCM_TRIGGER_STOP);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: DMA trigger stop failed\n");
+ return ret;
+ }
+
+ return status;
+}
+
+int hda_dsp_cl_boot_firmware(struct snd_sof_dev *sdev)
+{
+ struct snd_sof_pdata *plat_data = sdev->pdata;
+ const struct sof_dev_desc *desc = plat_data->desc;
+ const struct sof_intel_dsp_desc *chip_info;
+ struct hdac_ext_stream *stream;
+ struct firmware stripped_firmware;
+ int ret, ret1, tag, i;
+
+ chip_info = desc->chip_info;
+
+ stripped_firmware.data = plat_data->fw->data;
+ stripped_firmware.size = plat_data->fw->size;
+
+ /* init for booting wait */
+ init_waitqueue_head(&sdev->boot_wait);
+ sdev->boot_complete = false;
+
+ /* prepare DMA for code loader stream */
+ tag = cl_stream_prepare(sdev, 0x40, stripped_firmware.size,
+ &sdev->dmab, SNDRV_PCM_STREAM_PLAYBACK);
+
+ if (tag < 0) {
+ dev_err(sdev->dev, "error: dma prepare for fw loading err: %x\n",
+ tag);
+ return tag;
+ }
+
+ /* get stream with tag */
+ stream = get_stream_with_tag(sdev, tag);
+ if (!stream) {
+ dev_err(sdev->dev,
+ "error: could not get stream with stream tag %d\n",
+ tag);
+ ret = -ENODEV;
+ goto err;
+ }
+
+ memcpy(sdev->dmab.area, stripped_firmware.data,
+ stripped_firmware.size);
+
+ /* try ROM init a few times before giving up */
+ for (i = 0; i < HDA_FW_BOOT_ATTEMPTS; i++) {
+ ret = cl_dsp_init(sdev, stripped_firmware.data,
+ stripped_firmware.size, tag);
+
+ /* don't retry anymore if successful */
+ if (!ret)
+ break;
+
+ dev_err(sdev->dev, "error: Error code=0x%x: FW status=0x%x\n",
+ snd_sof_dsp_read(sdev, HDA_DSP_BAR,
+ HDA_DSP_SRAM_REG_ROM_ERROR),
+ snd_sof_dsp_read(sdev, HDA_DSP_BAR,
+ HDA_DSP_SRAM_REG_ROM_STATUS));
+ dev_err(sdev->dev, "error: iteration %d of Core En/ROM load failed: %d\n",
+ i, ret);
+ }
+
+ if (i == HDA_FW_BOOT_ATTEMPTS) {
+ dev_err(sdev->dev, "error: dsp init failed after %d attempts with err: %d\n",
+ i, ret);
+ goto cleanup;
+ }
+
+ /*
+ * at this point DSP ROM has been initialized and
+ * should be ready for code loading and firmware boot
+ */
+ ret = cl_copy_fw(sdev, stream);
+ if (!ret)
+ dev_dbg(sdev->dev, "Firmware download successful, booting...\n");
+ else
+ dev_err(sdev->dev, "error: load fw failed ret: %d\n", ret);
+
+cleanup:
+ /*
+ * Perform codeloader stream cleanup.
+ * This should be done even if firmware loading fails.
+ */
+ ret1 = cl_cleanup(sdev, &sdev->dmab, stream);
+ if (ret1 < 0) {
+ dev_err(sdev->dev, "error: Code loader DSP cleanup failed\n");
+
+ /* set return value to indicate cleanup failure */
+ ret = ret1;
+ }
+
+ /*
+ * return the init core mask if both fw copy
+ * and stream cleanup are successful
+ */
+ if (!ret)
+ return chip_info->init_core_mask;
+
+ /* dump dsp registers and disable DSP upon error */
+err:
+ hda_dsp_dump(sdev, SOF_DBG_REGS | SOF_DBG_PCI | SOF_DBG_MBOX);
+
+ /* disable DSP */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_PP_BAR,
+ SOF_HDA_REG_PP_PPCTL,
+ SOF_HDA_PPCTL_GPROCEN, 0);
+ return ret;
+}
+
+/* pre fw run operations */
+int hda_dsp_pre_fw_run(struct snd_sof_dev *sdev)
+{
+ /* disable clock gating and power gating */
+ return hda_dsp_ctrl_clock_power_gating(sdev, false);
+}
+
+/* post fw run operations */
+int hda_dsp_post_fw_run(struct snd_sof_dev *sdev)
+{
+ /* re-enable clock gating and power gating */
+ return hda_dsp_ctrl_clock_power_gating(sdev, true);
+}
diff --git a/sound/soc/sof/intel/hda-pcm.c b/sound/soc/sof/intel/hda-pcm.c
new file mode 100644
index 000000000000..9b730f183529
--- /dev/null
+++ b/sound/soc/sof/intel/hda-pcm.c
@@ -0,0 +1,239 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018 Intel Corporation. All rights reserved.
+//
+// Authors: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+// Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
+// Rander Wang <rander.wang@intel.com>
+// Keyon Jie <yang.jie@linux.intel.com>
+//
+
+/*
+ * Hardware interface for generic Intel audio DSP HDA IP
+ */
+
+#include <sound/hda_register.h>
+#include <sound/pcm_params.h>
+#include "../ops.h"
+#include "hda.h"
+
+#define SDnFMT_BASE(x) ((x) << 14)
+#define SDnFMT_MULT(x) (((x) - 1) << 11)
+#define SDnFMT_DIV(x) (((x) - 1) << 8)
+#define SDnFMT_BITS(x) ((x) << 4)
+#define SDnFMT_CHAN(x) ((x) << 0)
+
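+/* map the PCM rate to the SDnFMT base/mult/div encoding, defaulting to 48kHz */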
+static inline u32 get_mult_div(struct snd_sof_dev *sdev, int rate)
+{
+ switch (rate) {
+ case 8000:
+ return SDnFMT_DIV(6);
+ case 9600:
+ return SDnFMT_DIV(5);
+ case 11025:
+ return SDnFMT_BASE(1) | SDnFMT_DIV(4);
+ case 16000:
+ return SDnFMT_DIV(3);
+ case 22050:
+ return SDnFMT_BASE(1) | SDnFMT_DIV(2);
+ case 32000:
+ return SDnFMT_DIV(3) | SDnFMT_MULT(2);
+ case 44100:
+ return SDnFMT_BASE(1);
+ case 48000:
+ return 0;
+ case 88200:
+ return SDnFMT_BASE(1) | SDnFMT_MULT(2);
+ case 96000:
+ return SDnFMT_MULT(2);
+ case 176400:
+ return SDnFMT_BASE(1) | SDnFMT_MULT(4);
+ case 192000:
+ return SDnFMT_MULT(4);
+ default:
+ dev_warn(sdev->dev, "can't find div rate %d using 48kHz\n",
+ rate);
+ return 0; /* use 48KHz if not found */
+ }
+};
+
+static inline u32 get_bits(struct snd_sof_dev *sdev, int sample_bits)
+{
+ switch (sample_bits) {
+ case 8:
+ return SDnFMT_BITS(0);
+ case 16:
+ return SDnFMT_BITS(1);
+ case 20:
+ return SDnFMT_BITS(2);
+ case 24:
+ return SDnFMT_BITS(3);
+ case 32:
+ return SDnFMT_BITS(4);
+ default:
+ dev_warn(sdev->dev, "can't find %d bits using 16bit\n",
+ sample_bits);
+ return SDnFMT_BITS(1); /* use 16bits format if not found */
+ }
+};
+
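+/* configure the host DMA stream and fill in the IPC stream parameters */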
+int hda_dsp_pcm_hw_params(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct sof_ipc_stream_params *ipc_params)
+{
+ struct hdac_stream *hstream = substream->runtime->private_data;
+ struct hdac_ext_stream *stream = stream_to_hdac_ext_stream(hstream);
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+ struct snd_dma_buffer *dmab;
+ int ret;
+ u32 size, rate, bits;
+
+ size = params_buffer_bytes(params);
+ rate = get_mult_div(sdev, params_rate(params));
+ bits = get_bits(sdev, params_width(params));
+
+ hstream->substream = substream;
+
+ dmab = substream->runtime->dma_buffer_p;
+
+ hstream->format_val = rate | bits | (params_channels(params) - 1);
+ hstream->bufsize = size;
+ hstream->period_bytes = params_period_bytes(params);
+ hstream->no_period_wakeup =
+ (params->info & SNDRV_PCM_INFO_NO_PERIOD_WAKEUP) &&
+ (params->flags & SNDRV_PCM_HW_PARAMS_NO_PERIOD_WAKEUP);
+
+ ret = hda_dsp_stream_hw_params(sdev, stream, dmab, params);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: hdac prepare failed: %x\n", ret);
+ return ret;
+ }
+
+ /* disable SPIB, to enable buffer wrap for stream */
+ hda_dsp_stream_spib_config(sdev, stream, HDA_DSP_SPIB_DISABLE, 0);
+
+ /* set host_period_bytes to 0 if no IPC position */
+ if (hda && hda->no_ipc_position)
+ ipc_params->host_period_bytes = 0;
+
+ ipc_params->stream_tag = hstream->stream_tag;
+
+ return 0;
+}
+
+int hda_dsp_pcm_trigger(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream, int cmd)
+{
+ struct hdac_stream *hstream = substream->runtime->private_data;
+ struct hdac_ext_stream *stream = stream_to_hdac_ext_stream(hstream);
+
+ return hda_dsp_stream_trigger(sdev, stream, cmd);
+}
+
+snd_pcm_uframes_t hda_dsp_pcm_pointer(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct hdac_stream *hstream = substream->runtime->private_data;
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+ struct snd_sof_pcm *spcm;
+ snd_pcm_uframes_t pos;
+
+ spcm = snd_sof_find_spcm_dai(sdev, rtd);
+ if (!spcm) {
+ dev_warn_ratelimited(sdev->dev, "warn: can't find PCM with DAI ID %d\n",
+ rtd->dai_link->id);
+ return 0;
+ }
+
+ if (hda && !hda->no_ipc_position) {
+ /* read position from IPC position */
+ pos = spcm->stream[substream->stream].posn.host_posn;
+ goto found;
+ }
+
+ /*
+ * DPIB/posbuf position mode:
+ * For Playback, Use DPIB register from HDA space which
+ * reflects the actual data transferred.
+ * For Capture, Use the position buffer for pointer, as DPIB
+ * is not accurate enough, its update may be completed
+ * earlier than the data written to DDR.
+ */
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ pos = snd_sof_dsp_read(sdev, HDA_DSP_HDA_BAR,
+ AZX_REG_VS_SDXDPIB_XBASE +
+ (AZX_REG_VS_SDXDPIB_XINTERVAL *
+ hstream->index));
+ } else {
+ /*
+ * For capture streams, additional workarounds are needed to fix
+ * incorrect position reporting:
+ *
+ * 1. Wait at least 20us before reading the position buffer after
+ * the interrupt is generated (IOC), to make sure the position update
+ * happens on a frame boundary, i.e. 20.833us for 48kHz.
+ * 2. Perform a dummy Read to DPIB register to flush DMA
+ * position value.
+ * 3. Read the DMA Position from posbuf. Now the readback
+ * value should be >= period boundary.
+ */
+ usleep_range(20, 21);
+ snd_sof_dsp_read(sdev, HDA_DSP_HDA_BAR,
+ AZX_REG_VS_SDXDPIB_XBASE +
+ (AZX_REG_VS_SDXDPIB_XINTERVAL *
+ hstream->index));
+ pos = snd_hdac_stream_get_pos_posbuf(hstream);
+ }
+
+ if (pos >= hstream->bufsize)
+ pos = 0;
+
+found:
+ pos = bytes_to_frames(substream->runtime, pos);
+
+ dev_vdbg(sdev->dev, "PCM: stream %d dir %d position %lu\n",
+ hstream->index, substream->stream, pos);
+ return pos;
+}
+
+int hda_dsp_pcm_open(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream)
+{
+ struct hdac_ext_stream *dsp_stream;
+ int direction = substream->stream;
+
+ dsp_stream = hda_dsp_stream_get(sdev, direction);
+
+ if (!dsp_stream) {
+ dev_err(sdev->dev, "error: no stream available\n");
+ return -ENODEV;
+ }
+
+ /* binding pcm substream to hda stream */
+ substream->runtime->private_data = &dsp_stream->hstream;
+ return 0;
+}
+
+int hda_dsp_pcm_close(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream)
+{
+ struct hdac_stream *hstream = substream->runtime->private_data;
+ int direction = substream->stream;
+ int ret;
+
+ ret = hda_dsp_stream_put(sdev, direction, hstream->stream_tag);
+
+ if (ret) {
+ dev_dbg(sdev->dev, "stream %s not opened!\n", substream->name);
+ return -ENODEV;
+ }
+
+ /* unbinding pcm substream to hda stream */
+ substream->runtime->private_data = NULL;
+ return 0;
+}
diff --git a/sound/soc/sof/intel/hda-stream.c b/sound/soc/sof/intel/hda-stream.c
new file mode 100644
index 000000000000..c92006f89499
--- /dev/null
+++ b/sound/soc/sof/intel/hda-stream.c
@@ -0,0 +1,701 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018 Intel Corporation. All rights reserved.
+//
+// Authors: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+// Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
+// Rander Wang <rander.wang@intel.com>
+// Keyon Jie <yang.jie@linux.intel.com>
+//
+
+/*
+ * Hardware interface for generic Intel audio DSP HDA IP
+ */
+
+#include <linux/pm_runtime.h>
+#include <sound/hdaudio_ext.h>
+#include <sound/hda_register.h>
+#include <sound/sof.h>
+#include "../ops.h"
+#include "hda.h"
+
+/*
+ * set up one of the BDL entries for a stream
+ */
+static int hda_setup_bdle(struct snd_sof_dev *sdev,
+ struct snd_dma_buffer *dmab,
+ struct hdac_stream *stream,
+ struct sof_intel_dsp_bdl **bdlp,
+ int offset, int size, int ioc)
+{
+ struct hdac_bus *bus = sof_to_bus(sdev);
+ struct sof_intel_dsp_bdl *bdl = *bdlp;
+
+ while (size > 0) {
+ dma_addr_t addr;
+ int chunk;
+
+ if (stream->frags >= HDA_DSP_MAX_BDL_ENTRIES) {
+ dev_err(sdev->dev, "error: stream frags exceeded\n");
+ return -EINVAL;
+ }
+
+ addr = snd_sgbuf_get_addr(dmab, offset);
+ /* program BDL addr */
+ bdl->addr_l = cpu_to_le32(lower_32_bits(addr));
+ bdl->addr_h = cpu_to_le32(upper_32_bits(addr));
+ /* program BDL size */
+ chunk = snd_sgbuf_get_chunk_size(dmab, offset, size);
+ /* one BDLE should not cross 4K boundary */
+ if (bus->align_bdle_4k) {
+ u32 remain = 0x1000 - (offset & 0xfff);
+
+ if (chunk > remain)
+ chunk = remain;
+ }
+ bdl->size = cpu_to_le32(chunk);
+ /* only program IOC when the whole segment is processed */
+ size -= chunk;
+ bdl->ioc = (size || !ioc) ? 0 : cpu_to_le32(0x01);
+ bdl++;
+ stream->frags++;
+ offset += chunk;
+
+ dev_vdbg(sdev->dev, "bdl, frags:%d, chunk size:0x%x;\n",
+ stream->frags, chunk);
+ }
+
+ *bdlp = bdl;
+ return offset;
+}
+
+/*
+ * set up the Buffer Descriptor List (BDL) for host memory transfer.
+ * The BDL describes the location of the individual buffers and is little endian.
+ */
+int hda_dsp_stream_setup_bdl(struct snd_sof_dev *sdev,
+ struct snd_dma_buffer *dmab,
+ struct hdac_stream *stream)
+{
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+ struct sof_intel_dsp_bdl *bdl;
+ int i, offset, period_bytes, periods;
+ int remain, ioc;
+
+ period_bytes = stream->period_bytes;
+ dev_dbg(sdev->dev, "period_bytes:0x%x\n", period_bytes);
+ if (!period_bytes)
+ period_bytes = stream->bufsize;
+
+ periods = stream->bufsize / period_bytes;
+
+ dev_dbg(sdev->dev, "periods:%d\n", periods);
+
+ remain = stream->bufsize % period_bytes;
+ if (remain)
+ periods++;
+
+ /* program the initial BDL entries */
+ bdl = (struct sof_intel_dsp_bdl *)stream->bdl.area;
+ offset = 0;
+ stream->frags = 0;
+
+ /*
+ * set IOC when position update IPC is not used
+ * and period wakeup is needed
+ */
+ ioc = hda->no_ipc_position ?
+ !stream->no_period_wakeup : 0;
+
+ for (i = 0; i < periods; i++) {
+ if (i == (periods - 1) && remain)
+ /* set the last small entry */
+ offset = hda_setup_bdle(sdev, dmab,
+ stream, &bdl, offset,
+ remain, 0);
+ else
+ offset = hda_setup_bdle(sdev, dmab,
+ stream, &bdl, offset,
+ period_bytes, ioc);
+ }
+
+ return offset;
+}
+
+int hda_dsp_stream_spib_config(struct snd_sof_dev *sdev,
+ struct hdac_ext_stream *stream,
+ int enable, u32 size)
+{
+ struct hdac_stream *hstream = &stream->hstream;
+ u32 mask;
+
+ if (!sdev->bar[HDA_DSP_SPIB_BAR]) {
+ dev_err(sdev->dev, "error: address of spib capability is NULL\n");
+ return -EINVAL;
+ }
+
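+ /* each stream has its own SPIB enable bit in SPBFCCTL */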
+ mask = (1 << hstream->index);
+
+ /* enable/disable SPIB for the stream */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_SPIB_BAR,
+ SOF_HDA_ADSP_REG_CL_SPBFIFO_SPBFCCTL, mask,
+ enable << hstream->index);
+
+ /* set the SPIB value */
+ sof_io_write(sdev, stream->spib_addr, size);
+
+ return 0;
+}
+
+/* get next unused stream */
+struct hdac_ext_stream *
+hda_dsp_stream_get(struct snd_sof_dev *sdev, int direction)
+{
+ struct hdac_bus *bus = sof_to_bus(sdev);
+ struct hdac_ext_stream *stream = NULL;
+ struct hdac_stream *s;
+
+ spin_lock_irq(&bus->reg_lock);
+
+ /* get an unused stream */
+ list_for_each_entry(s, &bus->stream_list, list) {
+ if (s->direction == direction && !s->opened) {
+ s->opened = true;
+ stream = stream_to_hdac_ext_stream(s);
+ break;
+ }
+ }
+
+ spin_unlock_irq(&bus->reg_lock);
+
+ /* stream found? */
+ if (!stream)
+ dev_err(sdev->dev, "error: no free %s streams\n",
+ direction == SNDRV_PCM_STREAM_PLAYBACK ?
+ "playback" : "capture");
+
+ return stream;
+}
+
+/* free a stream */
+int hda_dsp_stream_put(struct snd_sof_dev *sdev, int direction, int stream_tag)
+{
+ struct hdac_bus *bus = sof_to_bus(sdev);
+ struct hdac_stream *s;
+
+ spin_lock_irq(&bus->reg_lock);
+
+ /* find used stream */
+ list_for_each_entry(s, &bus->stream_list, list) {
+ if (s->direction == direction &&
+ s->opened && s->stream_tag == stream_tag) {
+ s->opened = false;
+ spin_unlock_irq(&bus->reg_lock);
+ return 0;
+ }
+ }
+
+ spin_unlock_irq(&bus->reg_lock);
+
+ dev_dbg(sdev->dev, "stream_tag %d not opened!\n", stream_tag);
+ return -ENODEV;
+}
+
+int hda_dsp_stream_trigger(struct snd_sof_dev *sdev,
+ struct hdac_ext_stream *stream, int cmd)
+{
+ struct hdac_stream *hstream = &stream->hstream;
+ int sd_offset = SOF_STREAM_SD_OFFSET(hstream);
+
+ /* cmd must be for audio stream */
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_RESUME:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ case SNDRV_PCM_TRIGGER_START:
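+ /* enable the stream interrupt in INTCTL, then set RUN and the per-stream interrupt enables in SD_CTL */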
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, SOF_HDA_INTCTL,
+ 1 << hstream->index,
+ 1 << hstream->index);
+
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
+ sd_offset,
+ SOF_HDA_SD_CTL_DMA_START |
+ SOF_HDA_CL_DMA_SD_INT_MASK,
+ SOF_HDA_SD_CTL_DMA_START |
+ SOF_HDA_CL_DMA_SD_INT_MASK);
+
+ hstream->running = true;
+ break;
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ case SNDRV_PCM_TRIGGER_STOP:
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
+ sd_offset,
+ SOF_HDA_SD_CTL_DMA_START |
+ SOF_HDA_CL_DMA_SD_INT_MASK, 0x0);
+
+ snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR, sd_offset +
+ SOF_HDA_ADSP_REG_CL_SD_STS,
+ SOF_HDA_CL_DMA_SD_INT_MASK);
+
+ hstream->running = false;
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, SOF_HDA_INTCTL,
+ 1 << hstream->index, 0x0);
+ break;
+ default:
+ dev_err(sdev->dev, "error: unknown command: %d\n", cmd);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * prepare the common hdac register settings, for both the code loader
+ * and normal streams
+ */
+int hda_dsp_stream_hw_params(struct snd_sof_dev *sdev,
+ struct hdac_ext_stream *stream,
+ struct snd_dma_buffer *dmab,
+ struct snd_pcm_hw_params *params)
+{
+ struct hdac_bus *bus = sof_to_bus(sdev);
+ struct hdac_stream *hstream;
+ int sd_offset;
+ int ret, timeout = HDA_DSP_STREAM_RESET_TIMEOUT;
+ u32 val, mask;
+
+ /* validate arguments before dereferencing the stream */
+ if (!stream) {
+ dev_err(sdev->dev, "error: no stream available\n");
+ return -ENODEV;
+ }
+
+ if (!dmab) {
+ dev_err(sdev->dev, "error: no dma buffer allocated!\n");
+ return -ENODEV;
+ }
+
+ hstream = &stream->hstream;
+ sd_offset = SOF_STREAM_SD_OFFSET(hstream);
+
+ /* decouple host and link DMA */
+ mask = 0x1 << hstream->index;
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_PP_BAR, SOF_HDA_REG_PP_PPCTL,
+ mask, mask);
+
+ /* clear stream status */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, sd_offset,
+ SOF_HDA_CL_DMA_SD_INT_MASK |
+ SOF_HDA_SD_CTL_DMA_START, 0);
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
+ sd_offset + SOF_HDA_ADSP_REG_CL_SD_STS,
+ SOF_HDA_CL_DMA_SD_INT_MASK,
+ SOF_HDA_CL_DMA_SD_INT_MASK);
+
+ /* stream reset */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, sd_offset, 0x1,
+ 0x1);
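+ /* wait for hardware to report that the stream has entered reset (SRST reads back as 1) */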
+ udelay(3);
+ do {
+ val = snd_sof_dsp_read(sdev, HDA_DSP_HDA_BAR,
+ sd_offset);
+ if (val & 0x1)
+ break;
+ } while (--timeout);
+ if (timeout == 0) {
+ dev_err(sdev->dev, "error: stream reset failed\n");
+ return -ETIMEDOUT;
+ }
+
+ timeout = HDA_DSP_STREAM_RESET_TIMEOUT;
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, sd_offset, 0x1,
+ 0x0);
+
+ /* wait for hardware to report that stream is out of reset */
+ udelay(3);
+ do {
+ val = snd_sof_dsp_read(sdev, HDA_DSP_HDA_BAR,
+ sd_offset);
+ if ((val & 0x1) == 0)
+ break;
+ } while (--timeout);
+ if (timeout == 0) {
+ dev_err(sdev->dev, "error: timeout waiting for stream reset\n");
+ return -ETIMEDOUT;
+ }
+
+ if (hstream->posbuf)
+ *hstream->posbuf = 0;
+
+ /* reset BDL address */
+ snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR,
+ sd_offset + SOF_HDA_ADSP_REG_CL_SD_BDLPL,
+ 0x0);
+ snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR,
+ sd_offset + SOF_HDA_ADSP_REG_CL_SD_BDLPU,
+ 0x0);
+
+ /* clear stream status */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, sd_offset,
+ SOF_HDA_CL_DMA_SD_INT_MASK |
+ SOF_HDA_SD_CTL_DMA_START, 0);
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
+ sd_offset + SOF_HDA_ADSP_REG_CL_SD_STS,
+ SOF_HDA_CL_DMA_SD_INT_MASK,
+ SOF_HDA_CL_DMA_SD_INT_MASK);
+
+ hstream->frags = 0;
+
+ ret = hda_dsp_stream_setup_bdl(sdev, dmab, hstream);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: set up of BDL failed\n");
+ return ret;
+ }
+
+ /* program stream tag to set up stream descriptor for DMA */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, sd_offset,
+ SOF_HDA_CL_SD_CTL_STREAM_TAG_MASK,
+ hstream->stream_tag <<
+ SOF_HDA_CL_SD_CTL_STREAM_TAG_SHIFT);
+
+ /* program cyclic buffer length */
+ snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR,
+ sd_offset + SOF_HDA_ADSP_REG_CL_SD_CBL,
+ hstream->bufsize);
+
+ /*
+ * Recommended hardware programming sequence for HDAudio DMA format
+ *
+ * 1. Put DMA into coupled mode by clearing PPCTL.PROCEN bit
+ * for corresponding stream index before the time of writing
+ * format to SDxFMT register.
+ * 2. Write SDxFMT
+ * 3. Set PPCTL.PROCEN bit for corresponding stream index to
+ * enable decoupled mode
+ */
+
+ /* couple host and link DMA, disable DSP features */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_PP_BAR, SOF_HDA_REG_PP_PPCTL,
+ mask, 0);
+
+ /* program stream format */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
+ sd_offset +
+ SOF_HDA_ADSP_REG_CL_SD_FORMAT,
+ 0xffff, hstream->format_val);
+
+ /* decouple host and link DMA, enable DSP features */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_PP_BAR, SOF_HDA_REG_PP_PPCTL,
+ mask, mask);
+
+ /* program last valid index */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
+ sd_offset + SOF_HDA_ADSP_REG_CL_SD_LVI,
+ 0xffff, (hstream->frags - 1));
+
+ /* program BDL address */
+ snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR,
+ sd_offset + SOF_HDA_ADSP_REG_CL_SD_BDLPL,
+ (u32)hstream->bdl.addr);
+ snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR,
+ sd_offset + SOF_HDA_ADSP_REG_CL_SD_BDLPU,
+ upper_32_bits(hstream->bdl.addr));
+
+ /* enable position buffer */
+ if (!(snd_sof_dsp_read(sdev, HDA_DSP_HDA_BAR, SOF_HDA_ADSP_DPLBASE)
+ & SOF_HDA_ADSP_DPLBASE_ENABLE)) {
+ snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR, SOF_HDA_ADSP_DPUBASE,
+ upper_32_bits(bus->posbuf.addr));
+ snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR, SOF_HDA_ADSP_DPLBASE,
+ (u32)bus->posbuf.addr |
+ SOF_HDA_ADSP_DPLBASE_ENABLE);
+ }
+
+ /* set interrupt enable bits */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, sd_offset,
+ SOF_HDA_CL_DMA_SD_INT_MASK,
+ SOF_HDA_CL_DMA_SD_INT_MASK);
+
+ /* read FIFO size */
+ if (hstream->direction == SNDRV_PCM_STREAM_PLAYBACK) {
+ hstream->fifo_size =
+ snd_sof_dsp_read(sdev, HDA_DSP_HDA_BAR,
+ sd_offset +
+ SOF_HDA_ADSP_REG_CL_SD_FIFOSIZE);
+ hstream->fifo_size &= 0xffff;
+ hstream->fifo_size += 1;
+ } else {
+ hstream->fifo_size = 0;
+ }
+
+ return ret;
+}
+
+irqreturn_t hda_dsp_stream_interrupt(int irq, void *context)
+{
+ struct hdac_bus *bus = context;
+ struct sof_intel_hda_dev *sof_hda = bus_to_sof_hda(bus);
+ u32 stream_mask;
+ u32 status;
+
+ if (!pm_runtime_active(bus->dev))
+ return IRQ_NONE;
+
+ spin_lock(&bus->reg_lock);
+
+ status = snd_hdac_chip_readl(bus, INTSTS);
+ stream_mask = GENMASK(sof_hda->stream_max - 1, 0) | AZX_INT_CTRL_EN;
+
+ /* not a stream interrupt, or the register is inaccessible; ignore it */
+ if (!(status & stream_mask) || status == 0xffffffff) {
+ spin_unlock(&bus->reg_lock);
+ return IRQ_NONE;
+ }
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+ /* clear rirb int */
+ status = snd_hdac_chip_readb(bus, RIRBSTS);
+ if (status & RIRB_INT_MASK) {
+ if (status & RIRB_INT_RESPONSE)
+ snd_hdac_bus_update_rirb(bus);
+ snd_hdac_chip_writeb(bus, RIRBSTS, RIRB_INT_MASK);
+ }
+#endif
+
+ spin_unlock(&bus->reg_lock);
+
+ return snd_hdac_chip_readl(bus, INTSTS) ? IRQ_WAKE_THREAD : IRQ_HANDLED;
+}
+
+irqreturn_t hda_dsp_stream_threaded_handler(int irq, void *context)
+{
+ struct hdac_bus *bus = context;
+ struct sof_intel_hda_dev *sof_hda = bus_to_sof_hda(bus);
+ u32 status = snd_hdac_chip_readl(bus, INTSTS);
+ struct hdac_stream *s;
+ u32 sd_status;
+
+ /* check streams */
+ list_for_each_entry(s, &bus->stream_list, list) {
+ if (status & (1 << s->index) && s->opened) {
+ sd_status = snd_hdac_stream_readb(s, SD_STS);
+
+ dev_vdbg(bus->dev, "stream %d status 0x%x\n",
+ s->index, sd_status);
+
+ snd_hdac_stream_writeb(s, SD_STS, SD_INT_MASK);
+
+ if (!s->substream ||
+ !s->running ||
+ (sd_status & SOF_HDA_CL_DMA_SD_INT_COMPLETE) == 0)
+ continue;
+
+ /* Inform ALSA only when the position is not reported via IPC */
+ if (sof_hda->no_ipc_position)
+ snd_sof_pcm_period_elapsed(s->substream);
+
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+int hda_dsp_stream_init(struct snd_sof_dev *sdev)
+{
+ struct hdac_bus *bus = sof_to_bus(sdev);
+ struct hdac_ext_stream *stream;
+ struct hdac_stream *hstream;
+ struct pci_dev *pci = to_pci_dev(sdev->dev);
+ struct sof_intel_hda_dev *sof_hda = bus_to_sof_hda(bus);
+ int sd_offset;
+ int i, num_playback, num_capture, num_total, ret;
+ u32 gcap;
+
+ gcap = snd_sof_dsp_read(sdev, HDA_DSP_HDA_BAR, SOF_HDA_GCAP);
+ dev_dbg(sdev->dev, "hda global caps = 0x%x\n", gcap);
+
+ /* get stream count from GCAP */
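+ /* GCAP bits 11:8 = input (capture) streams, bits 15:12 = output (playback) streams */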
+ num_capture = (gcap >> 8) & 0x0f;
+ num_playback = (gcap >> 12) & 0x0f;
+ num_total = num_playback + num_capture;
+
+ dev_dbg(sdev->dev, "detected %d playback and %d capture streams\n",
+ num_playback, num_capture);
+
+ if (num_playback >= SOF_HDA_PLAYBACK_STREAMS) {
+ dev_err(sdev->dev, "error: too many playback streams %d\n",
+ num_playback);
+ return -EINVAL;
+ }
+
+ if (num_capture >= SOF_HDA_CAPTURE_STREAMS) {
+ dev_err(sdev->dev, "error: too many capture streams %d\n",
+ num_capture);
+ return -EINVAL;
+ }
+
+ /*
+ * mem alloc for the position buffer
+ * TODO: check position buffer update
+ */
+ ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, &pci->dev,
+ SOF_HDA_DPIB_ENTRY_SIZE * num_total,
+ &bus->posbuf);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: posbuffer dma alloc failed\n");
+ return -ENOMEM;
+ }
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+ /* mem alloc for the CORB/RIRB ringbuffers */
+ ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, &pci->dev,
+ PAGE_SIZE, &bus->rb);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: RB alloc failed\n");
+ return -ENOMEM;
+ }
+#endif
+
+ /* create capture streams */
+ for (i = 0; i < num_capture; i++) {
+ struct sof_intel_hda_stream *hda_stream;
+
+ hda_stream = devm_kzalloc(sdev->dev, sizeof(*hda_stream),
+ GFP_KERNEL);
+ if (!hda_stream)
+ return -ENOMEM;
+
+ stream = &hda_stream->hda_stream;
+
+ stream->pphc_addr = sdev->bar[HDA_DSP_PP_BAR] +
+ SOF_HDA_PPHC_BASE + SOF_HDA_PPHC_INTERVAL * i;
+
+ stream->pplc_addr = sdev->bar[HDA_DSP_PP_BAR] +
+ SOF_HDA_PPLC_BASE + SOF_HDA_PPLC_MULTI * num_total +
+ SOF_HDA_PPLC_INTERVAL * i;
+
+ /* do we support SPIB */
+ if (sdev->bar[HDA_DSP_SPIB_BAR]) {
+ stream->spib_addr = sdev->bar[HDA_DSP_SPIB_BAR] +
+ SOF_HDA_SPIB_BASE + SOF_HDA_SPIB_INTERVAL * i +
+ SOF_HDA_SPIB_SPIB;
+
+ stream->fifo_addr = sdev->bar[HDA_DSP_SPIB_BAR] +
+ SOF_HDA_SPIB_BASE + SOF_HDA_SPIB_INTERVAL * i +
+ SOF_HDA_SPIB_MAXFIFO;
+ }
+
+ hstream = &stream->hstream;
+ hstream->bus = bus;
+ hstream->sd_int_sta_mask = 1 << i;
+ hstream->index = i;
+ sd_offset = SOF_STREAM_SD_OFFSET(hstream);
+ hstream->sd_addr = sdev->bar[HDA_DSP_HDA_BAR] + sd_offset;
+ hstream->stream_tag = i + 1;
+ hstream->opened = false;
+ hstream->running = false;
+ hstream->direction = SNDRV_PCM_STREAM_CAPTURE;
+
+ /* memory alloc for stream BDL */
+ ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, &pci->dev,
+ HDA_DSP_BDL_SIZE, &hstream->bdl);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: stream bdl dma alloc failed\n");
+ return -ENOMEM;
+ }
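+ /* each stream has an 8 byte (2 DWORD) entry in the DMA position buffer */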
+ hstream->posbuf = (__le32 *)(bus->posbuf.area +
+ (hstream->index) * 8);
+
+ list_add_tail(&hstream->list, &bus->stream_list);
+ }
+
+ /* create playback streams */
+ for (i = num_capture; i < num_total; i++) {
+ struct sof_intel_hda_stream *hda_stream;
+
+ hda_stream = devm_kzalloc(sdev->dev, sizeof(*hda_stream),
+ GFP_KERNEL);
+ if (!hda_stream)
+ return -ENOMEM;
+
+ stream = &hda_stream->hda_stream;
+
+ /* we always have DSP support */
+ stream->pphc_addr = sdev->bar[HDA_DSP_PP_BAR] +
+ SOF_HDA_PPHC_BASE + SOF_HDA_PPHC_INTERVAL * i;
+
+ stream->pplc_addr = sdev->bar[HDA_DSP_PP_BAR] +
+ SOF_HDA_PPLC_BASE + SOF_HDA_PPLC_MULTI * num_total +
+ SOF_HDA_PPLC_INTERVAL * i;
+
+ /* do we support SPIB */
+ if (sdev->bar[HDA_DSP_SPIB_BAR]) {
+ stream->spib_addr = sdev->bar[HDA_DSP_SPIB_BAR] +
+ SOF_HDA_SPIB_BASE + SOF_HDA_SPIB_INTERVAL * i +
+ SOF_HDA_SPIB_SPIB;
+
+ stream->fifo_addr = sdev->bar[HDA_DSP_SPIB_BAR] +
+ SOF_HDA_SPIB_BASE + SOF_HDA_SPIB_INTERVAL * i +
+ SOF_HDA_SPIB_MAXFIFO;
+ }
+
+ hstream = &stream->hstream;
+ hstream->bus = bus;
+ hstream->sd_int_sta_mask = 1 << i;
+ hstream->index = i;
+ sd_offset = SOF_STREAM_SD_OFFSET(hstream);
+ hstream->sd_addr = sdev->bar[HDA_DSP_HDA_BAR] + sd_offset;
+ hstream->stream_tag = i - num_capture + 1;
+ hstream->opened = false;
+ hstream->running = false;
+ hstream->direction = SNDRV_PCM_STREAM_PLAYBACK;
+
+ /* mem alloc for stream BDL */
+ ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, &pci->dev,
+ HDA_DSP_BDL_SIZE, &hstream->bdl);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: stream bdl dma alloc failed\n");
+ return -ENOMEM;
+ }
+
+ hstream->posbuf = (__le32 *)(bus->posbuf.area +
+ (hstream->index) * 8);
+
+ list_add_tail(&hstream->list, &bus->stream_list);
+ }
+
+ /* store total stream count (playback + capture) from GCAP */
+ sof_hda->stream_max = num_total;
+
+ return 0;
+}
+
+void hda_dsp_stream_free(struct snd_sof_dev *sdev)
+{
+ struct hdac_bus *bus = sof_to_bus(sdev);
+ struct hdac_stream *s, *_s;
+ struct hdac_ext_stream *stream;
+ struct sof_intel_hda_stream *hda_stream;
+
+ /* free position buffer */
+ if (bus->posbuf.area)
+ snd_dma_free_pages(&bus->posbuf);
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+ /* free CORB/RIRB ring buffer */
+ if (bus->rb.area)
+ snd_dma_free_pages(&bus->rb);
+#endif
+
+ list_for_each_entry_safe(s, _s, &bus->stream_list, list) {
+ /* TODO: decouple */
+
+ /* free bdl buffer */
+ if (s->bdl.area)
+ snd_dma_free_pages(&s->bdl);
+ list_del(&s->list);
+ stream = stream_to_hdac_ext_stream(s);
+ hda_stream = container_of(stream, struct sof_intel_hda_stream,
+ hda_stream);
+ devm_kfree(sdev->dev, hda_stream);
+ }
+}
diff --git a/sound/soc/sof/intel/hda-trace.c b/sound/soc/sof/intel/hda-trace.c
new file mode 100644
index 000000000000..33b23bd6a01e
--- /dev/null
+++ b/sound/soc/sof/intel/hda-trace.c
@@ -0,0 +1,94 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018 Intel Corporation. All rights reserved.
+//
+// Authors: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+// Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
+// Rander Wang <rander.wang@intel.com>
+// Keyon Jie <yang.jie@linux.intel.com>
+//
+
+/*
+ * Hardware interface for generic Intel audio DSP HDA IP
+ */
+
+#include <sound/hdaudio_ext.h>
+#include "../ops.h"
+#include "hda.h"
+
+static int hda_dsp_trace_prepare(struct snd_sof_dev *sdev)
+{
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+ struct hdac_ext_stream *stream = hda->dtrace_stream;
+ struct hdac_stream *hstream = &stream->hstream;
+ struct snd_dma_buffer *dmab = &sdev->dmatb;
+ int ret;
+
+ hstream->period_bytes = 0; /* initialize period_bytes */
+ hstream->bufsize = sdev->dmatb.bytes;
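+ /* a zero period size makes the BDL setup treat the whole trace buffer as one period */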
+
+ ret = hda_dsp_stream_hw_params(sdev, stream, dmab, NULL);
+ if (ret < 0)
+ dev_err(sdev->dev, "error: hdac prepare failed: %x\n", ret);
+
+ return ret;
+}
+
+int hda_dsp_trace_init(struct snd_sof_dev *sdev, u32 *stream_tag)
+{
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+ int ret;
+
+ hda->dtrace_stream = hda_dsp_stream_get(sdev,
+ SNDRV_PCM_STREAM_CAPTURE);
+
+ if (!hda->dtrace_stream) {
+ dev_err(sdev->dev,
+ "error: no available capture stream for DMA trace\n");
+ return -ENODEV;
+ }
+
+ *stream_tag = hda->dtrace_stream->hstream.stream_tag;
+
+ /*
+ * initialize the capture stream, set the BDL address and return the
+ * corresponding stream tag, which is sent to the firmware via IPC.
+ */
+ ret = hda_dsp_trace_prepare(sdev);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: hdac trace init failed: %x\n", ret);
+ hda_dsp_stream_put(sdev, SNDRV_PCM_STREAM_CAPTURE, *stream_tag);
+ hda->dtrace_stream = NULL;
+ *stream_tag = 0;
+ }
+
+ return ret;
+}
+
+int hda_dsp_trace_release(struct snd_sof_dev *sdev)
+{
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+ struct hdac_stream *hstream;
+
+ if (hda->dtrace_stream) {
+ hstream = &hda->dtrace_stream->hstream;
+ hda_dsp_stream_put(sdev,
+ SNDRV_PCM_STREAM_CAPTURE,
+ hstream->stream_tag);
+ hda->dtrace_stream = NULL;
+ return 0;
+ }
+
+ dev_dbg(sdev->dev, "DMA trace stream is not opened!\n");
+ return -ENODEV;
+}
+
+int hda_dsp_trace_trigger(struct snd_sof_dev *sdev, int cmd)
+{
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+
+ return hda_dsp_stream_trigger(sdev, hda->dtrace_stream, cmd);
+}
diff --git a/sound/soc/sof/intel/hda.c b/sound/soc/sof/intel/hda.c
new file mode 100644
index 000000000000..7e3980a2f7ba
--- /dev/null
+++ b/sound/soc/sof/intel/hda.c
@@ -0,0 +1,689 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018 Intel Corporation. All rights reserved.
+//
+// Authors: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+// Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
+// Rander Wang <rander.wang@intel.com>
+// Keyon Jie <yang.jie@linux.intel.com>
+//
+
+/*
+ * Hardware interface for generic Intel audio DSP HDA IP
+ */
+
+#include <linux/module.h>
+#include <sound/hdaudio_ext.h>
+#include <sound/sof.h>
+#include <sound/sof/xtensa.h>
+#include "../ops.h"
+#include "hda.h"
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA_AUDIO_CODEC)
+#include "../../codecs/hdac_hda.h"
+#endif
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+#include <sound/soc-acpi-intel-match.h>
+#endif
+
+/* platform specific devices */
+#include "shim.h"
+
+/*
+ * Debug
+ */
+
+struct hda_dsp_msg_code {
+ u32 code;
+ const char *msg;
+};
+
+static const struct hda_dsp_msg_code hda_dsp_rom_msg[] = {
+ {HDA_DSP_ROM_FW_MANIFEST_LOADED, "status: manifest loaded"},
+ {HDA_DSP_ROM_FW_FW_LOADED, "status: fw loaded"},
+ {HDA_DSP_ROM_FW_ENTERED, "status: fw entered"},
+ {HDA_DSP_ROM_CSE_ERROR, "error: cse error"},
+ {HDA_DSP_ROM_CSE_WRONG_RESPONSE, "error: cse wrong response"},
+ {HDA_DSP_ROM_IMR_TO_SMALL, "error: IMR too small"},
+ {HDA_DSP_ROM_BASE_FW_NOT_FOUND, "error: base fw not found"},
+ {HDA_DSP_ROM_CSE_VALIDATION_FAILED, "error: signature verification failed"},
+ {HDA_DSP_ROM_IPC_FATAL_ERROR, "error: ipc fatal error"},
+ {HDA_DSP_ROM_L2_CACHE_ERROR, "error: L2 cache error"},
+ {HDA_DSP_ROM_LOAD_OFFSET_TO_SMALL, "error: load offset too small"},
+ {HDA_DSP_ROM_API_PTR_INVALID, "error: API ptr invalid"},
+ {HDA_DSP_ROM_BASEFW_INCOMPAT, "error: base fw incompatible"},
+ {HDA_DSP_ROM_UNHANDLED_INTERRUPT, "error: unhandled interrupt"},
+ {HDA_DSP_ROM_MEMORY_HOLE_ECC, "error: ECC memory hole"},
+ {HDA_DSP_ROM_KERNEL_EXCEPTION, "error: kernel exception"},
+ {HDA_DSP_ROM_USER_EXCEPTION, "error: user exception"},
+ {HDA_DSP_ROM_UNEXPECTED_RESET, "error: unexpected reset"},
+ {HDA_DSP_ROM_NULL_FW_ENTRY, "error: null FW entry point"},
+};
+
+static void hda_dsp_get_status_skl(struct snd_sof_dev *sdev)
+{
+ u32 status;
+ int i;
+
+ status = snd_sof_dsp_read(sdev, HDA_DSP_BAR,
+ HDA_ADSP_FW_STATUS_SKL);
+
+ for (i = 0; i < ARRAY_SIZE(hda_dsp_rom_msg); i++) {
+ if (status == hda_dsp_rom_msg[i].code) {
+ dev_err(sdev->dev, "%s - code %8.8x\n",
+ hda_dsp_rom_msg[i].msg, status);
+ return;
+ }
+ }
+
+ /* not for us, must be generic sof message */
+ dev_dbg(sdev->dev, "unknown ROM status value %8.8x\n", status);
+}
+
+static void hda_dsp_get_status(struct snd_sof_dev *sdev)
+{
+ u32 status;
+ int i;
+
+ status = snd_sof_dsp_read(sdev, HDA_DSP_BAR,
+ HDA_DSP_SRAM_REG_ROM_STATUS);
+
+ for (i = 0; i < ARRAY_SIZE(hda_dsp_rom_msg); i++) {
+ if (status == hda_dsp_rom_msg[i].code) {
+ dev_err(sdev->dev, "%s - code %8.8x\n",
+ hda_dsp_rom_msg[i].msg, status);
+ return;
+ }
+ }
+
+ /* not for us, must be generic sof message */
+ dev_dbg(sdev->dev, "unknown ROM status value %8.8x\n", status);
+}
+
+static void hda_dsp_get_registers(struct snd_sof_dev *sdev,
+ struct sof_ipc_dsp_oops_xtensa *xoops,
+ struct sof_ipc_panic_info *panic_info,
+ u32 *stack, size_t stack_words)
+{
+ /* first read registers */
+ sof_block_read(sdev, sdev->mmio_bar, sdev->dsp_oops_offset, xoops,
+ sizeof(*xoops));
+
+ /* then get panic info */
+ sof_block_read(sdev, sdev->mmio_bar, sdev->dsp_oops_offset +
+ sizeof(*xoops), panic_info, sizeof(*panic_info));
+
+ /* then get the stack */
+ sof_block_read(sdev, sdev->mmio_bar, sdev->dsp_oops_offset +
+ sizeof(*xoops) + sizeof(*panic_info), stack,
+ stack_words * sizeof(u32));
+}
+
+void hda_dsp_dump_skl(struct snd_sof_dev *sdev, u32 flags)
+{
+ struct sof_ipc_dsp_oops_xtensa xoops;
+ struct sof_ipc_panic_info panic_info;
+ u32 stack[HDA_DSP_STACK_DUMP_SIZE];
+ u32 status, panic;
+
+ /* try SKL specific status message types first */
+ hda_dsp_get_status_skl(sdev);
+
+ /* now try generic SOF status messages */
+ status = snd_sof_dsp_read(sdev, HDA_DSP_BAR,
+ HDA_ADSP_ERROR_CODE_SKL);
+
+ /* TODO: check - there is no define in the spec, but it is used in the code */
+ panic = snd_sof_dsp_read(sdev, HDA_DSP_BAR,
+ HDA_ADSP_ERROR_CODE_SKL + 0x4);
+
+ if (sdev->boot_complete) {
+ hda_dsp_get_registers(sdev, &xoops, &panic_info, stack,
+ HDA_DSP_STACK_DUMP_SIZE);
+ snd_sof_get_status(sdev, status, panic, &xoops, &panic_info,
+ stack, HDA_DSP_STACK_DUMP_SIZE);
+ } else {
+ dev_err(sdev->dev, "error: status = 0x%8.8x panic = 0x%8.8x\n",
+ status, panic);
+ hda_dsp_get_status_skl(sdev);
+ }
+}
+
+void hda_dsp_dump(struct snd_sof_dev *sdev, u32 flags)
+{
+ struct sof_ipc_dsp_oops_xtensa xoops;
+ struct sof_ipc_panic_info panic_info;
+ u32 stack[HDA_DSP_STACK_DUMP_SIZE];
+ u32 status, panic;
+
+ /* try APL specific status message types first */
+ hda_dsp_get_status(sdev);
+
+ /* now try generic SOF status messages */
+ status = snd_sof_dsp_read(sdev, HDA_DSP_BAR,
+ HDA_DSP_SRAM_REG_FW_STATUS);
+ panic = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_SRAM_REG_FW_TRACEP);
+
+ if (sdev->boot_complete) {
+ hda_dsp_get_registers(sdev, &xoops, &panic_info, stack,
+ HDA_DSP_STACK_DUMP_SIZE);
+ snd_sof_get_status(sdev, status, panic, &xoops, &panic_info,
+ stack, HDA_DSP_STACK_DUMP_SIZE);
+ } else {
+ dev_err(sdev->dev, "error: status = 0x%8.8x panic = 0x%8.8x\n",
+ status, panic);
+ hda_dsp_get_status(sdev);
+ }
+}
+
+void hda_ipc_dump(struct snd_sof_dev *sdev)
+{
+ u32 hipcie;
+ u32 hipct;
+ u32 hipcctl;
+
+ /* read IPC status */
+ hipcie = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_REG_HIPCIE);
+ hipct = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_REG_HIPCT);
+ hipcctl = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_REG_HIPCCTL);
+
+ /* dump the IPC regs */
+ /* TODO: parse the raw msg */
+ dev_err(sdev->dev,
+ "error: host status 0x%8.8x dsp status 0x%8.8x mask 0x%8.8x\n",
+ hipcie, hipct, hipcctl);
+}
+
+static int hda_init(struct snd_sof_dev *sdev)
+{
+ struct hda_bus *hbus;
+ struct hdac_bus *bus;
+ struct hdac_ext_bus_ops *ext_ops = NULL;
+ struct pci_dev *pci = to_pci_dev(sdev->dev);
+ int ret;
+
+ hbus = sof_to_hbus(sdev);
+ bus = sof_to_bus(sdev);
+
+ /* HDA bus init */
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA_AUDIO_CODEC)
+ ext_ops = snd_soc_hdac_hda_get_ops();
+#endif
+ sof_hda_bus_init(bus, &pci->dev, ext_ops);
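+ /* report DMA positions via the position buffer, with no BDL position adjustment */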
+ bus->use_posbuf = 1;
+ bus->bdl_pos_adj = 0;
+
+ mutex_init(&hbus->prepare_mutex);
+ hbus->pci = pci;
+ hbus->mixer_assigned = -1;
+ hbus->modelname = "sofbus";
+
+ /* initialise hdac bus */
+ bus->addr = pci_resource_start(pci, 0);
+ bus->remap_addr = pci_ioremap_bar(pci, 0);
+ if (!bus->remap_addr) {
+ dev_err(bus->dev, "error: ioremap error\n");
+ return -ENXIO;
+ }
+
+ /* HDA base */
+ sdev->bar[HDA_DSP_HDA_BAR] = bus->remap_addr;
+
+ /* get controller capabilities */
+ ret = hda_dsp_ctrl_get_caps(sdev);
+ if (ret < 0)
+ dev_err(sdev->dev, "error: get caps error\n");
+
+ return ret;
+}
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+
+static const char *fixup_tplg_name(struct snd_sof_dev *sdev,
+ const char *sof_tplg_filename)
+{
+ const char *tplg_filename = NULL;
+ char *filename;
+ char *split_ext;
+
+ filename = devm_kstrdup(sdev->dev, sof_tplg_filename, GFP_KERNEL);
+ if (!filename)
+ return NULL;
+
+ /* this assumes a .tplg extension */
+ split_ext = strsep(&filename, ".");
+ if (split_ext) {
+ tplg_filename = devm_kasprintf(sdev->dev, GFP_KERNEL,
+ "%s-idisp.tplg", split_ext);
+ if (!tplg_filename)
+ return NULL;
+ }
+ return tplg_filename;
+}
+
+static int hda_init_caps(struct snd_sof_dev *sdev)
+{
+ struct hdac_bus *bus = sof_to_bus(sdev);
+ struct hdac_ext_link *hlink;
+ struct snd_soc_acpi_mach_params *mach_params;
+ struct snd_soc_acpi_mach *hda_mach;
+ struct snd_sof_pdata *pdata = sdev->pdata;
+ struct snd_soc_acpi_mach *mach;
+ const char *tplg_filename;
+ int codec_num = 0;
+ int ret = 0;
+ int i;
+
+ device_disable_async_suspend(bus->dev);
+
+ /* check if dsp is there */
+ if (bus->ppcap)
+ dev_dbg(sdev->dev, "PP capability, will probe DSP later.\n");
+
+ if (bus->mlcap)
+ snd_hdac_ext_bus_get_ml_capabilities(bus);
+
+ /* init i915 and HDMI codecs */
+ ret = hda_codec_i915_init(sdev);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: no HDMI audio devices found\n");
+ return ret;
+ }
+
+ ret = hda_dsp_ctrl_init_chip(sdev, true);
+ if (ret < 0) {
+ dev_err(bus->dev, "error: init chip failed with ret: %d\n", ret);
+ goto out;
+ }
+
+ /* codec detection */
+ if (!bus->codec_mask) {
+ dev_info(bus->dev, "no hda codecs found!\n");
+ } else {
+ dev_info(bus->dev, "hda codecs found, mask %lx\n",
+ bus->codec_mask);
+
+ for (i = 0; i < HDA_MAX_CODECS; i++) {
+ if (bus->codec_mask & (1 << i))
+ codec_num++;
+ }
+
+ /*
+ * If no machine driver is found, the generic HDA machine driver
+ * is used when:
+ * 1. there is one HDMI codec and one external HDAudio codec
+ * 2. there is only an HDMI codec
+ */
+ if (!pdata->machine && codec_num <= 2 &&
+ HDA_IDISP_CODEC(bus->codec_mask)) {
+ hda_mach = snd_soc_acpi_intel_hda_machines;
+ pdata->machine = hda_mach;
+
+ /* topology: use the info from hda_machines */
+ pdata->tplg_filename =
+ hda_mach->sof_tplg_filename;
+
+ /* firmware: pick the first in machine list */
+ mach = pdata->desc->machines;
+ pdata->fw_filename = mach->sof_fw_filename;
+
+ dev_info(bus->dev, "using HDA machine driver %s now\n",
+ hda_mach->drv_name);
+
+ /* fixup topology file for HDMI only platforms */
+ if (codec_num == 1) {
+ /* use local variable for readability */
+ tplg_filename = pdata->tplg_filename;
+ tplg_filename = fixup_tplg_name(sdev, tplg_filename);
+ if (!tplg_filename) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ pdata->tplg_filename = tplg_filename;
+ }
+ }
+ }
+
+ /* used by hda machine driver to create dai links */
+ if (pdata->machine) {
+ mach_params = (struct snd_soc_acpi_mach_params *)
+ &pdata->machine->mach_params;
+ mach_params->codec_mask = bus->codec_mask;
+ mach_params->platform = dev_name(sdev->dev);
+ }
+
+ /* create codec instances */
+ hda_codec_probe_bus(sdev);
+
+ hda_codec_i915_put(sdev);
+
+ /*
+ * we are done probing so decrement link counts
+ */
+ list_for_each_entry(hlink, &bus->hlink_list, list)
+ snd_hdac_ext_bus_link_put(bus, hlink);
+
+ return 0;
+
+out:
+ hda_codec_i915_exit(sdev);
+ return ret;
+}
+
+#else
+
+static int hda_init_caps(struct snd_sof_dev *sdev)
+{
+ /*
+ * set CGCTL.MISCBDCGE to 0 during reset and set back to 1
+ * when reset finished.
+ * TODO: maybe no need for init_caps?
+ */
+ hda_dsp_ctrl_misc_clock_gating(sdev, 0);
+
+ /* clear WAKESTS */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, SOF_HDA_WAKESTS,
+ SOF_HDA_WAKESTS_INT_MASK,
+ SOF_HDA_WAKESTS_INT_MASK);
+
+ return 0;
+}
+
+#endif
+
+static const struct sof_intel_dsp_desc
+ *get_chip_info(struct snd_sof_pdata *pdata)
+{
+ const struct sof_dev_desc *desc = pdata->desc;
+ const struct sof_intel_dsp_desc *chip_info;
+
+ chip_info = desc->chip_info;
+
+ return chip_info;
+}
+
+int hda_dsp_probe(struct snd_sof_dev *sdev)
+{
+ struct pci_dev *pci = to_pci_dev(sdev->dev);
+ struct sof_intel_hda_dev *hdev;
+ struct hdac_bus *bus;
+ struct hdac_stream *stream;
+ const struct sof_intel_dsp_desc *chip;
+ int sd_offset, ret = 0;
+
+ /*
+ * detect DSP by checking class/subclass/prog-id information
+ * class=04 subclass 03 prog-if 00: no DSP, legacy driver is required
+ * class=04 subclass 01 prog-if 00: DSP is present
+ * (and may be required e.g. for DMIC or SSP support)
+ * class=04 subclass 03 prog-if 80: either of DSP or legacy mode works
+ */
+ if (pci->class == 0x040300) {
+ dev_err(sdev->dev, "error: the DSP is not enabled on this platform, aborting probe\n");
+ return -ENODEV;
+ } else if (pci->class != 0x040100 && pci->class != 0x040380) {
+ dev_err(sdev->dev, "error: unknown PCI class/subclass/prog-if 0x%06x found, aborting probe\n", pci->class);
+ return -ENODEV;
+ }
+ dev_info(sdev->dev, "DSP detected with PCI class/subclass/prog-if 0x%06x\n", pci->class);
+
+ chip = get_chip_info(sdev->pdata);
+ if (!chip) {
+ dev_err(sdev->dev, "error: no such device supported, chip id:%x\n",
+ pci->device);
+ ret = -EIO;
+ goto err;
+ }
+
+ hdev = devm_kzalloc(sdev->dev, sizeof(*hdev), GFP_KERNEL);
+ if (!hdev)
+ return -ENOMEM;
+ sdev->pdata->hw_pdata = hdev;
+ hdev->desc = chip;
+
+ hdev->dmic_dev = platform_device_register_data(sdev->dev, "dmic-codec",
+ PLATFORM_DEVID_NONE,
+ NULL, 0);
+ if (IS_ERR(hdev->dmic_dev)) {
+ dev_err(sdev->dev, "error: failed to create DMIC device\n");
+ return PTR_ERR(hdev->dmic_dev);
+ }
+
+ /*
+ * use position update IPC if it is either forced
+ * or we have no other choice
+ */
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG_FORCE_IPC_POSITION)
+ hdev->no_ipc_position = 0;
+#else
+ hdev->no_ipc_position = sof_ops(sdev)->pcm_pointer ? 1 : 0;
+#endif
+
+ /* set up HDA base */
+ bus = sof_to_bus(sdev);
+ ret = hda_init(sdev);
+ if (ret < 0)
+ goto hdac_bus_unmap;
+
+ /* DSP base */
+ sdev->bar[HDA_DSP_BAR] = pci_ioremap_bar(pci, HDA_DSP_BAR);
+ if (!sdev->bar[HDA_DSP_BAR]) {
+ dev_err(sdev->dev, "error: ioremap error\n");
+ ret = -ENXIO;
+ goto hdac_bus_unmap;
+ }
+
+ sdev->mmio_bar = HDA_DSP_BAR;
+ sdev->mailbox_bar = HDA_DSP_BAR;
+
+ /* allow 64bit DMA address if supported by H/W */
+ if (!dma_set_mask(&pci->dev, DMA_BIT_MASK(64))) {
+ dev_dbg(sdev->dev, "DMA mask is 64 bit\n");
+ dma_set_coherent_mask(&pci->dev, DMA_BIT_MASK(64));
+ } else {
+ dev_dbg(sdev->dev, "DMA mask is 32 bit\n");
+ dma_set_mask(&pci->dev, DMA_BIT_MASK(32));
+ dma_set_coherent_mask(&pci->dev, DMA_BIT_MASK(32));
+ }
+
+ /* init streams */
+ ret = hda_dsp_stream_init(sdev);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: failed to init streams\n");
+ /*
+ * not all errors are due to memory issues, but trying
+ * to free everything does not harm
+ */
+ goto free_streams;
+ }
+
+ /*
+ * register our IRQ
+ * let's try to enable MSI first;
+ * if that fails, fall back to legacy interrupt mode
+ * TODO: support interrupt mode selection via kernel parameter
+ * and multiple MSI vectors
+ */
+ ret = pci_alloc_irq_vectors(pci, 1, 1, PCI_IRQ_MSI);
+ if (ret < 0) {
+ dev_info(sdev->dev, "use legacy interrupt mode\n");
+ /*
+ * in IO-APIC mode, hdev->irq and ipc_irq use the same
+ * irq number as pci->irq
+ */
+ hdev->irq = pci->irq;
+ sdev->ipc_irq = pci->irq;
+ sdev->msi_enabled = 0;
+ } else {
+ dev_info(sdev->dev, "use msi interrupt mode\n");
+ hdev->irq = pci_irq_vector(pci, 0);
+ /* the ipc irq number is the same as the hda irq */
+ sdev->ipc_irq = hdev->irq;
+ sdev->msi_enabled = 1;
+ }
+
+ dev_dbg(sdev->dev, "using HDA IRQ %d\n", hdev->irq);
+ ret = request_threaded_irq(hdev->irq, hda_dsp_stream_interrupt,
+ hda_dsp_stream_threaded_handler,
+ IRQF_SHARED, "AudioHDA", bus);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: failed to register HDA IRQ %d\n",
+ hdev->irq);
+ goto free_irq_vector;
+ }
+
+ dev_dbg(sdev->dev, "using IPC IRQ %d\n", sdev->ipc_irq);
+ ret = request_threaded_irq(sdev->ipc_irq, hda_dsp_ipc_irq_handler,
+ sof_ops(sdev)->irq_thread, IRQF_SHARED,
+ "AudioDSP", sdev);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: failed to register IPC IRQ %d\n",
+ sdev->ipc_irq);
+ goto free_hda_irq;
+ }
+
+ pci_set_master(pci);
+ synchronize_irq(pci->irq);
+
+ /*
+ * clear TCSEL to clear playback on some HD Audio
+ * codecs. PCI TCSEL is defined in the Intel manuals.
+ */
+ snd_sof_pci_update_bits(sdev, PCI_TCSEL, 0x07, 0);
+
+ /* init HDA capabilities */
+ ret = hda_init_caps(sdev);
+ if (ret < 0)
+ goto free_ipc_irq;
+
+ /* reset HDA controller */
+ ret = hda_dsp_ctrl_link_reset(sdev, true);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: failed to reset HDA controller\n");
+ goto free_ipc_irq;
+ }
+
+ /* exit HDA controller reset */
+ ret = hda_dsp_ctrl_link_reset(sdev, false);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: failed to exit HDA controller reset\n");
+ goto free_ipc_irq;
+ }
+
+ /* clear stream status */
+ list_for_each_entry(stream, &bus->stream_list, list) {
+ sd_offset = SOF_STREAM_SD_OFFSET(stream);
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
+ sd_offset +
+ SOF_HDA_ADSP_REG_CL_SD_STS,
+ SOF_HDA_CL_DMA_SD_INT_MASK,
+ SOF_HDA_CL_DMA_SD_INT_MASK);
+ }
+
+ /* clear WAKESTS */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, SOF_HDA_WAKESTS,
+ SOF_HDA_WAKESTS_INT_MASK,
+ SOF_HDA_WAKESTS_INT_MASK);
+
+ /* clear interrupt status register */
+ snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR, SOF_HDA_INTSTS,
+ SOF_HDA_INT_CTRL_EN | SOF_HDA_INT_ALL_STREAM);
+
+ /* enable CIE and GIE interrupts */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, SOF_HDA_INTCTL,
+ SOF_HDA_INT_CTRL_EN | SOF_HDA_INT_GLOBAL_EN,
+ SOF_HDA_INT_CTRL_EN | SOF_HDA_INT_GLOBAL_EN);
+
+ /* re-enable CGCTL.MISCBDCGE after reset */
+ hda_dsp_ctrl_misc_clock_gating(sdev, true);
+
+ device_disable_async_suspend(&pci->dev);
+
+ /* enable DSP features */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_PP_BAR, SOF_HDA_REG_PP_PPCTL,
+ SOF_HDA_PPCTL_GPROCEN, SOF_HDA_PPCTL_GPROCEN);
+
+ /* enable DSP IRQ */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_PP_BAR, SOF_HDA_REG_PP_PPCTL,
+ SOF_HDA_PPCTL_PIE, SOF_HDA_PPCTL_PIE);
+
+ /* initialize waitq for code loading */
+ init_waitqueue_head(&sdev->waitq);
+
+ /* set default mailbox offset for FW ready message */
+ sdev->dsp_box.offset = HDA_DSP_MBOX_UPLINK_OFFSET;
+
+ return 0;
+
+free_ipc_irq:
+ free_irq(sdev->ipc_irq, sdev);
+free_hda_irq:
+ free_irq(hdev->irq, bus);
+free_irq_vector:
+ if (sdev->msi_enabled)
+ pci_free_irq_vectors(pci);
+free_streams:
+ hda_dsp_stream_free(sdev);
+/* dsp_unmap: not currently used */
+ iounmap(sdev->bar[HDA_DSP_BAR]);
+hdac_bus_unmap:
+ iounmap(bus->remap_addr);
+err:
+ return ret;
+}
+
+int hda_dsp_remove(struct snd_sof_dev *sdev)
+{
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+ struct hdac_bus *bus = sof_to_bus(sdev);
+ struct pci_dev *pci = to_pci_dev(sdev->dev);
+ const struct sof_intel_dsp_desc *chip = hda->desc;
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+ /* codec removal, invoke bus_device_remove */
+ snd_hdac_ext_bus_device_remove(bus);
+#endif
+
+ if (!IS_ERR_OR_NULL(hda->dmic_dev))
+ platform_device_unregister(hda->dmic_dev);
+
+ /* disable DSP IRQ */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_PP_BAR, SOF_HDA_REG_PP_PPCTL,
+ SOF_HDA_PPCTL_PIE, 0);
+
+ /* disable CIE and GIE interrupts */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, SOF_HDA_INTCTL,
+ SOF_HDA_INT_CTRL_EN | SOF_HDA_INT_GLOBAL_EN, 0);
+
+ /* disable cores */
+ if (chip)
+ hda_dsp_core_reset_power_down(sdev, chip->cores_mask);
+
+ /* disable DSP */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_PP_BAR, SOF_HDA_REG_PP_PPCTL,
+ SOF_HDA_PPCTL_GPROCEN, 0);
+
+ free_irq(sdev->ipc_irq, sdev);
+ free_irq(hda->irq, bus);
+ if (sdev->msi_enabled)
+ pci_free_irq_vectors(pci);
+
+ hda_dsp_stream_free(sdev);
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+ snd_hdac_link_free_all(bus);
+#endif
+
+ iounmap(sdev->bar[HDA_DSP_BAR]);
+ iounmap(bus->remap_addr);
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+ snd_hdac_ext_bus_exit(bus);
+#endif
+ hda_codec_i915_exit(sdev);
+
+ return 0;
+}
+
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/sound/soc/sof/intel/hda.h b/sound/soc/sof/intel/hda.h
new file mode 100644
index 000000000000..92d45c43b4b1
--- /dev/null
+++ b/sound/soc/sof/intel/hda.h
@@ -0,0 +1,583 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ *
+ * Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+ */
+
+#ifndef __SOF_INTEL_HDA_H
+#define __SOF_INTEL_HDA_H
+
+#include <sound/hda_codec.h>
+#include <sound/hdaudio_ext.h>
+#include "shim.h"
+
+/* PCI registers */
+#define PCI_TCSEL 0x44
+#define PCI_PGCTL PCI_TCSEL
+#define PCI_CGCTL 0x48
+
+/* PCI_PGCTL bits */
+#define PCI_PGCTL_ADSPPGD BIT(2)
+#define PCI_PGCTL_LSRMD_MASK BIT(4)
+
+/* PCI_CGCTL bits */
+#define PCI_CGCTL_MISCBDCGE_MASK BIT(6)
+#define PCI_CGCTL_ADSPDCGE BIT(1)
+
+/* Legacy HDA registers and bits used - widths are variable */
+#define SOF_HDA_GCAP 0x0
+#define SOF_HDA_GCTL 0x8
+/* accept unsol. response enable */
+#define SOF_HDA_GCTL_UNSOL BIT(8)
+#define SOF_HDA_LLCH 0x14
+#define SOF_HDA_INTCTL 0x20
+#define SOF_HDA_INTSTS 0x24
+#define SOF_HDA_WAKESTS 0x0E
+#define SOF_HDA_WAKESTS_INT_MASK ((1 << 8) - 1)
+#define SOF_HDA_RIRBSTS 0x5d
+#define SOF_HDA_VS_EM2_L1SEN BIT(13)
+
+/* SOF_HDA_GCTL register bits */
+#define SOF_HDA_GCTL_RESET BIT(0)
+
+/* SOF_HDA_INTCTL and SOF_HDA_INTSTS regs */
+#define SOF_HDA_INT_GLOBAL_EN BIT(31)
+#define SOF_HDA_INT_CTRL_EN BIT(30)
+#define SOF_HDA_INT_ALL_STREAM 0xff
+
+#define SOF_HDA_MAX_CAPS 10
+#define SOF_HDA_CAP_ID_OFF 16
+#define SOF_HDA_CAP_ID_MASK GENMASK(SOF_HDA_CAP_ID_OFF + 11,\
+ SOF_HDA_CAP_ID_OFF)
+#define SOF_HDA_CAP_NEXT_MASK 0xFFFF
+
+#define SOF_HDA_GTS_CAP_ID 0x1
+#define SOF_HDA_ML_CAP_ID 0x2
+
+#define SOF_HDA_PP_CAP_ID 0x3
+#define SOF_HDA_REG_PP_PPCH 0x10
+#define SOF_HDA_REG_PP_PPCTL 0x04
+#define SOF_HDA_PPCTL_PIE BIT(31)
+#define SOF_HDA_PPCTL_GPROCEN BIT(30)
+
+/* DPIB entry size: 8 Bytes = 2 DWords */
+#define SOF_HDA_DPIB_ENTRY_SIZE 0x8
+
+#define SOF_HDA_SPIB_CAP_ID 0x4
+#define SOF_HDA_DRSM_CAP_ID 0x5
+
+#define SOF_HDA_SPIB_BASE 0x08
+#define SOF_HDA_SPIB_INTERVAL 0x08
+#define SOF_HDA_SPIB_SPIB 0x00
+#define SOF_HDA_SPIB_MAXFIFO 0x04
+
+#define SOF_HDA_PPHC_BASE 0x10
+#define SOF_HDA_PPHC_INTERVAL 0x10
+
+#define SOF_HDA_PPLC_BASE 0x10
+#define SOF_HDA_PPLC_MULTI 0x10
+#define SOF_HDA_PPLC_INTERVAL 0x10
+
+#define SOF_HDA_DRSM_BASE 0x08
+#define SOF_HDA_DRSM_INTERVAL 0x08
+
+/* Descriptor error interrupt */
+#define SOF_HDA_CL_DMA_SD_INT_DESC_ERR 0x10
+
+/* FIFO error interrupt */
+#define SOF_HDA_CL_DMA_SD_INT_FIFO_ERR 0x08
+
+/* Buffer completion interrupt */
+#define SOF_HDA_CL_DMA_SD_INT_COMPLETE 0x04
+
+#define SOF_HDA_CL_DMA_SD_INT_MASK \
+ (SOF_HDA_CL_DMA_SD_INT_DESC_ERR | \
+ SOF_HDA_CL_DMA_SD_INT_FIFO_ERR | \
+ SOF_HDA_CL_DMA_SD_INT_COMPLETE)
+#define SOF_HDA_SD_CTL_DMA_START 0x02 /* Stream DMA start bit */
+
+/* Intel HD Audio Code Loader DMA Registers */
+#define SOF_HDA_ADSP_LOADER_BASE 0x80
+#define SOF_HDA_ADSP_DPLBASE 0x70
+#define SOF_HDA_ADSP_DPUBASE 0x74
+#define SOF_HDA_ADSP_DPLBASE_ENABLE 0x01
+
+/* Stream Registers */
+#define SOF_HDA_ADSP_REG_CL_SD_CTL 0x00
+#define SOF_HDA_ADSP_REG_CL_SD_STS 0x03
+#define SOF_HDA_ADSP_REG_CL_SD_LPIB 0x04
+#define SOF_HDA_ADSP_REG_CL_SD_CBL 0x08
+#define SOF_HDA_ADSP_REG_CL_SD_LVI 0x0C
+#define SOF_HDA_ADSP_REG_CL_SD_FIFOW 0x0E
+#define SOF_HDA_ADSP_REG_CL_SD_FIFOSIZE 0x10
+#define SOF_HDA_ADSP_REG_CL_SD_FORMAT 0x12
+#define SOF_HDA_ADSP_REG_CL_SD_FIFOL 0x14
+#define SOF_HDA_ADSP_REG_CL_SD_BDLPL 0x18
+#define SOF_HDA_ADSP_REG_CL_SD_BDLPU 0x1C
+#define SOF_HDA_ADSP_SD_ENTRY_SIZE 0x20
+
+/* CL: Software Position Based FIFO Capability Registers */
+#define SOF_DSP_REG_CL_SPBFIFO \
+ (SOF_HDA_ADSP_LOADER_BASE + 0x20)
+#define SOF_HDA_ADSP_REG_CL_SPBFIFO_SPBFCH 0x0
+#define SOF_HDA_ADSP_REG_CL_SPBFIFO_SPBFCCTL 0x4
+#define SOF_HDA_ADSP_REG_CL_SPBFIFO_SPIB 0x8
+#define SOF_HDA_ADSP_REG_CL_SPBFIFO_MAXFIFOS 0xc
+
+/* Stream Number */
+#define SOF_HDA_CL_SD_CTL_STREAM_TAG_SHIFT 20
+#define SOF_HDA_CL_SD_CTL_STREAM_TAG_MASK \
+ GENMASK(SOF_HDA_CL_SD_CTL_STREAM_TAG_SHIFT + 3,\
+ SOF_HDA_CL_SD_CTL_STREAM_TAG_SHIFT)
+
+#define HDA_DSP_HDA_BAR 0
+#define HDA_DSP_PP_BAR 1
+#define HDA_DSP_SPIB_BAR 2
+#define HDA_DSP_DRSM_BAR 3
+#define HDA_DSP_BAR 4
+
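+/* DSP memory windows in the DSP BAR are 0x20000 bytes apart, starting at offset 0x80000 */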
+#define SRAM_WINDOW_OFFSET(x) (0x80000 + (x) * 0x20000)
+
+#define HDA_DSP_MBOX_OFFSET SRAM_WINDOW_OFFSET(0)
+
+#define HDA_DSP_PANIC_OFFSET(x) \
+ (((x) & 0xFFFFFF) + HDA_DSP_MBOX_OFFSET)
+
+/* SRAM window 0 FW "registers" */
+#define HDA_DSP_SRAM_REG_ROM_STATUS (HDA_DSP_MBOX_OFFSET + 0x0)
+#define HDA_DSP_SRAM_REG_ROM_ERROR (HDA_DSP_MBOX_OFFSET + 0x4)
+/* FW and ROM share offset 4 */
+#define HDA_DSP_SRAM_REG_FW_STATUS (HDA_DSP_MBOX_OFFSET + 0x4)
+#define HDA_DSP_SRAM_REG_FW_TRACEP (HDA_DSP_MBOX_OFFSET + 0x8)
+#define HDA_DSP_SRAM_REG_FW_END (HDA_DSP_MBOX_OFFSET + 0xc)
+
+#define HDA_DSP_MBOX_UPLINK_OFFSET 0x81000
+
+#define HDA_DSP_STREAM_RESET_TIMEOUT 300
+#define HDA_DSP_CL_TRIGGER_TIMEOUT 300
+
+#define HDA_DSP_SPIB_ENABLE 1
+#define HDA_DSP_SPIB_DISABLE 0
+
+#define SOF_HDA_MAX_BUFFER_SIZE (32 * PAGE_SIZE)
+
+#define HDA_DSP_STACK_DUMP_SIZE 32
+
+/* ROM status/error values */
+#define HDA_DSP_ROM_STS_MASK 0xf
+#define HDA_DSP_ROM_INIT 0x1
+#define HDA_DSP_ROM_FW_MANIFEST_LOADED 0x3
+#define HDA_DSP_ROM_FW_FW_LOADED 0x4
+#define HDA_DSP_ROM_FW_ENTERED 0x5
+#define HDA_DSP_ROM_RFW_START 0xf
+#define HDA_DSP_ROM_CSE_ERROR 40
+#define HDA_DSP_ROM_CSE_WRONG_RESPONSE 41
+#define HDA_DSP_ROM_IMR_TO_SMALL 42
+#define HDA_DSP_ROM_BASE_FW_NOT_FOUND 43
+#define HDA_DSP_ROM_CSE_VALIDATION_FAILED 44
+#define HDA_DSP_ROM_IPC_FATAL_ERROR 45
+#define HDA_DSP_ROM_L2_CACHE_ERROR 46
+#define HDA_DSP_ROM_LOAD_OFFSET_TO_SMALL 47
+#define HDA_DSP_ROM_API_PTR_INVALID 50
+#define HDA_DSP_ROM_BASEFW_INCOMPAT 51
+#define HDA_DSP_ROM_UNHANDLED_INTERRUPT 0xBEE00000
+#define HDA_DSP_ROM_MEMORY_HOLE_ECC 0xECC00000
+#define HDA_DSP_ROM_KERNEL_EXCEPTION 0xCAFE0000
+#define HDA_DSP_ROM_USER_EXCEPTION 0xBEEF0000
+#define HDA_DSP_ROM_UNEXPECTED_RESET 0xDECAF000
+#define HDA_DSP_ROM_NULL_FW_ENTRY 0x4c4c4e55
+#define HDA_DSP_IPC_PURGE_FW 0x01004000
+
+/* various timeout values */
+#define HDA_DSP_PU_TIMEOUT 50
+#define HDA_DSP_PD_TIMEOUT 50
+#define HDA_DSP_RESET_TIMEOUT_US 50000
+#define HDA_DSP_BASEFW_TIMEOUT_US 3000000
+#define HDA_DSP_INIT_TIMEOUT_US 500000
+#define HDA_DSP_CTRL_RESET_TIMEOUT 100
+#define HDA_DSP_WAIT_TIMEOUT 500 /* 500 msec */
+#define HDA_DSP_REG_POLL_INTERVAL_US 500 /* 0.5 msec */
+
+#define HDA_DSP_ADSPIC_IPC 1
+#define HDA_DSP_ADSPIS_IPC 1
+
+/* Intel HD Audio General DSP Registers */
+#define HDA_DSP_GEN_BASE 0x0
+#define HDA_DSP_REG_ADSPCS (HDA_DSP_GEN_BASE + 0x04)
+#define HDA_DSP_REG_ADSPIC (HDA_DSP_GEN_BASE + 0x08)
+#define HDA_DSP_REG_ADSPIS (HDA_DSP_GEN_BASE + 0x0C)
+#define HDA_DSP_REG_ADSPIC2 (HDA_DSP_GEN_BASE + 0x10)
+#define HDA_DSP_REG_ADSPIS2 (HDA_DSP_GEN_BASE + 0x14)
+
+/* Intel HD Audio Inter-Processor Communication Registers */
+#define HDA_DSP_IPC_BASE 0x40
+#define HDA_DSP_REG_HIPCT (HDA_DSP_IPC_BASE + 0x00)
+#define HDA_DSP_REG_HIPCTE (HDA_DSP_IPC_BASE + 0x04)
+#define HDA_DSP_REG_HIPCI (HDA_DSP_IPC_BASE + 0x08)
+#define HDA_DSP_REG_HIPCIE (HDA_DSP_IPC_BASE + 0x0C)
+#define HDA_DSP_REG_HIPCCTL (HDA_DSP_IPC_BASE + 0x10)
+
+/* HIPCI */
+#define HDA_DSP_REG_HIPCI_BUSY BIT(31)
+#define HDA_DSP_REG_HIPCI_MSG_MASK 0x7FFFFFFF
+
+/* HIPCIE */
+#define HDA_DSP_REG_HIPCIE_DONE BIT(30)
+#define HDA_DSP_REG_HIPCIE_MSG_MASK 0x3FFFFFFF
+
+/* HIPCCTL */
+#define HDA_DSP_REG_HIPCCTL_DONE BIT(1)
+#define HDA_DSP_REG_HIPCCTL_BUSY BIT(0)
+
+/* HIPCT */
+#define HDA_DSP_REG_HIPCT_BUSY BIT(31)
+#define HDA_DSP_REG_HIPCT_MSG_MASK 0x7FFFFFFF
+
+/* HIPCTE */
+#define HDA_DSP_REG_HIPCTE_MSG_MASK 0x3FFFFFFF
+
+#define HDA_DSP_ADSPIC_CL_DMA 0x2
+#define HDA_DSP_ADSPIS_CL_DMA 0x2
+
+/* Delay before scheduling D0i3 entry */
+#define BXT_D0I3_DELAY 5000
+
+#define FW_CL_STREAM_NUMBER 0x1
+
+/* ADSPCS - Audio DSP Control & Status */
+
+/*
+ * Core Reset - asserted high
+ * CRST Mask for a given core mask pattern, cm
+ */
+#define HDA_DSP_ADSPCS_CRST_SHIFT 0
+#define HDA_DSP_ADSPCS_CRST_MASK(cm) ((cm) << HDA_DSP_ADSPCS_CRST_SHIFT)
+
+/*
+ * Core run/stall - when set to '1' core is stalled
+ * CSTALL Mask for a given core mask pattern, cm
+ */
+#define HDA_DSP_ADSPCS_CSTALL_SHIFT 8
+#define HDA_DSP_ADSPCS_CSTALL_MASK(cm) ((cm) << HDA_DSP_ADSPCS_CSTALL_SHIFT)
+
+/*
+ * Set Power Active - when set to '1' turn cores on
+ * SPA Mask for a given core mask pattern, cm
+ */
+#define HDA_DSP_ADSPCS_SPA_SHIFT 16
+#define HDA_DSP_ADSPCS_SPA_MASK(cm) ((cm) << HDA_DSP_ADSPCS_SPA_SHIFT)
+
+/*
+ * Current Power Active - power status of cores, set by hardware
+ * CPA Mask for a given core mask pattern, cm
+ */
+#define HDA_DSP_ADSPCS_CPA_SHIFT 24
+#define HDA_DSP_ADSPCS_CPA_MASK(cm) ((cm) << HDA_DSP_ADSPCS_CPA_SHIFT)
+
+/* Mask for a given core index, c = 0.. number of supported cores - 1 */
+#define HDA_DSP_CORE_MASK(c) BIT(c)
+
+/*
+ * Mask for a given number of cores
+ * nc = number of supported cores
+ */
+#define SOF_DSP_CORES_MASK(nc) GENMASK(((nc) - 1), 0)
+
+/* Intel HD Audio Inter-Processor Communication Registers for Cannonlake */
+#define CNL_DSP_IPC_BASE 0xc0
+#define CNL_DSP_REG_HIPCTDR (CNL_DSP_IPC_BASE + 0x00)
+#define CNL_DSP_REG_HIPCTDA (CNL_DSP_IPC_BASE + 0x04)
+#define CNL_DSP_REG_HIPCTDD (CNL_DSP_IPC_BASE + 0x08)
+#define CNL_DSP_REG_HIPCIDR (CNL_DSP_IPC_BASE + 0x10)
+#define CNL_DSP_REG_HIPCIDA (CNL_DSP_IPC_BASE + 0x14)
+#define CNL_DSP_REG_HIPCCTL (CNL_DSP_IPC_BASE + 0x28)
+
+/* HIPCI */
+#define CNL_DSP_REG_HIPCIDR_BUSY BIT(31)
+#define CNL_DSP_REG_HIPCIDR_MSG_MASK 0x7FFFFFFF
+
+/* HIPCIE */
+#define CNL_DSP_REG_HIPCIDA_DONE BIT(31)
+#define CNL_DSP_REG_HIPCIDA_MSG_MASK 0x7FFFFFFF
+
+/* HIPCCTL */
+#define CNL_DSP_REG_HIPCCTL_DONE BIT(1)
+#define CNL_DSP_REG_HIPCCTL_BUSY BIT(0)
+
+/* HIPCT */
+#define CNL_DSP_REG_HIPCTDR_BUSY BIT(31)
+#define CNL_DSP_REG_HIPCTDR_MSG_MASK 0x7FFFFFFF
+
+/* HIPCTDA */
+#define CNL_DSP_REG_HIPCTDA_DONE BIT(31)
+#define CNL_DSP_REG_HIPCTDA_MSG_MASK 0x7FFFFFFF
+
+/* HIPCTDD */
+#define CNL_DSP_REG_HIPCTDD_MSG_MASK 0x7FFFFFFF
+
+/* BDL */
+#define HDA_DSP_BDL_SIZE 4096
+#define HDA_DSP_MAX_BDL_ENTRIES \
+ (HDA_DSP_BDL_SIZE / sizeof(struct sof_intel_dsp_bdl))
+
+/* Number of DAIs */
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+#define SOF_SKL_NUM_DAIS 14
+#else
+#define SOF_SKL_NUM_DAIS 8
+#endif
+
+/* Intel HD Audio SRAM Window 0 */
+#define HDA_ADSP_SRAM0_BASE_SKL 0x8000
+
+/* Firmware status window */
+#define HDA_ADSP_FW_STATUS_SKL HDA_ADSP_SRAM0_BASE_SKL
+#define HDA_ADSP_ERROR_CODE_SKL (HDA_ADSP_FW_STATUS_SKL + 0x4)
+
+/* Host Device Memory Space */
+#define APL_SSP_BASE_OFFSET 0x2000
+#define CNL_SSP_BASE_OFFSET 0x10000
+
+/* Host Device Memory Size of a Single SSP */
+#define SSP_DEV_MEM_SIZE 0x1000
+
+/* SSP Count of the Platform */
+#define APL_SSP_COUNT 6
+#define CNL_SSP_COUNT 3
+
+/* SSP Registers */
+#define SSP_SSC1_OFFSET 0x4
+#define SSP_SET_SCLK_SLAVE BIT(25)
+#define SSP_SET_SFRM_SLAVE BIT(24)
+#define SSP_SET_SLAVE (SSP_SET_SCLK_SLAVE | SSP_SET_SFRM_SLAVE)
+
+#define HDA_IDISP_CODEC(x) ((x) & BIT(2))
+
+struct sof_intel_dsp_bdl {
+ __le32 addr_l;
+ __le32 addr_h;
+ __le32 size;
+ __le32 ioc;
+} __packed;
+
+#define SOF_HDA_PLAYBACK_STREAMS 16
+#define SOF_HDA_CAPTURE_STREAMS 16
+#define SOF_HDA_PLAYBACK 0
+#define SOF_HDA_CAPTURE 1
+
+/* represents DSP HDA controller frontend - i.e. host facing control */
+struct sof_intel_hda_dev {
+
+ struct hda_bus hbus;
+
+ /* hw config */
+ const struct sof_intel_dsp_desc *desc;
+
+ /* trace */
+ struct hdac_ext_stream *dtrace_stream;
+
+ /* if position update IPC needed */
+ u32 no_ipc_position;
+
+ /* the maximum number of streams (playback + capture) supported */
+ u32 stream_max;
+
+ int irq;
+
+ /* DMIC device */
+ struct platform_device *dmic_dev;
+};
+
+static inline struct hdac_bus *sof_to_bus(struct snd_sof_dev *s)
+{
+ struct sof_intel_hda_dev *hda = s->pdata->hw_pdata;
+
+ return &hda->hbus.core;
+}
+
+static inline struct hda_bus *sof_to_hbus(struct snd_sof_dev *s)
+{
+ struct sof_intel_hda_dev *hda = s->pdata->hw_pdata;
+
+ return &hda->hbus;
+}
+
+struct sof_intel_hda_stream {
+ struct hdac_ext_stream hda_stream;
+ struct sof_intel_stream stream;
+ int hw_params_upon_resume; /* set up hw_params upon resume */
+};
+
+#define bus_to_sof_hda(bus) \
+ container_of(bus, struct sof_intel_hda_dev, hbus.core)
+
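+/* per-stream SD register block: SOF_HDA_ADSP_SD_ENTRY_SIZE bytes each, starting at SOF_HDA_ADSP_LOADER_BASE */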
+#define SOF_STREAM_SD_OFFSET(s) \
+ (SOF_HDA_ADSP_SD_ENTRY_SIZE * ((s)->index) \
+ + SOF_HDA_ADSP_LOADER_BASE)
+
+/*
+ * DSP Core services.
+ */
+int hda_dsp_probe(struct snd_sof_dev *sdev);
+int hda_dsp_remove(struct snd_sof_dev *sdev);
+int hda_dsp_core_reset_enter(struct snd_sof_dev *sdev,
+ unsigned int core_mask);
+int hda_dsp_core_reset_leave(struct snd_sof_dev *sdev,
+ unsigned int core_mask);
+int hda_dsp_core_stall_reset(struct snd_sof_dev *sdev, unsigned int core_mask);
+int hda_dsp_core_run(struct snd_sof_dev *sdev, unsigned int core_mask);
+int hda_dsp_core_power_up(struct snd_sof_dev *sdev, unsigned int core_mask);
+int hda_dsp_enable_core(struct snd_sof_dev *sdev, unsigned int core_mask);
+int hda_dsp_core_power_down(struct snd_sof_dev *sdev, unsigned int core_mask);
+bool hda_dsp_core_is_enabled(struct snd_sof_dev *sdev,
+ unsigned int core_mask);
+int hda_dsp_core_reset_power_down(struct snd_sof_dev *sdev,
+ unsigned int core_mask);
+void hda_dsp_ipc_int_enable(struct snd_sof_dev *sdev);
+void hda_dsp_ipc_int_disable(struct snd_sof_dev *sdev);
+
+int hda_dsp_suspend(struct snd_sof_dev *sdev, int state);
+int hda_dsp_resume(struct snd_sof_dev *sdev);
+int hda_dsp_runtime_suspend(struct snd_sof_dev *sdev, int state);
+int hda_dsp_runtime_resume(struct snd_sof_dev *sdev);
+void hda_dsp_set_hw_params_upon_resume(struct snd_sof_dev *sdev);
+void hda_dsp_dump_skl(struct snd_sof_dev *sdev, u32 flags);
+void hda_dsp_dump(struct snd_sof_dev *sdev, u32 flags);
+void hda_ipc_dump(struct snd_sof_dev *sdev);
+
+/*
+ * DSP PCM Operations.
+ */
+int hda_dsp_pcm_open(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream);
+int hda_dsp_pcm_close(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream);
+int hda_dsp_pcm_hw_params(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct sof_ipc_stream_params *ipc_params);
+int hda_dsp_pcm_trigger(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream, int cmd);
+snd_pcm_uframes_t hda_dsp_pcm_pointer(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream);
+
+/*
+ * DSP Stream Operations.
+ */
+
+int hda_dsp_stream_init(struct snd_sof_dev *sdev);
+void hda_dsp_stream_free(struct snd_sof_dev *sdev);
+int hda_dsp_stream_hw_params(struct snd_sof_dev *sdev,
+ struct hdac_ext_stream *stream,
+ struct snd_dma_buffer *dmab,
+ struct snd_pcm_hw_params *params);
+int hda_dsp_stream_trigger(struct snd_sof_dev *sdev,
+ struct hdac_ext_stream *stream, int cmd);
+irqreturn_t hda_dsp_stream_interrupt(int irq, void *context);
+irqreturn_t hda_dsp_stream_threaded_handler(int irq, void *context);
+int hda_dsp_stream_setup_bdl(struct snd_sof_dev *sdev,
+ struct snd_dma_buffer *dmab,
+ struct hdac_stream *stream);
+
+struct hdac_ext_stream *
+ hda_dsp_stream_get(struct snd_sof_dev *sdev, int direction);
+int hda_dsp_stream_put(struct snd_sof_dev *sdev, int direction, int stream_tag);
+int hda_dsp_stream_spib_config(struct snd_sof_dev *sdev,
+ struct hdac_ext_stream *stream,
+ int enable, u32 size);
+
+void hda_ipc_msg_data(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream,
+ void *p, size_t sz);
+int hda_ipc_pcm_params(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream,
+ const struct sof_ipc_pcm_params_reply *reply);
+
+/*
+ * DSP IPC Operations.
+ */
+int hda_dsp_ipc_send_msg(struct snd_sof_dev *sdev,
+ struct snd_sof_ipc_msg *msg);
+void hda_dsp_ipc_get_reply(struct snd_sof_dev *sdev);
+int hda_dsp_ipc_fw_ready(struct snd_sof_dev *sdev, u32 msg_id);
+irqreturn_t hda_dsp_ipc_irq_handler(int irq, void *context);
+irqreturn_t hda_dsp_ipc_irq_thread(int irq, void *context);
+int hda_dsp_ipc_cmd_done(struct snd_sof_dev *sdev, int dir);
+
+/*
+ * DSP Code loader.
+ */
+int hda_dsp_cl_boot_firmware(struct snd_sof_dev *sdev);
+int hda_dsp_cl_boot_firmware_skl(struct snd_sof_dev *sdev);
+
+/* pre and post fw run ops */
+int hda_dsp_pre_fw_run(struct snd_sof_dev *sdev);
+int hda_dsp_post_fw_run(struct snd_sof_dev *sdev);
+
+/*
+ * HDA Controller Operations.
+ */
+int hda_dsp_ctrl_get_caps(struct snd_sof_dev *sdev);
+void hda_dsp_ctrl_ppcap_enable(struct snd_sof_dev *sdev, bool enable);
+void hda_dsp_ctrl_ppcap_int_enable(struct snd_sof_dev *sdev, bool enable);
+int hda_dsp_ctrl_link_reset(struct snd_sof_dev *sdev, bool reset);
+void hda_dsp_ctrl_misc_clock_gating(struct snd_sof_dev *sdev, bool enable);
+int hda_dsp_ctrl_clock_power_gating(struct snd_sof_dev *sdev, bool enable);
+int hda_dsp_ctrl_init_chip(struct snd_sof_dev *sdev, bool full_reset);
+
+/*
+ * HDA bus operations.
+ */
+void sof_hda_bus_init(struct hdac_bus *bus, struct device *dev,
+ const struct hdac_ext_bus_ops *ext_ops);
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+/*
+ * HDA Codec operations.
+ */
+int hda_codec_probe_bus(struct snd_sof_dev *sdev);
+
+#endif /* CONFIG_SND_SOC_SOF_HDA */
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA) && IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI)
+
+void hda_codec_i915_get(struct snd_sof_dev *sdev);
+void hda_codec_i915_put(struct snd_sof_dev *sdev);
+int hda_codec_i915_init(struct snd_sof_dev *sdev);
+int hda_codec_i915_exit(struct snd_sof_dev *sdev);
+
+#else
+
+static inline void hda_codec_i915_get(struct snd_sof_dev *sdev) { }
+static inline void hda_codec_i915_put(struct snd_sof_dev *sdev) { }
+static inline int hda_codec_i915_init(struct snd_sof_dev *sdev) { return 0; }
+static inline int hda_codec_i915_exit(struct snd_sof_dev *sdev) { return 0; }
+
+#endif /* CONFIG_SND_SOC_SOF_HDA && CONFIG_SND_SOC_HDAC_HDMI */
+
+/*
+ * Trace Control.
+ */
+int hda_dsp_trace_init(struct snd_sof_dev *sdev, u32 *stream_tag);
+int hda_dsp_trace_release(struct snd_sof_dev *sdev);
+int hda_dsp_trace_trigger(struct snd_sof_dev *sdev, int cmd);
+
+/* common dai driver */
+extern struct snd_soc_dai_driver skl_dai[];
+
+/*
+ * Platform Specific HW abstraction Ops.
+ */
+extern const struct snd_sof_dsp_ops sof_apl_ops;
+extern const struct snd_sof_dsp_ops sof_cnl_ops;
+extern const struct snd_sof_dsp_ops sof_skl_ops;
+
+extern const struct sof_intel_dsp_desc apl_chip_info;
+extern const struct sof_intel_dsp_desc cnl_chip_info;
+extern const struct sof_intel_dsp_desc skl_chip_info;
+
+#endif
diff --git a/sound/soc/sof/intel/intel-ipc.c b/sound/soc/sof/intel/intel-ipc.c
new file mode 100644
index 000000000000..4edd92151fd5
--- /dev/null
+++ b/sound/soc/sof/intel/intel-ipc.c
@@ -0,0 +1,92 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2019 Intel Corporation. All rights reserved.
+//
+// Authors: Guennadi Liakhovetski <guennadi.liakhovetski@linux.intel.com>
+
+/* Intel-specific SOF IPC code */
+
+#include <linux/device.h>
+#include <linux/export.h>
+#include <linux/module.h>
+#include <linux/types.h>
+
+#include <sound/pcm.h>
+#include <sound/sof/stream.h>
+
+#include "../ops.h"
+#include "../sof-priv.h"
+
+struct intel_stream {
+ size_t posn_offset;
+};
+
+/* Mailbox-based Intel IPC implementation */
+void intel_ipc_msg_data(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream,
+ void *p, size_t sz)
+{
+ if (!substream || !sdev->stream_box.size) {
+ sof_mailbox_read(sdev, sdev->dsp_box.offset, p, sz);
+ } else {
+ struct intel_stream *stream = substream->runtime->private_data;
+
+ /* The stream might already be closed */
+ if (stream)
+ sof_mailbox_read(sdev, stream->posn_offset, p, sz);
+ }
+}
+EXPORT_SYMBOL(intel_ipc_msg_data);
+
+int intel_ipc_pcm_params(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream,
+ const struct sof_ipc_pcm_params_reply *reply)
+{
+ struct intel_stream *stream = substream->runtime->private_data;
+ size_t posn_offset = reply->posn_offset;
+
+ /* check if offset is overflow or it is not aligned */
+ if (posn_offset > sdev->stream_box.size ||
+ posn_offset % sizeof(struct sof_ipc_stream_posn) != 0)
+ return -EINVAL;
+
+ stream->posn_offset = sdev->stream_box.offset + posn_offset;
+
+ dev_dbg(sdev->dev, "pcm: stream dir %d, posn mailbox offset is %zu",
+ substream->stream, stream->posn_offset);
+
+ return 0;
+}
+EXPORT_SYMBOL(intel_ipc_pcm_params);
+
+int intel_pcm_open(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream)
+{
+ struct intel_stream *stream = kmalloc(sizeof(*stream), GFP_KERNEL);
+
+ if (!stream)
+ return -ENOMEM;
+
+ /* binding pcm substream to hda stream */
+ substream->runtime->private_data = stream;
+
+ return 0;
+}
+EXPORT_SYMBOL(intel_pcm_open);
+
+int intel_pcm_close(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream)
+{
+ struct intel_stream *stream = substream->runtime->private_data;
+
+ substream->runtime->private_data = NULL;
+ kfree(stream);
+
+ return 0;
+}
+EXPORT_SYMBOL(intel_pcm_close);
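A hedged sketch of how these exported helpers are meant to be consumed: a platform driver would point the matching snd_sof_dsp_ops callbacks (the field names below are the ones dispatched by the wrappers in ops.h) at them. sof_example_ops is an invented name used only for illustration.

const struct snd_sof_dsp_ops sof_example_ops = {
	/* ... other platform callbacks ... */
	.ipc_msg_data	= intel_ipc_msg_data,	/* read IPC payloads from the mailbox */
	.ipc_pcm_params	= intel_ipc_pcm_params,	/* record the stream position offset */
	.pcm_open	= intel_pcm_open,	/* allocate per-substream state */
	.pcm_close	= intel_pcm_close,	/* free per-substream state */
};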
+
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/sound/soc/sof/intel/shim.h b/sound/soc/sof/intel/shim.h
new file mode 100644
index 000000000000..f7a3f62e45d4
--- /dev/null
+++ b/sound/soc/sof/intel/shim.h
@@ -0,0 +1,185 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ *
+ * Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+ */
+
+#ifndef __SOF_INTEL_SHIM_H
+#define __SOF_INTEL_SHIM_H
+
+/*
+ * SHIM registers for BYT, BSW, CHT, HSW, BDW
+ */
+
+#define SHIM_CSR (SHIM_OFFSET + 0x00)
+#define SHIM_PISR (SHIM_OFFSET + 0x08)
+#define SHIM_PIMR (SHIM_OFFSET + 0x10)
+#define SHIM_ISRX (SHIM_OFFSET + 0x18)
+#define SHIM_ISRD (SHIM_OFFSET + 0x20)
+#define SHIM_IMRX (SHIM_OFFSET + 0x28)
+#define SHIM_IMRD (SHIM_OFFSET + 0x30)
+#define SHIM_IPCX (SHIM_OFFSET + 0x38)
+#define SHIM_IPCD (SHIM_OFFSET + 0x40)
+#define SHIM_ISRSC (SHIM_OFFSET + 0x48)
+#define SHIM_ISRLPESC (SHIM_OFFSET + 0x50)
+#define SHIM_IMRSC (SHIM_OFFSET + 0x58)
+#define SHIM_IMRLPESC (SHIM_OFFSET + 0x60)
+#define SHIM_IPCSC (SHIM_OFFSET + 0x68)
+#define SHIM_IPCLPESC (SHIM_OFFSET + 0x70)
+#define SHIM_CLKCTL (SHIM_OFFSET + 0x78)
+#define SHIM_CSR2 (SHIM_OFFSET + 0x80)
+#define SHIM_LTRC (SHIM_OFFSET + 0xE0)
+#define SHIM_HMDC (SHIM_OFFSET + 0xE8)
+
+#define SHIM_PWMCTRL 0x1000
+
+/*
+ * SST SHIM register bits for BYT, BSW, CHT, HSW, BDW
+ * Register bit naming and functionality can differ between devices.
+ */
+
+/* CSR / CS */
+#define SHIM_CSR_RST BIT(1)
+#define SHIM_CSR_SBCS0 BIT(2)
+#define SHIM_CSR_SBCS1 BIT(3)
+#define SHIM_CSR_DCS(x) ((x) << 4)
+#define SHIM_CSR_DCS_MASK (0x7 << 4)
+#define SHIM_CSR_STALL BIT(10)
+#define SHIM_CSR_S0IOCS BIT(21)
+#define SHIM_CSR_S1IOCS BIT(23)
+#define SHIM_CSR_LPCS BIT(31)
+#define SHIM_CSR_24MHZ_LPCS \
+ (SHIM_CSR_SBCS0 | SHIM_CSR_SBCS1 | SHIM_CSR_LPCS)
+#define SHIM_CSR_24MHZ_NO_LPCS (SHIM_CSR_SBCS0 | SHIM_CSR_SBCS1)
+#define SHIM_BYT_CSR_RST BIT(0)
+#define SHIM_BYT_CSR_VECTOR_SEL BIT(1)
+#define SHIM_BYT_CSR_STALL BIT(2)
+#define SHIM_BYT_CSR_PWAITMODE BIT(3)
+
+/* ISRX / ISC */
+#define SHIM_ISRX_BUSY BIT(1)
+#define SHIM_ISRX_DONE BIT(0)
+#define SHIM_BYT_ISRX_REQUEST BIT(1)
+
+/* ISRD / ISD */
+#define SHIM_ISRD_BUSY BIT(1)
+#define SHIM_ISRD_DONE BIT(0)
+
+/* IMRX / IMC */
+#define SHIM_IMRX_BUSY BIT(1)
+#define SHIM_IMRX_DONE BIT(0)
+#define SHIM_BYT_IMRX_REQUEST BIT(1)
+
+/* IMRD / IMD */
+#define SHIM_IMRD_DONE BIT(0)
+#define SHIM_IMRD_BUSY BIT(1)
+#define SHIM_IMRD_SSP0 BIT(16)
+#define SHIM_IMRD_DMAC0 BIT(21)
+#define SHIM_IMRD_DMAC1 BIT(22)
+#define SHIM_IMRD_DMAC (SHIM_IMRD_DMAC0 | SHIM_IMRD_DMAC1)
+
+/* IPCX / IPCC */
+#define SHIM_IPCX_DONE BIT(30)
+#define SHIM_IPCX_BUSY BIT(31)
+#define SHIM_BYT_IPCX_DONE BIT_ULL(62)
+#define SHIM_BYT_IPCX_BUSY BIT_ULL(63)
+
+/* IPCD */
+#define SHIM_IPCD_DONE BIT(30)
+#define SHIM_IPCD_BUSY BIT(31)
+#define SHIM_BYT_IPCD_DONE BIT_ULL(62)
+#define SHIM_BYT_IPCD_BUSY BIT_ULL(63)
+
+/* CLKCTL */
+#define SHIM_CLKCTL_SMOS(x) ((x) << 24)
+#define SHIM_CLKCTL_MASK (3 << 24)
+#define SHIM_CLKCTL_DCPLCG BIT(18)
+#define SHIM_CLKCTL_SCOE1 BIT(17)
+#define SHIM_CLKCTL_SCOE0 BIT(16)
+
+/* CSR2 / CS2 */
+#define SHIM_CSR2_SDFD_SSP0 BIT(1)
+#define SHIM_CSR2_SDFD_SSP1 BIT(2)
+
+/* LTRC */
+#define SHIM_LTRC_VAL(x) ((x) << 0)
+
+/* HMDC */
+#define SHIM_HMDC_HDDA0(x) ((x) << 0)
+#define SHIM_HMDC_HDDA1(x) ((x) << 7)
+#define SHIM_HMDC_HDDA_E0_CH0 1
+#define SHIM_HMDC_HDDA_E0_CH1 2
+#define SHIM_HMDC_HDDA_E0_CH2 4
+#define SHIM_HMDC_HDDA_E0_CH3 8
+#define SHIM_HMDC_HDDA_E1_CH0 SHIM_HMDC_HDDA1(SHIM_HMDC_HDDA_E0_CH0)
+#define SHIM_HMDC_HDDA_E1_CH1 SHIM_HMDC_HDDA1(SHIM_HMDC_HDDA_E0_CH1)
+#define SHIM_HMDC_HDDA_E1_CH2 SHIM_HMDC_HDDA1(SHIM_HMDC_HDDA_E0_CH2)
+#define SHIM_HMDC_HDDA_E1_CH3 SHIM_HMDC_HDDA1(SHIM_HMDC_HDDA_E0_CH3)
+#define SHIM_HMDC_HDDA_E0_ALLCH \
+ (SHIM_HMDC_HDDA_E0_CH0 | SHIM_HMDC_HDDA_E0_CH1 | \
+ SHIM_HMDC_HDDA_E0_CH2 | SHIM_HMDC_HDDA_E0_CH3)
+#define SHIM_HMDC_HDDA_E1_ALLCH \
+ (SHIM_HMDC_HDDA_E1_CH0 | SHIM_HMDC_HDDA_E1_CH1 | \
+ SHIM_HMDC_HDDA_E1_CH2 | SHIM_HMDC_HDDA_E1_CH3)
+
+/* Audio DSP PCI registers */
+#define PCI_VDRTCTL0 0xa0
+#define PCI_VDRTCTL1 0xa4
+#define PCI_VDRTCTL2 0xa8
+#define PCI_VDRTCTL3 0xaC
+
+/* VDRTCTL0 */
+#define PCI_VDRTCL0_D3PGD BIT(0)
+#define PCI_VDRTCL0_D3SRAMPGD BIT(1)
+#define PCI_VDRTCL0_DSRAMPGE_SHIFT 12
+#define PCI_VDRTCL0_DSRAMPGE_MASK GENMASK(PCI_VDRTCL0_DSRAMPGE_SHIFT + 19,\
+ PCI_VDRTCL0_DSRAMPGE_SHIFT)
+#define PCI_VDRTCL0_ISRAMPGE_SHIFT 2
+#define PCI_VDRTCL0_ISRAMPGE_MASK GENMASK(PCI_VDRTCL0_ISRAMPGE_SHIFT + 9,\
+ PCI_VDRTCL0_ISRAMPGE_SHIFT)
+
+/* VDRTCTL2 */
+#define PCI_VDRTCL2_DCLCGE BIT(1)
+#define PCI_VDRTCL2_DTCGE BIT(10)
+#define PCI_VDRTCL2_APLLSE_MASK BIT(31)
+
+/* PMCS */
+#define PCI_PMCS 0x84
+#define PCI_PMCS_PS_MASK 0x3
+
+/* DSP hardware descriptor */
+struct sof_intel_dsp_desc {
+ int cores_num;
+ int cores_mask;
+ int init_core_mask; /* cores available after fw boot */
+ int ipc_req;
+ int ipc_req_mask;
+ int ipc_ack;
+ int ipc_ack_mask;
+ int ipc_ctl;
+ int rom_init_timeout;
+ int ssp_count; /* ssp count of the platform */
+ int ssp_base_offset; /* base address of the SSPs */
+};
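For illustration only, a chip descriptor might be populated as below; every value is invented for the example (example_chip_info is not a symbol in this patch) and the SHIM_* macros are the ones defined earlier in this header.

static const struct sof_intel_dsp_desc example_chip_info = {
	.cores_num		= 1,			/* single DSP core */
	.cores_mask		= BIT(0),
	.init_core_mask		= BIT(0),		/* core 0 is up after FW boot */
	.ipc_req		= SHIM_IPCX,		/* host -> DSP doorbell */
	.ipc_req_mask		= SHIM_IPCX_BUSY,
	.ipc_ack		= SHIM_IPCD,		/* DSP -> host acknowledge */
	.ipc_ack_mask		= SHIM_IPCD_DONE,
	.ipc_ctl		= SHIM_IMRX,		/* IPC interrupt mask register */
	.rom_init_timeout	= 300,			/* ms */
	.ssp_count		= 3,			/* invented value */
	.ssp_base_offset	= 0xa0000,		/* invented value */
};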
+
+extern const struct snd_sof_dsp_ops sof_tng_ops;
+extern const struct snd_sof_dsp_ops sof_byt_ops;
+extern const struct snd_sof_dsp_ops sof_cht_ops;
+extern const struct snd_sof_dsp_ops sof_hsw_ops;
+extern const struct snd_sof_dsp_ops sof_bdw_ops;
+
+extern const struct sof_intel_dsp_desc byt_chip_info;
+extern const struct sof_intel_dsp_desc cht_chip_info;
+extern const struct sof_intel_dsp_desc bdw_chip_info;
+extern const struct sof_intel_dsp_desc hsw_chip_info;
+extern const struct sof_intel_dsp_desc tng_chip_info;
+
+struct sof_intel_stream {
+ size_t posn_offset;
+};
+
+#endif
diff --git a/sound/soc/sof/ipc.c b/sound/soc/sof/ipc.c
new file mode 100644
index 000000000000..ba1bb17a8d1e
--- /dev/null
+++ b/sound/soc/sof/ipc.c
@@ -0,0 +1,842 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018 Intel Corporation. All rights reserved.
+//
+// Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+//
+// Generic IPC layer that can work over MMIO and SPI/I2C. PHY layer provided
+// by platform driver code.
+//
+
+#include <linux/mutex.h>
+#include <linux/types.h>
+
+#include "sof-priv.h"
+#include "ops.h"
+
+/*
+ * IPC message timeout (ms). The maximum message size is bounded by
+ * SOF_IPC_MSG_MAX_SIZE.
+ * TODO: allow platforms to set the size and timeout.
+ */
+#define IPC_TIMEOUT_MS 300
+
+static void ipc_trace_message(struct snd_sof_dev *sdev, u32 msg_id);
+static void ipc_stream_message(struct snd_sof_dev *sdev, u32 msg_cmd);
+
+/*
+ * IPC message Tx/Rx message handling.
+ */
+
+/* SOF generic IPC data */
+struct snd_sof_ipc {
+ struct snd_sof_dev *sdev;
+
+ /* protects messages and the disable flag */
+ struct mutex tx_mutex;
+ /* disables further sending of IPCs */
+ bool disable_ipc_tx;
+
+ struct snd_sof_ipc_msg msg;
+};
+
+struct sof_ipc_ctrl_data_params {
+ size_t msg_bytes;
+ size_t hdr_bytes;
+ size_t pl_size;
+ size_t elems;
+ u32 num_msg;
+ u8 *src;
+ u8 *dst;
+};
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG_VERBOSE_IPC)
+static void ipc_log_header(struct device *dev, u8 *text, u32 cmd)
+{
+ u8 *str;
+ u8 *str2 = NULL;
+ u32 glb;
+ u32 type;
+
+ glb = cmd & SOF_GLB_TYPE_MASK;
+ type = cmd & SOF_CMD_TYPE_MASK;
+
+ switch (glb) {
+ case SOF_IPC_GLB_REPLY:
+ str = "GLB_REPLY"; break;
+ case SOF_IPC_GLB_COMPOUND:
+ str = "GLB_COMPOUND"; break;
+ case SOF_IPC_GLB_TPLG_MSG:
+ str = "GLB_TPLG_MSG";
+ switch (type) {
+ case SOF_IPC_TPLG_COMP_NEW:
+ str2 = "COMP_NEW"; break;
+ case SOF_IPC_TPLG_COMP_FREE:
+ str2 = "COMP_FREE"; break;
+ case SOF_IPC_TPLG_COMP_CONNECT:
+ str2 = "COMP_CONNECT"; break;
+ case SOF_IPC_TPLG_PIPE_NEW:
+ str2 = "PIPE_NEW"; break;
+ case SOF_IPC_TPLG_PIPE_FREE:
+ str2 = "PIPE_FREE"; break;
+ case SOF_IPC_TPLG_PIPE_CONNECT:
+ str2 = "PIPE_CONNECT"; break;
+ case SOF_IPC_TPLG_PIPE_COMPLETE:
+ str2 = "PIPE_COMPLETE"; break;
+ case SOF_IPC_TPLG_BUFFER_NEW:
+ str2 = "BUFFER_NEW"; break;
+ case SOF_IPC_TPLG_BUFFER_FREE:
+ str2 = "BUFFER_FREE"; break;
+ default:
+ str2 = "unknown type"; break;
+ }
+ break;
+ case SOF_IPC_GLB_PM_MSG:
+ str = "GLB_PM_MSG";
+ switch (type) {
+ case SOF_IPC_PM_CTX_SAVE:
+ str2 = "CTX_SAVE"; break;
+ case SOF_IPC_PM_CTX_RESTORE:
+ str2 = "CTX_RESTORE"; break;
+ case SOF_IPC_PM_CTX_SIZE:
+ str2 = "CTX_SIZE"; break;
+ case SOF_IPC_PM_CLK_SET:
+ str2 = "CLK_SET"; break;
+ case SOF_IPC_PM_CLK_GET:
+ str2 = "CLK_GET"; break;
+ case SOF_IPC_PM_CLK_REQ:
+ str2 = "CLK_REQ"; break;
+ case SOF_IPC_PM_CORE_ENABLE:
+ str2 = "CORE_ENABLE"; break;
+ default:
+ str2 = "unknown type"; break;
+ }
+ break;
+ case SOF_IPC_GLB_COMP_MSG:
+ str = "GLB_COMP_MSG";
+ switch (type) {
+ case SOF_IPC_COMP_SET_VALUE:
+ str2 = "SET_VALUE"; break;
+ case SOF_IPC_COMP_GET_VALUE:
+ str2 = "GET_VALUE"; break;
+ case SOF_IPC_COMP_SET_DATA:
+ str2 = "SET_DATA"; break;
+ case SOF_IPC_COMP_GET_DATA:
+ str2 = "GET_DATA"; break;
+ default:
+ str2 = "unknown type"; break;
+ }
+ break;
+ case SOF_IPC_GLB_STREAM_MSG:
+ str = "GLB_STREAM_MSG";
+ switch (type) {
+ case SOF_IPC_STREAM_PCM_PARAMS:
+ str2 = "PCM_PARAMS"; break;
+ case SOF_IPC_STREAM_PCM_PARAMS_REPLY:
+ str2 = "PCM_REPLY"; break;
+ case SOF_IPC_STREAM_PCM_FREE:
+ str2 = "PCM_FREE"; break;
+ case SOF_IPC_STREAM_TRIG_START:
+ str2 = "TRIG_START"; break;
+ case SOF_IPC_STREAM_TRIG_STOP:
+ str2 = "TRIG_STOP"; break;
+ case SOF_IPC_STREAM_TRIG_PAUSE:
+ str2 = "TRIG_PAUSE"; break;
+ case SOF_IPC_STREAM_TRIG_RELEASE:
+ str2 = "TRIG_RELEASE"; break;
+ case SOF_IPC_STREAM_TRIG_DRAIN:
+ str2 = "TRIG_DRAIN"; break;
+ case SOF_IPC_STREAM_TRIG_XRUN:
+ str2 = "TRIG_XRUN"; break;
+ case SOF_IPC_STREAM_POSITION:
+ str2 = "POSITION"; break;
+ case SOF_IPC_STREAM_VORBIS_PARAMS:
+ str2 = "VORBIS_PARAMS"; break;
+ case SOF_IPC_STREAM_VORBIS_FREE:
+ str2 = "VORBIS_FREE"; break;
+ default:
+ str2 = "unknown type"; break;
+ }
+ break;
+ case SOF_IPC_FW_READY:
+ str = "FW_READY"; break;
+ case SOF_IPC_GLB_DAI_MSG:
+ str = "GLB_DAI_MSG";
+ switch (type) {
+ case SOF_IPC_DAI_CONFIG:
+ str2 = "CONFIG"; break;
+ case SOF_IPC_DAI_LOOPBACK:
+ str2 = "LOOPBACK"; break;
+ default:
+ str2 = "unknown type"; break;
+ }
+ break;
+ case SOF_IPC_GLB_TRACE_MSG:
+ str = "GLB_TRACE_MSG"; break;
+ default:
+ str = "unknown GLB command"; break;
+ }
+
+ if (str2)
+ dev_dbg(dev, "%s: 0x%x: %s: %s\n", text, cmd, str, str2);
+ else
+ dev_dbg(dev, "%s: 0x%x: %s\n", text, cmd, str);
+}
+#else
+static inline void ipc_log_header(struct device *dev, u8 *text, u32 cmd)
+{
+ dev_dbg(dev, "%s: 0x%x\n", text, cmd);
+}
+#endif
+
+/* wait for IPC message reply */
+static int tx_wait_done(struct snd_sof_ipc *ipc, struct snd_sof_ipc_msg *msg,
+ void *reply_data)
+{
+ struct snd_sof_dev *sdev = ipc->sdev;
+ struct sof_ipc_cmd_hdr *hdr = msg->msg_data;
+ int ret;
+
+ /* wait for DSP IPC completion */
+ ret = wait_event_timeout(msg->waitq, msg->ipc_complete,
+ msecs_to_jiffies(IPC_TIMEOUT_MS));
+
+ if (ret == 0) {
+ dev_err(sdev->dev, "error: ipc timed out for 0x%x size %d\n",
+ hdr->cmd, hdr->size);
+ snd_sof_dsp_dbg_dump(ipc->sdev, SOF_DBG_REGS | SOF_DBG_MBOX);
+ snd_sof_ipc_dump(ipc->sdev);
+ snd_sof_trace_notify_for_error(ipc->sdev);
+ ret = -ETIMEDOUT;
+ } else {
+ /* copy the data returned from DSP */
+ ret = msg->reply_error;
+ if (msg->reply_size)
+ memcpy(reply_data, msg->reply_data, msg->reply_size);
+ if (ret < 0)
+ dev_err(sdev->dev, "error: ipc error for 0x%x size %zu\n",
+ hdr->cmd, msg->reply_size);
+ else
+ ipc_log_header(sdev->dev, "ipc tx succeeded", hdr->cmd);
+ }
+
+ return ret;
+}
+
+/* send IPC message from host to DSP */
+static int sof_ipc_tx_message_unlocked(struct snd_sof_ipc *ipc, u32 header,
+ void *msg_data, size_t msg_bytes,
+ void *reply_data, size_t reply_bytes)
+{
+ struct snd_sof_dev *sdev = ipc->sdev;
+ struct snd_sof_ipc_msg *msg;
+ int ret;
+
+ if (ipc->disable_ipc_tx)
+ return -ENODEV;
+
+ /*
+ * In addition to the tx_mutex, the spin-lock is needed to protect the
+ * message object against concurrent access from atomic contexts such
+ * as the IPC interrupt handler.
+ */
+ spin_lock_irq(&sdev->ipc_lock);
+
+ /* initialise the message */
+ msg = &ipc->msg;
+
+ msg->header = header;
+ msg->msg_size = msg_bytes;
+ msg->reply_size = reply_bytes;
+ msg->reply_error = 0;
+
+ /* attach any data */
+ if (msg_bytes)
+ memcpy(msg->msg_data, msg_data, msg_bytes);
+
+ sdev->msg = msg;
+
+ ret = snd_sof_dsp_send_msg(sdev, msg);
+ /* Next reply that we receive will be related to this message */
+ if (!ret)
+ msg->ipc_complete = false;
+
+ spin_unlock_irq(&sdev->ipc_lock);
+
+ if (ret < 0) {
+ /* So far IPC TX never fails, consider making the above void */
+ dev_err_ratelimited(sdev->dev,
+ "error: ipc tx failed with error %d\n",
+ ret);
+ return ret;
+ }
+
+ ipc_log_header(sdev->dev, "ipc tx", msg->header);
+
+ /* now wait for completion */
+ if (!ret)
+ ret = tx_wait_done(ipc, msg, reply_data);
+
+ return ret;
+}
+
+/* send IPC message from host to DSP */
+int sof_ipc_tx_message(struct snd_sof_ipc *ipc, u32 header,
+ void *msg_data, size_t msg_bytes, void *reply_data,
+ size_t reply_bytes)
+{
+ int ret;
+
+ if (msg_bytes > SOF_IPC_MSG_MAX_SIZE ||
+ reply_bytes > SOF_IPC_MSG_MAX_SIZE)
+ return -ENOBUFS;
+
+ /* Serialise IPC TX */
+ mutex_lock(&ipc->tx_mutex);
+
+ ret = sof_ipc_tx_message_unlocked(ipc, header, msg_data, msg_bytes,
+ reply_data, reply_bytes);
+
+ mutex_unlock(&ipc->tx_mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL(sof_ipc_tx_message);
+
+/* handle reply message from DSP */
+int snd_sof_ipc_reply(struct snd_sof_dev *sdev, u32 msg_id)
+{
+ struct snd_sof_ipc_msg *msg = &sdev->ipc->msg;
+ unsigned long flags;
+
+ /*
+ * Protect against a theoretical race with sof_ipc_tx_message(): if the
+ * DSP is fast enough to receive an IPC message, reply to it, and the
+ * host interrupt processing calls this function on a different core
+ * from the one, where the sending is taking place, the message might
+ * not yet be marked as expecting a reply.
+ */
+ spin_lock_irqsave(&sdev->ipc_lock, flags);
+
+ if (msg->ipc_complete) {
+ spin_unlock_irqrestore(&sdev->ipc_lock, flags);
+ dev_err(sdev->dev, "error: no reply expected, received 0x%x",
+ msg_id);
+ return -EINVAL;
+ }
+
+ /* wake up the sender waiting on this message; it picks up the reply status */
+ msg->ipc_complete = true;
+ wake_up(&msg->waitq);
+
+ spin_unlock_irqrestore(&sdev->ipc_lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL(snd_sof_ipc_reply);
+
+/* DSP firmware has sent host a message */
+void snd_sof_ipc_msgs_rx(struct snd_sof_dev *sdev)
+{
+ struct sof_ipc_cmd_hdr hdr;
+ u32 cmd, type;
+ int err = 0;
+
+ /* read back header */
+ snd_sof_ipc_msg_data(sdev, NULL, &hdr, sizeof(hdr));
+ ipc_log_header(sdev->dev, "ipc rx", hdr.cmd);
+
+ cmd = hdr.cmd & SOF_GLB_TYPE_MASK;
+ type = hdr.cmd & SOF_CMD_TYPE_MASK;
+
+ /* check message type */
+ switch (cmd) {
+ case SOF_IPC_GLB_REPLY:
+ dev_err(sdev->dev, "error: ipc reply unknown\n");
+ break;
+ case SOF_IPC_FW_READY:
+ /* check for FW boot completion */
+ if (!sdev->boot_complete) {
+ err = sof_ops(sdev)->fw_ready(sdev, cmd);
+ if (err < 0) {
+ /*
+ * this indicates a mismatch in ABI
+ * between the driver and fw
+ */
+ dev_err(sdev->dev, "error: ABI mismatch %d\n",
+ err);
+ } else {
+ /* firmware boot completed OK */
+ sdev->boot_complete = true;
+ }
+
+ /* wake up firmware loader */
+ wake_up(&sdev->boot_wait);
+ }
+ break;
+ case SOF_IPC_GLB_COMPOUND:
+ case SOF_IPC_GLB_TPLG_MSG:
+ case SOF_IPC_GLB_PM_MSG:
+ case SOF_IPC_GLB_COMP_MSG:
+ break;
+ case SOF_IPC_GLB_STREAM_MSG:
+ /* need to pass msg id into the function */
+ ipc_stream_message(sdev, hdr.cmd);
+ break;
+ case SOF_IPC_GLB_TRACE_MSG:
+ ipc_trace_message(sdev, type);
+ break;
+ default:
+ dev_err(sdev->dev, "error: unknown DSP message 0x%x\n", cmd);
+ break;
+ }
+
+ ipc_log_header(sdev->dev, "ipc rx done", hdr.cmd);
+}
+EXPORT_SYMBOL(snd_sof_ipc_msgs_rx);
+
+/*
+ * IPC trace mechanism.
+ */
+
+static void ipc_trace_message(struct snd_sof_dev *sdev, u32 msg_id)
+{
+ struct sof_ipc_dma_trace_posn posn;
+
+ switch (msg_id) {
+ case SOF_IPC_TRACE_DMA_POSITION:
+ /* read back full message */
+ snd_sof_ipc_msg_data(sdev, NULL, &posn, sizeof(posn));
+ snd_sof_trace_update_pos(sdev, &posn);
+ break;
+ default:
+ dev_err(sdev->dev, "error: unhandled trace message %x\n",
+ msg_id);
+ break;
+ }
+}
+
+/*
+ * IPC stream position.
+ */
+
+static void ipc_period_elapsed(struct snd_sof_dev *sdev, u32 msg_id)
+{
+ struct snd_sof_pcm_stream *stream;
+ struct sof_ipc_stream_posn posn;
+ struct snd_sof_pcm *spcm;
+ int direction;
+
+ spcm = snd_sof_find_spcm_comp(sdev, msg_id, &direction);
+ if (!spcm) {
+ dev_err(sdev->dev,
+ "error: period elapsed for unknown stream, msg_id %d\n",
+ msg_id);
+ return;
+ }
+
+ stream = &spcm->stream[direction];
+ snd_sof_ipc_msg_data(sdev, stream->substream, &posn, sizeof(posn));
+
+ dev_dbg(sdev->dev, "posn : host 0x%llx dai 0x%llx wall 0x%llx\n",
+ posn.host_posn, posn.dai_posn, posn.wallclock);
+
+ memcpy(&stream->posn, &posn, sizeof(posn));
+
+ /* only inform ALSA for period_wakeup mode */
+ if (!stream->substream->runtime->no_period_wakeup)
+ snd_sof_pcm_period_elapsed(stream->substream);
+}
+
+/* DSP notifies host of an XRUN within FW */
+static void ipc_xrun(struct snd_sof_dev *sdev, u32 msg_id)
+{
+ struct snd_sof_pcm_stream *stream;
+ struct sof_ipc_stream_posn posn;
+ struct snd_sof_pcm *spcm;
+ int direction;
+
+ spcm = snd_sof_find_spcm_comp(sdev, msg_id, &direction);
+ if (!spcm) {
+ dev_err(sdev->dev, "error: XRUN for unknown stream, msg_id %d\n",
+ msg_id);
+ return;
+ }
+
+ stream = &spcm->stream[direction];
+ snd_sof_ipc_msg_data(sdev, stream->substream, &posn, sizeof(posn));
+
+ dev_dbg(sdev->dev, "posn XRUN: host %llx comp %d size %d\n",
+ posn.host_posn, posn.xrun_comp_id, posn.xrun_size);
+
+#if defined(CONFIG_SND_SOC_SOF_DEBUG_XRUN_STOP)
+ /* stop PCM on XRUN - used for pipeline debug */
+ memcpy(&stream->posn, &posn, sizeof(posn));
+ snd_pcm_stop_xrun(stream->substream);
+#endif
+}
+
+/* stream notifications from DSP FW */
+static void ipc_stream_message(struct snd_sof_dev *sdev, u32 msg_cmd)
+{
+ /* get msg cmd type and msg id */
+ u32 msg_type = msg_cmd & SOF_CMD_TYPE_MASK;
+ u32 msg_id = SOF_IPC_MESSAGE_ID(msg_cmd);
+
+ switch (msg_type) {
+ case SOF_IPC_STREAM_POSITION:
+ ipc_period_elapsed(sdev, msg_id);
+ break;
+ case SOF_IPC_STREAM_TRIG_XRUN:
+ ipc_xrun(sdev, msg_id);
+ break;
+ default:
+ dev_err(sdev->dev, "error: unhandled stream message %x\n",
+ msg_id);
+ break;
+ }
+}
+
+/* get stream position IPC - use faster MMIO method if available on platform */
+int snd_sof_ipc_stream_posn(struct snd_sof_dev *sdev,
+ struct snd_sof_pcm *spcm, int direction,
+ struct sof_ipc_stream_posn *posn)
+{
+ struct sof_ipc_stream stream;
+ int err;
+
+ /* read position via slower IPC */
+ stream.hdr.size = sizeof(stream);
+ stream.hdr.cmd = SOF_IPC_GLB_STREAM_MSG | SOF_IPC_STREAM_POSITION;
+ stream.comp_id = spcm->stream[direction].comp_id;
+
+ /* send IPC to the DSP */
+ err = sof_ipc_tx_message(sdev->ipc,
+ stream.hdr.cmd, &stream, sizeof(stream), posn,
+ sizeof(*posn));
+ if (err < 0) {
+ dev_err(sdev->dev, "error: failed to get stream %d position\n",
+ stream.comp_id);
+ return err;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(snd_sof_ipc_stream_posn);
+
+static int sof_get_ctrl_copy_params(enum sof_ipc_ctrl_type ctrl_type,
+ struct sof_ipc_ctrl_data *src,
+ struct sof_ipc_ctrl_data *dst,
+ struct sof_ipc_ctrl_data_params *sparams)
+{
+ switch (ctrl_type) {
+ case SOF_CTRL_TYPE_VALUE_CHAN_GET:
+ case SOF_CTRL_TYPE_VALUE_CHAN_SET:
+ sparams->src = (u8 *)src->chanv;
+ sparams->dst = (u8 *)dst->chanv;
+ break;
+ case SOF_CTRL_TYPE_VALUE_COMP_GET:
+ case SOF_CTRL_TYPE_VALUE_COMP_SET:
+ sparams->src = (u8 *)src->compv;
+ sparams->dst = (u8 *)dst->compv;
+ break;
+ case SOF_CTRL_TYPE_DATA_GET:
+ case SOF_CTRL_TYPE_DATA_SET:
+ sparams->src = (u8 *)src->data->data;
+ sparams->dst = (u8 *)dst->data->data;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* calculate payload size and number of messages */
+ sparams->pl_size = SOF_IPC_MSG_MAX_SIZE - sparams->hdr_bytes;
+ sparams->num_msg = DIV_ROUND_UP(sparams->msg_bytes, sparams->pl_size);
+
+ return 0;
+}
+
+static int sof_set_get_large_ctrl_data(struct snd_sof_dev *sdev,
+ struct sof_ipc_ctrl_data *cdata,
+ struct sof_ipc_ctrl_data_params *sparams,
+ bool send)
+{
+ struct sof_ipc_ctrl_data *partdata;
+ size_t send_bytes;
+ size_t offset = 0;
+ size_t msg_bytes;
+ size_t pl_size;
+ int err = 0;
+ int i;
+
+ /* allocate max ipc size because we have at least one */
+ partdata = kzalloc(SOF_IPC_MSG_MAX_SIZE, GFP_KERNEL);
+ if (!partdata)
+ return -ENOMEM;
+
+ if (send)
+ err = sof_get_ctrl_copy_params(cdata->type, cdata, partdata, sparams);
+ else
+ err = sof_get_ctrl_copy_params(cdata->type, partdata, cdata, sparams);
+ if (err < 0) {
+ kfree(partdata);
+ return err;
+ }
+
+ msg_bytes = sparams->msg_bytes;
+ pl_size = sparams->pl_size;
+
+ /* copy the header data */
+ memcpy(partdata, cdata, sparams->hdr_bytes);
+
+ /* Serialise IPC TX */
+ mutex_lock(&sdev->ipc->tx_mutex);
+
+ /* copy the payload data in a loop */
+ for (i = 0; i < sparams->num_msg; i++) {
+ send_bytes = min(msg_bytes, pl_size);
+ partdata->num_elems = send_bytes;
+ partdata->rhdr.hdr.size = sparams->hdr_bytes + send_bytes;
+ partdata->msg_index = i;
+ msg_bytes -= send_bytes;
+ partdata->elems_remaining = msg_bytes;
+
+ if (send)
+ memcpy(sparams->dst, sparams->src + offset, send_bytes);
+
+ err = sof_ipc_tx_message_unlocked(sdev->ipc,
+ partdata->rhdr.hdr.cmd,
+ partdata,
+ partdata->rhdr.hdr.size,
+ partdata,
+ partdata->rhdr.hdr.size);
+ if (err < 0)
+ break;
+
+ if (!send)
+ memcpy(sparams->dst + offset, sparams->src, send_bytes);
+
+ offset += pl_size;
+ }
+
+ mutex_unlock(&sdev->ipc->tx_mutex);
+
+ kfree(partdata);
+ return err;
+}
+
+/*
+ * IPC get()/set() for kcontrols.
+ */
+int snd_sof_ipc_set_get_comp_data(struct snd_sof_ipc *ipc,
+ struct snd_sof_control *scontrol,
+ u32 ipc_cmd,
+ enum sof_ipc_ctrl_type ctrl_type,
+ enum sof_ipc_ctrl_cmd ctrl_cmd,
+ bool send)
+{
+ struct sof_ipc_ctrl_data *cdata = scontrol->control_data;
+ struct snd_sof_dev *sdev = ipc->sdev;
+ struct sof_ipc_fw_ready *ready = &sdev->fw_ready;
+ struct sof_ipc_fw_version *v = &ready->version;
+ struct sof_ipc_ctrl_data_params sparams;
+ size_t send_bytes;
+ int err;
+
+ /* read or write firmware volume */
+ if (scontrol->readback_offset != 0) {
+ /* write/read channel values via the memory-mapped readback region */
+ send_bytes = sizeof(struct sof_ipc_ctrl_value_chan) *
+ cdata->num_elems;
+ if (send)
+ snd_sof_dsp_block_write(sdev, sdev->mmio_bar,
+ scontrol->readback_offset,
+ cdata->chanv, send_bytes);
+
+ else
+ snd_sof_dsp_block_read(sdev, sdev->mmio_bar,
+ scontrol->readback_offset,
+ cdata->chanv, send_bytes);
+ return 0;
+ }
+
+ cdata->rhdr.hdr.cmd = SOF_IPC_GLB_COMP_MSG | ipc_cmd;
+ cdata->cmd = ctrl_cmd;
+ cdata->type = ctrl_type;
+ cdata->comp_id = scontrol->comp_id;
+ cdata->msg_index = 0;
+
+ /* calculate header and data size */
+ switch (cdata->type) {
+ case SOF_CTRL_TYPE_VALUE_CHAN_GET:
+ case SOF_CTRL_TYPE_VALUE_CHAN_SET:
+ sparams.msg_bytes = scontrol->num_channels *
+ sizeof(struct sof_ipc_ctrl_value_chan);
+ sparams.hdr_bytes = sizeof(struct sof_ipc_ctrl_data);
+ sparams.elems = scontrol->num_channels;
+ break;
+ case SOF_CTRL_TYPE_VALUE_COMP_GET:
+ case SOF_CTRL_TYPE_VALUE_COMP_SET:
+ sparams.msg_bytes = scontrol->num_channels *
+ sizeof(struct sof_ipc_ctrl_value_comp);
+ sparams.hdr_bytes = sizeof(struct sof_ipc_ctrl_data);
+ sparams.elems = scontrol->num_channels;
+ break;
+ case SOF_CTRL_TYPE_DATA_GET:
+ case SOF_CTRL_TYPE_DATA_SET:
+ sparams.msg_bytes = cdata->data->size;
+ sparams.hdr_bytes = sizeof(struct sof_ipc_ctrl_data) +
+ sizeof(struct sof_abi_hdr);
+ sparams.elems = cdata->data->size;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ cdata->rhdr.hdr.size = sparams.msg_bytes + sparams.hdr_bytes;
+ cdata->num_elems = sparams.elems;
+ cdata->elems_remaining = 0;
+
+ /* send normal size ipc in one part */
+ if (cdata->rhdr.hdr.size <= SOF_IPC_MSG_MAX_SIZE) {
+ err = sof_ipc_tx_message(sdev->ipc, cdata->rhdr.hdr.cmd, cdata,
+ cdata->rhdr.hdr.size, cdata,
+ cdata->rhdr.hdr.size);
+
+ if (err < 0)
+ dev_err(sdev->dev, "error: set/get ctrl ipc comp %d\n",
+ cdata->comp_id);
+
+ return err;
+ }
+
+ /* data is bigger than max ipc size, chop into smaller pieces */
+ dev_dbg(sdev->dev, "large ipc size %u, control size %u\n",
+ cdata->rhdr.hdr.size, scontrol->size);
+
+ /* large messages are only supported from ABI 3.3.0 onwards */
+ if (v->abi_version < SOF_ABI_VER(3, 3, 0)) {
+ dev_err(sdev->dev, "error: incompatible FW ABI version\n");
+ return -EINVAL;
+ }
+
+ err = sof_set_get_large_ctrl_data(sdev, cdata, &sparams, send);
+
+ if (err < 0)
+ dev_err(sdev->dev, "error: set/get large ctrl ipc comp %d\n",
+ cdata->comp_id);
+
+ return err;
+}
+EXPORT_SYMBOL(snd_sof_ipc_set_get_comp_data);
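A minimal caller sketch, assuming the SOF_CTRL_CMD_VOLUME command from the SOF ABI headers; example_volume_put is an invented helper, not part of this patch. A kcontrol put handler would push updated channel values to the DSP like this:

static int example_volume_put(struct snd_sof_dev *sdev,
			      struct snd_sof_control *scontrol)
{
	/* send the updated per-channel values to the DSP component */
	return snd_sof_ipc_set_get_comp_data(sdev->ipc, scontrol,
					     SOF_IPC_COMP_SET_VALUE,
					     SOF_CTRL_TYPE_VALUE_CHAN_SET,
					     SOF_CTRL_CMD_VOLUME, true);
}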
+
+/*
+ * IPC layer enumeration.
+ */
+
+int snd_sof_dsp_mailbox_init(struct snd_sof_dev *sdev, u32 dspbox,
+ size_t dspbox_size, u32 hostbox,
+ size_t hostbox_size)
+{
+ sdev->dsp_box.offset = dspbox;
+ sdev->dsp_box.size = dspbox_size;
+ sdev->host_box.offset = hostbox;
+ sdev->host_box.size = hostbox_size;
+ return 0;
+}
+EXPORT_SYMBOL(snd_sof_dsp_mailbox_init);
+
+int snd_sof_ipc_valid(struct snd_sof_dev *sdev)
+{
+ struct sof_ipc_fw_ready *ready = &sdev->fw_ready;
+ struct sof_ipc_fw_version *v = &ready->version;
+
+ dev_info(sdev->dev,
+ "Firmware info: version %d:%d:%d-%s\n", v->major, v->minor,
+ v->micro, v->tag);
+ dev_info(sdev->dev,
+ "Firmware: ABI %d:%d:%d Kernel ABI %d:%d:%d\n",
+ SOF_ABI_VERSION_MAJOR(v->abi_version),
+ SOF_ABI_VERSION_MINOR(v->abi_version),
+ SOF_ABI_VERSION_PATCH(v->abi_version),
+ SOF_ABI_MAJOR, SOF_ABI_MINOR, SOF_ABI_PATCH);
+
+ if (SOF_ABI_VERSION_INCOMPATIBLE(SOF_ABI_VERSION, v->abi_version)) {
+ dev_err(sdev->dev, "error: incompatible FW ABI version\n");
+ return -EINVAL;
+ }
+
+ if (v->abi_version > SOF_ABI_VERSION) {
+ if (!IS_ENABLED(CONFIG_SND_SOC_SOF_STRICT_ABI_CHECKS)) {
+ dev_warn(sdev->dev, "warn: FW ABI is more recent than kernel\n");
+ } else {
+ dev_err(sdev->dev, "error: FW ABI is more recent than kernel\n");
+ return -EINVAL;
+ }
+ }
+
+ if (ready->debug.bits.build) {
+ dev_info(sdev->dev,
+ "Firmware debug build %d on %s-%s - options:\n"
+ " GDB: %s\n"
+ " lock debug: %s\n"
+ " lock vdebug: %s\n",
+ v->build, v->date, v->time,
+ ready->debug.bits.gdb ? "enabled" : "disabled",
+ ready->debug.bits.locks ? "enabled" : "disabled",
+ ready->debug.bits.locks_verbose ? "enabled" : "disabled");
+ }
+
+ /* copy the fw_version into debugfs at first boot */
+ memcpy(&sdev->fw_version, v, sizeof(*v));
+
+ return 0;
+}
+EXPORT_SYMBOL(snd_sof_ipc_valid);
+
+struct snd_sof_ipc *snd_sof_ipc_init(struct snd_sof_dev *sdev)
+{
+ struct snd_sof_ipc *ipc;
+ struct snd_sof_ipc_msg *msg;
+
+ /* check if mandatory ops required for ipc are defined */
+ if (!sof_ops(sdev)->fw_ready) {
+ dev_err(sdev->dev, "error: ipc mandatory ops not defined\n");
+ return NULL;
+ }
+
+ ipc = devm_kzalloc(sdev->dev, sizeof(*ipc), GFP_KERNEL);
+ if (!ipc)
+ return NULL;
+
+ mutex_init(&ipc->tx_mutex);
+ ipc->sdev = sdev;
+ msg = &ipc->msg;
+
+ /* indicate that we aren't sending a message ATM */
+ msg->ipc_complete = true;
+
+ /* pre-allocate message data */
+ msg->msg_data = devm_kzalloc(sdev->dev, SOF_IPC_MSG_MAX_SIZE,
+ GFP_KERNEL);
+ if (!msg->msg_data)
+ return NULL;
+
+ msg->reply_data = devm_kzalloc(sdev->dev, SOF_IPC_MSG_MAX_SIZE,
+ GFP_KERNEL);
+ if (!msg->reply_data)
+ return NULL;
+
+ init_waitqueue_head(&msg->waitq);
+
+ return ipc;
+}
+EXPORT_SYMBOL(snd_sof_ipc_init);
+
+void snd_sof_ipc_free(struct snd_sof_dev *sdev)
+{
+ struct snd_sof_ipc *ipc = sdev->ipc;
+
+ /* disable sending of IPCs */
+ mutex_lock(&ipc->tx_mutex);
+ ipc->disable_ipc_tx = true;
+ mutex_unlock(&ipc->tx_mutex);
+}
+EXPORT_SYMBOL(snd_sof_ipc_free);
diff --git a/sound/soc/sof/loader.c b/sound/soc/sof/loader.c
new file mode 100644
index 000000000000..81c7452aae17
--- /dev/null
+++ b/sound/soc/sof/loader.c
@@ -0,0 +1,400 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018 Intel Corporation. All rights reserved.
+//
+// Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+//
+// Generic firmware loader.
+//
+
+#include <linux/firmware.h>
+#include <sound/sof.h>
+#include "ops.h"
+
+static int get_ext_windows(struct snd_sof_dev *sdev,
+ struct sof_ipc_ext_data_hdr *ext_hdr)
+{
+ struct sof_ipc_window *w =
+ container_of(ext_hdr, struct sof_ipc_window, ext_hdr);
+ size_t size;
+
+ if (w->num_windows == 0 || w->num_windows > SOF_IPC_MAX_ELEMS)
+ return -EINVAL;
+
+ size = sizeof(*w) + sizeof(struct sof_ipc_window_elem) * w->num_windows;
+
+ /* keep a local copy of the data */
+ sdev->info_window = kmemdup(w, size, GFP_KERNEL);
+ if (!sdev->info_window)
+ return -ENOMEM;
+
+ return 0;
+}
+
+/* parse the extended FW boot data structures from FW boot message */
+int snd_sof_fw_parse_ext_data(struct snd_sof_dev *sdev, u32 bar, u32 offset)
+{
+ struct sof_ipc_ext_data_hdr *ext_hdr;
+ void *ext_data;
+ int ret = 0;
+
+ ext_data = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!ext_data)
+ return -ENOMEM;
+
+ /* get first header */
+ snd_sof_dsp_block_read(sdev, bar, offset, ext_data,
+ sizeof(*ext_hdr));
+ ext_hdr = ext_data;
+
+ while (ext_hdr->hdr.cmd == SOF_IPC_FW_READY) {
+ /* read in ext structure */
+ offset += sizeof(*ext_hdr);
+ snd_sof_dsp_block_read(sdev, bar, offset,
+ (void *)((u8 *)ext_data + sizeof(*ext_hdr)),
+ ext_hdr->hdr.size - sizeof(*ext_hdr));
+
+ dev_dbg(sdev->dev, "found ext header type %d size 0x%x\n",
+ ext_hdr->type, ext_hdr->hdr.size);
+
+ /* process structure data */
+ switch (ext_hdr->type) {
+ case SOF_IPC_EXT_DMA_BUFFER:
+ break;
+ case SOF_IPC_EXT_WINDOW:
+ ret = get_ext_windows(sdev, ext_hdr);
+ break;
+ default:
+ break;
+ }
+
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: failed to parse ext data type %d\n",
+ ext_hdr->type);
+ break;
+ }
+
+ /* move to next header */
+ offset += ext_hdr->hdr.size;
+ snd_sof_dsp_block_read(sdev, bar, offset, ext_data,
+ sizeof(*ext_hdr));
+ ext_hdr = ext_data;
+ }
+
+ kfree(ext_data);
+ return ret;
+}
+EXPORT_SYMBOL(snd_sof_fw_parse_ext_data);
+
+/* generic module parser for memory-mapped DSPs */
+int snd_sof_parse_module_memcpy(struct snd_sof_dev *sdev,
+ struct snd_sof_mod_hdr *module)
+{
+ struct snd_sof_blk_hdr *block;
+ int count;
+ u32 offset;
+ size_t remaining;
+
+ dev_dbg(sdev->dev, "new module size 0x%x blocks 0x%x type 0x%x\n",
+ module->size, module->num_blocks, module->type);
+
+ block = (struct snd_sof_blk_hdr *)((u8 *)module + sizeof(*module));
+
+ /* module->size doesn't include header size */
+ remaining = module->size;
+ for (count = 0; count < module->num_blocks; count++) {
+ /* check for wrap */
+ if (remaining < sizeof(*block)) {
+ dev_err(sdev->dev, "error: not enough data remaining\n");
+ return -EINVAL;
+ }
+
+ /* minus header size of block */
+ remaining -= sizeof(*block);
+
+ if (block->size == 0) {
+ dev_warn(sdev->dev,
+ "warning: block %d size zero\n", count);
+ dev_warn(sdev->dev, " type 0x%x offset 0x%x\n",
+ block->type, block->offset);
+ continue;
+ }
+
+ switch (block->type) {
+ case SOF_FW_BLK_TYPE_RSRVD0:
+ case SOF_FW_BLK_TYPE_SRAM ... SOF_FW_BLK_TYPE_RSRVD14:
+ continue; /* not handled at the moment */
+ case SOF_FW_BLK_TYPE_IRAM:
+ case SOF_FW_BLK_TYPE_DRAM:
+ offset = block->offset;
+ break;
+ default:
+ dev_err(sdev->dev, "error: bad type 0x%x for block 0x%x\n",
+ block->type, count);
+ return -EINVAL;
+ }
+
+ dev_dbg(sdev->dev,
+ "block %d type 0x%x size 0x%x ==> offset 0x%x\n",
+ count, block->type, block->size, offset);
+
+ /* checking block->size to avoid unaligned access */
+ if (block->size % sizeof(u32)) {
+ dev_err(sdev->dev, "error: invalid block size 0x%x\n",
+ block->size);
+ return -EINVAL;
+ }
+ snd_sof_dsp_block_write(sdev, sdev->mmio_bar, offset,
+ block + 1, block->size);
+
+ if (remaining < block->size) {
+ dev_err(sdev->dev, "error: not enough data remaining\n");
+ return -EINVAL;
+ }
+
+ /* minus body size of block */
+ remaining -= block->size;
+ /* next block */
+ block = (struct snd_sof_blk_hdr *)((u8 *)block + sizeof(*block)
+ + block->size);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(snd_sof_parse_module_memcpy);
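A sketch of the firmware image layout implied by the parsers in this file (field names are taken from the code above and below; sizes are not to scale):

/*
 *   +----------------------------+  <- fw->data
 *   | struct snd_sof_fw_header   |  sig, file_size, num_modules, abi
 *   +----------------------------+
 *   | struct snd_sof_mod_hdr     |  module 0: size, num_blocks, type
 *   |   struct snd_sof_blk_hdr   |    block 0: type, size, offset
 *   |   <block 0 payload>        |
 *   |   ... more blocks ...      |
 *   +----------------------------+
 *   | ... more modules ...       |
 *   +----------------------------+
 */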
+
+static int check_header(struct snd_sof_dev *sdev, const struct firmware *fw)
+{
+ struct snd_sof_fw_header *header;
+
+ /* Read the header information from the data pointer */
+ header = (struct snd_sof_fw_header *)fw->data;
+
+ /* verify FW sig */
+ if (strncmp(header->sig, SND_SOF_FW_SIG, SND_SOF_FW_SIG_SIZE) != 0) {
+ dev_err(sdev->dev, "error: invalid firmware signature\n");
+ return -EINVAL;
+ }
+
+ /* check size is valid */
+ if (fw->size != header->file_size + sizeof(*header)) {
+ dev_err(sdev->dev, "error: file size mismatch, got 0x%zx expected 0x%zx\n",
+ fw->size, header->file_size + sizeof(*header));
+ return -EINVAL;
+ }
+
+ dev_dbg(sdev->dev, "header size=0x%x modules=0x%x abi=0x%x size=%zu\n",
+ header->file_size, header->num_modules,
+ header->abi, sizeof(*header));
+
+ return 0;
+}
+
+static int load_modules(struct snd_sof_dev *sdev, const struct firmware *fw)
+{
+ struct snd_sof_fw_header *header;
+ struct snd_sof_mod_hdr *module;
+ int (*load_module)(struct snd_sof_dev *sof_dev,
+ struct snd_sof_mod_hdr *hdr);
+ int ret, count;
+ size_t remaining;
+
+ header = (struct snd_sof_fw_header *)fw->data;
+ load_module = sof_ops(sdev)->load_module;
+ if (!load_module)
+ return -EINVAL;
+
+ /* parse each module */
+ module = (struct snd_sof_mod_hdr *)((u8 *)(fw->data) + sizeof(*header));
+ remaining = fw->size - sizeof(*header);
+ /* check for wrap */
+ if (remaining > fw->size) {
+ dev_err(sdev->dev, "error: fw size smaller than header size\n");
+ return -EINVAL;
+ }
+
+ for (count = 0; count < header->num_modules; count++) {
+ /* check for wrap */
+ if (remaining < sizeof(*module)) {
+ dev_err(sdev->dev, "error: not enough data remaining\n");
+ return -EINVAL;
+ }
+
+ /* minus header size of module */
+ remaining -= sizeof(*module);
+
+ /* module */
+ ret = load_module(sdev, module);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: invalid module %d\n", count);
+ return ret;
+ }
+
+ if (remaining < module->size) {
+ dev_err(sdev->dev, "error: not enough data remaining\n");
+ return -EINVAL;
+ }
+
+ /* minus body size of module */
+ remaining -= module->size;
+ module = (struct snd_sof_mod_hdr *)((u8 *)module
+ + sizeof(*module) + module->size);
+ }
+
+ return 0;
+}
+
+int snd_sof_load_firmware_raw(struct snd_sof_dev *sdev)
+{
+ struct snd_sof_pdata *plat_data = sdev->pdata;
+ const char *fw_filename;
+ int ret;
+
+ /* set code loading condition to true */
+ sdev->code_loading = 1;
+
+ /* Don't request firmware again if firmware is already requested */
+ if (plat_data->fw)
+ return 0;
+
+ fw_filename = kasprintf(GFP_KERNEL, "%s/%s",
+ plat_data->fw_filename_prefix,
+ plat_data->fw_filename);
+ if (!fw_filename)
+ return -ENOMEM;
+
+ ret = request_firmware(&plat_data->fw, fw_filename, sdev->dev);
+
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: request firmware %s failed err: %d\n",
+ fw_filename, ret);
+ }
+
+ kfree(fw_filename);
+
+ return ret;
+}
+EXPORT_SYMBOL(snd_sof_load_firmware_raw);
+
+int snd_sof_load_firmware_memcpy(struct snd_sof_dev *sdev)
+{
+ struct snd_sof_pdata *plat_data = sdev->pdata;
+ int ret;
+
+ ret = snd_sof_load_firmware_raw(sdev);
+ if (ret < 0)
+ return ret;
+
+ /* make sure the FW header and file is valid */
+ ret = check_header(sdev, plat_data->fw);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: invalid FW header\n");
+ goto error;
+ }
+
+ /* prepare the DSP for FW loading */
+ ret = snd_sof_dsp_reset(sdev);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: failed to reset DSP\n");
+ goto error;
+ }
+
+ /* parse and load firmware modules to DSP */
+ ret = load_modules(sdev, plat_data->fw);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: invalid FW modules\n");
+ goto error;
+ }
+
+ return 0;
+
+error:
+ release_firmware(plat_data->fw);
+ plat_data->fw = NULL;
+ return ret;
+}
+EXPORT_SYMBOL(snd_sof_load_firmware_memcpy);
+
+int snd_sof_load_firmware(struct snd_sof_dev *sdev)
+{
+ dev_dbg(sdev->dev, "loading firmware\n");
+
+ if (sof_ops(sdev)->load_firmware)
+ return sof_ops(sdev)->load_firmware(sdev);
+ return 0;
+}
+EXPORT_SYMBOL(snd_sof_load_firmware);
+
+int snd_sof_run_firmware(struct snd_sof_dev *sdev)
+{
+ int ret;
+ int init_core_mask;
+
+ init_waitqueue_head(&sdev->boot_wait);
+ sdev->boot_complete = false;
+
+ /* create fw_version debugfs to store boot version info */
+ if (sdev->first_boot) {
+ ret = snd_sof_debugfs_buf_item(sdev, &sdev->fw_version,
+ sizeof(sdev->fw_version),
+ "fw_version");
+ /* errors are only due to memory allocation, not debugfs */
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: snd_sof_debugfs_buf_item failed\n");
+ return ret;
+ }
+ }
+
+ /* perform pre fw run operations */
+ ret = snd_sof_dsp_pre_fw_run(sdev);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: failed pre fw run op\n");
+ return ret;
+ }
+
+ dev_dbg(sdev->dev, "booting DSP firmware\n");
+
+ /* boot the firmware on the DSP */
+ ret = snd_sof_dsp_run(sdev);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: failed to start DSP\n");
+ return ret;
+ }
+
+ init_core_mask = ret;
+
+ /* now wait for the DSP to boot */
+ ret = wait_event_timeout(sdev->boot_wait, sdev->boot_complete,
+ msecs_to_jiffies(sdev->boot_timeout));
+ if (ret == 0) {
+ dev_err(sdev->dev, "error: firmware boot failure\n");
+ snd_sof_dsp_dbg_dump(sdev, SOF_DBG_REGS | SOF_DBG_MBOX |
+ SOF_DBG_TEXT | SOF_DBG_PCI);
+ return -EIO;
+ }
+
+ dev_info(sdev->dev, "firmware boot complete\n");
+
+ /* perform post fw run operations */
+ ret = snd_sof_dsp_post_fw_run(sdev);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: failed post fw run op\n");
+ return ret;
+ }
+
+ /* fw boot is complete. Update the active cores mask */
+ sdev->enabled_cores_mask = init_core_mask;
+
+ return 0;
+}
+EXPORT_SYMBOL(snd_sof_run_firmware);
+
+void snd_sof_fw_unload(struct snd_sof_dev *sdev)
+{
+ /* TODO: support module unloading at runtime */
+}
+EXPORT_SYMBOL(snd_sof_fw_unload);
diff --git a/sound/soc/sof/nocodec.c b/sound/soc/sof/nocodec.c
new file mode 100644
index 000000000000..f84b4344dcc3
--- /dev/null
+++ b/sound/soc/sof/nocodec.c
@@ -0,0 +1,109 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018 Intel Corporation. All rights reserved.
+//
+// Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+//
+
+#include <linux/module.h>
+#include <sound/sof.h>
+#include "sof-priv.h"
+
+static struct snd_soc_card sof_nocodec_card = {
+ .name = "nocodec", /* the sof- prefix is added by the core */
+};
+
+static int sof_nocodec_bes_setup(struct device *dev,
+ const struct snd_sof_dsp_ops *ops,
+ struct snd_soc_dai_link *links,
+ int link_num, struct snd_soc_card *card)
+{
+ int i;
+
+ if (!ops || !links || !card)
+ return -EINVAL;
+
+ /* set up BE dai_links */
+ for (i = 0; i < link_num; i++) {
+ links[i].name = devm_kasprintf(dev, GFP_KERNEL,
+ "NoCodec-%d", i);
+ if (!links[i].name)
+ return -ENOMEM;
+
+ links[i].id = i;
+ links[i].no_pcm = 1;
+ links[i].cpu_dai_name = ops->drv[i].name;
+ links[i].platform_name = dev_name(dev);
+ links[i].codec_dai_name = "snd-soc-dummy-dai";
+ links[i].codec_name = "snd-soc-dummy";
+ links[i].dpcm_playback = 1;
+ links[i].dpcm_capture = 1;
+ }
+
+ card->dai_link = links;
+ card->num_links = link_num;
+
+ return 0;
+}
+
+int sof_nocodec_setup(struct device *dev,
+ struct snd_sof_pdata *sof_pdata,
+ struct snd_soc_acpi_mach *mach,
+ const struct sof_dev_desc *desc,
+ const struct snd_sof_dsp_ops *ops)
+{
+ struct snd_soc_dai_link *links;
+ int ret;
+
+ if (!mach)
+ return -EINVAL;
+
+ sof_pdata->drv_name = "sof-nocodec";
+
+ mach->drv_name = "sof-nocodec";
+ sof_pdata->fw_filename = desc->nocodec_fw_filename;
+ sof_pdata->tplg_filename = desc->nocodec_tplg_filename;
+
+ /* create dummy BE dai_links */
+ links = devm_kcalloc(dev, ops->num_drv,
+ sizeof(struct snd_soc_dai_link), GFP_KERNEL);
+ if (!links)
+ return -ENOMEM;
+
+ ret = sof_nocodec_bes_setup(dev, ops, links, ops->num_drv,
+ &sof_nocodec_card);
+ return ret;
+}
+EXPORT_SYMBOL(sof_nocodec_setup);
+
+static int sof_nocodec_probe(struct platform_device *pdev)
+{
+ struct snd_soc_card *card = &sof_nocodec_card;
+
+ card->dev = &pdev->dev;
+
+ return devm_snd_soc_register_card(&pdev->dev, card);
+}
+
+static int sof_nocodec_remove(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static struct platform_driver sof_nocodec_audio = {
+ .probe = sof_nocodec_probe,
+ .remove = sof_nocodec_remove,
+ .driver = {
+ .name = "sof-nocodec",
+ .pm = &snd_soc_pm_ops,
+ },
+};
+module_platform_driver(sof_nocodec_audio)
+
+MODULE_DESCRIPTION("ASoC sof nocodec");
+MODULE_AUTHOR("Liam Girdwood");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_ALIAS("platform:sof-nocodec");
diff --git a/sound/soc/sof/ops.c b/sound/soc/sof/ops.c
new file mode 100644
index 000000000000..80f907740b82
--- /dev/null
+++ b/sound/soc/sof/ops.c
@@ -0,0 +1,163 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018 Intel Corporation. All rights reserved.
+//
+// Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+//
+
+#include <linux/pci.h>
+#include "ops.h"
+
+static
+bool snd_sof_pci_update_bits_unlocked(struct snd_sof_dev *sdev, u32 offset,
+ u32 mask, u32 value)
+{
+ struct pci_dev *pci = to_pci_dev(sdev->dev);
+ unsigned int old, new;
+ u32 ret;
+
+ pci_read_config_dword(pci, offset, &ret);
+ old = ret;
+ dev_dbg(sdev->dev, "Debug PCIR: %8.8x at %8.8x\n", old & mask, offset);
+
+ new = (old & ~mask) | (value & mask);
+
+ if (old == new)
+ return false;
+
+ pci_write_config_dword(pci, offset, new);
+ dev_dbg(sdev->dev, "Debug PCIW: %8.8x at %8.8x\n", value,
+ offset);
+
+ return true;
+}
+
+bool snd_sof_pci_update_bits(struct snd_sof_dev *sdev, u32 offset,
+ u32 mask, u32 value)
+{
+ unsigned long flags;
+ bool change;
+
+ spin_lock_irqsave(&sdev->hw_lock, flags);
+ change = snd_sof_pci_update_bits_unlocked(sdev, offset, mask, value);
+ spin_unlock_irqrestore(&sdev->hw_lock, flags);
+ return change;
+}
+EXPORT_SYMBOL(snd_sof_pci_update_bits);
+
+bool snd_sof_dsp_update_bits_unlocked(struct snd_sof_dev *sdev, u32 bar,
+ u32 offset, u32 mask, u32 value)
+{
+ unsigned int old, new;
+ u32 ret;
+
+ ret = snd_sof_dsp_read(sdev, bar, offset);
+
+ old = ret;
+ new = (old & ~mask) | (value & mask);
+
+ if (old == new)
+ return false;
+
+ snd_sof_dsp_write(sdev, bar, offset, new);
+
+ return true;
+}
+EXPORT_SYMBOL(snd_sof_dsp_update_bits_unlocked);
+
+bool snd_sof_dsp_update_bits64_unlocked(struct snd_sof_dev *sdev, u32 bar,
+ u32 offset, u64 mask, u64 value)
+{
+ u64 old, new;
+
+ old = snd_sof_dsp_read64(sdev, bar, offset);
+
+ new = (old & ~mask) | (value & mask);
+
+ if (old == new)
+ return false;
+
+ snd_sof_dsp_write64(sdev, bar, offset, new);
+
+ return true;
+}
+EXPORT_SYMBOL(snd_sof_dsp_update_bits64_unlocked);
+
+/* This is for register bits with attribute RWC */
+bool snd_sof_dsp_update_bits(struct snd_sof_dev *sdev, u32 bar, u32 offset,
+ u32 mask, u32 value)
+{
+ unsigned long flags;
+ bool change;
+
+ spin_lock_irqsave(&sdev->hw_lock, flags);
+ change = snd_sof_dsp_update_bits_unlocked(sdev, bar, offset, mask,
+ value);
+ spin_unlock_irqrestore(&sdev->hw_lock, flags);
+ return change;
+}
+EXPORT_SYMBOL(snd_sof_dsp_update_bits);
+
+bool snd_sof_dsp_update_bits64(struct snd_sof_dev *sdev, u32 bar, u32 offset,
+ u64 mask, u64 value)
+{
+ unsigned long flags;
+ bool change;
+
+ spin_lock_irqsave(&sdev->hw_lock, flags);
+ change = snd_sof_dsp_update_bits64_unlocked(sdev, bar, offset, mask,
+ value);
+ spin_unlock_irqrestore(&sdev->hw_lock, flags);
+ return change;
+}
+EXPORT_SYMBOL(snd_sof_dsp_update_bits64);
+
+static
+void snd_sof_dsp_update_bits_forced_unlocked(struct snd_sof_dev *sdev, u32 bar,
+ u32 offset, u32 mask, u32 value)
+{
+ unsigned int old, new;
+ u32 ret;
+
+ ret = snd_sof_dsp_read(sdev, bar, offset);
+
+ old = ret;
+ new = (old & ~mask) | (value & mask);
+
+ snd_sof_dsp_write(sdev, bar, offset, new);
+}
+
+/* This is for register bits with attribute RWC */
+void snd_sof_dsp_update_bits_forced(struct snd_sof_dev *sdev, u32 bar,
+ u32 offset, u32 mask, u32 value)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&sdev->hw_lock, flags);
+ snd_sof_dsp_update_bits_forced_unlocked(sdev, bar, offset, mask, value);
+ spin_unlock_irqrestore(&sdev->hw_lock, flags);
+}
+EXPORT_SYMBOL(snd_sof_dsp_update_bits_forced);
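A usage sketch, assuming a platform that exposes the SHIM registers defined in intel/shim.h; example_stall_dsp and its bar parameter are invented for illustration:

static void example_stall_dsp(struct snd_sof_dev *sdev, u32 bar, bool stall)
{
	/* set or clear the DSP stall bit, serialised by sdev->hw_lock */
	snd_sof_dsp_update_bits(sdev, bar, SHIM_CSR, SHIM_CSR_STALL,
				stall ? SHIM_CSR_STALL : 0);
}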
+
+void snd_sof_dsp_panic(struct snd_sof_dev *sdev, u32 offset)
+{
+ dev_err(sdev->dev, "error: DSP panic!\n");
+
+ /*
+ * If the DSP has not yet set dsp_oops_offset, take the offset from the
+ * panic message; otherwise keep the existing value and only log the
+ * offset reported by the panic.
+ * TODO: also cross-check the memory window setup against the panic
+ * message.
+ */
+ if (!sdev->dsp_oops_offset)
+ sdev->dsp_oops_offset = offset;
+ else
+ dev_dbg(sdev->dev, "panic: dsp_oops_offset %zu offset %d\n",
+ sdev->dsp_oops_offset, offset);
+
+ snd_sof_dsp_dbg_dump(sdev, SOF_DBG_REGS | SOF_DBG_MBOX);
+ snd_sof_trace_notify_for_error(sdev);
+}
+EXPORT_SYMBOL(snd_sof_dsp_panic);
diff --git a/sound/soc/sof/ops.h b/sound/soc/sof/ops.h
new file mode 100644
index 000000000000..80fc3b374c2b
--- /dev/null
+++ b/sound/soc/sof/ops.h
@@ -0,0 +1,411 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * Copyright(c) 2018 Intel Corporation. All rights reserved.
+ *
+ * Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+ */
+
+#ifndef __SOUND_SOC_SOF_IO_H
+#define __SOUND_SOC_SOF_IO_H
+
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <sound/pcm.h>
+#include "sof-priv.h"
+
+#define sof_ops(sdev) \
+ ((sdev)->pdata->desc->ops)
+
+/* Mandatory operations are verified during probing */
+
+/* init */
+static inline int snd_sof_probe(struct snd_sof_dev *sdev)
+{
+ return sof_ops(sdev)->probe(sdev);
+}
+
+static inline int snd_sof_remove(struct snd_sof_dev *sdev)
+{
+ if (sof_ops(sdev)->remove)
+ return sof_ops(sdev)->remove(sdev);
+
+ return 0;
+}
+
+/* control */
+
+/*
+ * snd_sof_dsp_run returns the core mask of the cores that are available
+ * after successful fw boot
+ */
+static inline int snd_sof_dsp_run(struct snd_sof_dev *sdev)
+{
+ return sof_ops(sdev)->run(sdev);
+}
+
+static inline int snd_sof_dsp_stall(struct snd_sof_dev *sdev)
+{
+ if (sof_ops(sdev)->stall)
+ return sof_ops(sdev)->stall(sdev);
+
+ return 0;
+}
+
+static inline int snd_sof_dsp_reset(struct snd_sof_dev *sdev)
+{
+ if (sof_ops(sdev)->reset)
+ return sof_ops(sdev)->reset(sdev);
+
+ return 0;
+}
+
+/* dsp core power up/power down */
+static inline int snd_sof_dsp_core_power_up(struct snd_sof_dev *sdev,
+ unsigned int core_mask)
+{
+ if (sof_ops(sdev)->core_power_up)
+ return sof_ops(sdev)->core_power_up(sdev, core_mask);
+
+ return 0;
+}
+
+static inline int snd_sof_dsp_core_power_down(struct snd_sof_dev *sdev,
+ unsigned int core_mask)
+{
+ if (sof_ops(sdev)->core_power_down)
+ return sof_ops(sdev)->core_power_down(sdev, core_mask);
+
+ return 0;
+}
+
+/* pre/post fw load */
+static inline int snd_sof_dsp_pre_fw_run(struct snd_sof_dev *sdev)
+{
+ if (sof_ops(sdev)->pre_fw_run)
+ return sof_ops(sdev)->pre_fw_run(sdev);
+
+ return 0;
+}
+
+static inline int snd_sof_dsp_post_fw_run(struct snd_sof_dev *sdev)
+{
+ if (sof_ops(sdev)->post_fw_run)
+ return sof_ops(sdev)->post_fw_run(sdev);
+
+ return 0;
+}
+
+/* power management */
+static inline int snd_sof_dsp_resume(struct snd_sof_dev *sdev)
+{
+ if (sof_ops(sdev)->resume)
+ return sof_ops(sdev)->resume(sdev);
+
+ return 0;
+}
+
+static inline int snd_sof_dsp_suspend(struct snd_sof_dev *sdev, int state)
+{
+ if (sof_ops(sdev)->suspend)
+ return sof_ops(sdev)->suspend(sdev, state);
+
+ return 0;
+}
+
+static inline int snd_sof_dsp_runtime_resume(struct snd_sof_dev *sdev)
+{
+ if (sof_ops(sdev)->runtime_resume)
+ return sof_ops(sdev)->runtime_resume(sdev);
+
+ return 0;
+}
+
+static inline int snd_sof_dsp_runtime_suspend(struct snd_sof_dev *sdev,
+ int state)
+{
+ if (sof_ops(sdev)->runtime_suspend)
+ return sof_ops(sdev)->runtime_suspend(sdev, state);
+
+ return 0;
+}
+
+static inline void snd_sof_dsp_hw_params_upon_resume(struct snd_sof_dev *sdev)
+{
+ if (sof_ops(sdev)->set_hw_params_upon_resume)
+ sof_ops(sdev)->set_hw_params_upon_resume(sdev);
+}
+
+static inline int snd_sof_dsp_set_clk(struct snd_sof_dev *sdev, u32 freq)
+{
+ if (sof_ops(sdev)->set_clk)
+ return sof_ops(sdev)->set_clk(sdev, freq);
+
+ return 0;
+}
+
+/* debug */
+static inline void snd_sof_dsp_dbg_dump(struct snd_sof_dev *sdev, u32 flags)
+{
+ if (sof_ops(sdev)->dbg_dump)
+ return sof_ops(sdev)->dbg_dump(sdev, flags);
+}
+
+static inline void snd_sof_ipc_dump(struct snd_sof_dev *sdev)
+{
+ if (sof_ops(sdev)->ipc_dump)
+ return sof_ops(sdev)->ipc_dump(sdev);
+}
+
+/* register IO */
+static inline void snd_sof_dsp_write(struct snd_sof_dev *sdev, u32 bar,
+ u32 offset, u32 value)
+{
+ if (sof_ops(sdev)->write) {
+ sof_ops(sdev)->write(sdev, sdev->bar[bar] + offset, value);
+ return;
+ }
+
+ dev_err_ratelimited(sdev->dev, "error: %s not defined\n", __func__);
+}
+
+static inline void snd_sof_dsp_write64(struct snd_sof_dev *sdev, u32 bar,
+ u32 offset, u64 value)
+{
+ if (sof_ops(sdev)->write64) {
+ sof_ops(sdev)->write64(sdev, sdev->bar[bar] + offset, value);
+ return;
+ }
+
+ dev_err_ratelimited(sdev->dev, "error: %s not defined\n", __func__);
+}
+
+static inline u32 snd_sof_dsp_read(struct snd_sof_dev *sdev, u32 bar,
+ u32 offset)
+{
+ if (sof_ops(sdev)->read)
+ return sof_ops(sdev)->read(sdev, sdev->bar[bar] + offset);
+
+ dev_err(sdev->dev, "error: %s not defined\n", __func__);
+ return -ENOTSUPP;
+}
+
+static inline u64 snd_sof_dsp_read64(struct snd_sof_dev *sdev, u32 bar,
+ u32 offset)
+{
+ if (sof_ops(sdev)->read64)
+ return sof_ops(sdev)->read64(sdev, sdev->bar[bar] + offset);
+
+ dev_err(sdev->dev, "error: %s not defined\n", __func__);
+ return -ENOTSUPP;
+}
+
+/* block IO */
+static inline void snd_sof_dsp_block_read(struct snd_sof_dev *sdev, u32 bar,
+ u32 offset, void *dest, size_t bytes)
+{
+ sof_ops(sdev)->block_read(sdev, bar, offset, dest, bytes);
+}
+
+static inline void snd_sof_dsp_block_write(struct snd_sof_dev *sdev, u32 bar,
+ u32 offset, void *src, size_t bytes)
+{
+ sof_ops(sdev)->block_write(sdev, bar, offset, src, bytes);
+}
+
+/* ipc */
+static inline int snd_sof_dsp_send_msg(struct snd_sof_dev *sdev,
+ struct snd_sof_ipc_msg *msg)
+{
+ return sof_ops(sdev)->send_msg(sdev, msg);
+}
+
+/* host DMA trace */
+static inline int snd_sof_dma_trace_init(struct snd_sof_dev *sdev,
+ u32 *stream_tag)
+{
+ if (sof_ops(sdev)->trace_init)
+ return sof_ops(sdev)->trace_init(sdev, stream_tag);
+
+ return 0;
+}
+
+static inline int snd_sof_dma_trace_release(struct snd_sof_dev *sdev)
+{
+ if (sof_ops(sdev)->trace_release)
+ return sof_ops(sdev)->trace_release(sdev);
+
+ return 0;
+}
+
+static inline int snd_sof_dma_trace_trigger(struct snd_sof_dev *sdev, int cmd)
+{
+ if (sof_ops(sdev)->trace_trigger)
+ return sof_ops(sdev)->trace_trigger(sdev, cmd);
+
+ return 0;
+}
+
+/* host PCM ops */
+static inline int
+snd_sof_pcm_platform_open(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream)
+{
+ if (sof_ops(sdev) && sof_ops(sdev)->pcm_open)
+ return sof_ops(sdev)->pcm_open(sdev, substream);
+
+ return 0;
+}
+
+/* disconnect pcm substream from a host stream */
+static inline int
+snd_sof_pcm_platform_close(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream)
+{
+ if (sof_ops(sdev) && sof_ops(sdev)->pcm_close)
+ return sof_ops(sdev)->pcm_close(sdev, substream);
+
+ return 0;
+}
+
+/* host stream hw params */
+static inline int
+snd_sof_pcm_platform_hw_params(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct sof_ipc_stream_params *ipc_params)
+{
+ if (sof_ops(sdev) && sof_ops(sdev)->pcm_hw_params)
+ return sof_ops(sdev)->pcm_hw_params(sdev, substream,
+ params, ipc_params);
+
+ return 0;
+}
+
+/* host stream trigger */
+static inline int
+snd_sof_pcm_platform_trigger(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream, int cmd)
+{
+ if (sof_ops(sdev) && sof_ops(sdev)->pcm_trigger)
+ return sof_ops(sdev)->pcm_trigger(sdev, substream, cmd);
+
+ return 0;
+}
+
+/* host DSP message data */
+static inline void snd_sof_ipc_msg_data(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream,
+ void *p, size_t sz)
+{
+ sof_ops(sdev)->ipc_msg_data(sdev, substream, p, sz);
+}
+
+/* host configure DSP HW parameters */
+static inline int
+snd_sof_ipc_pcm_params(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream,
+ const struct sof_ipc_pcm_params_reply *reply)
+{
+ return sof_ops(sdev)->ipc_pcm_params(sdev, substream, reply);
+}
+
+/* host stream pointer */
+static inline snd_pcm_uframes_t
+snd_sof_pcm_platform_pointer(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream)
+{
+ if (sof_ops(sdev) && sof_ops(sdev)->pcm_pointer)
+ return sof_ops(sdev)->pcm_pointer(sdev, substream);
+
+ return 0;
+}
+
+static inline const struct snd_sof_dsp_ops
+*sof_get_ops(const struct sof_dev_desc *d,
+ const struct sof_ops_table mach_ops[], int asize)
+{
+ int i;
+
+ for (i = 0; i < asize; i++) {
+ if (d == mach_ops[i].desc)
+ return mach_ops[i].ops;
+ }
+
+ /* not found */
+ return NULL;
+}
+
+/**
+ * snd_sof_dsp_read_poll_timeout - Periodically poll a DSP register
+ * until a condition is met or a timeout occurs
+ * @sdev: SOF device
+ * @bar: BAR index to read from
+ * @offset: Register offset to poll
+ * @val: Variable to read the value into
+ * @cond: Break condition (usually involving @val)
+ * @sleep_us: Maximum time to sleep between reads in us (0
+ * tight-loops). Should be less than ~20ms since usleep_range
+ * is used (see Documentation/timers/timers-howto.txt).
+ * @timeout_us: Timeout in us, 0 means never timeout
+ *
+ * Returns 0 on success and -ETIMEDOUT upon a timeout. In either
+ * case, the last read value is stored in @val. Must not
+ * be called from atomic context if sleep_us or timeout_us are used.
+ *
+ * This is modelled after the readx_poll_timeout macros in linux/iopoll.h.
+ */
+#define snd_sof_dsp_read_poll_timeout(sdev, bar, offset, val, cond, sleep_us, timeout_us) \
+({ \
+ u64 __timeout_us = (timeout_us); \
+ unsigned long __sleep_us = (sleep_us); \
+ ktime_t __timeout = ktime_add_us(ktime_get(), __timeout_us); \
+ might_sleep_if((__sleep_us) != 0); \
+ for (;;) { \
+ (val) = snd_sof_dsp_read(sdev, bar, offset); \
+ if (cond) { \
+ dev_dbg(sdev->dev, \
+ "FW Poll Status: reg=%#x successful\n", (val)); \
+ break; \
+ } \
+ if (__timeout_us && \
+ ktime_compare(ktime_get(), __timeout) > 0) { \
+ (val) = snd_sof_dsp_read(sdev, bar, offset); \
+ dev_dbg(sdev->dev, \
+ "FW Poll Status: reg=%#x timedout\n", (val)); \
+ break; \
+ } \
+ if (__sleep_us) \
+ usleep_range((__sleep_us >> 2) + 1, __sleep_us); \
+ } \
+ (cond) ? 0 : -ETIMEDOUT; \
+})
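+
+/*
+ * Example use of the poll macro (illustrative only; the bar, offset and bit
+ * names below are placeholders, not real SOF definitions):
+ *
+ *	ret = snd_sof_dsp_read_poll_timeout(sdev, MY_BAR, MY_STATUS_OFFSET,
+ *					    status, (status & MY_DONE_BIT),
+ *					    500, 10000);
+ *	if (ret < 0)
+ *		dev_err(sdev->dev, "error: timeout waiting for DSP done bit\n");
+ */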
+
+/* This is for register bits with attribute RWC */
+bool snd_sof_pci_update_bits(struct snd_sof_dev *sdev, u32 offset,
+ u32 mask, u32 value);
+
+bool snd_sof_dsp_update_bits_unlocked(struct snd_sof_dev *sdev, u32 bar,
+ u32 offset, u32 mask, u32 value);
+
+bool snd_sof_dsp_update_bits64_unlocked(struct snd_sof_dev *sdev, u32 bar,
+ u32 offset, u64 mask, u64 value);
+
+bool snd_sof_dsp_update_bits(struct snd_sof_dev *sdev, u32 bar, u32 offset,
+ u32 mask, u32 value);
+
+bool snd_sof_dsp_update_bits64(struct snd_sof_dev *sdev, u32 bar,
+ u32 offset, u64 mask, u64 value);
+
+void snd_sof_dsp_update_bits_forced(struct snd_sof_dev *sdev, u32 bar,
+ u32 offset, u32 mask, u32 value);
+
+int snd_sof_dsp_register_poll(struct snd_sof_dev *sdev, u32 bar, u32 offset,
+ u32 mask, u32 target, u32 timeout_ms,
+ u32 interval_us);
+
+void snd_sof_dsp_panic(struct snd_sof_dev *sdev, u32 offset);
+#endif
diff --git a/sound/soc/sof/pcm.c b/sound/soc/sof/pcm.c
new file mode 100644
index 000000000000..649968841dad
--- /dev/null
+++ b/sound/soc/sof/pcm.c
@@ -0,0 +1,767 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018 Intel Corporation. All rights reserved.
+//
+// Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+//
+// PCM Layer, interface between ALSA and IPC.
+//
+
+#include <linux/pm_runtime.h>
+#include <sound/pcm_params.h>
+#include <sound/sof.h>
+#include "sof-priv.h"
+#include "ops.h"
+
+#define DRV_NAME "sof-audio-component"
+
+/* Create DMA buffer page table for DSP */
+static int create_page_table(struct snd_pcm_substream *substream,
+ unsigned char *dma_area, size_t size)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_component *component =
+ snd_soc_rtdcom_lookup(rtd, DRV_NAME);
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(component);
+ struct snd_sof_pcm *spcm;
+ struct snd_dma_buffer *dmab = snd_pcm_get_dma_buf(substream);
+ int stream = substream->stream;
+
+ spcm = snd_sof_find_spcm_dai(sdev, rtd);
+ if (!spcm)
+ return -EINVAL;
+
+ return snd_sof_create_page_table(sdev, dmab,
+ spcm->stream[stream].page_table.area, size);
+}
+
+static int sof_pcm_dsp_params(struct snd_sof_pcm *spcm, struct snd_pcm_substream *substream,
+ const struct sof_ipc_pcm_params_reply *reply)
+{
+ struct snd_sof_dev *sdev = spcm->sdev;
+ /* validate offset */
+ int ret = snd_sof_ipc_pcm_params(sdev, substream, reply);
+
+ if (ret < 0)
+ dev_err(sdev->dev, "error: got wrong reply for PCM %d\n",
+ spcm->pcm.pcm_id);
+
+ return ret;
+}
+
+/*
+ * SOF PCM period elapsed work handler
+ */
+static void sof_pcm_period_elapsed_work(struct work_struct *work)
+{
+ struct snd_sof_pcm_stream *sps =
+ container_of(work, struct snd_sof_pcm_stream,
+ period_elapsed_work);
+
+ snd_pcm_period_elapsed(sps->substream);
+}
+
+/*
+ * SOF PCM period elapsed notification; this may be called from IRQ thread
+ * context.
+ */
+void snd_sof_pcm_period_elapsed(struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_component *component =
+ snd_soc_rtdcom_lookup(rtd, DRV_NAME);
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(component);
+ struct snd_sof_pcm *spcm;
+
+ spcm = snd_sof_find_spcm_dai(sdev, rtd);
+ if (!spcm) {
+ dev_err(sdev->dev,
+ "error: period elapsed for unknown stream!\n");
+ return;
+ }
+
+ /*
+ * snd_pcm_period_elapsed() can be called in interrupt context
+ * before IRQ_HANDLED is returned. Inside snd_pcm_period_elapsed(),
+ * when the PCM is done draining or xrun happened, a STOP IPC will
+ * then be sent and this IPC will hit IPC timeout.
+ * To avoid sending IPC before the previous IPC is handled, we
+ * schedule delayed work here to call the snd_pcm_period_elapsed().
+ */
+ schedule_work(&spcm->stream[substream->stream].period_elapsed_work);
+}
+EXPORT_SYMBOL(snd_sof_pcm_period_elapsed);
+
+/* this may get called several times by OSS emulation */
+static int sof_pcm_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct snd_soc_component *component =
+ snd_soc_rtdcom_lookup(rtd, DRV_NAME);
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(component);
+ struct snd_sof_pcm *spcm;
+ struct sof_ipc_pcm_params pcm;
+ struct sof_ipc_pcm_params_reply ipc_params_reply;
+ int ret;
+
+ /* nothing to do for BE */
+ if (rtd->dai_link->no_pcm)
+ return 0;
+
+ spcm = snd_sof_find_spcm_dai(sdev, rtd);
+ if (!spcm)
+ return -EINVAL;
+
+ dev_dbg(sdev->dev, "pcm: hw params stream %d dir %d\n",
+ spcm->pcm.pcm_id, substream->stream);
+
+ memset(&pcm, 0, sizeof(pcm));
+
+ /* allocate audio buffer pages */
+ ret = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(params));
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: could not allocate %d bytes for PCM %d\n",
+ params_buffer_bytes(params), spcm->pcm.pcm_id);
+ return ret;
+ }
+ if (ret) {
+		/*
+		 * ret == 1 means the buffer has changed, so create a new
+		 * compressed page table for the audio firmware.
+		 * ret == 0 means the buffer is unchanged, so the page table
+		 * does not need to be regenerated.
+		 */
+ ret = create_page_table(substream, runtime->dma_area,
+ runtime->dma_bytes);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* number of pages should be rounded up */
+ pcm.params.buffer.pages = PFN_UP(runtime->dma_bytes);
+
+ /* set IPC PCM parameters */
+ pcm.hdr.size = sizeof(pcm);
+ pcm.hdr.cmd = SOF_IPC_GLB_STREAM_MSG | SOF_IPC_STREAM_PCM_PARAMS;
+ pcm.comp_id = spcm->stream[substream->stream].comp_id;
+ pcm.params.hdr.size = sizeof(pcm.params);
+ pcm.params.buffer.phy_addr =
+ spcm->stream[substream->stream].page_table.addr;
+ pcm.params.buffer.size = runtime->dma_bytes;
+ pcm.params.direction = substream->stream;
+ pcm.params.sample_valid_bytes = params_width(params) >> 3;
+ pcm.params.buffer_fmt = SOF_IPC_BUFFER_INTERLEAVED;
+ pcm.params.rate = params_rate(params);
+ pcm.params.channels = params_channels(params);
+ pcm.params.host_period_bytes = params_period_bytes(params);
+
+ /* container size */
+ ret = snd_pcm_format_physical_width(params_format(params));
+ if (ret < 0)
+ return ret;
+ pcm.params.sample_container_bytes = ret >> 3;
+
+ /* format */
+ switch (params_format(params)) {
+ case SNDRV_PCM_FORMAT_S16:
+ pcm.params.frame_fmt = SOF_IPC_FRAME_S16_LE;
+ break;
+ case SNDRV_PCM_FORMAT_S24:
+ pcm.params.frame_fmt = SOF_IPC_FRAME_S24_4LE;
+ break;
+ case SNDRV_PCM_FORMAT_S32:
+ pcm.params.frame_fmt = SOF_IPC_FRAME_S32_LE;
+ break;
+ case SNDRV_PCM_FORMAT_FLOAT:
+ pcm.params.frame_fmt = SOF_IPC_FRAME_FLOAT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+	/* set up the host-side (DMA) stream before sending the IPC to the DSP */
+ ret = snd_sof_pcm_platform_hw_params(sdev,
+ substream,
+ params,
+ &pcm.params);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: platform hw params failed\n");
+ return ret;
+ }
+
+	dev_dbg(sdev->dev, "stream_tag %d\n", pcm.params.stream_tag);
+
+ /* send IPC to the DSP */
+ ret = sof_ipc_tx_message(sdev->ipc, pcm.hdr.cmd, &pcm, sizeof(pcm),
+ &ipc_params_reply, sizeof(ipc_params_reply));
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: hw params ipc failed for stream %d\n",
+ pcm.params.stream_tag);
+ return ret;
+ }
+
+ ret = sof_pcm_dsp_params(spcm, substream, &ipc_params_reply);
+ if (ret < 0)
+ return ret;
+
+ /* save pcm hw_params */
+ memcpy(&spcm->params[substream->stream], params, sizeof(*params));
+
+ INIT_WORK(&spcm->stream[substream->stream].period_elapsed_work,
+ sof_pcm_period_elapsed_work);
+
+ return ret;
+}
+
+static int sof_pcm_hw_free(struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_component *component =
+ snd_soc_rtdcom_lookup(rtd, DRV_NAME);
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(component);
+ struct snd_sof_pcm *spcm;
+ struct sof_ipc_stream stream;
+ struct sof_ipc_reply reply;
+ int ret;
+
+ /* nothing to do for BE */
+ if (rtd->dai_link->no_pcm)
+ return 0;
+
+ spcm = snd_sof_find_spcm_dai(sdev, rtd);
+ if (!spcm)
+ return -EINVAL;
+
+ dev_dbg(sdev->dev, "pcm: free stream %d dir %d\n", spcm->pcm.pcm_id,
+ substream->stream);
+
+ stream.hdr.size = sizeof(stream);
+ stream.hdr.cmd = SOF_IPC_GLB_STREAM_MSG | SOF_IPC_STREAM_PCM_FREE;
+ stream.comp_id = spcm->stream[substream->stream].comp_id;
+
+ /* send IPC to the DSP */
+ ret = sof_ipc_tx_message(sdev->ipc, stream.hdr.cmd, &stream,
+ sizeof(stream), &reply, sizeof(reply));
+
+ snd_pcm_lib_free_pages(substream);
+
+ cancel_work_sync(&spcm->stream[substream->stream].period_elapsed_work);
+
+ return ret;
+}
+
+static int sof_pcm_prepare(struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_component *component =
+ snd_soc_rtdcom_lookup(rtd, DRV_NAME);
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(component);
+ struct snd_sof_pcm *spcm;
+ int ret;
+
+ /* nothing to do for BE */
+ if (rtd->dai_link->no_pcm)
+ return 0;
+
+ spcm = snd_sof_find_spcm_dai(sdev, rtd);
+ if (!spcm)
+ return -EINVAL;
+
+ /*
+ * check if hw_params needs to be set-up again.
+ * This is only needed when resuming from system sleep.
+ */
+ if (!spcm->hw_params_upon_resume[substream->stream])
+ return 0;
+
+ dev_dbg(sdev->dev, "pcm: prepare stream %d dir %d\n", spcm->pcm.pcm_id,
+ substream->stream);
+
+ /* set hw_params */
+ ret = sof_pcm_hw_params(substream, &spcm->params[substream->stream]);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: set pcm hw_params after resume\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * FE DAI link trigger actions are always executed in non-atomic context
+ * because they involve IPCs.
+ */
+static int sof_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_component *component =
+ snd_soc_rtdcom_lookup(rtd, DRV_NAME);
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(component);
+ struct snd_sof_pcm *spcm;
+ struct sof_ipc_stream stream;
+ struct sof_ipc_reply reply;
+ int ret;
+
+ /* nothing to do for BE */
+ if (rtd->dai_link->no_pcm)
+ return 0;
+
+ spcm = snd_sof_find_spcm_dai(sdev, rtd);
+ if (!spcm)
+ return -EINVAL;
+
+ dev_dbg(sdev->dev, "pcm: trigger stream %d dir %d cmd %d\n",
+ spcm->pcm.pcm_id, substream->stream, cmd);
+
+ stream.hdr.size = sizeof(stream);
+ stream.hdr.cmd = SOF_IPC_GLB_STREAM_MSG;
+ stream.comp_id = spcm->stream[substream->stream].comp_id;
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ stream.hdr.cmd |= SOF_IPC_STREAM_TRIG_PAUSE;
+ break;
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ stream.hdr.cmd |= SOF_IPC_STREAM_TRIG_RELEASE;
+ break;
+ case SNDRV_PCM_TRIGGER_RESUME:
+ /* set up hw_params */
+ ret = sof_pcm_prepare(substream);
+ if (ret < 0) {
+ dev_err(sdev->dev,
+ "error: failed to set up hw_params upon resume\n");
+ return ret;
+ }
+
+ /* fallthrough */
+ case SNDRV_PCM_TRIGGER_START:
+ stream.hdr.cmd |= SOF_IPC_STREAM_TRIG_START;
+ break;
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ case SNDRV_PCM_TRIGGER_STOP:
+ stream.hdr.cmd |= SOF_IPC_STREAM_TRIG_STOP;
+ break;
+ default:
+ dev_err(sdev->dev, "error: unhandled trigger cmd %d\n", cmd);
+ return -EINVAL;
+ }
+
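+	/* run the platform-specific trigger first, then notify the DSP via IPC */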
+ snd_sof_pcm_platform_trigger(sdev, substream, cmd);
+
+ /* send IPC to the DSP */
+ ret = sof_ipc_tx_message(sdev->ipc, stream.hdr.cmd, &stream,
+ sizeof(stream), &reply, sizeof(reply));
+
+ if (ret < 0 || cmd != SNDRV_PCM_TRIGGER_SUSPEND)
+ return ret;
+
+ /*
+ * The hw_free op is usually called when the pcm stream is closed.
+ * Since the stream is not closed during suspend, the DSP needs to be
+ * notified explicitly to free pcm to prevent errors upon resume.
+ */
+ stream.hdr.size = sizeof(stream);
+ stream.hdr.cmd = SOF_IPC_GLB_STREAM_MSG | SOF_IPC_STREAM_PCM_FREE;
+ stream.comp_id = spcm->stream[substream->stream].comp_id;
+
+ /* send IPC to the DSP */
+ return sof_ipc_tx_message(sdev->ipc, stream.hdr.cmd, &stream,
+ sizeof(stream), &reply, sizeof(reply));
+}
+
+static snd_pcm_uframes_t sof_pcm_pointer(struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_component *component =
+ snd_soc_rtdcom_lookup(rtd, DRV_NAME);
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(component);
+ struct snd_sof_pcm *spcm;
+ snd_pcm_uframes_t host, dai;
+
+ /* nothing to do for BE */
+ if (rtd->dai_link->no_pcm)
+ return 0;
+
+ /* use dsp ops pointer callback directly if set */
+ if (sof_ops(sdev)->pcm_pointer)
+ return sof_ops(sdev)->pcm_pointer(sdev, substream);
+
+ spcm = snd_sof_find_spcm_dai(sdev, rtd);
+ if (!spcm)
+ return -EINVAL;
+
+ /* read position from DSP */
+ host = bytes_to_frames(substream->runtime,
+ spcm->stream[substream->stream].posn.host_posn);
+ dai = bytes_to_frames(substream->runtime,
+ spcm->stream[substream->stream].posn.dai_posn);
+
+ dev_dbg(sdev->dev, "PCM: stream %d dir %d DMA position %lu DAI position %lu\n",
+ spcm->pcm.pcm_id, substream->stream, host, dai);
+
+ return host;
+}
+
+static int sof_pcm_open(struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct snd_soc_component *component =
+ snd_soc_rtdcom_lookup(rtd, DRV_NAME);
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(component);
+ struct snd_sof_pcm *spcm;
+ struct snd_soc_tplg_stream_caps *caps;
+ int ret;
+ int err;
+
+ /* nothing to do for BE */
+ if (rtd->dai_link->no_pcm)
+ return 0;
+
+ spcm = snd_sof_find_spcm_dai(sdev, rtd);
+ if (!spcm)
+ return -EINVAL;
+
+ dev_dbg(sdev->dev, "pcm: open stream %d dir %d\n", spcm->pcm.pcm_id,
+ substream->stream);
+
+ /* clear hw_params_upon_resume flag */
+ spcm->hw_params_upon_resume[substream->stream] = 0;
+
+ caps = &spcm->pcm.caps[substream->stream];
+
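+	/* resume the DSP and keep it powered up while the PCM is open */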
+ ret = pm_runtime_get_sync(sdev->dev);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: pcm open failed to resume %d\n",
+ ret);
+ pm_runtime_put_noidle(sdev->dev);
+ return ret;
+ }
+
+ /* set any runtime constraints based on topology */
+ snd_pcm_hw_constraint_step(substream->runtime, 0,
+ SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
+ le32_to_cpu(caps->period_size_min));
+ snd_pcm_hw_constraint_step(substream->runtime, 0,
+ SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
+ le32_to_cpu(caps->period_size_min));
+
+ /* set runtime config */
+ runtime->hw.info = SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_PAUSE |
+ SNDRV_PCM_INFO_NO_PERIOD_WAKEUP;
+ runtime->hw.formats = le64_to_cpu(caps->formats);
+ runtime->hw.period_bytes_min = le32_to_cpu(caps->period_size_min);
+ runtime->hw.period_bytes_max = le32_to_cpu(caps->period_size_max);
+ runtime->hw.periods_min = le32_to_cpu(caps->periods_min);
+ runtime->hw.periods_max = le32_to_cpu(caps->periods_max);
+
+ /*
+ * caps->buffer_size_min is not used since the
+ * snd_pcm_hardware structure only defines buffer_bytes_max
+ */
+ runtime->hw.buffer_bytes_max = le32_to_cpu(caps->buffer_size_max);
+
+ dev_dbg(sdev->dev, "period min %zd max %zd bytes\n",
+ runtime->hw.period_bytes_min,
+ runtime->hw.period_bytes_max);
+ dev_dbg(sdev->dev, "period count %d max %d\n",
+ runtime->hw.periods_min,
+ runtime->hw.periods_max);
+ dev_dbg(sdev->dev, "buffer max %zd bytes\n",
+ runtime->hw.buffer_bytes_max);
+
+ /* set wait time - TODO: come from topology */
+ substream->wait_time = 500;
+
+ spcm->stream[substream->stream].posn.host_posn = 0;
+ spcm->stream[substream->stream].posn.dai_posn = 0;
+ spcm->stream[substream->stream].substream = substream;
+
+ ret = snd_sof_pcm_platform_open(sdev, substream);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: pcm open failed %d\n",
+ ret);
+
+ pm_runtime_mark_last_busy(sdev->dev);
+
+ err = pm_runtime_put_autosuspend(sdev->dev);
+ if (err < 0)
+			dev_err(sdev->dev, "error: pcm open failed to idle %d\n",
+				err);
+ }
+
+ return ret;
+}
+
+static int sof_pcm_close(struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_component *component =
+ snd_soc_rtdcom_lookup(rtd, DRV_NAME);
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(component);
+ struct snd_sof_pcm *spcm;
+ int err;
+
+ /* nothing to do for BE */
+ if (rtd->dai_link->no_pcm)
+ return 0;
+
+ spcm = snd_sof_find_spcm_dai(sdev, rtd);
+ if (!spcm)
+ return -EINVAL;
+
+ dev_dbg(sdev->dev, "pcm: close stream %d dir %d\n", spcm->pcm.pcm_id,
+ substream->stream);
+
+ err = snd_sof_pcm_platform_close(sdev, substream);
+ if (err < 0) {
+ dev_err(sdev->dev, "error: pcm close failed %d\n",
+ err);
+ /*
+ * keep going, no point in preventing the close
+ * from happening
+ */
+ }
+
+ pm_runtime_mark_last_busy(sdev->dev);
+
+ err = pm_runtime_put_autosuspend(sdev->dev);
+ if (err < 0)
+ dev_err(sdev->dev, "error: pcm close failed to idle %d\n",
+ err);
+
+ return 0;
+}
+
+static struct snd_pcm_ops sof_pcm_ops = {
+ .open = sof_pcm_open,
+ .close = sof_pcm_close,
+ .ioctl = snd_pcm_lib_ioctl,
+ .hw_params = sof_pcm_hw_params,
+ .prepare = sof_pcm_prepare,
+ .hw_free = sof_pcm_hw_free,
+ .trigger = sof_pcm_trigger,
+ .pointer = sof_pcm_pointer,
+ .page = snd_pcm_sgbuf_ops_page,
+};
+
+/*
+ * Pre-allocate playback/capture audio buffer pages.
+ * There is no need to explicitly release the memory preallocated by
+ * sof_pcm_new() in a pcm_free callback; snd_pcm_lib_preallocate_free_for_all()
+ * is called by the core.
+ */
+static int sof_pcm_new(struct snd_soc_pcm_runtime *rtd)
+{
+ struct snd_soc_component *component =
+ snd_soc_rtdcom_lookup(rtd, DRV_NAME);
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(component);
+ struct snd_sof_pcm *spcm;
+ struct snd_pcm *pcm = rtd->pcm;
+ struct snd_soc_tplg_stream_caps *caps;
+ int stream = SNDRV_PCM_STREAM_PLAYBACK;
+
+ /* find SOF PCM for this RTD */
+ spcm = snd_sof_find_spcm_dai(sdev, rtd);
+ if (!spcm) {
+ dev_warn(sdev->dev, "warn: can't find PCM with DAI ID %d\n",
+ rtd->dai_link->id);
+ return 0;
+ }
+
+ dev_dbg(sdev->dev, "creating new PCM %s\n", spcm->pcm.pcm_name);
+
+ /* do we need to pre-allocate playback audio buffer pages */
+ if (!spcm->pcm.playback)
+ goto capture;
+
+ caps = &spcm->pcm.caps[stream];
+
+ /* pre-allocate playback audio buffer pages */
+ dev_dbg(sdev->dev, "spcm: allocate %s playback DMA buffer size 0x%x max 0x%x\n",
+ caps->name, caps->buffer_size_min, caps->buffer_size_max);
+
+ snd_pcm_lib_preallocate_pages(pcm->streams[stream].substream,
+ SNDRV_DMA_TYPE_DEV_SG, sdev->dev,
+ le32_to_cpu(caps->buffer_size_min),
+ le32_to_cpu(caps->buffer_size_max));
+capture:
+ stream = SNDRV_PCM_STREAM_CAPTURE;
+
+ /* do we need to pre-allocate capture audio buffer pages */
+ if (!spcm->pcm.capture)
+ return 0;
+
+ caps = &spcm->pcm.caps[stream];
+
+ /* pre-allocate capture audio buffer pages */
+ dev_dbg(sdev->dev, "spcm: allocate %s capture DMA buffer size 0x%x max 0x%x\n",
+ caps->name, caps->buffer_size_min, caps->buffer_size_max);
+
+ snd_pcm_lib_preallocate_pages(pcm->streams[stream].substream,
+ SNDRV_DMA_TYPE_DEV_SG, sdev->dev,
+ le32_to_cpu(caps->buffer_size_min),
+ le32_to_cpu(caps->buffer_size_max));
+
+ return 0;
+}
+
+/* fixup the BE DAI link to match any values from topology */
+static int sof_pcm_dai_link_fixup(struct snd_soc_pcm_runtime *rtd,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_interval *rate = hw_param_interval(params,
+ SNDRV_PCM_HW_PARAM_RATE);
+ struct snd_interval *channels = hw_param_interval(params,
+ SNDRV_PCM_HW_PARAM_CHANNELS);
+ struct snd_mask *fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
+ struct snd_soc_component *component =
+ snd_soc_rtdcom_lookup(rtd, DRV_NAME);
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(component);
+ struct snd_sof_dai *dai =
+ snd_sof_find_dai(sdev, (char *)rtd->dai_link->name);
+
+ /* no topology exists for this BE, try a common configuration */
+ if (!dai) {
+ dev_warn(sdev->dev, "warning: no topology found for BE DAI %s config\n",
+ rtd->dai_link->name);
+
+ /* set 48k, stereo, 16bits by default */
+ rate->min = 48000;
+ rate->max = 48000;
+
+ channels->min = 2;
+ channels->max = 2;
+
+ snd_mask_none(fmt);
+ snd_mask_set_format(fmt, SNDRV_PCM_FORMAT_S16_LE);
+
+ return 0;
+ }
+
+ /* read format from topology */
+ snd_mask_none(fmt);
+
+ switch (dai->comp_dai.config.frame_fmt) {
+ case SOF_IPC_FRAME_S16_LE:
+ snd_mask_set_format(fmt, SNDRV_PCM_FORMAT_S16_LE);
+ break;
+ case SOF_IPC_FRAME_S24_4LE:
+ snd_mask_set_format(fmt, SNDRV_PCM_FORMAT_S24_LE);
+ break;
+ case SOF_IPC_FRAME_S32_LE:
+ snd_mask_set_format(fmt, SNDRV_PCM_FORMAT_S32_LE);
+ break;
+ default:
+ dev_err(sdev->dev, "error: No available DAI format!\n");
+ return -EINVAL;
+ }
+
+ /* read rate and channels from topology */
+ switch (dai->dai_config->type) {
+ case SOF_DAI_INTEL_SSP:
+ rate->min = dai->dai_config->ssp.fsync_rate;
+ rate->max = dai->dai_config->ssp.fsync_rate;
+ channels->min = dai->dai_config->ssp.tdm_slots;
+ channels->max = dai->dai_config->ssp.tdm_slots;
+
+ dev_dbg(sdev->dev,
+ "rate_min: %d rate_max: %d\n", rate->min, rate->max);
+ dev_dbg(sdev->dev,
+ "channels_min: %d channels_max: %d\n",
+ channels->min, channels->max);
+
+ break;
+ case SOF_DAI_INTEL_DMIC:
+ /* DMIC only supports 16 or 32 bit formats */
+ if (dai->comp_dai.config.frame_fmt == SOF_IPC_FRAME_S24_4LE) {
+ dev_err(sdev->dev,
+ "error: invalid fmt %d for DAI type %d\n",
+ dai->comp_dai.config.frame_fmt,
+ dai->dai_config->type);
+ }
+ break;
+ case SOF_DAI_INTEL_HDA:
+ /* do nothing for HDA dai_link */
+ break;
+ default:
+ dev_err(sdev->dev, "error: invalid DAI type %d\n",
+ dai->dai_config->type);
+ break;
+ }
+
+ return 0;
+}
+
+static int sof_pcm_probe(struct snd_soc_component *component)
+{
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(component);
+ struct snd_sof_pdata *plat_data = sdev->pdata;
+ const char *tplg_filename;
+ int ret;
+
+ /* load the default topology */
+ sdev->component = component;
+
+ tplg_filename = devm_kasprintf(sdev->dev, GFP_KERNEL,
+ "%s/%s",
+ plat_data->tplg_filename_prefix,
+ plat_data->tplg_filename);
+ if (!tplg_filename)
+ return -ENOMEM;
+
+ ret = snd_sof_load_topology(sdev, tplg_filename);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: failed to load DSP topology %d\n",
+ ret);
+ return ret;
+ }
+
+	/*
+	 * Some SOF platforms, e.g. BYT, may not have their platform runtime
+	 * PM callbacks set. Increment the usage count to prevent the device
+	 * from entering runtime suspend.
+	 */
+ if (!sof_ops(sdev)->runtime_suspend || !sof_ops(sdev)->runtime_resume)
+ pm_runtime_get_noresume(sdev->dev);
+
+ return ret;
+}
+
+static void sof_pcm_remove(struct snd_soc_component *component)
+{
+ /* remove topology */
+ snd_soc_tplg_component_remove(component, SND_SOC_TPLG_INDEX_ALL);
+}
+
+void snd_sof_new_platform_drv(struct snd_sof_dev *sdev)
+{
+ struct snd_soc_component_driver *pd = &sdev->plat_drv;
+ struct snd_sof_pdata *plat_data = sdev->pdata;
+ const char *drv_name;
+
+ drv_name = plat_data->machine->drv_name;
+
+ pd->name = "sof-audio-component";
+ pd->probe = sof_pcm_probe;
+ pd->remove = sof_pcm_remove;
+ pd->ops = &sof_pcm_ops;
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_COMPRESS)
+ pd->compr_ops = &sof_compressed_ops;
+#endif
+ pd->pcm_new = sof_pcm_new;
+ pd->ignore_machine = drv_name;
+ pd->be_hw_params_fixup = sof_pcm_dai_link_fixup;
+ pd->be_pcm_base = SOF_BE_PCM_BASE;
+ pd->use_dai_pcm_id = true;
+ pd->topology_name_prefix = "sof";
+
+ /* increment module refcount when a pcm is opened */
+ pd->module_get_upon_open = 1;
+}
diff --git a/sound/soc/sof/pm.c b/sound/soc/sof/pm.c
new file mode 100644
index 000000000000..8ef1d51025d8
--- /dev/null
+++ b/sound/soc/sof/pm.c
@@ -0,0 +1,388 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018 Intel Corporation. All rights reserved.
+//
+// Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+//
+
+#include "ops.h"
+#include "sof-priv.h"
+
+static int sof_restore_kcontrols(struct snd_sof_dev *sdev)
+{
+ struct snd_sof_control *scontrol;
+ int ipc_cmd, ctrl_type;
+ int ret = 0;
+
+ /* restore kcontrol values */
+ list_for_each_entry(scontrol, &sdev->kcontrol_list, list) {
+ /* reset readback offset for scontrol after resuming */
+ scontrol->readback_offset = 0;
+
+ /* notify DSP of kcontrol values */
+ switch (scontrol->cmd) {
+ case SOF_CTRL_CMD_VOLUME:
+ case SOF_CTRL_CMD_ENUM:
+ case SOF_CTRL_CMD_SWITCH:
+ ipc_cmd = SOF_IPC_COMP_SET_VALUE;
+ ctrl_type = SOF_CTRL_TYPE_VALUE_CHAN_SET;
+ ret = snd_sof_ipc_set_get_comp_data(sdev->ipc, scontrol,
+ ipc_cmd, ctrl_type,
+ scontrol->cmd,
+ true);
+ break;
+ case SOF_CTRL_CMD_BINARY:
+ ipc_cmd = SOF_IPC_COMP_SET_DATA;
+ ctrl_type = SOF_CTRL_TYPE_DATA_SET;
+ ret = snd_sof_ipc_set_get_comp_data(sdev->ipc, scontrol,
+ ipc_cmd, ctrl_type,
+ scontrol->cmd,
+ true);
+ break;
+
+ default:
+ break;
+ }
+
+ if (ret < 0) {
+ dev_err(sdev->dev,
+ "error: failed kcontrol value set for widget: %d\n",
+ scontrol->comp_id);
+
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int sof_restore_pipelines(struct snd_sof_dev *sdev)
+{
+ struct snd_sof_widget *swidget;
+ struct snd_sof_route *sroute;
+ struct sof_ipc_pipe_new *pipeline;
+ struct snd_sof_dai *dai;
+ struct sof_ipc_comp_dai *comp_dai;
+ struct sof_ipc_cmd_hdr *hdr;
+ int ret;
+
+ /* restore pipeline components */
+ list_for_each_entry_reverse(swidget, &sdev->widget_list, list) {
+ struct sof_ipc_comp_reply r;
+
+ /* skip if there is no private data */
+ if (!swidget->private)
+ continue;
+
+ switch (swidget->id) {
+ case snd_soc_dapm_dai_in:
+ case snd_soc_dapm_dai_out:
+ dai = swidget->private;
+ comp_dai = &dai->comp_dai;
+ ret = sof_ipc_tx_message(sdev->ipc,
+ comp_dai->comp.hdr.cmd,
+ comp_dai, sizeof(*comp_dai),
+ &r, sizeof(r));
+ break;
+ case snd_soc_dapm_scheduler:
+
+ /*
+ * During suspend, all DSP cores are powered off.
+ * Therefore upon resume, create the pipeline comp
+ * and power up the core that the pipeline is
+ * scheduled on.
+ */
+ pipeline = swidget->private;
+ ret = sof_load_pipeline_ipc(sdev, pipeline, &r);
+ break;
+ default:
+ hdr = swidget->private;
+ ret = sof_ipc_tx_message(sdev->ipc, hdr->cmd,
+ swidget->private, hdr->size,
+ &r, sizeof(r));
+ break;
+ }
+ if (ret < 0) {
+ dev_err(sdev->dev,
+ "error: failed to load widget type %d with ID: %d\n",
+ swidget->widget->id, swidget->comp_id);
+
+ return ret;
+ }
+ }
+
+ /* restore pipeline connections */
+ list_for_each_entry_reverse(sroute, &sdev->route_list, list) {
+ struct sof_ipc_pipe_comp_connect *connect;
+ struct sof_ipc_reply reply;
+
+ /* skip if there's no private data */
+ if (!sroute->private)
+ continue;
+
+ connect = sroute->private;
+
+ /* send ipc */
+ ret = sof_ipc_tx_message(sdev->ipc,
+ connect->hdr.cmd,
+ connect, sizeof(*connect),
+ &reply, sizeof(reply));
+ if (ret < 0) {
+ dev_err(sdev->dev,
+ "error: failed to load route sink %s control %s source %s\n",
+ sroute->route->sink,
+ sroute->route->control ? sroute->route->control
+ : "none",
+ sroute->route->source);
+
+ return ret;
+ }
+ }
+
+ /* restore dai links */
+ list_for_each_entry_reverse(dai, &sdev->dai_list, list) {
+ struct sof_ipc_reply reply;
+ struct sof_ipc_dai_config *config = dai->dai_config;
+
+ if (!config) {
+ dev_err(sdev->dev, "error: no config for DAI %s\n",
+ dai->name);
+ continue;
+ }
+
+ ret = sof_ipc_tx_message(sdev->ipc,
+ config->hdr.cmd, config,
+ config->hdr.size,
+ &reply, sizeof(reply));
+
+ if (ret < 0) {
+ dev_err(sdev->dev,
+ "error: failed to set dai config for %s\n",
+ dai->name);
+
+ return ret;
+ }
+ }
+
+ /* complete pipeline */
+ list_for_each_entry(swidget, &sdev->widget_list, list) {
+ switch (swidget->id) {
+ case snd_soc_dapm_scheduler:
+ swidget->complete =
+ snd_sof_complete_pipeline(sdev, swidget);
+ break;
+ default:
+ break;
+ }
+ }
+
+ /* restore pipeline kcontrols */
+ ret = sof_restore_kcontrols(sdev);
+ if (ret < 0)
+ dev_err(sdev->dev,
+ "error: restoring kcontrols after resume\n");
+
+ return ret;
+}
+
+static int sof_send_pm_ipc(struct snd_sof_dev *sdev, int cmd)
+{
+ struct sof_ipc_pm_ctx pm_ctx;
+ struct sof_ipc_reply reply;
+
+ memset(&pm_ctx, 0, sizeof(pm_ctx));
+
+ /* configure ctx save ipc message */
+ pm_ctx.hdr.size = sizeof(pm_ctx);
+ pm_ctx.hdr.cmd = SOF_IPC_GLB_PM_MSG | cmd;
+
+ /* send ctx save ipc to dsp */
+ return sof_ipc_tx_message(sdev->ipc, pm_ctx.hdr.cmd, &pm_ctx,
+ sizeof(pm_ctx), &reply, sizeof(reply));
+}
+
+static void sof_set_hw_params_upon_resume(struct snd_sof_dev *sdev)
+{
+ struct snd_pcm_substream *substream;
+ struct snd_sof_pcm *spcm;
+ snd_pcm_state_t state;
+ int dir;
+
+	/*
+	 * SOF requires hw_params to be set up internally upon resume, so set
+	 * the flag for those streams that have been suspended.
+	 */
+ list_for_each_entry(spcm, &sdev->pcm_list, list) {
+ for (dir = 0; dir <= SNDRV_PCM_STREAM_CAPTURE; dir++) {
+ substream = spcm->stream[dir].substream;
+ if (!substream || !substream->runtime)
+ continue;
+
+ state = substream->runtime->status->state;
+ if (state == SNDRV_PCM_STATE_SUSPENDED)
+ spcm->hw_params_upon_resume[dir] = 1;
+ }
+ }
+
+ /* set internal flag for BE */
+ snd_sof_dsp_hw_params_upon_resume(sdev);
+}
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG_ENABLE_DEBUGFS_CACHE)
+static void sof_cache_debugfs(struct snd_sof_dev *sdev)
+{
+ struct snd_sof_dfsentry *dfse;
+
+ list_for_each_entry(dfse, &sdev->dfsentry_list, list) {
+
+ /* nothing to do if debugfs buffer is not IO mem */
+ if (dfse->type == SOF_DFSENTRY_TYPE_BUF)
+ continue;
+
+ /* cache memory that is only accessible in D0 */
+ if (dfse->access_type == SOF_DEBUGFS_ACCESS_D0_ONLY)
+ memcpy_fromio(dfse->cache_buf, dfse->io_mem,
+ dfse->size);
+ }
+}
+#endif
+
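+/*
+ * Common resume path for both runtime and system resume: power up the DSP,
+ * reload and boot the firmware, re-enable DMA trace, restore the topology
+ * pipelines and finally notify the DSP that the context has been restored.
+ */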
+static int sof_resume(struct device *dev, bool runtime_resume)
+{
+ struct snd_sof_dev *sdev = dev_get_drvdata(dev);
+ int ret;
+
+ /* do nothing if dsp resume callbacks are not set */
+ if (!sof_ops(sdev)->resume || !sof_ops(sdev)->runtime_resume)
+ return 0;
+
+ /*
+ * if the runtime_resume flag is set, call the runtime_resume routine
+ * or else call the system resume routine
+ */
+ if (runtime_resume)
+ ret = snd_sof_dsp_runtime_resume(sdev);
+ else
+ ret = snd_sof_dsp_resume(sdev);
+ if (ret < 0) {
+ dev_err(sdev->dev,
+ "error: failed to power up DSP after resume\n");
+ return ret;
+ }
+
+ /* load the firmware */
+ ret = snd_sof_load_firmware(sdev);
+ if (ret < 0) {
+ dev_err(sdev->dev,
+ "error: failed to load DSP firmware after resume %d\n",
+ ret);
+ return ret;
+ }
+
+ /* boot the firmware */
+ ret = snd_sof_run_firmware(sdev);
+ if (ret < 0) {
+ dev_err(sdev->dev,
+ "error: failed to boot DSP firmware after resume %d\n",
+ ret);
+ return ret;
+ }
+
+	/* resume DMA trace; only an IPC needs to be sent */
+ ret = snd_sof_init_trace_ipc(sdev);
+ if (ret < 0) {
+ /* non fatal */
+ dev_warn(sdev->dev,
+ "warning: failed to init trace after resume %d\n",
+ ret);
+ }
+
+ /* restore pipelines */
+ ret = sof_restore_pipelines(sdev);
+ if (ret < 0) {
+ dev_err(sdev->dev,
+ "error: failed to restore pipeline after resume %d\n",
+ ret);
+ return ret;
+ }
+
+ /* notify DSP of system resume */
+ ret = sof_send_pm_ipc(sdev, SOF_IPC_PM_CTX_RESTORE);
+ if (ret < 0)
+ dev_err(sdev->dev,
+ "error: ctx_restore ipc error during resume %d\n",
+ ret);
+
+ return ret;
+}
+
+static int sof_suspend(struct device *dev, bool runtime_suspend)
+{
+ struct snd_sof_dev *sdev = dev_get_drvdata(dev);
+ int ret;
+
+ /* do nothing if dsp suspend callback is not set */
+ if (!sof_ops(sdev)->suspend)
+ return 0;
+
+ /* release trace */
+ snd_sof_release_trace(sdev);
+
+	/* mark suspended streams so hw_params is set up again on resume */
+ if (!runtime_suspend)
+ sof_set_hw_params_upon_resume(sdev);
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG_ENABLE_DEBUGFS_CACHE)
+ /* cache debugfs contents during runtime suspend */
+ if (runtime_suspend)
+ sof_cache_debugfs(sdev);
+#endif
+ /* notify DSP of upcoming power down */
+ ret = sof_send_pm_ipc(sdev, SOF_IPC_PM_CTX_SAVE);
+ if (ret < 0) {
+ dev_err(sdev->dev,
+ "error: ctx_save ipc error during suspend %d\n",
+ ret);
+ return ret;
+ }
+
+ /* power down all DSP cores */
+ if (runtime_suspend)
+ ret = snd_sof_dsp_runtime_suspend(sdev, 0);
+ else
+ ret = snd_sof_dsp_suspend(sdev, 0);
+ if (ret < 0)
+ dev_err(sdev->dev,
+ "error: failed to power down DSP during suspend %d\n",
+ ret);
+
+ return ret;
+}
+
+int snd_sof_runtime_suspend(struct device *dev)
+{
+ return sof_suspend(dev, true);
+}
+EXPORT_SYMBOL(snd_sof_runtime_suspend);
+
+int snd_sof_runtime_resume(struct device *dev)
+{
+ return sof_resume(dev, true);
+}
+EXPORT_SYMBOL(snd_sof_runtime_resume);
+
+int snd_sof_resume(struct device *dev)
+{
+ return sof_resume(dev, false);
+}
+EXPORT_SYMBOL(snd_sof_resume);
+
+int snd_sof_suspend(struct device *dev)
+{
+ return sof_suspend(dev, false);
+}
+EXPORT_SYMBOL(snd_sof_suspend);
diff --git a/sound/soc/sof/sof-acpi-dev.c b/sound/soc/sof/sof-acpi-dev.c
new file mode 100644
index 000000000000..e9cf69874b5b
--- /dev/null
+++ b/sound/soc/sof/sof-acpi-dev.c
@@ -0,0 +1,312 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018 Intel Corporation. All rights reserved.
+//
+// Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+//
+
+#include <linux/acpi.h>
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <sound/soc-acpi.h>
+#include <sound/soc-acpi-intel-match.h>
+#include <sound/sof.h>
+#ifdef CONFIG_X86
+#include <asm/iosf_mbi.h>
+#endif
+
+#include "ops.h"
+
+/* platform specific devices */
+#include "intel/shim.h"
+
+static char *fw_path;
+module_param(fw_path, charp, 0444);
+MODULE_PARM_DESC(fw_path, "alternate path for SOF firmware.");
+
+static char *tplg_path;
+module_param(tplg_path, charp, 0444);
+MODULE_PARM_DESC(tplg_path, "alternate path for SOF topology.");
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HASWELL)
+static const struct sof_dev_desc sof_acpi_haswell_desc = {
+ .machines = snd_soc_acpi_intel_haswell_machines,
+ .resindex_lpe_base = 0,
+ .resindex_pcicfg_base = 1,
+ .resindex_imr_base = -1,
+ .irqindex_host_ipc = 0,
+ .chip_info = &hsw_chip_info,
+ .default_fw_path = "intel/sof",
+ .default_tplg_path = "intel/sof-tplg",
+ .nocodec_fw_filename = "sof-hsw.ri",
+ .nocodec_tplg_filename = "sof-hsw-nocodec.tplg",
+ .ops = &sof_hsw_ops,
+ .arch_ops = &sof_xtensa_arch_ops
+};
+#endif
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_BROADWELL)
+static const struct sof_dev_desc sof_acpi_broadwell_desc = {
+ .machines = snd_soc_acpi_intel_broadwell_machines,
+ .resindex_lpe_base = 0,
+ .resindex_pcicfg_base = 1,
+ .resindex_imr_base = -1,
+ .irqindex_host_ipc = 0,
+ .chip_info = &bdw_chip_info,
+ .default_fw_path = "intel/sof",
+ .default_tplg_path = "intel/sof-tplg",
+ .nocodec_fw_filename = "sof-bdw.ri",
+ .nocodec_tplg_filename = "sof-bdw-nocodec.tplg",
+ .ops = &sof_bdw_ops,
+ .arch_ops = &sof_xtensa_arch_ops
+};
+#endif
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_BAYTRAIL)
+
+/* BYTCR uses different IRQ index */
+static const struct sof_dev_desc sof_acpi_baytrailcr_desc = {
+ .machines = snd_soc_acpi_intel_baytrail_machines,
+ .resindex_lpe_base = 0,
+ .resindex_pcicfg_base = 1,
+ .resindex_imr_base = 2,
+ .irqindex_host_ipc = 0,
+ .chip_info = &byt_chip_info,
+ .default_fw_path = "intel/sof",
+ .default_tplg_path = "intel/sof-tplg",
+ .nocodec_fw_filename = "sof-byt.ri",
+ .nocodec_tplg_filename = "sof-byt-nocodec.tplg",
+ .ops = &sof_byt_ops,
+ .arch_ops = &sof_xtensa_arch_ops
+};
+
+static const struct sof_dev_desc sof_acpi_baytrail_desc = {
+ .machines = snd_soc_acpi_intel_baytrail_machines,
+ .resindex_lpe_base = 0,
+ .resindex_pcicfg_base = 1,
+ .resindex_imr_base = 2,
+ .irqindex_host_ipc = 5,
+ .chip_info = &byt_chip_info,
+ .default_fw_path = "intel/sof",
+ .default_tplg_path = "intel/sof-tplg",
+ .nocodec_fw_filename = "sof-byt.ri",
+ .nocodec_tplg_filename = "sof-byt-nocodec.tplg",
+ .ops = &sof_byt_ops,
+ .arch_ops = &sof_xtensa_arch_ops
+};
+
+#ifdef CONFIG_X86 /* TODO: move this to common helper */
+
+static bool is_byt_cr(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ int status;
+
+ if (iosf_mbi_available()) {
+ u32 bios_status;
+ status = iosf_mbi_read(BT_MBI_UNIT_PMC, /* 0x04 PUNIT */
+ MBI_REG_READ, /* 0x10 */
+ 0x006, /* BIOS_CONFIG */
+ &bios_status);
+
+ if (status) {
+ dev_err(dev, "could not read PUNIT BIOS_CONFIG\n");
+ } else {
+ /* bits 26:27 mirror PMIC options */
+ bios_status = (bios_status >> 26) & 3;
+
+ if (bios_status == 1 || bios_status == 3) {
+ dev_info(dev, "Detected Baytrail-CR platform\n");
+ return true;
+ }
+
+ dev_info(dev, "BYT-CR not detected\n");
+ }
+ } else {
+ dev_info(dev, "IOSF_MBI not available, no BYT-CR detection\n");
+ }
+
+ if (platform_get_resource(pdev, IORESOURCE_IRQ, 5) == NULL) {
+ /*
+ * Some devices detected as BYT-T have only a single IRQ listed,
+ * causing platform_get_irq with index 5 to return -ENXIO.
+ * The correct IRQ in this case is at index 0, as on BYT-CR.
+ */
+ dev_info(dev, "Falling back to Baytrail-CR platform\n");
+ return true;
+ }
+
+ return false;
+}
+#else
+static bool is_byt_cr(struct platform_device *pdev)
+{
+	return false;
+}
+#endif
+
+static const struct sof_dev_desc sof_acpi_cherrytrail_desc = {
+ .machines = snd_soc_acpi_intel_cherrytrail_machines,
+ .resindex_lpe_base = 0,
+ .resindex_pcicfg_base = 1,
+ .resindex_imr_base = 2,
+ .irqindex_host_ipc = 5,
+ .chip_info = &cht_chip_info,
+ .default_fw_path = "intel/sof",
+ .default_tplg_path = "intel/sof-tplg",
+ .nocodec_fw_filename = "sof-cht.ri",
+ .nocodec_tplg_filename = "sof-cht-nocodec.tplg",
+ .ops = &sof_cht_ops,
+ .arch_ops = &sof_xtensa_arch_ops
+};
+
+#endif
+
+static const struct dev_pm_ops sof_acpi_pm = {
+ SET_SYSTEM_SLEEP_PM_OPS(snd_sof_suspend, snd_sof_resume)
+ SET_RUNTIME_PM_OPS(snd_sof_runtime_suspend, snd_sof_runtime_resume,
+ NULL)
+};
+
+static void sof_acpi_probe_complete(struct device *dev)
+{
+ /* allow runtime_pm */
+ pm_runtime_set_autosuspend_delay(dev, SND_SOF_SUSPEND_DELAY_MS);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_enable(dev);
+}
+
+static int sof_acpi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ const struct sof_dev_desc *desc;
+ struct snd_soc_acpi_mach *mach;
+ struct snd_sof_pdata *sof_pdata;
+ const struct snd_sof_dsp_ops *ops;
+ int ret;
+
+ dev_dbg(&pdev->dev, "ACPI DSP detected");
+
+ sof_pdata = devm_kzalloc(dev, sizeof(*sof_pdata), GFP_KERNEL);
+ if (!sof_pdata)
+ return -ENOMEM;
+
+ desc = device_get_match_data(dev);
+ if (!desc)
+ return -ENODEV;
+
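+	/* Baytrail-CR boards use a different descriptor (IPC IRQ at index 0) */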
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_BAYTRAIL)
+ if (desc == &sof_acpi_baytrail_desc && is_byt_cr(pdev))
+ desc = &sof_acpi_baytrailcr_desc;
+#endif
+
+ /* get ops for platform */
+ ops = desc->ops;
+ if (!ops) {
+ dev_err(dev, "error: no matching ACPI descriptor ops\n");
+ return -ENODEV;
+ }
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_FORCE_NOCODEC_MODE)
+ /* force nocodec mode */
+ dev_warn(dev, "Force to use nocodec mode\n");
+ mach = devm_kzalloc(dev, sizeof(*mach), GFP_KERNEL);
+ if (!mach)
+ return -ENOMEM;
+ ret = sof_nocodec_setup(dev, sof_pdata, mach, desc, ops);
+ if (ret < 0)
+ return ret;
+#else
+ /* find machine */
+ mach = snd_soc_acpi_find_machine(desc->machines);
+ if (!mach) {
+ dev_warn(dev, "warning: No matching ASoC machine driver found\n");
+ } else {
+ sof_pdata->fw_filename = mach->sof_fw_filename;
+ sof_pdata->tplg_filename = mach->sof_tplg_filename;
+ }
+#endif
+
+ if (mach) {
+ mach->mach_params.platform = dev_name(dev);
+ mach->mach_params.acpi_ipc_irq_index = desc->irqindex_host_ipc;
+ }
+
+ sof_pdata->machine = mach;
+ sof_pdata->desc = desc;
+ sof_pdata->dev = &pdev->dev;
+ sof_pdata->platform = dev_name(dev);
+
+ /* alternate fw and tplg filenames ? */
+ if (fw_path)
+ sof_pdata->fw_filename_prefix = fw_path;
+ else
+ sof_pdata->fw_filename_prefix =
+ sof_pdata->desc->default_fw_path;
+
+ if (tplg_path)
+ sof_pdata->tplg_filename_prefix = tplg_path;
+ else
+ sof_pdata->tplg_filename_prefix =
+ sof_pdata->desc->default_tplg_path;
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_PROBE_WORK_QUEUE)
+ /* set callback to enable runtime_pm */
+ sof_pdata->sof_probe_complete = sof_acpi_probe_complete;
+#endif
+ /* call sof helper for DSP hardware probe */
+ ret = snd_sof_device_probe(dev, sof_pdata);
+ if (ret) {
+ dev_err(dev, "error: failed to probe DSP hardware!\n");
+ return ret;
+ }
+
+#if !IS_ENABLED(CONFIG_SND_SOC_SOF_PROBE_WORK_QUEUE)
+ sof_acpi_probe_complete(dev);
+#endif
+
+ return ret;
+}
+
+static int sof_acpi_remove(struct platform_device *pdev)
+{
+ pm_runtime_disable(&pdev->dev);
+
+ /* call sof helper for DSP hardware remove */
+ snd_sof_device_remove(&pdev->dev);
+
+ return 0;
+}
+
+static const struct acpi_device_id sof_acpi_match[] = {
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HASWELL)
+ { "INT33C8", (unsigned long)&sof_acpi_haswell_desc },
+#endif
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_BROADWELL)
+ { "INT3438", (unsigned long)&sof_acpi_broadwell_desc },
+#endif
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_BAYTRAIL)
+ { "80860F28", (unsigned long)&sof_acpi_baytrail_desc },
+ { "808622A8", (unsigned long)&sof_acpi_cherrytrail_desc },
+#endif
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, sof_acpi_match);
+
+/* acpi_driver definition */
+static struct platform_driver snd_sof_acpi_driver = {
+ .probe = sof_acpi_probe,
+ .remove = sof_acpi_remove,
+ .driver = {
+ .name = "sof-audio-acpi",
+ .pm = &sof_acpi_pm,
+ .acpi_match_table = ACPI_PTR(sof_acpi_match),
+ },
+};
+module_platform_driver(snd_sof_acpi_driver);
+
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/sound/soc/sof/sof-pci-dev.c b/sound/soc/sof/sof-pci-dev.c
new file mode 100644
index 000000000000..b778dffb2d25
--- /dev/null
+++ b/sound/soc/sof/sof-pci-dev.c
@@ -0,0 +1,373 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018 Intel Corporation. All rights reserved.
+//
+// Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+//
+
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/pm_runtime.h>
+#include <sound/soc-acpi.h>
+#include <sound/soc-acpi-intel-match.h>
+#include <sound/sof.h>
+#include "ops.h"
+
+/* platform specific devices */
+#include "intel/shim.h"
+#include "intel/hda.h"
+
+static char *fw_path;
+module_param(fw_path, charp, 0444);
+MODULE_PARM_DESC(fw_path, "alternate path for SOF firmware.");
+
+static char *tplg_path;
+module_param(tplg_path, charp, 0444);
+MODULE_PARM_DESC(tplg_path, "alternate path for SOF topology.");
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_APOLLOLAKE)
+static const struct sof_dev_desc bxt_desc = {
+ .machines = snd_soc_acpi_intel_bxt_machines,
+ .resindex_lpe_base = 0,
+ .resindex_pcicfg_base = -1,
+ .resindex_imr_base = -1,
+ .irqindex_host_ipc = -1,
+ .resindex_dma_base = -1,
+ .chip_info = &apl_chip_info,
+ .default_fw_path = "intel/sof",
+ .default_tplg_path = "intel/sof-tplg",
+ .nocodec_fw_filename = "sof-apl.ri",
+ .nocodec_tplg_filename = "sof-apl-nocodec.tplg",
+ .ops = &sof_apl_ops,
+ .arch_ops = &sof_xtensa_arch_ops
+};
+#endif
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_GEMINILAKE)
+static const struct sof_dev_desc glk_desc = {
+ .machines = snd_soc_acpi_intel_glk_machines,
+ .resindex_lpe_base = 0,
+ .resindex_pcicfg_base = -1,
+ .resindex_imr_base = -1,
+ .irqindex_host_ipc = -1,
+ .resindex_dma_base = -1,
+ .chip_info = &apl_chip_info,
+ .default_fw_path = "intel/sof",
+ .default_tplg_path = "intel/sof-tplg",
+ .nocodec_fw_filename = "sof-glk.ri",
+ .nocodec_tplg_filename = "sof-glk-nocodec.tplg",
+ .ops = &sof_apl_ops,
+ .arch_ops = &sof_xtensa_arch_ops
+};
+#endif
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_MERRIFIELD)
+static struct snd_soc_acpi_mach sof_tng_machines[] = {
+ {
+ .id = "INT343A",
+ .drv_name = "edison",
+ .sof_fw_filename = "sof-byt.ri",
+ .sof_tplg_filename = "sof-byt.tplg",
+ },
+ {}
+};
+
+static const struct sof_dev_desc tng_desc = {
+ .machines = sof_tng_machines,
+ .resindex_lpe_base = 3, /* IRAM, but subtract IRAM offset */
+ .resindex_pcicfg_base = -1,
+ .resindex_imr_base = 0,
+ .irqindex_host_ipc = -1,
+ .resindex_dma_base = -1,
+ .chip_info = &tng_chip_info,
+ .default_fw_path = "intel/sof",
+ .default_tplg_path = "intel/sof-tplg",
+ .nocodec_fw_filename = "sof-byt.ri",
+ .nocodec_tplg_filename = "sof-byt.tplg",
+ .ops = &sof_tng_ops,
+ .arch_ops = &sof_xtensa_arch_ops
+};
+#endif
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_CANNONLAKE)
+static const struct sof_dev_desc cnl_desc = {
+ .machines = snd_soc_acpi_intel_cnl_machines,
+ .resindex_lpe_base = 0,
+ .resindex_pcicfg_base = -1,
+ .resindex_imr_base = -1,
+ .irqindex_host_ipc = -1,
+ .resindex_dma_base = -1,
+ .chip_info = &cnl_chip_info,
+ .default_fw_path = "intel/sof",
+ .default_tplg_path = "intel/sof-tplg",
+ .nocodec_fw_filename = "sof-cnl.ri",
+ .nocodec_tplg_filename = "sof-cnl-nocodec.tplg",
+ .ops = &sof_cnl_ops,
+ .arch_ops = &sof_xtensa_arch_ops
+};
+#endif
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_COFFEELAKE)
+static const struct sof_dev_desc cfl_desc = {
+ .machines = snd_soc_acpi_intel_cnl_machines,
+ .resindex_lpe_base = 0,
+ .resindex_pcicfg_base = -1,
+ .resindex_imr_base = -1,
+ .irqindex_host_ipc = -1,
+ .resindex_dma_base = -1,
+ .chip_info = &cnl_chip_info,
+ .default_fw_path = "intel/sof",
+ .default_tplg_path = "intel/sof-tplg",
+ .nocodec_fw_filename = "sof-cnl.ri",
+ .nocodec_tplg_filename = "sof-cnl-nocodec.tplg",
+ .ops = &sof_cnl_ops,
+ .arch_ops = &sof_xtensa_arch_ops
+};
+#endif
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_ICELAKE)
+static const struct sof_dev_desc icl_desc = {
+ .machines = snd_soc_acpi_intel_icl_machines,
+ .resindex_lpe_base = 0,
+ .resindex_pcicfg_base = -1,
+ .resindex_imr_base = -1,
+ .irqindex_host_ipc = -1,
+ .resindex_dma_base = -1,
+ .chip_info = &cnl_chip_info,
+ .default_fw_path = "intel/sof",
+ .default_tplg_path = "intel/sof-tplg",
+ .nocodec_fw_filename = "sof-icl.ri",
+ .nocodec_tplg_filename = "sof-icl-nocodec.tplg",
+ .ops = &sof_cnl_ops,
+ .arch_ops = &sof_xtensa_arch_ops
+};
+#endif
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_SKYLAKE)
+static const struct sof_dev_desc skl_desc = {
+ .machines = snd_soc_acpi_intel_skl_machines,
+ .resindex_lpe_base = 0,
+ .resindex_pcicfg_base = -1,
+ .resindex_imr_base = -1,
+ .irqindex_host_ipc = -1,
+ .resindex_dma_base = -1,
+ .chip_info = &skl_chip_info,
+ .default_fw_path = "intel/sof",
+ .default_tplg_path = "intel/sof-tplg",
+ .nocodec_fw_filename = "sof-skl.ri",
+ .nocodec_tplg_filename = "sof-skl-nocodec.tplg",
+ .ops = &sof_skl_ops,
+ .arch_ops = &sof_xtensa_arch_ops
+};
+#endif
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_KABYLAKE)
+static const struct sof_dev_desc kbl_desc = {
+ .machines = snd_soc_acpi_intel_kbl_machines,
+ .resindex_lpe_base = 0,
+ .resindex_pcicfg_base = -1,
+ .resindex_imr_base = -1,
+ .irqindex_host_ipc = -1,
+ .resindex_dma_base = -1,
+ .chip_info = &skl_chip_info,
+ .default_fw_path = "intel/sof",
+ .default_tplg_path = "intel/sof-tplg",
+ .nocodec_fw_filename = "sof-kbl.ri",
+ .nocodec_tplg_filename = "sof-kbl-nocodec.tplg",
+ .ops = &sof_skl_ops,
+ .arch_ops = &sof_xtensa_arch_ops
+};
+#endif
+
+static const struct dev_pm_ops sof_pci_pm = {
+ SET_SYSTEM_SLEEP_PM_OPS(snd_sof_suspend, snd_sof_resume)
+ SET_RUNTIME_PM_OPS(snd_sof_runtime_suspend, snd_sof_runtime_resume,
+ NULL)
+};
+
+static void sof_pci_probe_complete(struct device *dev)
+{
+ dev_dbg(dev, "Completing SOF PCI probe");
+
+ /* allow runtime_pm */
+ pm_runtime_set_autosuspend_delay(dev, SND_SOF_SUSPEND_DELAY_MS);
+ pm_runtime_use_autosuspend(dev);
+
+	/*
+	 * Runtime PM for PCI devices is "forbidden" by default,
+	 * so call pm_runtime_allow() to enable it.
+	 */
+ pm_runtime_allow(dev);
+
+ /* follow recommendation in pci-driver.c to decrement usage counter */
+ pm_runtime_put_noidle(dev);
+}
+
+static int sof_pci_probe(struct pci_dev *pci,
+ const struct pci_device_id *pci_id)
+{
+ struct device *dev = &pci->dev;
+ const struct sof_dev_desc *desc =
+ (const struct sof_dev_desc *)pci_id->driver_data;
+ struct snd_soc_acpi_mach *mach;
+ struct snd_sof_pdata *sof_pdata;
+ const struct snd_sof_dsp_ops *ops;
+ int ret;
+
+ dev_dbg(&pci->dev, "PCI DSP detected");
+
+ /* get ops for platform */
+ ops = desc->ops;
+ if (!ops) {
+ dev_err(dev, "error: no matching PCI descriptor ops\n");
+ return -ENODEV;
+ }
+
+ sof_pdata = devm_kzalloc(dev, sizeof(*sof_pdata), GFP_KERNEL);
+ if (!sof_pdata)
+ return -ENOMEM;
+
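+	/* enable the PCI device and reserve its MMIO regions */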
+ ret = pcim_enable_device(pci);
+ if (ret < 0)
+ return ret;
+
+ ret = pci_request_regions(pci, "Audio DSP");
+ if (ret < 0)
+ return ret;
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_FORCE_NOCODEC_MODE)
+ /* force nocodec mode */
+ dev_warn(dev, "Force to use nocodec mode\n");
+ mach = devm_kzalloc(dev, sizeof(*mach), GFP_KERNEL);
+ if (!mach) {
+ ret = -ENOMEM;
+ goto release_regions;
+ }
+ ret = sof_nocodec_setup(dev, sof_pdata, mach, desc, ops);
+ if (ret < 0)
+ goto release_regions;
+
+#else
+ /* find machine */
+ mach = snd_soc_acpi_find_machine(desc->machines);
+ if (!mach) {
+ dev_warn(dev, "warning: No matching ASoC machine driver found\n");
+ } else {
+ mach->mach_params.platform = dev_name(dev);
+ sof_pdata->fw_filename = mach->sof_fw_filename;
+ sof_pdata->tplg_filename = mach->sof_tplg_filename;
+ }
+#endif /* CONFIG_SND_SOC_SOF_FORCE_NOCODEC_MODE */
+
+ sof_pdata->name = pci_name(pci);
+ sof_pdata->machine = mach;
+ sof_pdata->desc = (struct sof_dev_desc *)pci_id->driver_data;
+ sof_pdata->dev = dev;
+ sof_pdata->platform = dev_name(dev);
+
+ /* alternate fw and tplg filenames ? */
+ if (fw_path)
+ sof_pdata->fw_filename_prefix = fw_path;
+ else
+ sof_pdata->fw_filename_prefix =
+ sof_pdata->desc->default_fw_path;
+
+ if (tplg_path)
+ sof_pdata->tplg_filename_prefix = tplg_path;
+ else
+ sof_pdata->tplg_filename_prefix =
+ sof_pdata->desc->default_tplg_path;
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_PROBE_WORK_QUEUE)
+ /* set callback to enable runtime_pm */
+ sof_pdata->sof_probe_complete = sof_pci_probe_complete;
+#endif
+ /* call sof helper for DSP hardware probe */
+ ret = snd_sof_device_probe(dev, sof_pdata);
+ if (ret) {
+ dev_err(dev, "error: failed to probe DSP hardware!\n");
+ goto release_regions;
+ }
+
+#if !IS_ENABLED(CONFIG_SND_SOC_SOF_PROBE_WORK_QUEUE)
+ sof_pci_probe_complete(dev);
+#endif
+
+ return ret;
+
+release_regions:
+ pci_release_regions(pci);
+
+ return ret;
+}
+
+static void sof_pci_remove(struct pci_dev *pci)
+{
+ /* call sof helper for DSP hardware remove */
+ snd_sof_device_remove(&pci->dev);
+
+ /* follow recommendation in pci-driver.c to increment usage counter */
+ pm_runtime_get_noresume(&pci->dev);
+
+ /* release pci regions and disable device */
+ pci_release_regions(pci);
+}
+
+/* PCI IDs */
+static const struct pci_device_id sof_pci_ids[] = {
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_MERRIFIELD)
+ { PCI_DEVICE(0x8086, 0x119a),
+ .driver_data = (unsigned long)&tng_desc},
+#endif
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_APOLLOLAKE)
+ /* BXT-P & Apollolake */
+ { PCI_DEVICE(0x8086, 0x5a98),
+ .driver_data = (unsigned long)&bxt_desc},
+ { PCI_DEVICE(0x8086, 0x1a98),
+ .driver_data = (unsigned long)&bxt_desc},
+#endif
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_GEMINILAKE)
+ { PCI_DEVICE(0x8086, 0x3198),
+ .driver_data = (unsigned long)&glk_desc},
+#endif
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_CANNONLAKE)
+ { PCI_DEVICE(0x8086, 0x9dc8),
+ .driver_data = (unsigned long)&cnl_desc},
+#endif
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_COFFEELAKE)
+ { PCI_DEVICE(0x8086, 0xa348),
+ .driver_data = (unsigned long)&cfl_desc},
+#endif
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_KABYLAKE)
+ { PCI_DEVICE(0x8086, 0x9d71),
+ .driver_data = (unsigned long)&kbl_desc},
+#endif
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_SKYLAKE)
+ { PCI_DEVICE(0x8086, 0x9d70),
+ .driver_data = (unsigned long)&skl_desc},
+#endif
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_ICELAKE)
+ { PCI_DEVICE(0x8086, 0x34C8),
+ .driver_data = (unsigned long)&icl_desc},
+#endif
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, sof_pci_ids);
+
+/* pci_driver definition */
+static struct pci_driver snd_sof_pci_driver = {
+ .name = "sof-audio-pci",
+ .id_table = sof_pci_ids,
+ .probe = sof_pci_probe,
+ .remove = sof_pci_remove,
+ .driver = {
+ .pm = &sof_pci_pm,
+ },
+};
+module_pci_driver(snd_sof_pci_driver);
+
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/sound/soc/sof/sof-priv.h b/sound/soc/sof/sof-priv.h
new file mode 100644
index 000000000000..1e85d6f9c5c3
--- /dev/null
+++ b/sound/soc/sof/sof-priv.h
@@ -0,0 +1,635 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * Copyright(c) 2018 Intel Corporation. All rights reserved.
+ *
+ * Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+ */
+
+#ifndef __SOUND_SOC_SOF_PRIV_H
+#define __SOUND_SOC_SOF_PRIV_H
+
+#include <linux/device.h>
+
+#include <sound/hdaudio.h>
+#include <sound/soc.h>
+
+#include <sound/sof.h>
+#include <sound/sof/stream.h> /* needs to be included before control.h */
+#include <sound/sof/control.h>
+#include <sound/sof/dai.h>
+#include <sound/sof/info.h>
+#include <sound/sof/pm.h>
+#include <sound/sof/topology.h>
+#include <sound/sof/trace.h>
+
+#include <uapi/sound/sof/fw.h>
+
+/* debug flags */
+#define SOF_DBG_REGS BIT(1)
+#define SOF_DBG_MBOX BIT(2)
+#define SOF_DBG_TEXT BIT(3)
+#define SOF_DBG_PCI BIT(4)
+
+/* max BARs mmaped devices can use */
+#define SND_SOF_BARS 8
+
+/* time in ms for runtime suspend delay */
+#define SND_SOF_SUSPEND_DELAY_MS 2000
+
+/* DMA buffer size for trace */
+#define DMA_BUF_SIZE_FOR_TRACE (PAGE_SIZE * 16)
+
+/* max number of FE PCMs before BEs */
+#define SOF_BE_PCM_BASE 16
+
+#define SOF_IPC_DSP_REPLY 0
+#define SOF_IPC_HOST_REPLY 1
+
+/* convenience constructor for DAI driver streams */
+#define SOF_DAI_STREAM(sname, scmin, scmax, srates, sfmt) \
+ {.stream_name = sname, .channels_min = scmin, .channels_max = scmax, \
+ .rates = srates, .formats = sfmt}
+
+#define SOF_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE | \
+ SNDRV_PCM_FMTBIT_S32_LE | SNDRV_PCM_FMTBIT_FLOAT)
+
+struct snd_sof_dev;
+struct snd_sof_ipc_msg;
+struct snd_sof_ipc;
+struct snd_sof_debugfs_map;
+struct snd_soc_tplg_ops;
+struct snd_soc_component;
+struct snd_sof_pdata;
+
+/*
+ * SOF DSP HW abstraction operations.
+ * Used to abstract DSP HW architecture and any IO busses between host CPU
+ * and DSP device(s).
+ */
+struct snd_sof_dsp_ops {
+
+ /* probe and remove */
+ int (*probe)(struct snd_sof_dev *sof_dev); /* mandatory */
+ int (*remove)(struct snd_sof_dev *sof_dev); /* optional */
+
+ /* DSP core boot / reset */
+ int (*run)(struct snd_sof_dev *sof_dev); /* mandatory */
+ int (*stall)(struct snd_sof_dev *sof_dev); /* optional */
+ int (*reset)(struct snd_sof_dev *sof_dev); /* optional */
+ int (*core_power_up)(struct snd_sof_dev *sof_dev,
+ unsigned int core_mask); /* optional */
+ int (*core_power_down)(struct snd_sof_dev *sof_dev,
+ unsigned int core_mask); /* optional */
+
+ /*
+ * Register IO: only used by the respective drivers themselves.
+ * TODO: consider removing these operations and calling the
+ * respective implementations directly
+ */
+ void (*write)(struct snd_sof_dev *sof_dev, void __iomem *addr,
+ u32 value); /* optional */
+ u32 (*read)(struct snd_sof_dev *sof_dev,
+ void __iomem *addr); /* optional */
+ void (*write64)(struct snd_sof_dev *sof_dev, void __iomem *addr,
+ u64 value); /* optional */
+ u64 (*read64)(struct snd_sof_dev *sof_dev,
+ void __iomem *addr); /* optional */
+
+ /* memcpy IO */
+ void (*block_read)(struct snd_sof_dev *sof_dev, u32 bar,
+ u32 offset, void *dest,
+ size_t size); /* mandatory */
+ void (*block_write)(struct snd_sof_dev *sof_dev, u32 bar,
+ u32 offset, void *src,
+ size_t size); /* mandatory */
+
+ /* doorbell */
+ irqreturn_t (*irq_handler)(int irq, void *context); /* optional */
+ irqreturn_t (*irq_thread)(int irq, void *context); /* optional */
+
+ /* ipc */
+ int (*send_msg)(struct snd_sof_dev *sof_dev,
+ struct snd_sof_ipc_msg *msg); /* mandatory */
+
+ /* FW loading */
+ int (*load_firmware)(struct snd_sof_dev *sof_dev); /* mandatory */
+ int (*load_module)(struct snd_sof_dev *sof_dev,
+ struct snd_sof_mod_hdr *hdr); /* optional */
+ /*
+ * FW ready checks for ABI compatibility and creates
+ * memory windows at first boot
+ */
+ int (*fw_ready)(struct snd_sof_dev *sdev, u32 msg_id); /* optional */
+
+ /* connect pcm substream to a host stream */
+ int (*pcm_open)(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream); /* optional */
+ /* disconnect pcm substream from a host stream */
+ int (*pcm_close)(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream); /* optional */
+
+ /* host stream hw params */
+ int (*pcm_hw_params)(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct sof_ipc_stream_params *ipc_params); /* optional */
+
+ /* host stream trigger */
+ int (*pcm_trigger)(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream,
+ int cmd); /* optional */
+
+ /* host stream pointer */
+ snd_pcm_uframes_t (*pcm_pointer)(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream); /* optional */
+
+ /* host read DSP stream data */
+ void (*ipc_msg_data)(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream,
+ void *p, size_t sz); /* mandatory */
+
+ /* host configure DSP HW parameters */
+ int (*ipc_pcm_params)(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream,
+ const struct sof_ipc_pcm_params_reply *reply); /* mandatory */
+
+ /* pre/post firmware run */
+ int (*pre_fw_run)(struct snd_sof_dev *sof_dev); /* optional */
+ int (*post_fw_run)(struct snd_sof_dev *sof_dev); /* optional */
+
+ /* DSP PM */
+ int (*suspend)(struct snd_sof_dev *sof_dev, int state); /* optional */
+ int (*resume)(struct snd_sof_dev *sof_dev); /* optional */
+ int (*runtime_suspend)(struct snd_sof_dev *sof_dev,
+ int state); /* optional */
+ int (*runtime_resume)(struct snd_sof_dev *sof_dev); /* optional */
+ void (*set_hw_params_upon_resume)(struct snd_sof_dev *sdev); /* optional */
+
+ /* DSP clocking */
+ int (*set_clk)(struct snd_sof_dev *sof_dev, u32 freq); /* optional */
+
+ /* debug */
+ const struct snd_sof_debugfs_map *debug_map; /* optional */
+ int debug_map_count; /* optional */
+ void (*dbg_dump)(struct snd_sof_dev *sof_dev,
+ u32 flags); /* optional */
+ void (*ipc_dump)(struct snd_sof_dev *sof_dev); /* optional */
+
+ /* host DMA trace initialization */
+ int (*trace_init)(struct snd_sof_dev *sdev,
+ u32 *stream_tag); /* optional */
+ int (*trace_release)(struct snd_sof_dev *sdev); /* optional */
+ int (*trace_trigger)(struct snd_sof_dev *sdev,
+ int cmd); /* optional */
+
+ /* DAI ops */
+ struct snd_soc_dai_driver *drv;
+ int num_drv;
+};
+
+/* DSP architecture specific callbacks for oops and stack dumps */
+struct sof_arch_ops {
+ void (*dsp_oops)(struct snd_sof_dev *sdev, void *oops);
+ void (*dsp_stack)(struct snd_sof_dev *sdev, void *oops,
+ u32 *stack, u32 stack_words);
+};
+
+#define sof_arch_ops(sdev) ((sdev)->pdata->desc->arch_ops)
+
+/* DSP device HW descriptor mapping between bus ID and ops */
+struct sof_ops_table {
+ const struct sof_dev_desc *desc;
+ const struct snd_sof_dsp_ops *ops;
+};
+
+enum sof_dfsentry_type {
+ SOF_DFSENTRY_TYPE_IOMEM = 0,
+ SOF_DFSENTRY_TYPE_BUF,
+};
+
+enum sof_debugfs_access_type {
+ SOF_DEBUGFS_ACCESS_ALWAYS = 0,
+ SOF_DEBUGFS_ACCESS_D0_ONLY,
+};
+
+/* FS entry for debug files that can expose DSP memories, registers */
+struct snd_sof_dfsentry {
+ struct dentry *dfsentry;
+ size_t size;
+ enum sof_dfsentry_type type;
+ /*
+ * access_type specifies whether the underlying DSP resource
+ * (memory, register, etc.) is always accessible or only
+ * accessible when the DSP is in D0.
+ */
+ enum sof_debugfs_access_type access_type;
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG_ENABLE_DEBUGFS_CACHE)
+ char *cache_buf; /* buffer to cache the contents of debugfs memory */
+#endif
+ struct snd_sof_dev *sdev;
+ struct list_head list; /* list in sdev dfsentry list */
+ union {
+ void __iomem *io_mem;
+ void *buf;
+ };
+};
+
+/* Debug mapping for any DSP memory or registers that can be used for debug */
+struct snd_sof_debugfs_map {
+ const char *name;
+ u32 bar;
+ u32 offset;
+ u32 size;
+ /*
+ * access_type specifies if the memory is always accessible
+ * or if it is accessible only when the DSP is in D0.
+ */
+ enum sof_debugfs_access_type access_type;
+};
+
+/* mailbox descriptor, used for host <-> DSP IPC */
+struct snd_sof_mailbox {
+ u32 offset;
+ size_t size;
+};
+
+/* IPC message descriptor for host <-> DSP IO */
+struct snd_sof_ipc_msg {
+ /* message data */
+ u32 header;
+ void *msg_data;
+ void *reply_data;
+ size_t msg_size;
+ size_t reply_size;
+ int reply_error;
+
+ wait_queue_head_t waitq;
+ bool ipc_complete;
+};
+
+/* PCM stream, mapped to FW component */
+struct snd_sof_pcm_stream {
+ u32 comp_id;
+ struct snd_dma_buffer page_table;
+ struct sof_ipc_stream_posn posn;
+ struct snd_pcm_substream *substream;
+ struct work_struct period_elapsed_work;
+};
+
+/* ALSA SOF PCM device */
+struct snd_sof_pcm {
+ struct snd_sof_dev *sdev;
+ struct snd_soc_tplg_pcm pcm;
+ struct snd_sof_pcm_stream stream[2];
+ struct list_head list; /* list in sdev pcm list */
+ struct snd_pcm_hw_params params[2];
+ int hw_params_upon_resume[2]; /* set up hw_params upon resume */
+};
+
+/* ALSA SOF Kcontrol device */
+struct snd_sof_control {
+ struct snd_sof_dev *sdev;
+ int comp_id;
+ int num_channels;
+ u32 readback_offset; /* offset to mmaped data if used */
+ struct sof_ipc_ctrl_data *control_data;
+ u32 size; /* cdata size */
+ enum sof_ipc_ctrl_cmd cmd;
+ u32 *volume_table; /* volume table computed from tlv data */
+
+ struct list_head list; /* list in sdev control list */
+};
+
+/* ASoC SOF DAPM widget */
+struct snd_sof_widget {
+ struct snd_sof_dev *sdev;
+ int comp_id;
+ int pipeline_id;
+ int complete;
+ int id;
+
+ struct snd_soc_dapm_widget *widget;
+ struct list_head list; /* list in sdev widget list */
+
+ void *private; /* core does not touch this */
+};
+
+/* ASoC SOF DAPM route */
+struct snd_sof_route {
+ struct snd_sof_dev *sdev;
+
+ struct snd_soc_dapm_route *route;
+ struct list_head list; /* list in sdev route list */
+
+ void *private;
+};
+
+/* ASoC DAI device */
+struct snd_sof_dai {
+ struct snd_sof_dev *sdev;
+ const char *name;
+
+ struct sof_ipc_comp_dai comp_dai;
+ struct sof_ipc_dai_config *dai_config;
+ struct list_head list; /* list in sdev dai list */
+};
+
+/*
+ * SOF Device Level.
+ */
+struct snd_sof_dev {
+ struct device *dev;
+ spinlock_t ipc_lock; /* lock for IPC users */
+ spinlock_t hw_lock; /* lock for HW IO access */
+
+ /*
+ * ASoC components. plat_drv fields are set dynamically, so
+ * it cannot be const
+ */
+ struct snd_soc_component_driver plat_drv;
+
+ /* DSP firmware boot */
+ wait_queue_head_t boot_wait;
+ u32 boot_complete;
+ u32 first_boot;
+
+ /* work queue in case the probe is implemented in two steps */
+ struct work_struct probe_work;
+
+ /* DSP HW differentiation */
+ struct snd_sof_pdata *pdata;
+
+ /* IPC */
+ struct snd_sof_ipc *ipc;
+ struct snd_sof_mailbox dsp_box; /* DSP initiated IPC */
+ struct snd_sof_mailbox host_box; /* Host initiated IPC */
+ struct snd_sof_mailbox stream_box; /* Stream position update */
+ struct snd_sof_ipc_msg *msg;
+ int ipc_irq;
+ u32 next_comp_id; /* monotonic - reset during S3 */
+
+ /* memory bases for mmaped DSPs - set by dsp_init() */
+ void __iomem *bar[SND_SOF_BARS]; /* DSP base address */
+ int mmio_bar;
+ int mailbox_bar;
+ size_t dsp_oops_offset;
+
+ /* debug */
+ struct dentry *debugfs_root;
+ struct list_head dfsentry_list;
+
+ /* firmware loader */
+ struct snd_dma_buffer dmab;
+ struct snd_dma_buffer dmab_bdl;
+ struct sof_ipc_fw_ready fw_ready;
+ struct sof_ipc_fw_version fw_version;
+
+ /* topology */
+ struct snd_soc_tplg_ops *tplg_ops;
+ struct list_head pcm_list;
+ struct list_head kcontrol_list;
+ struct list_head widget_list;
+ struct list_head dai_list;
+ struct list_head route_list;
+ struct snd_soc_component *component;
+ u32 enabled_cores_mask; /* keep track of enabled cores */
+
+ /* FW configuration */
+ struct sof_ipc_dma_buffer_data *info_buffer;
+ struct sof_ipc_window *info_window;
+
+ /* IPC timeouts in ms */
+ int ipc_timeout;
+ int boot_timeout;
+
+ /* Wait queue for code loading */
+ wait_queue_head_t waitq;
+ int code_loading;
+
+ /* DMA for Trace */
+ struct snd_dma_buffer dmatb;
+ struct snd_dma_buffer dmatp;
+ int dma_trace_pages;
+ wait_queue_head_t trace_sleep;
+ u32 host_offset;
+ u32 dtrace_is_enabled;
+ u32 dtrace_error;
+ u32 msi_enabled;
+
+ void *private; /* core does not touch this */
+};
+
+/*
+ * Device Level.
+ */
+
+int snd_sof_device_probe(struct device *dev, struct snd_sof_pdata *plat_data);
+int snd_sof_device_remove(struct device *dev);
+
+int snd_sof_runtime_suspend(struct device *dev);
+int snd_sof_runtime_resume(struct device *dev);
+int snd_sof_resume(struct device *dev);
+int snd_sof_suspend(struct device *dev);
+
+void snd_sof_new_platform_drv(struct snd_sof_dev *sdev);
+
+int snd_sof_create_page_table(struct snd_sof_dev *sdev,
+ struct snd_dma_buffer *dmab,
+ unsigned char *page_table, size_t size);
+
+/*
+ * Firmware loading.
+ */
+int snd_sof_load_firmware(struct snd_sof_dev *sdev);
+int snd_sof_load_firmware_raw(struct snd_sof_dev *sdev);
+int snd_sof_load_firmware_memcpy(struct snd_sof_dev *sdev);
+int snd_sof_run_firmware(struct snd_sof_dev *sdev);
+int snd_sof_parse_module_memcpy(struct snd_sof_dev *sdev,
+ struct snd_sof_mod_hdr *module);
+void snd_sof_fw_unload(struct snd_sof_dev *sdev);
+int snd_sof_fw_parse_ext_data(struct snd_sof_dev *sdev, u32 bar, u32 offset);
+
+/*
+ * IPC low level APIs.
+ */
+struct snd_sof_ipc *snd_sof_ipc_init(struct snd_sof_dev *sdev);
+void snd_sof_ipc_free(struct snd_sof_dev *sdev);
+int snd_sof_ipc_reply(struct snd_sof_dev *sdev, u32 msg_id);
+void snd_sof_ipc_msgs_rx(struct snd_sof_dev *sdev);
+int snd_sof_ipc_stream_pcm_params(struct snd_sof_dev *sdev,
+ struct sof_ipc_pcm_params *params);
+int snd_sof_dsp_mailbox_init(struct snd_sof_dev *sdev, u32 dspbox,
+ size_t dspbox_size, u32 hostbox,
+ size_t hostbox_size);
+int snd_sof_ipc_valid(struct snd_sof_dev *sdev);
+int sof_ipc_tx_message(struct snd_sof_ipc *ipc, u32 header,
+ void *msg_data, size_t msg_bytes, void *reply_data,
+ size_t reply_bytes);
+struct snd_sof_widget *snd_sof_find_swidget(struct snd_sof_dev *sdev,
+ const char *name);
+struct snd_sof_widget *snd_sof_find_swidget_sname(struct snd_sof_dev *sdev,
+ const char *pcm_name,
+ int dir);
+struct snd_sof_dai *snd_sof_find_dai(struct snd_sof_dev *sdev,
+ const char *name);
+
+static inline
+struct snd_sof_pcm *snd_sof_find_spcm_dai(struct snd_sof_dev *sdev,
+ struct snd_soc_pcm_runtime *rtd)
+{
+ struct snd_sof_pcm *spcm = NULL;
+
+ list_for_each_entry(spcm, &sdev->pcm_list, list) {
+ if (le32_to_cpu(spcm->pcm.dai_id) == rtd->dai_link->id)
+ return spcm;
+ }
+
+ return NULL;
+}
+
+struct snd_sof_pcm *snd_sof_find_spcm_name(struct snd_sof_dev *sdev,
+ const char *name);
+struct snd_sof_pcm *snd_sof_find_spcm_comp(struct snd_sof_dev *sdev,
+ unsigned int comp_id,
+ int *direction);
+struct snd_sof_pcm *snd_sof_find_spcm_pcm_id(struct snd_sof_dev *sdev,
+ unsigned int pcm_id);
+void snd_sof_pcm_period_elapsed(struct snd_pcm_substream *substream);
+
+/*
+ * Stream IPC
+ */
+int snd_sof_ipc_stream_posn(struct snd_sof_dev *sdev,
+ struct snd_sof_pcm *spcm, int direction,
+ struct sof_ipc_stream_posn *posn);
+
+/*
+ * Mixer IPC
+ */
+int snd_sof_ipc_set_get_comp_data(struct snd_sof_ipc *ipc,
+ struct snd_sof_control *scontrol, u32 ipc_cmd,
+ enum sof_ipc_ctrl_type ctrl_type,
+ enum sof_ipc_ctrl_cmd ctrl_cmd,
+ bool send);
+
+/*
+ * Topology.
+ * There is no snd_sof_free_topology since topology components will
+ * be freed by snd_soc_unregister_component.
+ */
+int snd_sof_init_topology(struct snd_sof_dev *sdev,
+ struct snd_soc_tplg_ops *ops);
+int snd_sof_load_topology(struct snd_sof_dev *sdev, const char *file);
+int snd_sof_complete_pipeline(struct snd_sof_dev *sdev,
+ struct snd_sof_widget *swidget);
+
+int sof_load_pipeline_ipc(struct snd_sof_dev *sdev,
+ struct sof_ipc_pipe_new *pipeline,
+ struct sof_ipc_comp_reply *r);
+
+/*
+ * Trace/debug
+ */
+int snd_sof_init_trace(struct snd_sof_dev *sdev);
+void snd_sof_release_trace(struct snd_sof_dev *sdev);
+void snd_sof_free_trace(struct snd_sof_dev *sdev);
+int snd_sof_dbg_init(struct snd_sof_dev *sdev);
+void snd_sof_free_debug(struct snd_sof_dev *sdev);
+int snd_sof_debugfs_io_item(struct snd_sof_dev *sdev,
+ void __iomem *base, size_t size,
+ const char *name,
+ enum sof_debugfs_access_type access_type);
+int snd_sof_debugfs_buf_item(struct snd_sof_dev *sdev,
+ void *base, size_t size,
+ const char *name);
+int snd_sof_trace_update_pos(struct snd_sof_dev *sdev,
+ struct sof_ipc_dma_trace_posn *posn);
+void snd_sof_trace_notify_for_error(struct snd_sof_dev *sdev);
+void snd_sof_get_status(struct snd_sof_dev *sdev, u32 panic_code,
+ u32 tracep_code, void *oops,
+ struct sof_ipc_panic_info *panic_info,
+ void *stack, size_t stack_words);
+int snd_sof_init_trace_ipc(struct snd_sof_dev *sdev);
+
+/*
+ * Platform specific ops.
+ */
+extern struct snd_compr_ops sof_compressed_ops;
+
+/*
+ * Kcontrols.
+ */
+
+int snd_sof_volume_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol);
+int snd_sof_volume_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol);
+int snd_sof_switch_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol);
+int snd_sof_switch_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol);
+int snd_sof_enum_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol);
+int snd_sof_enum_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol);
+int snd_sof_bytes_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol);
+int snd_sof_bytes_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol);
+int snd_sof_bytes_ext_put(struct snd_kcontrol *kcontrol,
+ const unsigned int __user *binary_data,
+ unsigned int size);
+int snd_sof_bytes_ext_get(struct snd_kcontrol *kcontrol,
+ unsigned int __user *binary_data,
+ unsigned int size);
+
+/*
+ * DSP Architectures.
+ */
+static inline void sof_stack(struct snd_sof_dev *sdev, void *oops, u32 *stack,
+ u32 stack_words)
+{
+ if (sof_arch_ops(sdev)->dsp_stack)
+ sof_arch_ops(sdev)->dsp_stack(sdev, oops, stack, stack_words);
+}
+
+static inline void sof_oops(struct snd_sof_dev *sdev, void *oops)
+{
+ if (sof_arch_ops(sdev)->dsp_oops)
+ sof_arch_ops(sdev)->dsp_oops(sdev, oops);
+}
+
+extern const struct sof_arch_ops sof_xtensa_arch_ops;
+
+/*
+ * Utilities
+ */
+void sof_io_write(struct snd_sof_dev *sdev, void __iomem *addr, u32 value);
+void sof_io_write64(struct snd_sof_dev *sdev, void __iomem *addr, u64 value);
+u32 sof_io_read(struct snd_sof_dev *sdev, void __iomem *addr);
+u64 sof_io_read64(struct snd_sof_dev *sdev, void __iomem *addr);
+void sof_mailbox_write(struct snd_sof_dev *sdev, u32 offset,
+ void *message, size_t bytes);
+void sof_mailbox_read(struct snd_sof_dev *sdev, u32 offset,
+ void *message, size_t bytes);
+void sof_block_write(struct snd_sof_dev *sdev, u32 bar, u32 offset, void *src,
+ size_t size);
+void sof_block_read(struct snd_sof_dev *sdev, u32 bar, u32 offset, void *dest,
+ size_t size);
+
+void intel_ipc_msg_data(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream,
+ void *p, size_t sz);
+int intel_ipc_pcm_params(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream,
+ const struct sof_ipc_pcm_params_reply *reply);
+
+int intel_pcm_open(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream);
+int intel_pcm_close(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream);
+
+#endif
diff --git a/sound/soc/sof/topology.c b/sound/soc/sof/topology.c
new file mode 100644
index 000000000000..c88afa872a58
--- /dev/null
+++ b/sound/soc/sof/topology.c
@@ -0,0 +1,3179 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018 Intel Corporation. All rights reserved.
+//
+// Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+//
+
+#include <linux/firmware.h>
+#include <sound/tlv.h>
+#include <sound/pcm_params.h>
+#include <uapi/sound/sof/tokens.h>
+#include "sof-priv.h"
+#include "ops.h"
+
+#define COMP_ID_UNASSIGNED 0xffffffff
+/*
+ * Constants used in the computation of linear volume gain from dB gain:
+ * the 20th root of 10 in Q1.16 fixed-point notation
+ */
+#define VOL_TWENTIETH_ROOT_OF_TEN 73533
+/* 40th root of 10 in Q1.16 fixed-point notation */
+#define VOL_FORTIETH_ROOT_OF_TEN 69419
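+/*
+ * For reference: 10^(1/20) ~= 1.12202, i.e. ~73533 in Q1.16, and
+ * 10^(1/40) ~= 1.05925, i.e. ~69419 in Q1.16.
+ */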
+/*
+ * Setting the volume fractional word length to 16 makes
+ * the linear volume gain values use Qx.16 format
+ */
+#define VOLUME_FWL 16
+/* 0.5 dB step value in topology TLV */
+#define VOL_HALF_DB_STEP 50
+/* Full volume for default values */
+#define VOL_ZERO_DB BIT(VOLUME_FWL)
+
+/* TLV data items */
+#define TLV_ITEMS 3
+#define TLV_MIN 0
+#define TLV_STEP 1
+#define TLV_MUTE 2
+
+/* size of tplg ABI in bytes */
+#define SOF_TPLG_ABI_SIZE 3
+
+/* send pcm params ipc */
+static int ipc_pcm_params(struct snd_sof_widget *swidget, int dir)
+{
+ struct sof_ipc_pcm_params_reply ipc_params_reply;
+ struct snd_sof_dev *sdev = swidget->sdev;
+ struct sof_ipc_pcm_params pcm;
+ struct snd_pcm_hw_params *params;
+ struct snd_sof_pcm *spcm;
+ int ret = 0;
+
+ memset(&pcm, 0, sizeof(pcm));
+
+ /* get runtime PCM params using widget's stream name */
+ spcm = snd_sof_find_spcm_name(sdev, swidget->widget->sname);
+ if (!spcm) {
+ dev_err(sdev->dev, "error: cannot find PCM for %s\n",
+ swidget->widget->name);
+ return -EINVAL;
+ }
+
+ params = &spcm->params[dir];
+
+ /* set IPC PCM params */
+ pcm.hdr.size = sizeof(pcm);
+ pcm.hdr.cmd = SOF_IPC_GLB_STREAM_MSG | SOF_IPC_STREAM_PCM_PARAMS;
+ pcm.comp_id = swidget->comp_id;
+ pcm.params.hdr.size = sizeof(pcm.params);
+ pcm.params.direction = dir;
+ pcm.params.sample_valid_bytes = params_width(params) >> 3;
+ pcm.params.buffer_fmt = SOF_IPC_BUFFER_INTERLEAVED;
+ pcm.params.rate = params_rate(params);
+ pcm.params.channels = params_channels(params);
+ pcm.params.host_period_bytes = params_period_bytes(params);
+
+ /* set format */
+ switch (params_format(params)) {
+ case SNDRV_PCM_FORMAT_S16:
+ pcm.params.frame_fmt = SOF_IPC_FRAME_S16_LE;
+ break;
+ case SNDRV_PCM_FORMAT_S24:
+ pcm.params.frame_fmt = SOF_IPC_FRAME_S24_4LE;
+ break;
+ case SNDRV_PCM_FORMAT_S32:
+ pcm.params.frame_fmt = SOF_IPC_FRAME_S32_LE;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* send IPC to the DSP */
+ ret = sof_ipc_tx_message(sdev->ipc, pcm.hdr.cmd, &pcm, sizeof(pcm),
+ &ipc_params_reply, sizeof(ipc_params_reply));
+ if (ret < 0)
+ dev_err(sdev->dev, "error: pcm params failed for %s\n",
+ swidget->widget->name);
+
+ return ret;
+}
+
+/* send stream trigger ipc */
+static int ipc_trigger(struct snd_sof_widget *swidget, int cmd)
+{
+ struct snd_sof_dev *sdev = swidget->sdev;
+ struct sof_ipc_stream stream;
+ struct sof_ipc_reply reply;
+ int ret = 0;
+
+ /* set IPC stream params */
+ stream.hdr.size = sizeof(stream);
+ stream.hdr.cmd = SOF_IPC_GLB_STREAM_MSG | cmd;
+ stream.comp_id = swidget->comp_id;
+
+ /* send IPC to the DSP */
+ ret = sof_ipc_tx_message(sdev->ipc, stream.hdr.cmd, &stream,
+ sizeof(stream), &reply, sizeof(reply));
+ if (ret < 0)
+ dev_err(sdev->dev, "error: failed to trigger %s\n",
+ swidget->widget->name);
+
+ return ret;
+}
+
+static int sof_keyword_dapm_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *k, int event)
+{
+ struct snd_sof_widget *swidget = w->dobj.private;
+ struct snd_sof_dev *sdev;
+ int ret = 0;
+
+ if (!swidget)
+ return 0;
+
+ sdev = swidget->sdev;
+
+ dev_dbg(sdev->dev, "received event %d for widget %s\n",
+ event, w->name);
+
+ /* process events */
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ /* set pcm params */
+ ret = ipc_pcm_params(swidget, SOF_IPC_STREAM_CAPTURE);
+ if (ret < 0) {
+ dev_err(sdev->dev,
+ "error: failed to set pcm params for widget %s\n",
+ swidget->widget->name);
+ break;
+ }
+
+ /* start trigger */
+ ret = ipc_trigger(swidget, SOF_IPC_STREAM_TRIG_START);
+ if (ret < 0)
+ dev_err(sdev->dev,
+ "error: failed to trigger widget %s\n",
+ swidget->widget->name);
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ /* stop trigger */
+ ret = ipc_trigger(swidget, SOF_IPC_STREAM_TRIG_STOP);
+ if (ret < 0)
+ dev_err(sdev->dev,
+ "error: failed to trigger widget %s\n",
+ swidget->widget->name);
+
+ /* pcm free */
+ ret = ipc_trigger(swidget, SOF_IPC_STREAM_PCM_FREE);
+ if (ret < 0)
+ dev_err(sdev->dev,
+ "error: failed to trigger widget %s\n",
+ swidget->widget->name);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+/* event handlers for keyword detect component */
+static const struct snd_soc_tplg_widget_events sof_kwd_events[] = {
+ {SOF_KEYWORD_DETECT_DAPM_EVENT, sof_keyword_dapm_event},
+};
+
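+/*
+ * For reference, TLV data equivalent to a (made-up) control declared with
+ * DECLARE_TLV_DB_SCALE(tlv, -5000, 100, 1) is parsed below as
+ * TLV_MIN = -50 dB, TLV_STEP = 100 (1 dB) and TLV_MUTE = 1.
+ */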
+static inline int get_tlv_data(const int *p, int tlv[TLV_ITEMS])
+{
+ /* we only support dB scale TLV type at the moment */
+ if ((int)p[SNDRV_CTL_TLVO_TYPE] != SNDRV_CTL_TLVT_DB_SCALE)
+ return -EINVAL;
+
+ /* min value in topology tlv data is multiplied by 100 */
+ tlv[TLV_MIN] = (int)p[SNDRV_CTL_TLVO_DB_SCALE_MIN] / 100;
+
+ /* volume steps */
+ tlv[TLV_STEP] = (int)(p[SNDRV_CTL_TLVO_DB_SCALE_MUTE_AND_STEP] &
+ TLV_DB_SCALE_MASK);
+
+ /* mute ON/OFF */
+ if ((p[SNDRV_CTL_TLVO_DB_SCALE_MUTE_AND_STEP] &
+ TLV_DB_SCALE_MUTE) == 0)
+ tlv[TLV_MUTE] = 0;
+ else
+ tlv[TLV_MUTE] = 1;
+
+ return 0;
+}
+
+/*
+ * Function to truncate an unsigned 64-bit number
+ * by x bits and return a 32-bit unsigned number,
+ * rounding to nearest while truncating
+ */
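+/*
+ * Example: vol_shift_64(0x18000, 16) treats 0x18000 as 1.5 in Q16.16
+ * and rounds it to 2 instead of truncating it to 1.
+ */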
+static inline u32 vol_shift_64(u64 i, u32 x)
+{
+ /* do not truncate more than 32 bits */
+ if (x > 32)
+ x = 32;
+
+ if (x == 0)
+ return (u32)i;
+
+ return (u32)(((i >> (x - 1)) + 1) >> 1);
+}
+
+/*
+ * Function to compute a ^ exp where,
+ * a is a fractional number represented by a fixed-point
+ * integer with a fractional word length of "fwl"
+ * exp is an integer
+ * fwl is the fractional word length
+ * Return value is a fractional number represented by a
+ * fixed-point integer with a fractional word length of "fwl"
+ */
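+/*
+ * Example: a = 0x20000 (2.0 in Q16.16), exp = 3, fwl = 16 returns
+ * 0x80000 (8.0); a negative exp returns the reciprocal instead.
+ */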
+static u32 vol_pow32(u32 a, int exp, u32 fwl)
+{
+ int i, iter;
+ u32 power = 1 << fwl;
+ u64 numerator;
+
+ /* if exponent is 0, return 1 */
+ if (exp == 0)
+ return power;
+
+ /* determine the number of iterations based on the exponent */
+ if (exp < 0)
+ iter = exp * -1;
+ else
+ iter = exp;
+
+ /* multiply by "a" iter times to compute the power */
+ for (i = 0; i < iter; i++) {
+ /*
+ * Product of 2 Qx.fwl fixed-point numbers yields a Q2*x.2*fwl
+ * Truncate product back to fwl fractional bits with rounding
+ */
+ power = vol_shift_64((u64)power * a, fwl);
+ }
+
+ if (exp > 0) {
+ /* if exp is positive, return the result */
+ return power;
+ }
+
+ /* if exp is negative, return the multiplicative inverse */
+ numerator = (u64)1 << (fwl << 1);
+ do_div(numerator, power);
+
+ return (u32)numerator;
+}
+
+/*
+ * Function to calculate volume gain from TLV data.
+ * This function can only handle gain steps that are multiples of 0.5 dB
+ */
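+/*
+ * Example: with tlv = {-50, 100, 1} (a -50 dB min, 1 dB step, mute-enabled
+ * control), value 20 maps to -50 + 20 dB = -30 dB, i.e. a linear gain of
+ * 10^(-30/20) ~= 0.0316, roughly 2072 in Qx.16.
+ */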
+static u32 vol_compute_gain(u32 value, int *tlv)
+{
+ int dB_gain;
+ u32 linear_gain;
+ int f_step;
+
+ /* mute volume */
+ if (value == 0 && tlv[TLV_MUTE])
+ return 0;
+
+ /*
+ * compute dB gain from tlv. tlv_step
+ * in topology is multiplied by 100
+ */
+ dB_gain = tlv[TLV_MIN] + (value * tlv[TLV_STEP]) / 100;
+
+ /*
+ * compute linear gain represented by fixed-point
+ * int with VOLUME_FWL fractional bits
+ */
+ linear_gain = vol_pow32(VOL_TWENTIETH_ROOT_OF_TEN, dB_gain, VOLUME_FWL);
+
+ /* extract the fractional part of volume step */
+ f_step = tlv[TLV_STEP] % 100;
+
+ /* if volume step is an odd multiple of 0.5 dB */
+ if (f_step == VOL_HALF_DB_STEP && (value & 1))
+ linear_gain = vol_shift_64((u64)linear_gain *
+ VOL_FORTIETH_ROOT_OF_TEN,
+ VOLUME_FWL);
+
+ return linear_gain;
+}
+
+/*
+ * Set up volume table for kcontrols from tlv data
+ * "size" specifies the number of entries in the table
+ */
+static int set_up_volume_table(struct snd_sof_control *scontrol,
+ int tlv[TLV_ITEMS], int size)
+{
+ int j;
+
+ /* init the volume table */
+ scontrol->volume_table = kcalloc(size, sizeof(u32), GFP_KERNEL);
+ if (!scontrol->volume_table)
+ return -ENOMEM;
+
+ /* populate the volume table */
+ for (j = 0; j < size; j++)
+ scontrol->volume_table[j] = vol_compute_gain(j, tlv);
+
+ return 0;
+}
+
+struct sof_dai_types {
+ const char *name;
+ enum sof_ipc_dai_type type;
+};
+
+static const struct sof_dai_types sof_dais[] = {
+ {"SSP", SOF_DAI_INTEL_SSP},
+ {"HDA", SOF_DAI_INTEL_HDA},
+ {"DMIC", SOF_DAI_INTEL_DMIC},
+};
+
+static enum sof_ipc_dai_type find_dai(const char *name)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sof_dais); i++) {
+ if (strcmp(name, sof_dais[i].name) == 0)
+ return sof_dais[i].type;
+ }
+
+ return SOF_DAI_INTEL_NONE;
+}
+
+/*
+ * Supported Frame format types and lookup, add new ones to end of list.
+ */
+
+struct sof_frame_types {
+ const char *name;
+ enum sof_ipc_frame frame;
+};
+
+static const struct sof_frame_types sof_frames[] = {
+ {"s16le", SOF_IPC_FRAME_S16_LE},
+ {"s24le", SOF_IPC_FRAME_S24_4LE},
+ {"s32le", SOF_IPC_FRAME_S32_LE},
+ {"float", SOF_IPC_FRAME_FLOAT},
+};
+
+static enum sof_ipc_frame find_format(const char *name)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sof_frames); i++) {
+ if (strcmp(name, sof_frames[i].name) == 0)
+ return sof_frames[i].frame;
+ }
+
+ /* use s32le if nothing is specified */
+ return SOF_IPC_FRAME_S32_LE;
+}
+
+struct sof_process_types {
+ const char *name;
+ enum sof_ipc_process_type type;
+ enum sof_comp_type comp_type;
+};
+
+static const struct sof_process_types sof_process[] = {
+ {"EQFIR", SOF_PROCESS_EQFIR, SOF_COMP_EQ_FIR},
+ {"EQIIR", SOF_PROCESS_EQIIR, SOF_COMP_EQ_IIR},
+ {"KEYWORD_DETECT", SOF_PROCESS_KEYWORD_DETECT, SOF_COMP_KEYWORD_DETECT},
+ {"KPB", SOF_PROCESS_KPB, SOF_COMP_KPB},
+ {"CHAN_SELECTOR", SOF_PROCESS_CHAN_SELECTOR, SOF_COMP_SELECTOR},
+};
+
+static enum sof_ipc_process_type find_process(const char *name)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sof_process); i++) {
+ if (strcmp(name, sof_process[i].name) == 0)
+ return sof_process[i].type;
+ }
+
+ return SOF_PROCESS_NONE;
+}
+
+static enum sof_comp_type find_process_comp_type(enum sof_ipc_process_type type)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sof_process); i++) {
+ if (sof_process[i].type == type)
+ return sof_process[i].comp_type;
+ }
+
+ return SOF_COMP_NONE;
+}
+
+/*
+ * Standard Kcontrols.
+ */
+
+static int sof_control_load_volume(struct snd_soc_component *scomp,
+ struct snd_sof_control *scontrol,
+ struct snd_kcontrol_new *kc,
+ struct snd_soc_tplg_ctl_hdr *hdr)
+{
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ struct snd_soc_tplg_mixer_control *mc =
+ container_of(hdr, struct snd_soc_tplg_mixer_control, hdr);
+ struct sof_ipc_ctrl_data *cdata;
+ int tlv[TLV_ITEMS];
+ unsigned int i;
+ int ret;
+
+ /* validate topology data */
+ if (le32_to_cpu(mc->num_channels) > SND_SOC_TPLG_MAX_CHAN)
+ return -EINVAL;
+
+ /* init the volume get/put data */
+ scontrol->size = sizeof(struct sof_ipc_ctrl_data) +
+ sizeof(struct sof_ipc_ctrl_value_chan) *
+ le32_to_cpu(mc->num_channels);
+ scontrol->control_data = kzalloc(scontrol->size, GFP_KERNEL);
+ if (!scontrol->control_data)
+ return -ENOMEM;
+
+ scontrol->comp_id = sdev->next_comp_id;
+ scontrol->num_channels = le32_to_cpu(mc->num_channels);
+
+ /* set cmd for mixer control */
+ if (le32_to_cpu(mc->max) == 1) {
+ scontrol->cmd = SOF_CTRL_CMD_SWITCH;
+ goto out;
+ }
+
+ scontrol->cmd = SOF_CTRL_CMD_VOLUME;
+
+ /* extract tlv data */
+ if (get_tlv_data(kc->tlv.p, tlv) < 0) {
+ dev_err(sdev->dev, "error: invalid TLV data\n");
+ return -EINVAL;
+ }
+
+ /* set up volume table */
+ ret = set_up_volume_table(scontrol, tlv, le32_to_cpu(mc->max) + 1);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: setting up volume table\n");
+ return ret;
+ }
+
+ /* set default volume values to 0dB in control */
+ cdata = scontrol->control_data;
+ for (i = 0; i < scontrol->num_channels; i++) {
+ cdata->chanv[i].channel = i;
+ cdata->chanv[i].value = VOL_ZERO_DB;
+ }
+
+out:
+ dev_dbg(sdev->dev, "tplg: load kcontrol index %d chans %d\n",
+ scontrol->comp_id, scontrol->num_channels);
+
+ return 0;
+}
+
+static int sof_control_load_enum(struct snd_soc_component *scomp,
+ struct snd_sof_control *scontrol,
+ struct snd_kcontrol_new *kc,
+ struct snd_soc_tplg_ctl_hdr *hdr)
+{
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ struct snd_soc_tplg_enum_control *ec =
+ container_of(hdr, struct snd_soc_tplg_enum_control, hdr);
+
+ /* validate topology data */
+ if (le32_to_cpu(ec->num_channels) > SND_SOC_TPLG_MAX_CHAN)
+ return -EINVAL;
+
+ /* init the enum get/put data */
+ scontrol->size = sizeof(struct sof_ipc_ctrl_data) +
+ sizeof(struct sof_ipc_ctrl_value_chan) *
+ le32_to_cpu(ec->num_channels);
+ scontrol->control_data = kzalloc(scontrol->size, GFP_KERNEL);
+ if (!scontrol->control_data)
+ return -ENOMEM;
+
+ scontrol->comp_id = sdev->next_comp_id;
+ scontrol->num_channels = le32_to_cpu(ec->num_channels);
+
+ scontrol->cmd = SOF_CTRL_CMD_ENUM;
+
+ dev_dbg(sdev->dev, "tplg: load kcontrol index %d chans %d comp_id %d\n",
+ scontrol->comp_id, scontrol->num_channels, scontrol->comp_id);
+
+ return 0;
+}
+
+static int sof_control_load_bytes(struct snd_soc_component *scomp,
+ struct snd_sof_control *scontrol,
+ struct snd_kcontrol_new *kc,
+ struct snd_soc_tplg_ctl_hdr *hdr)
+{
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ struct sof_ipc_ctrl_data *cdata;
+ struct snd_soc_tplg_bytes_control *control =
+ container_of(hdr, struct snd_soc_tplg_bytes_control, hdr);
+ struct soc_bytes_ext *sbe = (struct soc_bytes_ext *)kc->private_value;
+ int max_size = sbe->max;
+
+ if (le32_to_cpu(control->priv.size) > max_size) {
+ dev_err(sdev->dev, "err: bytes data size %d exceeds max %d.\n",
+ control->priv.size, max_size);
+ return -EINVAL;
+ }
+
+ /* init the get/put bytes data */
+ scontrol->size = sizeof(struct sof_ipc_ctrl_data) +
+ le32_to_cpu(control->priv.size);
+ scontrol->control_data = kzalloc(max_size, GFP_KERNEL);
+ cdata = scontrol->control_data;
+ if (!scontrol->control_data)
+ return -ENOMEM;
+
+ scontrol->comp_id = sdev->next_comp_id;
+ scontrol->cmd = SOF_CTRL_CMD_BINARY;
+
+ dev_dbg(sdev->dev, "tplg: load kcontrol index %d chans %d\n",
+ scontrol->comp_id, scontrol->num_channels);
+
+ if (le32_to_cpu(control->priv.size) > 0) {
+ memcpy(cdata->data, control->priv.data,
+ le32_to_cpu(control->priv.size));
+
+ if (cdata->data->magic != SOF_ABI_MAGIC) {
+ dev_err(sdev->dev, "error: Wrong ABI magic 0x%08x.\n",
+ cdata->data->magic);
+ return -EINVAL;
+ }
+ if (SOF_ABI_VERSION_INCOMPATIBLE(SOF_ABI_VERSION,
+ cdata->data->abi)) {
+ dev_err(sdev->dev,
+ "error: Incompatible ABI version 0x%08x.\n",
+ cdata->data->abi);
+ return -EINVAL;
+ }
+ if (cdata->data->size + sizeof(const struct sof_abi_hdr) !=
+ le32_to_cpu(control->priv.size)) {
+ dev_err(sdev->dev,
+ "error: Conflict in bytes vs. priv size.\n");
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+/*
+ * Topology Token Parsing.
+ * New tokens should be added to headers and parsing tables below.
+ */
+
+struct sof_topology_token {
+ u32 token;
+ u32 type;
+ int (*get_token)(void *elem, void *object, u32 offset, u32 size);
+ u32 offset;
+ u32 size;
+};
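+/*
+ * Each entry maps a topology vendor tuple (token id + tuple type) onto a
+ * field of an IPC structure: get_token() converts the tuple value and
+ * writes it at "offset" bytes into the object passed to sof_parse_tokens().
+ */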
+
+static int get_token_u32(void *elem, void *object, u32 offset, u32 size)
+{
+ struct snd_soc_tplg_vendor_value_elem *velem = elem;
+ u32 *val = (u32 *)((u8 *)object + offset);
+
+ *val = le32_to_cpu(velem->value);
+ return 0;
+}
+
+static int get_token_u16(void *elem, void *object, u32 offset, u32 size)
+{
+ struct snd_soc_tplg_vendor_value_elem *velem = elem;
+ u16 *val = (u16 *)((u8 *)object + offset);
+
+ *val = (u16)le32_to_cpu(velem->value);
+ return 0;
+}
+
+static int get_token_comp_format(void *elem, void *object, u32 offset, u32 size)
+{
+ struct snd_soc_tplg_vendor_string_elem *velem = elem;
+ u32 *val = (u32 *)((u8 *)object + offset);
+
+ *val = find_format(velem->string);
+ return 0;
+}
+
+static int get_token_dai_type(void *elem, void *object, u32 offset, u32 size)
+{
+ struct snd_soc_tplg_vendor_string_elem *velem = elem;
+ u32 *val = (u32 *)((u8 *)object + offset);
+
+ *val = find_dai(velem->string);
+ return 0;
+}
+
+static int get_token_process_type(void *elem, void *object, u32 offset,
+ u32 size)
+{
+ struct snd_soc_tplg_vendor_string_elem *velem = elem;
+ u32 *val = (u32 *)((u8 *)object + offset);
+
+ *val = find_process(velem->string);
+ return 0;
+}
+
+/* Buffers */
+static const struct sof_topology_token buffer_tokens[] = {
+ {SOF_TKN_BUF_SIZE, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc_buffer, size), 0},
+ {SOF_TKN_BUF_CAPS, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc_buffer, caps), 0},
+};
+
+/* DAI */
+static const struct sof_topology_token dai_tokens[] = {
+ {SOF_TKN_DAI_TYPE, SND_SOC_TPLG_TUPLE_TYPE_STRING, get_token_dai_type,
+ offsetof(struct sof_ipc_comp_dai, type), 0},
+ {SOF_TKN_DAI_INDEX, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc_comp_dai, dai_index), 0},
+ {SOF_TKN_DAI_DIRECTION, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc_comp_dai, direction), 0},
+};
+
+/* BE DAI link */
+static const struct sof_topology_token dai_link_tokens[] = {
+ {SOF_TKN_DAI_TYPE, SND_SOC_TPLG_TUPLE_TYPE_STRING, get_token_dai_type,
+ offsetof(struct sof_ipc_dai_config, type), 0},
+ {SOF_TKN_DAI_INDEX, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc_dai_config, dai_index), 0},
+};
+
+/* scheduling */
+static const struct sof_topology_token sched_tokens[] = {
+ {SOF_TKN_SCHED_PERIOD, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc_pipe_new, period), 0},
+ {SOF_TKN_SCHED_PRIORITY, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc_pipe_new, priority), 0},
+ {SOF_TKN_SCHED_MIPS, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc_pipe_new, period_mips), 0},
+ {SOF_TKN_SCHED_CORE, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc_pipe_new, core), 0},
+ {SOF_TKN_SCHED_FRAMES, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc_pipe_new, frames_per_sched), 0},
+ {SOF_TKN_SCHED_TIME_DOMAIN, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc_pipe_new, time_domain), 0},
+};
+
+/* volume */
+static const struct sof_topology_token volume_tokens[] = {
+ {SOF_TKN_VOLUME_RAMP_STEP_TYPE, SND_SOC_TPLG_TUPLE_TYPE_WORD,
+ get_token_u32, offsetof(struct sof_ipc_comp_volume, ramp), 0},
+ {SOF_TKN_VOLUME_RAMP_STEP_MS,
+ SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc_comp_volume, initial_ramp), 0},
+};
+
+/* SRC */
+static const struct sof_topology_token src_tokens[] = {
+ {SOF_TKN_SRC_RATE_IN, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc_comp_src, source_rate), 0},
+ {SOF_TKN_SRC_RATE_OUT, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc_comp_src, sink_rate), 0},
+};
+
+/* Tone */
+static const struct sof_topology_token tone_tokens[] = {
+};
+
+/* EFFECT */
+static const struct sof_topology_token process_tokens[] = {
+ {SOF_TKN_PROCESS_TYPE, SND_SOC_TPLG_TUPLE_TYPE_STRING,
+ get_token_process_type,
+ offsetof(struct sof_ipc_comp_process, type), 0},
+};
+
+/* PCM */
+static const struct sof_topology_token pcm_tokens[] = {
+ {SOF_TKN_PCM_DMAC_CONFIG, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc_comp_host, dmac_config), 0},
+};
+
+/* Generic components */
+static const struct sof_topology_token comp_tokens[] = {
+ {SOF_TKN_COMP_PERIOD_SINK_COUNT,
+ SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc_comp_config, periods_sink), 0},
+ {SOF_TKN_COMP_PERIOD_SOURCE_COUNT,
+ SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc_comp_config, periods_source), 0},
+ {SOF_TKN_COMP_FORMAT,
+ SND_SOC_TPLG_TUPLE_TYPE_STRING, get_token_comp_format,
+ offsetof(struct sof_ipc_comp_config, frame_fmt), 0},
+};
+
+/* SSP */
+static const struct sof_topology_token ssp_tokens[] = {
+ {SOF_TKN_INTEL_SSP_CLKS_CONTROL,
+ SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc_dai_ssp_params, clks_control), 0},
+ {SOF_TKN_INTEL_SSP_MCLK_ID,
+ SND_SOC_TPLG_TUPLE_TYPE_SHORT, get_token_u16,
+ offsetof(struct sof_ipc_dai_ssp_params, mclk_id), 0},
+ {SOF_TKN_INTEL_SSP_SAMPLE_BITS, SND_SOC_TPLG_TUPLE_TYPE_WORD,
+ get_token_u32,
+ offsetof(struct sof_ipc_dai_ssp_params, sample_valid_bits), 0},
+ {SOF_TKN_INTEL_SSP_FRAME_PULSE_WIDTH, SND_SOC_TPLG_TUPLE_TYPE_SHORT,
+ get_token_u16,
+ offsetof(struct sof_ipc_dai_ssp_params, frame_pulse_width), 0},
+ {SOF_TKN_INTEL_SSP_QUIRKS, SND_SOC_TPLG_TUPLE_TYPE_WORD,
+ get_token_u32,
+ offsetof(struct sof_ipc_dai_ssp_params, quirks), 0},
+ {SOF_TKN_INTEL_SSP_TDM_PADDING_PER_SLOT, SND_SOC_TPLG_TUPLE_TYPE_BOOL,
+ get_token_u16,
+ offsetof(struct sof_ipc_dai_ssp_params,
+ tdm_per_slot_padding_flag), 0},
+
+};
+
+/* DMIC */
+static const struct sof_topology_token dmic_tokens[] = {
+ {SOF_TKN_INTEL_DMIC_DRIVER_VERSION,
+ SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc_dai_dmic_params, driver_ipc_version),
+ 0},
+ {SOF_TKN_INTEL_DMIC_CLK_MIN,
+ SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc_dai_dmic_params, pdmclk_min), 0},
+ {SOF_TKN_INTEL_DMIC_CLK_MAX,
+ SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc_dai_dmic_params, pdmclk_max), 0},
+ {SOF_TKN_INTEL_DMIC_SAMPLE_RATE,
+ SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc_dai_dmic_params, fifo_fs), 0},
+ {SOF_TKN_INTEL_DMIC_DUTY_MIN,
+ SND_SOC_TPLG_TUPLE_TYPE_SHORT, get_token_u16,
+ offsetof(struct sof_ipc_dai_dmic_params, duty_min), 0},
+ {SOF_TKN_INTEL_DMIC_DUTY_MAX,
+ SND_SOC_TPLG_TUPLE_TYPE_SHORT, get_token_u16,
+ offsetof(struct sof_ipc_dai_dmic_params, duty_max), 0},
+ {SOF_TKN_INTEL_DMIC_NUM_PDM_ACTIVE,
+ SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc_dai_dmic_params,
+ num_pdm_active), 0},
+ {SOF_TKN_INTEL_DMIC_FIFO_WORD_LENGTH,
+ SND_SOC_TPLG_TUPLE_TYPE_SHORT, get_token_u16,
+ offsetof(struct sof_ipc_dai_dmic_params, fifo_bits), 0},
+};
+
+/*
+ * DMIC PDM Tokens
+ * SOF_TKN_INTEL_DMIC_PDM_CTRL_ID should be the first token
+ * as it increments the index while parsing the array of pdm tokens
+ * and determines the correct offset
+ */
+static const struct sof_topology_token dmic_pdm_tokens[] = {
+ {SOF_TKN_INTEL_DMIC_PDM_CTRL_ID,
+ SND_SOC_TPLG_TUPLE_TYPE_SHORT, get_token_u16,
+ offsetof(struct sof_ipc_dai_dmic_pdm_ctrl, id),
+ 0},
+ {SOF_TKN_INTEL_DMIC_PDM_MIC_A_Enable,
+ SND_SOC_TPLG_TUPLE_TYPE_SHORT, get_token_u16,
+ offsetof(struct sof_ipc_dai_dmic_pdm_ctrl, enable_mic_a),
+ 0},
+ {SOF_TKN_INTEL_DMIC_PDM_MIC_B_Enable,
+ SND_SOC_TPLG_TUPLE_TYPE_SHORT, get_token_u16,
+ offsetof(struct sof_ipc_dai_dmic_pdm_ctrl, enable_mic_b),
+ 0},
+ {SOF_TKN_INTEL_DMIC_PDM_POLARITY_A,
+ SND_SOC_TPLG_TUPLE_TYPE_SHORT, get_token_u16,
+ offsetof(struct sof_ipc_dai_dmic_pdm_ctrl, polarity_mic_a),
+ 0},
+ {SOF_TKN_INTEL_DMIC_PDM_POLARITY_B,
+ SND_SOC_TPLG_TUPLE_TYPE_SHORT, get_token_u16,
+ offsetof(struct sof_ipc_dai_dmic_pdm_ctrl, polarity_mic_b),
+ 0},
+ {SOF_TKN_INTEL_DMIC_PDM_CLK_EDGE,
+ SND_SOC_TPLG_TUPLE_TYPE_SHORT, get_token_u16,
+ offsetof(struct sof_ipc_dai_dmic_pdm_ctrl, clk_edge),
+ 0},
+ {SOF_TKN_INTEL_DMIC_PDM_SKEW,
+ SND_SOC_TPLG_TUPLE_TYPE_SHORT, get_token_u16,
+ offsetof(struct sof_ipc_dai_dmic_pdm_ctrl, skew),
+ 0},
+};
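+/*
+ * Note: for the tokens above, sof_parse_word_tokens() adds an extra offset
+ * of (pdm index - 1) * sizeof(struct sof_ipc_dai_dmic_pdm_ctrl) to the
+ * parsed object, bumping the index each time a PDM_CTRL_ID token is seen.
+ */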
+
+/* HDA */
+static const struct sof_topology_token hda_tokens[] = {
+};
+
+static void sof_parse_uuid_tokens(struct snd_soc_component *scomp,
+ void *object,
+ const struct sof_topology_token *tokens,
+ int count,
+ struct snd_soc_tplg_vendor_array *array)
+{
+ struct snd_soc_tplg_vendor_uuid_elem *elem;
+ int i, j;
+
+ /* parse element by element */
+ for (i = 0; i < le32_to_cpu(array->num_elems); i++) {
+ elem = &array->uuid[i];
+
+ /* search for token */
+ for (j = 0; j < count; j++) {
+ /* match token type */
+ if (tokens[j].type != SND_SOC_TPLG_TUPLE_TYPE_UUID)
+ continue;
+
+ /* match token id */
+ if (tokens[j].token != le32_to_cpu(elem->token))
+ continue;
+
+ /* matched - now load token */
+ tokens[j].get_token(elem, object, tokens[j].offset,
+ tokens[j].size);
+ }
+ }
+}
+
+static void sof_parse_string_tokens(struct snd_soc_component *scomp,
+ void *object,
+ const struct sof_topology_token *tokens,
+ int count,
+ struct snd_soc_tplg_vendor_array *array)
+{
+ struct snd_soc_tplg_vendor_string_elem *elem;
+ int i, j;
+
+ /* parse element by element */
+ for (i = 0; i < le32_to_cpu(array->num_elems); i++) {
+ elem = &array->string[i];
+
+ /* search for token */
+ for (j = 0; j < count; j++) {
+ /* match token type */
+ if (tokens[j].type != SND_SOC_TPLG_TUPLE_TYPE_STRING)
+ continue;
+
+ /* match token id */
+ if (tokens[j].token != le32_to_cpu(elem->token))
+ continue;
+
+ /* matched - now load token */
+ tokens[j].get_token(elem, object, tokens[j].offset,
+ tokens[j].size);
+ }
+ }
+}
+
+static void sof_parse_word_tokens(struct snd_soc_component *scomp,
+ void *object,
+ const struct sof_topology_token *tokens,
+ int count,
+ struct snd_soc_tplg_vendor_array *array)
+{
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ struct snd_soc_tplg_vendor_value_elem *elem;
+ size_t size = sizeof(struct sof_ipc_dai_dmic_pdm_ctrl);
+ int i, j;
+ u32 offset;
+ u32 *index = NULL;
+
+ /* parse element by element */
+ for (i = 0; i < le32_to_cpu(array->num_elems); i++) {
+ elem = &array->value[i];
+
+ /* search for token */
+ for (j = 0; j < count; j++) {
+ /* match token type */
+ if (!(tokens[j].type == SND_SOC_TPLG_TUPLE_TYPE_WORD ||
+ tokens[j].type == SND_SOC_TPLG_TUPLE_TYPE_SHORT))
+ continue;
+
+ /* match token id */
+ if (tokens[j].token != le32_to_cpu(elem->token))
+ continue;
+
+ /* pdm config array index */
+ if (sdev->private)
+ index = sdev->private;
+
+ /* matched - determine offset */
+ switch (tokens[j].token) {
+ case SOF_TKN_INTEL_DMIC_PDM_CTRL_ID:
+
+ /* inc number of pdm array index */
+ if (index)
+ (*index)++;
+ /* fallthrough */
+ case SOF_TKN_INTEL_DMIC_PDM_MIC_A_Enable:
+ case SOF_TKN_INTEL_DMIC_PDM_MIC_B_Enable:
+ case SOF_TKN_INTEL_DMIC_PDM_POLARITY_A:
+ case SOF_TKN_INTEL_DMIC_PDM_POLARITY_B:
+ case SOF_TKN_INTEL_DMIC_PDM_CLK_EDGE:
+ case SOF_TKN_INTEL_DMIC_PDM_SKEW:
+
+ /* check if array index is valid */
+ if (!index || *index == 0) {
+ dev_err(sdev->dev,
+ "error: invalid array offset\n");
+ continue;
+ } else {
+ /* offset within the pdm config array */
+ offset = size * (*index - 1);
+ }
+ break;
+ default:
+ offset = 0;
+ break;
+ }
+
+ /* load token */
+ tokens[j].get_token(elem, object,
+ offset + tokens[j].offset,
+ tokens[j].size);
+ }
+ }
+}
+
+static int sof_parse_tokens(struct snd_soc_component *scomp,
+ void *object,
+ const struct sof_topology_token *tokens,
+ int count,
+ struct snd_soc_tplg_vendor_array *array,
+ int priv_size)
+{
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ int asize;
+
+ while (priv_size > 0) {
+ asize = le32_to_cpu(array->size);
+
+ /* validate asize */
+ if (asize < 0) { /* FIXME: A zero-size array makes no sense */
+ dev_err(sdev->dev, "error: invalid array size 0x%x\n",
+ asize);
+ return -EINVAL;
+ }
+
+ /* make sure there is enough data before parsing */
+ priv_size -= asize;
+ if (priv_size < 0) {
+ dev_err(sdev->dev, "error: invalid array size 0x%x\n",
+ asize);
+ return -EINVAL;
+ }
+
+ /* call correct parser depending on type */
+ switch (le32_to_cpu(array->type)) {
+ case SND_SOC_TPLG_TUPLE_TYPE_UUID:
+ sof_parse_uuid_tokens(scomp, object, tokens, count,
+ array);
+ break;
+ case SND_SOC_TPLG_TUPLE_TYPE_STRING:
+ sof_parse_string_tokens(scomp, object, tokens, count,
+ array);
+ break;
+ case SND_SOC_TPLG_TUPLE_TYPE_BOOL:
+ case SND_SOC_TPLG_TUPLE_TYPE_BYTE:
+ case SND_SOC_TPLG_TUPLE_TYPE_WORD:
+ case SND_SOC_TPLG_TUPLE_TYPE_SHORT:
+ sof_parse_word_tokens(scomp, object, tokens, count,
+ array);
+ break;
+ default:
+ dev_err(sdev->dev, "error: unknown token type %d\n",
+ array->type);
+ return -EINVAL;
+ }
+
+ /* next array */
+ array = (struct snd_soc_tplg_vendor_array *)((u8 *)array
+ + asize);
+ }
+ return 0;
+}
+
+static void sof_dbg_comp_config(struct snd_soc_component *scomp,
+ struct sof_ipc_comp_config *config)
+{
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+
+ dev_dbg(sdev->dev, " config: periods snk %d src %d fmt %d\n",
+ config->periods_sink, config->periods_source,
+ config->frame_fmt);
+}
+
+/* external kcontrol init - used for any driver specific init */
+static int sof_control_load(struct snd_soc_component *scomp, int index,
+ struct snd_kcontrol_new *kc,
+ struct snd_soc_tplg_ctl_hdr *hdr)
+{
+ struct soc_mixer_control *sm;
+ struct soc_bytes_ext *sbe;
+ struct soc_enum *se;
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ struct snd_soc_dobj *dobj;
+ struct snd_sof_control *scontrol;
+ int ret = -EINVAL;
+
+ dev_dbg(sdev->dev, "tplg: load control type %d name : %s\n",
+ hdr->type, hdr->name);
+
+ scontrol = kzalloc(sizeof(*scontrol), GFP_KERNEL);
+ if (!scontrol)
+ return -ENOMEM;
+
+ scontrol->sdev = sdev;
+
+ switch (le32_to_cpu(hdr->ops.info)) {
+ case SND_SOC_TPLG_CTL_VOLSW:
+ case SND_SOC_TPLG_CTL_VOLSW_SX:
+ case SND_SOC_TPLG_CTL_VOLSW_XR_SX:
+ sm = (struct soc_mixer_control *)kc->private_value;
+ dobj = &sm->dobj;
+ ret = sof_control_load_volume(scomp, scontrol, kc, hdr);
+ break;
+ case SND_SOC_TPLG_CTL_BYTES:
+ sbe = (struct soc_bytes_ext *)kc->private_value;
+ dobj = &sbe->dobj;
+ ret = sof_control_load_bytes(scomp, scontrol, kc, hdr);
+ break;
+ case SND_SOC_TPLG_CTL_ENUM:
+ case SND_SOC_TPLG_CTL_ENUM_VALUE:
+ se = (struct soc_enum *)kc->private_value;
+ dobj = &se->dobj;
+ ret = sof_control_load_enum(scomp, scontrol, kc, hdr);
+ break;
+ case SND_SOC_TPLG_CTL_RANGE:
+ case SND_SOC_TPLG_CTL_STROBE:
+ case SND_SOC_TPLG_DAPM_CTL_VOLSW:
+ case SND_SOC_TPLG_DAPM_CTL_ENUM_DOUBLE:
+ case SND_SOC_TPLG_DAPM_CTL_ENUM_VIRT:
+ case SND_SOC_TPLG_DAPM_CTL_ENUM_VALUE:
+ case SND_SOC_TPLG_DAPM_CTL_PIN:
+ default:
+ dev_warn(sdev->dev, "control type not supported %d:%d:%d\n",
+ hdr->ops.get, hdr->ops.put, hdr->ops.info);
+ kfree(scontrol);
+ return 0;
+ }
+
+ dobj->private = scontrol;
+ list_add(&scontrol->list, &sdev->kcontrol_list);
+ return ret;
+}
+
+static int sof_control_unload(struct snd_soc_component *scomp,
+ struct snd_soc_dobj *dobj)
+{
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ struct sof_ipc_free fcomp;
+ struct snd_sof_control *scontrol = dobj->private;
+
+ dev_dbg(sdev->dev, "tplg: unload control name : %s\n", scomp->name);
+
+ fcomp.hdr.cmd = SOF_IPC_GLB_TPLG_MSG | SOF_IPC_TPLG_COMP_FREE;
+ fcomp.hdr.size = sizeof(fcomp);
+ fcomp.id = scontrol->comp_id;
+
+ kfree(scontrol->control_data);
+ list_del(&scontrol->list);
+ kfree(scontrol);
+ /* send IPC to the DSP */
+ return sof_ipc_tx_message(sdev->ipc,
+ fcomp.hdr.cmd, &fcomp, sizeof(fcomp),
+ NULL, 0);
+}
+
+/*
+ * DAI Topology
+ */
+
+static int sof_connect_dai_widget(struct snd_soc_component *scomp,
+ struct snd_soc_dapm_widget *w,
+ struct snd_soc_tplg_dapm_widget *tw,
+ struct snd_sof_dai *dai)
+{
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ struct snd_soc_card *card = scomp->card;
+ struct snd_soc_pcm_runtime *rtd;
+
+ list_for_each_entry(rtd, &card->rtd_list, list) {
+ dev_vdbg(sdev->dev, "tplg: check widget: %s stream: %s dai stream: %s\n",
+ w->name, w->sname, rtd->dai_link->stream_name);
+
+ if (!w->sname || !rtd->dai_link->stream_name)
+ continue;
+
+ /* does stream match DAI link ? */
+ if (strcmp(w->sname, rtd->dai_link->stream_name))
+ continue;
+
+ switch (w->id) {
+ case snd_soc_dapm_dai_out:
+ rtd->cpu_dai->capture_widget = w;
+ dai->name = rtd->dai_link->name;
+ dev_dbg(sdev->dev, "tplg: connected widget %s -> DAI link %s\n",
+ w->name, rtd->dai_link->name);
+ break;
+ case snd_soc_dapm_dai_in:
+ rtd->cpu_dai->playback_widget = w;
+ dai->name = rtd->dai_link->name;
+ dev_dbg(sdev->dev, "tplg: connected widget %s -> DAI link %s\n",
+ w->name, rtd->dai_link->name);
+ break;
+ default:
+ break;
+ }
+ }
+
+ /* check we have a connection */
+ if (!dai->name) {
+ dev_err(sdev->dev, "error: can't connect DAI %s stream %s\n",
+ w->name, w->sname);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int sof_widget_load_dai(struct snd_soc_component *scomp, int index,
+ struct snd_sof_widget *swidget,
+ struct snd_soc_tplg_dapm_widget *tw,
+ struct sof_ipc_comp_reply *r,
+ struct snd_sof_dai *dai)
+{
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ struct snd_soc_tplg_private *private = &tw->priv;
+ struct sof_ipc_comp_dai comp_dai;
+ int ret;
+
+ /* configure dai IPC message */
+ memset(&comp_dai, 0, sizeof(comp_dai));
+ comp_dai.comp.hdr.size = sizeof(comp_dai);
+ comp_dai.comp.hdr.cmd = SOF_IPC_GLB_TPLG_MSG | SOF_IPC_TPLG_COMP_NEW;
+ comp_dai.comp.id = swidget->comp_id;
+ comp_dai.comp.type = SOF_COMP_DAI;
+ comp_dai.comp.pipeline_id = index;
+ comp_dai.config.hdr.size = sizeof(comp_dai.config);
+
+ ret = sof_parse_tokens(scomp, &comp_dai, dai_tokens,
+ ARRAY_SIZE(dai_tokens), private->array,
+ le32_to_cpu(private->size));
+ if (ret != 0) {
+ dev_err(sdev->dev, "error: parse dai tokens failed %d\n",
+ le32_to_cpu(private->size));
+ return ret;
+ }
+
+ ret = sof_parse_tokens(scomp, &comp_dai.config, comp_tokens,
+ ARRAY_SIZE(comp_tokens), private->array,
+ le32_to_cpu(private->size));
+ if (ret != 0) {
+ dev_err(sdev->dev, "error: parse dai.cfg tokens failed %d\n",
+ private->size);
+ return ret;
+ }
+
+ dev_dbg(sdev->dev, "dai %s: type %d index %d\n",
+ swidget->widget->name, comp_dai.type, comp_dai.dai_index);
+ sof_dbg_comp_config(scomp, &comp_dai.config);
+
+ ret = sof_ipc_tx_message(sdev->ipc, comp_dai.comp.hdr.cmd,
+ &comp_dai, sizeof(comp_dai), r, sizeof(*r));
+
+ if (ret == 0 && dai) {
+ dai->sdev = sdev;
+ memcpy(&dai->comp_dai, &comp_dai, sizeof(comp_dai));
+ }
+
+ return ret;
+}
+
+/*
+ * Buffer topology
+ */
+
+static int sof_widget_load_buffer(struct snd_soc_component *scomp, int index,
+ struct snd_sof_widget *swidget,
+ struct snd_soc_tplg_dapm_widget *tw,
+ struct sof_ipc_comp_reply *r)
+{
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ struct snd_soc_tplg_private *private = &tw->priv;
+ struct sof_ipc_buffer *buffer;
+ int ret;
+
+ buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
+ if (!buffer)
+ return -ENOMEM;
+
+ /* configure dai IPC message */
+ buffer->comp.hdr.size = sizeof(*buffer);
+ buffer->comp.hdr.cmd = SOF_IPC_GLB_TPLG_MSG | SOF_IPC_TPLG_BUFFER_NEW;
+ buffer->comp.id = swidget->comp_id;
+ buffer->comp.type = SOF_COMP_BUFFER;
+ buffer->comp.pipeline_id = index;
+
+ ret = sof_parse_tokens(scomp, buffer, buffer_tokens,
+ ARRAY_SIZE(buffer_tokens), private->array,
+ le32_to_cpu(private->size));
+ if (ret != 0) {
+ dev_err(sdev->dev, "error: parse buffer tokens failed %d\n",
+ private->size);
+ kfree(buffer);
+ return ret;
+ }
+
+ dev_dbg(sdev->dev, "buffer %s: size %d caps 0x%x\n",
+ swidget->widget->name, buffer->size, buffer->caps);
+
+ swidget->private = buffer;
+
+ ret = sof_ipc_tx_message(sdev->ipc, buffer->comp.hdr.cmd, buffer,
+ sizeof(*buffer), r, sizeof(*r));
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: buffer %s load failed\n",
+ swidget->widget->name);
+ kfree(buffer);
+ }
+
+ return ret;
+}
+
+/* bind PCM ID to host component ID */
+static int spcm_bind(struct snd_sof_dev *sdev, struct snd_sof_pcm *spcm,
+ int dir)
+{
+ struct snd_sof_widget *host_widget;
+
+ host_widget = snd_sof_find_swidget_sname(sdev,
+ spcm->pcm.caps[dir].name,
+ dir);
+ if (!host_widget) {
+ dev_err(sdev->dev, "can't find host comp to bind pcm\n");
+ return -EINVAL;
+ }
+
+ spcm->stream[dir].comp_id = host_widget->comp_id;
+
+ return 0;
+}
+
+/*
+ * PCM Topology
+ */
+
+static int sof_widget_load_pcm(struct snd_soc_component *scomp, int index,
+ struct snd_sof_widget *swidget,
+ enum sof_ipc_stream_direction dir,
+ struct snd_soc_tplg_dapm_widget *tw,
+ struct sof_ipc_comp_reply *r)
+{
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ struct snd_soc_tplg_private *private = &tw->priv;
+ struct sof_ipc_comp_host *host;
+ int ret;
+
+ host = kzalloc(sizeof(*host), GFP_KERNEL);
+ if (!host)
+ return -ENOMEM;
+
+ /* configure host comp IPC message */
+ host->comp.hdr.size = sizeof(*host);
+ host->comp.hdr.cmd = SOF_IPC_GLB_TPLG_MSG | SOF_IPC_TPLG_COMP_NEW;
+ host->comp.id = swidget->comp_id;
+ host->comp.type = SOF_COMP_HOST;
+ host->comp.pipeline_id = index;
+ host->direction = dir;
+ host->config.hdr.size = sizeof(host->config);
+
+ ret = sof_parse_tokens(scomp, host, pcm_tokens,
+ ARRAY_SIZE(pcm_tokens), private->array,
+ le32_to_cpu(private->size));
+ if (ret != 0) {
+ dev_err(sdev->dev, "error: parse host tokens failed %d\n",
+ private->size);
+ goto err;
+ }
+
+ ret = sof_parse_tokens(scomp, &host->config, comp_tokens,
+ ARRAY_SIZE(comp_tokens), private->array,
+ le32_to_cpu(private->size));
+ if (ret != 0) {
+ dev_err(sdev->dev, "error: parse host.cfg tokens failed %d\n",
+ le32_to_cpu(private->size));
+ goto err;
+ }
+
+ dev_dbg(sdev->dev, "loaded host %s\n", swidget->widget->name);
+ sof_dbg_comp_config(scomp, &host->config);
+
+ swidget->private = host;
+
+ ret = sof_ipc_tx_message(sdev->ipc, host->comp.hdr.cmd, host,
+ sizeof(*host), r, sizeof(*r));
+ if (ret >= 0)
+ return ret;
+err:
+ kfree(host);
+ return ret;
+}
+
+/*
+ * Pipeline Topology
+ */
+int sof_load_pipeline_ipc(struct snd_sof_dev *sdev,
+ struct sof_ipc_pipe_new *pipeline,
+ struct sof_ipc_comp_reply *r)
+{
+ struct sof_ipc_pm_core_config pm_core_config;
+ int ret;
+
+ ret = sof_ipc_tx_message(sdev->ipc, pipeline->hdr.cmd, pipeline,
+ sizeof(*pipeline), r, sizeof(*r));
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: load pipeline ipc failure\n");
+ return ret;
+ }
+
+ /* power up the core that this pipeline is scheduled on */
+ ret = snd_sof_dsp_core_power_up(sdev, 1 << pipeline->core);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: powering up pipeline schedule core %d\n",
+ pipeline->core);
+ return ret;
+ }
+
+ /* update enabled cores mask */
+ sdev->enabled_cores_mask |= 1 << pipeline->core;
+
+ /*
+ * Now notify DSP that the core that this pipeline is scheduled on
+ * has been powered up
+ */
+ memset(&pm_core_config, 0, sizeof(pm_core_config));
+ pm_core_config.enable_mask = sdev->enabled_cores_mask;
+
+ /* configure CORE_ENABLE ipc message */
+ pm_core_config.hdr.size = sizeof(pm_core_config);
+ pm_core_config.hdr.cmd = SOF_IPC_GLB_PM_MSG | SOF_IPC_PM_CORE_ENABLE;
+
+ /* send ipc */
+ ret = sof_ipc_tx_message(sdev->ipc, pm_core_config.hdr.cmd,
+ &pm_core_config, sizeof(pm_core_config),
+ &pm_core_config, sizeof(pm_core_config));
+ if (ret < 0)
+ dev_err(sdev->dev, "error: core enable ipc failure\n");
+
+ return ret;
+}
+
+static int sof_widget_load_pipeline(struct snd_soc_component *scomp,
+ int index, struct snd_sof_widget *swidget,
+ struct snd_soc_tplg_dapm_widget *tw,
+ struct sof_ipc_comp_reply *r)
+{
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ struct snd_soc_tplg_private *private = &tw->priv;
+ struct sof_ipc_pipe_new *pipeline;
+ struct snd_sof_widget *comp_swidget;
+ int ret;
+
+ pipeline = kzalloc(sizeof(*pipeline), GFP_KERNEL);
+ if (!pipeline)
+ return -ENOMEM;
+
+ /* configure dai IPC message */
+ pipeline->hdr.size = sizeof(*pipeline);
+ pipeline->hdr.cmd = SOF_IPC_GLB_TPLG_MSG | SOF_IPC_TPLG_PIPE_NEW;
+ pipeline->pipeline_id = index;
+ pipeline->comp_id = swidget->comp_id;
+
+ /* the widget's stream name identifies the component that schedules this pipeline */
+ comp_swidget = snd_sof_find_swidget(sdev, tw->sname);
+ if (!comp_swidget) {
+ dev_err(sdev->dev, "error: widget %s refers to non-existent widget %s\n",
+ tw->name, tw->sname);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ pipeline->sched_id = comp_swidget->comp_id;
+
+ dev_dbg(sdev->dev, "tplg: pipeline id %d comp %d scheduling comp id %d\n",
+ pipeline->pipeline_id, pipeline->comp_id, pipeline->sched_id);
+
+ ret = sof_parse_tokens(scomp, pipeline, sched_tokens,
+ ARRAY_SIZE(sched_tokens), private->array,
+ le32_to_cpu(private->size));
+ if (ret != 0) {
+ dev_err(sdev->dev, "error: parse pipeline tokens failed %d\n",
+ le32_to_cpu(private->size));
+ goto err;
+ }
+
+ dev_dbg(sdev->dev, "pipeline %s: period %d pri %d mips %d core %d frames %d\n",
+ swidget->widget->name, pipeline->period, pipeline->priority,
+ pipeline->period_mips, pipeline->core, pipeline->frames_per_sched);
+
+ swidget->private = pipeline;
+
+ /* send IPCs to create the pipeline comp and power up the scheduling core */
+ ret = sof_load_pipeline_ipc(sdev, pipeline, r);
+ if (ret >= 0)
+ return ret;
+err:
+ kfree(pipeline);
+ return ret;
+}
+
+/*
+ * Mixer topology
+ */
+
+static int sof_widget_load_mixer(struct snd_soc_component *scomp, int index,
+ struct snd_sof_widget *swidget,
+ struct snd_soc_tplg_dapm_widget *tw,
+ struct sof_ipc_comp_reply *r)
+{
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ struct snd_soc_tplg_private *private = &tw->priv;
+ struct sof_ipc_comp_mixer *mixer;
+ int ret;
+
+ mixer = kzalloc(sizeof(*mixer), GFP_KERNEL);
+ if (!mixer)
+ return -ENOMEM;
+
+ /* configure mixer IPC message */
+ mixer->comp.hdr.size = sizeof(*mixer);
+ mixer->comp.hdr.cmd = SOF_IPC_GLB_TPLG_MSG | SOF_IPC_TPLG_COMP_NEW;
+ mixer->comp.id = swidget->comp_id;
+ mixer->comp.type = SOF_COMP_MIXER;
+ mixer->comp.pipeline_id = index;
+ mixer->config.hdr.size = sizeof(mixer->config);
+
+ ret = sof_parse_tokens(scomp, &mixer->config, comp_tokens,
+ ARRAY_SIZE(comp_tokens), private->array,
+ le32_to_cpu(private->size));
+ if (ret != 0) {
+ dev_err(sdev->dev, "error: parse mixer.cfg tokens failed %d\n",
+ le32_to_cpu(private->size));
+ kfree(mixer);
+ return ret;
+ }
+
+ sof_dbg_comp_config(scomp, &mixer->config);
+
+ swidget->private = mixer;
+
+ ret = sof_ipc_tx_message(sdev->ipc, mixer->comp.hdr.cmd, mixer,
+ sizeof(*mixer), r, sizeof(*r));
+ if (ret < 0)
+ kfree(mixer);
+
+ return ret;
+}
+
+/*
+ * Mux topology
+ */
+static int sof_widget_load_mux(struct snd_soc_component *scomp, int index,
+ struct snd_sof_widget *swidget,
+ struct snd_soc_tplg_dapm_widget *tw,
+ struct sof_ipc_comp_reply *r)
+{
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ struct snd_soc_tplg_private *private = &tw->priv;
+ struct sof_ipc_comp_mux *mux;
+ int ret;
+
+ mux = kzalloc(sizeof(*mux), GFP_KERNEL);
+ if (!mux)
+ return -ENOMEM;
+
+ /* configure mux IPC message */
+ mux->comp.hdr.size = sizeof(*mux);
+ mux->comp.hdr.cmd = SOF_IPC_GLB_TPLG_MSG | SOF_IPC_TPLG_COMP_NEW;
+ mux->comp.id = swidget->comp_id;
+ mux->comp.type = SOF_COMP_MUX;
+ mux->comp.pipeline_id = index;
+ mux->config.hdr.size = sizeof(mux->config);
+
+ ret = sof_parse_tokens(scomp, &mux->config, comp_tokens,
+ ARRAY_SIZE(comp_tokens), private->array,
+ le32_to_cpu(private->size));
+ if (ret != 0) {
+ dev_err(sdev->dev, "error: parse mux.cfg tokens failed %d\n",
+ le32_to_cpu(private->size));
+ kfree(mux);
+ return ret;
+ }
+
+ sof_dbg_comp_config(scomp, &mux->config);
+
+ swidget->private = mux;
+
+ ret = sof_ipc_tx_message(sdev->ipc, mux->comp.hdr.cmd, mux,
+ sizeof(*mux), r, sizeof(*r));
+ if (ret < 0)
+ kfree(mux);
+
+ return ret;
+}
+
+/*
+ * PGA Topology
+ */
+
+static int sof_widget_load_pga(struct snd_soc_component *scomp, int index,
+ struct snd_sof_widget *swidget,
+ struct snd_soc_tplg_dapm_widget *tw,
+ struct sof_ipc_comp_reply *r)
+{
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ struct snd_soc_tplg_private *private = &tw->priv;
+ struct sof_ipc_comp_volume *volume;
+ int ret;
+
+ volume = kzalloc(sizeof(*volume), GFP_KERNEL);
+ if (!volume)
+ return -ENOMEM;
+
+ if (le32_to_cpu(tw->num_kcontrols) != 1) {
+ dev_err(sdev->dev, "error: invalid kcontrol count %d for volume\n",
+ le32_to_cpu(tw->num_kcontrols));
+ ret = -EINVAL;
+ goto err;
+ }
+
+ /* configure volume IPC message */
+ volume->comp.hdr.size = sizeof(*volume);
+ volume->comp.hdr.cmd = SOF_IPC_GLB_TPLG_MSG | SOF_IPC_TPLG_COMP_NEW;
+ volume->comp.id = swidget->comp_id;
+ volume->comp.type = SOF_COMP_VOLUME;
+ volume->comp.pipeline_id = index;
+ volume->config.hdr.size = sizeof(volume->config);
+
+ ret = sof_parse_tokens(scomp, volume, volume_tokens,
+ ARRAY_SIZE(volume_tokens), private->array,
+ le32_to_cpu(private->size));
+ if (ret != 0) {
+ dev_err(sdev->dev, "error: parse volume tokens failed %d\n",
+ le32_to_cpu(private->size));
+ goto err;
+ }
+ ret = sof_parse_tokens(scomp, &volume->config, comp_tokens,
+ ARRAY_SIZE(comp_tokens), private->array,
+ le32_to_cpu(private->size));
+ if (ret != 0) {
+ dev_err(sdev->dev, "error: parse volume.cfg tokens failed %d\n",
+ le32_to_cpu(private->size));
+ goto err;
+ }
+
+ sof_dbg_comp_config(scomp, &volume->config);
+
+ swidget->private = volume;
+
+ ret = sof_ipc_tx_message(sdev->ipc, volume->comp.hdr.cmd, volume,
+ sizeof(*volume), r, sizeof(*r));
+ if (ret >= 0)
+ return ret;
+err:
+ kfree(volume);
+ return ret;
+}
+
+/*
+ * SRC Topology
+ */
+
+static int sof_widget_load_src(struct snd_soc_component *scomp, int index,
+ struct snd_sof_widget *swidget,
+ struct snd_soc_tplg_dapm_widget *tw,
+ struct sof_ipc_comp_reply *r)
+{
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ struct snd_soc_tplg_private *private = &tw->priv;
+ struct sof_ipc_comp_src *src;
+ int ret;
+
+ src = kzalloc(sizeof(*src), GFP_KERNEL);
+ if (!src)
+ return -ENOMEM;
+
+ /* configure src IPC message */
+ src->comp.hdr.size = sizeof(*src);
+ src->comp.hdr.cmd = SOF_IPC_GLB_TPLG_MSG | SOF_IPC_TPLG_COMP_NEW;
+ src->comp.id = swidget->comp_id;
+ src->comp.type = SOF_COMP_SRC;
+ src->comp.pipeline_id = index;
+ src->config.hdr.size = sizeof(src->config);
+
+ ret = sof_parse_tokens(scomp, src, src_tokens,
+ ARRAY_SIZE(src_tokens), private->array,
+ le32_to_cpu(private->size));
+ if (ret != 0) {
+ dev_err(sdev->dev, "error: parse src tokens failed %d\n",
+ le32_to_cpu(private->size));
+ goto err;
+ }
+
+ ret = sof_parse_tokens(scomp, &src->config, comp_tokens,
+ ARRAY_SIZE(comp_tokens), private->array,
+ le32_to_cpu(private->size));
+ if (ret != 0) {
+ dev_err(sdev->dev, "error: parse src.cfg tokens failed %d\n",
+ le32_to_cpu(private->size));
+ goto err;
+ }
+
+ dev_dbg(sdev->dev, "src %s: source rate %d sink rate %d\n",
+ swidget->widget->name, src->source_rate, src->sink_rate);
+ sof_dbg_comp_config(scomp, &src->config);
+
+ swidget->private = src;
+
+ ret = sof_ipc_tx_message(sdev->ipc, src->comp.hdr.cmd, src,
+ sizeof(*src), r, sizeof(*r));
+ if (ret >= 0)
+ return ret;
+err:
+ kfree(src);
+ return ret;
+}
+
+/*
+ * Signal Generator Topology
+ */
+
+static int sof_widget_load_siggen(struct snd_soc_component *scomp, int index,
+ struct snd_sof_widget *swidget,
+ struct snd_soc_tplg_dapm_widget *tw,
+ struct sof_ipc_comp_reply *r)
+{
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ struct snd_soc_tplg_private *private = &tw->priv;
+ struct sof_ipc_comp_tone *tone;
+ int ret;
+
+ tone = kzalloc(sizeof(*tone), GFP_KERNEL);
+ if (!tone)
+ return -ENOMEM;
+
+ /* configure siggen IPC message */
+ tone->comp.hdr.size = sizeof(*tone);
+ tone->comp.hdr.cmd = SOF_IPC_GLB_TPLG_MSG | SOF_IPC_TPLG_COMP_NEW;
+ tone->comp.id = swidget->comp_id;
+ tone->comp.type = SOF_COMP_TONE;
+ tone->comp.pipeline_id = index;
+ tone->config.hdr.size = sizeof(tone->config);
+
+ ret = sof_parse_tokens(scomp, tone, tone_tokens,
+ ARRAY_SIZE(tone_tokens), private->array,
+ le32_to_cpu(private->size));
+ if (ret != 0) {
+ dev_err(sdev->dev, "error: parse tone tokens failed %d\n",
+ le32_to_cpu(private->size));
+ goto err;
+ }
+
+ ret = sof_parse_tokens(scomp, &tone->config, comp_tokens,
+ ARRAY_SIZE(comp_tokens), private->array,
+ le32_to_cpu(private->size));
+ if (ret != 0) {
+ dev_err(sdev->dev, "error: parse tone.cfg tokens failed %d\n",
+ le32_to_cpu(private->size));
+ goto err;
+ }
+
+ dev_dbg(sdev->dev, "tone %s: frequency %d amplitude %d\n",
+ swidget->widget->name, tone->frequency, tone->amplitude);
+ sof_dbg_comp_config(scomp, &tone->config);
+
+ swidget->private = tone;
+
+ ret = sof_ipc_tx_message(sdev->ipc, tone->comp.hdr.cmd, tone,
+ sizeof(*tone), r, sizeof(*r));
+ if (ret >= 0)
+ return ret;
+err:
+ kfree(tone);
+ return ret;
+}
+
+static int sof_process_load(struct snd_soc_component *scomp, int index,
+ struct snd_sof_widget *swidget,
+ struct snd_soc_tplg_dapm_widget *tw,
+ struct sof_ipc_comp_reply *r,
+ int type)
+{
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ struct snd_soc_tplg_private *private = &tw->priv;
+ struct snd_soc_dapm_widget *widget = swidget->widget;
+ const struct snd_kcontrol_new *kc;
+ struct soc_bytes_ext *sbe;
+ struct soc_mixer_control *sm;
+ struct soc_enum *se;
+ struct snd_sof_control *scontrol = NULL;
+ struct sof_abi_hdr *pdata = NULL;
+ struct sof_ipc_comp_process *process;
+ size_t ipc_size, ipc_data_size = 0;
+ int ret, i, offset = 0;
+
+ if (type == SOF_COMP_NONE) {
+ dev_err(sdev->dev, "error: invalid process comp type %d\n",
+ type);
+ return -EINVAL;
+ }
+
+ /*
+ * First pass over the component controls: add up the size of all
+ * control private data so it can be appended to the IPC payload.
+ */
+ for (i = 0; i < widget->num_kcontrols; i++) {
+
+ kc = &widget->kcontrol_news[i];
+
+ switch (widget->dobj.widget.kcontrol_type) {
+ case SND_SOC_TPLG_TYPE_MIXER:
+ sm = (struct soc_mixer_control *)kc->private_value;
+ scontrol = sm->dobj.private;
+ break;
+ case SND_SOC_TPLG_TYPE_BYTES:
+ sbe = (struct soc_bytes_ext *)kc->private_value;
+ scontrol = sbe->dobj.private;
+ break;
+ case SND_SOC_TPLG_TYPE_ENUM:
+ se = (struct soc_enum *)kc->private_value;
+ scontrol = se->dobj.private;
+ break;
+ default:
+ dev_err(sdev->dev, "error: unknown kcontrol type %d in widget %s\n",
+ widget->dobj.widget.kcontrol_type,
+ widget->name);
+ return -EINVAL;
+ }
+
+ if (!scontrol) {
+ dev_err(sdev->dev, "error: no scontrol for widget %s\n",
+ widget->name);
+ return -EINVAL;
+ }
+
+ /* don't include if no private data */
+ pdata = scontrol->control_data->data;
+ if (!pdata)
+ continue;
+
+ /* make sure data is valid - data can be updated at runtime */
+ if (pdata->magic != SOF_ABI_MAGIC)
+ continue;
+
+ ipc_data_size += pdata->size;
+ }
+
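+ /*
+ * size the IPC payload for the base process struct, the widget
+ * private data and the accumulated control private data
+ */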
+ ipc_size = sizeof(struct sof_ipc_comp_process) +
+ le32_to_cpu(private->size) +
+ ipc_data_size;
+
+ process = kzalloc(ipc_size, GFP_KERNEL);
+ if (!process)
+ return -ENOMEM;
+
+ /* configure process IPC message */
+ process->comp.hdr.size = ipc_size;
+ process->comp.hdr.cmd = SOF_IPC_GLB_TPLG_MSG | SOF_IPC_TPLG_COMP_NEW;
+ process->comp.id = swidget->comp_id;
+ process->comp.type = type;
+ process->comp.pipeline_id = index;
+ process->config.hdr.size = sizeof(process->config);
+
+ ret = sof_parse_tokens(scomp, &process->config, comp_tokens,
+ ARRAY_SIZE(comp_tokens), private->array,
+ le32_to_cpu(private->size));
+ if (ret != 0) {
+ dev_err(sdev->dev, "error: parse process.cfg tokens failed %d\n",
+ le32_to_cpu(private->size));
+ goto err;
+ }
+
+ sof_dbg_comp_config(scomp, &process->config);
+
+ /*
+ * Second pass over the component controls: copy the private data
+ * of each control into the IPC payload.
+ */
+ for (i = 0; i < widget->num_kcontrols; i++) {
+ kc = &widget->kcontrol_news[i];
+
+ switch (widget->dobj.widget.kcontrol_type) {
+ case SND_SOC_TPLG_TYPE_MIXER:
+ sm = (struct soc_mixer_control *)kc->private_value;
+ scontrol = sm->dobj.private;
+ break;
+ case SND_SOC_TPLG_TYPE_BYTES:
+ sbe = (struct soc_bytes_ext *)kc->private_value;
+ scontrol = sbe->dobj.private;
+ break;
+ case SND_SOC_TPLG_TYPE_ENUM:
+ se = (struct soc_enum *)kc->private_value;
+ scontrol = se->dobj.private;
+ break;
+ default:
+ dev_err(sdev->dev, "error: unknown kcontrol type %d in widget %s\n",
+ widget->dobj.widget.kcontrol_type,
+ widget->name);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ /* don't include if no private data */
+ pdata = scontrol->control_data->data;
+ if (!pdata)
+ continue;
+
+ /* make sure data is valid - data can be updated at runtime */
+ if (pdata->magic != SOF_ABI_MAGIC)
+ continue;
+
+ memcpy(&process->data[offset], pdata->data, pdata->size);
+ offset += pdata->size;
+ }
+
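+ /* report only the size of the appended control private data */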
+ process->size = ipc_data_size;
+ swidget->private = process;
+
+ ret = sof_ipc_tx_message(sdev->ipc, process->comp.hdr.cmd, process,
+ ipc_size, r, sizeof(*r));
+ if (ret >= 0)
+ return ret;
+err:
+ kfree(process);
+ return ret;
+}
+
+/*
+ * Processing Component Topology - can be "effect", "codec", or general
+ * "processing".
+ */
+
+static int sof_widget_load_process(struct snd_soc_component *scomp, int index,
+ struct snd_sof_widget *swidget,
+ struct snd_soc_tplg_dapm_widget *tw,
+ struct sof_ipc_comp_reply *r)
+{
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ struct snd_soc_tplg_private *private = &tw->priv;
+ struct sof_ipc_comp_process config;
+ int ret;
+
+ /* check we have some tokens - we need at least process type */
+ if (le32_to_cpu(private->size) == 0) {
+ dev_err(sdev->dev, "error: process tokens not found\n");
+ return -EINVAL;
+ }
+
+ memset(&config, 0, sizeof(config));
+
+ /* get the process token */
+ ret = sof_parse_tokens(scomp, &config, process_tokens,
+ ARRAY_SIZE(process_tokens), private->array,
+ le32_to_cpu(private->size));
+ if (ret != 0) {
+ dev_err(sdev->dev, "error: parse process tokens failed %d\n",
+ le32_to_cpu(private->size));
+ return ret;
+ }
+
+ /* now load process specific data and send IPC */
+ ret = sof_process_load(scomp, index, swidget, tw, r,
+ find_process_comp_type(config.type));
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: process loading failed\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int sof_widget_bind_event(struct snd_sof_dev *sdev,
+ struct snd_sof_widget *swidget,
+ u16 event_type)
+{
+ struct sof_ipc_comp *ipc_comp;
+
+ /* validate widget event type */
+ switch (event_type) {
+ case SOF_KEYWORD_DETECT_DAPM_EVENT:
+ /* only KEYWORD_DETECT comps should handle this */
+ if (swidget->id != snd_soc_dapm_effect)
+ break;
+
+ ipc_comp = swidget->private;
+ if (ipc_comp && ipc_comp->type != SOF_COMP_KEYWORD_DETECT)
+ break;
+
+ /* bind event to keyword detect comp */
+ return snd_soc_tplg_widget_bind_event(swidget->widget,
+ sof_kwd_events,
+ ARRAY_SIZE(sof_kwd_events),
+ event_type);
+ default:
+ break;
+ }
+
+ dev_err(sdev->dev,
+ "error: invalid event type %d for widget %s\n",
+ event_type, swidget->widget->name);
+ return -EINVAL;
+}
+
+/* external widget init - used for any driver specific init */
+static int sof_widget_ready(struct snd_soc_component *scomp, int index,
+ struct snd_soc_dapm_widget *w,
+ struct snd_soc_tplg_dapm_widget *tw)
+{
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ struct snd_sof_widget *swidget;
+ struct snd_sof_dai *dai;
+ struct sof_ipc_comp_reply reply;
+ struct snd_sof_control *scontrol;
+ int ret = 0;
+
+ swidget = kzalloc(sizeof(*swidget), GFP_KERNEL);
+ if (!swidget)
+ return -ENOMEM;
+
+ swidget->sdev = sdev;
+ swidget->widget = w;
+ swidget->comp_id = sdev->next_comp_id++;
+ swidget->complete = 0;
+ swidget->id = w->id;
+ swidget->pipeline_id = index;
+ swidget->private = NULL;
+ memset(&reply, 0, sizeof(reply));
+
+ dev_dbg(sdev->dev, "tplg: ready widget id %d pipe %d type %d name : %s stream %s\n",
+ swidget->comp_id, index, swidget->id, tw->name,
+ strnlen(tw->sname, SNDRV_CTL_ELEM_ID_NAME_MAXLEN) > 0
+ ? tw->sname : "none");
+
+ /* handle any special case widgets */
+ switch (w->id) {
+ case snd_soc_dapm_dai_in:
+ case snd_soc_dapm_dai_out:
+ dai = kzalloc(sizeof(*dai), GFP_KERNEL);
+ if (!dai) {
+ kfree(swidget);
+ return -ENOMEM;
+ }
+
+ ret = sof_widget_load_dai(scomp, index, swidget, tw, &reply,
+ dai);
+ if (ret == 0) {
+ sof_connect_dai_widget(scomp, w, tw, dai);
+ list_add(&dai->list, &sdev->dai_list);
+ swidget->private = dai;
+ } else {
+ kfree(dai);
+ }
+ break;
+ case snd_soc_dapm_mixer:
+ ret = sof_widget_load_mixer(scomp, index, swidget, tw, &reply);
+ break;
+ case snd_soc_dapm_pga:
+ ret = sof_widget_load_pga(scomp, index, swidget, tw, &reply);
+ /* find the scontrol for this PGA and set the readback offset */
+ list_for_each_entry(scontrol, &sdev->kcontrol_list, list) {
+ if (scontrol->comp_id == swidget->comp_id) {
+ scontrol->readback_offset = reply.offset;
+ break;
+ }
+ }
+ break;
+ case snd_soc_dapm_buffer:
+ ret = sof_widget_load_buffer(scomp, index, swidget, tw, &reply);
+ break;
+ case snd_soc_dapm_scheduler:
+ ret = sof_widget_load_pipeline(scomp, index, swidget, tw,
+ &reply);
+ break;
+ case snd_soc_dapm_aif_out:
+ ret = sof_widget_load_pcm(scomp, index, swidget,
+ SOF_IPC_STREAM_CAPTURE, tw, &reply);
+ break;
+ case snd_soc_dapm_aif_in:
+ ret = sof_widget_load_pcm(scomp, index, swidget,
+ SOF_IPC_STREAM_PLAYBACK, tw, &reply);
+ break;
+ case snd_soc_dapm_src:
+ ret = sof_widget_load_src(scomp, index, swidget, tw, &reply);
+ break;
+ case snd_soc_dapm_siggen:
+ ret = sof_widget_load_siggen(scomp, index, swidget, tw, &reply);
+ break;
+ case snd_soc_dapm_effect:
+ ret = sof_widget_load_process(scomp, index, swidget, tw,
+ &reply);
+ break;
+ case snd_soc_dapm_mux:
+ case snd_soc_dapm_demux:
+ ret = sof_widget_load_mux(scomp, index, swidget, tw, &reply);
+ break;
+ case snd_soc_dapm_switch:
+ case snd_soc_dapm_dai_link:
+ case snd_soc_dapm_kcontrol:
+ default:
+ dev_warn(sdev->dev, "warning: widget type %d name %s not handled\n",
+ swidget->id, tw->name);
+ break;
+ }
+
+ /* check IPC reply */
+ if (ret < 0 || reply.rhdr.error < 0) {
+ dev_err(sdev->dev,
+ "error: DSP failed to add widget id %d type %d name : %s stream %s reply %d\n",
+ tw->shift, swidget->id, tw->name,
+ strnlen(tw->sname, SNDRV_CTL_ELEM_ID_NAME_MAXLEN) > 0
+ ? tw->sname : "none", reply.rhdr.error);
+ kfree(swidget);
+ return ret;
+ }
+
+ /* bind widget to external event */
+ if (tw->event_type) {
+ ret = sof_widget_bind_event(sdev, swidget,
+ le16_to_cpu(tw->event_type));
+ if (ret) {
+ dev_err(sdev->dev, "error: widget event binding failed\n");
+ kfree(swidget->private);
+ kfree(swidget);
+ return ret;
+ }
+ }
+
+ w->dobj.private = swidget;
+ list_add(&swidget->list, &sdev->widget_list);
+ return ret;
+}
+
+static int sof_route_unload(struct snd_soc_component *scomp,
+ struct snd_soc_dobj *dobj)
+{
+ struct snd_sof_route *sroute;
+
+ sroute = dobj->private;
+ if (!sroute)
+ return 0;
+
+ /* free sroute and its private data */
+ kfree(sroute->private);
+ list_del(&sroute->list);
+ kfree(sroute);
+
+ return 0;
+}
+
+static int sof_widget_unload(struct snd_soc_component *scomp,
+ struct snd_soc_dobj *dobj)
+{
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ const struct snd_kcontrol_new *kc;
+ struct snd_soc_dapm_widget *widget;
+ struct sof_ipc_pipe_new *pipeline;
+ struct snd_sof_control *scontrol;
+ struct snd_sof_widget *swidget;
+ struct soc_mixer_control *sm;
+ struct soc_bytes_ext *sbe;
+ struct snd_sof_dai *dai;
+ struct soc_enum *se;
+ int ret = 0;
+ int i;
+
+ swidget = dobj->private;
+ if (!swidget)
+ return 0;
+
+ widget = swidget->widget;
+
+ switch (swidget->id) {
+ case snd_soc_dapm_dai_in:
+ case snd_soc_dapm_dai_out:
+ dai = swidget->private;
+
+ if (dai) {
+ /* free dai config */
+ kfree(dai->dai_config);
+ list_del(&dai->list);
+ }
+ break;
+ case snd_soc_dapm_scheduler:
+
+ /* power down the pipeline schedule core */
+ pipeline = swidget->private;
+ ret = snd_sof_dsp_core_power_down(sdev, 1 << pipeline->core);
+ if (ret < 0)
+ dev_err(sdev->dev, "error: powering down pipeline schedule core %d\n",
+ pipeline->core);
+
+ /* update enabled cores mask */
+ sdev->enabled_cores_mask &= ~(1 << pipeline->core);
+
+ break;
+ default:
+ break;
+ }
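+ /* free the scontrols bound to this widget's kcontrols */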
+ for (i = 0; i < widget->num_kcontrols; i++) {
+ kc = &widget->kcontrol_news[i];
+ switch (dobj->widget.kcontrol_type) {
+ case SND_SOC_TPLG_TYPE_MIXER:
+ sm = (struct soc_mixer_control *)kc->private_value;
+ scontrol = sm->dobj.private;
+ if (sm->max > 1)
+ kfree(scontrol->volume_table);
+ break;
+ case SND_SOC_TPLG_TYPE_ENUM:
+ se = (struct soc_enum *)kc->private_value;
+ scontrol = se->dobj.private;
+ break;
+ case SND_SOC_TPLG_TYPE_BYTES:
+ sbe = (struct soc_bytes_ext *)kc->private_value;
+ scontrol = sbe->dobj.private;
+ break;
+ default:
+ dev_warn(sdev->dev, "unsupported kcontrol_type\n");
+ goto out;
+ }
+ kfree(scontrol->control_data);
+ list_del(&scontrol->list);
+ kfree(scontrol);
+ }
+
+out:
+ /* free private value */
+ kfree(swidget->private);
+
+ /* remove and free swidget object */
+ list_del(&swidget->list);
+ kfree(swidget);
+
+ return ret;
+}
+
+/*
+ * DAI HW configuration.
+ */
+
+/* FE DAI - used for any driver specific init */
+static int sof_dai_load(struct snd_soc_component *scomp, int index,
+ struct snd_soc_dai_driver *dai_drv,
+ struct snd_soc_tplg_pcm *pcm, struct snd_soc_dai *dai)
+{
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ struct snd_soc_tplg_stream_caps *caps;
+ struct snd_sof_pcm *spcm;
+ int stream = SNDRV_PCM_STREAM_PLAYBACK;
+ int ret = 0;
+
+ /* nothing to do for BE DAIs at the moment */
+ if (!pcm)
+ return 0;
+
+ spcm = kzalloc(sizeof(*spcm), GFP_KERNEL);
+ if (!spcm)
+ return -ENOMEM;
+
+ spcm->sdev = sdev;
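+ /* comp_ids are assigned below when the streams are bound to host comps */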
+ spcm->stream[SNDRV_PCM_STREAM_PLAYBACK].comp_id = COMP_ID_UNASSIGNED;
+ spcm->stream[SNDRV_PCM_STREAM_CAPTURE].comp_id = COMP_ID_UNASSIGNED;
+
+ if (pcm) {
+ spcm->pcm = *pcm;
+ dev_dbg(sdev->dev, "tplg: load pcm %s\n", pcm->dai_name);
+ }
+ dai_drv->dobj.private = spcm;
+ list_add(&spcm->list, &sdev->pcm_list);
+
+ /* do we need to allocate playback PCM DMA pages */
+ if (!spcm->pcm.playback)
+ goto capture;
+
+ caps = &spcm->pcm.caps[stream];
+
+ /* allocate playback page table buffer */
+ ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, sdev->dev,
+ PAGE_SIZE, &spcm->stream[stream].page_table);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: can't alloc page table for %s %d\n",
+ caps->name, ret);
+
+ return ret;
+ }
+
+ /* bind pcm to host comp */
+ ret = spcm_bind(sdev, spcm, stream);
+ if (ret) {
+ dev_err(sdev->dev,
+ "error: can't bind pcm to host\n");
+ goto free_playback_tables;
+ }
+
+capture:
+ stream = SNDRV_PCM_STREAM_CAPTURE;
+
+ /* do we need to allocate capture PCM DMA pages */
+ if (!spcm->pcm.capture)
+ return ret;
+
+ caps = &spcm->pcm.caps[stream];
+
+ /* allocate capture page table buffer */
+ ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, sdev->dev,
+ PAGE_SIZE, &spcm->stream[stream].page_table);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: can't alloc page table for %s %d\n",
+ caps->name, ret);
+ goto free_playback_tables;
+ }
+
+ /* bind pcm to host comp */
+ ret = spcm_bind(sdev, spcm, stream);
+ if (ret) {
+ dev_err(sdev->dev,
+ "error: can't bind pcm to host\n");
+ snd_dma_free_pages(&spcm->stream[stream].page_table);
+ goto free_playback_tables;
+ }
+
+ return ret;
+
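+ /* error path: release the playback page table if it was allocated */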
+free_playback_tables:
+ if (spcm->pcm.playback)
+ snd_dma_free_pages(&spcm->stream[SNDRV_PCM_STREAM_PLAYBACK].page_table);
+
+ return ret;
+}
+
+static int sof_dai_unload(struct snd_soc_component *scomp,
+ struct snd_soc_dobj *dobj)
+{
+ struct snd_sof_pcm *spcm = dobj->private;
+
+ /* free PCM DMA pages */
+ if (spcm->pcm.playback)
+ snd_dma_free_pages(&spcm->stream[SNDRV_PCM_STREAM_PLAYBACK].page_table);
+
+ if (spcm->pcm.capture)
+ snd_dma_free_pages(&spcm->stream[SNDRV_PCM_STREAM_CAPTURE].page_table);
+
+ /* remove from list and free spcm */
+ list_del(&spcm->list);
+ kfree(spcm);
+
+ return 0;
+}
+
+static void sof_dai_set_format(struct snd_soc_tplg_hw_config *hw_config,
+ struct sof_ipc_dai_config *config)
+{
+ /* clock directions wrt codec */
+ if (hw_config->bclk_master == SND_SOC_TPLG_BCLK_CM) {
+ /* codec is bclk master */
+ if (hw_config->fsync_master == SND_SOC_TPLG_FSYNC_CM)
+ config->format |= SOF_DAI_FMT_CBM_CFM;
+ else
+ config->format |= SOF_DAI_FMT_CBM_CFS;
+ } else {
+ /* codec is bclk slave */
+ if (hw_config->fsync_master == SND_SOC_TPLG_FSYNC_CM)
+ config->format |= SOF_DAI_FMT_CBS_CFM;
+ else
+ config->format |= SOF_DAI_FMT_CBS_CFS;
+ }
+
+ /* inverted clocks ? */
+ if (hw_config->invert_bclk) {
+ if (hw_config->invert_fsync)
+ config->format |= SOF_DAI_FMT_IB_IF;
+ else
+ config->format |= SOF_DAI_FMT_IB_NF;
+ } else {
+ if (hw_config->invert_fsync)
+ config->format |= SOF_DAI_FMT_NB_IF;
+ else
+ config->format |= SOF_DAI_FMT_NB_NF;
+ }
+}
+
+ /* set config for all DAIs with name matching the link name */
+static int sof_set_dai_config(struct snd_sof_dev *sdev, u32 size,
+ struct snd_soc_dai_link *link,
+ struct sof_ipc_dai_config *config)
+{
+ struct snd_sof_dai *dai;
+ int found = 0;
+
+ list_for_each_entry(dai, &sdev->dai_list, list) {
+ if (!dai->name)
+ continue;
+
+ if (strcmp(link->name, dai->name) == 0) {
+ dai->dai_config = kmemdup(config, size, GFP_KERNEL);
+ if (!dai->dai_config)
+ return -ENOMEM;
+
+ found = 1;
+ }
+ }
+
+ /*
+ * The machine driver may define a DAI link with both playback and
+ * capture enabled, but the topology may support both, only one or
+ * neither of them. Print a warning to notify the user if no
+ * matching DAI is found.
+ */
+ if (!found) {
+ dev_warn(sdev->dev, "warning: failed to find dai for dai link %s\n",
+ link->name);
+ }
+
+ return 0;
+}
+
+static int sof_link_ssp_load(struct snd_soc_component *scomp, int index,
+ struct snd_soc_dai_link *link,
+ struct snd_soc_tplg_link_config *cfg,
+ struct snd_soc_tplg_hw_config *hw_config,
+ struct sof_ipc_dai_config *config)
+{
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ struct snd_soc_tplg_private *private = &cfg->priv;
+ struct sof_ipc_reply reply;
+ u32 size = sizeof(*config);
+ int ret;
+
+ /* handle master/slave and inverted clocks */
+ sof_dai_set_format(hw_config, config);
+
+ /* init IPC */
+ memset(&config->ssp, 0, sizeof(struct sof_ipc_dai_ssp_params));
+ config->hdr.size = size;
+
+ ret = sof_parse_tokens(scomp, &config->ssp, ssp_tokens,
+ ARRAY_SIZE(ssp_tokens), private->array,
+ le32_to_cpu(private->size));
+ if (ret != 0) {
+ dev_err(sdev->dev, "error: parse ssp tokens failed %d\n",
+ le32_to_cpu(private->size));
+ return ret;
+ }
+
+ config->ssp.mclk_rate = le32_to_cpu(hw_config->mclk_rate);
+ config->ssp.bclk_rate = le32_to_cpu(hw_config->bclk_rate);
+ config->ssp.fsync_rate = le32_to_cpu(hw_config->fsync_rate);
+ config->ssp.tdm_slots = le32_to_cpu(hw_config->tdm_slots);
+ config->ssp.tdm_slot_width = le32_to_cpu(hw_config->tdm_slot_width);
+ config->ssp.mclk_direction = hw_config->mclk_direction;
+ config->ssp.rx_slots = le32_to_cpu(hw_config->rx_slots);
+ config->ssp.tx_slots = le32_to_cpu(hw_config->tx_slots);
+
+ dev_dbg(sdev->dev, "tplg: config SSP%d fmt 0x%x mclk %d bclk %d fclk %d width (%d)%d slots %d mclk id %d quirks %d\n",
+ config->dai_index, config->format,
+ config->ssp.mclk_rate, config->ssp.bclk_rate,
+ config->ssp.fsync_rate, config->ssp.sample_valid_bits,
+ config->ssp.tdm_slot_width, config->ssp.tdm_slots,
+ config->ssp.mclk_id, config->ssp.quirks);
+
+ /* validate SSP fsync rate and channel count */
+ if (config->ssp.fsync_rate < 8000 || config->ssp.fsync_rate > 192000) {
+ dev_err(sdev->dev, "error: invalid fsync rate for SSP%d\n",
+ config->dai_index);
+ return -EINVAL;
+ }
+
+ if (config->ssp.tdm_slots < 1 || config->ssp.tdm_slots > 8) {
+ dev_err(sdev->dev, "error: invalid channel count for SSP%d\n",
+ config->dai_index);
+ return -EINVAL;
+ }
+
+ /* send message to DSP */
+ ret = sof_ipc_tx_message(sdev->ipc,
+ config->hdr.cmd, config, size, &reply,
+ sizeof(reply));
+
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: failed to set DAI config for SSP%d\n",
+ config->dai_index);
+ return ret;
+ }
+
+ /* set config for all DAIs with name matching the link name */
+ ret = sof_set_dai_config(sdev, size, link, config);
+ if (ret < 0)
+ dev_err(sdev->dev, "error: failed to save DAI config for SSP%d\n",
+ config->dai_index);
+
+ return ret;
+}
+
+static int sof_link_dmic_load(struct snd_soc_component *scomp, int index,
+ struct snd_soc_dai_link *link,
+ struct snd_soc_tplg_link_config *cfg,
+ struct snd_soc_tplg_hw_config *hw_config,
+ struct sof_ipc_dai_config *config)
+{
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ struct snd_soc_tplg_private *private = &cfg->priv;
+ struct sof_ipc_dai_config *ipc_config;
+ struct sof_ipc_reply reply;
+ struct sof_ipc_fw_ready *ready = &sdev->fw_ready;
+ struct sof_ipc_fw_version *v = &ready->version;
+ u32 size;
+ int ret, j;
+
+ /*
+ * config is only used for the common params of the dmic_params
+ * structure, which does not include the PDM controller config array.
+ * Set the common params to 0.
+ */
+ memset(&config->dmic, 0, sizeof(struct sof_ipc_dai_dmic_params));
+
+ /* get DMIC tokens */
+ ret = sof_parse_tokens(scomp, &config->dmic, dmic_tokens,
+ ARRAY_SIZE(dmic_tokens), private->array,
+ le32_to_cpu(private->size));
+ if (ret != 0) {
+ dev_err(sdev->dev, "error: parse dmic tokens failed %d\n",
+ le32_to_cpu(private->size));
+ return ret;
+ }
+
+ /*
+ * Allocate memory for the DMIC DAI config, accounting for the
+ * variable number of active PDM controllers. This is the IPC
+ * payload used to set the DAI config.
+ */
+ size = sizeof(*config) + sizeof(struct sof_ipc_dai_dmic_pdm_ctrl) *
+ config->dmic.num_pdm_active;
+
+ ipc_config = kzalloc(size, GFP_KERNEL);
+ if (!ipc_config)
+ return -ENOMEM;
+
+ /* copy the common dai config and dmic params */
+ memcpy(ipc_config, config, sizeof(*config));
+
+ /*
+ * Allocate memory for the private member, used to track the PDM
+ * config array index currently being parsed.
+ */
+ sdev->private = kzalloc(sizeof(u32), GFP_KERNEL);
+ if (!sdev->private) {
+ kfree(ipc_config);
+ return -ENOMEM;
+ }
+
+ /* get DMIC PDM tokens */
+ ret = sof_parse_tokens(scomp, &ipc_config->dmic.pdm[0], dmic_pdm_tokens,
+ ARRAY_SIZE(dmic_pdm_tokens), private->array,
+ le32_to_cpu(private->size));
+ if (ret != 0) {
+ dev_err(sdev->dev, "error: parse dmic pdm tokens failed %d\n",
+ le32_to_cpu(private->size));
+ goto err;
+ }
+
+ /* set IPC header size */
+ ipc_config->hdr.size = size;
+
+ /* debug messages */
+ dev_dbg(sdev->dev, "tplg: config DMIC%d driver version %d\n",
+ ipc_config->dai_index, ipc_config->dmic.driver_ipc_version);
+ dev_dbg(sdev->dev, "pdmclk_min %d pdmclk_max %d duty_min %hd\n",
+ ipc_config->dmic.pdmclk_min, ipc_config->dmic.pdmclk_max,
+ ipc_config->dmic.duty_min);
+ dev_dbg(sdev->dev, "duty_max %hd fifo_fs %d num_pdms active %d\n",
+ ipc_config->dmic.duty_max, ipc_config->dmic.fifo_fs,
+ ipc_config->dmic.num_pdm_active);
+ dev_dbg(sdev->dev, "fifo word length %hd\n",
+ ipc_config->dmic.fifo_bits);
+
+ for (j = 0; j < ipc_config->dmic.num_pdm_active; j++) {
+ dev_dbg(sdev->dev, "pdm %hd mic a %hd mic b %hd\n",
+ ipc_config->dmic.pdm[j].id,
+ ipc_config->dmic.pdm[j].enable_mic_a,
+ ipc_config->dmic.pdm[j].enable_mic_b);
+ dev_dbg(sdev->dev, "pdm %hd polarity a %hd polarity b %hd\n",
+ ipc_config->dmic.pdm[j].id,
+ ipc_config->dmic.pdm[j].polarity_mic_a,
+ ipc_config->dmic.pdm[j].polarity_mic_b);
+ dev_dbg(sdev->dev, "pdm %hd clk_edge %hd skew %hd\n",
+ ipc_config->dmic.pdm[j].id,
+ ipc_config->dmic.pdm[j].clk_edge,
+ ipc_config->dmic.pdm[j].skew);
+ }
+
+ if (SOF_ABI_VER(v->major, v->minor, v->micro) < SOF_ABI_VER(3, 0, 1)) {
+ /* this takes care of backwards compatible handling of fifo_bits_b */
+ ipc_config->dmic.reserved_2 = ipc_config->dmic.fifo_bits;
+ }
+
+ /* send message to DSP */
+ ret = sof_ipc_tx_message(sdev->ipc,
+ ipc_config->hdr.cmd, ipc_config, size, &reply,
+ sizeof(reply));
+
+ if (ret < 0) {
+ dev_err(sdev->dev,
+ "error: failed to set DAI config for DMIC%d\n",
+ config->dai_index);
+ goto err;
+ }
+
+ /* set config for all DAIs with name matching the link name */
+ ret = sof_set_dai_config(sdev, size, link, ipc_config);
+ if (ret < 0)
+ dev_err(sdev->dev, "error: failed to save DAI config for DMIC%d\n",
+ config->dai_index);
+
+err:
+ kfree(sdev->private);
+ kfree(ipc_config);
+
+ return ret;
+}
+
+/*
+ * For an HDA link, playback and capture are supported by different
+ * DAIs in the FW. Get the dai_index, set the DMA channel of each DAI
+ * and send the config to the FW, which applies it per dai_index.
+ */
+static int sof_link_hda_process(struct snd_sof_dev *sdev,
+ struct snd_soc_dai_link *link,
+ struct sof_ipc_dai_config *config,
+ int tx_slot,
+ int rx_slot)
+{
+ struct sof_ipc_reply reply;
+ u32 size = sizeof(*config);
+ struct snd_sof_dai *sof_dai;
+ int found = 0;
+ int ret;
+
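+ /*
+ * set the link DMA channel for each DAI matching the link name and
+ * send its config to the DSP
+ */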
+ list_for_each_entry(sof_dai, &sdev->dai_list, list) {
+ if (!sof_dai->name)
+ continue;
+
+ if (strcmp(link->name, sof_dai->name) == 0) {
+ if (sof_dai->comp_dai.direction ==
+ SNDRV_PCM_STREAM_PLAYBACK) {
+ if (!link->dpcm_playback)
+ return -EINVAL;
+
+ config->hda.link_dma_ch = tx_slot;
+ } else {
+ if (!link->dpcm_capture)
+ return -EINVAL;
+
+ config->hda.link_dma_ch = rx_slot;
+ }
+
+ config->dai_index = sof_dai->comp_dai.dai_index;
+ found = 1;
+
+ /* save config in dai component */
+ sof_dai->dai_config = kmemdup(config, size, GFP_KERNEL);
+ if (!sof_dai->dai_config)
+ return -ENOMEM;
+
+ /* send message to DSP */
+ ret = sof_ipc_tx_message(sdev->ipc,
+ config->hdr.cmd, config, size,
+ &reply, sizeof(reply));
+
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: failed to set DAI config for direction:%d of HDA dai %d\n",
+ sof_dai->comp_dai.direction,
+ config->dai_index);
+
+ return ret;
+ }
+ }
+ }
+
+ /*
+ * The machine driver may define a DAI link with both playback and
+ * capture enabled, but the topology may support both, only one or
+ * neither of them. Print a warning to notify the user if no
+ * matching DAI is found.
+ */
+ if (!found) {
+ dev_warn(sdev->dev, "warning: failed to find dai for dai link %s\n",
+ link->name);
+ }
+
+ return 0;
+}
+
+static int sof_link_hda_load(struct snd_soc_component *scomp, int index,
+ struct snd_soc_dai_link *link,
+ struct snd_soc_tplg_link_config *cfg,
+ struct snd_soc_tplg_hw_config *hw_config,
+ struct sof_ipc_dai_config *config)
+{
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ struct snd_soc_dai_link_component dai_component;
+ struct snd_soc_tplg_private *private = &cfg->priv;
+ struct snd_soc_dai *dai;
+ u32 size = sizeof(*config);
+ u32 tx_num = 0;
+ u32 tx_slot = 0;
+ u32 rx_num = 0;
+ u32 rx_slot = 0;
+ int ret;
+
+ /* init IPC */
+ memset(&dai_component, 0, sizeof(dai_component));
+ memset(&config->hda, 0, sizeof(struct sof_ipc_dai_hda_params));
+ config->hdr.size = size;
+
+ /* get any bespoke DAI tokens */
+ ret = sof_parse_tokens(scomp, config, hda_tokens,
+ ARRAY_SIZE(hda_tokens), private->array,
+ le32_to_cpu(private->size));
+ if (ret != 0) {
+ dev_err(sdev->dev, "error: parse hda tokens failed %d\n",
+ le32_to_cpu(private->size));
+ return ret;
+ }
+
+ dai_component.dai_name = link->cpu_dai_name;
+ dai = snd_soc_find_dai(&dai_component);
+ if (!dai) {
+ dev_err(sdev->dev, "error: failed to find dai %s in %s\n",
+ dai_component.dai_name, __func__);
+ return -EINVAL;
+ }
+
+ if (link->dpcm_playback)
+ tx_num = 1;
+
+ if (link->dpcm_capture)
+ rx_num = 1;
+
+ ret = snd_soc_dai_get_channel_map(dai, &tx_num, &tx_slot,
+ &rx_num, &rx_slot);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: failed to get dma channel for HDA%d\n",
+ config->dai_index);
+
+ return ret;
+ }
+
+ ret = sof_link_hda_process(sdev, link, config, tx_slot, rx_slot);
+ if (ret < 0)
+ dev_err(sdev->dev, "error: failed to process hda dai link %s\n",
+ link->name);
+
+ return ret;
+}
+
+/* DAI link - used for any driver specific init */
+static int sof_link_load(struct snd_soc_component *scomp, int index,
+ struct snd_soc_dai_link *link,
+ struct snd_soc_tplg_link_config *cfg)
+{
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ struct snd_soc_tplg_private *private = &cfg->priv;
+ struct sof_ipc_dai_config config;
+ struct snd_soc_tplg_hw_config *hw_config;
+ int num_hw_configs;
+ int ret;
+ int i = 0;
+
+ link->platform_name = dev_name(sdev->dev);
+
+ /*
+ * Set nonatomic property for FE dai links as their trigger action
+ * involves IPCs.
+ */
+ if (!link->no_pcm) {
+ link->nonatomic = true;
+
+ /* nothing more to do for FE dai links */
+ return 0;
+ }
+
+ /* check we have some tokens - we need at least DAI type */
+ if (le32_to_cpu(private->size) == 0) {
+ dev_err(sdev->dev, "error: expected tokens for DAI, none found\n");
+ return -EINVAL;
+ }
+
+ /* Send BE DAI link configurations to DSP */
+ memset(&config, 0, sizeof(config));
+
+ /* get any common DAI tokens */
+ ret = sof_parse_tokens(scomp, &config, dai_link_tokens,
+ ARRAY_SIZE(dai_link_tokens), private->array,
+ le32_to_cpu(private->size));
+ if (ret != 0) {
+ dev_err(sdev->dev, "error: parse link tokens failed %d\n",
+ le32_to_cpu(private->size));
+ return ret;
+ }
+
+ /*
+ * DAI links are expected to have at least 1 hw_config.
+ * But some older topologies might have no hw_config for HDA dai links.
+ */
+ num_hw_configs = le32_to_cpu(cfg->num_hw_configs);
+ if (!num_hw_configs) {
+ if (config.type != SOF_DAI_INTEL_HDA) {
+ dev_err(sdev->dev, "error: unexpected DAI config count %d!\n",
+ le32_to_cpu(cfg->num_hw_configs));
+ return -EINVAL;
+ }
+ } else {
+ dev_dbg(sdev->dev, "tplg: %d hw_configs found, default id: %d!\n",
+ num_hw_configs, le32_to_cpu(cfg->default_hw_config_id));
+
+ for (i = 0; i < num_hw_configs; i++) {
+ if (cfg->hw_config[i].id == cfg->default_hw_config_id)
+ break;
+ }
+
+ if (i == num_hw_configs) {
+ dev_err(sdev->dev, "error: default hw_config id: %d not found!\n",
+ le32_to_cpu(cfg->default_hw_config_id));
+ return -EINVAL;
+ }
+ }
+
+ /* configure dai IPC message */
+ hw_config = &cfg->hw_config[i];
+
+ config.hdr.cmd = SOF_IPC_GLB_DAI_MSG | SOF_IPC_DAI_CONFIG;
+ config.format = le32_to_cpu(hw_config->fmt);
+
+ /* now load DAI specific data and send IPC - type comes from token */
+ switch (config.type) {
+ case SOF_DAI_INTEL_SSP:
+ ret = sof_link_ssp_load(scomp, index, link, cfg, hw_config,
+ &config);
+ break;
+ case SOF_DAI_INTEL_DMIC:
+ ret = sof_link_dmic_load(scomp, index, link, cfg, hw_config,
+ &config);
+ break;
+ case SOF_DAI_INTEL_HDA:
+ ret = sof_link_hda_load(scomp, index, link, cfg, hw_config,
+ &config);
+ break;
+ default:
+ dev_err(sdev->dev, "error: invalid DAI type %d\n", config.type);
+ ret = -EINVAL;
+ break;
+ }
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int sof_link_hda_unload(struct snd_sof_dev *sdev,
+ struct snd_soc_dai_link *link)
+{
+ struct snd_soc_dai_link_component dai_component;
+ struct snd_soc_dai *dai;
+ int ret = 0;
+
+ memset(&dai_component, 0, sizeof(dai_component));
+ dai_component.dai_name = link->cpu_dai_name;
+ dai = snd_soc_find_dai(&dai_component);
+ if (!dai) {
+ dev_err(sdev->dev, "error: failed to find dai %s in %s\n",
+ dai_component.dai_name, __func__);
+ return -EINVAL;
+ }
+
+ /*
+ * FIXME: this call to hw_free is mainly to release the link DMA ID.
+ * This is abusing the API and handling SOC internals is not
+ * recommended. This part will be reworked.
+ */
+ if (dai->driver->ops->hw_free)
+ ret = dai->driver->ops->hw_free(NULL, dai);
+ if (ret < 0)
+ dev_err(sdev->dev, "error: failed to free hda resource for %s\n",
+ link->name);
+
+ return ret;
+}
+
+static int sof_link_unload(struct snd_soc_component *scomp,
+ struct snd_soc_dobj *dobj)
+{
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ struct snd_soc_dai_link *link =
+ container_of(dobj, struct snd_soc_dai_link, dobj);
+
+ struct snd_sof_dai *sof_dai;
+ int ret = 0;
+
+ /* only BE link is loaded by sof */
+ if (!link->no_pcm)
+ return 0;
+
+ list_for_each_entry(sof_dai, &sdev->dai_list, list) {
+ if (!sof_dai->name)
+ continue;
+
+ if (strcmp(link->name, sof_dai->name) == 0)
+ goto found;
+ }
+
+ dev_err(sdev->dev, "error: failed to find dai %s in %s\n",
+ link->name, __func__);
+ return -EINVAL;
+found:
+
+ switch (sof_dai->dai_config->type) {
+ case SOF_DAI_INTEL_SSP:
+ case SOF_DAI_INTEL_DMIC:
+ /* no resource needs to be released for SSP and DMIC */
+ break;
+ case SOF_DAI_INTEL_HDA:
+ ret = sof_link_hda_unload(sdev, link);
+ break;
+ default:
+ dev_err(sdev->dev, "error: invalid DAI type %d\n",
+ sof_dai->dai_config->type);
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+/* DAI link - used for any driver specific init */
+static int sof_route_load(struct snd_soc_component *scomp, int index,
+ struct snd_soc_dapm_route *route)
+{
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ struct sof_ipc_pipe_comp_connect *connect;
+ struct snd_sof_widget *source_swidget, *sink_swidget;
+ struct snd_soc_dobj *dobj = &route->dobj;
+ struct snd_sof_route *sroute;
+ struct sof_ipc_reply reply;
+ int ret = 0;
+
+ /* allocate memory for sroute and connect */
+ sroute = kzalloc(sizeof(*sroute), GFP_KERNEL);
+ if (!sroute)
+ return -ENOMEM;
+
+ sroute->sdev = sdev;
+
+ connect = kzalloc(sizeof(*connect), GFP_KERNEL);
+ if (!connect) {
+ kfree(sroute);
+ return -ENOMEM;
+ }
+
+ connect->hdr.size = sizeof(*connect);
+ connect->hdr.cmd = SOF_IPC_GLB_TPLG_MSG | SOF_IPC_TPLG_COMP_CONNECT;
+
+ dev_dbg(sdev->dev, "sink %s control %s source %s\n",
+ route->sink, route->control ? route->control : "none",
+ route->source);
+
+ /* source component */
+ source_swidget = snd_sof_find_swidget(sdev, (char *)route->source);
+ if (!source_swidget) {
+ dev_err(sdev->dev, "error: source %s not found\n",
+ route->source);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ /*
+ * Virtual widgets of type output/out_drv may be added in topology
+ * for compatibility. These are not handled by the FW.
+ * So, don't send routes whose source/sink widget is of such types
+ * to the DSP.
+ */
+ if (source_swidget->id == snd_soc_dapm_out_drv ||
+ source_swidget->id == snd_soc_dapm_output)
+ goto err;
+
+ connect->source_id = source_swidget->comp_id;
+
+ /* sink component */
+ sink_swidget = snd_sof_find_swidget(sdev, (char *)route->sink);
+ if (!sink_swidget) {
+ dev_err(sdev->dev, "error: sink %s not found\n",
+ route->sink);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ /*
+ * Don't send routes whose sink widget is of type
+ * output or out_drv to the DSP
+ */
+ if (sink_swidget->id == snd_soc_dapm_out_drv ||
+ sink_swidget->id == snd_soc_dapm_output)
+ goto err;
+
+ connect->sink_id = sink_swidget->comp_id;
+
+ /*
+ * For virtual routes, neither the sink nor the source is a buffer.
+ * Since the FW only supports links where a buffer is connected to a
+ * component, such routes would be reported as errors, so do not send
+ * them to the FW when neither source nor sink is a buffer.
+ */
+ if (source_swidget->id != snd_soc_dapm_buffer &&
+ sink_swidget->id != snd_soc_dapm_buffer) {
+ dev_dbg(sdev->dev, "warning: neither source component %s nor sink component %s is a buffer, ignoring link\n",
+ route->source, route->sink);
+ ret = 0;
+ goto err;
+ } else {
+ ret = sof_ipc_tx_message(sdev->ipc,
+ connect->hdr.cmd,
+ connect, sizeof(*connect),
+ &reply, sizeof(reply));
+
+ /* check IPC return value */
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: failed to add route sink %s control %s source %s\n",
+ route->sink,
+ route->control ? route->control : "none",
+ route->source);
+ goto err;
+ }
+
+ /* check IPC reply */
+ if (reply.error < 0) {
+ dev_err(sdev->dev, "error: DSP failed to add route sink %s control %s source %s result %d\n",
+ route->sink,
+ route->control ? route->control : "none",
+ route->source, reply.error);
+ ret = reply.error;
+ goto err;
+ }
+
+ sroute->route = route;
+ dobj->private = sroute;
+ sroute->private = connect;
+
+ /* add route to route list */
+ list_add(&sroute->list, &sdev->route_list);
+
+ return ret;
+ }
+
+err:
+ kfree(connect);
+ kfree(sroute);
+ return ret;
+}
+
+int snd_sof_complete_pipeline(struct snd_sof_dev *sdev,
+ struct snd_sof_widget *swidget)
+{
+ struct sof_ipc_pipe_ready ready;
+ struct sof_ipc_reply reply;
+ int ret;
+
+ dev_dbg(sdev->dev, "tplg: complete pipeline %s id %d\n",
+ swidget->widget->name, swidget->comp_id);
+
+ memset(&ready, 0, sizeof(ready));
+ ready.hdr.size = sizeof(ready);
+ ready.hdr.cmd = SOF_IPC_GLB_TPLG_MSG | SOF_IPC_TPLG_PIPE_COMPLETE;
+ ready.comp_id = swidget->comp_id;
+
+ ret = sof_ipc_tx_message(sdev->ipc,
+ ready.hdr.cmd, &ready, sizeof(ready), &reply,
+ sizeof(reply));
+ if (ret < 0)
+ return ret;
+ return 1;
+}
+
+/* completion - called at completion of firmware loading */
+static void sof_complete(struct snd_soc_component *scomp)
+{
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ struct snd_sof_widget *swidget;
+
+ /* some widget types require completion notification */
+ list_for_each_entry(swidget, &sdev->widget_list, list) {
+ if (swidget->complete)
+ continue;
+
+ switch (swidget->id) {
+ case snd_soc_dapm_scheduler:
+ swidget->complete =
+ snd_sof_complete_pipeline(sdev, swidget);
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+/* manifest - optional to inform component of manifest */
+static int sof_manifest(struct snd_soc_component *scomp, int index,
+ struct snd_soc_tplg_manifest *man)
+{
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ u32 size;
+ u32 abi_version;
+
+ size = le32_to_cpu(man->priv.size);
+
+ /* backward compatible with tplg without ABI info */
+ if (!size) {
+ dev_dbg(sdev->dev, "No topology ABI info\n");
+ return 0;
+ }
+
+ if (size != SOF_TPLG_ABI_SIZE) {
+ dev_err(sdev->dev, "error: invalid topology ABI size\n");
+ return -EINVAL;
+ }
+
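+ /*
+ * the manifest private data carries the topology ABI version as
+ * major/minor/patch bytes
+ */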
+ dev_info(sdev->dev,
+ "Topology: ABI %d:%d:%d Kernel ABI %d:%d:%d\n",
+ man->priv.data[0], man->priv.data[1],
+ man->priv.data[2], SOF_ABI_MAJOR, SOF_ABI_MINOR,
+ SOF_ABI_PATCH);
+
+ abi_version = SOF_ABI_VER(man->priv.data[0],
+ man->priv.data[1],
+ man->priv.data[2]);
+
+ if (SOF_ABI_VERSION_INCOMPATIBLE(SOF_ABI_VERSION, abi_version)) {
+ dev_err(sdev->dev, "error: incompatible topology ABI version\n");
+ return -EINVAL;
+ }
+
+ if (abi_version > SOF_ABI_VERSION) {
+ if (!IS_ENABLED(CONFIG_SND_SOC_SOF_STRICT_ABI_CHECKS)) {
+ dev_warn(sdev->dev, "warn: topology ABI is more recent than kernel\n");
+ } else {
+ dev_err(sdev->dev, "error: topology ABI is more recent than kernel\n");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+/* vendor specific kcontrol handlers available for binding */
+static const struct snd_soc_tplg_kcontrol_ops sof_io_ops[] = {
+ {SOF_TPLG_KCTL_VOL_ID, snd_sof_volume_get, snd_sof_volume_put},
+ {SOF_TPLG_KCTL_BYTES_ID, snd_sof_bytes_get, snd_sof_bytes_put},
+ {SOF_TPLG_KCTL_ENUM_ID, snd_sof_enum_get, snd_sof_enum_put},
+ {SOF_TPLG_KCTL_SWITCH_ID, snd_sof_switch_get, snd_sof_switch_put},
+};
+
+/* vendor specific bytes ext handlers available for binding */
+static const struct snd_soc_tplg_bytes_ext_ops sof_bytes_ext_ops[] = {
+ {SOF_TPLG_KCTL_BYTES_ID, snd_sof_bytes_ext_get, snd_sof_bytes_ext_put},
+};
+
+static struct snd_soc_tplg_ops sof_tplg_ops = {
+ /* external kcontrol init - used for any driver specific init */
+ .control_load = sof_control_load,
+ .control_unload = sof_control_unload,
+
+ /* external kcontrol init - used for any driver specific init */
+ .dapm_route_load = sof_route_load,
+ .dapm_route_unload = sof_route_unload,
+
+ /* external widget init - used for any driver specific init */
+ /* .widget_load is not currently used */
+ .widget_ready = sof_widget_ready,
+ .widget_unload = sof_widget_unload,
+
+ /* FE DAI - used for any driver specific init */
+ .dai_load = sof_dai_load,
+ .dai_unload = sof_dai_unload,
+
+ /* DAI link - used for any driver specific init */
+ .link_load = sof_link_load,
+ .link_unload = sof_link_unload,
+
+ /* completion - called at completion of firmware loading */
+ .complete = sof_complete,
+
+ /* manifest - optional to inform component of manifest */
+ .manifest = sof_manifest,
+
+ /* vendor specific kcontrol handlers available for binding */
+ .io_ops = sof_io_ops,
+ .io_ops_count = ARRAY_SIZE(sof_io_ops),
+
+ /* vendor specific bytes ext handlers available for binding */
+ .bytes_ext_ops = sof_bytes_ext_ops,
+ .bytes_ext_ops_count = ARRAY_SIZE(sof_bytes_ext_ops),
+};
+
+int snd_sof_init_topology(struct snd_sof_dev *sdev,
+ struct snd_soc_tplg_ops *ops)
+{
+ /* TODO: support linked list of topologies */
+ sdev->tplg_ops = ops;
+ return 0;
+}
+EXPORT_SYMBOL(snd_sof_init_topology);
+
+int snd_sof_load_topology(struct snd_sof_dev *sdev, const char *file)
+{
+ const struct firmware *fw;
+ int ret;
+
+ dev_dbg(sdev->dev, "loading topology:%s\n", file);
+
+ ret = request_firmware(&fw, file, sdev->dev);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: tplg request firmware %s failed err: %d\n",
+ file, ret);
+ return ret;
+ }
+
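+ /*
+ * hand the topology image to the ASoC core, which parses it and
+ * calls back into sof_tplg_ops
+ */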
+ ret = snd_soc_tplg_component_load(sdev->component,
+ &sof_tplg_ops, fw,
+ SND_SOC_TPLG_INDEX_ALL);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: tplg component load failed %d\n",
+ ret);
+ ret = -EINVAL;
+ }
+
+ release_firmware(fw);
+ return ret;
+}
+EXPORT_SYMBOL(snd_sof_load_topology);
diff --git a/sound/soc/sof/trace.c b/sound/soc/sof/trace.c
new file mode 100644
index 000000000000..d588e4b70fad
--- /dev/null
+++ b/sound/soc/sof/trace.c
@@ -0,0 +1,297 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018 Intel Corporation. All rights reserved.
+//
+// Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+//
+
+#include <linux/debugfs.h>
+#include <linux/sched/signal.h>
+#include "sof-priv.h"
+#include "ops.h"
+
+static size_t sof_wait_trace_avail(struct snd_sof_dev *sdev,
+ loff_t pos, size_t buffer_size)
+{
+ wait_queue_entry_t wait;
+ loff_t host_offset = READ_ONCE(sdev->host_offset);
+
+ /*
+ * If the host offset is less than the local pos, the write pointer
+ * of the host DMA buffer has wrapped. Output the trace data from the
+ * end of the host DMA buffer first.
+ */
+ if (host_offset < pos)
+ return buffer_size - pos;
+
+ /* If there is available trace data now, it is unnecessary to wait. */
+ if (host_offset > pos)
+ return host_offset - pos;
+
+ /* wait for available trace data from FW */
+ init_waitqueue_entry(&wait, current);
+ set_current_state(TASK_INTERRUPTIBLE);
+ add_wait_queue(&sdev->trace_sleep, &wait);
+
+ if (!signal_pending(current)) {
+ /* set timeout to max value, no error code */
+ schedule_timeout(MAX_SCHEDULE_TIMEOUT);
+ }
+ remove_wait_queue(&sdev->trace_sleep, &wait);
+
+ /* return bytes available for copy */
+ host_offset = READ_ONCE(sdev->host_offset);
+ if (host_offset < pos)
+ return buffer_size - pos;
+
+ return host_offset - pos;
+}
+
+static ssize_t sof_dfsentry_trace_read(struct file *file, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct snd_sof_dfsentry *dfse = file->private_data;
+ struct snd_sof_dev *sdev = dfse->sdev;
+ unsigned long rem;
+ loff_t lpos = *ppos;
+ size_t avail, buffer_size = dfse->size;
+ u64 lpos_64;
+
+ /* make sure we know about any failures on the DSP side */
+ sdev->dtrace_error = false;
+
+ /* check pos and count */
+ if (lpos < 0)
+ return -EINVAL;
+ if (!count)
+ return 0;
+
+ /* check for buffer wrap and count overflow */
+ lpos_64 = lpos;
+ lpos = do_div(lpos_64, buffer_size);
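+ /* lpos is now the read offset within the circular trace buffer */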
+
+ if (count > buffer_size - lpos) /* min() not used to avoid sparse warnings */
+ count = buffer_size - lpos;
+
+ /* get available count based on current host offset */
+ avail = sof_wait_trace_avail(sdev, lpos, buffer_size);
+ if (sdev->dtrace_error) {
+ dev_err(sdev->dev, "error: trace IO error\n");
+ return -EIO;
+ }
+
+ /* make sure count is <= avail */
+ count = avail > count ? count : avail;
+
+ /* copy available trace data to debugfs */
+ rem = copy_to_user(buffer, ((u8 *)(dfse->buf) + lpos), count);
+ if (rem)
+ return -EFAULT;
+
+ *ppos += count;
+
+ /* move debugfs reading position */
+ return count;
+}
+
+static const struct file_operations sof_dfs_trace_fops = {
+ .open = simple_open,
+ .read = sof_dfsentry_trace_read,
+ .llseek = default_llseek,
+};
+
+static int trace_debugfs_create(struct snd_sof_dev *sdev)
+{
+ struct snd_sof_dfsentry *dfse;
+
+ if (!sdev)
+ return -EINVAL;
+
+ dfse = devm_kzalloc(sdev->dev, sizeof(*dfse), GFP_KERNEL);
+ if (!dfse)
+ return -ENOMEM;
+
+ dfse->type = SOF_DFSENTRY_TYPE_BUF;
+ dfse->buf = sdev->dmatb.area;
+ dfse->size = sdev->dmatb.bytes;
+ dfse->sdev = sdev;
+
+ dfse->dfsentry = debugfs_create_file("trace", 0444, sdev->debugfs_root,
+ dfse, &sof_dfs_trace_fops);
+ if (!dfse->dfsentry) {
+ /* can't rely on debugfs, only log error and keep going */
+ dev_err(sdev->dev,
+ "error: cannot create debugfs entry for trace\n");
+ }
+
+ return 0;
+}
+
+int snd_sof_init_trace_ipc(struct snd_sof_dev *sdev)
+{
+ struct sof_ipc_dma_trace_params params;
+ struct sof_ipc_reply ipc_reply;
+ int ret;
+
+ if (sdev->dtrace_is_enabled || !sdev->dma_trace_pages)
+ return -EINVAL;
+
+ /* set IPC parameters */
+ params.hdr.size = sizeof(params);
+ params.hdr.cmd = SOF_IPC_GLB_TRACE_MSG | SOF_IPC_TRACE_DMA_PARAMS;
+ params.buffer.phy_addr = sdev->dmatp.addr;
+ params.buffer.size = sdev->dmatb.bytes;
+ params.buffer.pages = sdev->dma_trace_pages;
+ params.stream_tag = 0;
+
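+ /* restart tracking of the DSP write position for this trace session */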
+ sdev->host_offset = 0;
+
+ ret = snd_sof_dma_trace_init(sdev, &params.stream_tag);
+ if (ret < 0) {
+ dev_err(sdev->dev,
+ "error: fail in snd_sof_dma_trace_init %d\n", ret);
+ return ret;
+ }
+ dev_dbg(sdev->dev, "stream_tag: %d\n", params.stream_tag);
+
+ /* send IPC to the DSP */
+ ret = sof_ipc_tx_message(sdev->ipc,
+ params.hdr.cmd, &params, sizeof(params),
+ &ipc_reply, sizeof(ipc_reply));
+ if (ret < 0) {
+ dev_err(sdev->dev,
+ "error: can't set params for DMA for trace %d\n", ret);
+ goto trace_release;
+ }
+
+ ret = snd_sof_dma_trace_trigger(sdev, SNDRV_PCM_TRIGGER_START);
+ if (ret < 0) {
+ dev_err(sdev->dev,
+ "error: snd_sof_dma_trace_trigger: start: %d\n", ret);
+ goto trace_release;
+ }
+
+ sdev->dtrace_is_enabled = true;
+
+ return 0;
+
+trace_release:
+ snd_sof_dma_trace_release(sdev);
+ return ret;
+}
+
+int snd_sof_init_trace(struct snd_sof_dev *sdev)
+{
+ int ret;
+
+ /* set false before start initialization */
+ sdev->dtrace_is_enabled = false;
+
+ /* allocate trace page table buffer */
+ ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, sdev->dev,
+ PAGE_SIZE, &sdev->dmatp);
+ if (ret < 0) {
+ dev_err(sdev->dev,
+ "error: can't alloc page table for trace %d\n", ret);
+ return ret;
+ }
+
+ /* allocate trace data buffer */
+ ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV_SG, sdev->dev,
+ DMA_BUF_SIZE_FOR_TRACE, &sdev->dmatb);
+ if (ret < 0) {
+ dev_err(sdev->dev,
+ "error: can't alloc buffer for trace %d\n", ret);
+ goto page_err;
+ }
+
+ /* create compressed page table for audio firmware */
+ ret = snd_sof_create_page_table(sdev, &sdev->dmatb, sdev->dmatp.area,
+ sdev->dmatb.bytes);
+ if (ret < 0)
+ goto table_err;
+
+ sdev->dma_trace_pages = ret;
+ dev_dbg(sdev->dev, "dma_trace_pages: %d\n", sdev->dma_trace_pages);
+
+ if (sdev->first_boot) {
+ ret = trace_debugfs_create(sdev);
+ if (ret < 0)
+ goto table_err;
+ }
+
+ init_waitqueue_head(&sdev->trace_sleep);
+
+ ret = snd_sof_init_trace_ipc(sdev);
+ if (ret < 0)
+ goto table_err;
+
+ return 0;
+table_err:
+ sdev->dma_trace_pages = 0;
+ snd_dma_free_pages(&sdev->dmatb);
+page_err:
+ snd_dma_free_pages(&sdev->dmatp);
+ return ret;
+}
+EXPORT_SYMBOL(snd_sof_init_trace);
+
+int snd_sof_trace_update_pos(struct snd_sof_dev *sdev,
+ struct sof_ipc_dma_trace_posn *posn)
+{
+ if (sdev->dtrace_is_enabled && sdev->host_offset != posn->host_offset) {
+ sdev->host_offset = posn->host_offset;
+ wake_up(&sdev->trace_sleep);
+ }
+
+ if (posn->overflow != 0)
+ dev_err(sdev->dev,
+ "error: DSP trace buffer overflow %u bytes. Total messages %d\n",
+ posn->overflow, posn->messages);
+
+ return 0;
+}
+
+/* an error has occurred within the DSP that prevents further trace */
+void snd_sof_trace_notify_for_error(struct snd_sof_dev *sdev)
+{
+ if (sdev->dtrace_is_enabled) {
+ dev_err(sdev->dev, "error: waking up any trace sleepers\n");
+ sdev->dtrace_error = true;
+ wake_up(&sdev->trace_sleep);
+ }
+}
+EXPORT_SYMBOL(snd_sof_trace_notify_for_error);
+
+void snd_sof_release_trace(struct snd_sof_dev *sdev)
+{
+ int ret;
+
+ if (!sdev->dtrace_is_enabled)
+ return;
+
+ ret = snd_sof_dma_trace_trigger(sdev, SNDRV_PCM_TRIGGER_STOP);
+ if (ret < 0)
+ dev_err(sdev->dev,
+ "error: snd_sof_dma_trace_trigger: stop: %d\n", ret);
+
+ ret = snd_sof_dma_trace_release(sdev);
+ if (ret < 0)
+ dev_err(sdev->dev,
+ "error: fail in snd_sof_dma_trace_release %d\n", ret);
+
+ sdev->dtrace_is_enabled = false;
+}
+EXPORT_SYMBOL(snd_sof_release_trace);
+
+void snd_sof_free_trace(struct snd_sof_dev *sdev)
+{
+ snd_sof_release_trace(sdev);
+
+ snd_dma_free_pages(&sdev->dmatb);
+ snd_dma_free_pages(&sdev->dmatp);
+}
+EXPORT_SYMBOL(snd_sof_free_trace);
diff --git a/sound/soc/sof/utils.c b/sound/soc/sof/utils.c
new file mode 100644
index 000000000000..2ac4c3da0320
--- /dev/null
+++ b/sound/soc/sof/utils.c
@@ -0,0 +1,112 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018 Intel Corporation. All rights reserved.
+//
+// Author: Keyon Jie <yang.jie@linux.intel.com>
+//
+
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/platform_device.h>
+#include <sound/soc.h>
+#include <sound/sof.h>
+#include "sof-priv.h"
+
+/*
+ * Register IO
+ *
+ * The sof_io_xyz() wrappers are typically referenced in snd_sof_dsp_ops
+ * structures and cannot be inlined.
+ */
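+
+/*
+ * A minimal sketch of how a platform driver is expected to reference
+ * them, assuming the read/write hooks declared for struct
+ * snd_sof_dsp_ops in sof-priv.h (the ops name here is hypothetical):
+ *
+ * static const struct snd_sof_dsp_ops example_ops = {
+ * .read = sof_io_read,
+ * .write = sof_io_write,
+ * .read64 = sof_io_read64,
+ * .write64 = sof_io_write64,
+ * };
+ */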
+
+void sof_io_write(struct snd_sof_dev *sdev, void __iomem *addr, u32 value)
+{
+ writel(value, addr);
+}
+EXPORT_SYMBOL(sof_io_write);
+
+u32 sof_io_read(struct snd_sof_dev *sdev, void __iomem *addr)
+{
+ return readl(addr);
+}
+EXPORT_SYMBOL(sof_io_read);
+
+void sof_io_write64(struct snd_sof_dev *sdev, void __iomem *addr, u64 value)
+{
+ writeq(value, addr);
+}
+EXPORT_SYMBOL(sof_io_write64);
+
+u64 sof_io_read64(struct snd_sof_dev *sdev, void __iomem *addr)
+{
+ return readq(addr);
+}
+EXPORT_SYMBOL(sof_io_read64);
+
+/*
+ * IPC Mailbox IO
+ */
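+
+/*
+ * These helpers copy IPC messages between the host and the DSP mailbox
+ * window, which lives at the given offset inside the BAR selected by
+ * sdev->mailbox_bar. A typical caller writes a request with
+ * sof_mailbox_write() before ringing the DSP doorbell and reads the
+ * reply back with sof_mailbox_read() once the DSP has acknowledged the
+ * IPC (the exact sequence is platform-specific).
+ */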
+
+void sof_mailbox_write(struct snd_sof_dev *sdev, u32 offset,
+ void *message, size_t bytes)
+{
+ void __iomem *dest = sdev->bar[sdev->mailbox_bar] + offset;
+
+ memcpy_toio(dest, message, bytes);
+}
+EXPORT_SYMBOL(sof_mailbox_write);
+
+void sof_mailbox_read(struct snd_sof_dev *sdev, u32 offset,
+ void *message, size_t bytes)
+{
+ void __iomem *src = sdev->bar[sdev->mailbox_bar] + offset;
+
+ memcpy_fromio(message, src, bytes);
+}
+EXPORT_SYMBOL(sof_mailbox_read);
+
+/*
+ * Memory copy.
+ */
+
+void sof_block_write(struct snd_sof_dev *sdev, u32 bar, u32 offset, void *src,
+ size_t size)
+{
+ void __iomem *dest = sdev->bar[bar] + offset;
+ const u8 *src_byte = src;
+ u32 affected_mask;
+ u32 tmp;
+ int m, n;
+
+ m = size / 4;
+ n = size % 4;
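+
+ /*
+ * worked example: size = 10 gives m = 2 and n = 2, so two full
+ * 32-bit words are copied below and only the two remaining bytes of
+ * the third word are merged in (affected_mask = 0xffff)
+ */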
+
+ /* __iowrite32_copy() copies in 32-bit units, so the count is size / 4 */
+ __iowrite32_copy(dest, src, m);
+
+ if (n) {
+ affected_mask = (1 << (8 * n)) - 1;
+
+ /* first read the 32-bit word at dest, update only the affected
+ * bytes, then write it back; the unaffected bytes must be left
+ * unchanged
+ */
+ tmp = ioread32(dest + m * 4);
+ tmp &= ~affected_mask;
+
+ tmp |= *(u32 *)(src_byte + m * 4) & affected_mask;
+ iowrite32(tmp, dest + m * 4);
+ }
+}
+EXPORT_SYMBOL(sof_block_write);
+
+void sof_block_read(struct snd_sof_dev *sdev, u32 bar, u32 offset, void *dest,
+ size_t size)
+{
+ void __iomem *src = sdev->bar[bar] + offset;
+
+ memcpy_fromio(dest, src, size);
+}
+EXPORT_SYMBOL(sof_block_read);
diff --git a/sound/soc/sof/xtensa/Kconfig b/sound/soc/sof/xtensa/Kconfig
new file mode 100644
index 000000000000..8a9343b85146
--- /dev/null
+++ b/sound/soc/sof/xtensa/Kconfig
@@ -0,0 +1,2 @@
+config SND_SOC_SOF_XTENSA
+ tristate
diff --git a/sound/soc/sof/xtensa/Makefile b/sound/soc/sof/xtensa/Makefile
new file mode 100644
index 000000000000..cc89c7472a38
--- /dev/null
+++ b/sound/soc/sof/xtensa/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+
+snd-sof-xtensa-dsp-objs := core.o
+
+obj-$(CONFIG_SND_SOC_SOF_XTENSA) += snd-sof-xtensa-dsp.o
diff --git a/sound/soc/sof/xtensa/core.c b/sound/soc/sof/xtensa/core.c
new file mode 100644
index 000000000000..c3ad23a85b99
--- /dev/null
+++ b/sound/soc/sof/xtensa/core.c
@@ -0,0 +1,138 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018 Intel Corporation. All rights reserved.
+//
+// Author: Pan Xiuli <xiuli.pan@linux.intel.com>
+//
+
+#include <linux/module.h>
+#include <sound/sof.h>
+#include <sound/sof/xtensa.h>
+#include "../sof-priv.h"
+
+struct xtensa_exception_cause {
+ u32 id;
+ const char *msg;
+ const char *description;
+};
+
+/*
+ * From section 4.4.1.5, table 4-64 "Exception Causes", of the Xtensa
+ * Instruction Set Architecture (ISA) Reference Manual
+ */
+static const struct xtensa_exception_cause xtensa_exception_causes[] = {
+ {0, "IllegalInstructionCause", "Illegal instruction"},
+ {1, "SyscallCause", "SYSCALL instruction"},
+ {2, "InstructionFetchErrorCause",
+ "Processor internal physical address or data error during instruction fetch"},
+ {3, "LoadStoreErrorCause",
+ "Processor internal physical address or data error during load or store"},
+ {4, "Level1InterruptCause",
+ "Level-1 interrupt as indicated by set level-1 bits in the INTERRUPT register"},
+ {5, "AllocaCause",
+ "MOVSP instruction, if caller’s registers are not in the register file"},
+ {6, "IntegerDivideByZeroCause",
+ "QUOS, QUOU, REMS, or REMU divisor operand is zero"},
+ {8, "PrivilegedCause",
+ "Attempt to execute a privileged operation when CRING ? 0"},
+ {9, "LoadStoreAlignmentCause", "Load or store to an unaligned address"},
+ {12, "InstrPIFDataErrorCause",
+ "PIF data error during instruction fetch"},
+ {13, "LoadStorePIFDataErrorCause",
+ "Synchronous PIF data error during LoadStore access"},
+ {14, "InstrPIFAddrErrorCause",
+ "PIF address error during instruction fetch"},
+ {15, "LoadStorePIFAddrErrorCause",
+ "Synchronous PIF address error during LoadStore access"},
+ {16, "InstTLBMissCause", "Error during Instruction TLB refill"},
+ {17, "InstTLBMultiHitCause",
+ "Multiple instruction TLB entries matched"},
+ {18, "InstFetchPrivilegeCause",
+ "An instruction fetch referenced a virtual address at a ring level less than CRING"},
+ {20, "InstFetchProhibitedCause",
+ "An instruction fetch referenced a page mapped with an attribute that does not permit instruction fetch"},
+ {24, "LoadStoreTLBMissCause",
+ "Error during TLB refill for a load or store"},
+ {25, "LoadStoreTLBMultiHitCause",
+ "Multiple TLB entries matched for a load or store"},
+ {26, "LoadStorePrivilegeCause",
+ "A load or store referenced a virtual address at a ring level less than CRING"},
+ {28, "LoadProhibitedCause",
+ "A load referenced a page mapped with an attribute that does not permit loads"},
+ {32, "Coprocessor0Disabled",
+ "Coprocessor 0 instruction when cp0 disabled"},
+ {33, "Coprocessor1Disabled",
+ "Coprocessor 1 instruction when cp1 disabled"},
+ {34, "Coprocessor2Disabled",
+ "Coprocessor 2 instruction when cp2 disabled"},
+ {35, "Coprocessor3Disabled",
+ "Coprocessor 3 instruction when cp3 disabled"},
+ {36, "Coprocessor4Disabled",
+ "Coprocessor 4 instruction when cp4 disabled"},
+ {37, "Coprocessor5Disabled",
+ "Coprocessor 5 instruction when cp5 disabled"},
+ {38, "Coprocessor6Disabled",
+ "Coprocessor 6 instruction when cp6 disabled"},
+ {39, "Coprocessor7Disabled",
+ "Coprocessor 7 instruction when cp7 disabled"},
+};
+
+/* only Xtensa is needed at the moment */
+static void xtensa_dsp_oops(struct snd_sof_dev *sdev, void *oops)
+{
+ struct sof_ipc_dsp_oops_xtensa *xoops = oops;
+ int i;
+
+ dev_err(sdev->dev, "error: DSP Firmware Oops\n");
+ for (i = 0; i < ARRAY_SIZE(xtensa_exception_causes); i++) {
+ if (xtensa_exception_causes[i].id == xoops->exccause) {
+ dev_err(sdev->dev, "error: Exception Cause: %s, %s\n",
+ xtensa_exception_causes[i].msg,
+ xtensa_exception_causes[i].description);
+ }
+ }
+ dev_err(sdev->dev, "EXCCAUSE 0x%8.8x EXCVADDR 0x%8.8x PS 0x%8.8x SAR 0x%8.8x\n",
+ xoops->exccause, xoops->excvaddr, xoops->ps, xoops->sar);
+ dev_err(sdev->dev, "EPC1 0x%8.8x EPC2 0x%8.8x EPC3 0x%8.8x EPC4 0x%8.8x",
+ xoops->epc1, xoops->epc2, xoops->epc3, xoops->epc4);
+ dev_err(sdev->dev, "EPC5 0x%8.8x EPC6 0x%8.8x EPC7 0x%8.8x DEPC 0x%8.8x",
+ xoops->epc5, xoops->epc6, xoops->epc7, xoops->depc);
+ dev_err(sdev->dev, "EPS2 0x%8.8x EPS3 0x%8.8x EPS4 0x%8.8x EPS5 0x%8.8x",
+ xoops->eps2, xoops->eps3, xoops->eps4, xoops->eps5);
+ dev_err(sdev->dev, "EPS6 0x%8.8x EPS7 0x%8.8x INTENABL 0x%8.8x INTERRU 0x%8.8x",
+ xoops->eps6, xoops->eps7, xoops->intenable, xoops->interrupt);
+}
+
+static void xtensa_stack(struct snd_sof_dev *sdev, void *oops, u32 *stack,
+ u32 stack_words)
+{
+ struct sof_ipc_dsp_oops_xtensa *xoops = oops;
+ u32 stack_ptr = xoops->stack;
+ /* 4 words * 8 hex chars + 3 separating spaces + 1 terminating NUL */
+ unsigned char buf[4 * 8 + 3 + 1];
+ int i;
+
+ dev_err(sdev->dev, "stack dump from 0x%8.8x\n", stack_ptr);
+
+ /*
+ * example output:
+ * 0x0049fbb0: 8000f2d0 0049fc00 6f6c6c61 00632e63
+ */
+ for (i = 0; i < stack_words; i += 4) {
+ /* stack is a u32 pointer, so index it in words; the printed
+ * address advances by 16 bytes (4 words) per line
+ */
+ hex_dump_to_buffer(stack + i, 16, 16, 4,
+ buf, sizeof(buf), false);
+ dev_err(sdev->dev, "0x%08x: %s\n", stack_ptr + i * 4, buf);
+ }
+}
+
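+/*
+ * DSP architecture ops for Xtensa-based firmware. A platform driver is
+ * expected to point its arch_ops at this structure, e.g. (assuming the
+ * arch_ops member declared in sof-priv.h):
+ *
+ * .arch_ops = &sof_xtensa_arch_ops
+ */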
+const struct sof_arch_ops sof_xtensa_arch_ops = {
+ .dsp_oops = xtensa_dsp_oops,
+ .dsp_stack = xtensa_stack,
+};
+EXPORT_SYMBOL(sof_xtensa_arch_ops);
+
+MODULE_DESCRIPTION("SOF Xtensa DSP support");
+MODULE_LICENSE("Dual BSD/GPL");