author    Mark Brown  2022-03-11 20:21:08 +0000
committer Mark Brown  2022-03-11 20:21:08 +0000
commit    c1156cce4719ab2633575dd4822606f959142ef7 (patch)
tree      0132316045b785f0d411a40a1a1e2b3f13dac3d9
parent    375a347da4889f64d86e1ab7f4e6702b6e9bf299 (diff)
parent    092cf7b26a48a542d9aabfc0f517fa263102a4c1 (diff)
ASoC: Intel: AVS - Audio DSP for cAVS
Merge series from Cezary Rojewski <cezary.rojewski@intel.com>:

A continuation of the cleanup of the Intel SST solutions found in sound/soc/intel/. With two major chapters released last year, catpt [1] and the removal of the haswell solution [2], the time has come for the Skylake driver. Throughout 2019, 2020 and 2021 the Skylake driver received many fixes and even attempted refactors, as seen in the fundamental overhaul [3], IPC flow adjustments [4] and LARGE_CONFIG overhaul [5] series. Unfortunately, the story repeats itself: problems are found within the core of the driver, and painting it with different colors does not change the fact that it is still a house of cards.

As the changes needed to address those issues would make the Skylake solution incompatible with its previous revisions, a decision has been made to provide a new solution instead, one that in time would deprecate and replace the Skylake driver. That solution is called AVS, after the AudioDSP architecture name: Audio-Voice-Speech. It is meant to support exactly the same range of platforms as its predecessor: SKL, KBL, AML and APL.

Note: this series depends on the HDA series [6], which exposes several codec-organization functions allowing for reduced code size on the avs-driver side.

Note: this series does not add a fully functional driver, as its size would get out of control. The focus here is on adding the IPC protocol and the code-loading code.
-rw-r--r--  include/sound/hdaudio.h          |   3
-rw-r--r--  include/sound/hdaudio_ext.h      |  50
-rw-r--r--  include/sound/soc-dapm.h         |   1
-rw-r--r--  sound/soc/intel/Kconfig          |  12
-rw-r--r--  sound/soc/intel/Makefile         |   1
-rw-r--r--  sound/soc/intel/avs/Makefile     |   6
-rw-r--r--  sound/soc/intel/avs/avs.h        | 247
-rw-r--r--  sound/soc/intel/avs/cldma.c      | 316
-rw-r--r--  sound/soc/intel/avs/cldma.h      |  29
-rw-r--r--  sound/soc/intel/avs/core.c       |  61
-rw-r--r--  sound/soc/intel/avs/dsp.c        | 302
-rw-r--r--  sound/soc/intel/avs/ipc.c        | 382
-rw-r--r--  sound/soc/intel/avs/loader.c     | 608
-rw-r--r--  sound/soc/intel/avs/messages.c   | 695
-rw-r--r--  sound/soc/intel/avs/messages.h   | 752
-rw-r--r--  sound/soc/intel/avs/registers.h  |  75
-rw-r--r--  sound/soc/intel/avs/utils.c      | 301
-rw-r--r--  sound/soc/soc-core.c             |   1
-rw-r--r--  sound/soc/soc-dapm.c             |  15
19 files changed, 3857 insertions, 0 deletions
diff --git a/include/sound/hdaudio.h b/include/sound/hdaudio.h
index 6a90ce405e60..15f15075238d 100644
--- a/include/sound/hdaudio.h
+++ b/include/sound/hdaudio.h
@@ -9,6 +9,7 @@
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/pm_runtime.h>
#include <linux/timecounter.h>
#include <sound/core.h>
@@ -448,6 +449,8 @@ static inline u16 snd_hdac_reg_readw(struct hdac_bus *bus, void __iomem *addr)
#define snd_hdac_reg_writel(bus, addr, val) writel(val, addr)
#define snd_hdac_reg_readl(bus, addr) readl(addr)
+#define snd_hdac_reg_writeq(bus, addr, val) writeq(val, addr)
+#define snd_hdac_reg_readq(bus, addr) readq(addr)
/*
* macros for easy use
diff --git a/include/sound/hdaudio_ext.h b/include/sound/hdaudio_ext.h
index 77123c3e4095..5a538799ff77 100644
--- a/include/sound/hdaudio_ext.h
+++ b/include/sound/hdaudio_ext.h
@@ -2,6 +2,8 @@
#ifndef __SOUND_HDAUDIO_EXT_H
#define __SOUND_HDAUDIO_EXT_H
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/iopoll.h>
#include <sound/hdaudio.h>
int snd_hdac_ext_bus_init(struct hdac_bus *bus, struct device *dev,
@@ -143,6 +145,54 @@ void snd_hdac_ext_bus_link_power(struct hdac_device *codec, bool enable);
writew(((readw(addr + reg) & ~(mask)) | (val)), \
addr + reg)
+#define snd_hdac_adsp_writeb(chip, reg, value) \
+ snd_hdac_reg_writeb(chip, (chip)->dsp_ba + (reg), value)
+#define snd_hdac_adsp_readb(chip, reg) \
+ snd_hdac_reg_readb(chip, (chip)->dsp_ba + (reg))
+#define snd_hdac_adsp_writew(chip, reg, value) \
+ snd_hdac_reg_writew(chip, (chip)->dsp_ba + (reg), value)
+#define snd_hdac_adsp_readw(chip, reg) \
+ snd_hdac_reg_readw(chip, (chip)->dsp_ba + (reg))
+#define snd_hdac_adsp_writel(chip, reg, value) \
+ snd_hdac_reg_writel(chip, (chip)->dsp_ba + (reg), value)
+#define snd_hdac_adsp_readl(chip, reg) \
+ snd_hdac_reg_readl(chip, (chip)->dsp_ba + (reg))
+#define snd_hdac_adsp_writeq(chip, reg, value) \
+ snd_hdac_reg_writeq(chip, (chip)->dsp_ba + (reg), value)
+#define snd_hdac_adsp_readq(chip, reg) \
+ snd_hdac_reg_readq(chip, (chip)->dsp_ba + (reg))
+
+#define snd_hdac_adsp_updateb(chip, reg, mask, val) \
+ snd_hdac_adsp_writeb(chip, reg, \
+ (snd_hdac_adsp_readb(chip, reg) & ~(mask)) | (val))
+#define snd_hdac_adsp_updatew(chip, reg, mask, val) \
+ snd_hdac_adsp_writew(chip, reg, \
+ (snd_hdac_adsp_readw(chip, reg) & ~(mask)) | (val))
+#define snd_hdac_adsp_updatel(chip, reg, mask, val) \
+ snd_hdac_adsp_writel(chip, reg, \
+ (snd_hdac_adsp_readl(chip, reg) & ~(mask)) | (val))
+#define snd_hdac_adsp_updateq(chip, reg, mask, val) \
+ snd_hdac_adsp_writeq(chip, reg, \
+ (snd_hdac_adsp_readq(chip, reg) & ~(mask)) | (val))
+
+#define snd_hdac_adsp_readb_poll(chip, reg, val, cond, delay_us, timeout_us) \
+ readb_poll_timeout((chip)->dsp_ba + (reg), val, cond, \
+ delay_us, timeout_us)
+#define snd_hdac_adsp_readw_poll(chip, reg, val, cond, delay_us, timeout_us) \
+ readw_poll_timeout((chip)->dsp_ba + (reg), val, cond, \
+ delay_us, timeout_us)
+#define snd_hdac_adsp_readl_poll(chip, reg, val, cond, delay_us, timeout_us) \
+ readl_poll_timeout((chip)->dsp_ba + (reg), val, cond, \
+ delay_us, timeout_us)
+#define snd_hdac_adsp_readq_poll(chip, reg, val, cond, delay_us, timeout_us) \
+ readq_poll_timeout((chip)->dsp_ba + (reg), val, cond, \
+ delay_us, timeout_us)
+#define snd_hdac_stream_readb_poll(strm, reg, val, cond, delay_us, timeout_us) \
+ readb_poll_timeout((strm)->sd_addr + AZX_REG_ ## reg, val, cond, \
+ delay_us, timeout_us)
+#define snd_hdac_stream_readl_poll(strm, reg, val, cond, delay_us, timeout_us) \
+ readl_poll_timeout((strm)->sd_addr + AZX_REG_ ## reg, val, cond, \
+ delay_us, timeout_us)
struct hdac_ext_device;
diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h
index c3039e97929a..ebb8e7a7fc29 100644
--- a/include/sound/soc-dapm.h
+++ b/include/sound/soc-dapm.h
@@ -429,6 +429,7 @@ struct snd_soc_dapm_widget *snd_soc_dapm_new_control_unlocked(
const struct snd_soc_dapm_widget *widget);
int snd_soc_dapm_new_dai_widgets(struct snd_soc_dapm_context *dapm,
struct snd_soc_dai *dai);
+void snd_soc_dapm_free_widget(struct snd_soc_dapm_widget *w);
int snd_soc_dapm_link_dai_widgets(struct snd_soc_card *card);
void snd_soc_dapm_connect_dai_link_widgets(struct snd_soc_card *card);
diff --git a/sound/soc/intel/Kconfig b/sound/soc/intel/Kconfig
index f3a4a907b29d..d025ca0c77fa 100644
--- a/sound/soc/intel/Kconfig
+++ b/sound/soc/intel/Kconfig
@@ -209,5 +209,17 @@ config SND_SOC_INTEL_KEEMBAY
If you have a Intel Keembay platform then enable this option
by saying Y or m.
+config SND_SOC_INTEL_AVS
+ tristate "Intel AVS driver"
+ depends on PCI && ACPI
+ depends on COMMON_CLK
+ select SND_SOC_ACPI
+ select SND_HDA_EXT_CORE
+ select SND_HDA_DSP_LOADER
+ help
+ Enable support for Intel(R) cAVS 1.5 platforms with DSP
+ capabilities. This includes Skylake, Kabylake, Amberlake and
+ Apollolake.
+
# ASoC codec drivers
source "sound/soc/intel/boards/Kconfig"
diff --git a/sound/soc/intel/Makefile b/sound/soc/intel/Makefile
index 7c5038803be7..d44b2652c707 100644
--- a/sound/soc/intel/Makefile
+++ b/sound/soc/intel/Makefile
@@ -7,6 +7,7 @@ obj-$(CONFIG_SND_SST_ATOM_HIFI2_PLATFORM) += atom/
obj-$(CONFIG_SND_SOC_INTEL_CATPT) += catpt/
obj-$(CONFIG_SND_SOC_INTEL_SKYLAKE_COMMON) += skylake/
obj-$(CONFIG_SND_SOC_INTEL_KEEMBAY) += keembay/
+obj-$(CONFIG_SND_SOC_INTEL_AVS) += avs/
# Machine support
obj-$(CONFIG_SND_SOC) += boards/
diff --git a/sound/soc/intel/avs/Makefile b/sound/soc/intel/avs/Makefile
new file mode 100644
index 000000000000..f842bfc5e97e
--- /dev/null
+++ b/sound/soc/intel/avs/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+snd-soc-avs-objs := dsp.o ipc.o messages.o utils.o core.o loader.o
+snd-soc-avs-objs += cldma.o
+
+obj-$(CONFIG_SND_SOC_INTEL_AVS) += snd-soc-avs.o
diff --git a/sound/soc/intel/avs/avs.h b/sound/soc/intel/avs/avs.h
new file mode 100644
index 000000000000..b48a342fd184
--- /dev/null
+++ b/sound/soc/intel/avs/avs.h
@@ -0,0 +1,247 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright(c) 2021-2022 Intel Corporation. All rights reserved.
+ *
+ * Authors: Cezary Rojewski <cezary.rojewski@intel.com>
+ * Amadeusz Slawinski <amadeuszx.slawinski@linux.intel.com>
+ */
+
+#ifndef __SOUND_SOC_INTEL_AVS_H
+#define __SOUND_SOC_INTEL_AVS_H
+
+#include <linux/device.h>
+#include <linux/firmware.h>
+#include <sound/hda_codec.h>
+#include <sound/hda_register.h>
+#include "messages.h"
+#include "registers.h"
+
+struct avs_dev;
+
+/*
+ * struct avs_dsp_ops - Platform-specific DSP operations
+ *
+ * @power: Power on or off DSP cores
+ * @reset: Enter or exit reset state on DSP cores
+ * @stall: Stall or run DSP cores
+ * @irq_handler: Top half of IPC servicing
+ * @irq_thread: Bottom half of IPC servicing
+ * @int_control: Enable or disable IPC interrupts
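+ * @load_basefw: Load the base firmware image onto the DSP
+ * @load_lib: Load an additional library onto the DSP
+ * @transfer_mods: Load or unload a set of loadable modules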
+ */
+struct avs_dsp_ops {
+ int (* const power)(struct avs_dev *, u32, bool);
+ int (* const reset)(struct avs_dev *, u32, bool);
+ int (* const stall)(struct avs_dev *, u32, bool);
+ irqreturn_t (* const irq_handler)(int, void *);
+ irqreturn_t (* const irq_thread)(int, void *);
+ void (* const int_control)(struct avs_dev *, bool);
+ int (* const load_basefw)(struct avs_dev *, struct firmware *);
+ int (* const load_lib)(struct avs_dev *, struct firmware *, u32);
+ int (* const transfer_mods)(struct avs_dev *, bool, struct avs_module_entry *, u32);
+};
+
+#define avs_dsp_op(adev, op, ...) \
+ ((adev)->spec->dsp_ops->op(adev, ## __VA_ARGS__))
+
+#define AVS_PLATATTR_CLDMA BIT_ULL(0)
+#define AVS_PLATATTR_IMR BIT_ULL(1)
+
+#define avs_platattr_test(adev, attr) \
+ ((adev)->spec->attributes & AVS_PLATATTR_##attr)
+
+/* Platform specific descriptor */
+struct avs_spec {
+ const char *name;
+
+ const struct avs_dsp_ops *const dsp_ops;
+ struct avs_fw_version min_fw_version; /* anything below is rejected */
+
+ const u32 core_init_mask; /* used during DSP boot */
+ const u64 attributes; /* bitmask of AVS_PLATATTR_* */
+ const u32 sram_base_offset;
+ const u32 sram_window_size;
+ const u32 rom_status;
+};
+
+struct avs_fw_entry {
+ char *name;
+ const struct firmware *fw;
+
+ struct list_head node;
+};
+
+/*
+ * struct avs_dev - Intel HD-Audio driver data
+ *
+ * @dev: PCI device
+ * @dsp_ba: DSP bar address
+ * @spec: platform-specific descriptor
+ * @fw_cfg: Firmware configuration, obtained through FW_CONFIG message
+ * @hw_cfg: Hardware configuration, obtained through HW_CONFIG message
+ * @mods_info: Available module-types, obtained through MODULES_INFO message
+ * @mod_idas: Module instance ID pool, one per module-type
+ * @modres_mutex: For synchronizing any @mods_info updates
+ * @ppl_ida: Pipeline instance ID pool
+ * @fw_list: List of libraries loaded, including base firmware
+ */
+struct avs_dev {
+ struct hda_bus base;
+ struct device *dev;
+
+ void __iomem *dsp_ba;
+ const struct avs_spec *spec;
+ struct avs_ipc *ipc;
+
+ struct avs_fw_cfg fw_cfg;
+ struct avs_hw_cfg hw_cfg;
+ struct avs_mods_info *mods_info;
+ struct ida **mod_idas;
+ struct mutex modres_mutex;
+ struct ida ppl_ida;
+ struct list_head fw_list;
+ int *core_refs; /* reference count per core */
+ char **lib_names;
+
+ struct completion fw_ready;
+};
+
+/* from hda_bus to avs_dev */
+#define hda_to_avs(hda) container_of(hda, struct avs_dev, base)
+/* from hdac_bus to avs_dev */
+#define hdac_to_avs(hdac) hda_to_avs(to_hda_bus(hdac))
+/* from device to avs_dev */
+#define to_avs_dev(dev) \
+({ \
+ struct hdac_bus *__bus = dev_get_drvdata(dev); \
+ hdac_to_avs(__bus); \
+})
+
+int avs_dsp_core_power(struct avs_dev *adev, u32 core_mask, bool power);
+int avs_dsp_core_reset(struct avs_dev *adev, u32 core_mask, bool reset);
+int avs_dsp_core_stall(struct avs_dev *adev, u32 core_mask, bool stall);
+int avs_dsp_core_enable(struct avs_dev *adev, u32 core_mask);
+int avs_dsp_core_disable(struct avs_dev *adev, u32 core_mask);
+
+/* Inter Process Communication */
+
+struct avs_ipc_msg {
+ union {
+ u64 header;
+ union avs_global_msg glb;
+ union avs_reply_msg rsp;
+ };
+ void *data;
+ size_t size;
+};
+
+/*
+ * struct avs_ipc - DSP IPC context
+ *
+ * @dev: PCI device
+ * @rx: Reply message cache
+ * @default_timeout_ms: default message timeout in ms
+ * @ready: whether firmware is ready and communication is open
+ * @rx_completed: whether RX for previously sent TX has been received
+ * @rx_lock: for serializing manipulation of rx_* fields
+ * @msg_mutex: for synchronizing request handling
+ * @done_completion: DONE-part of IPC i.e. ROM and ACKs from FW
+ * @busy_completion: BUSY-part of IPC i.e. receiving responses from FW
+ */
+struct avs_ipc {
+ struct device *dev;
+
+ struct avs_ipc_msg rx;
+ u32 default_timeout_ms;
+ bool ready;
+
+ bool rx_completed;
+ spinlock_t rx_lock;
+ struct mutex msg_mutex;
+ struct completion done_completion;
+ struct completion busy_completion;
+};
+
+#define AVS_EIPC EREMOTEIO
+/*
+ * IPC handlers may return a positive value (firmware error code) which denotes
+ * successful HOST <-> DSP communication yet failure to process a specific request.
+ *
+ * The macro below converts such a value into a Linux kernel error code.
+ * All IPC callers MUST use it as soon as the firmware error code is consumed.
+ */
+#define AVS_IPC_RET(ret) \
+ (((ret) <= 0) ? (ret) : -AVS_EIPC)
+
+static inline void avs_ipc_err(struct avs_dev *adev, struct avs_ipc_msg *tx,
+ const char *name, int error)
+{
+ /*
+ * If IPC channel is blocked e.g.: due to ongoing recovery,
+ * -EPERM error code is expected and thus it's not an actual error.
+ */
+ if (error == -EPERM)
+ dev_dbg(adev->dev, "%s 0x%08x 0x%08x failed: %d\n", name,
+ tx->glb.primary, tx->glb.ext.val, error);
+ else
+ dev_err(adev->dev, "%s 0x%08x 0x%08x failed: %d\n", name,
+ tx->glb.primary, tx->glb.ext.val, error);
+}
+
+irqreturn_t avs_dsp_irq_handler(int irq, void *dev_id);
+irqreturn_t avs_dsp_irq_thread(int irq, void *dev_id);
+void avs_dsp_process_response(struct avs_dev *adev, u64 header);
+int avs_dsp_send_msg_timeout(struct avs_dev *adev,
+ struct avs_ipc_msg *request,
+ struct avs_ipc_msg *reply, int timeout);
+int avs_dsp_send_msg(struct avs_dev *adev,
+ struct avs_ipc_msg *request, struct avs_ipc_msg *reply);
+int avs_dsp_send_rom_msg_timeout(struct avs_dev *adev,
+ struct avs_ipc_msg *request, int timeout);
+int avs_dsp_send_rom_msg(struct avs_dev *adev, struct avs_ipc_msg *request);
+void avs_dsp_interrupt_control(struct avs_dev *adev, bool enable);
+int avs_ipc_init(struct avs_ipc *ipc, struct device *dev);
+void avs_ipc_block(struct avs_ipc *ipc);
+
+/* Firmware resources management */
+
+int avs_get_module_entry(struct avs_dev *adev, const guid_t *uuid, struct avs_module_entry *entry);
+int avs_get_module_id_entry(struct avs_dev *adev, u32 module_id, struct avs_module_entry *entry);
+int avs_get_module_id(struct avs_dev *adev, const guid_t *uuid);
+bool avs_is_module_ida_empty(struct avs_dev *adev, u32 module_id);
+
+int avs_module_info_init(struct avs_dev *adev, bool purge);
+void avs_module_info_free(struct avs_dev *adev);
+int avs_module_id_alloc(struct avs_dev *adev, u16 module_id);
+void avs_module_id_free(struct avs_dev *adev, u16 module_id, u8 instance_id);
+int avs_request_firmware(struct avs_dev *adev, const struct firmware **fw_p, const char *name);
+void avs_release_last_firmware(struct avs_dev *adev);
+void avs_release_firmwares(struct avs_dev *adev);
+
+int avs_dsp_init_module(struct avs_dev *adev, u16 module_id, u8 ppl_instance_id,
+ u8 core_id, u8 domain, void *param, u32 param_size,
+ u16 *instance_id);
+void avs_dsp_delete_module(struct avs_dev *adev, u16 module_id, u16 instance_id,
+ u8 ppl_instance_id, u8 core_id);
+int avs_dsp_create_pipeline(struct avs_dev *adev, u16 req_size, u8 priority,
+ bool lp, u16 attributes, u8 *instance_id);
+int avs_dsp_delete_pipeline(struct avs_dev *adev, u8 instance_id);
+
+/* Firmware loading */
+
+void avs_hda_clock_gating_enable(struct avs_dev *adev, bool enable);
+void avs_hda_power_gating_enable(struct avs_dev *adev, bool enable);
+void avs_hda_l1sen_enable(struct avs_dev *adev, bool enable);
+
+int avs_dsp_boot_firmware(struct avs_dev *adev, bool purge);
+int avs_dsp_first_boot_firmware(struct avs_dev *adev);
+
+int avs_cldma_load_basefw(struct avs_dev *adev, struct firmware *fw);
+int avs_cldma_load_library(struct avs_dev *adev, struct firmware *lib, u32 id);
+int avs_cldma_transfer_modules(struct avs_dev *adev, bool load,
+ struct avs_module_entry *mods, u32 num_mods);
+int avs_hda_load_basefw(struct avs_dev *adev, struct firmware *fw);
+int avs_hda_load_library(struct avs_dev *adev, struct firmware *lib, u32 id);
+int avs_hda_transfer_modules(struct avs_dev *adev, bool load,
+ struct avs_module_entry *mods, u32 num_mods);
+
+#endif /* __SOUND_SOC_INTEL_AVS_H */
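[Editor's note] The sketch below is illustrative only and not part of the patch: it shows the calling convention that the AVS_IPC_RET() comment above describes. The wrapper name avs_power_up_extra_cores() is made up; avs_ipc_set_dx() and AVS_IPC_RET() come from this series, and the same pattern appears in avs_dsp_enable() in dsp.c further down.

static int avs_power_up_extra_cores(struct avs_dev *adev, u32 core_mask)
{
	int ret;

	/*
	 * IPC helpers return 0 on success, a negative errno when the message
	 * could not be delivered, or a positive firmware status code when the
	 * DSP processed the request but rejected it. AVS_IPC_RET() folds the
	 * positive case into -AVS_EIPC (-EREMOTEIO) so kernel callers only
	 * ever see errno-style values.
	 */
	ret = avs_ipc_set_dx(adev, core_mask, true);
	return AVS_IPC_RET(ret);
}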
diff --git a/sound/soc/intel/avs/cldma.c b/sound/soc/intel/avs/cldma.c
new file mode 100644
index 000000000000..d100c6ba4d8a
--- /dev/null
+++ b/sound/soc/intel/avs/cldma.c
@@ -0,0 +1,316 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright(c) 2021-2022 Intel Corporation. All rights reserved.
+//
+// Author: Cezary Rojewski <cezary.rojewski@intel.com>
+//
+
+#include <linux/pci.h>
+#include <sound/hda_register.h>
+#include <sound/hdaudio_ext.h>
+#include "cldma.h"
+#include "registers.h"
+
+/* Stream Registers */
+#define AZX_CL_SD_BASE 0x80
+#define AZX_SD_CTL_STRM_MASK GENMASK(23, 20)
+#define AZX_SD_CTL_STRM(s) (((s)->stream_tag << 20) & AZX_SD_CTL_STRM_MASK)
+#define AZX_SD_BDLPL_BDLPLBA_MASK GENMASK(31, 7)
+#define AZX_SD_BDLPL_BDLPLBA(lb) ((lb) & AZX_SD_BDLPL_BDLPLBA_MASK)
+
+/* Software Position Based FIFO Capability Registers */
+#define AZX_CL_SPBFCS 0x20
+#define AZX_REG_CL_SPBFCTL (AZX_CL_SPBFCS + 0x4)
+#define AZX_REG_CL_SD_SPIB (AZX_CL_SPBFCS + 0x8)
+
+#define AVS_CL_OP_INTERVAL_US 3
+#define AVS_CL_OP_TIMEOUT_US 300
+#define AVS_CL_IOC_TIMEOUT_MS 300
+#define AVS_CL_STREAM_INDEX 0
+
+struct hda_cldma {
+ struct device *dev;
+ struct hdac_bus *bus;
+ void __iomem *dsp_ba;
+
+ unsigned int buffer_size;
+ unsigned int num_periods;
+ unsigned int stream_tag;
+ void __iomem *sd_addr;
+
+ struct snd_dma_buffer dmab_data;
+ struct snd_dma_buffer dmab_bdl;
+ struct delayed_work memcpy_work;
+ struct completion completion;
+
+ /* runtime */
+ void *position;
+ unsigned int remaining;
+ unsigned int sd_status;
+};
+
+static void cldma_memcpy_work(struct work_struct *work);
+
+struct hda_cldma code_loader = {
+ .stream_tag = AVS_CL_STREAM_INDEX + 1,
+ .memcpy_work = __DELAYED_WORK_INITIALIZER(code_loader.memcpy_work, cldma_memcpy_work, 0),
+ .completion = COMPLETION_INITIALIZER(code_loader.completion),
+};
+
+void hda_cldma_fill(struct hda_cldma *cl)
+{
+ unsigned int size, offset;
+
+ if (cl->remaining > cl->buffer_size)
+ size = cl->buffer_size;
+ else
+ size = cl->remaining;
+
+ offset = snd_hdac_stream_readl(cl, CL_SD_SPIB);
+ if (offset + size > cl->buffer_size) {
+ unsigned int ss;
+
+ ss = cl->buffer_size - offset;
+ memcpy(cl->dmab_data.area + offset, cl->position, ss);
+ offset = 0;
+ size -= ss;
+ cl->position += ss;
+ cl->remaining -= ss;
+ }
+
+ memcpy(cl->dmab_data.area + offset, cl->position, size);
+ cl->position += size;
+ cl->remaining -= size;
+
+ snd_hdac_stream_writel(cl, CL_SD_SPIB, offset + size);
+}
+
+static void cldma_memcpy_work(struct work_struct *work)
+{
+ struct hda_cldma *cl = container_of(work, struct hda_cldma, memcpy_work.work);
+ int ret;
+
+ ret = hda_cldma_start(cl);
+ if (ret < 0) {
+ dev_err(cl->dev, "cldma set RUN failed: %d\n", ret);
+ return;
+ }
+
+ while (true) {
+ ret = wait_for_completion_timeout(&cl->completion,
+ msecs_to_jiffies(AVS_CL_IOC_TIMEOUT_MS));
+ if (!ret) {
+ dev_err(cl->dev, "cldma IOC timeout\n");
+ break;
+ }
+
+ if (!(cl->sd_status & SD_INT_COMPLETE)) {
+ dev_err(cl->dev, "cldma transfer error, SD status: 0x%08x\n",
+ cl->sd_status);
+ break;
+ }
+
+ if (!cl->remaining)
+ break;
+
+ reinit_completion(&cl->completion);
+ hda_cldma_fill(cl);
+ /* enable CLDMA interrupt */
+ snd_hdac_adsp_updatel(cl, AVS_ADSP_REG_ADSPIC, AVS_ADSP_ADSPIC_CLDMA,
+ AVS_ADSP_ADSPIC_CLDMA);
+ }
+}
+
+void hda_cldma_transfer(struct hda_cldma *cl, unsigned long start_delay)
+{
+ if (!cl->remaining)
+ return;
+
+ reinit_completion(&cl->completion);
+ /* fill buffer with the first chunk before scheduling run */
+ hda_cldma_fill(cl);
+
+ schedule_delayed_work(&cl->memcpy_work, start_delay);
+}
+
+int hda_cldma_start(struct hda_cldma *cl)
+{
+ unsigned int reg;
+
+ /* enable interrupts */
+ snd_hdac_adsp_updatel(cl, AVS_ADSP_REG_ADSPIC, AVS_ADSP_ADSPIC_CLDMA,
+ AVS_ADSP_ADSPIC_CLDMA);
+ snd_hdac_stream_updateb(cl, SD_CTL, SD_INT_MASK | SD_CTL_DMA_START,
+ SD_INT_MASK | SD_CTL_DMA_START);
+
+ /* await DMA engine start */
+ return snd_hdac_stream_readb_poll(cl, SD_CTL, reg, reg & SD_CTL_DMA_START,
+ AVS_CL_OP_INTERVAL_US, AVS_CL_OP_TIMEOUT_US);
+}
+
+int hda_cldma_stop(struct hda_cldma *cl)
+{
+ unsigned int reg;
+ int ret;
+
+ /* disable interrupts */
+ snd_hdac_adsp_updatel(cl, AVS_ADSP_REG_ADSPIC, AVS_ADSP_ADSPIC_CLDMA, 0);
+ snd_hdac_stream_updateb(cl, SD_CTL, SD_INT_MASK | SD_CTL_DMA_START, 0);
+
+ /* await DMA engine stop */
+ ret = snd_hdac_stream_readb_poll(cl, SD_CTL, reg, !(reg & SD_CTL_DMA_START),
+ AVS_CL_OP_INTERVAL_US, AVS_CL_OP_TIMEOUT_US);
+ cancel_delayed_work_sync(&cl->memcpy_work);
+
+ return ret;
+}
+
+int hda_cldma_reset(struct hda_cldma *cl)
+{
+ unsigned int reg;
+ int ret;
+
+ ret = hda_cldma_stop(cl);
+ if (ret < 0) {
+ dev_err(cl->dev, "cldma stop failed: %d\n", ret);
+ return ret;
+ }
+
+ snd_hdac_stream_updateb(cl, SD_CTL, 1, 1);
+ ret = snd_hdac_stream_readb_poll(cl, SD_CTL, reg, (reg & 1), AVS_CL_OP_INTERVAL_US,
+ AVS_CL_OP_TIMEOUT_US);
+ if (ret < 0) {
+ dev_err(cl->dev, "cldma set SRST failed: %d\n", ret);
+ return ret;
+ }
+
+ snd_hdac_stream_updateb(cl, SD_CTL, 1, 0);
+ ret = snd_hdac_stream_readb_poll(cl, SD_CTL, reg, !(reg & 1), AVS_CL_OP_INTERVAL_US,
+ AVS_CL_OP_TIMEOUT_US);
+ if (ret < 0) {
+ dev_err(cl->dev, "cldma unset SRST failed: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+void hda_cldma_set_data(struct hda_cldma *cl, void *data, unsigned int size)
+{
+ /* setup runtime */
+ cl->position = data;
+ cl->remaining = size;
+}
+
+static void cldma_setup_bdle(struct hda_cldma *cl, u32 bdle_size)
+{
+ struct snd_dma_buffer *dmab = &cl->dmab_data;
+ __le32 *bdl = (__le32 *)cl->dmab_bdl.area;
+ int remaining = cl->buffer_size;
+ int offset = 0;
+
+ cl->num_periods = 0;
+
+ while (remaining > 0) {
+ phys_addr_t addr;
+ int chunk;
+
+ addr = snd_sgbuf_get_addr(dmab, offset);
+ bdl[0] = cpu_to_le32(lower_32_bits(addr));
+ bdl[1] = cpu_to_le32(upper_32_bits(addr));
+ chunk = snd_sgbuf_get_chunk_size(dmab, offset, bdle_size);
+ bdl[2] = cpu_to_le32(chunk);
+
+ remaining -= chunk;
+ /* set IOC only for the last entry */
+ bdl[3] = (remaining > 0) ? 0 : cpu_to_le32(0x01);
+
+ bdl += 4;
+ offset += chunk;
+ cl->num_periods++;
+ }
+}
+
+void hda_cldma_setup(struct hda_cldma *cl)
+{
+ dma_addr_t bdl_addr = cl->dmab_bdl.addr;
+
+ cldma_setup_bdle(cl, cl->buffer_size / 2);
+
+ snd_hdac_stream_writel(cl, SD_BDLPL, AZX_SD_BDLPL_BDLPLBA(lower_32_bits(bdl_addr)));
+ snd_hdac_stream_writel(cl, SD_BDLPU, upper_32_bits(bdl_addr));
+
+ snd_hdac_stream_writel(cl, SD_CBL, cl->buffer_size);
+ snd_hdac_stream_writeb(cl, SD_LVI, cl->num_periods - 1);
+
+ snd_hdac_stream_updatel(cl, SD_CTL, AZX_SD_CTL_STRM_MASK, AZX_SD_CTL_STRM(cl));
+ /* enable spib */
+ snd_hdac_stream_writel(cl, CL_SPBFCTL, 1);
+}
+
+static irqreturn_t cldma_irq_handler(int irq, void *dev_id)
+{
+ struct hda_cldma *cl = dev_id;
+ u32 adspis;
+
+ adspis = snd_hdac_adsp_readl(cl, AVS_ADSP_REG_ADSPIS);
+ if (adspis == UINT_MAX)
+ return IRQ_NONE;
+ if (!(adspis & AVS_ADSP_ADSPIS_CLDMA))
+ return IRQ_NONE;
+
+ cl->sd_status = snd_hdac_stream_readb(cl, SD_STS);
+ dev_warn(cl->dev, "%s sd_status: 0x%08x\n", __func__, cl->sd_status);
+
+ /* disable CLDMA interrupt */
+ snd_hdac_adsp_updatel(cl, AVS_ADSP_REG_ADSPIC, AVS_ADSP_ADSPIC_CLDMA, 0);
+
+ complete(&cl->completion);
+
+ return IRQ_HANDLED;
+}
+
+int hda_cldma_init(struct hda_cldma *cl, struct hdac_bus *bus, void __iomem *dsp_ba,
+ unsigned int buffer_size)
+{
+ struct pci_dev *pci = to_pci_dev(bus->dev);
+ int ret;
+
+ ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV_SG, bus->dev, buffer_size, &cl->dmab_data);
+ if (ret < 0)
+ return ret;
+
+ ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, bus->dev, BDL_SIZE, &cl->dmab_bdl);
+ if (ret < 0)
+ goto alloc_err;
+
+ cl->dev = bus->dev;
+ cl->bus = bus;
+ cl->dsp_ba = dsp_ba;
+ cl->buffer_size = buffer_size;
+ cl->sd_addr = dsp_ba + AZX_CL_SD_BASE;
+
+ ret = pci_request_irq(pci, 0, cldma_irq_handler, NULL, cl, "CLDMA");
+ if (ret < 0) {
+ dev_err(cl->dev, "Failed to request CLDMA IRQ handler: %d\n", ret);
+ goto req_err;
+ }
+
+ return 0;
+
+req_err:
+ snd_dma_free_pages(&cl->dmab_bdl);
+alloc_err:
+ snd_dma_free_pages(&cl->dmab_data);
+
+ return ret;
+}
+
+void hda_cldma_free(struct hda_cldma *cl)
+{
+ struct pci_dev *pci = to_pci_dev(cl->dev);
+
+ pci_free_irq(pci, 0, cl);
+ snd_dma_free_pages(&cl->dmab_data);
+ snd_dma_free_pages(&cl->dmab_bdl);
+}
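[Editor's note] A minimal sketch, not part of the patch, of how the single code_loader instance is expected to be brought up and torn down, assuming "avs.h" and "cldma.h" are included. The probe/remove wrappers are hypothetical since this series stops short of the PCI probe code; hda_cldma_init(), hda_cldma_free() and AVS_CL_DEFAULT_BUFFER_SIZE are declared in cldma.h below.

static int avs_cldma_probe(struct avs_dev *adev)
{
	struct hdac_bus *bus = &adev->base.core;
	int ret;

	/* Allocates the data/BDL DMA buffers and claims the CLDMA interrupt. */
	ret = hda_cldma_init(&code_loader, bus, adev->dsp_ba,
			     AVS_CL_DEFAULT_BUFFER_SIZE);
	if (ret < 0)
		dev_err(adev->dev, "cldma init failed: %d\n", ret);

	return ret;
}

static void avs_cldma_remove(struct avs_dev *adev)
{
	/* Releases the interrupt and frees both DMA buffers. */
	hda_cldma_free(&code_loader);
}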
diff --git a/sound/soc/intel/avs/cldma.h b/sound/soc/intel/avs/cldma.h
new file mode 100644
index 000000000000..754fcf9ee585
--- /dev/null
+++ b/sound/soc/intel/avs/cldma.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright(c) 2021-2022 Intel Corporation. All rights reserved.
+ *
+ * Author: Cezary Rojewski <cezary.rojewski@intel.com>
+ */
+
+#ifndef __SOUND_SOC_INTEL_AVS_CLDMA_H
+#define __SOUND_SOC_INTEL_AVS_CLDMA_H
+
+#define AVS_CL_DEFAULT_BUFFER_SIZE (32 * PAGE_SIZE)
+
+struct hda_cldma;
+extern struct hda_cldma code_loader;
+
+void hda_cldma_fill(struct hda_cldma *cl);
+void hda_cldma_transfer(struct hda_cldma *cl, unsigned long start_delay);
+
+int hda_cldma_start(struct hda_cldma *cl);
+int hda_cldma_stop(struct hda_cldma *cl);
+int hda_cldma_reset(struct hda_cldma *cl);
+
+void hda_cldma_set_data(struct hda_cldma *cl, void *data, unsigned int size);
+void hda_cldma_setup(struct hda_cldma *cl);
+int hda_cldma_init(struct hda_cldma *cl, struct hdac_bus *bus, void __iomem *dsp_ba,
+ unsigned int buffer_size);
+void hda_cldma_free(struct hda_cldma *cl);
+
+#endif
diff --git a/sound/soc/intel/avs/core.c b/sound/soc/intel/avs/core.c
new file mode 100644
index 000000000000..a4d063d12fec
--- /dev/null
+++ b/sound/soc/intel/avs/core.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright(c) 2021-2022 Intel Corporation. All rights reserved.
+//
+// Authors: Cezary Rojewski <cezary.rojewski@intel.com>
+// Amadeusz Slawinski <amadeuszx.slawinski@linux.intel.com>
+//
+// Special thanks to:
+// Krzysztof Hejmowski <krzysztof.hejmowski@intel.com>
+// Michal Sienkiewicz <michal.sienkiewicz@intel.com>
+// Filip Proborszcz
+//
+// for sharing Intel AudioDSP expertise and helping shape the very
+// foundation of this driver
+//
+
+#include <linux/pci.h>
+#include <sound/hdaudio.h>
+#include "avs.h"
+
+static void
+avs_hda_update_config_dword(struct hdac_bus *bus, u32 reg, u32 mask, u32 value)
+{
+ struct pci_dev *pci = to_pci_dev(bus->dev);
+ u32 data;
+
+ pci_read_config_dword(pci, reg, &data);
+ data &= ~mask;
+ data |= (value & mask);
+ pci_write_config_dword(pci, reg, data);
+}
+
+void avs_hda_power_gating_enable(struct avs_dev *adev, bool enable)
+{
+ u32 value;
+
+ value = enable ? 0 : AZX_PGCTL_LSRMD_MASK;
+ avs_hda_update_config_dword(&adev->base.core, AZX_PCIREG_PGCTL,
+ AZX_PGCTL_LSRMD_MASK, value);
+}
+
+static void avs_hdac_clock_gating_enable(struct hdac_bus *bus, bool enable)
+{
+ u32 value;
+
+ value = enable ? AZX_CGCTL_MISCBDCGE_MASK : 0;
+ avs_hda_update_config_dword(bus, AZX_PCIREG_CGCTL, AZX_CGCTL_MISCBDCGE_MASK, value);
+}
+
+void avs_hda_clock_gating_enable(struct avs_dev *adev, bool enable)
+{
+ avs_hdac_clock_gating_enable(&adev->base.core, enable);
+}
+
+void avs_hda_l1sen_enable(struct avs_dev *adev, bool enable)
+{
+ u32 value;
+
+ value = enable ? AZX_VS_EM2_L1SEN : 0;
+ snd_hdac_chip_updatel(&adev->base.core, VS_EM2, AZX_VS_EM2_L1SEN, value);
+}
diff --git a/sound/soc/intel/avs/dsp.c b/sound/soc/intel/avs/dsp.c
new file mode 100644
index 000000000000..3ff17bd22a5a
--- /dev/null
+++ b/sound/soc/intel/avs/dsp.c
@@ -0,0 +1,302 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright(c) 2021-2022 Intel Corporation. All rights reserved.
+//
+// Authors: Cezary Rojewski <cezary.rojewski@intel.com>
+// Amadeusz Slawinski <amadeuszx.slawinski@linux.intel.com>
+//
+
+#include <linux/module.h>
+#include <sound/hdaudio_ext.h>
+#include "avs.h"
+#include "registers.h"
+
+#define AVS_ADSPCS_INTERVAL_US 500
+#define AVS_ADSPCS_TIMEOUT_US 50000
+
+int avs_dsp_core_power(struct avs_dev *adev, u32 core_mask, bool power)
+{
+ u32 value, mask, reg;
+ int ret;
+
+ mask = AVS_ADSPCS_SPA_MASK(core_mask);
+ value = power ? mask : 0;
+
+ snd_hdac_adsp_updatel(adev, AVS_ADSP_REG_ADSPCS, mask, value);
+
+ mask = AVS_ADSPCS_CPA_MASK(core_mask);
+ value = power ? mask : 0;
+
+ ret = snd_hdac_adsp_readl_poll(adev, AVS_ADSP_REG_ADSPCS,
+ reg, (reg & mask) == value,
+ AVS_ADSPCS_INTERVAL_US,
+ AVS_ADSPCS_TIMEOUT_US);
+ if (ret)
+ dev_err(adev->dev, "core_mask %d power %s failed: %d\n",
+ core_mask, power ? "on" : "off", ret);
+
+ return ret;
+}
+
+int avs_dsp_core_reset(struct avs_dev *adev, u32 core_mask, bool reset)
+{
+ u32 value, mask, reg;
+ int ret;
+
+ mask = AVS_ADSPCS_CRST_MASK(core_mask);
+ value = reset ? mask : 0;
+
+ snd_hdac_adsp_updatel(adev, AVS_ADSP_REG_ADSPCS, mask, value);
+
+ ret = snd_hdac_adsp_readl_poll(adev, AVS_ADSP_REG_ADSPCS,
+ reg, (reg & mask) == value,
+ AVS_ADSPCS_INTERVAL_US,
+ AVS_ADSPCS_TIMEOUT_US);
+ if (ret)
+ dev_err(adev->dev, "core_mask %d %s reset failed: %d\n",
+ core_mask, reset ? "enter" : "exit", ret);
+
+ return ret;
+}
+
+int avs_dsp_core_stall(struct avs_dev *adev, u32 core_mask, bool stall)
+{
+ u32 value, mask, reg;
+ int ret;
+
+ mask = AVS_ADSPCS_CSTALL_MASK(core_mask);
+ value = stall ? mask : 0;
+
+ snd_hdac_adsp_updatel(adev, AVS_ADSP_REG_ADSPCS, mask, value);
+
+ ret = snd_hdac_adsp_readl_poll(adev, AVS_ADSP_REG_ADSPCS,
+ reg, (reg & mask) == value,
+ AVS_ADSPCS_INTERVAL_US,
+ AVS_ADSPCS_TIMEOUT_US);
+ if (ret)
+ dev_err(adev->dev, "core_mask %d %sstall failed: %d\n",
+ core_mask, stall ? "" : "un", ret);
+
+ return ret;
+}
+
+int avs_dsp_core_enable(struct avs_dev *adev, u32 core_mask)
+{
+ int ret;
+
+ ret = avs_dsp_op(adev, power, core_mask, true);
+ if (ret)
+ return ret;
+
+ ret = avs_dsp_op(adev, reset, core_mask, false);
+ if (ret)
+ return ret;
+
+ return avs_dsp_op(adev, stall, core_mask, false);
+}
+
+int avs_dsp_core_disable(struct avs_dev *adev, u32 core_mask)
+{
+ /* No error checks to allow for complete DSP shutdown. */
+ avs_dsp_op(adev, stall, core_mask, true);
+ avs_dsp_op(adev, reset, core_mask, true);
+
+ return avs_dsp_op(adev, power, core_mask, false);
+}
+
+static int avs_dsp_enable(struct avs_dev *adev, u32 core_mask)
+{
+ u32 mask;
+ int ret;
+
+ ret = avs_dsp_core_enable(adev, core_mask);
+ if (ret < 0)
+ return ret;
+
+ mask = core_mask & ~AVS_MAIN_CORE_MASK;
+ if (!mask)
+ /*
+ * without main core, fw is dead anyway
+ * so setting D0 for it is futile.
+ */
+ return 0;
+
+ ret = avs_ipc_set_dx(adev, mask, true);
+ return AVS_IPC_RET(ret);
+}
+
+static int avs_dsp_disable(struct avs_dev *adev, u32 core_mask)
+{
+ int ret;
+
+ ret = avs_ipc_set_dx(adev, core_mask, false);
+ if (ret)
+ return AVS_IPC_RET(ret);
+
+ return avs_dsp_core_disable(adev, core_mask);
+}
+
+static int avs_dsp_get_core(struct avs_dev *adev, u32 core_id)
+{
+ u32 mask;
+ int ret;
+
+ mask = BIT_MASK(core_id);
+ if (mask == AVS_MAIN_CORE_MASK)
+ /* nothing to do for main core */
+ return 0;
+ if (core_id >= adev->hw_cfg.dsp_cores) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ adev->core_refs[core_id]++;
+ if (adev->core_refs[core_id] == 1) {
+ ret = avs_dsp_enable(adev, mask);
+ if (ret)
+ goto err_enable_dsp;
+ }
+
+ return 0;
+
+err_enable_dsp:
+ adev->core_refs[core_id]--;
+err:
+ dev_err(adev->dev, "get core %d failed: %d\n", core_id, ret);
+ return ret;
+}
+
+static int avs_dsp_put_core(struct avs_dev *adev, u32 core_id)
+{
+ u32 mask;
+ int ret;
+
+ mask = BIT_MASK(core_id);
+ if (mask == AVS_MAIN_CORE_MASK)
+ /* nothing to do for main core */
+ return 0;
+ if (core_id >= adev->hw_cfg.dsp_cores) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ adev->core_refs[core_id]--;
+ if (!adev->core_refs[core_id]) {
+ ret = avs_dsp_disable(adev, mask);
+ if (ret)
+ goto err;
+ }
+
+ return 0;
+err:
+ dev_err(adev->dev, "put core %d failed: %d\n", core_id, ret);
+ return ret;
+}
+
+int avs_dsp_init_module(struct avs_dev *adev, u16 module_id, u8 ppl_instance_id,
+ u8 core_id, u8 domain, void *param, u32 param_size,
+ u16 *instance_id)
+{
+ struct avs_module_entry mentry;
+ bool was_loaded = false;
+ int ret, id;
+
+ id = avs_module_id_alloc(adev, module_id);
+ if (id < 0)
+ return id;
+
+ ret = avs_get_module_id_entry(adev, module_id, &mentry);
+ if (ret)
+ goto err_mod_entry;
+
+ ret = avs_dsp_get_core(adev, core_id);
+ if (ret)
+ goto err_mod_entry;
+
+ /* Load code into memory if this is the first instance. */
+ if (!id && !avs_module_entry_is_loaded(&mentry)) {
+ ret = avs_dsp_op(adev, transfer_mods, true, &mentry, 1);
+ if (ret) {
+ dev_err(adev->dev, "load modules failed: %d\n", ret);
+ goto err_mod_entry;
+ }
+ was_loaded = true;
+ }
+
+ ret = avs_ipc_init_instance(adev, module_id, id, ppl_instance_id,
+ core_id, domain, param, param_size);
+ if (ret) {
+ ret = AVS_IPC_RET(ret);
+ goto err_ipc;
+ }
+
+ *instance_id = id;
+ return 0;
+
+err_ipc:
+ if (was_loaded)
+ avs_dsp_op(adev, transfer_mods, false, &mentry, 1);
+ avs_dsp_put_core(adev, core_id);
+err_mod_entry:
+ avs_module_id_free(adev, module_id, id);
+ return ret;
+}
+
+void avs_dsp_delete_module(struct avs_dev *adev, u16 module_id, u16 instance_id,
+ u8 ppl_instance_id, u8 core_id)
+{
+ struct avs_module_entry mentry;
+ int ret;
+
+ /* Modules not owned by any pipeline need to be freed explicitly. */
+ if (ppl_instance_id == INVALID_PIPELINE_ID)
+ avs_ipc_delete_instance(adev, module_id, instance_id);
+
+ avs_module_id_free(adev, module_id, instance_id);
+
+ ret = avs_get_module_id_entry(adev, module_id, &mentry);
+ /* Unload occupied memory if this was the last instance. */
+ if (!ret && mentry.type.load_type == AVS_MODULE_LOAD_TYPE_LOADABLE) {
+ if (avs_is_module_ida_empty(adev, module_id)) {
+ ret = avs_dsp_op(adev, transfer_mods, false, &mentry, 1);
+ if (ret)
+ dev_err(adev->dev, "unload modules failed: %d\n", ret);
+ }
+ }
+
+ avs_dsp_put_core(adev, core_id);
+}
+
+int avs_dsp_create_pipeline(struct avs_dev *adev, u16 req_size, u8 priority,
+ bool lp, u16 attributes, u8 *instance_id)
+{
+ struct avs_fw_cfg *fw_cfg = &adev->fw_cfg;
+ int ret, id;
+
+ id = ida_alloc_max(&adev->ppl_ida, fw_cfg->max_ppl_count - 1, GFP_KERNEL);
+ if (id < 0)
+ return id;
+
+ ret = avs_ipc_create_pipeline(adev, req_size, priority, id, lp, attributes);
+ if (ret) {
+ ida_free(&adev->ppl_ida, id);
+ return AVS_IPC_RET(ret);
+ }
+
+ *instance_id = id;
+ return 0;
+}
+
+int avs_dsp_delete_pipeline(struct avs_dev *adev, u8 instance_id)
+{
+ int ret;
+
+ ret = avs_ipc_delete_pipeline(adev, instance_id);
+ if (ret)
+ ret = AVS_IPC_RET(ret);
+
+ ida_free(&adev->ppl_ida, instance_id);
+ return ret;
+}
+
+MODULE_LICENSE("GPL");
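[Editor's note] A sketch, not part of the patch, showing how the pipeline and module helpers above pair up. The function name and the req_size/priority values supplied by the caller are placeholders; the rollback on failure mirrors the error paths of avs_dsp_init_module() above.

static int avs_build_single_module_path(struct avs_dev *adev, u16 module_id,
					u16 req_size, u8 priority)
{
	u16 inst_id;
	u8 ppl_id;
	int ret;

	ret = avs_dsp_create_pipeline(adev, req_size, priority, false, 0, &ppl_id);
	if (ret)
		return ret;

	/* Instantiate the module on core 0, default domain, no runtime params. */
	ret = avs_dsp_init_module(adev, module_id, ppl_id, 0, 0, NULL, 0, &inst_id);
	if (ret)
		avs_dsp_delete_pipeline(adev, ppl_id);

	return ret;
}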
diff --git a/sound/soc/intel/avs/ipc.c b/sound/soc/intel/avs/ipc.c
new file mode 100644
index 000000000000..68aaf01edbf2
--- /dev/null
+++ b/sound/soc/intel/avs/ipc.c
@@ -0,0 +1,382 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright(c) 2021-2022 Intel Corporation. All rights reserved.
+//
+// Authors: Cezary Rojewski <cezary.rojewski@intel.com>
+// Amadeusz Slawinski <amadeuszx.slawinski@linux.intel.com>
+//
+
+#include <linux/slab.h>
+#include <sound/hdaudio_ext.h>
+#include "avs.h"
+#include "messages.h"
+#include "registers.h"
+
+#define AVS_IPC_TIMEOUT_MS 300
+
+static void avs_dsp_receive_rx(struct avs_dev *adev, u64 header)
+{
+ struct avs_ipc *ipc = adev->ipc;
+ union avs_reply_msg msg = AVS_MSG(header);
+
+ ipc->rx.header = header;
+ /* Abort copying payload if request processing was unsuccessful. */
+ if (!msg.status) {
+ /* update size in case of LARGE_CONFIG_GET */
+ if (msg.msg_target == AVS_MOD_MSG &&
+ msg.global_msg_type == AVS_MOD_LARGE_CONFIG_GET)
+ ipc->rx.size = msg.ext.large_config.data_off_size;
+
+ memcpy_fromio(ipc->rx.data, avs_uplink_addr(adev), ipc->rx.size);
+ }
+}
+
+static void avs_dsp_process_notification(struct avs_dev *adev, u64 header)
+{
+ struct avs_notify_mod_data mod_data;
+ union avs_notify_msg msg = AVS_MSG(header);
+ size_t data_size = 0;
+ void *data = NULL;
+
+ /* Ignore spurious notifications until handshake is established. */
+ if (!adev->ipc->ready && msg.notify_msg_type != AVS_NOTIFY_FW_READY) {
+ dev_dbg(adev->dev, "FW not ready, skip notification: 0x%08x\n", msg.primary);
+ return;
+ }
+
+ /* Calculate notification payload size. */
+ switch (msg.notify_msg_type) {
+ case AVS_NOTIFY_FW_READY:
+ break;
+
+ case AVS_NOTIFY_PHRASE_DETECTED:
+ data_size = sizeof(struct avs_notify_voice_data);
+ break;
+
+ case AVS_NOTIFY_RESOURCE_EVENT:
+ data_size = sizeof(struct avs_notify_res_data);
+ break;
+
+ case AVS_NOTIFY_MODULE_EVENT:
+ /* To know the total payload size, header needs to be read first. */
+ memcpy_fromio(&mod_data, avs_uplink_addr(adev), sizeof(mod_data));
+ data_size = sizeof(mod_data) + mod_data.data_size;
+ break;
+
+ default:
+ dev_info(adev->dev, "unknown notification: 0x%08x\n", msg.primary);
+ break;
+ }
+
+ if (data_size) {
+ data = kmalloc(data_size, GFP_KERNEL);
+ if (!data)
+ return;
+
+ memcpy_fromio(data, avs_uplink_addr(adev), data_size);
+ }
+
+ /* Perform notification-specific operations. */
+ switch (msg.notify_msg_type) {
+ case AVS_NOTIFY_FW_READY:
+ dev_dbg(adev->dev, "FW READY 0x%08x\n", msg.primary);
+ adev->ipc->ready = true;
+ complete(&adev->fw_ready);
+ break;
+
+ default:
+ break;
+ }
+
+ kfree(data);
+}
+
+void avs_dsp_process_response(struct avs_dev *adev, u64 header)
+{
+ struct avs_ipc *ipc = adev->ipc;
+
+ /*
+ * Response may either be solicited - a reply for a request that has
+ * been sent beforehand - or unsolicited (notification).
+ */
+ if (avs_msg_is_reply(header)) {
+ /* Response processing is invoked from IRQ thread. */
+ spin_lock_irq(&ipc->rx_lock);
+ avs_dsp_receive_rx(adev, header);
+ ipc->rx_completed = true;
+ spin_unlock_irq(&ipc->rx_lock);
+ } else {
+ avs_dsp_process_notification(adev, header);
+ }
+
+ complete(&ipc->busy_completion);
+}
+
+irqreturn_t avs_dsp_irq_handler(int irq, void *dev_id)
+{
+ struct avs_dev *adev = dev_id;
+ struct avs_ipc *ipc = adev->ipc;
+ u32 adspis, hipc_rsp, hipc_ack;
+ irqreturn_t ret = IRQ_NONE;
+
+ adspis = snd_hdac_adsp_readl(adev, AVS_ADSP_REG_ADSPIS);
+ if (adspis == UINT_MAX || !(adspis & AVS_ADSP_ADSPIS_IPC))
+ return ret;
+
+ hipc_ack = snd_hdac_adsp_readl(adev, SKL_ADSP_REG_HIPCIE);
+ hipc_rsp = snd_hdac_adsp_readl(adev, SKL_ADSP_REG_HIPCT);
+
+ /* DSP acked host's request */
+ if (hipc_ack & SKL_ADSP_HIPCIE_DONE) {
+ /*
+ * As an extra precaution, mask done interrupt. Code executed
+ * due to complete() found below does not assume any masking.
+ */
+ snd_hdac_adsp_updatel(adev, SKL_ADSP_REG_HIPCCTL,
+ AVS_ADSP_HIPCCTL_DONE, 0);
+
+ complete(&ipc->done_completion);
+
+ /* tell DSP it has our attention */
+ snd_hdac_adsp_updatel(adev, SKL_ADSP_REG_HIPCIE,
+ SKL_ADSP_HIPCIE_DONE,
+ SKL_ADSP_HIPCIE_DONE);
+ /* unmask done interrupt */
+ snd_hdac_adsp_updatel(adev, SKL_ADSP_REG_HIPCCTL,
+ AVS_ADSP_HIPCCTL_DONE,
+ AVS_ADSP_HIPCCTL_DONE);
+ ret = IRQ_HANDLED;
+ }
+
+ /* DSP sent new response to process */
+ if (hipc_rsp & SKL_ADSP_HIPCT_BUSY) {
+ /* mask busy interrupt */
+ snd_hdac_adsp_updatel(adev, SKL_ADSP_REG_HIPCCTL,
+ AVS_ADSP_HIPCCTL_BUSY, 0);
+
+ ret = IRQ_WAKE_THREAD;
+ }
+
+ return ret;
+}
+
+irqreturn_t avs_dsp_irq_thread(int irq, void *dev_id)
+{
+ struct avs_dev *adev = dev_id;
+ union avs_reply_msg msg;
+ u32 hipct, hipcte;
+
+ hipct = snd_hdac_adsp_readl(adev, SKL_ADSP_REG_HIPCT);
+ hipcte = snd_hdac_adsp_readl(adev, SKL_ADSP_REG_HIPCTE);
+
+ /* ensure DSP sent new response to process */
+ if (!(hipct & SKL_ADSP_HIPCT_BUSY))
+ return IRQ_NONE;
+
+ msg.primary = hipct;
+ msg.ext.val = hipcte;
+ avs_dsp_process_response(adev, msg.val);
+
+ /* tell DSP we accepted its message */
+ snd_hdac_adsp_updatel(adev, SKL_ADSP_REG_HIPCT,
+ SKL_ADSP_HIPCT_BUSY, SKL_ADSP_HIPCT_BUSY);
+ /* unmask busy interrupt */
+ snd_hdac_adsp_updatel(adev, SKL_ADSP_REG_HIPCCTL,
+ AVS_ADSP_HIPCCTL_BUSY, AVS_ADSP_HIPCCTL_BUSY);
+
+ return IRQ_HANDLED;
+}
+
+static bool avs_ipc_is_busy(struct avs_ipc *ipc)
+{
+ struct avs_dev *adev = to_avs_dev(ipc->dev);
+ u32 hipc_rsp;
+
+ hipc_rsp = snd_hdac_adsp_readl(adev, SKL_ADSP_REG_HIPCT);
+ return hipc_rsp & SKL_ADSP_HIPCT_BUSY;
+}
+
+static int avs_ipc_wait_busy_completion(struct avs_ipc *ipc, int timeout)
+{
+ u32 repeats_left = 128; /* to avoid infinite looping */
+ int ret;
+
+again:
+ ret = wait_for_completion_timeout(&ipc->busy_completion, msecs_to_jiffies(timeout));
+
+ /* DSP could be unresponsive at this point. */
+ if (!ipc->ready)
+ return -EPERM;
+
+ if (!ret) {
+ if (!avs_ipc_is_busy(ipc))
+ return -ETIMEDOUT;
+ /*
+ * Firmware did its job, either notification or reply
+ * has been received - now wait until it's processed.
+ */
+ wait_for_completion_killable(&ipc->busy_completion);
+ }
+
+ /* Ongoing notification's bottom-half may cause early wakeup */
+ spin_lock(&ipc->rx_lock);
+ if (!ipc->rx_completed) {
+ if (repeats_left) {
+ /* Reply delayed due to notification. */
+ repeats_left--;
+ reinit_completion(&ipc->busy_completion);
+ spin_unlock(&ipc->rx_lock);
+ goto again;
+ }
+
+ spin_unlock(&ipc->rx_lock);
+ return -ETIMEDOUT;
+ }
+
+ spin_unlock(&ipc->rx_lock);
+ return 0;
+}
+
+static void avs_ipc_msg_init(struct avs_ipc *ipc, struct avs_ipc_msg *reply)
+{
+ lockdep_assert_held(&ipc->rx_lock);
+
+ ipc->rx.header = 0;
+ ipc->rx.size = reply ? reply->size : 0;
+ ipc->rx_completed = false;
+
+ reinit_completion(&ipc->done_completion);
+ reinit_completion(&ipc->busy_completion);
+}
+
+static void avs_dsp_send_tx(struct avs_dev *adev, struct avs_ipc_msg *tx)
+{
+ tx->header |= SKL_ADSP_HIPCI_BUSY;
+
+ if (tx->size)
+ memcpy_toio(avs_downlink_addr(adev), tx->data, tx->size);
+ snd_hdac_adsp_writel(adev, SKL_ADSP_REG_HIPCIE, tx->header >> 32);
+ snd_hdac_adsp_writel(adev, SKL_ADSP_REG_HIPCI, tx->header & UINT_MAX);
+}
+
+static int avs_dsp_do_send_msg(struct avs_dev *adev, struct avs_ipc_msg *request,
+ struct avs_ipc_msg *reply, int timeout)
+{
+ struct avs_ipc *ipc = adev->ipc;
+ int ret;
+
+ if (!ipc->ready)
+ return -EPERM;
+
+ mutex_lock(&ipc->msg_mutex);
+
+ spin_lock(&ipc->rx_lock);
+ avs_ipc_msg_init(ipc, reply);
+ avs_dsp_send_tx(adev, request);
+ spin_unlock(&ipc->rx_lock);
+
+ ret = avs_ipc_wait_busy_completion(ipc, timeout);
+ if (ret) {
+ if (ret == -ETIMEDOUT) {
+ dev_crit(adev->dev, "communication severed: %d, rebooting dsp..\n", ret);
+
+ avs_ipc_block(ipc);
+ }
+ goto exit;
+ }
+
+ ret = ipc->rx.rsp.status;
+ if (reply) {
+ reply->header = ipc->rx.header;
+ if (reply->data && ipc->rx.size)
+ memcpy(reply->data, ipc->rx.data, reply->size);
+ }
+
+exit:
+ mutex_unlock(&ipc->msg_mutex);
+ return ret;
+}
+
+int avs_dsp_send_msg_timeout(struct avs_dev *adev, struct avs_ipc_msg *request,
+ struct avs_ipc_msg *reply, int timeout)
+{
+ return avs_dsp_do_send_msg(adev, request, reply, timeout);
+}
+
+int avs_dsp_send_msg(struct avs_dev *adev, struct avs_ipc_msg *request,
+ struct avs_ipc_msg *reply)
+{
+ return avs_dsp_send_msg_timeout(adev, request, reply, adev->ipc->default_timeout_ms);
+}
+
+static int avs_dsp_do_send_rom_msg(struct avs_dev *adev, struct avs_ipc_msg *request, int timeout)
+{
+ struct avs_ipc *ipc = adev->ipc;
+ int ret;
+
+ mutex_lock(&ipc->msg_mutex);
+
+ spin_lock(&ipc->rx_lock);
+ avs_ipc_msg_init(ipc, NULL);
+ avs_dsp_send_tx(adev, request);
+ spin_unlock(&ipc->rx_lock);
+
+ /* ROM messages must be sent before main core is unstalled */
+ ret = avs_dsp_op(adev, stall, AVS_MAIN_CORE_MASK, false);
+ if (!ret) {
+ ret = wait_for_completion_timeout(&ipc->done_completion, msecs_to_jiffies(timeout));
+ ret = ret ? 0 : -ETIMEDOUT;
+ }
+
+ mutex_unlock(&ipc->msg_mutex);
+
+ return ret;
+}
+
+int avs_dsp_send_rom_msg_timeout(struct avs_dev *adev, struct avs_ipc_msg *request, int timeout)
+{
+ return avs_dsp_do_send_rom_msg(adev, request, timeout);
+}
+
+int avs_dsp_send_rom_msg(struct avs_dev *adev, struct avs_ipc_msg *request)
+{
+ return avs_dsp_send_rom_msg_timeout(adev, request, adev->ipc->default_timeout_ms);
+}
+
+void avs_dsp_interrupt_control(struct avs_dev *adev, bool enable)
+{
+ u32 value, mask;
+
+ /*
+ * No particular bit setting order. All of these are required
+ * to have a functional SW <-> FW communication.
+ */
+ value = enable ? AVS_ADSP_ADSPIC_IPC : 0;
+ snd_hdac_adsp_updatel(adev, AVS_ADSP_REG_ADSPIC, AVS_ADSP_ADSPIC_IPC, value);
+
+ mask = AVS_ADSP_HIPCCTL_DONE | AVS_ADSP_HIPCCTL_BUSY;
+ value = enable ? mask : 0;
+ snd_hdac_adsp_updatel(adev, SKL_ADSP_REG_HIPCCTL, mask, value);
+}
+
+int avs_ipc_init(struct avs_ipc *ipc, struct device *dev)
+{
+ ipc->rx.data = devm_kzalloc(dev, AVS_MAILBOX_SIZE, GFP_KERNEL);
+ if (!ipc->rx.data)
+ return -ENOMEM;
+
+ ipc->dev = dev;
+ ipc->ready = false;
+ ipc->default_timeout_ms = AVS_IPC_TIMEOUT_MS;
+ init_completion(&ipc->done_completion);
+ init_completion(&ipc->busy_completion);
+ spin_lock_init(&ipc->rx_lock);
+ mutex_init(&ipc->msg_mutex);
+
+ return 0;
+}
+
+void avs_ipc_block(struct avs_ipc *ipc)
+{
+ ipc->ready = false;
+}
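[Editor's note] A hedged sketch, not part of the patch, of how the top and bottom interrupt halves above might be wired up; the probe context is hypothetical as this series does not yet include the PCI probe code. pci_request_irq() is the same helper cldma.c uses, and avs_dsp_interrupt_control() is defined in this file.

static int avs_setup_ipc_irq(struct avs_dev *adev)
{
	struct pci_dev *pci = to_pci_dev(adev->dev);
	int ret;

	/* Top half acks the DONE/BUSY bits, bottom half parses the response. */
	ret = pci_request_irq(pci, 0, avs_dsp_irq_handler, avs_dsp_irq_thread,
			      adev, KBUILD_MODNAME);
	if (ret < 0)
		return ret;

	/* Unmask ADSPIC IPC and HIPCCTL DONE/BUSY interrupts. */
	avs_dsp_interrupt_control(adev, true);
	return 0;
}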
diff --git a/sound/soc/intel/avs/loader.c b/sound/soc/intel/avs/loader.c
new file mode 100644
index 000000000000..c47f85161d95
--- /dev/null
+++ b/sound/soc/intel/avs/loader.c
@@ -0,0 +1,608 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright(c) 2021-2022 Intel Corporation. All rights reserved.
+//
+// Authors: Cezary Rojewski <cezary.rojewski@intel.com>
+// Amadeusz Slawinski <amadeuszx.slawinski@linux.intel.com>
+//
+
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <sound/hdaudio.h>
+#include <sound/hdaudio_ext.h>
+#include "avs.h"
+#include "cldma.h"
+#include "messages.h"
+#include "registers.h"
+
+#define AVS_ROM_STS_MASK 0xFF
+#define AVS_ROM_INIT_DONE 0x1
+#define SKL_ROM_BASEFW_ENTERED 0xF
+#define APL_ROM_FW_ENTERED 0x5
+#define AVS_ROM_INIT_POLLING_US 5
+#define SKL_ROM_INIT_TIMEOUT_US 1000000
+#define APL_ROM_INIT_TIMEOUT_US 300000
+#define APL_ROM_INIT_RETRIES 3
+
+#define AVS_FW_INIT_POLLING_US 500
+#define AVS_FW_INIT_TIMEOUT_US 3000000
+#define AVS_FW_INIT_TIMEOUT_MS 3000
+
+#define AVS_CLDMA_START_DELAY_MS 100
+
+#define AVS_ROOT_DIR "intel/avs"
+#define AVS_BASEFW_FILENAME "dsp_basefw.bin"
+#define AVS_EXT_MANIFEST_MAGIC 0x31454124
+#define SKL_MANIFEST_MAGIC 0x00000006
+#define SKL_ADSPFW_OFFSET 0x284
+
+/* Occasionally, engineering (release candidate) firmware is provided for testing. */
+static bool debug_ignore_fw_version;
+module_param_named(ignore_fw_version, debug_ignore_fw_version, bool, 0444);
+MODULE_PARM_DESC(ignore_fw_version, "Verify FW version 0=yes (default), 1=no");
+
+#define AVS_LIB_NAME_SIZE 8
+
+struct avs_fw_manifest {
+ u32 id;
+ u32 len;
+ char name[AVS_LIB_NAME_SIZE];
+ u32 preload_page_count;
+ u32 img_flags;
+ u32 feature_mask;
+ struct avs_fw_version version;
+} __packed;
+
+struct avs_fw_ext_manifest {
+ u32 id;
+ u32 len;
+ u16 version_major;
+ u16 version_minor;
+ u32 entries;
+} __packed;
+
+static int avs_fw_ext_manifest_strip(struct firmware *fw)
+{
+ struct avs_fw_ext_manifest *man;
+
+ if (fw->size < sizeof(*man))
+ return -EINVAL;
+
+ man = (struct avs_fw_ext_manifest *)fw->data;
+ if (man->id == AVS_EXT_MANIFEST_MAGIC) {
+ fw->data += man->len;
+ fw->size -= man->len;
+ }
+
+ return 0;
+}
+
+static int avs_fw_manifest_offset(struct firmware *fw)
+{
+ /* Header type found in first DWORD of fw binary. */
+ u32 magic = *(u32 *)fw->data;
+
+ switch (magic) {
+ case SKL_MANIFEST_MAGIC:
+ return SKL_ADSPFW_OFFSET;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int avs_fw_manifest_strip_verify(struct avs_dev *adev, struct firmware *fw,
+ const struct avs_fw_version *min)
+{
+ struct avs_fw_manifest *man;
+ int offset, ret;
+
+ ret = avs_fw_ext_manifest_strip(fw);
+ if (ret)
+ return ret;
+
+ offset = avs_fw_manifest_offset(fw);
+ if (offset < 0)
+ return offset;
+
+ if (fw->size < offset + sizeof(*man))
+ return -EINVAL;
+ if (!min)
+ return 0;
+
+ man = (struct avs_fw_manifest *)(fw->data + offset);
+ if (man->version.major != min->major ||
+ man->version.minor != min->minor ||
+ man->version.hotfix != min->hotfix ||
+ man->version.build < min->build) {
+ dev_warn(adev->dev, "bad FW version %d.%d.%d.%d, expected %d.%d.%d.%d or newer\n",
+ man->version.major, man->version.minor,
+ man->version.hotfix, man->version.build,
+ min->major, min->minor, min->hotfix, min->build);
+
+ if (!debug_ignore_fw_version)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int avs_cldma_load_basefw(struct avs_dev *adev, struct firmware *fw)
+{
+ struct hda_cldma *cl = &code_loader;
+ unsigned int reg;
+ int ret;
+
+ ret = avs_dsp_op(adev, power, AVS_MAIN_CORE_MASK, true);
+ if (ret < 0)
+ return ret;
+
+ ret = avs_dsp_op(adev, reset, AVS_MAIN_CORE_MASK, false);
+ if (ret < 0)
+ return ret;
+
+ ret = hda_cldma_reset(cl);
+ if (ret < 0) {
+ dev_err(adev->dev, "cldma reset failed: %d\n", ret);
+ return ret;
+ }
+ hda_cldma_setup(cl);
+
+ ret = avs_dsp_op(adev, stall, AVS_MAIN_CORE_MASK, false);
+ if (ret < 0)
+ return ret;
+
+ reinit_completion(&adev->fw_ready);
+ avs_dsp_op(adev, int_control, true);
+
+ /* await ROM init */
+ ret = snd_hdac_adsp_readl_poll(adev, AVS_FW_REG_STATUS(adev), reg,
+ (reg & AVS_ROM_INIT_DONE) == AVS_ROM_INIT_DONE,
+ AVS_ROM_INIT_POLLING_US, SKL_ROM_INIT_TIMEOUT_US);
+ if (ret < 0) {
+ dev_err(adev->dev, "rom init timeout: %d\n", ret);
+ avs_dsp_core_disable(adev, AVS_MAIN_CORE_MASK);
+ return ret;
+ }
+
+ hda_cldma_set_data(cl, (void *)fw->data, fw->size);
+ /* transfer firmware */
+ hda_cldma_transfer(cl, 0);
+ ret = snd_hdac_adsp_readl_poll(adev, AVS_FW_REG_STATUS(adev), reg,
+ (reg & AVS_ROM_STS_MASK) == SKL_ROM_BASEFW_ENTERED,
+ AVS_FW_INIT_POLLING_US, AVS_FW_INIT_TIMEOUT_US);
+ hda_cldma_stop(cl);
+ if (ret < 0) {
+ dev_err(adev->dev, "transfer fw failed: %d\n", ret);
+ avs_dsp_core_disable(adev, AVS_MAIN_CORE_MASK);
+ return ret;
+ }
+
+ return 0;
+}
+
+int avs_cldma_load_library(struct avs_dev *adev, struct firmware *lib, u32 id)
+{
+ struct hda_cldma *cl = &code_loader;
+ int ret;
+
+ hda_cldma_set_data(cl, (void *)lib->data, lib->size);
+ /* transfer modules manifest */
+ hda_cldma_transfer(cl, msecs_to_jiffies(AVS_CLDMA_START_DELAY_MS));
+
+ /* DMA id ignored as there is only ever one code-loader DMA */
+ ret = avs_ipc_load_library(adev, 0, id);
+ hda_cldma_stop(cl);
+
+ if (ret) {
+ ret = AVS_IPC_RET(ret);
+ dev_err(adev->dev, "transfer lib %d failed: %d\n", id, ret);
+ }
+
+ return ret;
+}
+
+static int avs_cldma_load_module(struct avs_dev *adev, struct avs_module_entry *mentry)
+{
+ struct hda_cldma *cl = &code_loader;
+ const struct firmware *mod;
+ char *mod_name;
+ int ret;
+
+ mod_name = kasprintf(GFP_KERNEL, "%s/%s/dsp_mod_%pUL.bin", AVS_ROOT_DIR,
+ adev->spec->name, mentry->uuid.b);
+ if (!mod_name)
+ return -ENOMEM;
+
+ ret = avs_request_firmware(adev, &mod, mod_name);
+ kfree(mod_name);
+ if (ret < 0)
+ return ret;
+
+ hda_cldma_set_data(cl, (void *)mod->data, mod->size);
+ hda_cldma_transfer(cl, msecs_to_jiffies(AVS_CLDMA_START_DELAY_MS));
+ ret = avs_ipc_load_modules(adev, &mentry->module_id, 1);
+ hda_cldma_stop(cl);
+
+ if (ret) {
+ dev_err(adev->dev, "load module %d failed: %d\n", mentry->module_id, ret);
+ avs_release_last_firmware(adev);
+ return AVS_IPC_RET(ret);
+ }
+
+ return 0;
+}
+
+int avs_cldma_transfer_modules(struct avs_dev *adev, bool load,
+ struct avs_module_entry *mods, u32 num_mods)
+{
+ u16 *mod_ids;
+ int ret, i;
+
+ /* Either load to DSP or unload them to free space. */
+ if (load) {
+ for (i = 0; i < num_mods; i++) {
+ ret = avs_cldma_load_module(adev, &mods[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+ }
+
+ mod_ids = kcalloc(num_mods, sizeof(u16), GFP_KERNEL);
+ if (!mod_ids)
+ return -ENOMEM;
+
+ for (i = 0; i < num_mods; i++)
+ mod_ids[i] = mods[i].module_id;
+
+ ret = avs_ipc_unload_modules(adev, mod_ids, num_mods);
+ kfree(mod_ids);
+ if (ret)
+ return AVS_IPC_RET(ret);
+
+ return 0;
+}
+
+static int
+avs_hda_init_rom(struct avs_dev *adev, unsigned int dma_id, bool purge)
+{
+ const struct avs_spec *const spec = adev->spec;
+ unsigned int corex_mask, reg;
+ int ret;
+
+ corex_mask = spec->core_init_mask & ~AVS_MAIN_CORE_MASK;
+
+ ret = avs_dsp_op(adev, power, spec->core_init_mask, true);
+ if (ret < 0)
+ goto err;
+
+ ret = avs_dsp_op(adev, reset, AVS_MAIN_CORE_MASK, false);
+ if (ret < 0)
+ goto err;
+
+ reinit_completion(&adev->fw_ready);
+ avs_dsp_op(adev, int_control, true);
+
+ /* set boot config */
+ ret = avs_ipc_set_boot_config(adev, dma_id, purge);
+ if (ret) {
+ ret = AVS_IPC_RET(ret);
+ goto err;
+ }
+
+ /* await ROM init */
+ ret = snd_hdac_adsp_readq_poll(adev, spec->rom_status, reg,
+ (reg & 0xF) == AVS_ROM_INIT_DONE ||
+ (reg & 0xF) == APL_ROM_FW_ENTERED,
+ AVS_ROM_INIT_POLLING_US, APL_ROM_INIT_TIMEOUT_US);
+ if (ret < 0) {
+ dev_err(adev->dev, "rom init timeout: %d\n", ret);
+ goto err;
+ }
+
+ /* power down non-main cores */
+ if (corex_mask) {
+ ret = avs_dsp_op(adev, power, corex_mask, false);
+ if (ret < 0)
+ goto err;
+ }
+
+ return 0;
+
+err:
+ avs_dsp_core_disable(adev, spec->core_init_mask);
+ return ret;
+}
+
+static int avs_imr_load_basefw(struct avs_dev *adev)
+{
+ int ret;
+
+ /* DMA id ignored when flashing from IMR as no transfer occurs. */
+ ret = avs_hda_init_rom(adev, 0, false);
+ if (ret < 0) {
+ dev_err(adev->dev, "rom init failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = wait_for_completion_timeout(&adev->fw_ready,
+ msecs_to_jiffies(AVS_FW_INIT_TIMEOUT_MS));
+ if (!ret) {
+ dev_err(adev->dev, "firmware ready timeout\n");
+ avs_dsp_core_disable(adev, AVS_MAIN_CORE_MASK);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+int avs_hda_load_basefw(struct avs_dev *adev, struct firmware *fw)
+{
+ struct snd_pcm_substream substream;
+ struct snd_dma_buffer dmab;
+ struct hdac_ext_stream *estream;
+ struct hdac_stream *hstream;
+ struct hdac_bus *bus = &adev->base.core;
+ unsigned int sdfmt, reg;
+ int ret, i;
+
+ /* configure hda dma */
+ memset(&substream, 0, sizeof(substream));
+ substream.stream = SNDRV_PCM_STREAM_PLAYBACK;
+ estream = snd_hdac_ext_stream_assign(bus, &substream,
+ HDAC_EXT_STREAM_TYPE_HOST);
+ if (!estream)
+ return -ENODEV;
+ hstream = hdac_stream(estream);
+
+ /* code loading performed with default format */
+ sdfmt = snd_hdac_calc_stream_format(48000, 1, SNDRV_PCM_FORMAT_S32_LE, 32, 0);
+ ret = snd_hdac_dsp_prepare(hstream, sdfmt, fw->size, &dmab);
+ if (ret < 0)
+ goto release_stream;
+
+ /* enable SPIB for hda stream */
+ snd_hdac_ext_stream_spbcap_enable(bus, true, hstream->index);
+ ret = snd_hdac_ext_stream_set_spib(bus, estream, fw->size);
+ if (ret)
+ goto cleanup_resources;
+
+ memcpy(dmab.area, fw->data, fw->size);
+
+ for (i = 0; i < APL_ROM_INIT_RETRIES; i++) {
+ unsigned int dma_id = hstream->stream_tag - 1;
+
+ ret = avs_hda_init_rom(adev, dma_id, true);
+ if (!ret)
+ break;
+ dev_info(adev->dev, "#%d rom init fail: %d\n", i + 1, ret);
+ }
+ if (ret < 0)
+ goto cleanup_resources;
+
+ /* transfer firmware */
+ snd_hdac_dsp_trigger(hstream, true);
+ ret = snd_hdac_adsp_readl_poll(adev, AVS_FW_REG_STATUS(adev), reg,
+ (reg & AVS_ROM_STS_MASK) == APL_ROM_FW_ENTERED,
+ AVS_FW_INIT_POLLING_US, AVS_FW_INIT_TIMEOUT_US);
+ snd_hdac_dsp_trigger(hstream, false);
+ if (ret < 0) {
+ dev_err(adev->dev, "transfer fw failed: %d\n", ret);
+ avs_dsp_core_disable(adev, AVS_MAIN_CORE_MASK);
+ }
+
+cleanup_resources:
+ /* disable SPIB for hda stream */
+ snd_hdac_ext_stream_spbcap_enable(bus, false, hstream->index);
+ snd_hdac_ext_stream_set_spib(bus, estream, 0);
+
+ snd_hdac_dsp_cleanup(hstream, &dmab);
+release_stream:
+ snd_hdac_ext_stream_release(estream, HDAC_EXT_STREAM_TYPE_HOST);
+
+ return ret;
+}
+
+int avs_hda_load_library(struct avs_dev *adev, struct firmware *lib, u32 id)
+{
+ struct snd_pcm_substream substream;
+ struct snd_dma_buffer dmab;
+ struct hdac_ext_stream *estream;
+ struct hdac_stream *stream;
+ struct hdac_bus *bus = &adev->base.core;
+ unsigned int sdfmt;
+ int ret;
+
+ /* configure hda dma */
+ memset(&substream, 0, sizeof(substream));
+ substream.stream = SNDRV_PCM_STREAM_PLAYBACK;
+ estream = snd_hdac_ext_stream_assign(bus, &substream,
+ HDAC_EXT_STREAM_TYPE_HOST);
+ if (!estream)
+ return -ENODEV;
+ stream = hdac_stream(estream);
+
+ /* code loading performed with default format */
+ sdfmt = snd_hdac_calc_stream_format(48000, 1, SNDRV_PCM_FORMAT_S32_LE, 32, 0);
+ ret = snd_hdac_dsp_prepare(stream, sdfmt, lib->size, &dmab);
+ if (ret < 0)
+ goto release_stream;
+
+ /* enable SPIB for hda stream */
+ snd_hdac_ext_stream_spbcap_enable(bus, true, stream->index);
+ snd_hdac_ext_stream_set_spib(bus, estream, lib->size);
+
+ memcpy(dmab.area, lib->data, lib->size);
+
+ /* transfer firmware */
+ snd_hdac_dsp_trigger(stream, true);
+ ret = avs_ipc_load_library(adev, stream->stream_tag - 1, id);
+ snd_hdac_dsp_trigger(stream, false);
+ if (ret) {
+ dev_err(adev->dev, "transfer lib %d failed: %d\n", id, ret);
+ ret = AVS_IPC_RET(ret);
+ }
+
+ /* disable SPIB for hda stream */
+ snd_hdac_ext_stream_spbcap_enable(bus, false, stream->index);
+ snd_hdac_ext_stream_set_spib(bus, estream, 0);
+
+ snd_hdac_dsp_cleanup(stream, &dmab);
+release_stream:
+ snd_hdac_ext_stream_release(estream, HDAC_EXT_STREAM_TYPE_HOST);
+
+ return ret;
+}
+
+int avs_hda_transfer_modules(struct avs_dev *adev, bool load,
+ struct avs_module_entry *mods, u32 num_mods)
+{
+ /*
+	 * All platforms without CLDMA are equipped with IMR,
+	 * so module transfer is offloaded to the DSP itself.
+ */
+ return 0;
+}
+
+static int avs_dsp_load_basefw(struct avs_dev *adev)
+{
+ const struct avs_fw_version *min_req;
+ const struct avs_spec *const spec = adev->spec;
+ const struct firmware *fw;
+ struct firmware stripped_fw;
+ char *filename;
+ int ret;
+
+ filename = kasprintf(GFP_KERNEL, "%s/%s/%s", AVS_ROOT_DIR, spec->name, AVS_BASEFW_FILENAME);
+ if (!filename)
+ return -ENOMEM;
+
+ ret = avs_request_firmware(adev, &fw, filename);
+ kfree(filename);
+ if (ret < 0) {
+ dev_err(adev->dev, "request firmware failed: %d\n", ret);
+ return ret;
+ }
+
+ stripped_fw = *fw;
+ min_req = &adev->spec->min_fw_version;
+
+ ret = avs_fw_manifest_strip_verify(adev, &stripped_fw, min_req);
+ if (ret < 0) {
+ dev_err(adev->dev, "invalid firmware data: %d\n", ret);
+ goto release_fw;
+ }
+
+ ret = avs_dsp_op(adev, load_basefw, &stripped_fw);
+ if (ret < 0) {
+ dev_err(adev->dev, "basefw load failed: %d\n", ret);
+ goto release_fw;
+ }
+
+ ret = wait_for_completion_timeout(&adev->fw_ready,
+ msecs_to_jiffies(AVS_FW_INIT_TIMEOUT_MS));
+ if (!ret) {
+ dev_err(adev->dev, "firmware ready timeout\n");
+ avs_dsp_core_disable(adev, AVS_MAIN_CORE_MASK);
+ ret = -ETIMEDOUT;
+ goto release_fw;
+ }
+
+ return 0;
+
+release_fw:
+ avs_release_last_firmware(adev);
+ return ret;
+}
+
+int avs_dsp_boot_firmware(struct avs_dev *adev, bool purge)
+{
+ int ret, i;
+
+ /* Forgo full boot if flash from IMR succeeds. */
+ if (!purge && avs_platattr_test(adev, IMR)) {
+ ret = avs_imr_load_basefw(adev);
+ if (!ret)
+ return 0;
+
+ dev_dbg(adev->dev, "firmware flash from imr failed: %d\n", ret);
+ }
+
+ /* Full boot, clear cached data except for basefw (slot 0). */
+ for (i = 1; i < adev->fw_cfg.max_libs_count; i++)
+ memset(adev->lib_names[i], 0, AVS_LIB_NAME_SIZE);
+
+ avs_hda_clock_gating_enable(adev, false);
+ avs_hda_l1sen_enable(adev, false);
+
+ ret = avs_dsp_load_basefw(adev);
+
+ avs_hda_l1sen_enable(adev, true);
+ avs_hda_clock_gating_enable(adev, true);
+
+ if (ret < 0)
+ return ret;
+
+ /* With all code loaded, refresh module information. */
+ ret = avs_module_info_init(adev, true);
+ if (ret) {
+ dev_err(adev->dev, "init module info failed: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+int avs_dsp_first_boot_firmware(struct avs_dev *adev)
+{
+ int ret, i;
+
+ if (avs_platattr_test(adev, CLDMA)) {
+ ret = hda_cldma_init(&code_loader, &adev->base.core,
+ adev->dsp_ba, AVS_CL_DEFAULT_BUFFER_SIZE);
+ if (ret < 0) {
+ dev_err(adev->dev, "cldma init failed: %d\n", ret);
+ return ret;
+ }
+ }
+
+ ret = avs_dsp_boot_firmware(adev, true);
+ if (ret < 0) {
+ dev_err(adev->dev, "firmware boot failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = avs_ipc_get_hw_config(adev, &adev->hw_cfg);
+ if (ret) {
+ dev_err(adev->dev, "get hw cfg failed: %d\n", ret);
+ return AVS_IPC_RET(ret);
+ }
+
+ ret = avs_ipc_get_fw_config(adev, &adev->fw_cfg);
+ if (ret) {
+ dev_err(adev->dev, "get fw cfg failed: %d\n", ret);
+ return AVS_IPC_RET(ret);
+ }
+
+ adev->core_refs = devm_kcalloc(adev->dev, adev->hw_cfg.dsp_cores,
+ sizeof(*adev->core_refs), GFP_KERNEL);
+ adev->lib_names = devm_kcalloc(adev->dev, adev->fw_cfg.max_libs_count,
+ sizeof(*adev->lib_names), GFP_KERNEL);
+ if (!adev->core_refs || !adev->lib_names)
+ return -ENOMEM;
+
+ for (i = 0; i < adev->fw_cfg.max_libs_count; i++) {
+ adev->lib_names[i] = devm_kzalloc(adev->dev, AVS_LIB_NAME_SIZE, GFP_KERNEL);
+ if (!adev->lib_names[i])
+ return -ENOMEM;
+ }
+
+ /* basefw always occupies slot 0 */
+ strcpy(&adev->lib_names[0][0], "BASEFW");
+
+ ida_init(&adev->ppl_ida);
+
+ return 0;
+}
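A minimal sketch of how the two boot entry points above are expected to be wired up; the probe and resume call sites are not part of this patch, so the function names below are hypothetical:

static int example_probe_boot(struct avs_dev *adev)
{
	/* Full first boot: purges the DSP, then caches FW/HW config and library names. */
	return avs_dsp_first_boot_firmware(adev);
}

static int example_resume_boot(struct avs_dev *adev)
{
	/* purge=false lets avs_dsp_boot_firmware() try the lighter IMR path first. */
	return avs_dsp_boot_firmware(adev, false);
}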
diff --git a/sound/soc/intel/avs/messages.c b/sound/soc/intel/avs/messages.c
new file mode 100644
index 000000000000..004da166a943
--- /dev/null
+++ b/sound/soc/intel/avs/messages.c
@@ -0,0 +1,695 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright(c) 2021-2022 Intel Corporation. All rights reserved.
+//
+// Authors: Cezary Rojewski <cezary.rojewski@intel.com>
+// Amadeusz Slawinski <amadeuszx.slawinski@linux.intel.com>
+//
+
+#include <linux/slab.h>
+#include "avs.h"
+#include "messages.h"
+
+#define AVS_CL_TIMEOUT_MS 5000
+
+int avs_ipc_set_boot_config(struct avs_dev *adev, u32 dma_id, u32 purge)
+{
+ union avs_global_msg msg = AVS_GLOBAL_REQUEST(ROM_CONTROL);
+ struct avs_ipc_msg request = {{0}};
+ int ret;
+
+ msg.boot_cfg.rom_ctrl_msg_type = AVS_ROM_SET_BOOT_CONFIG;
+ msg.boot_cfg.dma_id = dma_id;
+ msg.boot_cfg.purge_request = purge;
+ request.header = msg.val;
+
+ ret = avs_dsp_send_rom_msg(adev, &request);
+ if (ret)
+ avs_ipc_err(adev, &request, "set boot config", ret);
+
+ return ret;
+}
+
+int avs_ipc_load_modules(struct avs_dev *adev, u16 *mod_ids, u32 num_mod_ids)
+{
+ union avs_global_msg msg = AVS_GLOBAL_REQUEST(LOAD_MULTIPLE_MODULES);
+ struct avs_ipc_msg request;
+ int ret;
+
+ msg.load_multi_mods.mod_cnt = num_mod_ids;
+ request.header = msg.val;
+ request.data = mod_ids;
+ request.size = sizeof(*mod_ids) * num_mod_ids;
+
+ ret = avs_dsp_send_msg_timeout(adev, &request, NULL, AVS_CL_TIMEOUT_MS);
+ if (ret)
+ avs_ipc_err(adev, &request, "load multiple modules", ret);
+
+ return ret;
+}
+
+int avs_ipc_unload_modules(struct avs_dev *adev, u16 *mod_ids, u32 num_mod_ids)
+{
+ union avs_global_msg msg = AVS_GLOBAL_REQUEST(UNLOAD_MULTIPLE_MODULES);
+ struct avs_ipc_msg request;
+ int ret;
+
+ msg.load_multi_mods.mod_cnt = num_mod_ids;
+ request.header = msg.val;
+ request.data = mod_ids;
+ request.size = sizeof(*mod_ids) * num_mod_ids;
+
+ ret = avs_dsp_send_msg_timeout(adev, &request, NULL, AVS_CL_TIMEOUT_MS);
+ if (ret)
+ avs_ipc_err(adev, &request, "unload multiple modules", ret);
+
+ return ret;
+}
+
+int avs_ipc_load_library(struct avs_dev *adev, u32 dma_id, u32 lib_id)
+{
+ union avs_global_msg msg = AVS_GLOBAL_REQUEST(LOAD_LIBRARY);
+ struct avs_ipc_msg request = {{0}};
+ int ret;
+
+ msg.load_lib.dma_id = dma_id;
+ msg.load_lib.lib_id = lib_id;
+ request.header = msg.val;
+
+ ret = avs_dsp_send_msg_timeout(adev, &request, NULL, AVS_CL_TIMEOUT_MS);
+ if (ret)
+ avs_ipc_err(adev, &request, "load library", ret);
+
+ return ret;
+}
+
+int avs_ipc_create_pipeline(struct avs_dev *adev, u16 req_size, u8 priority,
+ u8 instance_id, bool lp, u16 attributes)
+{
+ union avs_global_msg msg = AVS_GLOBAL_REQUEST(CREATE_PIPELINE);
+ struct avs_ipc_msg request = {{0}};
+ int ret;
+
+ msg.create_ppl.ppl_mem_size = req_size;
+ msg.create_ppl.ppl_priority = priority;
+ msg.create_ppl.instance_id = instance_id;
+ msg.ext.create_ppl.lp = lp;
+ msg.ext.create_ppl.attributes = attributes;
+ request.header = msg.val;
+
+ ret = avs_dsp_send_msg(adev, &request, NULL);
+ if (ret)
+ avs_ipc_err(adev, &request, "create pipeline", ret);
+
+ return ret;
+}
+
+int avs_ipc_delete_pipeline(struct avs_dev *adev, u8 instance_id)
+{
+ union avs_global_msg msg = AVS_GLOBAL_REQUEST(DELETE_PIPELINE);
+ struct avs_ipc_msg request = {{0}};
+ int ret;
+
+ msg.ppl.instance_id = instance_id;
+ request.header = msg.val;
+
+ ret = avs_dsp_send_msg(adev, &request, NULL);
+ if (ret)
+ avs_ipc_err(adev, &request, "delete pipeline", ret);
+
+ return ret;
+}
+
+int avs_ipc_set_pipeline_state(struct avs_dev *adev, u8 instance_id,
+ enum avs_pipeline_state state)
+{
+ union avs_global_msg msg = AVS_GLOBAL_REQUEST(SET_PIPELINE_STATE);
+ struct avs_ipc_msg request = {{0}};
+ int ret;
+
+ msg.set_ppl_state.ppl_id = instance_id;
+ msg.set_ppl_state.state = state;
+ request.header = msg.val;
+
+ ret = avs_dsp_send_msg(adev, &request, NULL);
+ if (ret)
+ avs_ipc_err(adev, &request, "set pipeline state", ret);
+
+ return ret;
+}
+
+int avs_ipc_get_pipeline_state(struct avs_dev *adev, u8 instance_id,
+ enum avs_pipeline_state *state)
+{
+ union avs_global_msg msg = AVS_GLOBAL_REQUEST(GET_PIPELINE_STATE);
+ struct avs_ipc_msg request = {{0}};
+ struct avs_ipc_msg reply = {{0}};
+ int ret;
+
+ msg.get_ppl_state.ppl_id = instance_id;
+ request.header = msg.val;
+
+ ret = avs_dsp_send_msg(adev, &request, &reply);
+ if (ret) {
+ avs_ipc_err(adev, &request, "get pipeline state", ret);
+ return ret;
+ }
+
+ *state = reply.rsp.ext.get_ppl_state.state;
+ return ret;
+}
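Taken together, the four requests above form the pipeline lifecycle. A minimal sketch with placeholder size and id values; the exact state sequence required by firmware is an assumption here:

static int example_pipeline_cycle(struct avs_dev *adev)
{
	u8 ppl_id = 1;
	int ret;

	ret = avs_ipc_create_pipeline(adev, 0x100, 0, ppl_id, false, 0);
	if (ret)
		return AVS_IPC_RET(ret);

	ret = avs_ipc_set_pipeline_state(adev, ppl_id, AVS_PPL_STATE_RUNNING);
	if (ret) {
		avs_ipc_delete_pipeline(adev, ppl_id);
		return AVS_IPC_RET(ret);
	}

	/* ... audio flows ... */

	avs_ipc_set_pipeline_state(adev, ppl_id, AVS_PPL_STATE_RESET);
	ret = avs_ipc_delete_pipeline(adev, ppl_id);
	return ret ? AVS_IPC_RET(ret) : 0;
}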
+
+/*
+ * avs_ipc_init_instance - Initialize module instance
+ *
+ * @adev: Driver context
+ * @module_id: Module-type id
+ * @instance_id: Unique module instance id
+ * @ppl_id: Parent pipeline id
+ * @core_id: DSP core to allocate module on
+ * @domain: Processing domain (low latency or data processing)
+ * @param: Module-type specific configuration
+ * @param_size: Size of @param in bytes
+ *
+ * Argument verification and pipeline state checks are done by the firmware.
+ *
+ * Note: @ppl_id and @core_id are independent of each other as a single
+ * pipeline can be composed of module instances located on different DSP
+ * cores.
+ */
+int avs_ipc_init_instance(struct avs_dev *adev, u16 module_id, u8 instance_id,
+ u8 ppl_id, u8 core_id, u8 domain,
+ void *param, u32 param_size)
+{
+ union avs_module_msg msg = AVS_MODULE_REQUEST(INIT_INSTANCE);
+ struct avs_ipc_msg request;
+ int ret;
+
+ msg.module_id = module_id;
+ msg.instance_id = instance_id;
+ /* firmware expects size provided in dwords */
+ msg.ext.init_instance.param_block_size = DIV_ROUND_UP(param_size, sizeof(u32));
+ msg.ext.init_instance.ppl_instance_id = ppl_id;
+ msg.ext.init_instance.core_id = core_id;
+ msg.ext.init_instance.proc_domain = domain;
+
+ request.header = msg.val;
+ request.data = param;
+ request.size = param_size;
+
+ ret = avs_dsp_send_msg(adev, &request, NULL);
+ if (ret)
+ avs_ipc_err(adev, &request, "init instance", ret);
+
+ return ret;
+}
+
+/*
+ * avs_ipc_delete_instance - Delete module instance
+ *
+ * @adev: Driver context
+ * @module_id: Module-type id
+ * @instance_id: Unique module instance id
+ *
+ * Argument verification and pipeline state checks are done by the firmware.
+ *
+ * Note: only standalone modules, i.e. those without a parent pipeline, shall
+ * be deleted using this IPC message. In all other cases, the pipeline owning
+ * the modules performs the cleanup automatically when it is deleted.
+ */
+int avs_ipc_delete_instance(struct avs_dev *adev, u16 module_id, u8 instance_id)
+{
+ union avs_module_msg msg = AVS_MODULE_REQUEST(DELETE_INSTANCE);
+ struct avs_ipc_msg request = {{0}};
+ int ret;
+
+ msg.module_id = module_id;
+ msg.instance_id = instance_id;
+ request.header = msg.val;
+
+ ret = avs_dsp_send_msg(adev, &request, NULL);
+ if (ret)
+ avs_ipc_err(adev, &request, "delete instance", ret);
+
+ return ret;
+}
+
+/*
+ * avs_ipc_bind - Bind two module instances
+ *
+ * @adev: Driver context
+ * @module_id: Source module-type id
+ * @instance_id: Source module instance id
+ * @dst_module_id: Sink module-type id
+ * @dst_instance_id: Sink module instance id
+ * @dst_queue: Sink module pin to bind @src_queue with
+ * @src_queue: Source module pin to bind @dst_queue with
+ */
+int avs_ipc_bind(struct avs_dev *adev, u16 module_id, u8 instance_id,
+ u16 dst_module_id, u8 dst_instance_id,
+ u8 dst_queue, u8 src_queue)
+{
+ union avs_module_msg msg = AVS_MODULE_REQUEST(BIND);
+ struct avs_ipc_msg request = {{0}};
+ int ret;
+
+ msg.module_id = module_id;
+ msg.instance_id = instance_id;
+ msg.ext.bind_unbind.dst_module_id = dst_module_id;
+ msg.ext.bind_unbind.dst_instance_id = dst_instance_id;
+ msg.ext.bind_unbind.dst_queue = dst_queue;
+ msg.ext.bind_unbind.src_queue = src_queue;
+ request.header = msg.val;
+
+ ret = avs_dsp_send_msg(adev, &request, NULL);
+ if (ret)
+ avs_ipc_err(adev, &request, "bind modules", ret);
+
+ return ret;
+}
+
+/*
+ * avs_ipc_unbind - Unbind two module instances
+ *
+ * @adev: Driver context
+ * @module_id: Source module-type id
+ * @instance_id: Source module instance id
+ * @dst_module_id: Sink module-type id
+ * @dst_instance_id: Sink module instance id
+ * @dst_queue: Sink module pin to unbind @src_queue from
+ * @src_queue: Source module pin to unbind @dst_queue from
+ */
+int avs_ipc_unbind(struct avs_dev *adev, u16 module_id, u8 instance_id,
+ u16 dst_module_id, u8 dst_instance_id,
+ u8 dst_queue, u8 src_queue)
+{
+ union avs_module_msg msg = AVS_MODULE_REQUEST(UNBIND);
+ struct avs_ipc_msg request = {{0}};
+ int ret;
+
+ msg.module_id = module_id;
+ msg.instance_id = instance_id;
+ msg.ext.bind_unbind.dst_module_id = dst_module_id;
+ msg.ext.bind_unbind.dst_instance_id = dst_instance_id;
+ msg.ext.bind_unbind.dst_queue = dst_queue;
+ msg.ext.bind_unbind.src_queue = src_queue;
+ request.header = msg.val;
+
+ ret = avs_dsp_send_msg(adev, &request, NULL);
+ if (ret)
+ avs_ipc_err(adev, &request, "unbind modules", ret);
+
+ return ret;
+}
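INIT_INSTANCE and BIND are what a topology layer would combine to stitch modules into a pipeline. A hedged sketch, with all ids, configs and pin numbers as placeholders and the helper name hypothetical:

static int example_connect_modules(struct avs_dev *adev, u8 ppl_id,
				   u16 src_mod_id, void *src_cfg, u32 src_cfg_size,
				   u16 sink_mod_id, void *sink_cfg, u32 sink_cfg_size)
{
	int ret;

	ret = avs_ipc_init_instance(adev, src_mod_id, 0, ppl_id, 0, 0,
				    src_cfg, src_cfg_size);
	if (ret)
		return AVS_IPC_RET(ret);

	ret = avs_ipc_init_instance(adev, sink_mod_id, 0, ppl_id, 0, 0,
				    sink_cfg, sink_cfg_size);
	if (ret)
		return AVS_IPC_RET(ret);

	/* Connect output queue (pin) 0 of the source to input queue 0 of the sink. */
	ret = avs_ipc_bind(adev, src_mod_id, 0, sink_mod_id, 0, 0, 0);
	return ret ? AVS_IPC_RET(ret) : 0;
}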
+
+static int __avs_ipc_set_large_config(struct avs_dev *adev, u16 module_id, u8 instance_id,
+ u8 param_id, bool init_block, bool final_block,
+ u8 *request_data, size_t request_size, size_t off_size)
+{
+ union avs_module_msg msg = AVS_MODULE_REQUEST(LARGE_CONFIG_SET);
+ struct avs_ipc_msg request;
+ int ret;
+
+ msg.module_id = module_id;
+ msg.instance_id = instance_id;
+ msg.ext.large_config.data_off_size = off_size;
+ msg.ext.large_config.large_param_id = param_id;
+ msg.ext.large_config.final_block = final_block;
+ msg.ext.large_config.init_block = init_block;
+
+ request.header = msg.val;
+ request.data = request_data;
+ request.size = request_size;
+
+ ret = avs_dsp_send_msg(adev, &request, NULL);
+ if (ret)
+ avs_ipc_err(adev, &request, "large config set", ret);
+
+ return ret;
+}
+
+int avs_ipc_set_large_config(struct avs_dev *adev, u16 module_id,
+ u8 instance_id, u8 param_id,
+ u8 *request, size_t request_size)
+{
+ size_t remaining, tx_size;
+ bool final;
+ int ret;
+
+ remaining = request_size;
+ tx_size = min_t(size_t, AVS_MAILBOX_SIZE, remaining);
+ final = (tx_size == remaining);
+
+ /* Initial request states total payload size. */
+ ret = __avs_ipc_set_large_config(adev, module_id, instance_id,
+ param_id, 1, final, request, tx_size,
+ request_size);
+ if (ret)
+ return ret;
+
+ remaining -= tx_size;
+
+	/* Loop over the rest only when the payload exceeds the mailbox size. */
+ while (remaining) {
+ size_t offset;
+
+ offset = request_size - remaining;
+ tx_size = min_t(size_t, AVS_MAILBOX_SIZE, remaining);
+ final = (tx_size == remaining);
+
+ ret = __avs_ipc_set_large_config(adev, module_id, instance_id,
+ param_id, 0, final,
+ request + offset, tx_size,
+ offset);
+ if (ret)
+ return ret;
+
+ remaining -= tx_size;
+ }
+
+ return 0;
+}
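As a worked example of the loop above: with AVS_MAILBOX_SIZE of 4096 bytes, a 10000-byte parameter is sent as three LARGE_CONFIG_SET messages: the first with init_block=1, data_off_size carrying the total size (10000) and a 4096-byte chunk, the second with data_off_size=4096 (the running offset) and another 4096 bytes, and the last with final_block=1, data_off_size=8192 and the remaining 1808 bytes.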
+
+int avs_ipc_get_large_config(struct avs_dev *adev, u16 module_id, u8 instance_id,
+ u8 param_id, u8 *request_data, size_t request_size,
+ u8 **reply_data, size_t *reply_size)
+{
+ union avs_module_msg msg = AVS_MODULE_REQUEST(LARGE_CONFIG_GET);
+ struct avs_ipc_msg request;
+ struct avs_ipc_msg reply = {{0}};
+ size_t size;
+ void *buf;
+ int ret;
+
+ reply.data = kzalloc(AVS_MAILBOX_SIZE, GFP_KERNEL);
+ if (!reply.data)
+ return -ENOMEM;
+
+ msg.module_id = module_id;
+ msg.instance_id = instance_id;
+ msg.ext.large_config.data_off_size = request_size;
+ msg.ext.large_config.large_param_id = param_id;
+ /* final_block is always 0 on request. Updated by fw on reply. */
+ msg.ext.large_config.final_block = 0;
+ msg.ext.large_config.init_block = 1;
+
+ request.header = msg.val;
+ request.data = request_data;
+ request.size = request_size;
+ reply.size = AVS_MAILBOX_SIZE;
+
+ ret = avs_dsp_send_msg(adev, &request, &reply);
+ if (ret) {
+ avs_ipc_err(adev, &request, "large config get", ret);
+ kfree(reply.data);
+ return ret;
+ }
+
+ size = reply.rsp.ext.large_config.data_off_size;
+ buf = krealloc(reply.data, size, GFP_KERNEL);
+ if (!buf) {
+ kfree(reply.data);
+ return -ENOMEM;
+ }
+
+ *reply_data = buf;
+ *reply_size = size;
+
+ return 0;
+}
+
+int avs_ipc_set_dx(struct avs_dev *adev, u32 core_mask, bool powerup)
+{
+ union avs_module_msg msg = AVS_MODULE_REQUEST(SET_DX);
+ struct avs_ipc_msg request;
+ struct avs_dxstate_info dx;
+ int ret;
+
+ dx.core_mask = core_mask;
+ dx.dx_mask = powerup ? core_mask : 0;
+ request.header = msg.val;
+ request.data = &dx;
+ request.size = sizeof(dx);
+
+ ret = avs_dsp_send_msg(adev, &request, NULL);
+ if (ret)
+ avs_ipc_err(adev, &request, "set dx", ret);
+
+ return ret;
+}
+
+/*
+ * avs_ipc_set_d0ix - Set power gating policy (entering D0IX substates)
+ *
+ * @adev: Driver context
+ * @enable_pg: Whether to enable or disable power gating
+ * @streaming: Whether a stream is running when transitioning
+ */
+int avs_ipc_set_d0ix(struct avs_dev *adev, bool enable_pg, bool streaming)
+{
+ union avs_module_msg msg = AVS_MODULE_REQUEST(SET_D0IX);
+ struct avs_ipc_msg request = {{0}};
+ int ret;
+
+ msg.ext.set_d0ix.wake = enable_pg;
+ msg.ext.set_d0ix.streaming = streaming;
+
+ request.header = msg.val;
+
+ ret = avs_dsp_send_msg(adev, &request, NULL);
+ if (ret)
+ avs_ipc_err(adev, &request, "set d0ix", ret);
+
+ return ret;
+}
+
+int avs_ipc_get_fw_config(struct avs_dev *adev, struct avs_fw_cfg *cfg)
+{
+ struct avs_tlv *tlv;
+ size_t payload_size;
+ size_t offset = 0;
+ u8 *payload;
+ int ret;
+
+ ret = avs_ipc_get_large_config(adev, AVS_BASEFW_MOD_ID, AVS_BASEFW_INST_ID,
+ AVS_BASEFW_FIRMWARE_CONFIG, NULL, 0,
+ &payload, &payload_size);
+ if (ret)
+ return ret;
+
+ while (offset < payload_size) {
+ tlv = (struct avs_tlv *)(payload + offset);
+
+ switch (tlv->type) {
+ case AVS_FW_CFG_FW_VERSION:
+ memcpy(&cfg->fw_version, tlv->value, sizeof(cfg->fw_version));
+ break;
+
+ case AVS_FW_CFG_MEMORY_RECLAIMED:
+ cfg->memory_reclaimed = *tlv->value;
+ break;
+
+ case AVS_FW_CFG_SLOW_CLOCK_FREQ_HZ:
+ cfg->slow_clock_freq_hz = *tlv->value;
+ break;
+
+ case AVS_FW_CFG_FAST_CLOCK_FREQ_HZ:
+ cfg->fast_clock_freq_hz = *tlv->value;
+ break;
+
+ case AVS_FW_CFG_ALH_SUPPORT_LEVEL:
+ cfg->alh_support = *tlv->value;
+ break;
+
+ case AVS_FW_CFG_IPC_DL_MAILBOX_BYTES:
+ cfg->ipc_dl_mailbox_bytes = *tlv->value;
+ break;
+
+ case AVS_FW_CFG_IPC_UL_MAILBOX_BYTES:
+ cfg->ipc_ul_mailbox_bytes = *tlv->value;
+ break;
+
+ case AVS_FW_CFG_TRACE_LOG_BYTES:
+ cfg->trace_log_bytes = *tlv->value;
+ break;
+
+ case AVS_FW_CFG_MAX_PPL_COUNT:
+ cfg->max_ppl_count = *tlv->value;
+ break;
+
+ case AVS_FW_CFG_MAX_ASTATE_COUNT:
+ cfg->max_astate_count = *tlv->value;
+ break;
+
+ case AVS_FW_CFG_MAX_MODULE_PIN_COUNT:
+ cfg->max_module_pin_count = *tlv->value;
+ break;
+
+ case AVS_FW_CFG_MODULES_COUNT:
+ cfg->modules_count = *tlv->value;
+ break;
+
+ case AVS_FW_CFG_MAX_MOD_INST_COUNT:
+ cfg->max_mod_inst_count = *tlv->value;
+ break;
+
+ case AVS_FW_CFG_MAX_LL_TASKS_PER_PRI_COUNT:
+ cfg->max_ll_tasks_per_pri_count = *tlv->value;
+ break;
+
+ case AVS_FW_CFG_LL_PRI_COUNT:
+ cfg->ll_pri_count = *tlv->value;
+ break;
+
+ case AVS_FW_CFG_MAX_DP_TASKS_COUNT:
+ cfg->max_dp_tasks_count = *tlv->value;
+ break;
+
+ case AVS_FW_CFG_MAX_LIBS_COUNT:
+ cfg->max_libs_count = *tlv->value;
+ break;
+
+ case AVS_FW_CFG_XTAL_FREQ_HZ:
+ cfg->xtal_freq_hz = *tlv->value;
+ break;
+
+ case AVS_FW_CFG_POWER_GATING_POLICY:
+ cfg->power_gating_policy = *tlv->value;
+ break;
+
+ /* Known but not useful to us. */
+ case AVS_FW_CFG_DMA_BUFFER_CONFIG:
+ case AVS_FW_CFG_SCHEDULER_CONFIG:
+ case AVS_FW_CFG_CLOCKS_CONFIG:
+ break;
+
+ default:
+ dev_info(adev->dev, "Unrecognized fw param: %d\n", tlv->type);
+ break;
+ }
+
+ offset += sizeof(*tlv) + tlv->length;
+ }
+
+	/* No longer needed; we are the get_large_config() caller, so we own and free the payload. */
+ kfree(payload);
+ return ret;
+}
+
+int avs_ipc_get_hw_config(struct avs_dev *adev, struct avs_hw_cfg *cfg)
+{
+ struct avs_tlv *tlv;
+ size_t payload_size;
+ size_t size, offset = 0;
+ u8 *payload;
+ int ret;
+
+ ret = avs_ipc_get_large_config(adev, AVS_BASEFW_MOD_ID, AVS_BASEFW_INST_ID,
+ AVS_BASEFW_HARDWARE_CONFIG, NULL, 0,
+ &payload, &payload_size);
+ if (ret)
+ return ret;
+
+ while (offset < payload_size) {
+ tlv = (struct avs_tlv *)(payload + offset);
+
+ switch (tlv->type) {
+ case AVS_HW_CFG_AVS_VER:
+ cfg->avs_version = *tlv->value;
+ break;
+
+ case AVS_HW_CFG_DSP_CORES:
+ cfg->dsp_cores = *tlv->value;
+ break;
+
+ case AVS_HW_CFG_MEM_PAGE_BYTES:
+ cfg->mem_page_bytes = *tlv->value;
+ break;
+
+ case AVS_HW_CFG_TOTAL_PHYS_MEM_PAGES:
+ cfg->total_phys_mem_pages = *tlv->value;
+ break;
+
+ case AVS_HW_CFG_I2S_CAPS:
+ cfg->i2s_caps.i2s_version = tlv->value[0];
+ size = tlv->value[1];
+ cfg->i2s_caps.ctrl_count = size;
+ if (!size)
+ break;
+
+ /* Multiply to get entire array size. */
+ size *= sizeof(*cfg->i2s_caps.ctrl_base_addr);
+ cfg->i2s_caps.ctrl_base_addr = devm_kmemdup(adev->dev,
+ &tlv->value[2],
+ size, GFP_KERNEL);
+ if (!cfg->i2s_caps.ctrl_base_addr) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+ break;
+
+ case AVS_HW_CFG_GATEWAY_COUNT:
+ cfg->gateway_count = *tlv->value;
+ break;
+
+ case AVS_HW_CFG_HP_EBB_COUNT:
+ cfg->hp_ebb_count = *tlv->value;
+ break;
+
+ case AVS_HW_CFG_LP_EBB_COUNT:
+ cfg->lp_ebb_count = *tlv->value;
+ break;
+
+ case AVS_HW_CFG_EBB_SIZE_BYTES:
+ cfg->ebb_size_bytes = *tlv->value;
+ break;
+
+ case AVS_HW_CFG_GPDMA_CAPS:
+ break;
+
+ default:
+ dev_info(adev->dev, "Unrecognized hw config: %d\n", tlv->type);
+ break;
+ }
+
+ offset += sizeof(*tlv) + tlv->length;
+ }
+
+exit:
+	/* No longer needed; we are the get_large_config() caller, so we own and free the payload. */
+ kfree(payload);
+ return ret;
+}
+
+int avs_ipc_get_modules_info(struct avs_dev *adev, struct avs_mods_info **info)
+{
+ size_t payload_size;
+ u8 *payload;
+ int ret;
+
+ ret = avs_ipc_get_large_config(adev, AVS_BASEFW_MOD_ID, AVS_BASEFW_INST_ID,
+ AVS_BASEFW_MODULES_INFO, NULL, 0,
+ &payload, &payload_size);
+ if (ret)
+ return ret;
+
+ *info = (struct avs_mods_info *)payload;
+ return 0;
+}
+
+int avs_ipc_copier_set_sink_format(struct avs_dev *adev, u16 module_id,
+ u8 instance_id, u32 sink_id,
+ const struct avs_audio_format *src_fmt,
+ const struct avs_audio_format *sink_fmt)
+{
+ struct avs_copier_sink_format cpr_fmt;
+
+ cpr_fmt.sink_id = sink_id;
+	/* Firmware expects the driver to resend the copier's input format. */
+ cpr_fmt.src_fmt = *src_fmt;
+ cpr_fmt.sink_fmt = *sink_fmt;
+
+ return avs_ipc_set_large_config(adev, module_id, instance_id,
+ AVS_COPIER_SET_SINK_FORMAT,
+ (u8 *)&cpr_fmt, sizeof(cpr_fmt));
+}
diff --git a/sound/soc/intel/avs/messages.h b/sound/soc/intel/avs/messages.h
new file mode 100644
index 000000000000..0395dd7150eb
--- /dev/null
+++ b/sound/soc/intel/avs/messages.h
@@ -0,0 +1,752 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright(c) 2021-2022 Intel Corporation. All rights reserved.
+ *
+ * Authors: Cezary Rojewski <cezary.rojewski@intel.com>
+ * Amadeusz Slawinski <amadeuszx.slawinski@linux.intel.com>
+ */
+
+#ifndef __SOUND_SOC_INTEL_AVS_MSGS_H
+#define __SOUND_SOC_INTEL_AVS_MSGS_H
+
+struct avs_dev;
+
+#define AVS_MAILBOX_SIZE 4096
+
+enum avs_msg_target {
+ AVS_FW_GEN_MSG = 0,
+ AVS_MOD_MSG = 1
+};
+
+enum avs_msg_direction {
+ AVS_MSG_REQUEST = 0,
+ AVS_MSG_REPLY = 1
+};
+
+enum avs_global_msg_type {
+ AVS_GLB_ROM_CONTROL = 1,
+ AVS_GLB_LOAD_MULTIPLE_MODULES = 15,
+ AVS_GLB_UNLOAD_MULTIPLE_MODULES = 16,
+ AVS_GLB_CREATE_PIPELINE = 17,
+ AVS_GLB_DELETE_PIPELINE = 18,
+ AVS_GLB_SET_PIPELINE_STATE = 19,
+ AVS_GLB_GET_PIPELINE_STATE = 20,
+ AVS_GLB_LOAD_LIBRARY = 24,
+ AVS_GLB_NOTIFICATION = 27,
+};
+
+union avs_global_msg {
+ u64 val;
+ struct {
+ union {
+ u32 primary;
+ struct {
+ u32 rsvd:24;
+ u32 global_msg_type:5;
+ u32 msg_direction:1;
+ u32 msg_target:1;
+ };
+ /* set boot config */
+ struct {
+ u32 rom_ctrl_msg_type:9;
+ u32 dma_id:5;
+ u32 purge_request:1;
+ } boot_cfg;
+ /* module loading */
+ struct {
+ u32 mod_cnt:8;
+ } load_multi_mods;
+ /* pipeline management */
+ struct {
+ u32 ppl_mem_size:11;
+ u32 ppl_priority:5;
+ u32 instance_id:8;
+ } create_ppl;
+ struct {
+ u32 rsvd:16;
+ u32 instance_id:8;
+ } ppl; /* generic ppl request */
+ struct {
+ u32 state:16;
+ u32 ppl_id:8;
+ } set_ppl_state;
+ struct {
+ u32 ppl_id:8;
+ } get_ppl_state;
+ /* library loading */
+ struct {
+ u32 dma_id:5;
+ u32 rsvd:11;
+ u32 lib_id:4;
+ } load_lib;
+ };
+ union {
+ u32 val;
+ /* pipeline management */
+ struct {
+ u32 lp:1; /* low power flag */
+ u32 rsvd:3;
+ u32 attributes:16; /* additional scheduling flags */
+ } create_ppl;
+ } ext;
+ };
+} __packed;
+
+struct avs_tlv {
+ u32 type;
+ u32 length;
+ u32 value[];
+} __packed;
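Replies to the FIRMWARE_CONFIG and HARDWARE_CONFIG queries arrive as a flat sequence of such entries, and the parsers in messages.c simply advance by sizeof(struct avs_tlv) + length until the payload is exhausted. For example, the FW_VERSION parameter is encoded as type=0 (AVS_FW_CFG_FW_VERSION) with a value holding the four u16 fields of struct avs_fw_version (8 bytes).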
+
+enum avs_module_msg_type {
+ AVS_MOD_INIT_INSTANCE = 0,
+ AVS_MOD_LARGE_CONFIG_GET = 3,
+ AVS_MOD_LARGE_CONFIG_SET = 4,
+ AVS_MOD_BIND = 5,
+ AVS_MOD_UNBIND = 6,
+ AVS_MOD_SET_DX = 7,
+ AVS_MOD_SET_D0IX = 8,
+ AVS_MOD_DELETE_INSTANCE = 11,
+};
+
+union avs_module_msg {
+ u64 val;
+ struct {
+ union {
+ u32 primary;
+ struct {
+ u32 module_id:16;
+ u32 instance_id:8;
+ u32 module_msg_type:5;
+ u32 msg_direction:1;
+ u32 msg_target:1;
+ };
+ };
+ union {
+ u32 val;
+ struct {
+ u32 param_block_size:16;
+ u32 ppl_instance_id:8;
+ u32 core_id:4;
+ u32 proc_domain:1;
+ } init_instance;
+ struct {
+ u32 data_off_size:20;
+ u32 large_param_id:8;
+ u32 final_block:1;
+ u32 init_block:1;
+ } large_config;
+ struct {
+ u32 dst_module_id:16;
+ u32 dst_instance_id:8;
+ u32 dst_queue:3;
+ u32 src_queue:3;
+ } bind_unbind;
+ struct {
+ u32 wake:1;
+ u32 streaming:1;
+ } set_d0ix;
+ } ext;
+ };
+} __packed;
+
+union avs_reply_msg {
+ u64 val;
+ struct {
+ union {
+ u32 primary;
+ struct {
+ u32 status:24;
+ u32 global_msg_type:5;
+ u32 msg_direction:1;
+ u32 msg_target:1;
+ };
+ };
+ union {
+ u32 val;
+ /* module loading */
+ struct {
+ u32 err_mod_id:16;
+ } load_multi_mods;
+ /* pipeline management */
+ struct {
+ u32 state:5;
+ } get_ppl_state;
+ /* module management */
+ struct {
+ u32 data_off_size:20;
+ u32 large_param_id:8;
+ u32 final_block:1;
+ u32 init_block:1;
+ } large_config;
+ } ext;
+ };
+} __packed;
+
+enum avs_notify_msg_type {
+ AVS_NOTIFY_PHRASE_DETECTED = 4,
+ AVS_NOTIFY_RESOURCE_EVENT = 5,
+ AVS_NOTIFY_FW_READY = 8,
+ AVS_NOTIFY_MODULE_EVENT = 12,
+};
+
+union avs_notify_msg {
+ u64 val;
+ struct {
+ union {
+ u32 primary;
+ struct {
+ u32 rsvd:16;
+ u32 notify_msg_type:8;
+ u32 global_msg_type:5;
+ u32 msg_direction:1;
+ u32 msg_target:1;
+ };
+ };
+ union {
+ u32 val;
+ } ext;
+ };
+} __packed;
+
+#define AVS_MSG(hdr) { .val = hdr }
+
+#define AVS_GLOBAL_REQUEST(msg_type) \
+{ \
+ .global_msg_type = AVS_GLB_##msg_type, \
+ .msg_direction = AVS_MSG_REQUEST, \
+ .msg_target = AVS_FW_GEN_MSG, \
+}
+
+#define AVS_MODULE_REQUEST(msg_type) \
+{ \
+ .module_msg_type = AVS_MOD_##msg_type, \
+ .msg_direction = AVS_MSG_REQUEST, \
+ .msg_target = AVS_MOD_MSG, \
+}
+
+#define AVS_NOTIFICATION(msg_type) \
+{ \
+ .notify_msg_type = AVS_NOTIFY_##msg_type,\
+ .global_msg_type = AVS_GLB_NOTIFICATION,\
+ .msg_direction = AVS_MSG_REPLY, \
+ .msg_target = AVS_FW_GEN_MSG, \
+}
+
+#define avs_msg_is_reply(hdr) \
+({ \
+ union avs_reply_msg __msg = AVS_MSG(hdr); \
+ __msg.msg_direction == AVS_MSG_REPLY && \
+ __msg.global_msg_type != AVS_GLB_NOTIFICATION; \
+})
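For illustration, this is how a CREATE_PIPELINE header is assembled with the helpers above, equivalent to what avs_ipc_create_pipeline() does in messages.c; field values are placeholders and the split of the 64-bit header across the doorbell registers is handled by IPC code outside this excerpt:

union avs_global_msg msg = AVS_GLOBAL_REQUEST(CREATE_PIPELINE);

msg.create_ppl.ppl_mem_size = 0x100;
msg.create_ppl.ppl_priority = 0;
msg.create_ppl.instance_id = 1;
msg.ext.create_ppl.lp = 1;
/* msg.val now holds the complete 64-bit request header. */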
+
+/* Notification types */
+
+struct avs_notify_voice_data {
+ u16 kpd_score;
+ u16 reserved;
+} __packed;
+
+struct avs_notify_res_data {
+ u32 resource_type;
+ u32 resource_id;
+ u32 event_type;
+ u32 reserved;
+ u32 data[6];
+} __packed;
+
+struct avs_notify_mod_data {
+ u32 module_instance_id;
+ u32 event_id;
+ u32 data_size;
+ u32 data[];
+} __packed;
+
+/* ROM messages */
+enum avs_rom_control_msg_type {
+ AVS_ROM_SET_BOOT_CONFIG = 0,
+};
+
+int avs_ipc_set_boot_config(struct avs_dev *adev, u32 dma_id, u32 purge);
+
+/* Code loading messages */
+int avs_ipc_load_modules(struct avs_dev *adev, u16 *mod_ids, u32 num_mod_ids);
+int avs_ipc_unload_modules(struct avs_dev *adev, u16 *mod_ids, u32 num_mod_ids);
+int avs_ipc_load_library(struct avs_dev *adev, u32 dma_id, u32 lib_id);
+
+/* Pipeline management messages */
+enum avs_pipeline_state {
+ AVS_PPL_STATE_INVALID,
+ AVS_PPL_STATE_UNINITIALIZED,
+ AVS_PPL_STATE_RESET,
+ AVS_PPL_STATE_PAUSED,
+ AVS_PPL_STATE_RUNNING,
+};
+
+int avs_ipc_create_pipeline(struct avs_dev *adev, u16 req_size, u8 priority,
+ u8 instance_id, bool lp, u16 attributes);
+int avs_ipc_delete_pipeline(struct avs_dev *adev, u8 instance_id);
+int avs_ipc_set_pipeline_state(struct avs_dev *adev, u8 instance_id,
+ enum avs_pipeline_state state);
+int avs_ipc_get_pipeline_state(struct avs_dev *adev, u8 instance_id,
+ enum avs_pipeline_state *state);
+
+/* Module management messages */
+int avs_ipc_init_instance(struct avs_dev *adev, u16 module_id, u8 instance_id,
+ u8 ppl_id, u8 core_id, u8 domain,
+ void *param, u32 param_size);
+int avs_ipc_delete_instance(struct avs_dev *adev, u16 module_id, u8 instance_id);
+int avs_ipc_bind(struct avs_dev *adev, u16 module_id, u8 instance_id,
+ u16 dst_module_id, u8 dst_instance_id,
+ u8 dst_queue, u8 src_queue);
+int avs_ipc_unbind(struct avs_dev *adev, u16 module_id, u8 instance_id,
+ u16 dst_module_id, u8 dst_instance_id,
+ u8 dst_queue, u8 src_queue);
+int avs_ipc_set_large_config(struct avs_dev *adev, u16 module_id,
+ u8 instance_id, u8 param_id,
+ u8 *request, size_t request_size);
+int avs_ipc_get_large_config(struct avs_dev *adev, u16 module_id, u8 instance_id,
+ u8 param_id, u8 *request_data, size_t request_size,
+ u8 **reply_data, size_t *reply_size);
+
+/* DSP cores and domains power management messages */
+struct avs_dxstate_info {
+	u32 core_mask;	/* which cores are subject to the power transition */
+ u32 dx_mask; /* bit[n]=1 core n goes to D0, bit[n]=0 it goes to D3 */
+} __packed;
+
+int avs_ipc_set_dx(struct avs_dev *adev, u32 core_mask, bool powerup);
+int avs_ipc_set_d0ix(struct avs_dev *adev, bool enable_pg, bool streaming);
+
+/* Base-firmware runtime parameters */
+
+#define AVS_BASEFW_MOD_ID 0
+#define AVS_BASEFW_INST_ID 0
+
+enum avs_basefw_runtime_param {
+ AVS_BASEFW_FIRMWARE_CONFIG = 7,
+ AVS_BASEFW_HARDWARE_CONFIG = 8,
+ AVS_BASEFW_MODULES_INFO = 9,
+ AVS_BASEFW_LIBRARIES_INFO = 16,
+};
+
+struct avs_fw_version {
+ u16 major;
+ u16 minor;
+ u16 hotfix;
+ u16 build;
+};
+
+enum avs_fw_cfg_params {
+ AVS_FW_CFG_FW_VERSION = 0,
+ AVS_FW_CFG_MEMORY_RECLAIMED,
+ AVS_FW_CFG_SLOW_CLOCK_FREQ_HZ,
+ AVS_FW_CFG_FAST_CLOCK_FREQ_HZ,
+ AVS_FW_CFG_DMA_BUFFER_CONFIG,
+ AVS_FW_CFG_ALH_SUPPORT_LEVEL,
+ AVS_FW_CFG_IPC_DL_MAILBOX_BYTES,
+ AVS_FW_CFG_IPC_UL_MAILBOX_BYTES,
+ AVS_FW_CFG_TRACE_LOG_BYTES,
+ AVS_FW_CFG_MAX_PPL_COUNT,
+ AVS_FW_CFG_MAX_ASTATE_COUNT,
+ AVS_FW_CFG_MAX_MODULE_PIN_COUNT,
+ AVS_FW_CFG_MODULES_COUNT,
+ AVS_FW_CFG_MAX_MOD_INST_COUNT,
+ AVS_FW_CFG_MAX_LL_TASKS_PER_PRI_COUNT,
+ AVS_FW_CFG_LL_PRI_COUNT,
+ AVS_FW_CFG_MAX_DP_TASKS_COUNT,
+ AVS_FW_CFG_MAX_LIBS_COUNT,
+ AVS_FW_CFG_SCHEDULER_CONFIG,
+ AVS_FW_CFG_XTAL_FREQ_HZ,
+ AVS_FW_CFG_CLOCKS_CONFIG,
+ AVS_FW_CFG_RESERVED,
+ AVS_FW_CFG_POWER_GATING_POLICY,
+ AVS_FW_CFG_ASSERT_MODE,
+};
+
+struct avs_fw_cfg {
+ struct avs_fw_version fw_version;
+ u32 memory_reclaimed;
+ u32 slow_clock_freq_hz;
+ u32 fast_clock_freq_hz;
+ u32 alh_support;
+ u32 ipc_dl_mailbox_bytes;
+ u32 ipc_ul_mailbox_bytes;
+ u32 trace_log_bytes;
+ u32 max_ppl_count;
+ u32 max_astate_count;
+ u32 max_module_pin_count;
+ u32 modules_count;
+ u32 max_mod_inst_count;
+ u32 max_ll_tasks_per_pri_count;
+ u32 ll_pri_count;
+ u32 max_dp_tasks_count;
+ u32 max_libs_count;
+ u32 xtal_freq_hz;
+ u32 power_gating_policy;
+};
+
+int avs_ipc_get_fw_config(struct avs_dev *adev, struct avs_fw_cfg *cfg);
+
+enum avs_hw_cfg_params {
+ AVS_HW_CFG_AVS_VER,
+ AVS_HW_CFG_DSP_CORES,
+ AVS_HW_CFG_MEM_PAGE_BYTES,
+ AVS_HW_CFG_TOTAL_PHYS_MEM_PAGES,
+ AVS_HW_CFG_I2S_CAPS,
+ AVS_HW_CFG_GPDMA_CAPS,
+ AVS_HW_CFG_GATEWAY_COUNT,
+ AVS_HW_CFG_HP_EBB_COUNT,
+ AVS_HW_CFG_LP_EBB_COUNT,
+ AVS_HW_CFG_EBB_SIZE_BYTES,
+};
+
+enum avs_iface_version {
+ AVS_AVS_VER_1_5 = 0x10005,
+ AVS_AVS_VER_1_8 = 0x10008,
+};
+
+enum avs_i2s_version {
+ AVS_I2S_VER_15_SKYLAKE = 0x00000,
+ AVS_I2S_VER_15_BROXTON = 0x10000,
+ AVS_I2S_VER_15_BROXTON_P = 0x20000,
+ AVS_I2S_VER_18_KBL_CNL = 0x30000,
+};
+
+struct avs_i2s_caps {
+ u32 i2s_version;
+ u32 ctrl_count;
+ u32 *ctrl_base_addr;
+};
+
+struct avs_hw_cfg {
+ u32 avs_version;
+ u32 dsp_cores;
+ u32 mem_page_bytes;
+ u32 total_phys_mem_pages;
+ struct avs_i2s_caps i2s_caps;
+ u32 gateway_count;
+ u32 hp_ebb_count;
+ u32 lp_ebb_count;
+ u32 ebb_size_bytes;
+};
+
+int avs_ipc_get_hw_config(struct avs_dev *adev, struct avs_hw_cfg *cfg);
+
+#define AVS_MODULE_LOAD_TYPE_BUILTIN 0
+#define AVS_MODULE_LOAD_TYPE_LOADABLE 1
+#define AVS_MODULE_STATE_LOADED BIT(0)
+
+struct avs_module_type {
+ u32 load_type:4;
+ u32 auto_start:1;
+ u32 domain_ll:1;
+ u32 domain_dp:1;
+ u32 lib_code:1;
+ u32 rsvd:24;
+} __packed;
+
+union avs_segment_flags {
+ u32 ul;
+ struct {
+ u32 contents:1;
+ u32 alloc:1;
+ u32 load:1;
+ u32 readonly:1;
+ u32 code:1;
+ u32 data:1;
+ u32 rsvd_1:2;
+ u32 type:4;
+ u32 rsvd_2:4;
+ u32 length:16;
+ };
+} __packed;
+
+struct avs_segment_desc {
+ union avs_segment_flags flags;
+ u32 v_base_addr;
+ u32 file_offset;
+} __packed;
+
+struct avs_module_entry {
+ u16 module_id;
+ u16 state_flags;
+ u8 name[8];
+ guid_t uuid;
+ struct avs_module_type type;
+ u8 hash[32];
+ u32 entry_point;
+ u16 cfg_offset;
+ u16 cfg_count;
+ u32 affinity_mask;
+ u16 instance_max_count;
+ u16 instance_bss_size;
+ struct avs_segment_desc segments[3];
+} __packed;
+
+struct avs_mods_info {
+ u32 count;
+ struct avs_module_entry entries[];
+} __packed;
+
+static inline bool avs_module_entry_is_loaded(struct avs_module_entry *mentry)
+{
+ return mentry->type.load_type == AVS_MODULE_LOAD_TYPE_BUILTIN ||
+ mentry->state_flags & AVS_MODULE_STATE_LOADED;
+}
+
+int avs_ipc_get_modules_info(struct avs_dev *adev, struct avs_mods_info **info);
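A hedged sketch of how avs_module_entry_is_loaded() is meant to pair with the code-loading IPCs; avs_get_module_entry() comes from utils.c later in this patch and the helper name is hypothetical:

static int example_ensure_loaded(struct avs_dev *adev, const guid_t *uuid)
{
	struct avs_module_entry mentry;
	u16 module_id;
	int ret;

	ret = avs_get_module_entry(adev, uuid, &mentry);
	if (ret)
		return ret;

	/* Built-in modules and already-loaded library modules need no transfer. */
	if (avs_module_entry_is_loaded(&mentry))
		return 0;

	module_id = mentry.module_id;
	ret = avs_ipc_load_modules(adev, &module_id, 1);
	return ret ? AVS_IPC_RET(ret) : 0;
}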
+
+/* Module configuration */
+
+#define AVS_MIXIN_MOD_UUID \
+ GUID_INIT(0x39656EB2, 0x3B71, 0x4049, 0x8D, 0x3F, 0xF9, 0x2C, 0xD5, 0xC4, 0x3C, 0x09)
+
+#define AVS_MIXOUT_MOD_UUID \
+ GUID_INIT(0x3C56505A, 0x24D7, 0x418F, 0xBD, 0xDC, 0xC1, 0xF5, 0xA3, 0xAC, 0x2A, 0xE0)
+
+#define AVS_COPIER_MOD_UUID \
+ GUID_INIT(0x9BA00C83, 0xCA12, 0x4A83, 0x94, 0x3C, 0x1F, 0xA2, 0xE8, 0x2F, 0x9D, 0xDA)
+
+#define AVS_KPBUFF_MOD_UUID \
+ GUID_INIT(0xA8A0CB32, 0x4A77, 0x4DB1, 0x85, 0xC7, 0x53, 0xD7, 0xEE, 0x07, 0xBC, 0xE6)
+
+#define AVS_MICSEL_MOD_UUID \
+ GUID_INIT(0x32FE92C1, 0x1E17, 0x4FC2, 0x97, 0x58, 0xC7, 0xF3, 0x54, 0x2E, 0x98, 0x0A)
+
+#define AVS_MUX_MOD_UUID \
+ GUID_INIT(0x64CE6E35, 0x857A, 0x4878, 0xAC, 0xE8, 0xE2, 0xA2, 0xF4, 0x2e, 0x30, 0x69)
+
+#define AVS_UPDWMIX_MOD_UUID \
+ GUID_INIT(0x42F8060C, 0x832F, 0x4DBF, 0xB2, 0x47, 0x51, 0xE9, 0x61, 0x99, 0x7b, 0x35)
+
+#define AVS_SRCINTC_MOD_UUID \
+ GUID_INIT(0xE61BB28D, 0x149A, 0x4C1F, 0xB7, 0x09, 0x46, 0x82, 0x3E, 0xF5, 0xF5, 0xAE)
+
+#define AVS_PROBE_MOD_UUID \
+ GUID_INIT(0x7CAD0808, 0xAB10, 0xCD23, 0xEF, 0x45, 0x12, 0xAB, 0x34, 0xCD, 0x56, 0xEF)
+
+#define AVS_AEC_MOD_UUID \
+ GUID_INIT(0x46CB87FB, 0xD2C9, 0x4970, 0x96, 0xD2, 0x6D, 0x7E, 0x61, 0x4B, 0xB6, 0x05)
+
+#define AVS_ASRC_MOD_UUID \
+ GUID_INIT(0x66B4402D, 0xB468, 0x42F2, 0x81, 0xA7, 0xB3, 0x71, 0x21, 0x86, 0x3D, 0xD4)
+
+#define AVS_INTELWOV_MOD_UUID \
+ GUID_INIT(0xEC774FA9, 0x28D3, 0x424A, 0x90, 0xE4, 0x69, 0xF9, 0x84, 0xF1, 0xEE, 0xB7)
+
+/* channel map */
+enum avs_channel_index {
+ AVS_CHANNEL_LEFT = 0,
+ AVS_CHANNEL_RIGHT = 1,
+ AVS_CHANNEL_CENTER = 2,
+ AVS_CHANNEL_LEFT_SURROUND = 3,
+ AVS_CHANNEL_CENTER_SURROUND = 3,
+ AVS_CHANNEL_RIGHT_SURROUND = 4,
+ AVS_CHANNEL_LFE = 7,
+ AVS_CHANNEL_INVALID = 0xF,
+};
+
+enum avs_channel_config {
+ AVS_CHANNEL_CONFIG_MONO = 0,
+ AVS_CHANNEL_CONFIG_STEREO = 1,
+ AVS_CHANNEL_CONFIG_2_1 = 2,
+ AVS_CHANNEL_CONFIG_3_0 = 3,
+ AVS_CHANNEL_CONFIG_3_1 = 4,
+ AVS_CHANNEL_CONFIG_QUATRO = 5,
+ AVS_CHANNEL_CONFIG_4_0 = 6,
+ AVS_CHANNEL_CONFIG_5_0 = 7,
+ AVS_CHANNEL_CONFIG_5_1 = 8,
+ AVS_CHANNEL_CONFIG_DUAL_MONO = 9,
+ AVS_CHANNEL_CONFIG_I2S_DUAL_STEREO_0 = 10,
+ AVS_CHANNEL_CONFIG_I2S_DUAL_STEREO_1 = 11,
+ AVS_CHANNEL_CONFIG_4_CHANNEL = 12,
+ AVS_CHANNEL_CONFIG_INVALID
+};
+
+enum avs_interleaving {
+ AVS_INTERLEAVING_PER_CHANNEL = 0,
+ AVS_INTERLEAVING_PER_SAMPLE = 1,
+};
+
+enum avs_sample_type {
+ AVS_SAMPLE_TYPE_INT_MSB = 0,
+ AVS_SAMPLE_TYPE_INT_LSB = 1,
+ AVS_SAMPLE_TYPE_INT_SIGNED = 2,
+ AVS_SAMPLE_TYPE_INT_UNSIGNED = 3,
+ AVS_SAMPLE_TYPE_FLOAT = 4,
+};
+
+#define AVS_CHANNELS_MAX 8
+#define AVS_ALL_CHANNELS_MASK UINT_MAX
+
+struct avs_audio_format {
+ u32 sampling_freq;
+ u32 bit_depth;
+ u32 channel_map;
+ u32 channel_config;
+ u32 interleaving;
+ u32 num_channels:8;
+ u32 valid_bit_depth:8;
+ u32 sample_type:8;
+ u32 reserved:8;
+} __packed;
+
+struct avs_modcfg_base {
+ u32 cpc;
+ u32 ibs;
+ u32 obs;
+ u32 is_pages;
+ struct avs_audio_format audio_fmt;
+} __packed;
+
+struct avs_pin_format {
+ u32 pin_index;
+ u32 iobs;
+ struct avs_audio_format audio_fmt;
+} __packed;
+
+struct avs_modcfg_ext {
+ struct avs_modcfg_base base;
+ u16 num_input_pins;
+ u16 num_output_pins;
+ u8 reserved[12];
+ /* input pin formats followed by output ones */
+ struct avs_pin_format pin_fmts[];
+} __packed;
+
+enum avs_dma_type {
+ AVS_DMA_HDA_HOST_OUTPUT = 0,
+ AVS_DMA_HDA_HOST_INPUT = 1,
+ AVS_DMA_HDA_LINK_OUTPUT = 8,
+ AVS_DMA_HDA_LINK_INPUT = 9,
+ AVS_DMA_DMIC_LINK_INPUT = 11,
+ AVS_DMA_I2S_LINK_OUTPUT = 12,
+ AVS_DMA_I2S_LINK_INPUT = 13,
+};
+
+union avs_virtual_index {
+ u8 val;
+ struct {
+ u8 time_slot:4;
+ u8 instance:4;
+ } i2s;
+ struct {
+ u8 queue_id:3;
+ u8 time_slot:2;
+ u8 instance:3;
+ } dmic;
+} __packed;
+
+union avs_connector_node_id {
+ u32 val;
+ struct {
+ u32 vindex:8;
+ u32 dma_type:5;
+ u32 rsvd:19;
+ };
+} __packed;
+
+#define INVALID_PIPELINE_ID 0xFF
+#define INVALID_NODE_ID \
+ ((union avs_connector_node_id) { UINT_MAX })
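For illustration (inside a function body, with placeholder values), a gateway node id describing an I2S link output on controller instance 1, time slot 0, could be assembled as:

union avs_virtual_index vindex = { .i2s = { .instance = 1, .time_slot = 0 } };
union avs_connector_node_id node_id = {
	.vindex = vindex.val,
	.dma_type = AVS_DMA_I2S_LINK_OUTPUT,
};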
+
+union avs_gtw_attributes {
+ u32 val;
+ struct {
+ u32 lp_buffer_alloc:1;
+ u32 rsvd:31;
+ };
+} __packed;
+
+struct avs_copier_gtw_cfg {
+ union avs_connector_node_id node_id;
+ u32 dma_buffer_size;
+ u32 config_length;
+ struct {
+ union avs_gtw_attributes attrs;
+ u32 blob[];
+ } config;
+} __packed;
+
+struct avs_copier_cfg {
+ struct avs_modcfg_base base;
+ struct avs_audio_format out_fmt;
+ u32 feature_mask;
+ struct avs_copier_gtw_cfg gtw_cfg;
+} __packed;
+
+struct avs_micsel_cfg {
+ struct avs_modcfg_base base;
+ struct avs_audio_format out_fmt;
+} __packed;
+
+struct avs_mux_cfg {
+ struct avs_modcfg_base base;
+ struct avs_audio_format ref_fmt;
+ struct avs_audio_format out_fmt;
+} __packed;
+
+struct avs_updown_mixer_cfg {
+ struct avs_modcfg_base base;
+ u32 out_channel_config;
+ u32 coefficients_select;
+ s32 coefficients[AVS_CHANNELS_MAX];
+ u32 channel_map;
+} __packed;
+
+struct avs_src_cfg {
+ struct avs_modcfg_base base;
+ u32 out_freq;
+} __packed;
+
+struct avs_probe_gtw_cfg {
+ union avs_connector_node_id node_id;
+ u32 dma_buffer_size;
+} __packed;
+
+struct avs_probe_cfg {
+ struct avs_modcfg_base base;
+ struct avs_probe_gtw_cfg gtw_cfg;
+} __packed;
+
+struct avs_aec_cfg {
+ struct avs_modcfg_base base;
+ struct avs_audio_format ref_fmt;
+ struct avs_audio_format out_fmt;
+ u32 cpc_lp_mode;
+} __packed;
+
+struct avs_asrc_cfg {
+ struct avs_modcfg_base base;
+ u32 out_freq;
+ u32 rsvd0:1;
+ u32 mode:1;
+ u32 rsvd2:2;
+ u32 disable_jitter_buffer:1;
+ u32 rsvd3:27;
+} __packed;
+
+struct avs_wov_cfg {
+ struct avs_modcfg_base base;
+ u32 cpc_lp_mode;
+} __packed;
+
+/* Module runtime parameters */
+
+enum avs_copier_runtime_param {
+ AVS_COPIER_SET_SINK_FORMAT = 2,
+};
+
+struct avs_copier_sink_format {
+ u32 sink_id;
+ struct avs_audio_format src_fmt;
+ struct avs_audio_format sink_fmt;
+} __packed;
+
+int avs_ipc_copier_set_sink_format(struct avs_dev *adev, u16 module_id,
+ u8 instance_id, u32 sink_id,
+ const struct avs_audio_format *src_fmt,
+ const struct avs_audio_format *sink_fmt);
+
+#endif /* __SOUND_SOC_INTEL_AVS_MSGS_H */
diff --git a/sound/soc/intel/avs/registers.h b/sound/soc/intel/avs/registers.h
new file mode 100644
index 000000000000..3fd02389ed2b
--- /dev/null
+++ b/sound/soc/intel/avs/registers.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright(c) 2021-2022 Intel Corporation. All rights reserved.
+ *
+ * Authors: Cezary Rojewski <cezary.rojewski@intel.com>
+ * Amadeusz Slawinski <amadeuszx.slawinski@linux.intel.com>
+ */
+
+#ifndef __SOUND_SOC_INTEL_AVS_REGS_H
+#define __SOUND_SOC_INTEL_AVS_REGS_H
+
+#define AZX_PCIREG_PGCTL 0x44
+#define AZX_PCIREG_CGCTL 0x48
+#define AZX_PGCTL_LSRMD_MASK BIT(4)
+#define AZX_CGCTL_MISCBDCGE_MASK BIT(6)
+#define AZX_VS_EM2_L1SEN BIT(13)
+
+/* Intel HD Audio General DSP Registers */
+#define AVS_ADSP_GEN_BASE 0x0
+#define AVS_ADSP_REG_ADSPCS (AVS_ADSP_GEN_BASE + 0x04)
+#define AVS_ADSP_REG_ADSPIC (AVS_ADSP_GEN_BASE + 0x08)
+#define AVS_ADSP_REG_ADSPIS (AVS_ADSP_GEN_BASE + 0x0C)
+
+#define AVS_ADSP_ADSPIC_IPC BIT(0)
+#define AVS_ADSP_ADSPIC_CLDMA BIT(1)
+#define AVS_ADSP_ADSPIS_IPC BIT(0)
+#define AVS_ADSP_ADSPIS_CLDMA BIT(1)
+
+#define AVS_ADSPCS_CRST_MASK(cm) (cm)
+#define AVS_ADSPCS_CSTALL_MASK(cm) ((cm) << 8)
+#define AVS_ADSPCS_SPA_MASK(cm) ((cm) << 16)
+#define AVS_ADSPCS_CPA_MASK(cm) ((cm) << 24)
+#define AVS_MAIN_CORE_MASK BIT(0)
+
+#define AVS_ADSP_HIPCCTL_BUSY BIT(0)
+#define AVS_ADSP_HIPCCTL_DONE BIT(1)
+
+/* SKL Intel HD Audio Inter-Processor Communication Registers */
+#define SKL_ADSP_IPC_BASE 0x40
+#define SKL_ADSP_REG_HIPCT (SKL_ADSP_IPC_BASE + 0x00)
+#define SKL_ADSP_REG_HIPCTE (SKL_ADSP_IPC_BASE + 0x04)
+#define SKL_ADSP_REG_HIPCI (SKL_ADSP_IPC_BASE + 0x08)
+#define SKL_ADSP_REG_HIPCIE (SKL_ADSP_IPC_BASE + 0x0C)
+#define SKL_ADSP_REG_HIPCCTL (SKL_ADSP_IPC_BASE + 0x10)
+
+#define SKL_ADSP_HIPCI_BUSY BIT(31)
+#define SKL_ADSP_HIPCIE_DONE BIT(30)
+#define SKL_ADSP_HIPCT_BUSY BIT(31)
+
+/* Constants used when accessing SRAM, space shared with firmware */
+#define AVS_FW_REG_BASE(adev) ((adev)->spec->sram_base_offset)
+#define AVS_FW_REG_STATUS(adev) (AVS_FW_REG_BASE(adev) + 0x0)
+#define AVS_FW_REG_ERROR_CODE(adev) (AVS_FW_REG_BASE(adev) + 0x4)
+
+#define AVS_FW_REGS_SIZE PAGE_SIZE
+#define AVS_FW_REGS_WINDOW 0
+/* DSP -> HOST communication window */
+#define AVS_UPLINK_WINDOW AVS_FW_REGS_WINDOW
+/* HOST -> DSP communication window */
+#define AVS_DOWNLINK_WINDOW 1
+
+/* register I/O helpers */
+#define avs_sram_offset(adev, window_idx) \
+ ((adev)->spec->sram_base_offset + \
+ (adev)->spec->sram_window_size * (window_idx))
+
+#define avs_sram_addr(adev, window_idx) \
+ ((adev)->dsp_ba + avs_sram_offset(adev, window_idx))
+
+#define avs_uplink_addr(adev) \
+ (avs_sram_addr(adev, AVS_UPLINK_WINDOW) + AVS_FW_REGS_SIZE)
+#define avs_downlink_addr(adev) \
+ avs_sram_addr(adev, AVS_DOWNLINK_WINDOW)
+
+#endif /* __SOUND_SOC_INTEL_AVS_REGS_H */
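The window helpers above are consumed by the IPC code outside this excerpt; the sketch below only illustrates the assumed direction of mailbox traffic:

static void example_mailbox_copy(struct avs_dev *adev, struct avs_ipc_msg *tx,
				 struct avs_ipc_msg *rx)
{
	/* Host -> DSP request payload goes through the downlink window. */
	memcpy_toio(avs_downlink_addr(adev), tx->data, tx->size);
	/* DSP -> host reply payload is read back from the uplink window. */
	memcpy_fromio(rx->data, avs_uplink_addr(adev), rx->size);
}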
diff --git a/sound/soc/intel/avs/utils.c b/sound/soc/intel/avs/utils.c
new file mode 100644
index 000000000000..6473e3ae4c6e
--- /dev/null
+++ b/sound/soc/intel/avs/utils.c
@@ -0,0 +1,301 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright(c) 2021-2022 Intel Corporation. All rights reserved.
+//
+// Authors: Cezary Rojewski <cezary.rojewski@intel.com>
+// Amadeusz Slawinski <amadeuszx.slawinski@linux.intel.com>
+//
+
+#include <linux/firmware.h>
+#include <linux/slab.h>
+#include "avs.h"
+#include "messages.h"
+
+/* Caller responsible for holding adev->modres_mutex. */
+static int avs_module_entry_index(struct avs_dev *adev, const guid_t *uuid)
+{
+ int i;
+
+ for (i = 0; i < adev->mods_info->count; i++) {
+ struct avs_module_entry *module;
+
+ module = &adev->mods_info->entries[i];
+ if (guid_equal(&module->uuid, uuid))
+ return i;
+ }
+
+ return -ENOENT;
+}
+
+/* Caller responsible for holding adev->modres_mutex. */
+static int avs_module_id_entry_index(struct avs_dev *adev, u32 module_id)
+{
+ int i;
+
+ for (i = 0; i < adev->mods_info->count; i++) {
+ struct avs_module_entry *module;
+
+ module = &adev->mods_info->entries[i];
+ if (module->module_id == module_id)
+ return i;
+ }
+
+ return -ENOENT;
+}
+
+int avs_get_module_entry(struct avs_dev *adev, const guid_t *uuid, struct avs_module_entry *entry)
+{
+ int idx;
+
+ mutex_lock(&adev->modres_mutex);
+
+ idx = avs_module_entry_index(adev, uuid);
+ if (idx >= 0)
+ memcpy(entry, &adev->mods_info->entries[idx], sizeof(*entry));
+
+ mutex_unlock(&adev->modres_mutex);
+ return (idx < 0) ? idx : 0;
+}
+
+int avs_get_module_id_entry(struct avs_dev *adev, u32 module_id, struct avs_module_entry *entry)
+{
+ int idx;
+
+ mutex_lock(&adev->modres_mutex);
+
+ idx = avs_module_id_entry_index(adev, module_id);
+ if (idx >= 0)
+ memcpy(entry, &adev->mods_info->entries[idx], sizeof(*entry));
+
+ mutex_unlock(&adev->modres_mutex);
+ return (idx < 0) ? idx : 0;
+}
+
+int avs_get_module_id(struct avs_dev *adev, const guid_t *uuid)
+{
+ struct avs_module_entry module;
+ int ret;
+
+ ret = avs_get_module_entry(adev, uuid, &module);
+ return !ret ? module.module_id : -ENOENT;
+}
+
+bool avs_is_module_ida_empty(struct avs_dev *adev, u32 module_id)
+{
+ bool ret = false;
+ int idx;
+
+ mutex_lock(&adev->modres_mutex);
+
+ idx = avs_module_id_entry_index(adev, module_id);
+ if (idx >= 0)
+ ret = ida_is_empty(adev->mod_idas[idx]);
+
+ mutex_unlock(&adev->modres_mutex);
+ return ret;
+}
+
+/* Caller responsible for holding adev->modres_mutex. */
+static void avs_module_ida_destroy(struct avs_dev *adev)
+{
+ int i = adev->mods_info ? adev->mods_info->count : 0;
+
+ while (i--) {
+ ida_destroy(adev->mod_idas[i]);
+ kfree(adev->mod_idas[i]);
+ }
+ kfree(adev->mod_idas);
+}
+
+/* Caller responsible for holding adev->modres_mutex. */
+static int
+avs_module_ida_alloc(struct avs_dev *adev, struct avs_mods_info *newinfo, bool purge)
+{
+ struct avs_mods_info *oldinfo = adev->mods_info;
+ struct ida **ida_ptrs;
+ u32 tocopy_count = 0;
+ int i;
+
+ if (!purge && oldinfo) {
+ if (oldinfo->count >= newinfo->count)
+ dev_warn(adev->dev, "refreshing %d modules info with %d\n",
+ oldinfo->count, newinfo->count);
+ tocopy_count = oldinfo->count;
+ }
+
+ ida_ptrs = kcalloc(newinfo->count, sizeof(*ida_ptrs), GFP_KERNEL);
+ if (!ida_ptrs)
+ return -ENOMEM;
+
+ if (tocopy_count)
+ memcpy(ida_ptrs, adev->mod_idas, tocopy_count * sizeof(*ida_ptrs));
+
+ for (i = tocopy_count; i < newinfo->count; i++) {
+ ida_ptrs[i] = kzalloc(sizeof(**ida_ptrs), GFP_KERNEL);
+ if (!ida_ptrs[i]) {
+ while (i--)
+ kfree(ida_ptrs[i]);
+
+ kfree(ida_ptrs);
+ return -ENOMEM;
+ }
+
+ ida_init(ida_ptrs[i]);
+ }
+
+ /* If old elements have been reused, don't wipe them. */
+ if (tocopy_count)
+ kfree(adev->mod_idas);
+ else
+ avs_module_ida_destroy(adev);
+
+ adev->mod_idas = ida_ptrs;
+ return 0;
+}
+
+int avs_module_info_init(struct avs_dev *adev, bool purge)
+{
+ struct avs_mods_info *info;
+ int ret;
+
+ ret = avs_ipc_get_modules_info(adev, &info);
+ if (ret)
+ return AVS_IPC_RET(ret);
+
+ mutex_lock(&adev->modres_mutex);
+
+ ret = avs_module_ida_alloc(adev, info, purge);
+ if (ret < 0) {
+ dev_err(adev->dev, "initialize module idas failed: %d\n", ret);
+ goto exit;
+ }
+
+ /* Refresh current information with newly received table. */
+ kfree(adev->mods_info);
+ adev->mods_info = info;
+
+exit:
+ mutex_unlock(&adev->modres_mutex);
+ return ret;
+}
+
+void avs_module_info_free(struct avs_dev *adev)
+{
+ mutex_lock(&adev->modres_mutex);
+
+ avs_module_ida_destroy(adev);
+ kfree(adev->mods_info);
+ adev->mods_info = NULL;
+
+ mutex_unlock(&adev->modres_mutex);
+}
+
+int avs_module_id_alloc(struct avs_dev *adev, u16 module_id)
+{
+ int ret, idx, max_id;
+
+ mutex_lock(&adev->modres_mutex);
+
+ idx = avs_module_id_entry_index(adev, module_id);
+ if (idx == -ENOENT) {
+		dev_err(adev->dev, "invalid module id: %d\n", module_id);
+ ret = -EINVAL;
+ goto exit;
+ }
+ max_id = adev->mods_info->entries[idx].instance_max_count - 1;
+ ret = ida_alloc_max(adev->mod_idas[idx], max_id, GFP_KERNEL);
+exit:
+ mutex_unlock(&adev->modres_mutex);
+ return ret;
+}
+
+void avs_module_id_free(struct avs_dev *adev, u16 module_id, u8 instance_id)
+{
+ int idx;
+
+ mutex_lock(&adev->modres_mutex);
+
+ idx = avs_module_id_entry_index(adev, module_id);
+ if (idx == -ENOENT) {
+		dev_err(adev->dev, "invalid module id: %d\n", module_id);
+ goto exit;
+ }
+
+ ida_free(adev->mod_idas[idx], instance_id);
+exit:
+ mutex_unlock(&adev->modres_mutex);
+}
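A hypothetical helper showing how the ida-based instance-id management above pairs with the INIT_INSTANCE IPC; core and domain values are placeholders:

static int example_create_instance(struct avs_dev *adev, u16 module_id, u8 ppl_id,
				   void *param, u32 param_size)
{
	int id, ret;

	id = avs_module_id_alloc(adev, module_id);
	if (id < 0)
		return id;

	ret = avs_ipc_init_instance(adev, module_id, id, ppl_id, 0, 0,
				    param, param_size);
	if (ret) {
		avs_module_id_free(adev, module_id, id);
		return AVS_IPC_RET(ret);
	}

	return id;
}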
+
+/*
+ * Once the driver loads a FW it should keep it in memory, so that it is not
+ * affected by the FW being removed from the filesystem or, even worse, by a
+ * different FW being loaded at runtime suspend/resume.
+ */
+int avs_request_firmware(struct avs_dev *adev, const struct firmware **fw_p, const char *name)
+{
+ struct avs_fw_entry *entry;
+ int ret;
+
+	/* First check the list to see whether it is already loaded. */
+ list_for_each_entry(entry, &adev->fw_list, node) {
+ if (!strcmp(name, entry->name)) {
+ *fw_p = entry->fw;
+ return 0;
+ }
+ }
+
+	/* FW is not loaded yet; load it now and add it to the list. */
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
+
+ entry->name = kstrdup(name, GFP_KERNEL);
+ if (!entry->name) {
+ kfree(entry);
+ return -ENOMEM;
+ }
+
+ ret = request_firmware(&entry->fw, name, adev->dev);
+ if (ret < 0) {
+ kfree(entry->name);
+ kfree(entry);
+ return ret;
+ }
+
+ *fw_p = entry->fw;
+
+ list_add_tail(&entry->node, &adev->fw_list);
+
+ return 0;
+}
+
+/*
+ * Release a single FW entry (the most recently requested one); used to handle
+ * errors in functions calling avs_request_firmware().
+ */
+void avs_release_last_firmware(struct avs_dev *adev)
+{
+ struct avs_fw_entry *entry;
+
+ entry = list_last_entry(&adev->fw_list, typeof(*entry), node);
+
+ list_del(&entry->node);
+ release_firmware(entry->fw);
+ kfree(entry->name);
+ kfree(entry);
+}
+
+/*
+ * Release all FW entries; used on driver removal.
+ */
+void avs_release_firmwares(struct avs_dev *adev)
+{
+ struct avs_fw_entry *entry, *tmp;
+
+ list_for_each_entry_safe(entry, tmp, &adev->fw_list, node) {
+ list_del(&entry->node);
+ release_firmware(entry->fw);
+ kfree(entry->name);
+ kfree(entry);
+ }
+}
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index a088bc9f7dd7..ce153ac2c3ab 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -2465,6 +2465,7 @@ struct snd_soc_dai *snd_soc_register_dai(struct snd_soc_component *component,
dev_dbg(dev, "ASoC: Registered DAI '%s'\n", dai->name);
return dai;
}
+EXPORT_SYMBOL_GPL(snd_soc_register_dai);
/**
* snd_soc_unregister_dais - Unregister DAIs from the ASoC core
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index b06c5682445c..b435b5c4cfb7 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -2484,6 +2484,12 @@ static void dapm_free_path(struct snd_soc_dapm_path *path)
kfree(path);
}
+/**
+ * snd_soc_dapm_free_widget - Free specified widget
+ * @w: widget to free
+ *
+ * Removes widget from all paths and frees memory occupied by it.
+ */
void snd_soc_dapm_free_widget(struct snd_soc_dapm_widget *w)
{
struct snd_soc_dapm_path *p, *next_p;
@@ -2506,6 +2512,7 @@ void snd_soc_dapm_free_widget(struct snd_soc_dapm_widget *w)
kfree_const(w->sname);
kfree(w);
}
+EXPORT_SYMBOL_GPL(snd_soc_dapm_free_widget);
void snd_soc_dapm_reset_cache(struct snd_soc_dapm_context *dapm)
{
@@ -4208,6 +4215,13 @@ param_fail:
return ERR_PTR(ret);
}
+/**
+ * snd_soc_dapm_new_dai_widgets - Create new DAPM widgets
+ * @dapm: DAPM context
+ * @dai: parent DAI
+ *
+ * Returns 0 on success, error code otherwise.
+ */
int snd_soc_dapm_new_dai_widgets(struct snd_soc_dapm_context *dapm,
struct snd_soc_dai *dai)
{
@@ -4253,6 +4267,7 @@ int snd_soc_dapm_new_dai_widgets(struct snd_soc_dapm_context *dapm,
return 0;
}
+EXPORT_SYMBOL_GPL(snd_soc_dapm_new_dai_widgets);
int snd_soc_dapm_link_dai_widgets(struct snd_soc_card *card)
{