path: root/drivers/firmware
author    Arnd Bergmann  2021-08-12 22:38:31 +0200
committer Arnd Bergmann  2021-08-12 22:38:31 +0200
commit    1bb24be00c8c360e5e3072301d5f49eac86b384f (patch)
tree      2901d4c8d7ecf7c3a8b4335d738ad761582c8b14 /drivers/firmware
parent    866e1691ed5beee933c0f8c9268963c33377ede6 (diff)
parent    c0397c85b53d0bc6b081ff22d0d07e8eae149bba (diff)
Merge tag 'scmi-updates-5.15' of git://git.kernel.org/pub/scm/linux/kernel/git/sudeep.holla/linux into arm/drivers
SCMI Updates for v5.15

The bulk of the additions this time is refactoring to prepare for a new
virtio transport for SCMI, plus the addition of that transport itself.
The refactoring includes allowing transport-specific init/exit calls,
making each transport compile-time configurable, and supporting
monotonically increasing tokens instead of using the next available free
buffer index as the token for SCMI messages, which eases handling of
concurrent and out-of-order messages, a must-have for the virtio
transport. The virtio support itself conforms to the virtio SCMI device
spec [1]. Virtio device id 32 has been reserved for the SCMI device [2].

Other than the virtio support, there is one bug fix in the probe failure
clean-up path.

[1] https://github.com/oasis-tcs/virtio-spec/blob/master/virtio-scmi.tex
[2] https://www.oasis-open.org/committees/ballot.php?id=3496

* tag 'scmi-updates-5.15' of git://git.kernel.org/pub/scm/linux/kernel/git/sudeep.holla/linux:
  firmware: arm_scmi: Use WARN_ON() to check configured transports
  firmware: arm_scmi: Fix boolconv.cocci warnings
  firmware: arm_scmi: Free mailbox channels if probe fails
  firmware: arm_scmi: Add virtio transport
  firmware: arm_scmi: Add priv parameter to scmi_rx_callback
  dt-bindings: arm: Add virtio transport for SCMI
  firmware: arm_scmi: Add optional link_supplier() transport op
  firmware: arm_scmi: Add message passing abstractions for transports
  firmware: arm_scmi: Add method to override max message number
  firmware: arm_scmi: Make shmem support optional for transports
  firmware: arm_scmi: Make SCMI transports configurable
  firmware: arm_scmi: Make polling mode optional
  firmware: arm_scmi: Make .clear_channel optional
  firmware: arm_scmi: Handle concurrent and out-of-order messages
  firmware: arm_scmi: Introduce monotonically increasing tokens
  firmware: arm_scmi: Add optional transport_init/exit support
  firmware: arm_scmi: Remove scmi_dump_header_dbg() helper
  firmware: arm_scmi: Add support for type handling in common functions

Link: https://lore.kernel.org/r/20210811075743.707961-1-sudeep.holla@arm.com
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Diffstat (limited to 'drivers/firmware')
-rw-r--r--  drivers/firmware/Kconfig             |  34
-rw-r--r--  drivers/firmware/arm_scmi/Kconfig    |  95
-rw-r--r--  drivers/firmware/arm_scmi/Makefile   |   8
-rw-r--r--  drivers/firmware/arm_scmi/common.h   | 113
-rw-r--r--  drivers/firmware/arm_scmi/driver.c   | 686
-rw-r--r--  drivers/firmware/arm_scmi/mailbox.c  |   2
-rw-r--r--  drivers/firmware/arm_scmi/msg.c      | 111
-rw-r--r--  drivers/firmware/arm_scmi/smc.c      |   3
-rw-r--r--  drivers/firmware/arm_scmi/virtio.c   | 491
9 files changed, 1396 insertions, 147 deletions
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index 1db738d5b301..8d41f73f5395 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -6,39 +6,7 @@
menu "Firmware Drivers"
-config ARM_SCMI_PROTOCOL
- tristate "ARM System Control and Management Interface (SCMI) Message Protocol"
- depends on ARM || ARM64 || COMPILE_TEST
- depends on MAILBOX || HAVE_ARM_SMCCC_DISCOVERY
- help
- ARM System Control and Management Interface (SCMI) protocol is a
- set of operating system-independent software interfaces that are
- used in system management. SCMI is extensible and currently provides
- interfaces for: Discovery and self-description of the interfaces
- it supports, Power domain management which is the ability to place
- a given device or domain into the various power-saving states that
- it supports, Performance management which is the ability to control
- the performance of a domain that is composed of compute engines
- such as application processors and other accelerators, Clock
- management which is the ability to set and inquire rates on platform
- managed clocks and Sensor management which is the ability to read
- sensor data, and be notified of sensor value changes.
-
- This protocol library provides interface for all the client drivers
- making use of the features offered by the SCMI.
-
-config ARM_SCMI_POWER_DOMAIN
- tristate "SCMI power domain driver"
- depends on ARM_SCMI_PROTOCOL || (COMPILE_TEST && OF)
- default y
- select PM_GENERIC_DOMAINS if PM
- help
- This enables support for the SCMI power domains which can be
- enabled or disabled via the SCP firmware
-
- This driver can also be built as a module. If so, the module
- will be called scmi_pm_domain. Note this may be needed early in boot
- before rootfs may be available.
+source "drivers/firmware/arm_scmi/Kconfig"
config ARM_SCPI_PROTOCOL
tristate "ARM System Control and Power Interface (SCPI) Message Protocol"
diff --git a/drivers/firmware/arm_scmi/Kconfig b/drivers/firmware/arm_scmi/Kconfig
new file mode 100644
index 000000000000..7f4d2435503b
--- /dev/null
+++ b/drivers/firmware/arm_scmi/Kconfig
@@ -0,0 +1,95 @@
+# SPDX-License-Identifier: GPL-2.0-only
+menu "ARM System Control and Management Interface Protocol"
+
+config ARM_SCMI_PROTOCOL
+ tristate "ARM System Control and Management Interface (SCMI) Message Protocol"
+ depends on ARM || ARM64 || COMPILE_TEST
+ help
+ ARM System Control and Management Interface (SCMI) protocol is a
+ set of operating system-independent software interfaces that are
+ used in system management. SCMI is extensible and currently provides
+ interfaces for: Discovery and self-description of the interfaces
+ it supports, Power domain management which is the ability to place
+ a given device or domain into the various power-saving states that
+ it supports, Performance management which is the ability to control
+ the performance of a domain that is composed of compute engines
+ such as application processors and other accelerators, Clock
+ management which is the ability to set and inquire rates on platform
+ managed clocks and Sensor management which is the ability to read
+ sensor data, and be notified of sensor value changes.
+
+ This protocol library provides interface for all the client drivers
+ making use of the features offered by the SCMI.
+
+if ARM_SCMI_PROTOCOL
+
+config ARM_SCMI_HAVE_TRANSPORT
+ bool
+ help
+ This declares whether at least one SCMI transport has been configured.
+ Used to trigger a build bug when trying to build SCMI without any
+ configured transport.
+
+config ARM_SCMI_HAVE_SHMEM
+ bool
+ help
+ This declares whether a shared memory based transport for SCMI is
+ available.
+
+config ARM_SCMI_HAVE_MSG
+ bool
+ help
+ This declares whether a message passing based transport for SCMI is
+ available.
+
+config ARM_SCMI_TRANSPORT_MAILBOX
+ bool "SCMI transport based on Mailbox"
+ depends on MAILBOX
+ select ARM_SCMI_HAVE_TRANSPORT
+ select ARM_SCMI_HAVE_SHMEM
+ default y
+ help
+ Enable mailbox based transport for SCMI.
+
+ If you want the ARM SCMI PROTOCOL stack to include support for a
+ transport based on mailboxes, answer Y.
+
+config ARM_SCMI_TRANSPORT_SMC
+ bool "SCMI transport based on SMC"
+ depends on HAVE_ARM_SMCCC_DISCOVERY
+ select ARM_SCMI_HAVE_TRANSPORT
+ select ARM_SCMI_HAVE_SHMEM
+ default y
+ help
+ Enable SMC based transport for SCMI.
+
+ If you want the ARM SCMI PROTOCOL stack to include support for a
+ transport based on SMC, answer Y.
+
+config ARM_SCMI_TRANSPORT_VIRTIO
+ bool "SCMI transport based on VirtIO"
+ depends on VIRTIO
+ select ARM_SCMI_HAVE_TRANSPORT
+ select ARM_SCMI_HAVE_MSG
+ help
+ This enables the virtio based transport for SCMI.
+
+ If you want the ARM SCMI PROTOCOL stack to include support for a
+ transport based on VirtIO, answer Y.
+
+endif #ARM_SCMI_PROTOCOL
+
+config ARM_SCMI_POWER_DOMAIN
+ tristate "SCMI power domain driver"
+ depends on ARM_SCMI_PROTOCOL || (COMPILE_TEST && OF)
+ default y
+ select PM_GENERIC_DOMAINS if PM
+ help
+ This enables support for the SCMI power domains which can be
+ enabled or disabled via the SCP firmware
+
+ This driver can also be built as a module. If so, the module
+ will be called scmi_pm_domain. Note this may be needed early in boot
+ before rootfs may be available.
+
+endmenu
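With the transports now individually selectable, a minimal configuration fragment enabling the SCMI stack with only the new virtio transport could look like this (illustrative only; ARM_SCMI_HAVE_TRANSPORT and ARM_SCMI_HAVE_MSG are selected automatically by the transport option):

    CONFIG_ARM_SCMI_PROTOCOL=y
    # CONFIG_ARM_SCMI_TRANSPORT_MAILBOX is not set
    # CONFIG_ARM_SCMI_TRANSPORT_SMC is not set
    CONFIG_ARM_SCMI_TRANSPORT_VIRTIO=y
    CONFIG_ARM_SCMI_POWER_DOMAIN=y
    CONFIG_VIRTIO=y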
diff --git a/drivers/firmware/arm_scmi/Makefile b/drivers/firmware/arm_scmi/Makefile
index 6a2ef63306d6..1dcf123d64ab 100644
--- a/drivers/firmware/arm_scmi/Makefile
+++ b/drivers/firmware/arm_scmi/Makefile
@@ -1,9 +1,11 @@
# SPDX-License-Identifier: GPL-2.0-only
scmi-bus-y = bus.o
scmi-driver-y = driver.o notify.o
-scmi-transport-y = shmem.o
-scmi-transport-$(CONFIG_MAILBOX) += mailbox.o
-scmi-transport-$(CONFIG_HAVE_ARM_SMCCC_DISCOVERY) += smc.o
+scmi-transport-$(CONFIG_ARM_SCMI_HAVE_SHMEM) = shmem.o
+scmi-transport-$(CONFIG_ARM_SCMI_TRANSPORT_MAILBOX) += mailbox.o
+scmi-transport-$(CONFIG_ARM_SCMI_TRANSPORT_SMC) += smc.o
+scmi-transport-$(CONFIG_ARM_SCMI_HAVE_MSG) += msg.o
+scmi-transport-$(CONFIG_ARM_SCMI_TRANSPORT_VIRTIO) += virtio.o
scmi-protocols-y = base.o clock.o perf.o power.o reset.o sensors.o system.o voltage.o
scmi-module-objs := $(scmi-bus-y) $(scmi-driver-y) $(scmi-protocols-y) \
$(scmi-transport-y)
diff --git a/drivers/firmware/arm_scmi/common.h b/drivers/firmware/arm_scmi/common.h
index 8685619d38f9..dea1bfbe1052 100644
--- a/drivers/firmware/arm_scmi/common.h
+++ b/drivers/firmware/arm_scmi/common.h
@@ -14,8 +14,12 @@
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/kernel.h>
+#include <linux/hashtable.h>
+#include <linux/list.h>
#include <linux/module.h>
+#include <linux/refcount.h>
#include <linux/scmi_protocol.h>
+#include <linux/spinlock.h>
#include <linux/types.h>
#include <asm/unaligned.h>
@@ -65,11 +69,22 @@ struct scmi_msg_resp_prot_version {
#define MSG_XTRACT_TOKEN(hdr) FIELD_GET(MSG_TOKEN_ID_MASK, (hdr))
#define MSG_TOKEN_MAX (MSG_XTRACT_TOKEN(MSG_TOKEN_ID_MASK) + 1)
+/*
+ * Size of @pending_xfers hashtable included in @scmi_xfers_info; ideally, in
+ * order to minimize space and collisions, this should equal max_msg, i.e. the
+ * maximum number of in-flight messages on a specific platform, but such value
+ * is only available at runtime while kernel hashtables are statically sized:
+ * pick instead as a fixed static size the maximum number of entries that can
+ * fit the whole table into one 4k page.
+ */
+#define SCMI_PENDING_XFERS_HT_ORDER_SZ 9
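A quick check on the sizing above, assuming 64-bit pointers: an order of 9 gives 2^9 = 512 buckets, each bucket being a single struct hlist_head pointer of 8 bytes, so the table occupies 512 * 8 = 4096 bytes, exactly one 4k page.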
+
/**
* struct scmi_msg_hdr - Message(Tx/Rx) header
*
* @id: The identifier of the message being sent
* @protocol_id: The identifier of the protocol used to send @id message
+ * @type: The SCMI type for this message
* @seq: The token to identify the message. When a message returns, the
* platform returns the whole message header unmodified including the
* token
@@ -80,6 +95,7 @@ struct scmi_msg_resp_prot_version {
struct scmi_msg_hdr {
u8 id;
u8 protocol_id;
+ u8 type;
u16 seq;
u32 status;
bool poll_completion;
@@ -89,13 +105,14 @@ struct scmi_msg_hdr {
* pack_scmi_header() - packs and returns 32-bit header
*
* @hdr: pointer to header containing all the information on message id,
- * protocol id and sequence id.
+ * protocol id, sequence id and type.
*
* Return: 32-bit packed message header to be sent to the platform.
*/
static inline u32 pack_scmi_header(struct scmi_msg_hdr *hdr)
{
return FIELD_PREP(MSG_ID_MASK, hdr->id) |
+ FIELD_PREP(MSG_TYPE_MASK, hdr->type) |
FIELD_PREP(MSG_TOKEN_ID_MASK, hdr->seq) |
FIELD_PREP(MSG_PROTOCOL_ID_MASK, hdr->protocol_id);
}
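As a worked example of the packing above (assuming the usual SCMI header layout behind these masks: bits [7:0] message id, [9:8] type, [17:10] protocol id, [27:18] token), a command with id 0x3, type 0, protocol id 0x14 and token 0x12 packs to:

    0x3 | (0x0 << 8) | (0x14 << 10) | (0x12 << 18) = 0x00485003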
@@ -110,6 +127,7 @@ static inline void unpack_scmi_header(u32 msg_hdr, struct scmi_msg_hdr *hdr)
{
hdr->id = MSG_XTRACT_ID(msg_hdr);
hdr->protocol_id = MSG_XTRACT_PROT_ID(msg_hdr);
+ hdr->type = MSG_XTRACT_TYPE(msg_hdr);
}
/**
@@ -134,6 +152,27 @@ struct scmi_msg {
* buffer for the rx path as we use for the tx path.
* @done: command message transmit completion event
* @async_done: pointer to delayed response message received event completion
+ * @pending: True for xfers added to @pending_xfers hashtable
+ * @node: An hlist_node reference used to store this xfer, alternatively, on
+ * the free list @free_xfers or in the @pending_xfers hashtable
+ * @users: A refcount to track the active users for this xfer.
+ * This is meant to protect against the possibility that, when a command
+ * transaction times out concurrently with the reception of a valid
+ * response message, the xfer could be finally put on the TX path, and
+ * so vanish, while on the RX path scmi_rx_callback() is still
+ * processing it: in such a case this refcounting will ensure that, even
+ * though the timed-out transaction will anyway cause the command
+ * request to be reported as failed by time-out, the underlying xfer
+ * cannot be discarded and possibly reused until the last one user on
+ * the RX path has released it.
+ * @busy: An atomic flag to ensure exclusive write access to this xfer
+ * @state: The current state of this transfer, with states transitions deemed
+ * valid being:
+ * - SCMI_XFER_SENT_OK -> SCMI_XFER_RESP_OK [ -> SCMI_XFER_DRESP_OK ]
+ * - SCMI_XFER_SENT_OK -> SCMI_XFER_DRESP_OK
+ * (Missing synchronous response is assumed OK and ignored)
+ * @lock: A spinlock to protect state and busy fields.
+ * @priv: A pointer for transport private usage.
*/
struct scmi_xfer {
int transfer_id;
@@ -142,8 +181,36 @@ struct scmi_xfer {
struct scmi_msg rx;
struct completion done;
struct completion *async_done;
+ bool pending;
+ struct hlist_node node;
+ refcount_t users;
+#define SCMI_XFER_FREE 0
+#define SCMI_XFER_BUSY 1
+ atomic_t busy;
+#define SCMI_XFER_SENT_OK 0
+#define SCMI_XFER_RESP_OK 1
+#define SCMI_XFER_DRESP_OK 2
+ int state;
+ /* A lock to protect state and busy fields */
+ spinlock_t lock;
+ void *priv;
};
+/*
+ * A helper macro to lookup an xfer from the @pending_xfers hashtable
+ * using the message sequence number token as a key.
+ */
+#define XFER_FIND(__ht, __k) \
+({ \
+ typeof(__k) k_ = __k; \
+ struct scmi_xfer *xfer_ = NULL; \
+ \
+ hash_for_each_possible((__ht), xfer_, node, k_) \
+ if (xfer_->hdr.seq == k_) \
+ break; \
+ xfer_; \
+})
+
struct scmi_xfer_ops;
/**
@@ -283,9 +350,13 @@ struct scmi_chan_info {
/**
* struct scmi_transport_ops - Structure representing a SCMI transport ops
*
+ * @link_supplier: Optional callback to add link to a supplier device
* @chan_available: Callback to check if channel is available or not
* @chan_setup: Callback to allocate and setup a channel
* @chan_free: Callback to free a channel
+ * @get_max_msg: Optional callback to provide max_msg dynamically
+ * Returns the maximum number of messages for the channel type
+ * (tx or rx) that can be pending simultaneously in the system
* @send_message: Callback to send a message
* @mark_txdone: Callback to mark tx as done
* @fetch_response: Callback to fetch response
@@ -294,10 +365,12 @@ struct scmi_chan_info {
* @poll_done: Callback to poll transfer status
*/
struct scmi_transport_ops {
+ int (*link_supplier)(struct device *dev);
bool (*chan_available)(struct device *dev, int idx);
int (*chan_setup)(struct scmi_chan_info *cinfo, struct device *dev,
bool tx);
int (*chan_free)(int id, void *p, void *data);
+ unsigned int (*get_max_msg)(struct scmi_chan_info *base_cinfo);
int (*send_message)(struct scmi_chan_info *cinfo,
struct scmi_xfer *xfer);
void (*mark_txdone)(struct scmi_chan_info *cinfo, int ret);
@@ -317,25 +390,39 @@ struct scmi_device *scmi_child_dev_find(struct device *parent,
/**
* struct scmi_desc - Description of SoC integration
*
+ * @transport_init: An optional function that a transport can provide to
+ * initialize some transport-specific setup during SCMI core
+ * initialization, so ahead of SCMI core probing.
+ * @transport_exit: An optional function that a transport can provide to
+ * de-initialize some transport-specific setup during SCMI core
+ * de-initialization, so after SCMI core removal.
* @ops: Pointer to the transport specific ops structure
* @max_rx_timeout_ms: Timeout for communication with SoC (in Milliseconds)
- * @max_msg: Maximum number of messages that can be pending
- * simultaneously in the system
+ * @max_msg: Maximum number of messages for a channel type (tx or rx) that can
+ * be pending simultaneously in the system. May be overridden by the
+ * get_max_msg op.
* @max_msg_size: Maximum size of data per message that can be handled.
*/
struct scmi_desc {
+ int (*transport_init)(void);
+ void (*transport_exit)(void);
const struct scmi_transport_ops *ops;
int max_rx_timeout_ms;
int max_msg;
int max_msg_size;
};
+#ifdef CONFIG_ARM_SCMI_TRANSPORT_MAILBOX
extern const struct scmi_desc scmi_mailbox_desc;
-#ifdef CONFIG_HAVE_ARM_SMCCC_DISCOVERY
+#endif
+#ifdef CONFIG_ARM_SCMI_TRANSPORT_SMC
extern const struct scmi_desc scmi_smc_desc;
#endif
+#ifdef CONFIG_ARM_SCMI_TRANSPORT_VIRTIO
+extern const struct scmi_desc scmi_virtio_desc;
+#endif
-void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr);
+void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr, void *priv);
void scmi_free_channel(struct scmi_chan_info *cinfo, struct idr *idr, int id);
/* shmem related declarations */
@@ -352,8 +439,22 @@ void shmem_clear_channel(struct scmi_shared_mem __iomem *shmem);
bool shmem_poll_done(struct scmi_shared_mem __iomem *shmem,
struct scmi_xfer *xfer);
+/* declarations for message passing transports */
+struct scmi_msg_payld;
+
+/* Maximum overhead of message w.r.t. struct scmi_desc.max_msg_size */
+#define SCMI_MSG_MAX_PROT_OVERHEAD (2 * sizeof(__le32))
+
+size_t msg_response_size(struct scmi_xfer *xfer);
+size_t msg_command_size(struct scmi_xfer *xfer);
+void msg_tx_prepare(struct scmi_msg_payld *msg, struct scmi_xfer *xfer);
+u32 msg_read_header(struct scmi_msg_payld *msg);
+void msg_fetch_response(struct scmi_msg_payld *msg, size_t len,
+ struct scmi_xfer *xfer);
+void msg_fetch_notification(struct scmi_msg_payld *msg, size_t len,
+ size_t max_len, struct scmi_xfer *xfer);
+
void scmi_notification_instance_data_set(const struct scmi_handle *handle,
void *priv);
void *scmi_notification_instance_data_get(const struct scmi_handle *handle);
-
#endif /* _SCMI_COMMON_H */
diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
index 9b2e8d42a992..b406b3f78f46 100644
--- a/drivers/firmware/arm_scmi/driver.c
+++ b/drivers/firmware/arm_scmi/driver.c
@@ -21,6 +21,7 @@
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/ktime.h>
+#include <linux/hashtable.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of_address.h>
@@ -67,16 +68,23 @@ struct scmi_requested_dev {
/**
* struct scmi_xfers_info - Structure to manage transfer information
*
- * @xfer_block: Preallocated Message array
* @xfer_alloc_table: Bitmap table for allocated messages.
* Index of this bitmap table is also used for message
* sequence identifier.
* @xfer_lock: Protection for message allocation
+ * @max_msg: Maximum number of messages that can be pending
+ * @free_xfers: A free list for available to use xfers. It is initialized with
+ * a number of xfers equal to the maximum allowed in-flight
+ * messages.
+ * @pending_xfers: A hashtable, indexed by msg_hdr.seq, used to keep all the
+ * currently in-flight messages.
*/
struct scmi_xfers_info {
- struct scmi_xfer *xfer_block;
unsigned long *xfer_alloc_table;
spinlock_t xfer_lock;
+ int max_msg;
+ struct hlist_head free_xfers;
+ DECLARE_HASHTABLE(pending_xfers, SCMI_PENDING_XFERS_HT_ORDER_SZ);
};
/**
@@ -172,19 +180,6 @@ static inline int scmi_to_linux_errno(int errno)
return -EIO;
}
-/**
- * scmi_dump_header_dbg() - Helper to dump a message header.
- *
- * @dev: Device pointer corresponding to the SCMI entity
- * @hdr: pointer to header.
- */
-static inline void scmi_dump_header_dbg(struct device *dev,
- struct scmi_msg_hdr *hdr)
-{
- dev_dbg(dev, "Message ID: %x Sequence ID: %x Protocol: %x\n",
- hdr->id, hdr->seq, hdr->protocol_id);
-}
-
void scmi_notification_instance_data_set(const struct scmi_handle *handle,
void *priv)
{
@@ -205,45 +200,189 @@ void *scmi_notification_instance_data_get(const struct scmi_handle *handle)
}
/**
+ * scmi_xfer_token_set - Reserve and set new token for the xfer at hand
+ *
+ * @minfo: Pointer to Tx/Rx Message management info based on channel type
+ * @xfer: The xfer to act upon
+ *
+ * Pick the next unused monotonically increasing token and set it into
+ * xfer->hdr.seq: picking a monotonically increasing value avoids immediate
+ * reuse of freshly completed or timed-out xfers, thus mitigating the risk
+ * of incorrect association of a late and expired xfer with a live in-flight
+ * transaction, both happening to re-use the same token identifier.
+ *
+ * Since platform is NOT required to answer our request in-order we should
+ * account for a few rare but possible scenarios:
+ *
+ * - exactly 'next_token' may be NOT available so pick xfer_id >= next_token
+ * using find_next_zero_bit() starting from candidate next_token bit
+ *
+ * - all tokens ahead up to (MSG_TOKEN_ID_MASK - 1) are used in-flight but
+ *   there are plenty of free tokens at the start, so try a second pass using
+ *   find_next_zero_bit() starting from 0.
+ *
+ * X = used in-flight
+ *
+ * Normal
+ * ------
+ *
+ * |- xfer_id picked
+ * -----------+----------------------------------------------------------
+ * | | |X|X|X| | | | | | ... ... ... ... ... ... ... ... ... ... ...|X|X|
+ * ----------------------------------------------------------------------
+ * ^
+ * |- next_token
+ *
+ * Out-of-order pending at start
+ * -----------------------------
+ *
+ * |- xfer_id picked, last_token fixed
+ * -----+----------------------------------------------------------------
+ * |X|X| | | | |X|X| ... ... ... ... ... ... ... ... ... ... ... ...|X| |
+ * ----------------------------------------------------------------------
+ * ^
+ * |- next_token
+ *
+ *
+ * Out-of-order pending at end
+ * ---------------------------
+ *
+ * |- xfer_id picked, last_token fixed
+ * -----+----------------------------------------------------------------
+ * |X|X| | | | |X|X| ... ... ... ... ... ... ... ... ... ... |X|X|X||X|X|
+ * ----------------------------------------------------------------------
+ * ^
+ * |- next_token
+ *
+ * Context: Assumes to be called with @xfer_lock already acquired.
+ *
+ * Return: 0 on Success or error
+ */
+static int scmi_xfer_token_set(struct scmi_xfers_info *minfo,
+ struct scmi_xfer *xfer)
+{
+ unsigned long xfer_id, next_token;
+
+ /*
+ * Pick a candidate monotonic token in range [0, MSG_TOKEN_MAX - 1]
+ * using the pre-allocated transfer_id as a base.
+ * Note that the global transfer_id is shared across all message types
+ * so there could be holes in the allocated set of monotonic sequence
+ * numbers, but that is going to limit the effectiveness of the
+ * mitigation only in very rare limit conditions.
+ */
+ next_token = (xfer->transfer_id & (MSG_TOKEN_MAX - 1));
+
+ /* Pick the next available xfer_id >= next_token */
+ xfer_id = find_next_zero_bit(minfo->xfer_alloc_table,
+ MSG_TOKEN_MAX, next_token);
+ if (xfer_id == MSG_TOKEN_MAX) {
+ /*
+ * After heavily out-of-order responses, there are no free
+ * tokens ahead, but only at start of xfer_alloc_table so
+ * try again from the beginning.
+ */
+ xfer_id = find_next_zero_bit(minfo->xfer_alloc_table,
+ MSG_TOKEN_MAX, 0);
+ /*
+ * Something is wrong if we got here since there can be a
+ * maximum number of (MSG_TOKEN_MAX - 1) in-flight messages
+ * but we have not found any free token [0, MSG_TOKEN_MAX - 1].
+ */
+ if (WARN_ON_ONCE(xfer_id == MSG_TOKEN_MAX))
+ return -ENOMEM;
+ }
+
+ /* Update +/- last_token accordingly if we skipped some hole */
+ if (xfer_id != next_token)
+ atomic_add((int)(xfer_id - next_token), &transfer_last_id);
+
+ /* Set in-flight */
+ set_bit(xfer_id, minfo->xfer_alloc_table);
+ xfer->hdr.seq = (u16)xfer_id;
+
+ return 0;
+}
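As a worked example: the token field is 10 bits wide, so MSG_TOKEN_MAX is 1024 and a transfer_id of 0x12345 gives a candidate next_token of 0x12345 & 0x3FF = 0x345. If token 0x345 is still in-flight but 0x347 is the first free one, find_next_zero_bit() returns 0x347 and transfer_last_id is advanced by 2 so that later allocations keep moving forward.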
+
+/**
+ * scmi_xfer_token_clear - Release the token
+ *
+ * @minfo: Pointer to Tx/Rx Message management info based on channel type
+ * @xfer: The xfer to act upon
+ */
+static inline void scmi_xfer_token_clear(struct scmi_xfers_info *minfo,
+ struct scmi_xfer *xfer)
+{
+ clear_bit(xfer->hdr.seq, minfo->xfer_alloc_table);
+}
+
+/**
* scmi_xfer_get() - Allocate one message
*
* @handle: Pointer to SCMI entity handle
* @minfo: Pointer to Tx/Rx Message management info based on channel type
+ * @set_pending: If true a monotonic token is picked and the xfer is added to
+ * the pending hash table.
*
* Helper function which is used by various message functions that are
* exposed to clients of this driver for allocating a message traffic event.
*
- * This function can sleep depending on pending requests already in the system
- * for the SCMI entity. Further, this also holds a spinlock to maintain
- * integrity of internal data structures.
+ * Picks an xfer from the free list @free_xfers (if any available) and, if
+ * required, sets a monotonically increasing token and stores the inflight xfer
+ * into the @pending_xfers hashtable for later retrieval.
+ *
+ * The successfully initialized xfer is refcounted.
+ *
+ * Context: Holds @xfer_lock while manipulating @xfer_alloc_table and
+ * @free_xfers.
*
* Return: 0 if all went fine, else corresponding error.
*/
static struct scmi_xfer *scmi_xfer_get(const struct scmi_handle *handle,
- struct scmi_xfers_info *minfo)
+ struct scmi_xfers_info *minfo,
+ bool set_pending)
{
- u16 xfer_id;
+ int ret;
+ unsigned long flags;
struct scmi_xfer *xfer;
- unsigned long flags, bit_pos;
- struct scmi_info *info = handle_to_scmi_info(handle);
- /* Keep the locked section as small as possible */
spin_lock_irqsave(&minfo->xfer_lock, flags);
- bit_pos = find_first_zero_bit(minfo->xfer_alloc_table,
- info->desc->max_msg);
- if (bit_pos == info->desc->max_msg) {
+ if (hlist_empty(&minfo->free_xfers)) {
spin_unlock_irqrestore(&minfo->xfer_lock, flags);
return ERR_PTR(-ENOMEM);
}
- set_bit(bit_pos, minfo->xfer_alloc_table);
- spin_unlock_irqrestore(&minfo->xfer_lock, flags);
- xfer_id = bit_pos;
+ /* grab an xfer from the free_list */
+ xfer = hlist_entry(minfo->free_xfers.first, struct scmi_xfer, node);
+ hlist_del_init(&xfer->node);
- xfer = &minfo->xfer_block[xfer_id];
- xfer->hdr.seq = xfer_id;
+ /*
+ * Allocate transfer_id early so that can be used also as base for
+ * monotonic sequence number generation if needed.
+ */
xfer->transfer_id = atomic_inc_return(&transfer_last_id);
+ if (set_pending) {
+ /* Pick and set monotonic token */
+ ret = scmi_xfer_token_set(minfo, xfer);
+ if (!ret) {
+ hash_add(minfo->pending_xfers, &xfer->node,
+ xfer->hdr.seq);
+ xfer->pending = true;
+ } else {
+ dev_err(handle->dev,
+ "Failed to get monotonic token %d\n", ret);
+ hlist_add_head(&xfer->node, &minfo->free_xfers);
+ xfer = ERR_PTR(ret);
+ }
+ }
+
+ if (!IS_ERR(xfer)) {
+ refcount_set(&xfer->users, 1);
+ atomic_set(&xfer->busy, SCMI_XFER_FREE);
+ }
+ spin_unlock_irqrestore(&minfo->xfer_lock, flags);
+
return xfer;
}
@@ -253,6 +392,9 @@ static struct scmi_xfer *scmi_xfer_get(const struct scmi_handle *handle,
* @minfo: Pointer to Tx/Rx Message management info based on channel type
* @xfer: message that was reserved by scmi_xfer_get
*
+ * After refcount check, possibly release an xfer, clearing the token slot,
+ * removing xfer from @pending_xfers and putting it back into free_xfers.
+ *
* This holds a spinlock to maintain integrity of internal data structures.
*/
static void
@@ -260,17 +402,215 @@ __scmi_xfer_put(struct scmi_xfers_info *minfo, struct scmi_xfer *xfer)
{
unsigned long flags;
+ spin_lock_irqsave(&minfo->xfer_lock, flags);
+ if (refcount_dec_and_test(&xfer->users)) {
+ if (xfer->pending) {
+ scmi_xfer_token_clear(minfo, xfer);
+ hash_del(&xfer->node);
+ xfer->pending = false;
+ }
+ hlist_add_head(&xfer->node, &minfo->free_xfers);
+ }
+ spin_unlock_irqrestore(&minfo->xfer_lock, flags);
+}
+
+/**
+ * scmi_xfer_lookup_unlocked - Helper to lookup an xfer_id
+ *
+ * @minfo: Pointer to Tx/Rx Message management info based on channel type
+ * @xfer_id: Token ID to lookup in @pending_xfers
+ *
+ * Refcounting is untouched.
+ *
+ * Context: Assumes to be called with @xfer_lock already acquired.
+ *
+ * Return: A valid xfer on Success or error otherwise
+ */
+static struct scmi_xfer *
+scmi_xfer_lookup_unlocked(struct scmi_xfers_info *minfo, u16 xfer_id)
+{
+ struct scmi_xfer *xfer = NULL;
+
+ if (test_bit(xfer_id, minfo->xfer_alloc_table))
+ xfer = XFER_FIND(minfo->pending_xfers, xfer_id);
+
+ return xfer ?: ERR_PTR(-EINVAL);
+}
+
+/**
+ * scmi_msg_response_validate - Validate message type against state of related
+ * xfer
+ *
+ * @cinfo: A reference to the channel descriptor.
+ * @msg_type: Message type to check
+ * @xfer: A reference to the xfer to validate against @msg_type
+ *
+ * This function checks if @msg_type is congruent with the current state of
+ * a pending @xfer; if an asynchronous delayed response is received before the
+ * related synchronous response (Out-of-Order Delayed Response) the missing
+ * synchronous response is assumed to be OK and completed, carrying on with the
+ * Delayed Response: this is done to address the case in which the underlying
+ * SCMI transport can deliver such out-of-order responses.
+ *
+ * Context: Assumes to be called with xfer->lock already acquired.
+ *
+ * Return: 0 on Success, error otherwise
+ */
+static inline int scmi_msg_response_validate(struct scmi_chan_info *cinfo,
+ u8 msg_type,
+ struct scmi_xfer *xfer)
+{
/*
- * Keep the locked section as small as possible
- * NOTE: we might escape with smp_mb and no lock here..
- * but just be conservative and symmetric.
+ * Even if a response was indeed expected on this slot at this point,
+ * a buggy platform could wrongly reply feeding us an unexpected
+ * delayed response we're not prepared to handle: bail-out safely
+ * blaming firmware.
*/
+ if (msg_type == MSG_TYPE_DELAYED_RESP && !xfer->async_done) {
+ dev_err(cinfo->dev,
+ "Delayed Response for %d not expected! Buggy F/W ?\n",
+ xfer->hdr.seq);
+ return -EINVAL;
+ }
+
+ switch (xfer->state) {
+ case SCMI_XFER_SENT_OK:
+ if (msg_type == MSG_TYPE_DELAYED_RESP) {
+ /*
+ * Delayed Response expected but delivered earlier.
+ * Assume message RESPONSE was OK and skip state.
+ */
+ xfer->hdr.status = SCMI_SUCCESS;
+ xfer->state = SCMI_XFER_RESP_OK;
+ complete(&xfer->done);
+ dev_warn(cinfo->dev,
+ "Received valid OoO Delayed Response for %d\n",
+ xfer->hdr.seq);
+ }
+ break;
+ case SCMI_XFER_RESP_OK:
+ if (msg_type != MSG_TYPE_DELAYED_RESP)
+ return -EINVAL;
+ break;
+ case SCMI_XFER_DRESP_OK:
+ /* No further message expected once in SCMI_XFER_DRESP_OK */
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * scmi_xfer_state_update - Update xfer state
+ *
+ * @xfer: A reference to the xfer to update
+ * @msg_type: Type of message being processed.
+ *
+ * Note that this message is assumed to have been already successfully validated
+ * by @scmi_msg_response_validate(), so here we just update the state.
+ *
+ * Context: Assumes to be called on an xfer exclusively acquired using the
+ * busy flag.
+ */
+static inline void scmi_xfer_state_update(struct scmi_xfer *xfer, u8 msg_type)
+{
+ xfer->hdr.type = msg_type;
+
+ /* Unknown command types were already discarded earlier */
+ if (xfer->hdr.type == MSG_TYPE_COMMAND)
+ xfer->state = SCMI_XFER_RESP_OK;
+ else
+ xfer->state = SCMI_XFER_DRESP_OK;
+}
+
+static bool scmi_xfer_acquired(struct scmi_xfer *xfer)
+{
+ int ret;
+
+ ret = atomic_cmpxchg(&xfer->busy, SCMI_XFER_FREE, SCMI_XFER_BUSY);
+
+ return ret == SCMI_XFER_FREE;
+}
+
+/**
+ * scmi_xfer_command_acquire - Helper to lookup and acquire a command xfer
+ *
+ * @cinfo: A reference to the channel descriptor.
+ * @msg_hdr: A message header to use as lookup key
+ *
+ * When a valid xfer is found for the sequence number embedded in the provided
+ * msg_hdr, reference counting is properly updated and exclusive access to this
+ * xfer is granted till released with @scmi_xfer_command_release.
+ *
+ * Return: A valid @xfer on Success or error otherwise.
+ */
+static inline struct scmi_xfer *
+scmi_xfer_command_acquire(struct scmi_chan_info *cinfo, u32 msg_hdr)
+{
+ int ret;
+ unsigned long flags;
+ struct scmi_xfer *xfer;
+ struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
+ struct scmi_xfers_info *minfo = &info->tx_minfo;
+ u8 msg_type = MSG_XTRACT_TYPE(msg_hdr);
+ u16 xfer_id = MSG_XTRACT_TOKEN(msg_hdr);
+
+ /* Are we even expecting this? */
spin_lock_irqsave(&minfo->xfer_lock, flags);
- clear_bit(xfer->hdr.seq, minfo->xfer_alloc_table);
+ xfer = scmi_xfer_lookup_unlocked(minfo, xfer_id);
+ if (IS_ERR(xfer)) {
+ dev_err(cinfo->dev,
+ "Message for %d type %d is not expected!\n",
+ xfer_id, msg_type);
+ spin_unlock_irqrestore(&minfo->xfer_lock, flags);
+ return xfer;
+ }
+ refcount_inc(&xfer->users);
spin_unlock_irqrestore(&minfo->xfer_lock, flags);
+
+ spin_lock_irqsave(&xfer->lock, flags);
+ ret = scmi_msg_response_validate(cinfo, msg_type, xfer);
+ /*
+ * If a pending xfer was found which was also in a congruent state with
+ * the received message, acquire exclusive access to it setting the busy
+ * flag.
+ * Spins only on the rare limit condition of concurrent reception of
+ * RESP and DRESP for the same xfer.
+ */
+ if (!ret) {
+ spin_until_cond(scmi_xfer_acquired(xfer));
+ scmi_xfer_state_update(xfer, msg_type);
+ }
+ spin_unlock_irqrestore(&xfer->lock, flags);
+
+ if (ret) {
+ dev_err(cinfo->dev,
+ "Invalid message type:%d for %d - HDR:0x%X state:%d\n",
+ msg_type, xfer_id, msg_hdr, xfer->state);
+ /* On error the refcount incremented above has to be dropped */
+ __scmi_xfer_put(minfo, xfer);
+ xfer = ERR_PTR(-EINVAL);
+ }
+
+ return xfer;
+}
+
+static inline void scmi_xfer_command_release(struct scmi_info *info,
+ struct scmi_xfer *xfer)
+{
+ atomic_set(&xfer->busy, SCMI_XFER_FREE);
+ __scmi_xfer_put(&info->tx_minfo, xfer);
+}
+
+static inline void scmi_clear_channel(struct scmi_info *info,
+ struct scmi_chan_info *cinfo)
+{
+ if (info->desc->ops->clear_channel)
+ info->desc->ops->clear_channel(cinfo);
}
-static void scmi_handle_notification(struct scmi_chan_info *cinfo, u32 msg_hdr)
+static void scmi_handle_notification(struct scmi_chan_info *cinfo,
+ u32 msg_hdr, void *priv)
{
struct scmi_xfer *xfer;
struct device *dev = cinfo->dev;
@@ -279,16 +619,17 @@ static void scmi_handle_notification(struct scmi_chan_info *cinfo, u32 msg_hdr)
ktime_t ts;
ts = ktime_get_boottime();
- xfer = scmi_xfer_get(cinfo->handle, minfo);
+ xfer = scmi_xfer_get(cinfo->handle, minfo, false);
if (IS_ERR(xfer)) {
dev_err(dev, "failed to get free message slot (%ld)\n",
PTR_ERR(xfer));
- info->desc->ops->clear_channel(cinfo);
+ scmi_clear_channel(info, cinfo);
return;
}
unpack_scmi_header(msg_hdr, &xfer->hdr);
- scmi_dump_header_dbg(dev, &xfer->hdr);
+ if (priv)
+ xfer->priv = priv;
info->desc->ops->fetch_notification(cinfo, info->desc->max_msg_size,
xfer);
scmi_notify(cinfo->handle, xfer->hdr.protocol_id,
@@ -300,59 +641,41 @@ static void scmi_handle_notification(struct scmi_chan_info *cinfo, u32 msg_hdr)
__scmi_xfer_put(minfo, xfer);
- info->desc->ops->clear_channel(cinfo);
+ scmi_clear_channel(info, cinfo);
}
static void scmi_handle_response(struct scmi_chan_info *cinfo,
- u16 xfer_id, u8 msg_type)
+ u32 msg_hdr, void *priv)
{
struct scmi_xfer *xfer;
- struct device *dev = cinfo->dev;
struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
- struct scmi_xfers_info *minfo = &info->tx_minfo;
- /* Are we even expecting this? */
- if (!test_bit(xfer_id, minfo->xfer_alloc_table)) {
- dev_err(dev, "message for %d is not expected!\n", xfer_id);
- info->desc->ops->clear_channel(cinfo);
- return;
- }
-
- xfer = &minfo->xfer_block[xfer_id];
- /*
- * Even if a response was indeed expected on this slot at this point,
- * a buggy platform could wrongly reply feeding us an unexpected
- * delayed response we're not prepared to handle: bail-out safely
- * blaming firmware.
- */
- if (unlikely(msg_type == MSG_TYPE_DELAYED_RESP && !xfer->async_done)) {
- dev_err(dev,
- "Delayed Response for %d not expected! Buggy F/W ?\n",
- xfer_id);
- info->desc->ops->clear_channel(cinfo);
- /* It was unexpected, so nobody will clear the xfer if not us */
- __scmi_xfer_put(minfo, xfer);
+ xfer = scmi_xfer_command_acquire(cinfo, msg_hdr);
+ if (IS_ERR(xfer)) {
+ scmi_clear_channel(info, cinfo);
return;
}
/* rx.len could be shrunk in the sync do_xfer, so reset to maxsz */
- if (msg_type == MSG_TYPE_DELAYED_RESP)
+ if (xfer->hdr.type == MSG_TYPE_DELAYED_RESP)
xfer->rx.len = info->desc->max_msg_size;
- scmi_dump_header_dbg(dev, &xfer->hdr);
-
+ if (priv)
+ xfer->priv = priv;
info->desc->ops->fetch_response(cinfo, xfer);
trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id,
xfer->hdr.protocol_id, xfer->hdr.seq,
- msg_type);
+ xfer->hdr.type);
- if (msg_type == MSG_TYPE_DELAYED_RESP) {
- info->desc->ops->clear_channel(cinfo);
+ if (xfer->hdr.type == MSG_TYPE_DELAYED_RESP) {
+ scmi_clear_channel(info, cinfo);
complete(xfer->async_done);
} else {
complete(&xfer->done);
}
+
+ scmi_xfer_command_release(info, xfer);
}
/**
@@ -360,6 +683,7 @@ static void scmi_handle_response(struct scmi_chan_info *cinfo,
*
* @cinfo: SCMI channel info
* @msg_hdr: Message header
+ * @priv: Transport specific private data.
*
* Processes one received message to appropriate transfer information and
* signals completion of the transfer.
@@ -367,18 +691,17 @@ static void scmi_handle_response(struct scmi_chan_info *cinfo,
* NOTE: This function will be invoked in IRQ context, hence should be
* as optimal as possible.
*/
-void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr)
+void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr, void *priv)
{
- u16 xfer_id = MSG_XTRACT_TOKEN(msg_hdr);
u8 msg_type = MSG_XTRACT_TYPE(msg_hdr);
switch (msg_type) {
case MSG_TYPE_NOTIFICATION:
- scmi_handle_notification(cinfo, msg_hdr);
+ scmi_handle_notification(cinfo, msg_hdr, priv);
break;
case MSG_TYPE_COMMAND:
case MSG_TYPE_DELAYED_RESP:
- scmi_handle_response(cinfo, xfer_id, msg_type);
+ scmi_handle_response(cinfo, msg_hdr, priv);
break;
default:
WARN_ONCE(1, "received unknown msg_type:%d\n", msg_type);
@@ -390,7 +713,7 @@ void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr)
* xfer_put() - Release a transmit message
*
* @ph: Pointer to SCMI protocol handle
- * @xfer: message that was reserved by scmi_xfer_get
+ * @xfer: message that was reserved by xfer_get_init
*/
static void xfer_put(const struct scmi_protocol_handle *ph,
struct scmi_xfer *xfer)
@@ -408,7 +731,12 @@ static bool scmi_xfer_done_no_timeout(struct scmi_chan_info *cinfo,
{
struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
+ /*
+ * Poll also on xfer->done so that polling can be forcibly terminated
+ * in case of out-of-order receptions of delayed responses
+ */
return info->desc->ops->poll_done(cinfo, xfer) ||
+ try_wait_for_completion(&xfer->done) ||
ktime_after(ktime_get(), stop);
}
@@ -432,6 +760,12 @@ static int do_xfer(const struct scmi_protocol_handle *ph,
struct device *dev = info->dev;
struct scmi_chan_info *cinfo;
+ if (xfer->hdr.poll_completion && !info->desc->ops->poll_done) {
+ dev_warn_once(dev,
+ "Polling mode is not supported by transport.\n");
+ return -EINVAL;
+ }
+
/*
* Initialise protocol id now from protocol handle to avoid it being
* overridden by mistake (or malice) by the protocol code mangling with
@@ -448,6 +782,16 @@ static int do_xfer(const struct scmi_protocol_handle *ph,
xfer->hdr.protocol_id, xfer->hdr.seq,
xfer->hdr.poll_completion);
+ xfer->state = SCMI_XFER_SENT_OK;
+ /*
+ * Even though spinlocking is not needed here since no race is possible
+ * on xfer->state due to the monotonically increasing tokens allocation,
+ * we must anyway ensure xfer->state initialization is not re-ordered
+ * after the .send_message() to be sure that on the RX path an early
+ * ISR calling scmi_rx_callback() cannot see an old stale xfer->state.
+ */
+ smp_mb();
+
ret = info->desc->ops->send_message(cinfo, xfer);
if (ret < 0) {
dev_dbg(dev, "Failed to send message %d\n", ret);
@@ -458,11 +802,22 @@ static int do_xfer(const struct scmi_protocol_handle *ph,
ktime_t stop = ktime_add_ns(ktime_get(), SCMI_MAX_POLL_TO_NS);
spin_until_cond(scmi_xfer_done_no_timeout(cinfo, xfer, stop));
-
- if (ktime_before(ktime_get(), stop))
- info->desc->ops->fetch_response(cinfo, xfer);
- else
+ if (ktime_before(ktime_get(), stop)) {
+ unsigned long flags;
+
+ /*
+ * Do not fetch_response if an out-of-order delayed
+ * response is being processed.
+ */
+ spin_lock_irqsave(&xfer->lock, flags);
+ if (xfer->state == SCMI_XFER_SENT_OK) {
+ info->desc->ops->fetch_response(cinfo, xfer);
+ xfer->state = SCMI_XFER_RESP_OK;
+ }
+ spin_unlock_irqrestore(&xfer->lock, flags);
+ } else {
ret = -ETIMEDOUT;
+ }
} else {
/* And we wait for the response. */
timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms);
@@ -557,7 +912,7 @@ static int xfer_get_init(const struct scmi_protocol_handle *ph,
tx_size > info->desc->max_msg_size)
return -ERANGE;
- xfer = scmi_xfer_get(pi->handle, minfo);
+ xfer = scmi_xfer_get(pi->handle, minfo, true);
if (IS_ERR(xfer)) {
ret = PTR_ERR(xfer);
dev_err(dev, "failed to get free message slot(%d)\n", ret);
@@ -566,6 +921,7 @@ static int xfer_get_init(const struct scmi_protocol_handle *ph,
xfer->tx.len = tx_size;
xfer->rx.len = rx_size ? : info->desc->max_msg_size;
+ xfer->hdr.type = MSG_TYPE_COMMAND;
xfer->hdr.id = msg_id;
xfer->hdr.poll_completion = false;
@@ -1026,25 +1382,32 @@ static int __scmi_xfer_info_init(struct scmi_info *sinfo,
const struct scmi_desc *desc = sinfo->desc;
/* Pre-allocated messages, no more than what hdr.seq can support */
- if (WARN_ON(!desc->max_msg || desc->max_msg > MSG_TOKEN_MAX)) {
+ if (WARN_ON(!info->max_msg || info->max_msg > MSG_TOKEN_MAX)) {
dev_err(dev,
"Invalid maximum messages %d, not in range [1 - %lu]\n",
- desc->max_msg, MSG_TOKEN_MAX);
+ info->max_msg, MSG_TOKEN_MAX);
return -EINVAL;
}
- info->xfer_block = devm_kcalloc(dev, desc->max_msg,
- sizeof(*info->xfer_block), GFP_KERNEL);
- if (!info->xfer_block)
- return -ENOMEM;
+ hash_init(info->pending_xfers);
- info->xfer_alloc_table = devm_kcalloc(dev, BITS_TO_LONGS(desc->max_msg),
+ /* Allocate a bitmask sized to hold MSG_TOKEN_MAX tokens */
+ info->xfer_alloc_table = devm_kcalloc(dev, BITS_TO_LONGS(MSG_TOKEN_MAX),
sizeof(long), GFP_KERNEL);
if (!info->xfer_alloc_table)
return -ENOMEM;
- /* Pre-initialize the buffer pointer to pre-allocated buffers */
- for (i = 0, xfer = info->xfer_block; i < desc->max_msg; i++, xfer++) {
+ /*
+ * Preallocate a number of xfers equal to max inflight messages,
+ * pre-initialize the buffer pointer to pre-allocated buffers and
+ * attach all of them to the free list
+ */
+ INIT_HLIST_HEAD(&info->free_xfers);
+ for (i = 0; i < info->max_msg; i++) {
+ xfer = devm_kzalloc(dev, sizeof(*xfer), GFP_KERNEL);
+ if (!xfer)
+ return -ENOMEM;
+
xfer->rx.buf = devm_kcalloc(dev, sizeof(u8), desc->max_msg_size,
GFP_KERNEL);
if (!xfer->rx.buf)
@@ -1052,6 +1415,10 @@ static int __scmi_xfer_info_init(struct scmi_info *sinfo,
xfer->tx.buf = xfer->rx.buf;
init_completion(&xfer->done);
+ spin_lock_init(&xfer->lock);
+
+ /* Add initialized xfer to the free list */
+ hlist_add_head(&xfer->node, &info->free_xfers);
}
spin_lock_init(&info->xfer_lock);
@@ -1059,10 +1426,40 @@ static int __scmi_xfer_info_init(struct scmi_info *sinfo,
return 0;
}
+static int scmi_channels_max_msg_configure(struct scmi_info *sinfo)
+{
+ const struct scmi_desc *desc = sinfo->desc;
+
+ if (!desc->ops->get_max_msg) {
+ sinfo->tx_minfo.max_msg = desc->max_msg;
+ sinfo->rx_minfo.max_msg = desc->max_msg;
+ } else {
+ struct scmi_chan_info *base_cinfo;
+
+ base_cinfo = idr_find(&sinfo->tx_idr, SCMI_PROTOCOL_BASE);
+ if (!base_cinfo)
+ return -EINVAL;
+ sinfo->tx_minfo.max_msg = desc->ops->get_max_msg(base_cinfo);
+
+ /* RX channel is optional so can be skipped */
+ base_cinfo = idr_find(&sinfo->rx_idr, SCMI_PROTOCOL_BASE);
+ if (base_cinfo)
+ sinfo->rx_minfo.max_msg =
+ desc->ops->get_max_msg(base_cinfo);
+ }
+
+ return 0;
+}
+
static int scmi_xfer_info_init(struct scmi_info *sinfo)
{
- int ret = __scmi_xfer_info_init(sinfo, &sinfo->tx_minfo);
+ int ret;
+
+ ret = scmi_channels_max_msg_configure(sinfo);
+ if (ret)
+ return ret;
+ ret = __scmi_xfer_info_init(sinfo, &sinfo->tx_minfo);
if (!ret && idr_find(&sinfo->rx_idr, SCMI_PROTOCOL_BASE))
ret = __scmi_xfer_info_init(sinfo, &sinfo->rx_minfo);
@@ -1390,6 +1787,21 @@ void scmi_protocol_device_unrequest(const struct scmi_device_id *id_table)
mutex_unlock(&scmi_requested_devices_mtx);
}
+static int scmi_cleanup_txrx_channels(struct scmi_info *info)
+{
+ int ret;
+ struct idr *idr = &info->tx_idr;
+
+ ret = idr_for_each(idr, info->desc->ops->chan_free, idr);
+ idr_destroy(&info->tx_idr);
+
+ idr = &info->rx_idr;
+ ret = idr_for_each(idr, info->desc->ops->chan_free, idr);
+ idr_destroy(&info->rx_idr);
+
+ return ret;
+}
+
static int scmi_probe(struct platform_device *pdev)
{
int ret;
@@ -1424,13 +1836,19 @@ static int scmi_probe(struct platform_device *pdev)
handle->devm_protocol_get = scmi_devm_protocol_get;
handle->devm_protocol_put = scmi_devm_protocol_put;
+ if (desc->ops->link_supplier) {
+ ret = desc->ops->link_supplier(dev);
+ if (ret)
+ return ret;
+ }
+
ret = scmi_txrx_setup(info, dev, SCMI_PROTOCOL_BASE);
if (ret)
return ret;
ret = scmi_xfer_info_init(info);
if (ret)
- return ret;
+ goto clear_txrx_setup;
if (scmi_notification_init(handle))
dev_err(dev, "SCMI Notifications NOT available.\n");
@@ -1443,7 +1861,7 @@ static int scmi_probe(struct platform_device *pdev)
ret = scmi_protocol_acquire(handle, SCMI_PROTOCOL_BASE);
if (ret) {
dev_err(dev, "unable to communicate with SCMI\n");
- return ret;
+ goto notification_exit;
}
mutex_lock(&scmi_list_mutex);
@@ -1482,6 +1900,12 @@ static int scmi_probe(struct platform_device *pdev)
}
return 0;
+
+notification_exit:
+ scmi_notification_exit(&info->handle);
+clear_txrx_setup:
+ scmi_cleanup_txrx_channels(info);
+ return ret;
}
void scmi_free_channel(struct scmi_chan_info *cinfo, struct idr *idr, int id)
@@ -1493,7 +1917,6 @@ static int scmi_remove(struct platform_device *pdev)
{
int ret = 0, id;
struct scmi_info *info = platform_get_drvdata(pdev);
- struct idr *idr = &info->tx_idr;
struct device_node *child;
mutex_lock(&scmi_list_mutex);
@@ -1517,14 +1940,7 @@ static int scmi_remove(struct platform_device *pdev)
idr_destroy(&info->active_protocols);
/* Safe to free channels since no more users */
- ret = idr_for_each(idr, info->desc->ops->chan_free, idr);
- idr_destroy(&info->tx_idr);
-
- idr = &info->rx_idr;
- ret = idr_for_each(idr, info->desc->ops->chan_free, idr);
- idr_destroy(&info->rx_idr);
-
- return ret;
+ return scmi_cleanup_txrx_channels(info);
}
static ssize_t protocol_version_show(struct device *dev,
@@ -1575,12 +1991,15 @@ ATTRIBUTE_GROUPS(versions);
/* Each compatible listed below must have descriptor associated with it */
static const struct of_device_id scmi_of_match[] = {
-#ifdef CONFIG_MAILBOX
+#ifdef CONFIG_ARM_SCMI_TRANSPORT_MAILBOX
{ .compatible = "arm,scmi", .data = &scmi_mailbox_desc },
#endif
-#ifdef CONFIG_HAVE_ARM_SMCCC_DISCOVERY
+#ifdef CONFIG_ARM_SCMI_TRANSPORT_SMC
{ .compatible = "arm,scmi-smc", .data = &scmi_smc_desc},
#endif
+#ifdef CONFIG_ARM_SCMI_TRANSPORT_VIRTIO
+ { .compatible = "arm,scmi-virtio", .data = &scmi_virtio_desc},
+#endif
{ /* Sentinel */ },
};
@@ -1596,10 +2015,69 @@ static struct platform_driver scmi_driver = {
.remove = scmi_remove,
};
+/**
+ * __scmi_transports_setup - Common helper to call transport-specific
+ * .init/.exit code if provided.
+ *
+ * @init: A flag to distinguish between init and exit.
+ *
+ * Note that, if provided, we invoke .init/.exit functions for all the
+ * transports currently compiled in.
+ *
+ * Return: 0 on Success.
+ */
+static inline int __scmi_transports_setup(bool init)
+{
+ int ret = 0;
+ const struct of_device_id *trans;
+
+ for (trans = scmi_of_match; trans->data; trans++) {
+ const struct scmi_desc *tdesc = trans->data;
+
+ if ((init && !tdesc->transport_init) ||
+ (!init && !tdesc->transport_exit))
+ continue;
+
+ if (init)
+ ret = tdesc->transport_init();
+ else
+ tdesc->transport_exit();
+
+ if (ret) {
+ pr_err("SCMI transport %s FAILED initialization!\n",
+ trans->compatible);
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static int __init scmi_transports_init(void)
+{
+ return __scmi_transports_setup(true);
+}
+
+static void __exit scmi_transports_exit(void)
+{
+ __scmi_transports_setup(false);
+}
+
static int __init scmi_driver_init(void)
{
+ int ret;
+
+ /* Bail out if no SCMI transport was configured */
+ if (WARN_ON(!IS_ENABLED(CONFIG_ARM_SCMI_HAVE_TRANSPORT)))
+ return -EINVAL;
+
scmi_bus_init();
+ /* Initialize any compiled-in transport which provided an init/exit */
+ ret = scmi_transports_init();
+ if (ret)
+ return ret;
+
scmi_base_register();
scmi_clock_register();
@@ -1628,6 +2106,8 @@ static void __exit scmi_driver_exit(void)
scmi_bus_exit();
+ scmi_transports_exit();
+
platform_driver_unregister(&scmi_driver);
}
module_exit(scmi_driver_exit);
diff --git a/drivers/firmware/arm_scmi/mailbox.c b/drivers/firmware/arm_scmi/mailbox.c
index e3dcb58314ae..e09eb12bf421 100644
--- a/drivers/firmware/arm_scmi/mailbox.c
+++ b/drivers/firmware/arm_scmi/mailbox.c
@@ -43,7 +43,7 @@ static void rx_callback(struct mbox_client *cl, void *m)
{
struct scmi_mailbox *smbox = client_to_scmi_mailbox(cl);
- scmi_rx_callback(smbox->cinfo, shmem_read_header(smbox->shmem));
+ scmi_rx_callback(smbox->cinfo, shmem_read_header(smbox->shmem), NULL);
}
static bool mailbox_chan_available(struct device *dev, int idx)
diff --git a/drivers/firmware/arm_scmi/msg.c b/drivers/firmware/arm_scmi/msg.c
new file mode 100644
index 000000000000..d33a704e5814
--- /dev/null
+++ b/drivers/firmware/arm_scmi/msg.c
@@ -0,0 +1,111 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * For transports using message passing.
+ *
+ * Derived from shm.c.
+ *
+ * Copyright (C) 2019-2021 ARM Ltd.
+ * Copyright (C) 2020-2021 OpenSynergy GmbH
+ */
+
+#include <linux/types.h>
+
+#include "common.h"
+
+/*
+ * struct scmi_msg_payld - Transport SDU layout
+ *
+ * The SCMI specification requires all parameters, message headers, return
+ * arguments or any protocol data to be expressed in little endian format only.
+ */
+struct scmi_msg_payld {
+ __le32 msg_header;
+ __le32 msg_payload[];
+};
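Concretely, a command SDU under this layout is just the 4-byte little-endian header followed by the payload, while a response additionally carries a 4-byte status word ahead of the payload; those two words are what the SCMI_MSG_MAX_PROT_OVERHEAD of 2 * sizeof(__le32) = 8 bytes in common.h accounts for, as the size helpers below show.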
+
+/**
+ * msg_command_size() - Actual size of transport SDU for command.
+ *
+ * @xfer: message which core has prepared for sending
+ *
+ * Return: transport SDU size.
+ */
+size_t msg_command_size(struct scmi_xfer *xfer)
+{
+ return sizeof(struct scmi_msg_payld) + xfer->tx.len;
+}
+
+/**
+ * msg_response_size() - Maximum size of transport SDU for response.
+ *
+ * @xfer: message which core has prepared for sending
+ *
+ * Return: transport SDU size.
+ */
+size_t msg_response_size(struct scmi_xfer *xfer)
+{
+ return sizeof(struct scmi_msg_payld) + sizeof(__le32) + xfer->rx.len;
+}
+
+/**
+ * msg_tx_prepare() - Set up transport SDU for command.
+ *
+ * @msg: transport SDU for command
+ * @xfer: message which is being sent
+ */
+void msg_tx_prepare(struct scmi_msg_payld *msg, struct scmi_xfer *xfer)
+{
+ msg->msg_header = cpu_to_le32(pack_scmi_header(&xfer->hdr));
+ if (xfer->tx.buf)
+ memcpy(msg->msg_payload, xfer->tx.buf, xfer->tx.len);
+}
+
+/**
+ * msg_read_header() - Read SCMI header from transport SDU.
+ *
+ * @msg: transport SDU
+ *
+ * Return: SCMI header
+ */
+u32 msg_read_header(struct scmi_msg_payld *msg)
+{
+ return le32_to_cpu(msg->msg_header);
+}
+
+/**
+ * msg_fetch_response() - Fetch response SCMI payload from transport SDU.
+ *
+ * @msg: transport SDU with response
+ * @len: transport SDU size
+ * @xfer: message being responded to
+ */
+void msg_fetch_response(struct scmi_msg_payld *msg, size_t len,
+ struct scmi_xfer *xfer)
+{
+ size_t prefix_len = sizeof(*msg) + sizeof(msg->msg_payload[0]);
+
+ xfer->hdr.status = le32_to_cpu(msg->msg_payload[0]);
+ xfer->rx.len = min_t(size_t, xfer->rx.len,
+ len >= prefix_len ? len - prefix_len : 0);
+
+ /* Take a copy to the rx buffer.. */
+ memcpy(xfer->rx.buf, &msg->msg_payload[1], xfer->rx.len);
+}
+
+/**
+ * msg_fetch_notification() - Fetch notification payload from transport SDU.
+ *
+ * @msg: transport SDU with notification
+ * @len: transport SDU size
+ * @max_len: maximum SCMI payload size to fetch
+ * @xfer: notification message
+ */
+void msg_fetch_notification(struct scmi_msg_payld *msg, size_t len,
+ size_t max_len, struct scmi_xfer *xfer)
+{
+ xfer->rx.len = min_t(size_t, max_len,
+ len >= sizeof(*msg) ? len - sizeof(*msg) : 0);
+
+ /* Take a copy to the rx buffer.. */
+ memcpy(xfer->rx.buf, msg->msg_payload, xfer->rx.len);
+}
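A rough sketch of how a message-passing transport is expected to pair these helpers; the function name and SDU handling below are illustrative only, not part of the patch:

    /* Hypothetical glue in a message-passing transport (names illustrative) */
    static void example_rx_path(struct scmi_chan_info *cinfo,
                                struct scmi_msg_payld *sdu, size_t sdu_len,
                                struct scmi_xfer *xfer)
    {
            /* The Tx side earlier used msg_tx_prepare(sdu, xfer) and sent
             * msg_command_size(xfer) bytes to the platform.
             */

            /* On reception, hand the unpacked header to the core ... */
            scmi_rx_callback(cinfo, msg_read_header(sdu), NULL);

            /* ... which calls back into .fetch_response, typically just: */
            msg_fetch_response(sdu, sdu_len, xfer);
    }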
diff --git a/drivers/firmware/arm_scmi/smc.c b/drivers/firmware/arm_scmi/smc.c
index bed5596c7209..4effecc3bb46 100644
--- a/drivers/firmware/arm_scmi/smc.c
+++ b/drivers/firmware/arm_scmi/smc.c
@@ -154,7 +154,8 @@ static int smc_send_message(struct scmi_chan_info *cinfo,
if (scmi_info->irq)
wait_for_completion(&scmi_info->tx_complete);
- scmi_rx_callback(scmi_info->cinfo, shmem_read_header(scmi_info->shmem));
+ scmi_rx_callback(scmi_info->cinfo,
+ shmem_read_header(scmi_info->shmem), NULL);
mutex_unlock(&scmi_info->shmem_lock);
diff --git a/drivers/firmware/arm_scmi/virtio.c b/drivers/firmware/arm_scmi/virtio.c
new file mode 100644
index 000000000000..224577f86928
--- /dev/null
+++ b/drivers/firmware/arm_scmi/virtio.c
@@ -0,0 +1,491 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Virtio Transport driver for Arm System Control and Management Interface
+ * (SCMI).
+ *
+ * Copyright (C) 2020-2021 OpenSynergy.
+ * Copyright (C) 2021 ARM Ltd.
+ */
+
+/**
+ * DOC: Theory of Operation
+ *
+ * The scmi-virtio transport implements a driver for the virtio SCMI device.
+ *
+ * There is one Tx channel (virtio cmdq, A2P channel) and at most one Rx
+ * channel (virtio eventq, P2A channel). Each channel is implemented through a
+ * virtqueue. Access to each virtqueue is protected by spinlocks.
+ */
+
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/virtio.h>
+#include <linux/virtio_config.h>
+
+#include <uapi/linux/virtio_ids.h>
+#include <uapi/linux/virtio_scmi.h>
+
+#include "common.h"
+
+#define VIRTIO_SCMI_MAX_MSG_SIZE 128 /* Value may be increased. */
+#define VIRTIO_SCMI_MAX_PDU_SIZE \
+ (VIRTIO_SCMI_MAX_MSG_SIZE + SCMI_MSG_MAX_PROT_OVERHEAD)
+#define DESCRIPTORS_PER_TX_MSG 2
+
+/**
+ * struct scmi_vio_channel - Transport channel information
+ *
+ * @vqueue: Associated virtqueue
+ * @cinfo: SCMI Tx or Rx channel
+ * @free_list: List of unused scmi_vio_msg, maintained for Tx channels only
+ * @is_rx: Whether channel is an Rx channel
+ * @ready: Whether transport user is ready to hear about channel
+ * @max_msg: Maximum number of pending messages for this channel.
+ * @lock: Protects access to all members except ready.
+ * @ready_lock: Protects access to ready. If required, it must be taken before
+ * lock.
+ */
+struct scmi_vio_channel {
+ struct virtqueue *vqueue;
+ struct scmi_chan_info *cinfo;
+ struct list_head free_list;
+ bool is_rx;
+ bool ready;
+ unsigned int max_msg;
+ /* lock to protect access to all members except ready. */
+ spinlock_t lock;
+ /* lock to protect access to ready flag. */
+ spinlock_t ready_lock;
+};
+
+/**
+ * struct scmi_vio_msg - Transport PDU information
+ *
+ * @request: SDU used for commands
+ * @input: SDU used for (delayed) responses and notifications
+ * @list: List which scmi_vio_msg may be part of
+ * @rx_len: Input SDU size in bytes, once input has been received
+ */
+struct scmi_vio_msg {
+ struct scmi_msg_payld *request;
+ struct scmi_msg_payld *input;
+ struct list_head list;
+ unsigned int rx_len;
+};
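The DESCRIPTORS_PER_TX_MSG value of 2 reflects that each command occupies one device-readable descriptor for the request SDU and one device-writable descriptor for the (delayed) response. A minimal sketch of such a Tx submission, assuming a helper along these lines in the send path (which is not part of this excerpt):

    static int scmi_vio_send_sketch(struct scmi_vio_channel *vioch,
                                    struct scmi_vio_msg *msg, size_t req_len)
    {
            struct scatterlist sg_out, sg_in;
            struct scatterlist *sgs[DESCRIPTORS_PER_TX_MSG] = { &sg_out, &sg_in };
            unsigned long flags;
            int rc;

            /* The device reads the request and writes the response */
            sg_init_one(&sg_out, msg->request, req_len);
            sg_init_one(&sg_in, msg->input, VIRTIO_SCMI_MAX_PDU_SIZE);

            spin_lock_irqsave(&vioch->lock, flags);
            rc = virtqueue_add_sgs(vioch->vqueue, sgs, 1, 1, msg, GFP_ATOMIC);
            if (!rc)
                    virtqueue_kick(vioch->vqueue);
            spin_unlock_irqrestore(&vioch->lock, flags);

            return rc;
    }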
+
+/* Only one SCMI VirtIO device can possibly exist */
+static struct virtio_device *scmi_vdev;
+
+static bool scmi_vio_have_vq_rx(struct virtio_device *vdev)
+{
+ return virtio_has_feature(vdev, VIRTIO_SCMI_F_P2A_CHANNELS);
+}
+
+static int scmi_vio_feed_vq_rx(struct scmi_vio_channel *vioch,
+ struct scmi_vio_msg *msg)
+{
+ struct scatterlist sg_in;
+ int rc;
+ unsigned long flags;
+
+ sg_init_one(&sg_in, msg->input, VIRTIO_SCMI_MAX_PDU_SIZE);
+
+ spin_lock_irqsave(&vioch->lock, flags);
+
+ rc = virtqueue_add_inbuf(vioch->vqueue, &sg_in, 1, msg, GFP_ATOMIC);
+ if (rc)
+ dev_err_once(vioch->cinfo->dev,
+ "failed to add to virtqueue (%d)\n", rc);
+ else
+ virtqueue_kick(vioch->vqueue);
+
+ spin_unlock_irqrestore(&vioch->lock, flags);
+
+ return rc;
+}
+
+static void scmi_finalize_message(struct scmi_vio_channel *vioch,
+ struct scmi_vio_msg *msg)
+{
+ if (vioch->is_rx) {
+ scmi_vio_feed_vq_rx(vioch, msg);
+ } else {
+ unsigned long flags;
+
+ spin_lock_irqsave(&vioch->lock, flags);
+ list_add(&msg->list, &vioch->free_list);
+ spin_unlock_irqrestore(&vioch->lock, flags);
+ }
+}
+
+static void scmi_vio_complete_cb(struct virtqueue *vqueue)
+{
+ unsigned long ready_flags;
+ unsigned long flags;
+ unsigned int length;
+ struct scmi_vio_channel *vioch;
+ struct scmi_vio_msg *msg;
+ bool cb_enabled = true;
+
+ if (WARN_ON_ONCE(!vqueue->vdev->priv))
+ return;
+ vioch = &((struct scmi_vio_channel *)vqueue->vdev->priv)[vqueue->index];
+
+ for (;;) {
+ spin_lock_irqsave(&vioch->ready_lock, ready_flags);
+
+ if (!vioch->ready) {
+ if (!cb_enabled)
+ (void)virtqueue_enable_cb(vqueue);
+ goto unlock_ready_out;
+ }
+
+ spin_lock_irqsave(&vioch->lock, flags);
+ if (cb_enabled) {
+ virtqueue_disable_cb(vqueue);
+ cb_enabled = false;
+ }
+ msg = virtqueue_get_buf(vqueue, &length);
+ if (!msg) {
+ if (virtqueue_enable_cb(vqueue))
+ goto unlock_out;
+ cb_enabled = true;
+ }
+ spin_unlock_irqrestore(&vioch->lock, flags);
+
+ if (msg) {
+ msg->rx_len = length;
+ scmi_rx_callback(vioch->cinfo,
+ msg_read_header(msg->input), msg);
+
+ scmi_finalize_message(vioch, msg);
+ }
+
+ spin_unlock_irqrestore(&vioch->ready_lock, ready_flags);
+ }
+
+unlock_out:
+ spin_unlock_irqrestore(&vioch->lock, flags);
+unlock_ready_out:
+ spin_unlock_irqrestore(&vioch->ready_lock, ready_flags);
+}
+
+static const char *const scmi_vio_vqueue_names[] = { "tx", "rx" };
+
+static vq_callback_t *scmi_vio_complete_callbacks[] = {
+ scmi_vio_complete_cb,
+ scmi_vio_complete_cb
+};
+
+static unsigned int virtio_get_max_msg(struct scmi_chan_info *base_cinfo)
+{
+ struct scmi_vio_channel *vioch = base_cinfo->transport_info;
+
+ return vioch->max_msg;
+}
+
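+/*
+ * Defer SCMI probing until a virtio SCMI device has bound to this driver,
+ * then record the dependency with a device link so the virtio device is
+ * treated as the supplier of the SCMI platform device.
+ */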
+static int virtio_link_supplier(struct device *dev)
+{
+ if (!scmi_vdev) {
+ dev_notice_once(dev,
+ "Deferring probe after not finding a bound scmi-virtio device\n");
+ return -EPROBE_DEFER;
+ }
+
+ if (!device_link_add(dev, &scmi_vdev->dev,
+ DL_FLAG_AUTOREMOVE_CONSUMER)) {
+ dev_err(dev, "Adding link to supplier virtio device failed\n");
+ return -ECANCELED;
+ }
+
+ return 0;
+}
+
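+/*
+ * A channel index is available if the matching virtqueue exists (the eventq
+ * only when VIRTIO_SCMI_F_P2A_CHANNELS has been negotiated) and has not been
+ * claimed by a previous chan_setup() yet.
+ */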
+static bool virtio_chan_available(struct device *dev, int idx)
+{
+ struct scmi_vio_channel *channels, *vioch = NULL;
+
+ if (WARN_ON_ONCE(!scmi_vdev))
+ return false;
+
+ channels = (struct scmi_vio_channel *)scmi_vdev->priv;
+
+ switch (idx) {
+ case VIRTIO_SCMI_VQ_TX:
+ vioch = &channels[VIRTIO_SCMI_VQ_TX];
+ break;
+ case VIRTIO_SCMI_VQ_RX:
+ if (scmi_vio_have_vq_rx(scmi_vdev))
+ vioch = &channels[VIRTIO_SCMI_VQ_RX];
+ break;
+ default:
+ return false;
+ }
+
+ return vioch && !vioch->cinfo;
+}
+
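+/*
+ * Pre-allocate max_msg PDUs for the channel: Tx messages carry both a request
+ * and an input SDU and start out on the free list, Rx messages only need an
+ * input SDU and are queued on the eventq immediately. Finally bind the
+ * channel to its cinfo and mark it ready for the completion callback.
+ */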
+static int virtio_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
+ bool tx)
+{
+ unsigned long flags;
+ struct scmi_vio_channel *vioch;
+ int index = tx ? VIRTIO_SCMI_VQ_TX : VIRTIO_SCMI_VQ_RX;
+ int i;
+
+ if (!scmi_vdev)
+ return -EPROBE_DEFER;
+
+ vioch = &((struct scmi_vio_channel *)scmi_vdev->priv)[index];
+
+ for (i = 0; i < vioch->max_msg; i++) {
+ struct scmi_vio_msg *msg;
+
+ msg = devm_kzalloc(cinfo->dev, sizeof(*msg), GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ if (tx) {
+ msg->request = devm_kzalloc(cinfo->dev,
+ VIRTIO_SCMI_MAX_PDU_SIZE,
+ GFP_KERNEL);
+ if (!msg->request)
+ return -ENOMEM;
+ }
+
+ msg->input = devm_kzalloc(cinfo->dev, VIRTIO_SCMI_MAX_PDU_SIZE,
+ GFP_KERNEL);
+ if (!msg->input)
+ return -ENOMEM;
+
+ if (tx) {
+ spin_lock_irqsave(&vioch->lock, flags);
+ list_add_tail(&msg->list, &vioch->free_list);
+ spin_unlock_irqrestore(&vioch->lock, flags);
+ } else {
+ scmi_vio_feed_vq_rx(vioch, msg);
+ }
+ }
+
+ spin_lock_irqsave(&vioch->lock, flags);
+ cinfo->transport_info = vioch;
+	/* Indirectly mark the channel as no longer available */
+ vioch->cinfo = cinfo;
+ spin_unlock_irqrestore(&vioch->lock, flags);
+
+ spin_lock_irqsave(&vioch->ready_lock, flags);
+ vioch->ready = true;
+ spin_unlock_irqrestore(&vioch->ready_lock, flags);
+
+ return 0;
+}
+
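+/*
+ * Quiesce the channel before freeing it: clearing ready stops the completion
+ * callback from processing further buffers, then the core channel resources
+ * are released and the cinfo is detached so the virtqueue becomes available
+ * again.
+ */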
+static int virtio_chan_free(int id, void *p, void *data)
+{
+ unsigned long flags;
+ struct scmi_chan_info *cinfo = p;
+ struct scmi_vio_channel *vioch = cinfo->transport_info;
+
+ spin_lock_irqsave(&vioch->ready_lock, flags);
+ vioch->ready = false;
+ spin_unlock_irqrestore(&vioch->ready_lock, flags);
+
+ scmi_free_channel(cinfo, data, id);
+
+ spin_lock_irqsave(&vioch->lock, flags);
+ vioch->cinfo = NULL;
+ spin_unlock_irqrestore(&vioch->lock, flags);
+
+ return 0;
+}
+
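+/*
+ * Send a command: take a message off the Tx free list (-EBUSY if none is
+ * left), serialize the xfer into its request SDU, then hand the device one
+ * readable and one writable scatterlist entry (DESCRIPTORS_PER_TX_MSG) and
+ * kick the cmdq.
+ */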
+static int virtio_send_message(struct scmi_chan_info *cinfo,
+ struct scmi_xfer *xfer)
+{
+ struct scmi_vio_channel *vioch = cinfo->transport_info;
+ struct scatterlist sg_out;
+ struct scatterlist sg_in;
+ struct scatterlist *sgs[DESCRIPTORS_PER_TX_MSG] = { &sg_out, &sg_in };
+ unsigned long flags;
+ int rc;
+ struct scmi_vio_msg *msg;
+
+ spin_lock_irqsave(&vioch->lock, flags);
+
+ if (list_empty(&vioch->free_list)) {
+ spin_unlock_irqrestore(&vioch->lock, flags);
+ return -EBUSY;
+ }
+
+ msg = list_first_entry(&vioch->free_list, typeof(*msg), list);
+ list_del(&msg->list);
+
+ msg_tx_prepare(msg->request, xfer);
+
+ sg_init_one(&sg_out, msg->request, msg_command_size(xfer));
+ sg_init_one(&sg_in, msg->input, msg_response_size(xfer));
+
+ rc = virtqueue_add_sgs(vioch->vqueue, sgs, 1, 1, msg, GFP_ATOMIC);
+ if (rc) {
+ list_add(&msg->list, &vioch->free_list);
+ dev_err_once(vioch->cinfo->dev,
+ "%s() failed to add to virtqueue (%d)\n", __func__,
+ rc);
+ } else {
+ virtqueue_kick(vioch->vqueue);
+ }
+
+ spin_unlock_irqrestore(&vioch->lock, flags);
+
+ return rc;
+}
+
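+/*
+ * Fetch helpers: the scmi_vio_msg that carried the payload was passed to the
+ * core as the priv argument of scmi_rx_callback() and comes back here via
+ * xfer->priv, so the response or notification can be copied out of its input
+ * SDU using the shared msg_* helpers.
+ */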
+static void virtio_fetch_response(struct scmi_chan_info *cinfo,
+ struct scmi_xfer *xfer)
+{
+ struct scmi_vio_msg *msg = xfer->priv;
+
+ if (msg) {
+ msg_fetch_response(msg->input, msg->rx_len, xfer);
+ xfer->priv = NULL;
+ }
+}
+
+static void virtio_fetch_notification(struct scmi_chan_info *cinfo,
+ size_t max_len, struct scmi_xfer *xfer)
+{
+ struct scmi_vio_msg *msg = xfer->priv;
+
+ if (msg) {
+ msg_fetch_notification(msg->input, msg->rx_len, max_len, xfer);
+ xfer->priv = NULL;
+ }
+}
+
+static const struct scmi_transport_ops scmi_virtio_ops = {
+ .link_supplier = virtio_link_supplier,
+ .chan_available = virtio_chan_available,
+ .chan_setup = virtio_chan_setup,
+ .chan_free = virtio_chan_free,
+ .get_max_msg = virtio_get_max_msg,
+ .send_message = virtio_send_message,
+ .fetch_response = virtio_fetch_response,
+ .fetch_notification = virtio_fetch_notification,
+};
+
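+/*
+ * Probe the single virtio SCMI device: allocate one scmi_vio_channel per
+ * virtqueue (cmdq always, eventq only if the P2A feature is present), find
+ * the virtqueues and derive max_msg from the vring size, halved for Tx since
+ * each command needs DESCRIPTORS_PER_TX_MSG descriptors and capped at
+ * MSG_TOKEN_MAX.
+ */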
+static int scmi_vio_probe(struct virtio_device *vdev)
+{
+ struct device *dev = &vdev->dev;
+ struct scmi_vio_channel *channels;
+ bool have_vq_rx;
+ int vq_cnt;
+ int i;
+ int ret;
+ struct virtqueue *vqs[VIRTIO_SCMI_VQ_MAX_CNT];
+
+	/* Only one SCMI VirtIO device allowed */
+ if (scmi_vdev)
+ return -EINVAL;
+
+ have_vq_rx = scmi_vio_have_vq_rx(vdev);
+ vq_cnt = have_vq_rx ? VIRTIO_SCMI_VQ_MAX_CNT : 1;
+
+ channels = devm_kcalloc(dev, vq_cnt, sizeof(*channels), GFP_KERNEL);
+ if (!channels)
+ return -ENOMEM;
+
+ if (have_vq_rx)
+ channels[VIRTIO_SCMI_VQ_RX].is_rx = true;
+
+ ret = virtio_find_vqs(vdev, vq_cnt, vqs, scmi_vio_complete_callbacks,
+ scmi_vio_vqueue_names, NULL);
+ if (ret) {
+ dev_err(dev, "Failed to get %d virtqueue(s)\n", vq_cnt);
+ return ret;
+ }
+
+ for (i = 0; i < vq_cnt; i++) {
+ unsigned int sz;
+
+ spin_lock_init(&channels[i].lock);
+ spin_lock_init(&channels[i].ready_lock);
+ INIT_LIST_HEAD(&channels[i].free_list);
+ channels[i].vqueue = vqs[i];
+
+ sz = virtqueue_get_vring_size(channels[i].vqueue);
+ /* Tx messages need multiple descriptors. */
+ if (!channels[i].is_rx)
+ sz /= DESCRIPTORS_PER_TX_MSG;
+
+ if (sz > MSG_TOKEN_MAX) {
+ dev_info_once(dev,
+ "%s virtqueue could hold %d messages. Only %ld allowed to be pending.\n",
+ channels[i].is_rx ? "rx" : "tx",
+ sz, MSG_TOKEN_MAX);
+ sz = MSG_TOKEN_MAX;
+ }
+ channels[i].max_msg = sz;
+ }
+
+ vdev->priv = channels;
+ scmi_vdev = vdev;
+
+ return 0;
+}
+
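+/*
+ * When the virtio device goes away, reset it and delete its virtqueues, and
+ * drop the global reference so a later probe can succeed again.
+ */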
+static void scmi_vio_remove(struct virtio_device *vdev)
+{
+ vdev->config->reset(vdev);
+ vdev->config->del_vqs(vdev);
+ scmi_vdev = NULL;
+}
+
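+/*
+ * The transport only supports modern (VIRTIO_F_VERSION_1) devices; legacy
+ * devices are rejected before probe runs.
+ */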
+static int scmi_vio_validate(struct virtio_device *vdev)
+{
+ if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) {
+ dev_err(&vdev->dev,
+ "device does not comply with spec version 1.x\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static unsigned int features[] = {
+ VIRTIO_SCMI_F_P2A_CHANNELS,
+};
+
+static const struct virtio_device_id id_table[] = {
+ { VIRTIO_ID_SCMI, VIRTIO_DEV_ANY_ID },
+ { 0 }
+};
+
+static struct virtio_driver virtio_scmi_driver = {
+ .driver.name = "scmi-virtio",
+ .driver.owner = THIS_MODULE,
+ .feature_table = features,
+ .feature_table_size = ARRAY_SIZE(features),
+ .id_table = id_table,
+ .probe = scmi_vio_probe,
+ .remove = scmi_vio_remove,
+ .validate = scmi_vio_validate,
+};
+
+static int __init virtio_scmi_init(void)
+{
+ return register_virtio_driver(&virtio_scmi_driver);
+}
+
+static void __exit virtio_scmi_exit(void)
+{
+ unregister_virtio_driver(&virtio_scmi_driver);
+}
+
+const struct scmi_desc scmi_virtio_desc = {
+ .transport_init = virtio_scmi_init,
+ .transport_exit = virtio_scmi_exit,
+ .ops = &scmi_virtio_ops,
+ .max_rx_timeout_ms = 60000, /* for non-realtime virtio devices */
+ .max_msg = 0, /* overridden by virtio_get_max_msg() */
+ .max_msg_size = VIRTIO_SCMI_MAX_MSG_SIZE,
+};