author		David Jander	2022-06-21 08:12:33 +0200
committer	Mark Brown	2022-06-27 13:27:25 +0100
commit		69fa95905d40846756d22402690ddf5361a9d13b (patch)
tree		08eb639a0785fd1564e05e2664532164917d2f28 /drivers/spi
parent		72c5c59b659d54d0c824d0333a211f373316361d (diff)
spi: Ensure the io_mutex is held until spi_finalize_current_message()
This patch introduces a completion that is completed in
spi_finalize_current_message() and waited for in
__spi_pump_transfer_message(). This way all manipulation of ctlr->cur_msg
is done with the io_mutex held and strictly ordered:
__spi_pump_transfer_message() will not return until
spi_finalize_current_message() is done using ctlr->cur_msg, and its
calling context is only touching ctlr->cur_msg after returning.

Due to this, we can safely drop the spin-locks around ctlr->cur_msg.

Signed-off-by: David Jander <david@protonic.nl>
Link: https://lore.kernel.org/r/20220621061234.3626638-11-david@protonic.nl
Signed-off-by: Mark Brown <broonie@kernel.org>
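[Editor's illustration] Below is a minimal userspace sketch of the handoff
this patch introduces, for readers following the locking change. It stands
in a POSIX condition variable for the kernel's struct completion and plain
pthreads for the message pump and the finalizing (driver/IRQ) context;
fake_ctlr, finalize() and cur_msg_done are illustrative names only, not
part of the SPI core.

/*
 * Minimal userspace analogue of the cur_msg handoff (illustrative only,
 * not SPI core code). Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_ctlr {
	pthread_mutex_t io_mutex;      /* stands in for ctlr->io_mutex */
	pthread_mutex_t lock;          /* protects 'done' */
	pthread_cond_t  cur_msg_done;  /* ~ ctlr->cur_msg_completion */
	bool done;
	const char *cur_msg;           /* stands in for ctlr->cur_msg */
};

/* Plays the role of the driver calling spi_finalize_current_message(). */
static void *finalize(void *arg)
{
	struct fake_ctlr *c = arg;

	pthread_mutex_lock(&c->lock);
	printf("finalizing %s\n", c->cur_msg); /* may still use cur_msg */
	c->done = true;
	pthread_cond_signal(&c->cur_msg_done); /* ~ complete() */
	pthread_mutex_unlock(&c->lock);
	return NULL;
}

int main(void)
{
	struct fake_ctlr c = {
		.io_mutex = PTHREAD_MUTEX_INITIALIZER,
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.cur_msg_done = PTHREAD_COND_INITIALIZER,
	};
	pthread_t t;

	/* Pump side: publish cur_msg and kick off the transfer with
	 * io_mutex held, mirroring __spi_pump_transfer_message(). */
	pthread_mutex_lock(&c.io_mutex);
	c.cur_msg = "msg0";
	c.done = false;                        /* ~ reinit_completion() */
	pthread_create(&t, NULL, finalize, &c);

	/* ~ wait_for_completion(): do not proceed until finalize() is
	 * completely done using cur_msg. */
	pthread_mutex_lock(&c.lock);
	while (!c.done)
		pthread_cond_wait(&c.cur_msg_done, &c.lock);
	pthread_mutex_unlock(&c.lock);

	/* Only now is clearing cur_msg safe without any extra spinlock. */
	c.cur_msg = NULL;
	pthread_mutex_unlock(&c.io_mutex);
	pthread_join(t, NULL);
	return 0;
}

The property modeled is exactly the one the commit message states: the pump
side keeps io_mutex held and does not clear cur_msg until the finalizing
side has signalled it is done with it, which is what allows dropping the
queue_lock protection around cur_msg.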
Diffstat (limited to 'drivers/spi')
-rw-r--r--	drivers/spi/spi.c	32 ++++++++++++++------------------
1 file changed, 14 insertions(+), 18 deletions(-)
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 3df84f43918c..db08cb868652 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -1613,11 +1613,14 @@ static int __spi_pump_transfer_message(struct spi_controller *ctlr,
 		}
 	}
 
+	reinit_completion(&ctlr->cur_msg_completion);
 	ret = ctlr->transfer_one_message(ctlr, msg);
 	if (ret) {
 		dev_err(&ctlr->dev,
 			"failed to transfer one message from queue\n");
 		return ret;
+	} else {
+		wait_for_completion(&ctlr->cur_msg_completion);
 	}
 
 	return 0;
@@ -1704,6 +1707,12 @@ static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
 	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
 
 	ret = __spi_pump_transfer_message(ctlr, msg, was_busy);
+
+	if (!ret)
+		kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
+	ctlr->cur_msg = NULL;
+	ctlr->fallback = false;
+
 	mutex_unlock(&ctlr->io_mutex);
 
 	/* Prod the scheduler in case transfer_one() was busy waiting */
@@ -1897,12 +1906,9 @@ void spi_finalize_current_message(struct spi_controller *ctlr)
 {
 	struct spi_transfer *xfer;
 	struct spi_message *mesg;
-	unsigned long flags;
 	int ret;
 
-	spin_lock_irqsave(&ctlr->queue_lock, flags);
 	mesg = ctlr->cur_msg;
-	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
 
 	if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
 		list_for_each_entry(xfer, &mesg->transfers, transfer_list) {
@@ -1936,20 +1942,7 @@ void spi_finalize_current_message(struct spi_controller *ctlr)
 
 	mesg->prepared = false;
 
-	if (!mesg->sync) {
-		/*
-		 * This message was sent via the async message queue. Handle
-		 * the queue and kick the worker thread to do the
-		 * idling/shutdown or send the next message if needed.
-		 */
-		spin_lock_irqsave(&ctlr->queue_lock, flags);
-		WARN(ctlr->cur_msg != mesg,
-		     "Finalizing queued message that is not the current head of queue!");
-		ctlr->cur_msg = NULL;
-		ctlr->fallback = false;
-		kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
-		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
-	}
+	complete(&ctlr->cur_msg_completion);
 
 	trace_spi_message_done(mesg);
@@ -3036,6 +3029,7 @@ int spi_register_controller(struct spi_controller *ctlr)
 	}
 	ctlr->bus_lock_flag = 0;
 	init_completion(&ctlr->xfer_completion);
+	init_completion(&ctlr->cur_msg_completion);
 	if (!ctlr->max_dma_len)
 		ctlr->max_dma_len = INT_MAX;
 
@@ -3962,6 +3956,9 @@ static void __spi_transfer_message_noqueue(struct spi_controller *ctlr, struct s
 	if (ret)
 		goto out;
 
+	ctlr->cur_msg = NULL;
+	ctlr->fallback = false;
+
 	if (!was_busy) {
 		kfree(ctlr->dummy_rx);
 		ctlr->dummy_rx = NULL;
@@ -4013,7 +4010,6 @@ static int __spi_sync(struct spi_device *spi, struct spi_message *message)
 	 * will catch those cases.
 	 */
 	if (READ_ONCE(ctlr->queue_empty)) {
-		message->sync = true;
 		message->actual_length = 0;
 		message->status = -EINPROGRESS;
 