author     Dave Jiang    2021-08-06 08:36:43 -0700
committer  Vinod Koul    2021-08-06 21:50:00 +0530
commit     9fce3b3a0ab4cad407a27b5e36603c23f1b5b278 (patch)
tree       fce64c5974250723be17c7108d32528b83d569ff /drivers/dma
parent     15cb0321a55e12af6b8456e9ec66a11cb367a673 (diff)
dmaengine: idxd: remove interrupt flag for completion list spinlock
The list lock is never acquired in interrupt context. Therefore there is no
need to disable interrupts. Remove interrupt flags for lock operations.

Reviewed-by: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Dave Jiang <dave.jiang@intel.com>
Link: https://lore.kernel.org/r/162826417450.3454650.3733188117742416238.stgit@djiang5-desk3.ch.intel.com
Signed-off-by: Vinod Koul <vkoul@kernel.org>
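As background, here is a minimal, hypothetical sketch (not part of the patch)
contrasting the two locking forms. spin_lock_irqsave() saves and disables local
interrupts and is only needed when the lock can also be taken from hard
interrupt context; a plain spin_lock() suffices when, as with idxd's list_lock,
the lock is only taken from process or threaded-interrupt context. The
example_ctx and example_queue_work* names below are illustrative only and do
not appear in the driver.

#include <linux/list.h>
#include <linux/spinlock.h>

struct example_ctx {
	spinlock_t list_lock;		/* protects work_list */
	struct list_head work_list;
};

static void example_ctx_init(struct example_ctx *ctx)
{
	spin_lock_init(&ctx->list_lock);
	INIT_LIST_HEAD(&ctx->work_list);
}

/* Needed only if the lock may also be taken from hard-IRQ context:
 * saves the local interrupt state and disables interrupts while held. */
static void example_queue_work_irqsafe(struct example_ctx *ctx,
				       struct list_head *entry)
{
	unsigned long flags;

	spin_lock_irqsave(&ctx->list_lock, flags);
	list_add_tail(entry, &ctx->work_list);
	spin_unlock_irqrestore(&ctx->list_lock, flags);
}

/* Sufficient when the lock is only ever taken from process context or a
 * threaded interrupt handler, which is the case this patch relies on. */
static void example_queue_work(struct example_ctx *ctx,
			       struct list_head *entry)
{
	spin_lock(&ctx->list_lock);
	list_add_tail(entry, &ctx->work_list);
	spin_unlock(&ctx->list_lock);
}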
Diffstat (limited to 'drivers/dma')
-rw-r--r--  drivers/dma/idxd/irq.c    | 12
-rw-r--r--  drivers/dma/idxd/submit.c |  5
2 files changed, 7 insertions(+), 10 deletions(-)
diff --git a/drivers/dma/idxd/irq.c b/drivers/dma/idxd/irq.c
index 11addb394793..d221c2e37460 100644
--- a/drivers/dma/idxd/irq.c
+++ b/drivers/dma/idxd/irq.c
@@ -176,7 +176,6 @@ static void irq_process_pending_llist(struct idxd_irq_entry *irq_entry)
{
struct idxd_desc *desc, *t;
struct llist_node *head;
- unsigned long flags;
head = llist_del_all(&irq_entry->pending_llist);
if (!head)
@@ -197,17 +196,16 @@ static void irq_process_pending_llist(struct idxd_irq_entry *irq_entry)
complete_desc(desc, IDXD_COMPLETE_NORMAL);
} else {
- spin_lock_irqsave(&irq_entry->list_lock, flags);
+ spin_lock(&irq_entry->list_lock);
list_add_tail(&desc->list,
&irq_entry->work_list);
- spin_unlock_irqrestore(&irq_entry->list_lock, flags);
+ spin_unlock(&irq_entry->list_lock);
}
}
}
static void irq_process_work_list(struct idxd_irq_entry *irq_entry)
{
- unsigned long flags;
LIST_HEAD(flist);
struct idxd_desc *desc, *n;
@@ -215,9 +213,9 @@ static void irq_process_work_list(struct idxd_irq_entry *irq_entry)
* This lock protects list corruption from access of list outside of the irq handler
* thread.
*/
- spin_lock_irqsave(&irq_entry->list_lock, flags);
+ spin_lock(&irq_entry->list_lock);
if (list_empty(&irq_entry->work_list)) {
- spin_unlock_irqrestore(&irq_entry->list_lock, flags);
+ spin_unlock(&irq_entry->list_lock);
return;
}
@@ -228,7 +226,7 @@ static void irq_process_work_list(struct idxd_irq_entry *irq_entry)
}
}
- spin_unlock_irqrestore(&irq_entry->list_lock, flags);
+ spin_unlock(&irq_entry->list_lock);
list_for_each_entry(desc, &flist, list) {
/*
diff --git a/drivers/dma/idxd/submit.c b/drivers/dma/idxd/submit.c
index 92ae9a157cc9..4b514c63af15 100644
--- a/drivers/dma/idxd/submit.c
+++ b/drivers/dma/idxd/submit.c
@@ -106,14 +106,13 @@ static void llist_abort_desc(struct idxd_wq *wq, struct idxd_irq_entry *ie,
{
struct idxd_desc *d, *t, *found = NULL;
struct llist_node *head;
- unsigned long flags;
desc->completion->status = IDXD_COMP_DESC_ABORT;
/*
* Grab the list lock so it will block the irq thread handler. This allows the
* abort code to locate the descriptor need to be aborted.
*/
- spin_lock_irqsave(&ie->list_lock, flags);
+ spin_lock(&ie->list_lock);
head = llist_del_all(&ie->pending_llist);
if (head) {
llist_for_each_entry_safe(d, t, head, llnode) {
@@ -127,7 +126,7 @@ static void llist_abort_desc(struct idxd_wq *wq, struct idxd_irq_entry *ie,
if (!found)
found = list_abort_desc(wq, ie, desc);
- spin_unlock_irqrestore(&ie->list_lock, flags);
+ spin_unlock(&ie->list_lock);
if (found)
complete_desc(found, IDXD_COMPLETE_ABORT);