author	Johannes Berg	2012-12-27 21:43:48 +0100
committer	Johannes Berg	2013-02-05 14:39:12 +0100
commit	2bfb50924c7e92362ac937aef2ab56bc7bd3ca52
tree	26518070c19eb9cfb38ab48cda31fe16c1235433 /drivers/net/wireless/iwlwifi/mvm
parent	c9f7a8ab7792b48259af6e94706a5d02dd74caef
iwlwifi: use threaded interrupt handler
With new transports coming up, move to threaded interrupt handling now. This has the advantage that we can use the same locking scheme with all the different transports we may need to implement.

Note that the TX path obviously still runs in a tasklet, so some spin_lock() calls need to change to spin_lock_bh() calls to properly lock out the TX path.

In my test on a Calpella platform this has no impact on throughput or latency.

Also add lockdep annotations to avoid lockups: they catch sending synchronous commands, or taking locks that connect with them, from the irq thread.

Reviewed-by: Emmanuel Grumbach <emmanuel.grumbach@intel.com>
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
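As background (not part of the patch), a minimal sketch of the threaded-interrupt pattern the transport moves to; the names example_isr, example_irq_thread and "example-wifi" are hypothetical, not iwlwifi symbols. The hard handler only acknowledges the device and returns IRQ_WAKE_THREAD; the bulk of the RX work then runs in the handler thread, i.e. in process context, which is why the mvm code below can switch to ieee80211_rx_ni()/ieee80211_tx_status_ni() (process-context variants that disable bottom halves internally).

	#include <linux/interrupt.h>

	/* Hard handler: hard-irq context, keep it minimal. */
	static irqreturn_t example_isr(int irq, void *dev_id)
	{
		/* Ack/disable the device interrupt here, then defer the work. */
		return IRQ_WAKE_THREAD;
	}

	/* Threaded handler: process context, may sleep. */
	static irqreturn_t example_irq_thread(int irq, void *dev_id)
	{
		/* Process RX and TX completions, deliver frames to mac80211, etc. */
		return IRQ_HANDLED;
	}

	static int example_setup_irq(unsigned int irq, void *dev_id)
	{
		return request_threaded_irq(irq, example_isr, example_irq_thread,
					    IRQF_SHARED, "example-wifi", dev_id);
	}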
Diffstat (limited to 'drivers/net/wireless/iwlwifi/mvm')
-rw-r--r--	drivers/net/wireless/iwlwifi/mvm/rx.c	2
-rw-r--r--	drivers/net/wireless/iwlwifi/mvm/tx.c	12
2 files changed, 7 insertions(+), 7 deletions(-)
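To illustrate the locking change in tx.c below, here is a sketch with hypothetical foo_* names, not the driver's actual structures: the TX path still runs in a tasklet (softirq context), while the reclaim/BA-notification path now runs in the interrupt thread (process context). The process-context side must therefore take the shared per-station lock with spin_lock_bh(), so the tasklet cannot preempt it on the same CPU while the lock is held.

	#include <linux/spinlock.h>
	#include <linux/types.h>

	struct foo_sta {
		spinlock_t lock;
		u16 next_reclaimed;
	};

	/* Runs in the TX tasklet (softirq context). */
	static void foo_tx_path(struct foo_sta *sta)
	{
		spin_lock(&sta->lock);	/* already in BH context, plain lock is fine */
		/* ... queue frames ... */
		spin_unlock(&sta->lock);
	}

	/* Runs in the threaded interrupt handler (process context). */
	static void foo_reclaim_path(struct foo_sta *sta, u16 next_reclaimed)
	{
		spin_lock_bh(&sta->lock);	/* lock out the TX tasklet */
		sta->next_reclaimed = next_reclaimed;
		spin_unlock_bh(&sta->lock);
	}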
diff --git a/drivers/net/wireless/iwlwifi/mvm/rx.c b/drivers/net/wireless/iwlwifi/mvm/rx.c
index 52da375e5740..3f3ce91ad5c2 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/rx.c
@@ -121,7 +121,7 @@ static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
- ieee80211_rx(mvm->hw, skb);
+ ieee80211_rx_ni(mvm->hw, skb);
}
/*
diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c
index cada8efe0cca..6b67ce3f679c 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tx.c
@@ -620,7 +620,7 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
seq_ctl = le16_to_cpu(hdr->seq_ctrl);
}
- ieee80211_tx_status(mvm->hw, skb);
+ ieee80211_tx_status_ni(mvm->hw, skb);
}
if (txq_id >= IWL_FIRST_AMPDU_QUEUE) {
@@ -663,12 +663,12 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
struct iwl_mvm_tid_data *tid_data =
&mvmsta->tid_data[tid];
- spin_lock(&mvmsta->lock);
+ spin_lock_bh(&mvmsta->lock);
tid_data->next_reclaimed = next_reclaimed;
IWL_DEBUG_TX_REPLY(mvm, "Next reclaimed packet:%d\n",
next_reclaimed);
iwl_mvm_check_ratid_empty(mvm, sta, tid);
- spin_unlock(&mvmsta->lock);
+ spin_unlock_bh(&mvmsta->lock);
}
#ifdef CONFIG_PM_SLEEP
@@ -832,7 +832,7 @@ int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
return 0;
}
- spin_lock(&mvmsta->lock);
+ spin_lock_bh(&mvmsta->lock);
__skb_queue_head_init(&reclaimed_skbs);
@@ -886,13 +886,13 @@ int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
}
}
- spin_unlock(&mvmsta->lock);
+ spin_unlock_bh(&mvmsta->lock);
rcu_read_unlock();
while (!skb_queue_empty(&reclaimed_skbs)) {
skb = __skb_dequeue(&reclaimed_skbs);
- ieee80211_tx_status(mvm->hw, skb);
+ ieee80211_tx_status_ni(mvm->hw, skb);
}
return 0;