From 9b5bd5a4917eeb5eca00d1842a74186cfc8dd1c6 Mon Sep 17 00:00:00 2001 From: Ben Greear Date: Wed, 20 Feb 2013 09:41:08 -0800 Subject: mac80211: stop timers before canceling work items Re-order the quiesce code so that timers are always stopped before work-items are flushed. This was not the problem I saw, but I think it may still be more correct. Signed-off-by: Ben Greear Signed-off-by: Johannes Berg --- net/mac80211/mlme.c | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 9f6464f3e05f..6044c6d87e4c 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -3502,6 +3502,14 @@ void ieee80211_sta_quiesce(struct ieee80211_sub_if_data *sdata) { struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; + /* + * Stop timers before deleting work items, as timers + * could race and re-add the work-items. They will be + * re-established on connection. + */ + del_timer_sync(&ifmgd->conn_mon_timer); + del_timer_sync(&ifmgd->bcn_mon_timer); + /* * we need to use atomic bitops for the running bits * only because both timers might fire at the same @@ -3516,13 +3524,9 @@ void ieee80211_sta_quiesce(struct ieee80211_sub_if_data *sdata) if (del_timer_sync(&ifmgd->timer)) set_bit(TMR_RUNNING_TIMER, &ifmgd->timers_running); - cancel_work_sync(&ifmgd->chswitch_work); if (del_timer_sync(&ifmgd->chswitch_timer)) set_bit(TMR_RUNNING_CHANSW, &ifmgd->timers_running); - - /* these will just be re-established on connection */ - del_timer_sync(&ifmgd->conn_mon_timer); - del_timer_sync(&ifmgd->bcn_mon_timer); + cancel_work_sync(&ifmgd->chswitch_work); } void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata) -- cgit v1.2.3 From 499218595a2e8296b7492af32fcca141b7b8184a Mon Sep 17 00:00:00 2001 From: Ben Greear Date: Wed, 20 Feb 2013 09:41:09 -0800 Subject: mac80211: Fix crash due to un-canceled work-items Some mlme work structs are not cancelled on disassociation nor interface deletion, which leads to them running after the memory has been freed There is not a clean way to cancel these in the disassociation logic because they must be canceled outside of the ifmgd->mtx lock, so just cancel them in mgd_stop logic that tears down the station. This fixes the crashes we see in 3.7.9+. The crash stack trace itself isn't so helpful, but this warning gives more useful info: WARNING: at /home/greearb/git/linux-3.7.dev.y/lib/debugobjects.c:261 debug_print_object+0x7c/0x8d() ODEBUG: free active (active state 0) object type: work_struct hint: ieee80211_sta_monitor_work+0x0/0x14 [mac80211] Modules linked in: [...] 
Pid: 14743, comm: iw Tainted: G C O 3.7.9+ #11 Call Trace: [] warn_slowpath_common+0x80/0x98 [] warn_slowpath_fmt+0x41/0x43 [] debug_print_object+0x7c/0x8d [] debug_check_no_obj_freed+0x95/0x1c3 [] slab_free_hook+0x70/0x79 [] kfree+0x62/0xb7 [] netdev_release+0x39/0x3e [] device_release+0x52/0x8a [] kobject_release+0x121/0x158 [] kobject_put+0x4c/0x50 [] netdev_run_todo+0x25c/0x27e Cc: stable@vger.kernel.org Signed-off-by: Ben Greear Signed-off-by: Johannes Berg --- net/mac80211/mlme.c | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 6044c6d87e4c..b756d2924690 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -4319,6 +4319,17 @@ void ieee80211_mgd_stop(struct ieee80211_sub_if_data *sdata) { struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; + /* + * Make sure some work items will not run after this, + * they will not do anything but might not have been + * cancelled when disconnecting. + */ + cancel_work_sync(&ifmgd->monitor_work); + cancel_work_sync(&ifmgd->beacon_connection_loss_work); + cancel_work_sync(&ifmgd->request_smps_work); + cancel_work_sync(&ifmgd->csa_connection_drop_work); + cancel_work_sync(&ifmgd->chswitch_work); + mutex_lock(&ifmgd->mtx); if (ifmgd->assoc_data) ieee80211_destroy_assoc_data(sdata, false); -- cgit v1.2.3 From d0ae708d1acd4bf6ad5b9937d9da44d16ca18f13 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Wed, 27 Feb 2013 15:08:28 +0100 Subject: nl80211: remove channel width and extended capa advertising This is another case of data increasing the size of the wiphy information significantly with a new feature, for now remove this as well. Signed-off-by: Johannes Berg --- net/wireless/core.c | 3 +-- net/wireless/nl80211.c | 21 --------------------- 2 files changed, 1 insertion(+), 23 deletions(-) diff --git a/net/wireless/core.c b/net/wireless/core.c index 33b75b9b8efa..922002105062 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c @@ -367,8 +367,7 @@ struct wiphy *wiphy_new(const struct cfg80211_ops *ops, int sizeof_priv) rdev->wiphy.rts_threshold = (u32) -1; rdev->wiphy.coverage_class = 0; - rdev->wiphy.features = NL80211_FEATURE_SCAN_FLUSH | - NL80211_FEATURE_ADVERTISE_CHAN_LIMITS; + rdev->wiphy.features = NL80211_FEATURE_SCAN_FLUSH; return &rdev->wiphy; } diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index e652d05ff712..7a7b621d45fd 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -557,18 +557,6 @@ static int nl80211_msg_put_channel(struct sk_buff *msg, if ((chan->flags & IEEE80211_CHAN_RADAR) && nla_put_flag(msg, NL80211_FREQUENCY_ATTR_RADAR)) goto nla_put_failure; - if ((chan->flags & IEEE80211_CHAN_NO_HT40MINUS) && - nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_HT40_MINUS)) - goto nla_put_failure; - if ((chan->flags & IEEE80211_CHAN_NO_HT40PLUS) && - nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_HT40_PLUS)) - goto nla_put_failure; - if ((chan->flags & IEEE80211_CHAN_NO_80MHZ) && - nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_80MHZ)) - goto nla_put_failure; - if ((chan->flags & IEEE80211_CHAN_NO_160MHZ) && - nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_160MHZ)) - goto nla_put_failure; if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_MAX_TX_POWER, DBM_TO_MBM(chan->max_power))) @@ -1310,15 +1298,6 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 portid, u32 seq, int flag dev->wiphy.max_acl_mac_addrs)) goto nla_put_failure; - if (dev->wiphy.extended_capabilities && - (nla_put(msg, NL80211_ATTR_EXT_CAPA, - dev->wiphy.extended_capabilities_len, - 
dev->wiphy.extended_capabilities) || - nla_put(msg, NL80211_ATTR_EXT_CAPA_MASK, - dev->wiphy.extended_capabilities_len, - dev->wiphy.extended_capabilities_mask))) - goto nla_put_failure; - return genlmsg_end(msg, hdr); nla_put_failure: -- cgit v1.2.3 From feda30271e5455394c57e35eba66db88d1b15077 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Thu, 28 Feb 2013 09:59:22 +0100 Subject: mac80211: really fix monitor mode channel reporting After Felix's patch it was still broken in case you used more than just a single monitor interface. Fix it better now. Reported-by: Sujith Manoharan Tested-by: Sujith Manoharan Signed-off-by: Johannes Berg --- net/mac80211/cfg.c | 21 +++++++++++++-------- 1 file changed, 13 insertions(+), 8 deletions(-) diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 808f5fcd1ced..fb306814576a 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -3290,14 +3290,19 @@ static int ieee80211_cfg_get_channel(struct wiphy *wiphy, int ret = -ENODATA; rcu_read_lock(); - if (local->use_chanctx) { - chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); - if (chanctx_conf) { - *chandef = chanctx_conf->def; - ret = 0; - } - } else if (local->open_count == local->monitors) { - *chandef = local->monitor_chandef; + chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); + if (chanctx_conf) { + *chandef = chanctx_conf->def; + ret = 0; + } else if (local->open_count > 0 && + local->open_count == local->monitors && + sdata->vif.type == NL80211_IFTYPE_MONITOR) { + if (local->use_chanctx) + *chandef = local->monitor_chandef; + else + cfg80211_chandef_create(chandef, + local->_oper_channel, + local->_oper_channel_type); ret = 0; } rcu_read_unlock(); -- cgit v1.2.3 From 98891754ea9453de4db9111c91b20122ca330101 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Tue, 26 Feb 2013 11:28:19 +0100 Subject: iwlwifi: don't map complete commands bidirectionally The reason we mapped them bidirectionally was that not doing so had caused IOMMU exceptions, due to the fact that the HW writes back into the command. Now that the first part of the command including the write-back part is always in the first buffer, we don't need to map the remaining buffer(s) bidi and can get rid of the special-casing for commands. This is a requisite patch for another one to fix DMA mapping. Reviewed-by: Emmanuel Grumbach Signed-off-by: Johannes Berg --- drivers/net/wireless/iwlwifi/pcie/tx.c | 33 +++++++++++---------------------- 1 file changed, 11 insertions(+), 22 deletions(-) diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c index 8b625a7f5685..975492f0b8c8 100644 --- a/drivers/net/wireless/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/iwlwifi/pcie/tx.c @@ -367,8 +367,8 @@ static inline u8 iwl_pcie_tfd_get_num_tbs(struct iwl_tfd *tfd) } static void iwl_pcie_tfd_unmap(struct iwl_trans *trans, - struct iwl_cmd_meta *meta, struct iwl_tfd *tfd, - enum dma_data_direction dma_dir) + struct iwl_cmd_meta *meta, + struct iwl_tfd *tfd) { int i; int num_tbs; @@ -392,7 +392,8 @@ static void iwl_pcie_tfd_unmap(struct iwl_trans *trans, /* Unmap chunks, if any. 
*/ for (i = 1; i < num_tbs; i++) dma_unmap_single(trans->dev, iwl_pcie_tfd_tb_get_addr(tfd, i), - iwl_pcie_tfd_tb_get_len(tfd, i), dma_dir); + iwl_pcie_tfd_tb_get_len(tfd, i), + DMA_TO_DEVICE); tfd->num_tbs = 0; } @@ -406,8 +407,7 @@ static void iwl_pcie_tfd_unmap(struct iwl_trans *trans, * Does NOT advance any TFD circular buffer read/write indexes * Does NOT free the TFD itself (which is within circular buffer) */ -static void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq, - enum dma_data_direction dma_dir) +static void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq) { struct iwl_tfd *tfd_tmp = txq->tfds; @@ -418,8 +418,7 @@ static void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq, lockdep_assert_held(&txq->lock); /* We have only q->n_window txq->entries, but we use q->n_bd tfds */ - iwl_pcie_tfd_unmap(trans, &txq->entries[idx].meta, &tfd_tmp[rd_ptr], - dma_dir); + iwl_pcie_tfd_unmap(trans, &txq->entries[idx].meta, &tfd_tmp[rd_ptr]); /* free SKB */ if (txq->entries) { @@ -565,22 +564,13 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id) struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_txq *txq = &trans_pcie->txq[txq_id]; struct iwl_queue *q = &txq->q; - enum dma_data_direction dma_dir; if (!q->n_bd) return; - /* In the command queue, all the TBs are mapped as BIDI - * so unmap them as such. - */ - if (txq_id == trans_pcie->cmd_queue) - dma_dir = DMA_BIDIRECTIONAL; - else - dma_dir = DMA_TO_DEVICE; - spin_lock_bh(&txq->lock); while (q->write_ptr != q->read_ptr) { - iwl_pcie_txq_free_tfd(trans, txq, dma_dir); + iwl_pcie_txq_free_tfd(trans, txq); q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd); } spin_unlock_bh(&txq->lock); @@ -962,7 +952,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn, iwl_pcie_txq_inval_byte_cnt_tbl(trans, txq); - iwl_pcie_txq_free_tfd(trans, txq, DMA_TO_DEVICE); + iwl_pcie_txq_free_tfd(trans, txq); } iwl_pcie_txq_progress(trans_pcie, txq); @@ -1340,11 +1330,10 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) data = dup_buf; phys_addr = dma_map_single(trans->dev, (void *)data, - cmdlen[i], DMA_BIDIRECTIONAL); + cmdlen[i], DMA_TO_DEVICE); if (dma_mapping_error(trans->dev, phys_addr)) { iwl_pcie_tfd_unmap(trans, out_meta, - &txq->tfds[q->write_ptr], - DMA_BIDIRECTIONAL); + &txq->tfds[q->write_ptr]); idx = -ENOMEM; goto out; } @@ -1418,7 +1407,7 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans, cmd = txq->entries[cmd_index].cmd; meta = &txq->entries[cmd_index].meta; - iwl_pcie_tfd_unmap(trans, meta, &txq->tfds[index], DMA_BIDIRECTIONAL); + iwl_pcie_tfd_unmap(trans, meta, &txq->tfds[index]); /* Input error checking is done when commands are added to queue. */ if (meta->flags & CMD_WANT_SKB) { -- cgit v1.2.3 From 1afbfb6041fb8f639e742620ad933c347e14ba2c Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Tue, 26 Feb 2013 11:32:26 +0100 Subject: iwlwifi: rename IWL_MAX_CMD_TFDS to IWL_MAX_CMD_TBS_PER_TFD The IWL_MAX_CMD_TFDS name for this constant is wrong, the constant really indicates how many TBs we can use in the driver for a single command TFD, rename the constant and also add a comment explaining it. 
Reviewed-by: Emmanuel Grumbach Signed-off-by: Johannes Berg --- drivers/net/wireless/iwlwifi/iwl-devtrace.h | 2 +- drivers/net/wireless/iwlwifi/iwl-trans.h | 12 ++++++++---- drivers/net/wireless/iwlwifi/pcie/tx.c | 12 ++++++------ 3 files changed, 15 insertions(+), 11 deletions(-) diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.h b/drivers/net/wireless/iwlwifi/iwl-devtrace.h index 10f01793d7a6..81aa91fab5aa 100644 --- a/drivers/net/wireless/iwlwifi/iwl-devtrace.h +++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.h @@ -363,7 +363,7 @@ TRACE_EVENT(iwlwifi_dev_hcmd, __entry->flags = cmd->flags; memcpy(__get_dynamic_array(hcmd), hdr, sizeof(*hdr)); - for (i = 0; i < IWL_MAX_CMD_TFDS; i++) { + for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) { if (!cmd->len[i]) continue; memcpy((u8 *)__get_dynamic_array(hcmd) + offset, diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h index 8c7bec6b9a0b..058947f213b2 100644 --- a/drivers/net/wireless/iwlwifi/iwl-trans.h +++ b/drivers/net/wireless/iwlwifi/iwl-trans.h @@ -217,7 +217,11 @@ struct iwl_device_cmd { #define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd)) -#define IWL_MAX_CMD_TFDS 2 +/* + * number of transfer buffers (fragments) per transmit frame descriptor; + * this is just the driver's idea, the hardware supports 20 + */ +#define IWL_MAX_CMD_TBS_PER_TFD 2 /** * struct iwl_hcmd_dataflag - flag for each one of the chunks of the command @@ -254,15 +258,15 @@ enum iwl_hcmd_dataflag { * @id: id of the host command */ struct iwl_host_cmd { - const void *data[IWL_MAX_CMD_TFDS]; + const void *data[IWL_MAX_CMD_TBS_PER_TFD]; struct iwl_rx_packet *resp_pkt; unsigned long _rx_page_addr; u32 _rx_page_order; int handler_status; u32 flags; - u16 len[IWL_MAX_CMD_TFDS]; - u8 dataflags[IWL_MAX_CMD_TFDS]; + u16 len[IWL_MAX_CMD_TBS_PER_TFD]; + u8 dataflags[IWL_MAX_CMD_TBS_PER_TFD]; u8 id; }; diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c index 975492f0b8c8..ff80a7e55f00 100644 --- a/drivers/net/wireless/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/iwlwifi/pcie/tx.c @@ -1146,16 +1146,16 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, bool had_nocopy = false; int i; u32 cmd_pos; - const u8 *cmddata[IWL_MAX_CMD_TFDS]; - u16 cmdlen[IWL_MAX_CMD_TFDS]; + const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD]; + u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD]; copy_size = sizeof(out_cmd->hdr); cmd_size = sizeof(out_cmd->hdr); /* need one for the header if the first is NOCOPY */ - BUILD_BUG_ON(IWL_MAX_CMD_TFDS > IWL_NUM_OF_TBS - 1); + BUILD_BUG_ON(IWL_MAX_CMD_TBS_PER_TFD > IWL_NUM_OF_TBS - 1); - for (i = 0; i < IWL_MAX_CMD_TFDS; i++) { + for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) { cmddata[i] = cmd->data[i]; cmdlen[i] = cmd->len[i]; @@ -1250,7 +1250,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, /* and copy the data that needs to be copied */ cmd_pos = offsetof(struct iwl_device_cmd, payload); copy_size = sizeof(out_cmd->hdr); - for (i = 0; i < IWL_MAX_CMD_TFDS; i++) { + for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) { int copy = 0; if (!cmd->len) @@ -1319,7 +1319,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, iwl_pcie_txq_build_tfd(trans, txq, phys_addr, copy_size, 1); /* map the remaining (adjusted) nocopy/dup fragments */ - for (i = 0; i < IWL_MAX_CMD_TFDS; i++) { + for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) { const void *data = cmddata[i]; if (!cmdlen[i]) -- cgit v1.2.3 From aed7d9ac1836defe033b561f4306e39014ac56fd Mon Sep 17 00:00:00 2001 From: 
Emmanuel Grumbach Date: Wed, 20 Feb 2013 11:33:00 +0200 Subject: iwlwifi: disable 8K A-MSDU by default Supporting 8K A-MSDU means that we need to allocate order 1 pages for every Rx packet. Even when there is no traffic. This adds stress on the memory manager. The handling of compound pages is also less trivial for the memory manager and not using them will make the allocation code run faster although I didn't really measure. Eric also pointed out that having huge buffers with little data in them is not very nice towards the TCP stack since the truesize of the skb is huge. This doesn't allow TCP to have a big Rx window. See https://patchwork.kernel.org/patch/2167711/ for details. Note that very few vendors will actually send A-MSDU. Disable this feature by default. Signed-off-by: Emmanuel Grumbach Signed-off-by: Johannes Berg --- drivers/net/wireless/iwlwifi/iwl-drv.c | 3 +-- drivers/net/wireless/iwlwifi/iwl-modparams.h | 2 +- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c index 6f228bb2b844..fbfd2d137117 100644 --- a/drivers/net/wireless/iwlwifi/iwl-drv.c +++ b/drivers/net/wireless/iwlwifi/iwl-drv.c @@ -1102,7 +1102,6 @@ void iwl_drv_stop(struct iwl_drv *drv) /* shared module parameters */ struct iwl_mod_params iwlwifi_mod_params = { - .amsdu_size_8K = 1, .restart_fw = 1, .plcp_check = true, .bt_coex_active = true, @@ -1207,7 +1206,7 @@ MODULE_PARM_DESC(11n_disable, "disable 11n functionality, bitmap: 1: full, 2: agg TX, 4: agg RX"); module_param_named(amsdu_size_8K, iwlwifi_mod_params.amsdu_size_8K, int, S_IRUGO); -MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size"); +MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size (default 0)"); module_param_named(fw_restart, iwlwifi_mod_params.restart_fw, int, S_IRUGO); MODULE_PARM_DESC(fw_restart, "restart firmware in case of error"); diff --git a/drivers/net/wireless/iwlwifi/iwl-modparams.h b/drivers/net/wireless/iwlwifi/iwl-modparams.h index e5e3a79eae2f..2c2a729092f5 100644 --- a/drivers/net/wireless/iwlwifi/iwl-modparams.h +++ b/drivers/net/wireless/iwlwifi/iwl-modparams.h @@ -91,7 +91,7 @@ enum iwl_power_level { * @sw_crypto: using hardware encryption, default = 0 * @disable_11n: disable 11n capabilities, default = 0, * use IWL_DISABLE_HT_* constants - * @amsdu_size_8K: enable 8K amsdu size, default = 1 + * @amsdu_size_8K: enable 8K amsdu size, default = 0 * @restart_fw: restart firmware, default = 1 * @plcp_check: enable plcp health check, default = true * @wd_disable: enable stuck queue check, default = 0 -- cgit v1.2.3 From 38c0f334b359953f010e9b921e0b55278d3918f7 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Wed, 27 Feb 2013 13:18:50 +0100 Subject: iwlwifi: use coherent DMA memory for command header Recently in commit 8a964f44e01ad3bbc208c3e80d931ba91b9ea786 ("iwlwifi: always copy first 16 bytes of commands") we fixed the problem that the hardware writes back to the command and that could overwrite parts of the data that was still needed and would thus be corrupted. Investigating this problem more closely we found that this write-back isn't really ordered very well with respect to other DMA traffic. Therefore, it sometimes happened that the write-back occurred after unmapping the command again which is clearly an issue and could corrupt the next allocation that goes to that spot, or (better) cause IOMMU faults. 
To fix this, allocate coherent memory for the first 16 bytes of each command, containing the write-back part, and use it for all queues. All the dynamic DMA mappings only need to be TO_DEVICE then. This ensures that even when the write-back happens "too late" it can't hit memory that has been freed or a mapping that doesn't exist any more. Since now the actual command is no longer modified, we can also remove CMD_WANT_HCMD and get rid of the DMA sync that was necessary to update the scratch pointer. Reviewed-by: Emmanuel Grumbach Signed-off-by: Johannes Berg --- drivers/net/wireless/iwlwifi/dvm/sta.c | 2 +- drivers/net/wireless/iwlwifi/iwl-trans.h | 8 +- drivers/net/wireless/iwlwifi/pcie/internal.h | 34 +++-- drivers/net/wireless/iwlwifi/pcie/rx.c | 14 +- drivers/net/wireless/iwlwifi/pcie/tx.c | 221 +++++++++++++-------------- 5 files changed, 137 insertions(+), 142 deletions(-) diff --git a/drivers/net/wireless/iwlwifi/dvm/sta.c b/drivers/net/wireless/iwlwifi/dvm/sta.c index 94ef33838bc6..b775769f8322 100644 --- a/drivers/net/wireless/iwlwifi/dvm/sta.c +++ b/drivers/net/wireless/iwlwifi/dvm/sta.c @@ -151,7 +151,7 @@ int iwl_send_add_sta(struct iwl_priv *priv, sta_id, sta->sta.addr, flags & CMD_ASYNC ? "a" : ""); if (!(flags & CMD_ASYNC)) { - cmd.flags |= CMD_WANT_SKB | CMD_WANT_HCMD; + cmd.flags |= CMD_WANT_SKB; might_sleep(); } diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h index 058947f213b2..0cac2b7af78b 100644 --- a/drivers/net/wireless/iwlwifi/iwl-trans.h +++ b/drivers/net/wireless/iwlwifi/iwl-trans.h @@ -186,19 +186,13 @@ struct iwl_rx_packet { * @CMD_ASYNC: Return right away and don't want for the response * @CMD_WANT_SKB: valid only with CMD_SYNC. The caller needs the buffer of the * response. The caller needs to call iwl_free_resp when done. - * @CMD_WANT_HCMD: The caller needs to get the HCMD that was sent in the - * response handler. Chunks flagged by %IWL_HCMD_DFL_NOCOPY won't be - * copied. The pointer passed to the response handler is in the transport - * ownership and don't need to be freed by the op_mode. This also means - * that the pointer is invalidated after the op_mode's handler returns. * @CMD_ON_DEMAND: This command is sent by the test mode pipe. */ enum CMD_MODE { CMD_SYNC = 0, CMD_ASYNC = BIT(0), CMD_WANT_SKB = BIT(1), - CMD_WANT_HCMD = BIT(2), - CMD_ON_DEMAND = BIT(3), + CMD_ON_DEMAND = BIT(2), }; #define DEF_CMD_PAYLOAD_SIZE 320 diff --git a/drivers/net/wireless/iwlwifi/pcie/internal.h b/drivers/net/wireless/iwlwifi/pcie/internal.h index 3d62e8055352..148843e7f34f 100644 --- a/drivers/net/wireless/iwlwifi/pcie/internal.h +++ b/drivers/net/wireless/iwlwifi/pcie/internal.h @@ -137,10 +137,6 @@ static inline int iwl_queue_dec_wrap(int index, int n_bd) struct iwl_cmd_meta { /* only for SYNC commands, iff the reply skb is wanted */ struct iwl_host_cmd *source; - - DEFINE_DMA_UNMAP_ADDR(mapping); - DEFINE_DMA_UNMAP_LEN(len); - u32 flags; }; @@ -185,25 +181,36 @@ struct iwl_queue { /* * The FH will write back to the first TB only, so we need * to copy some data into the buffer regardless of whether - * it should be mapped or not. This indicates how much to - * copy, even for HCMDs it must be big enough to fit the - * DRAM scratch from the TX cmd, at least 16 bytes. + * it should be mapped or not. This indicates how big the + * first TB must be to include the scratch buffer. Since + * the scratch is 4 bytes at offset 12, it's 16 now. 
If we + * make it bigger then allocations will be bigger and copy + * slower, so that's probably not useful. */ -#define IWL_HCMD_MIN_COPY_SIZE 16 +#define IWL_HCMD_SCRATCHBUF_SIZE 16 struct iwl_pcie_txq_entry { struct iwl_device_cmd *cmd; - struct iwl_device_cmd *copy_cmd; struct sk_buff *skb; /* buffer to free after command completes */ const void *free_buf; struct iwl_cmd_meta meta; }; +struct iwl_pcie_txq_scratch_buf { + struct iwl_cmd_header hdr; + u8 buf[8]; + __le32 scratch; +}; + /** * struct iwl_txq - Tx Queue for DMA * @q: generic Rx/Tx queue descriptor * @tfds: transmit frame descriptors (DMA memory) + * @scratchbufs: start of command headers, including scratch buffers, for + * the writeback -- this is DMA memory and an array holding one buffer + * for each command on the queue + * @scratchbufs_dma: DMA address for the scratchbufs start * @entries: transmit entries (driver state) * @lock: queue lock * @stuck_timer: timer that fires if queue gets stuck @@ -217,6 +224,8 @@ struct iwl_pcie_txq_entry { struct iwl_txq { struct iwl_queue q; struct iwl_tfd *tfds; + struct iwl_pcie_txq_scratch_buf *scratchbufs; + dma_addr_t scratchbufs_dma; struct iwl_pcie_txq_entry *entries; spinlock_t lock; struct timer_list stuck_timer; @@ -225,6 +234,13 @@ struct iwl_txq { u8 active; }; +static inline dma_addr_t +iwl_pcie_get_scratchbuf_dma(struct iwl_txq *txq, int idx) +{ + return txq->scratchbufs_dma + + sizeof(struct iwl_pcie_txq_scratch_buf) * idx; +} + /** * struct iwl_trans_pcie - PCIe transport specific data * @rxq: all the RX queue data diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c index b0ae06d2456f..567e67ad1f61 100644 --- a/drivers/net/wireless/iwlwifi/pcie/rx.c +++ b/drivers/net/wireless/iwlwifi/pcie/rx.c @@ -637,22 +637,14 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans, index = SEQ_TO_INDEX(sequence); cmd_index = get_cmd_index(&txq->q, index); - if (reclaim) { - struct iwl_pcie_txq_entry *ent; - ent = &txq->entries[cmd_index]; - cmd = ent->copy_cmd; - WARN_ON_ONCE(!cmd && ent->meta.flags & CMD_WANT_HCMD); - } else { + if (reclaim) + cmd = txq->entries[cmd_index].cmd; + else cmd = NULL; - } err = iwl_op_mode_rx(trans->op_mode, &rxcb, cmd); if (reclaim) { - /* The original command isn't needed any more */ - kfree(txq->entries[cmd_index].copy_cmd); - txq->entries[cmd_index].copy_cmd = NULL; - /* nor is the duplicated part of the command */ kfree(txq->entries[cmd_index].free_buf); txq->entries[cmd_index].free_buf = NULL; } diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c index ff80a7e55f00..8595c16f74de 100644 --- a/drivers/net/wireless/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/iwlwifi/pcie/tx.c @@ -191,12 +191,9 @@ static void iwl_pcie_txq_stuck_timer(unsigned long data) } for (i = q->read_ptr; i != q->write_ptr; - i = iwl_queue_inc_wrap(i, q->n_bd)) { - struct iwl_tx_cmd *tx_cmd = - (struct iwl_tx_cmd *)txq->entries[i].cmd->payload; + i = iwl_queue_inc_wrap(i, q->n_bd)) IWL_ERR(trans, "scratch %d = 0x%08x\n", i, - get_unaligned_le32(&tx_cmd->scratch)); - } + le32_to_cpu(txq->scratchbufs[i].scratch)); iwl_op_mode_nic_error(trans->op_mode); } @@ -382,14 +379,8 @@ static void iwl_pcie_tfd_unmap(struct iwl_trans *trans, return; } - /* Unmap tx_cmd */ - if (num_tbs) - dma_unmap_single(trans->dev, - dma_unmap_addr(meta, mapping), - dma_unmap_len(meta, len), - DMA_BIDIRECTIONAL); + /* first TB is never freed - it's the scratchbuf data */ - /* Unmap chunks, if any. 
*/ for (i = 1; i < num_tbs; i++) dma_unmap_single(trans->dev, iwl_pcie_tfd_tb_get_addr(tfd, i), iwl_pcie_tfd_tb_get_len(tfd, i), @@ -478,6 +469,7 @@ static int iwl_pcie_txq_alloc(struct iwl_trans *trans, { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); size_t tfd_sz = sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX; + size_t scratchbuf_sz; int i; if (WARN_ON(txq->entries || txq->tfds)) @@ -513,9 +505,25 @@ static int iwl_pcie_txq_alloc(struct iwl_trans *trans, IWL_ERR(trans, "dma_alloc_coherent(%zd) failed\n", tfd_sz); goto error; } + + BUILD_BUG_ON(IWL_HCMD_SCRATCHBUF_SIZE != sizeof(*txq->scratchbufs)); + BUILD_BUG_ON(offsetof(struct iwl_pcie_txq_scratch_buf, scratch) != + sizeof(struct iwl_cmd_header) + + offsetof(struct iwl_tx_cmd, scratch)); + + scratchbuf_sz = sizeof(*txq->scratchbufs) * slots_num; + + txq->scratchbufs = dma_alloc_coherent(trans->dev, scratchbuf_sz, + &txq->scratchbufs_dma, + GFP_KERNEL); + if (!txq->scratchbufs) + goto err_free_tfds; + txq->q.id = txq_id; return 0; +err_free_tfds: + dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->q.dma_addr); error: if (txq->entries && txq_id == trans_pcie->cmd_queue) for (i = 0; i < slots_num; i++) @@ -600,7 +608,6 @@ static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id) if (txq_id == trans_pcie->cmd_queue) for (i = 0; i < txq->q.n_window; i++) { kfree(txq->entries[i].cmd); - kfree(txq->entries[i].copy_cmd); kfree(txq->entries[i].free_buf); } @@ -609,6 +616,10 @@ static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id) dma_free_coherent(dev, sizeof(struct iwl_tfd) * txq->q.n_bd, txq->tfds, txq->q.dma_addr); txq->q.dma_addr = 0; + + dma_free_coherent(dev, + sizeof(*txq->scratchbufs) * txq->q.n_window, + txq->scratchbufs, txq->scratchbufs_dma); } kfree(txq->entries); @@ -1142,7 +1153,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, void *dup_buf = NULL; dma_addr_t phys_addr; int idx; - u16 copy_size, cmd_size, dma_size; + u16 copy_size, cmd_size, scratch_size; bool had_nocopy = false; int i; u32 cmd_pos; @@ -1162,9 +1173,9 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, if (!cmd->len[i]) continue; - /* need at least IWL_HCMD_MIN_COPY_SIZE copied */ - if (copy_size < IWL_HCMD_MIN_COPY_SIZE) { - int copy = IWL_HCMD_MIN_COPY_SIZE - copy_size; + /* need at least IWL_HCMD_SCRATCHBUF_SIZE copied */ + if (copy_size < IWL_HCMD_SCRATCHBUF_SIZE) { + int copy = IWL_HCMD_SCRATCHBUF_SIZE - copy_size; if (copy > cmdlen[i]) copy = cmdlen[i]; @@ -1256,9 +1267,9 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, if (!cmd->len) continue; - /* need at least IWL_HCMD_MIN_COPY_SIZE copied */ - if (copy_size < IWL_HCMD_MIN_COPY_SIZE) { - copy = IWL_HCMD_MIN_COPY_SIZE - copy_size; + /* need at least IWL_HCMD_SCRATCHBUF_SIZE copied */ + if (copy_size < IWL_HCMD_SCRATCHBUF_SIZE) { + copy = IWL_HCMD_SCRATCHBUF_SIZE - copy_size; if (copy > cmd->len[i]) copy = cmd->len[i]; @@ -1276,48 +1287,36 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, } } - WARN_ON_ONCE(txq->entries[idx].copy_cmd); - - /* - * since out_cmd will be the source address of the FH, it will write - * the retry count there. So when the user needs to receivce the HCMD - * that corresponds to the response in the response handler, it needs - * to set CMD_WANT_HCMD. 
- */ - if (cmd->flags & CMD_WANT_HCMD) { - txq->entries[idx].copy_cmd = - kmemdup(out_cmd, cmd_pos, GFP_ATOMIC); - if (unlikely(!txq->entries[idx].copy_cmd)) { - idx = -ENOMEM; - goto out; - } - } - IWL_DEBUG_HC(trans, "Sending command %s (#%x), seq: 0x%04X, %d bytes at %d[%d]:%d\n", get_cmd_string(trans_pcie, out_cmd->hdr.cmd), out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence), cmd_size, q->write_ptr, idx, trans_pcie->cmd_queue); - /* - * If the entire command is smaller than IWL_HCMD_MIN_COPY_SIZE, we must - * still map at least that many bytes for the hardware to write back to. - * We have enough space, so that's not a problem. - */ - dma_size = max_t(u16, copy_size, IWL_HCMD_MIN_COPY_SIZE); + /* start the TFD with the scratchbuf */ + scratch_size = min_t(int, copy_size, IWL_HCMD_SCRATCHBUF_SIZE); + memcpy(&txq->scratchbufs[q->write_ptr], &out_cmd->hdr, scratch_size); + iwl_pcie_txq_build_tfd(trans, txq, + iwl_pcie_get_scratchbuf_dma(txq, q->write_ptr), + scratch_size, 1); + + /* map first command fragment, if any remains */ + if (copy_size > scratch_size) { + phys_addr = dma_map_single(trans->dev, + ((u8 *)&out_cmd->hdr) + scratch_size, + copy_size - scratch_size, + DMA_TO_DEVICE); + if (dma_mapping_error(trans->dev, phys_addr)) { + iwl_pcie_tfd_unmap(trans, out_meta, + &txq->tfds[q->write_ptr]); + idx = -ENOMEM; + goto out; + } - phys_addr = dma_map_single(trans->dev, &out_cmd->hdr, dma_size, - DMA_BIDIRECTIONAL); - if (unlikely(dma_mapping_error(trans->dev, phys_addr))) { - idx = -ENOMEM; - goto out; + iwl_pcie_txq_build_tfd(trans, txq, phys_addr, + copy_size - scratch_size, 0); } - dma_unmap_addr_set(out_meta, mapping, phys_addr); - dma_unmap_len_set(out_meta, len, dma_size); - - iwl_pcie_txq_build_tfd(trans, txq, phys_addr, copy_size, 1); - /* map the remaining (adjusted) nocopy/dup fragments */ for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) { const void *data = cmddata[i]; @@ -1586,10 +1585,9 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, struct iwl_cmd_meta *out_meta; struct iwl_txq *txq; struct iwl_queue *q; - dma_addr_t phys_addr = 0; - dma_addr_t txcmd_phys; - dma_addr_t scratch_phys; - u16 len, firstlen, secondlen; + dma_addr_t tb0_phys, tb1_phys, scratch_phys; + void *tb1_addr; + u16 len, tb1_len, tb2_len; u8 wait_write_ptr = 0; __le16 fc = hdr->frame_control; u8 hdr_len = ieee80211_hdrlen(fc); @@ -1627,85 +1625,80 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) | INDEX_TO_SEQ(q->write_ptr))); + tb0_phys = iwl_pcie_get_scratchbuf_dma(txq, q->write_ptr); + scratch_phys = tb0_phys + sizeof(struct iwl_cmd_header) + + offsetof(struct iwl_tx_cmd, scratch); + + tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys); + tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys); + /* Set up first empty entry in queue's array of Tx/cmd buffers */ out_meta = &txq->entries[q->write_ptr].meta; /* - * Use the first empty entry in this queue's command buffer array - * to contain the Tx command and MAC header concatenated together - * (payload data will be in another buffer). - * Size of this varies, due to varying MAC header length. - * If end is not dword aligned, we'll have 2 extra bytes at the end - * of the MAC header (device reads on dword boundaries). - * We'll tell device about this padding later. 
+ * The second TB (tb1) points to the remainder of the TX command + * and the 802.11 header - dword aligned size + * (This calculation modifies the TX command, so do it before the + * setup of the first TB) */ - len = sizeof(struct iwl_tx_cmd) + - sizeof(struct iwl_cmd_header) + hdr_len; - firstlen = (len + 3) & ~3; + len = sizeof(struct iwl_tx_cmd) + sizeof(struct iwl_cmd_header) + + hdr_len - IWL_HCMD_SCRATCHBUF_SIZE; + tb1_len = (len + 3) & ~3; /* Tell NIC about any 2-byte padding after MAC header */ - if (firstlen != len) + if (tb1_len != len) tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK; - /* Physical address of this Tx command's header (not MAC header!), - * within command buffer array. */ - txcmd_phys = dma_map_single(trans->dev, - &dev_cmd->hdr, firstlen, - DMA_BIDIRECTIONAL); - if (unlikely(dma_mapping_error(trans->dev, txcmd_phys))) - goto out_err; - dma_unmap_addr_set(out_meta, mapping, txcmd_phys); - dma_unmap_len_set(out_meta, len, firstlen); + /* The first TB points to the scratchbuf data - min_copy bytes */ + memcpy(&txq->scratchbufs[q->write_ptr], &dev_cmd->hdr, + IWL_HCMD_SCRATCHBUF_SIZE); + iwl_pcie_txq_build_tfd(trans, txq, tb0_phys, + IWL_HCMD_SCRATCHBUF_SIZE, 1); - if (!ieee80211_has_morefrags(fc)) { - txq->need_update = 1; - } else { - wait_write_ptr = 1; - txq->need_update = 0; - } + /* there must be data left over for TB1 or this code must be changed */ + BUILD_BUG_ON(sizeof(struct iwl_tx_cmd) < IWL_HCMD_SCRATCHBUF_SIZE); + + /* map the data for TB1 */ + tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_HCMD_SCRATCHBUF_SIZE; + tb1_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(trans->dev, tb1_phys))) + goto out_err; + iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, 0); - /* Set up TFD's 2nd entry to point directly to remainder of skb, - * if any (802.11 null frames have no payload). */ - secondlen = skb->len - hdr_len; - if (secondlen > 0) { - phys_addr = dma_map_single(trans->dev, skb->data + hdr_len, - secondlen, DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(trans->dev, phys_addr))) { - dma_unmap_single(trans->dev, - dma_unmap_addr(out_meta, mapping), - dma_unmap_len(out_meta, len), - DMA_BIDIRECTIONAL); + /* + * Set up TFD's third entry to point directly to remainder + * of skb, if any (802.11 null frames have no payload). 
+ */ + tb2_len = skb->len - hdr_len; + if (tb2_len > 0) { + dma_addr_t tb2_phys = dma_map_single(trans->dev, + skb->data + hdr_len, + tb2_len, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(trans->dev, tb2_phys))) { + iwl_pcie_tfd_unmap(trans, out_meta, + &txq->tfds[q->write_ptr]); goto out_err; } + iwl_pcie_txq_build_tfd(trans, txq, tb2_phys, tb2_len, 0); } - /* Attach buffers to TFD */ - iwl_pcie_txq_build_tfd(trans, txq, txcmd_phys, firstlen, 1); - if (secondlen > 0) - iwl_pcie_txq_build_tfd(trans, txq, phys_addr, secondlen, 0); - - scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) + - offsetof(struct iwl_tx_cmd, scratch); - - /* take back ownership of DMA buffer to enable update */ - dma_sync_single_for_cpu(trans->dev, txcmd_phys, firstlen, - DMA_BIDIRECTIONAL); - tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys); - tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys); - /* Set up entry for this TFD in Tx byte-count array */ iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len)); - dma_sync_single_for_device(trans->dev, txcmd_phys, firstlen, - DMA_BIDIRECTIONAL); - trace_iwlwifi_dev_tx(trans->dev, skb, &txq->tfds[txq->q.write_ptr], sizeof(struct iwl_tfd), - &dev_cmd->hdr, firstlen, - skb->data + hdr_len, secondlen); + &dev_cmd->hdr, IWL_HCMD_SCRATCHBUF_SIZE + tb1_len, + skb->data + hdr_len, tb2_len); trace_iwlwifi_dev_tx_data(trans->dev, skb, - skb->data + hdr_len, secondlen); + skb->data + hdr_len, tb2_len); + + if (!ieee80211_has_morefrags(fc)) { + txq->need_update = 1; + } else { + wait_write_ptr = 1; + txq->need_update = 0; + } /* start timer if queue currently empty */ if (txq->need_update && q->read_ptr == q->write_ptr && -- cgit v1.2.3 From 645e77def93f1dd0e211c7244fbe152dac8a7100 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Fri, 1 Mar 2013 14:03:49 +0100 Subject: nl80211: increase wiphy dump size dynamically Given a device with many channels capabilities the wiphy information can still overflow even though its size in 3.9 was reduced to 3.8 levels. For new userspace and kernel 3.10 we're going to implement a new "split dump" protocol that can use multiple messages per wiphy. For now though, add a workaround to be able to send more information to userspace. Since generic netlink doesn't have a way to set the minimum dump size globally, and we wouldn't really want to set it globally anyway, increase the size only when needed, as described in the comments. As userspace might not be prepared for large buffers, we can only use 4k. Also increase the size for the get_wiphy command. 
Signed-off-by: Johannes Berg --- net/wireless/nl80211.c | 30 +++++++++++++++++++++++++----- 1 file changed, 25 insertions(+), 5 deletions(-) diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 7a7b621d45fd..d44ab216c0ec 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -1307,7 +1307,7 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 portid, u32 seq, int flag static int nl80211_dump_wiphy(struct sk_buff *skb, struct netlink_callback *cb) { - int idx = 0; + int idx = 0, ret; int start = cb->args[0]; struct cfg80211_registered_device *dev; @@ -1317,9 +1317,29 @@ static int nl80211_dump_wiphy(struct sk_buff *skb, struct netlink_callback *cb) continue; if (++idx <= start) continue; - if (nl80211_send_wiphy(skb, NETLINK_CB(cb->skb).portid, - cb->nlh->nlmsg_seq, NLM_F_MULTI, - dev) < 0) { + ret = nl80211_send_wiphy(skb, NETLINK_CB(cb->skb).portid, + cb->nlh->nlmsg_seq, NLM_F_MULTI, + dev); + if (ret < 0) { + /* + * If sending the wiphy data didn't fit (ENOBUFS or + * EMSGSIZE returned), this SKB is still empty (so + * it's not too big because another wiphy dataset is + * already in the skb) and we've not tried to adjust + * the dump allocation yet ... then adjust the alloc + * size to be bigger, and return 1 but with the empty + * skb. This results in an empty message being RX'ed + * in userspace, but that is ignored. + * + * We can then retry with the larger buffer. + */ + if ((ret == -ENOBUFS || ret == -EMSGSIZE) && + !skb->len && + cb->min_dump_alloc < 4096) { + cb->min_dump_alloc = 4096; + mutex_unlock(&cfg80211_mutex); + return 1; + } idx--; break; } @@ -1336,7 +1356,7 @@ static int nl80211_get_wiphy(struct sk_buff *skb, struct genl_info *info) struct sk_buff *msg; struct cfg80211_registered_device *dev = info->user_ptr[0]; - msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); + msg = nlmsg_new(4096, GFP_KERNEL); if (!msg) return -ENOMEM; -- cgit v1.2.3 From 7cbf9d017dbb5e3276de7d527925d42d4c11e732 Mon Sep 17 00:00:00 2001 From: Marco Porsch Date: Fri, 1 Mar 2013 16:01:18 +0100 Subject: mac80211: fix oops on mesh PS broadcast forwarding Introduced with de74a1d9032f4d37ea453ad2a647e1aff4cd2591 "mac80211: fix WPA with VLAN on AP side with ps-sta". Apparently overwrites the sdata pointer with non-valid data in the case of mesh. Fix this by checking for IFTYPE_AP_VLAN. Signed-off-by: Marco Porsch Signed-off-by: Johannes Berg --- net/mac80211/tx.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index c592a413bad9..0d74f2459585 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -2755,7 +2755,8 @@ ieee80211_get_buffered_bc(struct ieee80211_hw *hw, cpu_to_le16(IEEE80211_FCTL_MOREDATA); } - sdata = IEEE80211_DEV_TO_SUB_IF(skb->dev); + if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) + sdata = IEEE80211_DEV_TO_SUB_IF(skb->dev); if (!ieee80211_tx_prepare(sdata, &tx, skb)) break; dev_kfree_skb_any(skb); -- cgit v1.2.3 From 24af717c35189f7a83c34e637256ccb7295a617b Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Fri, 1 Mar 2013 17:33:18 +0100 Subject: mac80211: fix VHT MCS calculation The VHT MCSes we advertise to the AP were supposed to be restricted to the AP, but due to a bug in the logic mac80211 will advertise rates to the AP that aren't even supported by the local device. To fix this skip any adjustment if the NSS isn't supported at all. 
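For context on the check added below, here is an illustrative sketch, not part of the patch: the VHT rx_mcs_map is a 16-bit field holding one 2-bit entry per spatial stream (NSS 1 through 8), and the entry value 3 (IEEE80211_VHT_MCS_NOT_SUPPORTED) marks a stream the device cannot receive at all. A minimal stand-alone example of extracting such an entry, using a hypothetical helper name:

    #include <stdint.h>

    /* Encoding of each 2-bit entry in the VHT MCS map (same values as the
     * IEEE80211_VHT_MCS_* constants used by mac80211). */
    enum {
            VHT_MCS_SUPPORT_0_7   = 0,
            VHT_MCS_SUPPORT_0_8   = 1,
            VHT_MCS_SUPPORT_0_9   = 2,
            VHT_MCS_NOT_SUPPORTED = 3,
    };

    /* Hypothetical helper: return the 2-bit MCS entry for spatial stream
     * nss (1..8) from a host-endian 16-bit MCS map. */
    static unsigned int vht_mcs_for_nss(uint16_t mcs_map, unsigned int nss)
    {
            unsigned int shift = 2 * (nss - 1);     /* two bits per stream */

            return (mcs_map >> shift) & 0x3;
    }

    int main(void)
    {
            /* 0xfffa: NSS 1 and 2 support MCS 0-9, streams 3-8 not supported */
            uint16_t map = 0xfffa;

            return vht_mcs_for_nss(map, 3) == VHT_MCS_NOT_SUPPORTED ? 0 : 1;
    }

With that layout in mind, the added check simply skips any stream whose local entry is "not supported" instead of trying to adjust an MCS value the device never had.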
Signed-off-by: Johannes Berg
---
 net/mac80211/mlme.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index b756d2924690..141577412d84 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -647,6 +647,9 @@ static void ieee80211_add_vht_ie(struct ieee80211_sub_if_data *sdata,
 		our_mcs = (le16_to_cpu(vht_cap.vht_mcs.rx_mcs_map) &
 			   mask) >> shift;
 
+		if (our_mcs == IEEE80211_VHT_MCS_NOT_SUPPORTED)
+			continue;
+
 		switch (ap_mcs) {
 		default:
 			if (our_mcs <= ap_mcs)
-- cgit v1.2.3

From 801d929ca7d935ee199fd61d8ef914f51e892270 Mon Sep 17 00:00:00 2001
From: Felix Fietkau
Date: Sat, 2 Mar 2013 19:05:47 +0100
Subject: mac80211: another fix for idle handling in monitor mode

When setting a monitor interface up or down, the idle state needs to be
recalculated; otherwise the hardware will just stay in its previous idle
state.

Signed-off-by: Felix Fietkau
Signed-off-by: Johannes Berg
---
 net/mac80211/iface.c | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 640afab304d7..baaa8608e52d 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -541,6 +541,9 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
 		ieee80211_adjust_monitor_flags(sdata, 1);
 		ieee80211_configure_filter(local);
+		mutex_lock(&local->mtx);
+		ieee80211_recalc_idle(local);
+		mutex_unlock(&local->mtx);
 
 		netif_carrier_on(dev);
 		break;
@@ -812,6 +815,9 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
 		ieee80211_adjust_monitor_flags(sdata, -1);
 		ieee80211_configure_filter(local);
+		mutex_lock(&local->mtx);
+		ieee80211_recalc_idle(local);
+		mutex_unlock(&local->mtx);
 		break;
 	case NL80211_IFTYPE_P2P_DEVICE:
 		/* relies on synchronize_rcu() below */
-- cgit v1.2.3

From f9caed59f801f77a2844ab04d2dea8df33ac862b Mon Sep 17 00:00:00 2001
From: Joe Perches
Date: Mon, 18 Feb 2013 21:51:07 +0000
Subject: netfilter: nf_ct_helper: Fix logging for dropped packets

Update nf_ct_helper_log to emit args along with the format.

Signed-off-by: Joe Perches
Signed-off-by: Pablo Neira Ayuso
---
 net/netfilter/nf_conntrack_helper.c | 11 ++++++++++-
 1 file changed, 10 insertions(+), 1 deletion(-)

diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
index 013cdf69fe29..bb4188f170e3 100644
--- a/net/netfilter/nf_conntrack_helper.c
+++ b/net/netfilter/nf_conntrack_helper.c
@@ -341,6 +341,13 @@ void nf_ct_helper_log(struct sk_buff *skb, const struct nf_conn *ct,
 {
 	const struct nf_conn_help *help;
 	const struct nf_conntrack_helper *helper;
+	struct va_format vaf;
+	va_list args;
+
+	va_start(args, fmt);
+
+	vaf.fmt = fmt;
+	vaf.va = &args;
 
 	/* Called from the helper function, this call never fails */
 	help = nfct_help(ct);
@@ -349,7 +356,9 @@ void nf_ct_helper_log(struct sk_buff *skb, const struct nf_conn *ct,
 	helper = rcu_dereference(help->helper);
 
 	nf_log_packet(nf_ct_l3num(ct), 0, skb, NULL, NULL, NULL,
-		      "nf_ct_%s: dropping packet: %s ", helper->name, fmt);
+		      "nf_ct_%s: dropping packet: %pV ", helper->name, &vaf);
+
+	va_end(args);
 }
 EXPORT_SYMBOL_GPL(nf_ct_helper_log);
-- cgit v1.2.3

From 715c998ff4d1106c3096bc5a48e4196663e6701a Mon Sep 17 00:00:00 2001
From: Emmanuel Grumbach
Date: Thu, 28 Feb 2013 08:57:31 +0200
Subject: iwlwifi: mvm: restart the NIC if the cmd queue gets full

This is clearly an error situation, and the only way to recover is to
restart the driver / fw.
Signed-off-by: Emmanuel Grumbach Reviewed-by: Ilan Peer Signed-off-by: Johannes Berg --- drivers/net/wireless/iwlwifi/mvm/ops.c | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c index aa59adf87db3..d0f9c1e0475e 100644 --- a/drivers/net/wireless/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/iwlwifi/mvm/ops.c @@ -624,12 +624,8 @@ static void iwl_mvm_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb) ieee80211_free_txskb(mvm->hw, skb); } -static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode) +static void iwl_mvm_nic_restart(struct iwl_mvm *mvm) { - struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); - - iwl_mvm_dump_nic_error_log(mvm); - iwl_abort_notification_waits(&mvm->notif_wait); /* @@ -663,9 +659,21 @@ static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode) } } +static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode) +{ + struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); + + iwl_mvm_dump_nic_error_log(mvm); + + iwl_mvm_nic_restart(mvm); +} + static void iwl_mvm_cmd_queue_full(struct iwl_op_mode *op_mode) { + struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); + WARN_ON(1); + iwl_mvm_nic_restart(mvm); } static const struct iwl_op_mode_ops iwl_mvm_ops = { -- cgit v1.2.3 From e07cbb536acb249db5fd63f6884354630ae875ad Mon Sep 17 00:00:00 2001 From: Dor Shaish Date: Wed, 27 Feb 2013 23:00:27 +0200 Subject: iwlwifi: mvm: Set valid TX antennas value before calib request We must set the valid TX antennas number in the ucode before sending the phy_cfg_cmd and request for calibrations. Signed-off-by: Dor Shaish Reviewed-by: Emmanuel Grumbach Signed-off-by: Johannes Berg --- drivers/net/wireless/iwlwifi/mvm/fw.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/net/wireless/iwlwifi/mvm/fw.c b/drivers/net/wireless/iwlwifi/mvm/fw.c index d3d959db03a9..e6d51a9069c9 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/iwlwifi/mvm/fw.c @@ -446,6 +446,11 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm) ret = iwl_nvm_check_version(mvm->nvm_data, mvm->trans); WARN_ON(ret); + /* Send TX valid antennas before triggering calibrations */ + ret = iwl_send_tx_ant_cfg(mvm, mvm->nvm_data->valid_tx_ant); + if (ret) + goto error; + /* Override the calibrations from TLV and the const of fw */ iwl_set_default_calib_trigger(mvm); -- cgit v1.2.3 From 6221d47cf7a57eb1e2b5b51c65e2edcde369a0d4 Mon Sep 17 00:00:00 2001 From: Dor Shaish Date: Wed, 27 Feb 2013 15:55:48 +0200 Subject: iwlwifi: mvm: Take the phy_cfg from the TLV value The phy_cfg is given from the TLV value and does not have to be built by us. 
Signed-off-by: Dor Shaish Reviewed-by: Emmanuel Grumbach Signed-off-by: Johannes Berg --- drivers/net/wireless/iwlwifi/mvm/fw.c | 16 +--------------- 1 file changed, 1 insertion(+), 15 deletions(-) diff --git a/drivers/net/wireless/iwlwifi/mvm/fw.c b/drivers/net/wireless/iwlwifi/mvm/fw.c index e6d51a9069c9..d3c067e670a8 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/iwlwifi/mvm/fw.c @@ -241,20 +241,6 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm, return 0; } -#define IWL_HW_REV_ID_RAINBOW 0x2 -#define IWL_PROJ_TYPE_LHP 0x5 - -static u32 iwl_mvm_build_phy_cfg(struct iwl_mvm *mvm) -{ - struct iwl_nvm_data *data = mvm->nvm_data; - /* Temp calls to static definitions, will be changed to CSR calls */ - u8 hw_rev_id = IWL_HW_REV_ID_RAINBOW; - u8 project_type = IWL_PROJ_TYPE_LHP; - - return data->radio_cfg_dash | (data->radio_cfg_step << 2) | - (hw_rev_id << 4) | ((project_type & 0x7f) << 6) | - (data->valid_tx_ant << 16) | (data->valid_rx_ant << 20); -} static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm) { @@ -262,7 +248,7 @@ static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm) enum iwl_ucode_type ucode_type = mvm->cur_ucode; /* Set parameters */ - phy_cfg_cmd.phy_cfg = cpu_to_le32(iwl_mvm_build_phy_cfg(mvm)); + phy_cfg_cmd.phy_cfg = cpu_to_le32(mvm->fw->phy_config); phy_cfg_cmd.calib_control.event_trigger = mvm->fw->default_calib[ucode_type].event_trigger; phy_cfg_cmd.calib_control.flow_trigger = -- cgit v1.2.3 From de8bc6dd2d52cacaa76ea381ffdc00919b100a2c Mon Sep 17 00:00:00 2001 From: Dor Shaish Date: Wed, 27 Feb 2013 13:01:09 +0200 Subject: iwlwifi: mvm: Remove overriding calibrations for the 7000 family This fix removes the override of calibration request values sent to the FW. Due to that, the sending of default values to now implemented calibrations is removed. 
Signed-off-by: Dor Shaish Reviewed-by: Emmanuel Grumbach Signed-off-by: Johannes Berg --- drivers/net/wireless/iwlwifi/mvm/fw.c | 114 ---------------------------------- 1 file changed, 114 deletions(-) diff --git a/drivers/net/wireless/iwlwifi/mvm/fw.c b/drivers/net/wireless/iwlwifi/mvm/fw.c index d3c067e670a8..500f818dba04 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/iwlwifi/mvm/fw.c @@ -79,17 +79,8 @@ #define UCODE_VALID_OK cpu_to_le32(0x1) /* Default calibration values for WkP - set to INIT image w/o running */ -static const u8 wkp_calib_values_bb_filter[] = { 0xbf, 0x00, 0x5f, 0x00, 0x2f, - 0x00, 0x18, 0x00 }; -static const u8 wkp_calib_values_rx_dc[] = { 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, - 0x7f, 0x7f, 0x7f }; -static const u8 wkp_calib_values_tx_lo[] = { 0x00, 0x00, 0x00, 0x00 }; -static const u8 wkp_calib_values_tx_iq[] = { 0xff, 0x00, 0xff, 0x00, 0x00, - 0x00 }; -static const u8 wkp_calib_values_rx_iq[] = { 0xff, 0x00, 0x00, 0x00 }; static const u8 wkp_calib_values_rx_iq_skew[] = { 0x00, 0x00, 0x01, 0x00 }; static const u8 wkp_calib_values_tx_iq_skew[] = { 0x01, 0x00, 0x00, 0x00 }; -static const u8 wkp_calib_values_xtal[] = { 0xd2, 0xd2 }; struct iwl_calib_default_data { u16 size; @@ -99,12 +90,7 @@ struct iwl_calib_default_data { #define CALIB_SIZE_N_DATA(_buf) {.size = sizeof(_buf), .data = &_buf} static const struct iwl_calib_default_data wkp_calib_default_data[12] = { - [5] = CALIB_SIZE_N_DATA(wkp_calib_values_rx_dc), - [6] = CALIB_SIZE_N_DATA(wkp_calib_values_bb_filter), - [7] = CALIB_SIZE_N_DATA(wkp_calib_values_tx_lo), - [8] = CALIB_SIZE_N_DATA(wkp_calib_values_tx_iq), [9] = CALIB_SIZE_N_DATA(wkp_calib_values_tx_iq_skew), - [10] = CALIB_SIZE_N_DATA(wkp_calib_values_rx_iq), [11] = CALIB_SIZE_N_DATA(wkp_calib_values_rx_iq_skew), }; @@ -261,103 +247,6 @@ static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm) sizeof(phy_cfg_cmd), &phy_cfg_cmd); } -/* Starting with the new PHY DB implementation - New calibs are enabled */ -/* Value - 0x405e7 */ -#define IWL_CALIB_DEFAULT_FLOW_INIT (IWL_CALIB_CFG_XTAL_IDX |\ - IWL_CALIB_CFG_TEMPERATURE_IDX |\ - IWL_CALIB_CFG_VOLTAGE_READ_IDX |\ - IWL_CALIB_CFG_DC_IDX |\ - IWL_CALIB_CFG_BB_FILTER_IDX |\ - IWL_CALIB_CFG_LO_LEAKAGE_IDX |\ - IWL_CALIB_CFG_TX_IQ_IDX |\ - IWL_CALIB_CFG_RX_IQ_IDX |\ - IWL_CALIB_CFG_AGC_IDX) - -#define IWL_CALIB_DEFAULT_EVENT_INIT 0x0 - -/* Value 0x41567 */ -#define IWL_CALIB_DEFAULT_FLOW_RUN (IWL_CALIB_CFG_XTAL_IDX |\ - IWL_CALIB_CFG_TEMPERATURE_IDX |\ - IWL_CALIB_CFG_VOLTAGE_READ_IDX |\ - IWL_CALIB_CFG_BB_FILTER_IDX |\ - IWL_CALIB_CFG_DC_IDX |\ - IWL_CALIB_CFG_TX_IQ_IDX |\ - IWL_CALIB_CFG_RX_IQ_IDX |\ - IWL_CALIB_CFG_SENSITIVITY_IDX |\ - IWL_CALIB_CFG_AGC_IDX) - -#define IWL_CALIB_DEFAULT_EVENT_RUN (IWL_CALIB_CFG_XTAL_IDX |\ - IWL_CALIB_CFG_TEMPERATURE_IDX |\ - IWL_CALIB_CFG_VOLTAGE_READ_IDX |\ - IWL_CALIB_CFG_TX_PWR_IDX |\ - IWL_CALIB_CFG_DC_IDX |\ - IWL_CALIB_CFG_TX_IQ_IDX |\ - IWL_CALIB_CFG_SENSITIVITY_IDX) - -/* - * Sets the calibrations trigger values that will be sent to the FW for runtime - * and init calibrations. - * The ones given in the FW TLV are not correct. - */ -static void iwl_set_default_calib_trigger(struct iwl_mvm *mvm) -{ - struct iwl_tlv_calib_ctrl default_calib; - - /* - * WkP FW TLV calib bits are wrong, overwrite them. - * This defines the dynamic calibrations which are implemented in the - * uCode both for init(flow) calculation and event driven calibs. 
- */ - - /* Init Image */ - default_calib.event_trigger = cpu_to_le32(IWL_CALIB_DEFAULT_EVENT_INIT); - default_calib.flow_trigger = cpu_to_le32(IWL_CALIB_DEFAULT_FLOW_INIT); - - if (default_calib.event_trigger != - mvm->fw->default_calib[IWL_UCODE_INIT].event_trigger) - IWL_ERR(mvm, - "Updating the event calib for INIT image: 0x%x -> 0x%x\n", - mvm->fw->default_calib[IWL_UCODE_INIT].event_trigger, - default_calib.event_trigger); - if (default_calib.flow_trigger != - mvm->fw->default_calib[IWL_UCODE_INIT].flow_trigger) - IWL_ERR(mvm, - "Updating the flow calib for INIT image: 0x%x -> 0x%x\n", - mvm->fw->default_calib[IWL_UCODE_INIT].flow_trigger, - default_calib.flow_trigger); - - memcpy((void *)&mvm->fw->default_calib[IWL_UCODE_INIT], - &default_calib, sizeof(struct iwl_tlv_calib_ctrl)); - IWL_ERR(mvm, - "Setting uCode init calibrations event 0x%x, trigger 0x%x\n", - default_calib.event_trigger, - default_calib.flow_trigger); - - /* Run time image */ - default_calib.event_trigger = cpu_to_le32(IWL_CALIB_DEFAULT_EVENT_RUN); - default_calib.flow_trigger = cpu_to_le32(IWL_CALIB_DEFAULT_FLOW_RUN); - - if (default_calib.event_trigger != - mvm->fw->default_calib[IWL_UCODE_REGULAR].event_trigger) - IWL_ERR(mvm, - "Updating the event calib for RT image: 0x%x -> 0x%x\n", - mvm->fw->default_calib[IWL_UCODE_REGULAR].event_trigger, - default_calib.event_trigger); - if (default_calib.flow_trigger != - mvm->fw->default_calib[IWL_UCODE_REGULAR].flow_trigger) - IWL_ERR(mvm, - "Updating the flow calib for RT image: 0x%x -> 0x%x\n", - mvm->fw->default_calib[IWL_UCODE_REGULAR].flow_trigger, - default_calib.flow_trigger); - - memcpy((void *)&mvm->fw->default_calib[IWL_UCODE_REGULAR], - &default_calib, sizeof(struct iwl_tlv_calib_ctrl)); - IWL_ERR(mvm, - "Setting uCode runtime calibs event 0x%x, trigger 0x%x\n", - default_calib.event_trigger, - default_calib.flow_trigger); -} - static int iwl_set_default_calibrations(struct iwl_mvm *mvm) { u8 cmd_raw[16]; /* holds the variable size commands */ @@ -437,9 +326,6 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm) if (ret) goto error; - /* Override the calibrations from TLV and the const of fw */ - iwl_set_default_calib_trigger(mvm); - /* WkP doesn't have all calibrations, need to set default values */ if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000) { ret = iwl_set_default_calibrations(mvm); -- cgit v1.2.3 From f9aa8dd33714f17c7229ad89309406a1ccb3cd3f Mon Sep 17 00:00:00 2001 From: Emmanuel Grumbach Date: Mon, 4 Mar 2013 09:11:08 +0200 Subject: iwlwifi: mvm: ignore STOP_AGG when restarting Since the device is being restarted, all the Rx / Tx Block Ack sessions are been wiped out by the driver. So ignore the requests from mac80211 that stops Tx agg while reconfiguring the device. Note that stopping a non-existing Rx BA session is harmless, so just honor mac80211's request. Signed-off-by: Emmanuel Grumbach Signed-off-by: Johannes Berg --- drivers/net/wireless/iwlwifi/mvm/sta.c | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.c b/drivers/net/wireless/iwlwifi/mvm/sta.c index 861a7f9f8e7f..274f44e2ef60 100644 --- a/drivers/net/wireless/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/iwlwifi/mvm/sta.c @@ -770,6 +770,16 @@ int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif, u16 txq_id; int err; + + /* + * If mac80211 is cleaning its state, then say that we finished since + * our state has been cleared anyway. 
+ */ + if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { + ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); + return 0; + } + spin_lock_bh(&mvmsta->lock); txq_id = tid_data->txq_id; -- cgit v1.2.3 From 8101a7f0656bf11c385d6e14f52313b19f017e70 Mon Sep 17 00:00:00 2001 From: Emmanuel Grumbach Date: Thu, 28 Feb 2013 11:54:28 +0200 Subject: iwlwifi: mvm: update the rssi calculation Make the rssi more accurate by taking in count per-chain AGC values. Without this, the RSSI reports inaccurate values. Signed-off-by: Emmanuel Grumbach Signed-off-by: Johannes Berg --- drivers/net/wireless/iwlwifi/mvm/fw-api.h | 18 ++++++++------- drivers/net/wireless/iwlwifi/mvm/mvm.h | 3 ++- drivers/net/wireless/iwlwifi/mvm/rx.c | 37 +++++++++++++++++++------------ 3 files changed, 35 insertions(+), 23 deletions(-) diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/iwlwifi/mvm/fw-api.h index 23eebda848b0..2adb61f103f4 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api.h +++ b/drivers/net/wireless/iwlwifi/mvm/fw-api.h @@ -762,18 +762,20 @@ struct iwl_phy_context_cmd { #define IWL_RX_INFO_PHY_CNT 8 #define IWL_RX_INFO_AGC_IDX 1 #define IWL_RX_INFO_RSSI_AB_IDX 2 -#define IWL_RX_INFO_RSSI_C_IDX 3 -#define IWL_OFDM_AGC_DB_MSK 0xfe00 -#define IWL_OFDM_AGC_DB_POS 9 +#define IWL_OFDM_AGC_A_MSK 0x0000007f +#define IWL_OFDM_AGC_A_POS 0 +#define IWL_OFDM_AGC_B_MSK 0x00003f80 +#define IWL_OFDM_AGC_B_POS 7 +#define IWL_OFDM_AGC_CODE_MSK 0x3fe00000 +#define IWL_OFDM_AGC_CODE_POS 20 #define IWL_OFDM_RSSI_INBAND_A_MSK 0x00ff -#define IWL_OFDM_RSSI_ALLBAND_A_MSK 0xff00 #define IWL_OFDM_RSSI_A_POS 0 +#define IWL_OFDM_RSSI_ALLBAND_A_MSK 0xff00 +#define IWL_OFDM_RSSI_ALLBAND_A_POS 8 #define IWL_OFDM_RSSI_INBAND_B_MSK 0xff0000 -#define IWL_OFDM_RSSI_ALLBAND_B_MSK 0xff000000 #define IWL_OFDM_RSSI_B_POS 16 -#define IWL_OFDM_RSSI_INBAND_C_MSK 0x00ff -#define IWL_OFDM_RSSI_ALLBAND_C_MSK 0xff00 -#define IWL_OFDM_RSSI_C_POS 0 +#define IWL_OFDM_RSSI_ALLBAND_B_MSK 0xff000000 +#define IWL_OFDM_RSSI_ALLBAND_B_POS 24 /** * struct iwl_rx_phy_info - phy info diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h index 537711b10478..bdae700c769e 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h @@ -80,7 +80,8 @@ #define IWL_INVALID_MAC80211_QUEUE 0xff #define IWL_MVM_MAX_ADDRESSES 2 -#define IWL_RSSI_OFFSET 44 +/* RSSI offset for WkP */ +#define IWL_RSSI_OFFSET 50 enum iwl_mvm_tx_fifo { IWL_MVM_TX_FIFO_BK = 0, diff --git a/drivers/net/wireless/iwlwifi/mvm/rx.c b/drivers/net/wireless/iwlwifi/mvm/rx.c index 3f40ab05bbd8..b0b190d0ec23 100644 --- a/drivers/net/wireless/iwlwifi/mvm/rx.c +++ b/drivers/net/wireless/iwlwifi/mvm/rx.c @@ -131,33 +131,42 @@ static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm, static int iwl_mvm_calc_rssi(struct iwl_mvm *mvm, struct iwl_rx_phy_info *phy_info) { - u32 rssi_a, rssi_b, rssi_c, max_rssi, agc_db; + int rssi_a, rssi_b, rssi_a_dbm, rssi_b_dbm, max_rssi_dbm; + int rssi_all_band_a, rssi_all_band_b; + u32 agc_a, agc_b, max_agc; u32 val; - /* Find max rssi among 3 possible receivers. + /* Find max rssi among 2 possible receivers. * These values are measured by the Digital Signal Processor (DSP). * They should stay fairly constant even as the signal strength varies, * if the radio's Automatic Gain Control (AGC) is working right. * AGC value (see below) will provide the "interesting" info. 
*/ + val = le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_AGC_IDX]); + agc_a = (val & IWL_OFDM_AGC_A_MSK) >> IWL_OFDM_AGC_A_POS; + agc_b = (val & IWL_OFDM_AGC_B_MSK) >> IWL_OFDM_AGC_B_POS; + max_agc = max_t(u32, agc_a, agc_b); + val = le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_RSSI_AB_IDX]); rssi_a = (val & IWL_OFDM_RSSI_INBAND_A_MSK) >> IWL_OFDM_RSSI_A_POS; rssi_b = (val & IWL_OFDM_RSSI_INBAND_B_MSK) >> IWL_OFDM_RSSI_B_POS; - val = le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_RSSI_C_IDX]); - rssi_c = (val & IWL_OFDM_RSSI_INBAND_C_MSK) >> IWL_OFDM_RSSI_C_POS; - - val = le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_AGC_IDX]); - agc_db = (val & IWL_OFDM_AGC_DB_MSK) >> IWL_OFDM_AGC_DB_POS; + rssi_all_band_a = (val & IWL_OFDM_RSSI_ALLBAND_A_MSK) >> + IWL_OFDM_RSSI_ALLBAND_A_POS; + rssi_all_band_b = (val & IWL_OFDM_RSSI_ALLBAND_B_MSK) >> + IWL_OFDM_RSSI_ALLBAND_B_POS; - max_rssi = max_t(u32, rssi_a, rssi_b); - max_rssi = max_t(u32, max_rssi, rssi_c); + /* + * dBm = rssi dB - agc dB - constant. + * Higher AGC (higher radio gain) means lower signal. + */ + rssi_a_dbm = rssi_a - IWL_RSSI_OFFSET - agc_a; + rssi_b_dbm = rssi_b - IWL_RSSI_OFFSET - agc_b; + max_rssi_dbm = max_t(int, rssi_a_dbm, rssi_b_dbm); - IWL_DEBUG_STATS(mvm, "Rssi In A %d B %d C %d Max %d AGC dB %d\n", - rssi_a, rssi_b, rssi_c, max_rssi, agc_db); + IWL_DEBUG_STATS(mvm, "Rssi In A %d B %d Max %d AGCA %d AGCB %d\n", + rssi_a_dbm, rssi_b_dbm, max_rssi_dbm, agc_a, agc_b); - /* dBm = max_rssi dB - agc dB - constant. - * Higher AGC (higher radio gain) means lower signal. */ - return max_rssi - agc_db - IWL_RSSI_OFFSET; + return max_rssi_dbm; } /* -- cgit v1.2.3 From 2470b36e84a2e680d7a7e3809cbceae5bfae3606 Mon Sep 17 00:00:00 2001 From: Emmanuel Grumbach Date: Sun, 3 Mar 2013 14:35:03 +0200 Subject: iwlwifi: mvm: don't warn on normal BAR sending This flow happens when we get a failed single Tx response on an AMPDU queue. In this case, the frame won't be sent any more. So we need to move the window on the recipient side. This is done by a BAR. Now if we are in the following case: 10, 12 and 13 are ACKed and 11 isn't. 10 11 12 13. V X V V Then, 11 will be sent 16 times as an MPDU (as oppsed to A-MPDU). If this failed, we are entering the flow described above. So we need to send a BAR with ssn = 12. But in this case, the scheduler will tell us to free frames up to 13 (included). So, it is perfectly possible to get a failed single Tx response on an AMPDU queue that makes the scheduler's ssn jump by more than 1 single packet. 
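For illustration only, a minimal stand-alone sketch of the arithmetic behind the scenario above (the helper and variable names are invented and are not part of the driver):

	#include <stdbool.h>
	#include <stdio.h>

	/* The BAR asks the recipient to move its window right past the lost
	 * frame (ssn = 12 in the example above); the scheduler then reclaims
	 * every already-ACKed frame behind it, so its ssn lands on 14. */
	static unsigned int next_window_start(unsigned int failed_seq,
					      const bool *acked,
					      unsigned int win_end)
	{
		unsigned int ssn = failed_seq + 1;

		while (ssn <= win_end && acked[ssn])
			ssn++;
		return ssn;
	}

	int main(void)
	{
		bool acked[16] = { [10] = true, [12] = true, [13] = true };

		/* prints 14: a jump of 3 packets, not 1 */
		printf("new ssn = %u\n", next_window_start(11, acked, 13));
		return 0;
	}

Hence the warning removed below, which assumed that only a single frame could be reclaimed here, can trigger in a perfectly normal flow.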
Signed-off-by: Emmanuel Grumbach Signed-off-by: Johannes Berg --- drivers/net/wireless/iwlwifi/mvm/tx.c | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c index 6b67ce3f679c..6645efe5c03e 100644 --- a/drivers/net/wireless/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/iwlwifi/mvm/tx.c @@ -607,12 +607,8 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, /* Single frame failure in an AMPDU queue => send BAR */ if (txq_id >= IWL_FIRST_AMPDU_QUEUE && - !(info->flags & IEEE80211_TX_STAT_ACK)) { - /* there must be only one skb in the skb_list */ - WARN_ON_ONCE(skb_freed > 1 || - !skb_queue_empty(&skbs)); + !(info->flags & IEEE80211_TX_STAT_ACK)) info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK; - } /* W/A FW bug: seq_ctl is wrong when the queue is flushed */ if (status == TX_STATUS_FAIL_FIFO_FLUSHED) { -- cgit v1.2.3 From ed018fa4dfc3d26da56b9ee7dc75e9d39a39a02b Mon Sep 17 00:00:00 2001 From: Gao feng Date: Mon, 4 Mar 2013 00:29:12 +0000 Subject: netfilter: xt_AUDIT: only generate audit log when audit enabled We should stop generting audit log if audit is disabled. Signed-off-by: Gao feng Acked-by: Thomas Graf Signed-off-by: Pablo Neira Ayuso --- net/netfilter/xt_AUDIT.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/net/netfilter/xt_AUDIT.c b/net/netfilter/xt_AUDIT.c index ba92824086f3..3228d7f24eb4 100644 --- a/net/netfilter/xt_AUDIT.c +++ b/net/netfilter/xt_AUDIT.c @@ -124,6 +124,9 @@ audit_tg(struct sk_buff *skb, const struct xt_action_param *par) const struct xt_audit_info *info = par->targinfo; struct audit_buffer *ab; + if (audit_enabled == 0) + goto errout; + ab = audit_log_start(NULL, GFP_ATOMIC, AUDIT_NETFILTER_PKT); if (ab == NULL) goto errout; -- cgit v1.2.3 From 9df9e7832391cf699abbf39fc8d95d7e78297462 Mon Sep 17 00:00:00 2001 From: Paul Bolle Date: Mon, 4 Mar 2013 02:45:41 +0000 Subject: netfilter: nfnetlink: silence warning if CONFIG_PROVE_RCU isn't set Since commit c14b78e7decd0d1d5add6a4604feb8609fe920a9 ("netfilter: nfnetlink: add mutex per subsystem") building nefnetlink.o without CONFIG_PROVE_RCU set, triggers this GCC warning: net/netfilter/nfnetlink.c:65:22: warning: ‘nfnl_get_lock’ defined but not used [-Wunused-function] The cause of that warning is, in short, that rcu_lockdep_assert() compiles away if CONFIG_PROVE_RCU is not set. Silence this warning by open coding nfnl_get_lock() in the sole place it was called, which allows to remove that function. 
Signed-off-by: Paul Bolle Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nfnetlink.c | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c index d578ec251712..0b1b32cda307 100644 --- a/net/netfilter/nfnetlink.c +++ b/net/netfilter/nfnetlink.c @@ -62,11 +62,6 @@ void nfnl_unlock(__u8 subsys_id) } EXPORT_SYMBOL_GPL(nfnl_unlock); -static struct mutex *nfnl_get_lock(__u8 subsys_id) -{ - return &table[subsys_id].mutex; -} - int nfnetlink_subsys_register(const struct nfnetlink_subsystem *n) { nfnl_lock(n->subsys_id); @@ -199,7 +194,7 @@ replay: rcu_read_unlock(); nfnl_lock(subsys_id); if (rcu_dereference_protected(table[subsys_id].subsys, - lockdep_is_held(nfnl_get_lock(subsys_id))) != ss || + lockdep_is_held(&table[subsys_id].mutex)) != ss || nfnetlink_find_client(type, ss) != nc) err = -EAGAIN; else if (nc->call) -- cgit v1.2.3 From e08f626b33eb636dbf38b21618ab32b7fd8e1ec4 Mon Sep 17 00:00:00 2001 From: Bruce Allan Date: Wed, 20 Feb 2013 03:06:34 +0000 Subject: e1000e: workaround DMA unit hang on I218 At 1000Mbps link speed, one of the MAC's internal clocks can be stopped for up to 4us when entering K1 (a power mode of the MAC-PHY interconnect). If the MAC is waiting for completion indications for 2 DMA write requests into Host memory (e.g. descriptor writeback or Rx packet writing) and the indications occur while the clock is stopped, both indications will be missed by the MAC causing the MAC to wait for the completion indications and be unable to generate further DMA write requests. This results in an apparent hardware hang. Work-around the issue by disabling the de-assertion of the clock request when 1000Mbps link is acquired (K1 must be disabled while doing this). Signed-off-by: Bruce Allan Tested-by: Jeff Pieper Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/e1000e/ich8lan.c | 71 ++++++++++++++++++++++++++++- drivers/net/ethernet/intel/e1000e/ich8lan.h | 2 + drivers/net/ethernet/intel/e1000e/regs.h | 1 + 3 files changed, 73 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c index dff7bff8b8e0..121a865c7fbd 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c @@ -781,6 +781,59 @@ release: return ret_val; } +/** + * e1000_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP + * @hw: pointer to the HW structure + * @link: link up bool flag + * + * When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications + * preventing further DMA write requests. Workaround the issue by disabling + * the de-assertion of the clock request when in 1Gpbs mode. 
+ **/ +static s32 e1000_k1_workaround_lpt_lp(struct e1000_hw *hw, bool link) +{ + u32 fextnvm6 = er32(FEXTNVM6); + s32 ret_val = 0; + + if (link && (er32(STATUS) & E1000_STATUS_SPEED_1000)) { + u16 kmrn_reg; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + ret_val = + e1000e_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG, + &kmrn_reg); + if (ret_val) + goto release; + + ret_val = + e1000e_write_kmrn_reg_locked(hw, + E1000_KMRNCTRLSTA_K1_CONFIG, + kmrn_reg & + ~E1000_KMRNCTRLSTA_K1_ENABLE); + if (ret_val) + goto release; + + usleep_range(10, 20); + + ew32(FEXTNVM6, fextnvm6 | E1000_FEXTNVM6_REQ_PLL_CLK); + + ret_val = + e1000e_write_kmrn_reg_locked(hw, + E1000_KMRNCTRLSTA_K1_CONFIG, + kmrn_reg); +release: + hw->phy.ops.release(hw); + } else { + /* clear FEXTNVM6 bit 8 on link down or 10/100 */ + ew32(FEXTNVM6, fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK); + } + + return ret_val; +} + /** * e1000_check_for_copper_link_ich8lan - Check for link (Copper) * @hw: pointer to the HW structure @@ -818,6 +871,14 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) return ret_val; } + /* Work-around I218 hang issue */ + if ((hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_LM) || + (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_V)) { + ret_val = e1000_k1_workaround_lpt_lp(hw, link); + if (ret_val) + return ret_val; + } + /* Clear link partner's EEE ability */ hw->dev_spec.ich8lan.eee_lp_ability = 0; @@ -3954,8 +4015,16 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw) phy_ctrl = er32(PHY_CTRL); phy_ctrl |= E1000_PHY_CTRL_GBE_DISABLE; + if (hw->phy.type == e1000_phy_i217) { - u16 phy_reg; + u16 phy_reg, device_id = hw->adapter->pdev->device; + + if ((device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) || + (device_id == E1000_DEV_ID_PCH_LPTLP_I218_V)) { + u32 fextnvm6 = er32(FEXTNVM6); + + ew32(FEXTNVM6, fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK); + } ret_val = hw->phy.ops.acquire(hw); if (ret_val) diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h index b6d3174d7d2d..8bf4655c2e17 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.h +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h @@ -92,6 +92,8 @@ #define E1000_FEXTNVM4_BEACON_DURATION_8USEC 0x7 #define E1000_FEXTNVM4_BEACON_DURATION_16USEC 0x3 +#define E1000_FEXTNVM6_REQ_PLL_CLK 0x00000100 + #define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL #define E1000_ICH_RAR_ENTRIES 7 diff --git a/drivers/net/ethernet/intel/e1000e/regs.h b/drivers/net/ethernet/intel/e1000e/regs.h index 794fe1497666..a7e6a3e37257 100644 --- a/drivers/net/ethernet/intel/e1000e/regs.h +++ b/drivers/net/ethernet/intel/e1000e/regs.h @@ -42,6 +42,7 @@ #define E1000_FEXTNVM 0x00028 /* Future Extended NVM - RW */ #define E1000_FEXTNVM3 0x0003C /* Future Extended NVM 3 - RW */ #define E1000_FEXTNVM4 0x00024 /* Future Extended NVM 4 - RW */ +#define E1000_FEXTNVM6 0x00010 /* Future Extended NVM 6 - RW */ #define E1000_FEXTNVM7 0x000E4 /* Future Extended NVM 7 - RW */ #define E1000_FCT 0x00030 /* Flow Control Type - RW */ #define E1000_VET 0x00038 /* VLAN Ether Type - RW */ -- cgit v1.2.3 From e4f7dbb17e797d922d72567f37de3735722034ba Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Thu, 21 Feb 2013 03:08:50 +0000 Subject: igb: Drop BUILD_BUG_ON check from igb_build_rx_buffer On s390 the igb driver was throwing a build error due to the fact that a frame built using build_skb would be larger than 2K. 
Since this is not likely to change at any point in the future we are better off just dropping the check since we already had a check in igb_set_rx_buffer_len that will just disable the usage of build_skb anyway. Signed-off-by: Alexander Duyck Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/igb/igb_main.c | 7 ------- 1 file changed, 7 deletions(-) diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index ed79a1c53b59..4d53a17959fa 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -6227,13 +6227,6 @@ static struct sk_buff *igb_build_rx_buffer(struct igb_ring *rx_ring, /* If we spanned a buffer we have a huge mess so test for it */ BUG_ON(unlikely(!igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP))); - /* Guarantee this function can be used by verifying buffer sizes */ - BUILD_BUG_ON(SKB_WITH_OVERHEAD(IGB_RX_BUFSZ) < (NET_SKB_PAD + - NET_IP_ALIGN + - IGB_TS_HDR_LEN + - ETH_FRAME_LEN + - ETH_FCS_LEN)); - rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; page = rx_buffer->page; prefetchw(page); -- cgit v1.2.3 From ed65bdd8c0086d69948e6380dba0cc279a6906de Mon Sep 17 00:00:00 2001 From: Carolyn Wyborny Date: Wed, 6 Feb 2013 03:35:27 +0000 Subject: igb: Fix link setup for I210 devices This patch changes the setup copper link function to use a switch statement for the PHY id's available for the given PHY types. It also adds a case for the I210 PHY id, so the appropriate setup link function is called for it. Signed-off-by: Carolyn Wyborny Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/igb/e1000_82575.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c index 84e7e0909def..b64542acfa34 100644 --- a/drivers/net/ethernet/intel/igb/e1000_82575.c +++ b/drivers/net/ethernet/intel/igb/e1000_82575.c @@ -1361,11 +1361,16 @@ static s32 igb_setup_copper_link_82575(struct e1000_hw *hw) switch (hw->phy.type) { case e1000_phy_i210: case e1000_phy_m88: - if (hw->phy.id == I347AT4_E_PHY_ID || - hw->phy.id == M88E1112_E_PHY_ID) + switch (hw->phy.id) { + case I347AT4_E_PHY_ID: + case M88E1112_E_PHY_ID: + case I210_I_PHY_ID: ret_val = igb_copper_link_setup_m88_gen2(hw); - else + break; + default: ret_val = igb_copper_link_setup_m88(hw); + break; + } break; case e1000_phy_igp_3: ret_val = igb_copper_link_setup_igp(hw); -- cgit v1.2.3 From 603e86fa39cd48edba5ee8a4d19637bd41e2a8bf Mon Sep 17 00:00:00 2001 From: Carolyn Wyborny Date: Wed, 20 Feb 2013 07:40:55 +0000 Subject: igb: Fix for lockdep issue in igb_get_i2c_client This patch fixes a lockdep warning in igb_get_i2c_client by refactoring the initialization and usage of the i2c_client completely. There is no on the fly allocation of the single client needed today. 
Signed-off-by: Carolyn Wyborny Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/igb/igb.h | 2 +- drivers/net/ethernet/intel/igb/igb_hwmon.c | 14 ++++++ drivers/net/ethernet/intel/igb/igb_main.c | 69 +----------------------------- 3 files changed, 17 insertions(+), 68 deletions(-) diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h index d27edbc63923..25151401c2ab 100644 --- a/drivers/net/ethernet/intel/igb/igb.h +++ b/drivers/net/ethernet/intel/igb/igb.h @@ -447,7 +447,7 @@ struct igb_adapter { #endif struct i2c_algo_bit_data i2c_algo; struct i2c_adapter i2c_adap; - struct igb_i2c_client_list *i2c_clients; + struct i2c_client *i2c_client; }; #define IGB_FLAG_HAS_MSI (1 << 0) diff --git a/drivers/net/ethernet/intel/igb/igb_hwmon.c b/drivers/net/ethernet/intel/igb/igb_hwmon.c index 0a9b073d0b03..4623502054d5 100644 --- a/drivers/net/ethernet/intel/igb/igb_hwmon.c +++ b/drivers/net/ethernet/intel/igb/igb_hwmon.c @@ -39,6 +39,10 @@ #include #ifdef CONFIG_IGB_HWMON +struct i2c_board_info i350_sensor_info = { + I2C_BOARD_INFO("i350bb", (0Xf8 >> 1)), +}; + /* hwmon callback functions */ static ssize_t igb_hwmon_show_location(struct device *dev, struct device_attribute *attr, @@ -188,6 +192,7 @@ int igb_sysfs_init(struct igb_adapter *adapter) unsigned int i; int n_attrs; int rc = 0; + struct i2c_client *client = NULL; /* If this method isn't defined we don't support thermals */ if (adapter->hw.mac.ops.init_thermal_sensor_thresh == NULL) @@ -198,6 +203,15 @@ int igb_sysfs_init(struct igb_adapter *adapter) if (rc) goto exit; + /* init i2c_client */ + client = i2c_new_device(&adapter->i2c_adap, &i350_sensor_info); + if (client == NULL) { + dev_info(&adapter->pdev->dev, + "Failed to create new i2c device..\n"); + goto exit; + } + adapter->i2c_client = client; + /* Allocation space for max attributes * max num sensors * values (loc, temp, max, caution) */ diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 4d53a17959fa..4dbd62968c7a 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -1923,10 +1923,6 @@ void igb_set_fw_version(struct igb_adapter *adapter) return; } -static const struct i2c_board_info i350_sensor_info = { - I2C_BOARD_INFO("i350bb", 0Xf8), -}; - /* igb_init_i2c - Init I2C interface * @adapter: pointer to adapter structure * @@ -7717,67 +7713,6 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba) } } -static DEFINE_SPINLOCK(i2c_clients_lock); - -/* igb_get_i2c_client - returns matching client - * in adapters's client list. - * @adapter: adapter struct - * @dev_addr: device address of i2c needed. 
- */ -static struct i2c_client * -igb_get_i2c_client(struct igb_adapter *adapter, u8 dev_addr) -{ - ulong flags; - struct igb_i2c_client_list *client_list; - struct i2c_client *client = NULL; - struct i2c_board_info client_info = { - I2C_BOARD_INFO("igb", 0x00), - }; - - spin_lock_irqsave(&i2c_clients_lock, flags); - client_list = adapter->i2c_clients; - - /* See if we already have an i2c_client */ - while (client_list) { - if (client_list->client->addr == (dev_addr >> 1)) { - client = client_list->client; - goto exit; - } else { - client_list = client_list->next; - } - } - - /* no client_list found, create a new one */ - client_list = kzalloc(sizeof(*client_list), GFP_ATOMIC); - if (client_list == NULL) - goto exit; - - /* dev_addr passed to us is left-shifted by 1 bit - * i2c_new_device call expects it to be flush to the right. - */ - client_info.addr = dev_addr >> 1; - client_info.platform_data = adapter; - client_list->client = i2c_new_device(&adapter->i2c_adap, &client_info); - if (client_list->client == NULL) { - dev_info(&adapter->pdev->dev, - "Failed to create new i2c device..\n"); - goto err_no_client; - } - - /* insert new client at head of list */ - client_list->next = adapter->i2c_clients; - adapter->i2c_clients = client_list; - - client = client_list->client; - goto exit; - -err_no_client: - kfree(client_list); -exit: - spin_unlock_irqrestore(&i2c_clients_lock, flags); - return client; -} - /* igb_read_i2c_byte - Reads 8 bit word over I2C * @hw: pointer to hardware structure * @byte_offset: byte offset to read @@ -7791,7 +7726,7 @@ s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset, u8 dev_addr, u8 *data) { struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw); - struct i2c_client *this_client = igb_get_i2c_client(adapter, dev_addr); + struct i2c_client *this_client = adapter->i2c_client; s32 status; u16 swfw_mask = 0; @@ -7828,7 +7763,7 @@ s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset, u8 dev_addr, u8 data) { struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw); - struct i2c_client *this_client = igb_get_i2c_client(adapter, dev_addr); + struct i2c_client *this_client = adapter->i2c_client; s32 status; u16 swfw_mask = E1000_SWFW_PHY0_SM; -- cgit v1.2.3 From a4ed2e737cb73e4405a3649f8aef7619b99fecae Mon Sep 17 00:00:00 2001 From: Peter Hurley Date: Tue, 5 Mar 2013 06:09:04 +0000 Subject: net/irda: Fix port open counts Saving the port count bump is unsafe. If the tty is hung up while this open was blocking, the port count is zeroed. Explicitly check if the tty was hung up while blocking, and correct the port count if not. Signed-off-by: Peter Hurley Signed-off-by: David S. 
Miller --- net/irda/ircomm/ircomm_tty.c | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c index 9a5fd3c3e530..1721dc7e4315 100644 --- a/net/irda/ircomm/ircomm_tty.c +++ b/net/irda/ircomm/ircomm_tty.c @@ -280,7 +280,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self, struct tty_port *port = &self->port; DECLARE_WAITQUEUE(wait, current); int retval; - int do_clocal = 0, extra_count = 0; + int do_clocal = 0; unsigned long flags; IRDA_DEBUG(2, "%s()\n", __func__ ); @@ -315,10 +315,8 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self, __FILE__, __LINE__, tty->driver->name, port->count); spin_lock_irqsave(&port->lock, flags); - if (!tty_hung_up_p(filp)) { - extra_count = 1; + if (!tty_hung_up_p(filp)) port->count--; - } spin_unlock_irqrestore(&port->lock, flags); port->blocked_open++; @@ -361,12 +359,10 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self, __set_current_state(TASK_RUNNING); remove_wait_queue(&port->open_wait, &wait); - if (extra_count) { - /* ++ is not atomic, so this should be protected - Jean II */ - spin_lock_irqsave(&port->lock, flags); + spin_lock_irqsave(&port->lock, flags); + if (!tty_hung_up_p(filp)) port->count++; - spin_unlock_irqrestore(&port->lock, flags); - } + spin_unlock_irqrestore(&port->lock, flags); port->blocked_open--; IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n", -- cgit v1.2.3 From 2f7c069b96ed7b1f6236f2fa7b0bc06f4f54f2d9 Mon Sep 17 00:00:00 2001 From: Peter Hurley Date: Tue, 5 Mar 2013 06:09:05 +0000 Subject: net/irda: Hold port lock while bumping blocked_open Although tty_lock() already protects concurrent update to blocked_open, that fails to meet the separation-of-concerns between tty_port and tty. Signed-off-by: Peter Hurley Signed-off-by: David S. Miller --- net/irda/ircomm/ircomm_tty.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c index 1721dc7e4315..d282bbea710e 100644 --- a/net/irda/ircomm/ircomm_tty.c +++ b/net/irda/ircomm/ircomm_tty.c @@ -317,8 +317,8 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self, spin_lock_irqsave(&port->lock, flags); if (!tty_hung_up_p(filp)) port->count--; - spin_unlock_irqrestore(&port->lock, flags); port->blocked_open++; + spin_unlock_irqrestore(&port->lock, flags); while (1) { if (tty->termios.c_cflag & CBAUD) @@ -362,8 +362,8 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self, spin_lock_irqsave(&port->lock, flags); if (!tty_hung_up_p(filp)) port->count++; - spin_unlock_irqrestore(&port->lock, flags); port->blocked_open--; + spin_unlock_irqrestore(&port->lock, flags); IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n", __FILE__, __LINE__, tty->driver->name, port->count); -- cgit v1.2.3 From 0b176ce3a7cbfa92eceddf3896f1a504877d974a Mon Sep 17 00:00:00 2001 From: Peter Hurley Date: Tue, 5 Mar 2013 06:09:06 +0000 Subject: net/irda: Use barrier to set task state Without a memory and compiler barrier, the task state change can migrate relative to the condition testing in a blocking loop. However, the task state change must be visible across all cpus prior to testing those conditions. Failing to do this can result in the familiar 'lost wakeup' and this task will hang until killed. Signed-off-by: Peter Hurley Signed-off-by: David S. 
Miller --- net/irda/ircomm/ircomm_tty.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c index d282bbea710e..522543d9264a 100644 --- a/net/irda/ircomm/ircomm_tty.c +++ b/net/irda/ircomm/ircomm_tty.c @@ -324,7 +324,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self, if (tty->termios.c_cflag & CBAUD) tty_port_raise_dtr_rts(port); - current->state = TASK_INTERRUPTIBLE; + set_current_state(TASK_INTERRUPTIBLE); if (tty_hung_up_p(filp) || !test_bit(ASYNCB_INITIALIZED, &port->flags)) { -- cgit v1.2.3 From f74861ca87264e594e0134e8ac14db06be77fe6f Mon Sep 17 00:00:00 2001 From: Peter Hurley Date: Tue, 5 Mar 2013 06:09:07 +0000 Subject: net/irda: Raise dtr in non-blocking open DTR/RTS need to be raised, regardless of the open() mode, but not if the port has already shutdown. Signed-off-by: Peter Hurley Signed-off-by: David S. Miller --- net/irda/ircomm/ircomm_tty.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c index 522543d9264a..362ba47968e4 100644 --- a/net/irda/ircomm/ircomm_tty.c +++ b/net/irda/ircomm/ircomm_tty.c @@ -289,8 +289,15 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self, * If non-blocking mode is set, or the port is not enabled, * then make the check up front and then exit. */ - if (filp->f_flags & O_NONBLOCK || tty->flags & (1 << TTY_IO_ERROR)){ - /* nonblock mode is set or port is not enabled */ + if (test_bit(TTY_IO_ERROR, &tty->flags)) { + port->flags |= ASYNC_NORMAL_ACTIVE; + return 0; + } + + if (filp->f_flags & O_NONBLOCK) { + /* nonblock mode is set */ + if (tty->termios.c_cflag & CBAUD) + tty_port_raise_dtr_rts(port); port->flags |= ASYNC_NORMAL_ACTIVE; IRDA_DEBUG(1, "%s(), O_NONBLOCK requested!\n", __func__ ); return 0; -- cgit v1.2.3 From 9b99b7e90bf5cd9a88823c49574ba80d92a98262 Mon Sep 17 00:00:00 2001 From: Paolo Valente Date: Tue, 5 Mar 2013 08:04:57 +0000 Subject: pkt_sched: sch_qfq: properly cap timestamps in charge_actual_service QFQ+ schedules the active aggregates in a group using a bucket list (one list per group). The bucket in which each aggregate is inserted depends on the aggregate's timestamps, and the number of buckets in a group is enough to accomodate the possible (range of) values of the timestamps of all the aggregates in the group. For this property to hold, timestamps must however be computed correctly. One necessary condition for computing timestamps correctly is that the number of bits dequeued for each aggregate, while the aggregate is in service, does not exceed the maximum budget budgetmax assigned to the aggregate. For each aggregate, budgetmax is proportional to the number of classes in the aggregate. If the number of classes of the aggregate is decreased through qfq_change_class(), then budgetmax is decreased automatically as well. Problems may occur if the aggregate is in service when budgetmax is decreased, because the current remaining budget of the aggregate and/or the service already received by the aggregate may happen to be larger than the new value of budgetmax. In this case, when the aggregate is eventually deselected and its timestamps are updated, the aggregate may happen to have received an amount of service larger than budgetmax. This may cause the aggregate to be assigned a higher virtual finish time than the maximum acceptable value for the last bucket in the bucket list of the group. 
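A hypothetical worked example (the numbers are invented for illustration): an aggregate of three classes with lmax = 1500 gets budgetmax = 3 * 1500 = 4500. Suppose it has already consumed 4000 bytes of its budget while in service when one class is removed, so budgetmax drops to 3000. The service actually received, initial_budget - budget = 4000, now exceeds budgetmax, and computing the virtual finish time F from that raw value would push F beyond what the group's bucket list can represent.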
This fix introduces a cap that addresses this issue. Signed-off-by: Paolo Valente Reviewed-by: Fabio Checconi Signed-off-by: David S. Miller --- net/sched/sch_qfq.c | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c index e9a77f621c3d..489edd929932 100644 --- a/net/sched/sch_qfq.c +++ b/net/sched/sch_qfq.c @@ -298,6 +298,10 @@ static void qfq_update_agg(struct qfq_sched *q, struct qfq_aggregate *agg, new_num_classes == q->max_agg_classes - 1) /* agg no more full */ hlist_add_head(&agg->nonfull_next, &q->nonfull_aggs); + /* The next assignment may let + * agg->initial_budget > agg->budgetmax + * hold, we will take it into account in charge_actual_service(). + */ agg->budgetmax = new_num_classes * agg->lmax; new_agg_weight = agg->class_weight * new_num_classes; agg->inv_w = ONE_FP/new_agg_weight; @@ -988,8 +992,13 @@ static inline struct sk_buff *qfq_peek_skb(struct qfq_aggregate *agg, /* Update F according to the actual service received by the aggregate. */ static inline void charge_actual_service(struct qfq_aggregate *agg) { - /* compute the service received by the aggregate */ - u32 service_received = agg->initial_budget - agg->budget; + /* Compute the service received by the aggregate, taking into + * account that, after decreasing the number of classes in + * agg, it may happen that + * agg->initial_budget - agg->budget > agg->bugdetmax + */ + u32 service_received = min(agg->budgetmax, + agg->initial_budget - agg->budget); agg->F = agg->S + (u64)service_received * agg->inv_w; } -- cgit v1.2.3 From 624b85fb96206879c8146abdba071222bbd25259 Mon Sep 17 00:00:00 2001 From: Paolo Valente Date: Tue, 5 Mar 2013 08:04:58 +0000 Subject: pkt_sched: sch_qfq: fix the update of eligible-group sets Between two invocations of make_eligible, the system virtual time may happen to grow enough that, in its binary representation, a bit with higher order than 31 flips. This happens especially with TSO/GSO. Before this fix, the mask used in make_eligible was computed as (1UL< 31. The fix just replaces 1UL with 1ULL. Signed-off-by: Paolo Valente Reviewed-by: Fabio Checconi Signed-off-by: David S. Miller --- net/sched/sch_qfq.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c index 489edd929932..39d85cc0db26 100644 --- a/net/sched/sch_qfq.c +++ b/net/sched/sch_qfq.c @@ -821,7 +821,7 @@ static void qfq_make_eligible(struct qfq_sched *q) unsigned long old_vslot = q->oldV >> q->min_slot_shift; if (vslot != old_vslot) { - unsigned long mask = (1UL << fls(vslot ^ old_vslot)) - 1; + unsigned long mask = (1ULL << fls(vslot ^ old_vslot)) - 1; qfq_move_groups(q, mask, IR, ER); qfq_move_groups(q, mask, IB, EB); } -- cgit v1.2.3 From 2f3b89a1fe0823fceb544856c9eeb036a75ff091 Mon Sep 17 00:00:00 2001 From: Paolo Valente Date: Tue, 5 Mar 2013 08:04:59 +0000 Subject: pkt_sched: sch_qfq: serve activated aggregates immediately if the scheduler is empty If no aggregate is in service, then the function qfq_dequeue() does not dequeue any packet. For this reason, to guarantee QFQ+ to be work conserving, a just-activated aggregate must be set as in service immediately if it happens to be the only active aggregate. This is done by the function qfq_enqueue(). Unfortunately, the function qfq_add_to_agg(), used to add a class to an aggregate, does not perform this important additional operation. 
In particular, if: 1) qfq_add_to_agg() is invoked to complete the move of a class from a source aggregate, becoming, for this move, inactive, to a destination aggregate, becoming instead active, and 2) the destination aggregate becomes the only active aggregate, then this aggregate is not however set as in service. QFQ+ remains then in a non-work-conserving state until a new invocation of qfq_enqueue() recovers the situation. This fix solves the problem by moving the logic for setting an aggregate as in service directly into the function qfq_activate_agg(). Hence, from whatever point qfq_activate_aggregate() is invoked, QFQ+ remains work conserving. Since the more-complex logic of this new version of activate_aggregate() is not necessary, in qfq_dequeue(), to reschedule an aggregate that finishes its budget, then the aggregate is now rescheduled by invoking directly the functions needed. Signed-off-by: Paolo Valente Reviewed-by: Fabio Checconi Signed-off-by: David S. Miller --- net/sched/sch_qfq.c | 36 ++++++++++++++++++++++-------------- 1 file changed, 22 insertions(+), 14 deletions(-) diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c index 39d85cc0db26..8c5172c7f193 100644 --- a/net/sched/sch_qfq.c +++ b/net/sched/sch_qfq.c @@ -1003,6 +1003,12 @@ static inline void charge_actual_service(struct qfq_aggregate *agg) agg->F = agg->S + (u64)service_received * agg->inv_w; } +static inline void qfq_update_agg_ts(struct qfq_sched *q, + struct qfq_aggregate *agg, + enum update_reason reason); + +static void qfq_schedule_agg(struct qfq_sched *q, struct qfq_aggregate *agg); + static struct sk_buff *qfq_dequeue(struct Qdisc *sch) { struct qfq_sched *q = qdisc_priv(sch); @@ -1030,7 +1036,7 @@ static struct sk_buff *qfq_dequeue(struct Qdisc *sch) in_serv_agg->initial_budget = in_serv_agg->budget = in_serv_agg->budgetmax; - if (!list_empty(&in_serv_agg->active)) + if (!list_empty(&in_serv_agg->active)) { /* * Still active: reschedule for * service. Possible optimization: if no other @@ -1041,8 +1047,9 @@ static struct sk_buff *qfq_dequeue(struct Qdisc *sch) * handle it, we would need to maintain an * extra num_active_aggs field. */ - qfq_activate_agg(q, in_serv_agg, requeue); - else if (sch->q.qlen == 0) { /* no aggregate to serve */ + qfq_update_agg_ts(q, in_serv_agg, requeue); + qfq_schedule_agg(q, in_serv_agg); + } else if (sch->q.qlen == 0) { /* no aggregate to serve */ q->in_serv_agg = NULL; return NULL; } @@ -1226,17 +1233,11 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch) cl->deficit = agg->lmax; list_add_tail(&cl->alist, &agg->active); - if (list_first_entry(&agg->active, struct qfq_class, alist) != cl) - return err; /* aggregate was not empty, nothing else to do */ - - /* recharge budget */ - agg->initial_budget = agg->budget = agg->budgetmax; + if (list_first_entry(&agg->active, struct qfq_class, alist) != cl || + q->in_serv_agg == agg) + return err; /* non-empty or in service, nothing else to do */ - qfq_update_agg_ts(q, agg, enqueue); - if (q->in_serv_agg == NULL) - q->in_serv_agg = agg; - else if (agg != q->in_serv_agg) - qfq_schedule_agg(q, agg); + qfq_activate_agg(q, agg, enqueue); return err; } @@ -1293,8 +1294,15 @@ skip_update: static void qfq_activate_agg(struct qfq_sched *q, struct qfq_aggregate *agg, enum update_reason reason) { + agg->initial_budget = agg->budget = agg->budgetmax; /* recharge budg. */ + qfq_update_agg_ts(q, agg, reason); - qfq_schedule_agg(q, agg); + if (q->in_serv_agg == NULL) { /* no aggr. 
in service or scheduled */ + q->in_serv_agg = agg; /* start serving this aggregate */ + /* update V: to be in service, agg must be eligible */ + q->oldV = q->V = agg->S; + } else if (agg != q->in_serv_agg) + qfq_schedule_agg(q, agg); } static void qfq_slot_remove(struct qfq_sched *q, struct qfq_group *grp, -- cgit v1.2.3 From a0143efa96671dc51dab9bba776a66f9bfa1757f Mon Sep 17 00:00:00 2001 From: Paolo Valente Date: Tue, 5 Mar 2013 08:05:00 +0000 Subject: pkt_sched: sch_qfq: prevent budget from wrapping around after a dequeue Aggregate budgets are computed so as to guarantee that, after an aggregate has been selected for service, that aggregate has enough budget to serve at least one maximum-size packet for the classes it contains. For this reason, after a new aggregate has been selected for service, its next packet is immediately dequeued, without any further control. The maximum packet size for a class, lmax, can be changed through qfq_change_class(). In case the user sets lmax to a lower value than the the size of some of the still-to-arrive packets, QFQ+ will automatically push up lmax as it enqueues these packets. This automatic push up is likely to happen with TSO/GSO. In any case, if lmax is assigned a lower value than the size of some of the packets already enqueued for the class, then the following problem may occur: the size of the next packet to dequeue for the class may happen to be larger than lmax, after the aggregate to which the class belongs has been just selected for service. In this case, even the budget of the aggregate, which is an unsigned value, may be lower than the size of the next packet to dequeue. After dequeueing this packet and subtracting its size from the budget, the latter would wrap around. This fix prevents the budget from wrapping around after any packet dequeue. Signed-off-by: Paolo Valente Reviewed-by: Fabio Checconi Signed-off-by: David S. Miller --- net/sched/sch_qfq.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c index 8c5172c7f193..c34af93ddfe2 100644 --- a/net/sched/sch_qfq.c +++ b/net/sched/sch_qfq.c @@ -1068,7 +1068,15 @@ static struct sk_buff *qfq_dequeue(struct Qdisc *sch) qdisc_bstats_update(sch, skb); agg_dequeue(in_serv_agg, cl, len); - in_serv_agg->budget -= len; + /* If lmax is lowered, through qfq_change_class, for a class + * owning pending packets with larger size than the new value + * of lmax, then the following condition may hold. + */ + if (unlikely(in_serv_agg->budget < len)) + in_serv_agg->budget = 0; + else + in_serv_agg->budget -= len; + q->V += (u64)len * IWSUM; pr_debug("qfq dequeue: len %u F %lld now %lld\n", len, (unsigned long long) in_serv_agg->F, -- cgit v1.2.3 From 40dd2d546198e7bbb8d3fe718957b158caa3fe52 Mon Sep 17 00:00:00 2001 From: Paolo Valente Date: Tue, 5 Mar 2013 08:05:01 +0000 Subject: pkt_sched: sch_qfq: do not allow virtual time to jump if an aggregate is in service By definition of (the algorithm of) QFQ+, the system virtual time must be pushed up only if there is no 'eligible' aggregate, i.e. no aggregate that would have started to be served also in the ideal system emulated by QFQ+. QFQ+ serves only eligible aggregates, hence the aggregate currently in service is eligible. As a consequence, to decide whether there is no eligible aggregate, QFQ+ must also check whether there is no aggregate in service. Signed-off-by: Paolo Valente Reviewed-by: Fabio Checconi Signed-off-by: David S. 
Miller --- net/sched/sch_qfq.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c index c34af93ddfe2..6b7ce803d07c 100644 --- a/net/sched/sch_qfq.c +++ b/net/sched/sch_qfq.c @@ -1279,7 +1279,8 @@ static void qfq_schedule_agg(struct qfq_sched *q, struct qfq_aggregate *agg) /* group was surely ineligible, remove */ __clear_bit(grp->index, &q->bitmaps[IR]); __clear_bit(grp->index, &q->bitmaps[IB]); - } else if (!q->bitmaps[ER] && qfq_gt(roundedS, q->V)) + } else if (!q->bitmaps[ER] && qfq_gt(roundedS, q->V) && + q->in_serv_agg == NULL) q->V = roundedS; grp->S = roundedS; -- cgit v1.2.3 From 76e4cb0d3a583900f844f1704b19b7e8c5df8837 Mon Sep 17 00:00:00 2001 From: Paolo Valente Date: Tue, 5 Mar 2013 08:05:02 +0000 Subject: pkt_sched: sch_qfq: remove a useless invocation of qfq_update_eligible QFQ+ can select for service only 'eligible' aggregates, i.e., aggregates that would have started to be served also in the emulated ideal system. As a consequence, for QFQ+ to be work conserving, at least one of the active aggregates must be eligible when it is time to choose the next aggregate to serve. The set of eligible aggregates is updated through the function qfq_update_eligible(), which does guarantee that, after its invocation, at least one of the active aggregates is eligible. Because of this property, this function is invoked in qfq_deactivate_agg() to guarantee that at least one of the active aggregates is still eligible after an aggregate has been deactivated. In particular, the critical case is when there are other active aggregates, but the aggregate being deactivated happens to be the only one eligible. However, this precaution is not needed for QFQ+ to be work conserving, because update_eligible() is always invoked also at the beginning of qfq_choose_next_agg(). This patch removes the additional invocation of update_eligible() in qfq_deactivate_agg(). Signed-off-by: Paolo Valente Reviewed-by: Fabio Checconi Signed-off-by: David S. Miller --- net/sched/sch_qfq.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c index 6b7ce803d07c..d51852bba01c 100644 --- a/net/sched/sch_qfq.c +++ b/net/sched/sch_qfq.c @@ -1383,8 +1383,6 @@ static void qfq_deactivate_agg(struct qfq_sched *q, struct qfq_aggregate *agg) __set_bit(grp->index, &q->bitmaps[s]); } } - - qfq_update_eligible(q); } static void qfq_qlen_notify(struct Qdisc *sch, unsigned long arg) -- cgit v1.2.3 From 88c4c066c6b4db26dc4909ee94e6bf377e8e8e81 Mon Sep 17 00:00:00 2001 From: Zang MingJie Date: Mon, 4 Mar 2013 06:07:34 +0000 Subject: reset nf before xmit vxlan encapsulated packet We should reset nf settings bond to the skb as ipip/ipgre do. If not, the conntrack/nat info bond to the origin packet may continually redirect the packet to vxlan interface causing a routing loop. this is the scenario: VETP VXLAN Gateway /----\ /---------------\ | | | | | vx+--+vx --NAT-> eth0+--> Internet | | | | \----/ \---------------/ when there are any packet coming from internet to the vetp, there will be lots of garbage packets coming out the gateway's vxlan interface, but none actually sent to the physical interface, because they are redirected back to the vxlan interface in the postrouting chain of NAT rule, and dmesg complains: Mar 1 21:52:53 debian kernel: [ 8802.997699] Dead loop on virtual device vxlan0, fix it urgently! Mar 1 21:52:54 debian kernel: [ 8804.004907] Dead loop on virtual device vxlan0, fix it urgently! 
Mar 1 21:52:55 debian kernel: [ 8805.012189] Dead loop on virtual device vxlan0, fix it urgently! Mar 1 21:52:56 debian kernel: [ 8806.020593] Dead loop on virtual device vxlan0, fix it urgently! the patch should fix the problem Signed-off-by: Zang MingJie Signed-off-by: David S. Miller --- drivers/net/vxlan.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index f10e58ac9c1b..c3e3d2929ee3 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -961,6 +961,8 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev) iph->ttl = ttl ? : ip4_dst_hoplimit(&rt->dst); tunnel_ip_select_ident(skb, old_iph, &rt->dst); + nf_reset(skb); + vxlan_set_owner(dev, skb); /* See iptunnel_xmit() */ -- cgit v1.2.3 From 691b3b7e1329141acf1e5ed44d8b468cea065fe3 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Mon, 4 Mar 2013 12:32:43 +0000 Subject: net: fix new kernel-doc warnings in net core Fix new kernel-doc warnings in net/core/dev.c: Warning(net/core/dev.c:4788): No description found for parameter 'new_carrier' Warning(net/core/dev.c:4788): Excess function parameter 'new_carries' description in 'dev_change_carrier' Signed-off-by: Randy Dunlap Signed-off-by: David S. Miller --- net/core/dev.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/core/dev.c b/net/core/dev.c index a06a7a58dd11..5ca8734ae63a 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -4780,7 +4780,7 @@ EXPORT_SYMBOL(dev_set_mac_address); /** * dev_change_carrier - Change device carrier * @dev: device - * @new_carries: new value + * @new_carrier: new value * * Change device carrier */ -- cgit v1.2.3 From d1f41b67ff7735193bc8b418b98ac99a448833e2 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Tue, 5 Mar 2013 07:15:13 +0000 Subject: net: reduce net_rx_action() latency to 2 HZ We should use time_after_eq() to get maximum latency of two ticks, instead of three. Bug added in commit 24f8b2385 (net: increase receive packet quantum) Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- net/core/dev.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/core/dev.c b/net/core/dev.c index 5ca8734ae63a..8f152f904f70 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -4103,7 +4103,7 @@ static void net_rx_action(struct softirq_action *h) * Allow this to run for 2 jiffies since which will allow * an average latency of 1.5/HZ. */ - if (unlikely(budget <= 0 || time_after(jiffies, time_limit))) + if (unlikely(budget <= 0 || time_after_eq(jiffies, time_limit))) goto softnet_break; local_irq_enable(); -- cgit v1.2.3 From fa2b04f4502d74659e4e4b1294c6d88e08ece032 Mon Sep 17 00:00:00 2001 From: David Ward Date: Tue, 5 Mar 2013 17:06:32 +0000 Subject: net/ipv4: Timestamp option cannot overflow with prespecified addresses When a router forwards a packet that contains the IPv4 timestamp option, if there is no space left in the option for the router to add its own timestamp, then the router increments the Overflow value in the option. However, if the addresses of the routers are prespecified in the option, then the overflow condition cannot happen: the option is structured so that each prespecified router has a place to write its timestamp. Other routers do not add a timestamp, so there will never be a lack of space. This fix ensures that the Overflow value in the IPv4 timestamp option is not incremented when the addresses of the routers are prespecified, even if the Pointer value is greater than the Length value. 
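For illustration, a stand-alone sketch of the check (not the kernel code itself; the flag value follows RFC 791, where 3 means "prespecified addresses"):

	#include <stdio.h>

	#define IPOPT_TS_PRESPEC 3

	/* optptr points at the start of the timestamp option:
	 * optptr[0] = type (68), optptr[1] = length, optptr[2] = pointer,
	 * optptr[3] = overflow count (high nibble) | flag (low nibble) */
	static int ts_overflow_applies(const unsigned char *optptr)
	{
		return (optptr[3] & 0xF) != IPOPT_TS_PRESPEC;
	}

	int main(void)
	{
		/* full prespecified-address option: pointer past length */
		unsigned char opt[] = { 68, 36, 37, IPOPT_TS_PRESPEC };

		printf("bump overflow? %s\n",
		       ts_overflow_applies(opt) ? "yes" : "no");
		return 0;
	}

Only when the flag is not "prespecified" does the overflow counter in the high nibble need to be incremented, which is exactly the condition added below.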
Signed-off-by: David Ward Signed-off-by: David S. Miller --- net/ipv4/ip_options.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c index f6289bf6f332..310a3647c83d 100644 --- a/net/ipv4/ip_options.c +++ b/net/ipv4/ip_options.c @@ -423,7 +423,7 @@ int ip_options_compile(struct net *net, put_unaligned_be32(midtime, timeptr); opt->is_changed = 1; } - } else { + } else if ((optptr[3]&0xF) != IPOPT_TS_PRESPEC) { unsigned int overflow = optptr[3]>>4; if (overflow == 15) { pp_ptr = optptr + 3; -- cgit v1.2.3 From 66d29cbc59433ba538922a9e958495156b31b83b Mon Sep 17 00:00:00 2001 From: Gavin Shan Date: Sun, 3 Mar 2013 21:48:46 +0000 Subject: benet: Wait f/w POST until timeout While PCI card faces EEH errors, reset (usually hot reset) is expected to recover from the EEH errors. After EEH core finishes the reset, the driver callback (be_eeh_reset) is called and wait the firmware to complete POST successfully. The original code would return with error once detecting failure during POST stage. That seems not enough. The patch forces the driver (be_eeh_reset) to wait the firmware completes POST until timeout, instead of returning error upon detection POST failure immediately. Also, it would improve the reliability of the EEH funtionality of the driver. Signed-off-by: Gavin Shan Acked-by: Sathya Perla Signed-off-by: David S. Miller --- drivers/net/ethernet/emulex/benet/be_cmds.c | 27 ++++++++++----------------- 1 file changed, 10 insertions(+), 17 deletions(-) diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index 071aea79d218..813407f66c7c 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c @@ -473,7 +473,7 @@ static int be_mbox_notify_wait(struct be_adapter *adapter) return 0; } -static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage) +static void be_POST_stage_get(struct be_adapter *adapter, u16 *stage) { u32 sem; u32 reg = skyhawk_chip(adapter) ? SLIPORT_SEMAPHORE_OFFSET_SH : @@ -481,11 +481,6 @@ static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage) pci_read_config_dword(adapter->pdev, reg, &sem); *stage = sem & POST_STAGE_MASK; - - if ((sem >> POST_ERR_SHIFT) & POST_ERR_MASK) - return -1; - else - return 0; } int lancer_wait_ready(struct be_adapter *adapter) @@ -579,19 +574,17 @@ int be_fw_wait_ready(struct be_adapter *adapter) } do { - status = be_POST_stage_get(adapter, &stage); - if (status) { - dev_err(dev, "POST error; stage=0x%x\n", stage); - return -1; - } else if (stage != POST_STAGE_ARMFW_RDY) { - if (msleep_interruptible(2000)) { - dev_err(dev, "Waiting for POST aborted\n"); - return -EINTR; - } - timeout += 2; - } else { + be_POST_stage_get(adapter, &stage); + if (stage == POST_STAGE_ARMFW_RDY) return 0; + + dev_info(dev, "Waiting for POST, %ds elapsed\n", + timeout); + if (msleep_interruptible(2000)) { + dev_err(dev, "Waiting for POST aborted\n"); + return -EINTR; } + timeout += 2; } while (timeout < 60); dev_err(dev, "POST timeout; stage=0x%x\n", stage); -- cgit v1.2.3 From 35205b211c8d17a8a0b5e8926cb7c73e9a7ef1ad Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Tue, 5 Mar 2013 01:03:47 +0000 Subject: sfc: Disable soft interrupt handling during efx_device_detach_sync() efx_device_detach_sync() locks all TX queues before marking the device detached and thus disabling further TX scheduling. 
But it can still be interrupted by TX completions which then result in TX scheduling in soft interrupt context. This will deadlock when it tries to acquire a TX queue lock that efx_device_detach_sync() already acquired. To avoid deadlock, we must use netif_tx_{,un}lock_bh(). Signed-off-by: Ben Hutchings --- drivers/net/ethernet/sfc/efx.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h index 50247dfe8f57..d2f790df6dcb 100644 --- a/drivers/net/ethernet/sfc/efx.h +++ b/drivers/net/ethernet/sfc/efx.h @@ -171,9 +171,9 @@ static inline void efx_device_detach_sync(struct efx_nic *efx) * TX scheduler is stopped when we're done and before * netif_device_present() becomes false. */ - netif_tx_lock(dev); + netif_tx_lock_bh(dev); netif_device_detach(dev); - netif_tx_unlock(dev); + netif_tx_unlock_bh(dev); } #endif /* EFX_EFX_H */ -- cgit v1.2.3 From c73e787a8db9117d59b5180baf83203a42ecadca Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Tue, 5 Mar 2013 17:49:39 +0000 Subject: sfc: Correct efx_rx_buffer::page_offset when EFX_PAGE_IP_ALIGN != 0 RX DMA buffers start at an offset of EFX_PAGE_IP_ALIGN bytes from the start of a cache line. This offset obviously needs to be included in the virtual address, but this was missed in commit b590ace09d51 ('sfc: Fix efx_rx_buf_offset() in the presence of swiotlb') since EFX_PAGE_IP_ALIGN is equal to 0 on both x86 and powerpc. Signed-off-by: Ben Hutchings --- drivers/net/ethernet/sfc/rx.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c index 879ff5849bbd..bb579a6128c8 100644 --- a/drivers/net/ethernet/sfc/rx.c +++ b/drivers/net/ethernet/sfc/rx.c @@ -215,7 +215,7 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue) rx_buf = efx_rx_buffer(rx_queue, index); rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN; rx_buf->u.page = page; - rx_buf->page_offset = page_offset; + rx_buf->page_offset = page_offset + EFX_PAGE_IP_ALIGN; rx_buf->len = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN; rx_buf->flags = EFX_RX_BUF_PAGE; ++rx_queue->added_count; -- cgit v1.2.3 From f422d2a04fe2e661fd439c19197a162cc9a36416 Mon Sep 17 00:00:00 2001 From: Jason Wang Date: Tue, 5 Mar 2013 19:10:26 +0000 Subject: net: docs: document multiqueue tuntap API Signed-off-by: Jason Wang Signed-off-by: David S. Miller --- Documentation/networking/tuntap.txt | 77 +++++++++++++++++++++++++++++++++++++ 1 file changed, 77 insertions(+) diff --git a/Documentation/networking/tuntap.txt b/Documentation/networking/tuntap.txt index c0aab985bad9..949d5dcdd9a3 100644 --- a/Documentation/networking/tuntap.txt +++ b/Documentation/networking/tuntap.txt @@ -105,6 +105,83 @@ Copyright (C) 1999-2000 Maxim Krasnyansky Proto [2 bytes] Raw protocol(IP, IPv6, etc) frame. + 3.3 Multiqueue tuntap interface: + + From version 3.8, Linux supports multiqueue tuntap which can uses multiple + file descriptors (queues) to parallelize packets sending or receiving. The + device allocation is the same as before, and if user wants to create multiple + queues, TUNSETIFF with the same device name must be called many times with + IFF_MULTI_QUEUE flag. + + char *dev should be the name of the device, queues is the number of queues to + be created, fds is used to store and return the file descriptors (queues) + created to the caller. Each file descriptor were served as the interface of a + queue which could be accessed by userspace. 
+ + #include + #include + + int tun_alloc_mq(char *dev, int queues, int *fds) + { + struct ifreq ifr; + int fd, err, i; + + if (!dev) + return -1; + + memset(&ifr, 0, sizeof(ifr)); + /* Flags: IFF_TUN - TUN device (no Ethernet headers) + * IFF_TAP - TAP device + * + * IFF_NO_PI - Do not provide packet information + * IFF_MULTI_QUEUE - Create a queue of multiqueue device + */ + ifr.ifr_flags = IFF_TAP | IFF_NO_PI | IFF_MULTI_QUEUE; + strcpy(ifr.ifr_name, dev); + + for (i = 0; i < queues; i++) { + if ((fd = open("/dev/net/tun", O_RDWR)) < 0) + goto err; + err = ioctl(fd, TUNSETIFF, (void *)&ifr); + if (err) { + close(fd); + goto err; + } + fds[i] = fd; + } + + return 0; + err: + for (--i; i >= 0; i--) + close(fds[i]); + return err; + } + + A new ioctl(TUNSETQUEUE) were introduced to enable or disable a queue. When + calling it with IFF_DETACH_QUEUE flag, the queue were disabled. And when + calling it with IFF_ATTACH_QUEUE flag, the queue were enabled. The queue were + enabled by default after it was created through TUNSETIFF. + + fd is the file descriptor (queue) that we want to enable or disable, when + enable is true we enable it, otherwise we disable it + + #include + #include + + int tun_set_queue(int fd, int enable) + { + struct ifreq ifr; + + memset(&ifr, 0, sizeof(ifr)); + + if (enable) + ifr.ifr_flags = IFF_ATTACH_QUEUE; + else + ifr.ifr_flags = IFF_DETACH_QUEUE; + + return ioctl(fd, TUNSETQUEUE, (void *)&ifr); + } + Universal TUN/TAP device driver Frequently Asked Question. 1. What platforms are supported by TUN/TAP driver ? -- cgit v1.2.3 From c5b3ad4c67989c778e4753be4f91dc7193a04d21 Mon Sep 17 00:00:00 2001 From: Sathya Perla Date: Tue, 5 Mar 2013 22:23:20 +0000 Subject: be2net: use CSR-BAR SEMAPHORE reg for BE2/BE3 The SLIPORT_SEMAPHORE register shadowed in the config-space may not reflect the correct POST stage after an EEH reset in BE2/3; it may return FW_READY state even though FW is not ready. This causes the driver to prematurely poll the FW mailbox and fail. For BE2/3 use the CSR-BAR/0xac instead. Reported-by: Gavin Shan Signed-off-by: Sathya Perla Signed-off-by: David S. Miller --- drivers/net/ethernet/emulex/benet/be.h | 1 + drivers/net/ethernet/emulex/benet/be_cmds.c | 15 +++++++++------ drivers/net/ethernet/emulex/benet/be_hw.h | 4 ++-- drivers/net/ethernet/emulex/benet/be_main.c | 10 ++++++++++ 4 files changed, 22 insertions(+), 8 deletions(-) diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h index 28ceb8414185..29aff55f2eea 100644 --- a/drivers/net/ethernet/emulex/benet/be.h +++ b/drivers/net/ethernet/emulex/benet/be.h @@ -349,6 +349,7 @@ struct be_adapter { struct pci_dev *pdev; struct net_device *netdev; + u8 __iomem *csr; /* CSR BAR used only for BE2/3 */ u8 __iomem *db; /* Door Bell */ struct mutex mbox_lock; /* For serializing mbox cmds to BE card */ diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index 813407f66c7c..3c9b4f12e3e5 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c @@ -473,14 +473,17 @@ static int be_mbox_notify_wait(struct be_adapter *adapter) return 0; } -static void be_POST_stage_get(struct be_adapter *adapter, u16 *stage) +static u16 be_POST_stage_get(struct be_adapter *adapter) { u32 sem; - u32 reg = skyhawk_chip(adapter) ? 
SLIPORT_SEMAPHORE_OFFSET_SH : - SLIPORT_SEMAPHORE_OFFSET_BE; - pci_read_config_dword(adapter->pdev, reg, &sem); - *stage = sem & POST_STAGE_MASK; + if (BEx_chip(adapter)) + sem = ioread32(adapter->csr + SLIPORT_SEMAPHORE_OFFSET_BEx); + else + pci_read_config_dword(adapter->pdev, + SLIPORT_SEMAPHORE_OFFSET_SH, &sem); + + return sem & POST_STAGE_MASK; } int lancer_wait_ready(struct be_adapter *adapter) @@ -574,7 +577,7 @@ int be_fw_wait_ready(struct be_adapter *adapter) } do { - be_POST_stage_get(adapter, &stage); + stage = be_POST_stage_get(adapter); if (stage == POST_STAGE_ARMFW_RDY) return 0; diff --git a/drivers/net/ethernet/emulex/benet/be_hw.h b/drivers/net/ethernet/emulex/benet/be_hw.h index 541d4530d5bf..62dc220695f7 100644 --- a/drivers/net/ethernet/emulex/benet/be_hw.h +++ b/drivers/net/ethernet/emulex/benet/be_hw.h @@ -32,8 +32,8 @@ #define MPU_EP_CONTROL 0 /********** MPU semphore: used for SH & BE *************/ -#define SLIPORT_SEMAPHORE_OFFSET_BE 0x7c -#define SLIPORT_SEMAPHORE_OFFSET_SH 0x94 +#define SLIPORT_SEMAPHORE_OFFSET_BEx 0xac /* CSR BAR offset */ +#define SLIPORT_SEMAPHORE_OFFSET_SH 0x94 /* PCI-CFG offset */ #define POST_STAGE_MASK 0x0000FFFF #define POST_ERR_MASK 0x1 #define POST_ERR_SHIFT 31 diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 3860888ac711..08e54f3d288b 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -3688,6 +3688,8 @@ static void be_netdev_init(struct net_device *netdev) static void be_unmap_pci_bars(struct be_adapter *adapter) { + if (adapter->csr) + pci_iounmap(adapter->pdev, adapter->csr); if (adapter->db) pci_iounmap(adapter->pdev, adapter->db); } @@ -3721,6 +3723,12 @@ static int be_map_pci_bars(struct be_adapter *adapter) adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >> SLI_INTF_IF_TYPE_SHIFT; + if (BEx_chip(adapter) && be_physfn(adapter)) { + adapter->csr = pci_iomap(adapter->pdev, 2, 0); + if (adapter->csr == NULL) + return -ENOMEM; + } + addr = pci_iomap(adapter->pdev, db_bar(adapter), 0); if (addr == NULL) goto pci_map_err; @@ -4329,6 +4337,8 @@ static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev) pci_restore_state(pdev); /* Check if card is ok and fw is ready */ + dev_info(&adapter->pdev->dev, + "Waiting for FW to be ready after EEH reset\n"); status = be_fw_wait_ready(adapter); if (status) return PCI_ERS_RESULT_DISCONNECT; -- cgit v1.2.3 From f8af75f3517a24838a36eb5797a1a3e60bf9e276 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Wed, 6 Mar 2013 11:02:37 +0000 Subject: tun: add a missing nf_reset() in tun_net_xmit() Dave reported following crash : general protection fault: 0000 [#1] SMP CPU 2 Pid: 25407, comm: qemu-kvm Not tainted 3.7.9-205.fc18.x86_64 #1 Hewlett-Packard HP Z400 Workstation/0B4Ch RIP: 0010:[] [] destroy_conntrack+0x35/0x120 [nf_conntrack] RSP: 0018:ffff880276913d78 EFLAGS: 00010206 RAX: 50626b6b7876376c RBX: ffff88026e530d68 RCX: ffff88028d158e00 RDX: ffff88026d0d5470 RSI: 0000000000000011 RDI: 0000000000000002 RBP: ffff880276913d88 R08: 0000000000000000 R09: ffff880295002900 R10: 0000000000000000 R11: 0000000000000003 R12: ffffffff81ca3b40 R13: ffffffff8151a8e0 R14: ffff880270875000 R15: 0000000000000002 FS: 00007ff3bce38a00(0000) GS:ffff88029fc40000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 000000008005003b CR2: 00007fd1430bd000 CR3: 000000027042b000 CR4: 00000000000027e0 DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 DR3: 0000000000000000 DR6: 
00000000ffff0ff0 DR7: 0000000000000400 Process qemu-kvm (pid: 25407, threadinfo ffff880276912000, task ffff88028c369720) Stack: ffff880156f59100 ffff880156f59100 ffff880276913d98 ffffffff815534f7 ffff880276913db8 ffffffff8151a74b ffff880270875000 ffff880156f59100 ffff880276913dd8 ffffffff8151a5a6 ffff880276913dd8 ffff88026d0d5470 Call Trace: [] nf_conntrack_destroy+0x17/0x20 [] skb_release_head_state+0x7b/0x100 [] __kfree_skb+0x16/0xa0 [] kfree_skb+0x36/0xa0 [] skb_queue_purge+0x20/0x40 [] __tun_detach+0x117/0x140 [tun] [] tun_chr_close+0x3c/0xd0 [tun] [] __fput+0xec/0x240 [] ____fput+0xe/0x10 [] task_work_run+0xa7/0xe0 [] do_notify_resume+0x71/0xb0 [] int_signal+0x12/0x17 Code: 00 00 04 48 89 e5 41 54 53 48 89 fb 4c 8b a7 e8 00 00 00 0f 85 de 00 00 00 0f b6 73 3e 0f b7 7b 2a e8 10 40 00 00 48 85 c0 74 0e <48> 8b 40 28 48 85 c0 74 05 48 89 df ff d0 48 c7 c7 08 6a 3a a0 RIP [] destroy_conntrack+0x35/0x120 [nf_conntrack] RSP This is because tun_net_xmit() needs to call nf_reset() before queuing skb into receive_queue Reported-by: Dave Jones Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- drivers/net/tun.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 2c6a22e278ea..b7c457adc0dc 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -747,6 +747,8 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev) goto drop; skb_orphan(skb); + nf_reset(skb); + /* Enqueue packet */ skb_queue_tail(&tfile->socket.sk->sk_receive_queue, skb); -- cgit v1.2.3 From 4e0855dff094b0d56d6b5b271e0ce7851cc1e063 Mon Sep 17 00:00:00 2001 From: Konstantin Khlebnikov Date: Tue, 5 Mar 2013 09:42:59 +0000 Subject: e1000e: fix pci-device enable-counter balance This patch removes redundant and unbalanced pci_disable_device() from __e1000_shutdown(). pci_clear_master() is enough, device can go into suspended state with elevated enable_cnt. Bug was introduced in commit 23606cf5d1192c2b17912cb2ef6e62f9b11de133 ("e1000e / PCI / PM: Add basic runtime PM support (rev. 4)") in v2.6.35 Cc: Bruce Allan CC: Stable Signed-off-by: Konstantin Khlebnikov Acked-by: Rafael J. Wysocki Tested-by: Borislav Petkov Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/e1000e/netdev.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index a177b8b65c44..1799021944eb 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -5986,7 +5986,7 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake, */ e1000e_release_hw_control(adapter); - pci_disable_device(pdev); + pci_clear_master(pdev); return 0; } -- cgit v1.2.3 From 66148babe728f3e00e13c56f6b0ecf325abd80da Mon Sep 17 00:00:00 2001 From: Konstantin Khlebnikov Date: Tue, 5 Mar 2013 09:43:04 +0000 Subject: e1000e: fix runtime power management transitions This patch removes redundant actions from driver and fixes its interaction with actions in pci-bus runtime power management code. It removes pci_save_state() from __e1000_shutdown() for normal adapters, PCI bus callbacks pci_pm_*() will do all this for us. Now __e1000_shutdown() switches to D3-state only quad-port adapters, because they needs quirk for clearing false-positive error from downsteam pci-e port. pci_save_state() now called after clearing bus-master bit, thus __e1000_resume() and e1000_io_slot_reset() must set it back after restoring configuration space. 
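For illustration, a minimal sketch of the suspend/resume ordering described above is given below. It is not the actual e1000e code; the example_suspend/example_resume helpers are hypothetical and only use the generic PCI core helpers named in this commit.

	#include <linux/pci.h>

	/* Sketch of the intended ordering only -- not the driver's real callbacks. */
	static int example_suspend(struct pci_dev *pdev)
	{
		pci_clear_master(pdev);            /* stop bus mastering first */
		pci_save_state(pdev);              /* save config space with the master bit already clear */
		return pci_prepare_to_sleep(pdev); /* let the PCI core set up wake-up and the D-state */
	}

	static int example_resume(struct pci_dev *pdev)
	{
		pci_restore_state(pdev);           /* restores the saved (master-cleared) config space */
		pci_set_master(pdev);              /* so bus mastering must be re-enabled explicitly */
		return 0;
	}

This is why the patch moves pci_set_master() to after pci_restore_state() in __e1000_resume() and e1000_io_slot_reset().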
This patch set get_link_status before calling pm_runtime_put() in e1000_open() to allow e1000_idle() get real link status and schedule first runtime suspend. This patch also enables wakeup for device if management mode is enabled (like for WoL) as result pci_prepare_to_sleep() would setup wakeup without special actions like custom 'enable_wakeup' sign. Cc: Bruce Allan Signed-off-by: Konstantin Khlebnikov Acked-by: Rafael J. Wysocki Tested-by: Borislav Petkov Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/e1000e/netdev.c | 78 +++++++----------------------- 1 file changed, 18 insertions(+), 60 deletions(-) diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 1799021944eb..2954cc7352e1 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -4303,6 +4303,7 @@ static int e1000_open(struct net_device *netdev) netif_start_queue(netdev); adapter->idle_check = true; + hw->mac.get_link_status = true; pm_runtime_put(&pdev->dev); /* fire a link status change interrupt to start the watchdog */ @@ -5887,8 +5888,7 @@ release: return retval; } -static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake, - bool runtime) +static int __e1000_shutdown(struct pci_dev *pdev, bool runtime) { struct net_device *netdev = pci_get_drvdata(pdev); struct e1000_adapter *adapter = netdev_priv(netdev); @@ -5912,10 +5912,6 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake, } e1000e_reset_interrupt_capability(adapter); - retval = pci_save_state(pdev); - if (retval) - return retval; - status = er32(STATUS); if (status & E1000_STATUS_LU) wufc &= ~E1000_WUFC_LNKC; @@ -5971,13 +5967,6 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake, ew32(WUFC, 0); } - *enable_wake = !!wufc; - - /* make sure adapter isn't asleep if manageability is enabled */ - if ((adapter->flags & FLAG_MNG_PT_ENABLED) || - (hw->mac.ops.check_mng_mode(hw))) - *enable_wake = true; - if (adapter->hw.phy.type == e1000_phy_igp_3) e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw); @@ -5988,26 +5977,6 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake, pci_clear_master(pdev); - return 0; -} - -static void e1000_power_off(struct pci_dev *pdev, bool sleep, bool wake) -{ - if (sleep && wake) { - pci_prepare_to_sleep(pdev); - return; - } - - pci_wake_from_d3(pdev, wake); - pci_set_power_state(pdev, PCI_D3hot); -} - -static void e1000_complete_shutdown(struct pci_dev *pdev, bool sleep, - bool wake) -{ - struct net_device *netdev = pci_get_drvdata(pdev); - struct e1000_adapter *adapter = netdev_priv(netdev); - /* The pci-e switch on some quad port adapters will report a * correctable error when the MAC transitions from D0 to D3. 
To * prevent this we need to mask off the correctable errors on the @@ -6021,12 +5990,13 @@ static void e1000_complete_shutdown(struct pci_dev *pdev, bool sleep, pcie_capability_write_word(us_dev, PCI_EXP_DEVCTL, (devctl & ~PCI_EXP_DEVCTL_CERE)); - e1000_power_off(pdev, sleep, wake); + pci_save_state(pdev); + pci_prepare_to_sleep(pdev); pcie_capability_write_word(us_dev, PCI_EXP_DEVCTL, devctl); - } else { - e1000_power_off(pdev, sleep, wake); } + + return 0; } #ifdef CONFIG_PCIEASPM @@ -6084,9 +6054,7 @@ static int __e1000_resume(struct pci_dev *pdev) if (aspm_disable_flag) e1000e_disable_aspm(pdev, aspm_disable_flag); - pci_set_power_state(pdev, PCI_D0); - pci_restore_state(pdev); - pci_save_state(pdev); + pci_set_master(pdev); e1000e_set_interrupt_capability(adapter); if (netif_running(netdev)) { @@ -6152,14 +6120,8 @@ static int __e1000_resume(struct pci_dev *pdev) static int e1000_suspend(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); - int retval; - bool wake; - - retval = __e1000_shutdown(pdev, &wake, false); - if (!retval) - e1000_complete_shutdown(pdev, true, wake); - return retval; + return __e1000_shutdown(pdev, false); } static int e1000_resume(struct device *dev) @@ -6182,13 +6144,10 @@ static int e1000_runtime_suspend(struct device *dev) struct net_device *netdev = pci_get_drvdata(pdev); struct e1000_adapter *adapter = netdev_priv(netdev); - if (e1000e_pm_ready(adapter)) { - bool wake; - - __e1000_shutdown(pdev, &wake, true); - } + if (!e1000e_pm_ready(adapter)) + return 0; - return 0; + return __e1000_shutdown(pdev, true); } static int e1000_idle(struct device *dev) @@ -6226,12 +6185,7 @@ static int e1000_runtime_resume(struct device *dev) static void e1000_shutdown(struct pci_dev *pdev) { - bool wake = false; - - __e1000_shutdown(pdev, &wake, false); - - if (system_state == SYSTEM_POWER_OFF) - e1000_complete_shutdown(pdev, false, wake); + __e1000_shutdown(pdev, false); } #ifdef CONFIG_NET_POLL_CONTROLLER @@ -6352,9 +6306,9 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev) "Cannot re-enable PCI device after reset.\n"); result = PCI_ERS_RESULT_DISCONNECT; } else { - pci_set_master(pdev); pdev->state_saved = true; pci_restore_state(pdev); + pci_set_master(pdev); pci_enable_wake(pdev, PCI_D3hot, 0); pci_enable_wake(pdev, PCI_D3cold, 0); @@ -6783,7 +6737,11 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* initialize the wol settings based on the eeprom settings */ adapter->wol = adapter->eeprom_wol; - device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); + + /* make sure adapter isn't asleep if manageability is enabled */ + if (adapter->wol || (adapter->flags & FLAG_MNG_PT_ENABLED) || + (hw->mac.ops.check_mng_mode(hw))) + device_wakeup_enable(&pdev->dev); /* save off EEPROM version number */ e1000_read_nvm(&adapter->hw, 5, 1, &adapter->eeprom_vers); -- cgit v1.2.3 From e60b22c5b7e59db09a7c9490b1e132c7e49ae904 Mon Sep 17 00:00:00 2001 From: Konstantin Khlebnikov Date: Tue, 5 Mar 2013 09:43:09 +0000 Subject: e1000e: fix accessing to suspended device This patch fixes some annoying messages like 'Error reading PHY register' and 'Hardware Erorr' and saves several seconds on reboot. Cc: Bruce Allan Signed-off-by: Konstantin Khlebnikov Acked-by: Rafael J. 
Wysocki Tested-by: Borislav Petkov Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/e1000e/ethtool.c | 13 +++++++++++++ drivers/net/ethernet/intel/e1000e/netdev.c | 2 ++ 2 files changed, 15 insertions(+) diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c index 2c1813737f6d..f91a8f3f9d48 100644 --- a/drivers/net/ethernet/intel/e1000e/ethtool.c +++ b/drivers/net/ethernet/intel/e1000e/ethtool.c @@ -36,6 +36,7 @@ #include #include #include +#include #include "e1000.h" @@ -2229,7 +2230,19 @@ static int e1000e_get_ts_info(struct net_device *netdev, return 0; } +static int e1000e_ethtool_begin(struct net_device *netdev) +{ + return pm_runtime_get_sync(netdev->dev.parent); +} + +static void e1000e_ethtool_complete(struct net_device *netdev) +{ + pm_runtime_put_sync(netdev->dev.parent); +} + static const struct ethtool_ops e1000_ethtool_ops = { + .begin = e1000e_ethtool_begin, + .complete = e1000e_ethtool_complete, .get_settings = e1000_get_settings, .set_settings = e1000_set_settings, .get_drvinfo = e1000_get_drvinfo, diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 2954cc7352e1..948b86ffa4f0 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -4663,6 +4663,7 @@ static void e1000_phy_read_status(struct e1000_adapter *adapter) (adapter->hw.phy.media_type == e1000_media_type_copper)) { int ret_val; + pm_runtime_get_sync(&adapter->pdev->dev); ret_val = e1e_rphy(hw, MII_BMCR, &phy->bmcr); ret_val |= e1e_rphy(hw, MII_BMSR, &phy->bmsr); ret_val |= e1e_rphy(hw, MII_ADVERTISE, &phy->advertise); @@ -4673,6 +4674,7 @@ static void e1000_phy_read_status(struct e1000_adapter *adapter) ret_val |= e1e_rphy(hw, MII_ESTATUS, &phy->estatus); if (ret_val) e_warn("Error reading PHY register\n"); + pm_runtime_put_sync(&adapter->pdev->dev); } else { /* Do not read PHY registers if link is not up * Set values to typical power-on defaults -- cgit v1.2.3 From 3fb817f1cd54bedd23e8913051473d574a0f1717 Mon Sep 17 00:00:00 2001 From: Jack Morgenstein Date: Thu, 7 Mar 2013 03:46:52 +0000 Subject: net/mlx4_core: Disable mlx4_QP_ATTACH calls from guests if the host uses flow steering Guests kernels may not correctly detect if DMFS (device-enabled flow steering) is activated by the host. If DMFS is activated, the master should return error to guests which try to use the B0-steering flow calls (mlx4_QP_ATTACH). Signed-off-by: Jack Morgenstein Signed-off-by: Or Gerlitz Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index 083fb48dc3d7..2995687f1aee 100644 --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c @@ -2990,6 +2990,9 @@ int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave, u8 steer_type_mask = 2; enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1; + if (dev->caps.steering_mode != MLX4_STEERING_MODE_B0) + return -EINVAL; + qpn = vhcr->in_modifier & 0xffffff; err = get_res(dev, slave, qpn, RES_QP, &rqp); if (err) -- cgit v1.2.3 From 0081c8f3814a8344ca975c085d987ec6c90499ae Mon Sep 17 00:00:00 2001 From: Jack Morgenstein Date: Thu, 7 Mar 2013 03:46:53 +0000 Subject: net/mlx4_core: Turn off device-managed FS bit in dev-cap wrapper if DMFS is not enabled Older kernels detect DMFS (device-managed flow steering) from the HCA device capability directly, regardless of whether the capability was enabled in INIT_HCA; this is fixed by commit 7b8157bed "mlx4_core: Adjustments to Flow Steering activation logic for SR-IOV". To protect against guests running kernels without this fix, the host driver should turn off the DMFS capability bit in mlx4_QUERY_DEV_CAP_wrapper. Signed-off-by: Jack Morgenstein Signed-off-by: Or Gerlitz Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx4/fw.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c index 50917eb3013e..f6245579962d 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.c +++ b/drivers/net/ethernet/mellanox/mlx4/fw.c @@ -787,6 +787,14 @@ int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave, bmme_flags &= ~MLX4_BMME_FLAG_TYPE_2_WIN; MLX4_PUT(outbox->buf, bmme_flags, QUERY_DEV_CAP_BMME_FLAGS_OFFSET); + /* turn off device-managed steering capability if not enabled */ + if (dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED) { + MLX4_GET(field, outbox->buf, + QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET); + field &= 0x7f; + MLX4_PUT(outbox->buf, field, + QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET); + } return 0; } -- cgit v1.2.3 From e7dbeba85600aa2c8daf99f8f53d9ad27e88b810 Mon Sep 17 00:00:00 2001 From: Jack Morgenstein Date: Thu, 7 Mar 2013 03:46:54 +0000 Subject: net/mlx4_core: Fix endianness bug in set_param_l The set_param_l function assumes that casting a u64 pointer to a u32 pointer allows access to the lower 32 bits, but on big-endian systems it results in writing the upper 32 bits instead. The fixed function reads the upper 32 bits of the 64-bit argument and ORs them with the 32-bit value passed to the function. Since this is now a "read-modify-write" operation, we got many "uninitialized variable" warnings which needed to be fixed as well. Reported-by: Alexander Schmidt Signed-off-by: Jack Morgenstein Signed-off-by: Or Gerlitz Signed-off-by: David S.
Miller --- drivers/net/ethernet/mellanox/mlx4/cq.c | 2 +- drivers/net/ethernet/mellanox/mlx4/main.c | 2 +- drivers/net/ethernet/mellanox/mlx4/mlx4.h | 2 +- drivers/net/ethernet/mellanox/mlx4/mr.c | 10 +++++----- drivers/net/ethernet/mellanox/mlx4/pd.c | 2 +- drivers/net/ethernet/mellanox/mlx4/port.c | 8 ++++---- drivers/net/ethernet/mellanox/mlx4/qp.c | 8 ++++---- drivers/net/ethernet/mellanox/mlx4/srq.c | 2 +- 8 files changed, 18 insertions(+), 18 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c index 7e64033d7de3..0706623cfb96 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cq.c +++ b/drivers/net/ethernet/mellanox/mlx4/cq.c @@ -226,7 +226,7 @@ void __mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn) static void mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn) { - u64 in_param; + u64 in_param = 0; int err; if (mlx4_is_mfunc(dev)) { diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index d180bc46826a..16abde20e1fc 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -1555,7 +1555,7 @@ void __mlx4_counter_free(struct mlx4_dev *dev, u32 idx) void mlx4_counter_free(struct mlx4_dev *dev, u32 idx) { - u64 in_param; + u64 in_param = 0; if (mlx4_is_mfunc(dev)) { set_param_l(&in_param, idx); diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h index cf883345af88..d738454116a0 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h @@ -1235,7 +1235,7 @@ int mlx4_get_qp_per_mgm(struct mlx4_dev *dev); static inline void set_param_l(u64 *arg, u32 val) { - *((u32 *)arg) = val; + *arg = (*arg & 0xffffffff00000000ULL) | (u64) val; } static inline void set_param_h(u64 *arg, u32 val) diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c index 602ca9bf78e4..f91719a08cba 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mr.c +++ b/drivers/net/ethernet/mellanox/mlx4/mr.c @@ -183,7 +183,7 @@ u32 __mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order) static u32 mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order) { - u64 in_param; + u64 in_param = 0; u64 out_param; int err; @@ -240,7 +240,7 @@ void __mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order) static void mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order) { - u64 in_param; + u64 in_param = 0; int err; if (mlx4_is_mfunc(dev)) { @@ -351,7 +351,7 @@ void __mlx4_mpt_release(struct mlx4_dev *dev, u32 index) static void mlx4_mpt_release(struct mlx4_dev *dev, u32 index) { - u64 in_param; + u64 in_param = 0; if (mlx4_is_mfunc(dev)) { set_param_l(&in_param, index); @@ -374,7 +374,7 @@ int __mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index) static int mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index) { - u64 param; + u64 param = 0; if (mlx4_is_mfunc(dev)) { set_param_l(¶m, index); @@ -395,7 +395,7 @@ void __mlx4_mpt_free_icm(struct mlx4_dev *dev, u32 index) static void mlx4_mpt_free_icm(struct mlx4_dev *dev, u32 index) { - u64 in_param; + u64 in_param = 0; if (mlx4_is_mfunc(dev)) { set_param_l(&in_param, index); diff --git a/drivers/net/ethernet/mellanox/mlx4/pd.c b/drivers/net/ethernet/mellanox/mlx4/pd.c index 1ac88637ad9d..00f223acada7 100644 --- a/drivers/net/ethernet/mellanox/mlx4/pd.c +++ b/drivers/net/ethernet/mellanox/mlx4/pd.c @@ -101,7 +101,7 @@ void __mlx4_xrcd_free(struct mlx4_dev *dev, u32 xrcdn) void mlx4_xrcd_free(struct mlx4_dev 
*dev, u32 xrcdn) { - u64 in_param; + u64 in_param = 0; int err; if (mlx4_is_mfunc(dev)) { diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c index 719ead15e491..10c57c86388b 100644 --- a/drivers/net/ethernet/mellanox/mlx4/port.c +++ b/drivers/net/ethernet/mellanox/mlx4/port.c @@ -175,7 +175,7 @@ EXPORT_SYMBOL_GPL(__mlx4_register_mac); int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac) { - u64 out_param; + u64 out_param = 0; int err; if (mlx4_is_mfunc(dev)) { @@ -222,7 +222,7 @@ EXPORT_SYMBOL_GPL(__mlx4_unregister_mac); void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac) { - u64 out_param; + u64 out_param = 0; if (mlx4_is_mfunc(dev)) { set_param_l(&out_param, port); @@ -361,7 +361,7 @@ out: int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index) { - u64 out_param; + u64 out_param = 0; int err; if (mlx4_is_mfunc(dev)) { @@ -406,7 +406,7 @@ out: void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index) { - u64 in_param; + u64 in_param = 0; int err; if (mlx4_is_mfunc(dev)) { diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c index 81e2abe07bbb..e891b058c1be 100644 --- a/drivers/net/ethernet/mellanox/mlx4/qp.c +++ b/drivers/net/ethernet/mellanox/mlx4/qp.c @@ -222,7 +222,7 @@ int __mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int *base) { - u64 in_param; + u64 in_param = 0; u64 out_param; int err; @@ -255,7 +255,7 @@ void __mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt) void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt) { - u64 in_param; + u64 in_param = 0; int err; if (mlx4_is_mfunc(dev)) { @@ -319,7 +319,7 @@ err_out: static int mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn) { - u64 param; + u64 param = 0; if (mlx4_is_mfunc(dev)) { set_param_l(¶m, qpn); @@ -344,7 +344,7 @@ void __mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn) static void mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn) { - u64 in_param; + u64 in_param = 0; if (mlx4_is_mfunc(dev)) { set_param_l(&in_param, qpn); diff --git a/drivers/net/ethernet/mellanox/mlx4/srq.c b/drivers/net/ethernet/mellanox/mlx4/srq.c index feda6c00829f..e329fe1f11b7 100644 --- a/drivers/net/ethernet/mellanox/mlx4/srq.c +++ b/drivers/net/ethernet/mellanox/mlx4/srq.c @@ -149,7 +149,7 @@ void __mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn) static void mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn) { - u64 in_param; + u64 in_param = 0; if (mlx4_is_mfunc(dev)) { set_param_l(&in_param, srqn); -- cgit v1.2.3 From bfa8ab47415a87c6c93a9e54e16f2f8cc6de79af Mon Sep 17 00:00:00 2001 From: Yan Burman Date: Thu, 7 Mar 2013 03:46:55 +0000 Subject: net/mlx4_en: Fix race when setting the device MAC address Remove unnecessary use of workqueue for the device MAC address setting flow, and fix a race when setting MAC address which was introduced by commit c07cb4b0a "net/mlx4_en: Manage hash of MAC addresses per port" The race happened when mlx4_en_replace_mac was being executed in parallel with a successive call to ndo_set_mac_address, e.g witn an A/B/A MAC setting configuration test, the third set fails. With this change we also properly report an error if set MAC fails. Signed-off-by: Yan Burman Signed-off-by: Or Gerlitz Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 42 +++++++++++++------------- drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 1 - 2 files changed, 21 insertions(+), 22 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index bb4d8d99f36d..217e618fd712 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -650,28 +650,10 @@ u64 mlx4_en_mac_to_u64(u8 *addr) return mac; } -static int mlx4_en_set_mac(struct net_device *dev, void *addr) +static int mlx4_en_do_set_mac(struct mlx4_en_priv *priv) { - struct mlx4_en_priv *priv = netdev_priv(dev); - struct mlx4_en_dev *mdev = priv->mdev; - struct sockaddr *saddr = addr; - - if (!is_valid_ether_addr(saddr->sa_data)) - return -EADDRNOTAVAIL; - - memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN); - queue_work(mdev->workqueue, &priv->mac_task); - return 0; -} - -static void mlx4_en_do_set_mac(struct work_struct *work) -{ - struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv, - mac_task); - struct mlx4_en_dev *mdev = priv->mdev; int err = 0; - mutex_lock(&mdev->state_lock); if (priv->port_up) { /* Remove old MAC and insert the new one */ err = mlx4_en_replace_mac(priv, priv->base_qpn, @@ -683,7 +665,26 @@ static void mlx4_en_do_set_mac(struct work_struct *work) } else en_dbg(HW, priv, "Port is down while registering mac, exiting...\n"); + return err; +} + +static int mlx4_en_set_mac(struct net_device *dev, void *addr) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = priv->mdev; + struct sockaddr *saddr = addr; + int err; + + if (!is_valid_ether_addr(saddr->sa_data)) + return -EADDRNOTAVAIL; + + memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN); + + mutex_lock(&mdev->state_lock); + err = mlx4_en_do_set_mac(priv); mutex_unlock(&mdev->state_lock); + + return err; } static void mlx4_en_clear_list(struct net_device *dev) @@ -1348,7 +1349,7 @@ static void mlx4_en_do_get_stats(struct work_struct *work) queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY); } if (mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port]) { - queue_work(mdev->workqueue, &priv->mac_task); + mlx4_en_do_set_mac(priv); mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port] = 0; } mutex_unlock(&mdev->state_lock); @@ -2078,7 +2079,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, priv->msg_enable = MLX4_EN_MSG_LEVEL; spin_lock_init(&priv->stats_lock); INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode); - INIT_WORK(&priv->mac_task, mlx4_en_do_set_mac); INIT_WORK(&priv->watchdog_task, mlx4_en_restart); INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate); INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats); diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index c313d7e943a9..f710b7ce0dcb 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h @@ -509,7 +509,6 @@ struct mlx4_en_priv { struct mlx4_en_cq rx_cq[MAX_RX_RINGS]; struct mlx4_qp drop_qp; struct work_struct rx_mode_task; - struct work_struct mac_task; struct work_struct watchdog_task; struct work_struct linkstate_task; struct delayed_work stats_task; -- cgit v1.2.3 From 83a5a6cef40616d19a388f560447e99c2ca04d1e Mon Sep 17 00:00:00 2001 From: Yan Burman Date: Thu, 7 Mar 2013 03:46:56 +0000 Subject: net/mlx4_en: Cleanup MAC resources on module unload or port stop Make sure we cleanup all MAC related resources 
(entries in the port MAC table and steering rules) when stopping a port or when the driver is unloaded. The leak was introduced by commit 07cb4b0a "net/mlx4_en: Manage hash of MAC addresses per port". Signed-off-by: Yan Burman Signed-off-by: Or Gerlitz Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 36 ++++++++++++++------------ 1 file changed, 20 insertions(+), 16 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 217e618fd712..7fd0936967c4 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -565,34 +565,38 @@ static void mlx4_en_put_qp(struct mlx4_en_priv *priv) struct mlx4_en_dev *mdev = priv->mdev; struct mlx4_dev *dev = mdev->dev; int qpn = priv->base_qpn; - u64 mac = mlx4_en_mac_to_u64(priv->dev->dev_addr); - - en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n", - priv->dev->dev_addr); - mlx4_unregister_mac(dev, priv->port, mac); + u64 mac; - if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) { + if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) { + mac = mlx4_en_mac_to_u64(priv->dev->dev_addr); + en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n", + priv->dev->dev_addr); + mlx4_unregister_mac(dev, priv->port, mac); + } else { struct mlx4_mac_entry *entry; struct hlist_node *tmp; struct hlist_head *bucket; - unsigned int mac_hash; + unsigned int i; - mac_hash = priv->dev->dev_addr[MLX4_EN_MAC_HASH_IDX]; - bucket = &priv->mac_hash[mac_hash]; - hlist_for_each_entry_safe(entry, tmp, bucket, hlist) { - if (ether_addr_equal_64bits(entry->mac, - priv->dev->dev_addr)) { - en_dbg(DRV, priv, "Releasing qp: port %d, MAC %pM, qpn %d\n", - priv->port, priv->dev->dev_addr, qpn); + for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) { + bucket = &priv->mac_hash[i]; + hlist_for_each_entry_safe(entry, tmp, bucket, hlist) { + mac = mlx4_en_mac_to_u64(entry->mac); + en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n", + entry->mac); mlx4_en_uc_steer_release(priv, entry->mac, qpn, entry->reg_id); - mlx4_qp_release_range(dev, qpn, 1); + mlx4_unregister_mac(dev, priv->port, mac); hlist_del_rcu(&entry->hlist); kfree_rcu(entry, rcu); - break; } } + + en_dbg(DRV, priv, "Releasing qp: port %d, qpn %d\n", + priv->port, qpn); + mlx4_qp_release_range(dev, qpn, 1); + priv->flags &= ~MLX4_EN_FLAG_FORCE_PROMISC; } } -- cgit v1.2.3 From a229e488ac3f904d06c20d8d3f47831db3c7a15a Mon Sep 17 00:00:00 2001 From: Amir Vadai Date: Thu, 7 Mar 2013 03:46:57 +0000 Subject: net/mlx4_en: Disable RFS when running in SRIOV mode Commit 37706996 "mlx4_en: fix allocation of CPU affinity reverse-map" fixed a bug when mlx4_dev->caps.comp_pool is larger from the device rx rings, but introduced a regression. When the mlx4_core is activating its "legacy mode" (e.g when running in SRIOV mode) w.r.t to EQs/IRQs usage, comp_pool becomes zero and we're crashing on divide by zero alloc_cpu_rmap. Fix that by enabling RFS only when running in non-legacy mode. Reported-by: Yan Burman Cc: Kleber Sacilotto de Souza Signed-off-by: Amir Vadai Signed-off-by: Or Gerlitz Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 7fd0936967c4..995d4b6d5c1e 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -1833,9 +1833,11 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv) } #ifdef CONFIG_RFS_ACCEL - priv->dev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->mdev->dev->caps.comp_pool); - if (!priv->dev->rx_cpu_rmap) - goto err; + if (priv->mdev->dev->caps.comp_pool) { + priv->dev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->mdev->dev->caps.comp_pool); + if (!priv->dev->rx_cpu_rmap) + goto err; + } #endif return 0; -- cgit v1.2.3 From e4fabf2b6e6d75752d5eede57f23ff8e9c6aa09b Mon Sep 17 00:00:00 2001 From: Bhavesh Davda Date: Wed, 6 Mar 2013 12:04:53 +0000 Subject: vmxnet3: prevent div-by-zero panic when ring resizing uninitialized dev Linux is free to call ethtool ops as soon as a netdev exists when probe finishes. However, we only allocate vmxnet3 tx/rx queues and initialize the rx_buf_per_pkt field in struct vmxnet3_adapter when the interface is opened (UP). Signed-off-by: Bhavesh Davda Signed-off-by: Shreyas N Bhatewara Signed-off-by: David S. Miller --- drivers/net/vmxnet3/vmxnet3_drv.c | 1 + drivers/net/vmxnet3/vmxnet3_ethtool.c | 6 ++++++ drivers/net/vmxnet3/vmxnet3_int.h | 4 ++-- 3 files changed, 9 insertions(+), 2 deletions(-) diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index 4aad350e4dae..eae7a03d4f9b 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c @@ -2958,6 +2958,7 @@ vmxnet3_probe_device(struct pci_dev *pdev, adapter->num_rx_queues = num_rx_queues; adapter->num_tx_queues = num_tx_queues; + adapter->rx_buf_per_pkt = 1; size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues; size += sizeof(struct Vmxnet3_RxQueueDesc) * adapter->num_rx_queues; diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c index a0feb17a0238..63a124340cbe 100644 --- a/drivers/net/vmxnet3/vmxnet3_ethtool.c +++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c @@ -472,6 +472,12 @@ vmxnet3_set_ringparam(struct net_device *netdev, VMXNET3_RX_RING_MAX_SIZE) return -EINVAL; + /* if adapter not yet initialized, do nothing */ + if (adapter->rx_buf_per_pkt == 0) { + netdev_err(netdev, "adapter not completely initialized, " + "ring size cannot be changed yet\n"); + return -EOPNOTSUPP; + } /* round it up to a multiple of VMXNET3_RING_SIZE_ALIGN */ new_tx_ring_size = (param->tx_pending + VMXNET3_RING_SIZE_MASK) & diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h index 3198384689d9..35418146fa17 100644 --- a/drivers/net/vmxnet3/vmxnet3_int.h +++ b/drivers/net/vmxnet3/vmxnet3_int.h @@ -70,10 +70,10 @@ /* * Version numbers */ -#define VMXNET3_DRIVER_VERSION_STRING "1.1.29.0-k" +#define VMXNET3_DRIVER_VERSION_STRING "1.1.30.0-k" /* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */ -#define VMXNET3_DRIVER_VERSION_NUM 0x01011D00 +#define VMXNET3_DRIVER_VERSION_NUM 0x01011E00 #if defined(CONFIG_PCI_MSI) /* RSS only makes sense if MSI-X is supported. 
*/ -- cgit v1.2.3 From 9cb6cb7ed11cd3b69c47bb414983603a6ff20b1d Mon Sep 17 00:00:00 2001 From: Zang MingJie Date: Wed, 6 Mar 2013 04:37:37 +0000 Subject: vxlan: fix oops when delete netns containing vxlan The following script will produce a kernel oops: sudo ip netns add v sudo ip netns exec v ip ad add 127.0.0.1/8 dev lo sudo ip netns exec v ip link set lo up sudo ip netns exec v ip ro add 224.0.0.0/4 dev lo sudo ip netns exec v ip li add vxlan0 type vxlan id 42 group 239.1.1.1 dev lo sudo ip netns exec v ip link set vxlan0 up sudo ip netns del v where inspect by gdb: Program received signal SIGSEGV, Segmentation fault. [Switching to Thread 107] 0xffffffffa0289e33 in ?? () (gdb) bt #0 vxlan_leave_group (dev=0xffff88001bafa000) at drivers/net/vxlan.c:533 #1 vxlan_stop (dev=0xffff88001bafa000) at drivers/net/vxlan.c:1087 #2 0xffffffff812cc498 in __dev_close_many (head=head@entry=0xffff88001f2e7dc8) at net/core/dev.c:1299 #3 0xffffffff812cd920 in dev_close_many (head=head@entry=0xffff88001f2e7dc8) at net/core/dev.c:1335 #4 0xffffffff812cef31 in rollback_registered_many (head=head@entry=0xffff88001f2e7dc8) at net/core/dev.c:4851 #5 0xffffffff812cf040 in unregister_netdevice_many (head=head@entry=0xffff88001f2e7dc8) at net/core/dev.c:5752 #6 0xffffffff812cf1ba in default_device_exit_batch (net_list=0xffff88001f2e7e18) at net/core/dev.c:6170 #7 0xffffffff812cab27 in cleanup_net (work=) at net/core/net_namespace.c:302 #8 0xffffffff810540ef in process_one_work (worker=0xffff88001ba9ed40, work=0xffffffff8167d020) at kernel/workqueue.c:2157 #9 0xffffffff810549d0 in worker_thread (__worker=__worker@entry=0xffff88001ba9ed40) at kernel/workqueue.c:2276 #10 0xffffffff8105870c in kthread (_create=0xffff88001f2e5d68) at kernel/kthread.c:168 #11 #12 0x0000000000000000 in ?? () #13 0x0000000000000000 in ?? () (gdb) fr 0 #0 vxlan_leave_group (dev=0xffff88001bafa000) at drivers/net/vxlan.c:533 533 struct sock *sk = vn->sock->sk; (gdb) l 528 static int vxlan_leave_group(struct net_device *dev) 529 { 530 struct vxlan_dev *vxlan = netdev_priv(dev); 531 struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id); 532 int err = 0; 533 struct sock *sk = vn->sock->sk; 534 struct ip_mreqn mreq = { 535 .imr_multiaddr.s_addr = vxlan->gaddr, 536 .imr_ifindex = vxlan->link, 537 }; (gdb) p vn->sock $4 = (struct socket *) 0x0 The kernel calls `vxlan_exit_net` when deleting the netns before shutting down vxlan interfaces. Later the removal of all vxlan interfaces, where `vn->sock` is already gone causes the oops. so we should manually shutdown all interfaces before deleting `vn->sock` as the patch does. Signed-off-by: Zang MingJie Signed-off-by: David S. 
Miller --- drivers/net/vxlan.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index c3e3d2929ee3..7cee7a3068ec 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -1506,6 +1506,14 @@ static __net_init int vxlan_init_net(struct net *net) static __net_exit void vxlan_exit_net(struct net *net) { struct vxlan_net *vn = net_generic(net, vxlan_net_id); + struct vxlan_dev *vxlan; + unsigned h; + + rtnl_lock(); + for (h = 0; h < VNI_HASH_SIZE; ++h) + hlist_for_each_entry(vxlan, &vn->vni_list[h], hlist) + dev_close(vxlan->dev); + rtnl_unlock(); if (vn->sock) { sk_release_kernel(vn->sock->sk); -- cgit v1.2.3 From 80028ea1c0afc24d4ddeb8dd2a9992fff03616ca Mon Sep 17 00:00:00 2001 From: Veaceslav Falico Date: Wed, 6 Mar 2013 07:10:32 +0000 Subject: bonding: fire NETDEV_RELEASE event only on 0 slaves Currently, if we set up netconsole over bonding and release a slave, netconsole will stop logging on the whole bonding device. Change the behavior to stop the netconsole only when the last slave is released. Signed-off-by: Veaceslav Falico Acked-by: Neil Horman Signed-off-by: David S. Miller --- drivers/net/bonding/bond_main.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 7bd068a6056a..8b4e96e01d6c 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -1964,7 +1964,6 @@ static int __bond_release_one(struct net_device *bond_dev, } block_netpoll_tx(); - call_netdevice_notifiers(NETDEV_RELEASE, bond_dev); write_lock_bh(&bond->lock); slave = bond_get_slave_by_dev(bond, slave_dev); @@ -2066,8 +2065,10 @@ static int __bond_release_one(struct net_device *bond_dev, write_unlock_bh(&bond->lock); unblock_netpoll_tx(); - if (bond->slave_cnt == 0) + if (bond->slave_cnt == 0) { call_netdevice_notifiers(NETDEV_CHANGEADDR, bond->dev); + call_netdevice_notifiers(NETDEV_RELEASE, bond->dev); + } bond_compute_features(bond); if (!(bond_dev->features & NETIF_F_VLAN_CHALLENGED) && -- cgit v1.2.3 From 260055bb1f1f8b5328601816c50fd7e0dfda7dff Mon Sep 17 00:00:00 2001 From: Phil Sutter Date: Wed, 6 Mar 2013 07:49:02 +0000 Subject: mv643xx_eth: fix for disabled autoneg When autoneg has been disabled in the PHY (Marvell 88E1118 here), auto negotiation between MAC and PHY seem non-functional anymore. The only way I found to workaround this is to manually configure the MAC with the settings sent to the PHY earlier. Signed-off-by: Phil Sutter Signed-off-by: David S. 
Miller --- drivers/net/ethernet/marvell/mv643xx_eth.c | 55 +++++++++++++++++++++++++++--- 1 file changed, 51 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index 29140502b71a..6562c736a1d8 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c @@ -1081,6 +1081,45 @@ static void txq_set_fixed_prio_mode(struct tx_queue *txq) /* mii management interface *************************************************/ +static void mv643xx_adjust_pscr(struct mv643xx_eth_private *mp) +{ + u32 pscr = rdlp(mp, PORT_SERIAL_CONTROL); + u32 autoneg_disable = FORCE_LINK_PASS | + DISABLE_AUTO_NEG_SPEED_GMII | + DISABLE_AUTO_NEG_FOR_FLOW_CTRL | + DISABLE_AUTO_NEG_FOR_DUPLEX; + + if (mp->phy->autoneg == AUTONEG_ENABLE) { + /* enable auto negotiation */ + pscr &= ~autoneg_disable; + goto out_write; + } + + pscr |= autoneg_disable; + + if (mp->phy->speed == SPEED_1000) { + /* force gigabit, half duplex not supported */ + pscr |= SET_GMII_SPEED_TO_1000; + pscr |= SET_FULL_DUPLEX_MODE; + goto out_write; + } + + pscr &= ~SET_GMII_SPEED_TO_1000; + + if (mp->phy->speed == SPEED_100) + pscr |= SET_MII_SPEED_TO_100; + else + pscr &= ~SET_MII_SPEED_TO_100; + + if (mp->phy->duplex == DUPLEX_FULL) + pscr |= SET_FULL_DUPLEX_MODE; + else + pscr &= ~SET_FULL_DUPLEX_MODE; + +out_write: + wrlp(mp, PORT_SERIAL_CONTROL, pscr); +} + static irqreturn_t mv643xx_eth_err_irq(int irq, void *dev_id) { struct mv643xx_eth_shared_private *msp = dev_id; @@ -1499,6 +1538,7 @@ static int mv643xx_eth_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) { struct mv643xx_eth_private *mp = netdev_priv(dev); + int ret; if (mp->phy == NULL) return -EINVAL; @@ -1508,7 +1548,10 @@ mv643xx_eth_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) */ cmd->advertising &= ~ADVERTISED_1000baseT_Half; - return phy_ethtool_sset(mp->phy, cmd); + ret = phy_ethtool_sset(mp->phy, cmd); + if (!ret) + mv643xx_adjust_pscr(mp); + return ret; } static void mv643xx_eth_get_drvinfo(struct net_device *dev, @@ -2442,11 +2485,15 @@ static int mv643xx_eth_stop(struct net_device *dev) static int mv643xx_eth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { struct mv643xx_eth_private *mp = netdev_priv(dev); + int ret; - if (mp->phy != NULL) - return phy_mii_ioctl(mp->phy, ifr, cmd); + if (mp->phy == NULL) + return -ENOTSUPP; - return -EOPNOTSUPP; + ret = phy_mii_ioctl(mp->phy, ifr, cmd); + if (!ret) + mv643xx_adjust_pscr(mp); + return ret; } static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu) -- cgit v1.2.3 From 0c1233aba1e948c37f6dc7620cb7c253fcd71ce9 Mon Sep 17 00:00:00 2001 From: Paul Moore Date: Wed, 6 Mar 2013 11:45:24 +0000 Subject: netlabel: correctly list all the static label mappings When we have a large number of static label mappings that spill across the netlink message boundary we fail to properly save our state in the netlink_callback struct which causes us to repeat the same listings. This patch fixes this problem by saving the state correctly between calls to the NetLabel static label netlink "dumpit" routines. Signed-off-by: Paul Moore Signed-off-by: David S. 
Miller --- net/netlabel/netlabel_unlabeled.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c index 847d495cd4de..85742b76eb3e 100644 --- a/net/netlabel/netlabel_unlabeled.c +++ b/net/netlabel/netlabel_unlabeled.c @@ -1250,10 +1250,10 @@ static int netlbl_unlabel_staticlist(struct sk_buff *skb, unlabel_staticlist_return: rcu_read_unlock(); - cb->args[0] = skip_bkt; - cb->args[1] = skip_chain; - cb->args[2] = skip_addr4; - cb->args[3] = skip_addr6; + cb->args[0] = iter_bkt; + cb->args[1] = iter_chain; + cb->args[2] = iter_addr4; + cb->args[3] = iter_addr6; return skb->len; } @@ -1320,8 +1320,8 @@ static int netlbl_unlabel_staticlistdef(struct sk_buff *skb, unlabel_staticlistdef_return: rcu_read_unlock(); - cb->args[0] = skip_addr4; - cb->args[1] = skip_addr6; + cb->args[0] = iter_addr4; + cb->args[1] = iter_addr6; return skb->len; } -- cgit v1.2.3 From 195ca382ca253431cc02c82bca61126c8a7ae155 Mon Sep 17 00:00:00 2001 From: Sony Chacko Date: Wed, 6 Mar 2013 13:03:25 +0000 Subject: MAINTAINERS: Update qlcnic maintainers list Signed-off-by: Sony Chacko Signed-off-by: David S. Miller --- MAINTAINERS | 2 ++ 1 file changed, 2 insertions(+) diff --git a/MAINTAINERS b/MAINTAINERS index 95616582c728..c08411b27499 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -6412,6 +6412,8 @@ F: Documentation/networking/LICENSE.qla3xxx F: drivers/net/ethernet/qlogic/qla3xxx.* QLOGIC QLCNIC (1/10)Gb ETHERNET DRIVER +M: Rajesh Borundia +M: Shahed Shaikh M: Jitendra Kalsaria M: Sony Chacko M: linux-driver@qlogic.com -- cgit v1.2.3 From d0d79c3fd77abe39654c2e594149f1f9ef1eeb05 Mon Sep 17 00:00:00 2001 From: Junwei Zhang Date: Wed, 6 Mar 2013 20:48:47 +0000 Subject: afkey: fix a typo Signed-off-by: Martin Zhang Signed-off-by: David S. 
Miller --- net/key/af_key.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/net/key/af_key.c b/net/key/af_key.c index 556fdafdd1ea..8555f331ea60 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c @@ -2201,7 +2201,7 @@ static int pfkey_spdadd(struct sock *sk, struct sk_buff *skb, const struct sadb_ XFRM_POLICY_BLOCK : XFRM_POLICY_ALLOW); xp->priority = pol->sadb_x_policy_priority; - sa = ext_hdrs[SADB_EXT_ADDRESS_SRC-1], + sa = ext_hdrs[SADB_EXT_ADDRESS_SRC-1]; xp->family = pfkey_sadb_addr2xfrm_addr(sa, &xp->selector.saddr); if (!xp->family) { err = -EINVAL; @@ -2214,7 +2214,7 @@ static int pfkey_spdadd(struct sock *sk, struct sk_buff *skb, const struct sadb_ if (xp->selector.sport) xp->selector.sport_mask = htons(0xffff); - sa = ext_hdrs[SADB_EXT_ADDRESS_DST-1], + sa = ext_hdrs[SADB_EXT_ADDRESS_DST-1]; pfkey_sadb_addr2xfrm_addr(sa, &xp->selector.daddr); xp->selector.prefixlen_d = sa->sadb_address_prefixlen; @@ -2315,7 +2315,7 @@ static int pfkey_spddelete(struct sock *sk, struct sk_buff *skb, const struct sa memset(&sel, 0, sizeof(sel)); - sa = ext_hdrs[SADB_EXT_ADDRESS_SRC-1], + sa = ext_hdrs[SADB_EXT_ADDRESS_SRC-1]; sel.family = pfkey_sadb_addr2xfrm_addr(sa, &sel.saddr); sel.prefixlen_s = sa->sadb_address_prefixlen; sel.proto = pfkey_proto_to_xfrm(sa->sadb_address_proto); @@ -2323,7 +2323,7 @@ static int pfkey_spddelete(struct sock *sk, struct sk_buff *skb, const struct sa if (sel.sport) sel.sport_mask = htons(0xffff); - sa = ext_hdrs[SADB_EXT_ADDRESS_DST-1], + sa = ext_hdrs[SADB_EXT_ADDRESS_DST-1]; pfkey_sadb_addr2xfrm_addr(sa, &sel.daddr); sel.prefixlen_d = sa->sadb_address_prefixlen; sel.proto = pfkey_proto_to_xfrm(sa->sadb_address_proto); -- cgit v1.2.3 From c10cb5fc0fc9fa605e01f715118bde5ba5a98616 Mon Sep 17 00:00:00 2001 From: Christoph Paasch Date: Thu, 7 Mar 2013 02:34:33 +0000 Subject: Fix: sparse warning in inet_csk_prepare_forced_close In e337e24d66 (inet: Fix kmemleak in tcp_v4/6_syn_recv_sock and dccp_v4/6_request_recv_sock) I introduced the function inet_csk_prepare_forced_close, which does a call to bh_unlock_sock(). This produces a sparse-warning. This patch adds the missing __releases. Signed-off-by: Christoph Paasch Signed-off-by: David S. Miller --- net/ipv4/inet_connection_sock.c | 1 + 1 file changed, 1 insertion(+) diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index 7d1874be1df3..786d97aee751 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c @@ -735,6 +735,7 @@ EXPORT_SYMBOL(inet_csk_destroy_sock); * tcp/dccp_create_openreq_child(). */ void inet_csk_prepare_forced_close(struct sock *sk) + __releases(&sk->sk_lock.slock) { /* sk_clone_lock locked the socket and set refcnt to 2 */ bh_unlock_sock(sk); -- cgit v1.2.3 From fbca58a2242ef2b84049365786d501ee512aefcf Mon Sep 17 00:00:00 2001 From: Cong Wang Date: Thu, 7 Mar 2013 03:05:33 +0000 Subject: bridge: add missing vid to br_mdb_get() Obviously, vid should be considered when searching for multicast group. Cc: Vlad Yasevich Cc: Stephen Hemminger Cc: "David S. Miller" Signed-off-by: Cong Wang Acked-by: Vlad Yasevich Signed-off-by: David S. 
Miller --- net/bridge/br_device.c | 2 +- net/bridge/br_input.c | 2 +- net/bridge/br_multicast.c | 3 ++- net/bridge/br_private.h | 4 ++-- 4 files changed, 6 insertions(+), 5 deletions(-) diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c index d5f1d3fd4b28..314c73ed418f 100644 --- a/net/bridge/br_device.c +++ b/net/bridge/br_device.c @@ -66,7 +66,7 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev) goto out; } - mdst = br_mdb_get(br, skb); + mdst = br_mdb_get(br, skb, vid); if (mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) br_multicast_deliver(mdst, skb); else diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c index 480330151898..828e2bcc1f52 100644 --- a/net/bridge/br_input.c +++ b/net/bridge/br_input.c @@ -97,7 +97,7 @@ int br_handle_frame_finish(struct sk_buff *skb) if (is_broadcast_ether_addr(dest)) skb2 = skb; else if (is_multicast_ether_addr(dest)) { - mdst = br_mdb_get(br, skb); + mdst = br_mdb_get(br, skb, vid); if (mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) { if ((mdst && mdst->mglist) || br_multicast_is_router(br)) diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c index 10e6fce1bb62..923fbeaf7afd 100644 --- a/net/bridge/br_multicast.c +++ b/net/bridge/br_multicast.c @@ -132,7 +132,7 @@ static struct net_bridge_mdb_entry *br_mdb_ip6_get( #endif struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br, - struct sk_buff *skb) + struct sk_buff *skb, u16 vid) { struct net_bridge_mdb_htable *mdb = rcu_dereference(br->mdb); struct br_ip ip; @@ -144,6 +144,7 @@ struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br, return NULL; ip.proto = skb->protocol; + ip.vid = vid; switch (skb->protocol) { case htons(ETH_P_IP): diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index 6d314c4e6bcb..3cbf5beb3d4b 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h @@ -442,7 +442,7 @@ extern int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port, struct sk_buff *skb); extern struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br, - struct sk_buff *skb); + struct sk_buff *skb, u16 vid); extern void br_multicast_add_port(struct net_bridge_port *port); extern void br_multicast_del_port(struct net_bridge_port *port); extern void br_multicast_enable_port(struct net_bridge_port *port); @@ -504,7 +504,7 @@ static inline int br_multicast_rcv(struct net_bridge *br, } static inline struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br, - struct sk_buff *skb) + struct sk_buff *skb, u16 vid) { return NULL; } -- cgit v1.2.3 From ba81276b1a5e3cf0674cb0e6d9525e5ae0c98695 Mon Sep 17 00:00:00 2001 From: Vlad Yasevich Date: Thu, 7 Mar 2013 07:59:25 +0000 Subject: team: unsyc the devices addresses when port is removed When a team port is removed, unsync all devices addresses that may have been synched to the port devices. CC: Jiri Pirko Signed-off-by: Vlad Yasevich Acked-by: Jiri Pirko Signed-off-by: David S. 
Miller --- drivers/net/team/team.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index 05c5efe84591..bf3419297875 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -1138,6 +1138,8 @@ static int team_port_del(struct team *team, struct net_device *port_dev) netdev_upper_dev_unlink(port_dev, dev); team_port_disable_netpoll(port); vlan_vids_del_by_dev(port_dev, dev); + dev_uc_unsync(port_dev, dev); + dev_mc_unsync(port_dev, dev); dev_close(port_dev); team_port_leave(team, port); -- cgit v1.2.3 From 87ab7f6f2874f1115817e394a7ed2dea1c72549e Mon Sep 17 00:00:00 2001 From: Vlad Yasevich Date: Thu, 7 Mar 2013 10:21:48 +0000 Subject: macvlan: Set IFF_UNICAST_FLT flag to prevent unnecessary promisc mode. Macvlan already supports hw address filters. Set the IFF_UNICAST_FLT flag so that it doesn't needlessly enter PROMISC mode when macvlans are stacked. Signed-off-by: Vlad Yasevich Signed-off-by: David S. Miller --- drivers/net/macvlan.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 417b2af1aa80..73abbc1655d5 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -660,6 +660,7 @@ void macvlan_common_setup(struct net_device *dev) ether_setup(dev); dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING); + dev->priv_flags |= IFF_UNICAST_FLT; dev->netdev_ops = &macvlan_netdev_ops; dev->destructor = free_netdev; dev->header_ops = &macvlan_hard_header_ops, -- cgit v1.2.3 From 5f3347e6e75768985a088d959c49fb66263087b6 Mon Sep 17 00:00:00 2001 From: Yaniv Rosner Date: Thu, 7 Mar 2013 13:27:33 +0000 Subject: bnx2x: Fix intermittent long KR2 link up time When a KR2 device is connected to a KR link-partner, sometimes it requires disabling KR2 for the link to come up. To get a KR2 link up later, in case no base pages are seen, the KR2 is restored. The problem was that some link partners cleared their advertised BP/NP after around two seconds, causing the driver to disable/enable the KR2 link all the time. The fix was to wait at least 5 seconds before checking KR2 recovery. Signed-off-by: Yaniv Rosner Signed-off-by: Eilon Greenstein Signed-off-by: David S.
Miller --- drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c | 11 +++++++++++ drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h | 3 ++- 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c index 31c5787970db..00ac4932f4c4 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c @@ -12527,6 +12527,7 @@ int bnx2x_phy_init(struct link_params *params, struct link_vars *vars) vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE; vars->mac_type = MAC_TYPE_NONE; vars->phy_flags = 0; + vars->check_kr2_recovery_cnt = 0; /* Driver opens NIG-BRB filters */ bnx2x_set_rx_filter(params, 1); /* Check if link flap can be avoided */ @@ -13411,6 +13412,7 @@ static void bnx2x_disable_kr2(struct link_params *params, vars->link_attr_sync &= ~LINK_ATTR_SYNC_KR2_ENABLE; bnx2x_update_link_attr(params, vars->link_attr_sync); + vars->check_kr2_recovery_cnt = CHECK_KR2_RECOVERY_CNT; /* Restart AN on leading lane */ bnx2x_warpcore_restart_AN_KR(phy, params); } @@ -13439,6 +13441,15 @@ static void bnx2x_check_kr2_wa(struct link_params *params, return; } + /* Once KR2 was disabled, wait 5 seconds before checking KR2 recovery + * since some switches tend to reinit the AN process and clear the + * advertised BP/NP after ~2 seconds causing the KR2 to be disabled + * and recovered many times + */ + if (vars->check_kr2_recovery_cnt > 0) { + vars->check_kr2_recovery_cnt--; + return; + } lane = bnx2x_get_warpcore_lane(phy, params); CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK, MDIO_AER_BLOCK_AER_REG, lane); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h index be5c195d03dd..f92fcf71be04 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h @@ -342,7 +342,8 @@ struct link_vars { u32 link_status; u32 eee_status; u8 fault_detected; - u8 rsrv1; + u8 check_kr2_recovery_cnt; +#define CHECK_KR2_RECOVERY_CNT 5 u16 periodic_flags; #define PERIODIC_FLAGS_LINK_EVENT 0x0001 -- cgit v1.2.3 From d9169323308a63fdd967920b9c63a00394ae7c85 Mon Sep 17 00:00:00 2001 From: Yaniv Rosner Date: Thu, 7 Mar 2013 13:27:34 +0000 Subject: bnx2x: Fix SFP+ misconfiguration in iSCSI boot scenario Fix a problem in which iSCSI-boot installation fails when switching SFP+ boot port and moving the SFP+ module prior to boot. The SFP+ insertion triggers an interrupt which configures the SFP+ module wrongly before interface is loaded. Signed-off-by: Yaniv Rosner Signed-off-by: Eilon Greenstein Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c | 6 +++++- drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h | 1 + 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c index 00ac4932f4c4..77ebae0ac64a 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c @@ -8647,7 +8647,9 @@ void bnx2x_handle_module_detect_int(struct link_params *params) MDIO_WC_DEVAD, MDIO_WC_REG_DIGITAL5_MISC6, &rx_tx_in_reset); - if (!rx_tx_in_reset) { + if ((!rx_tx_in_reset) && + (params->link_flags & + PHY_INITIALIZED)) { bnx2x_warpcore_reset_lane(bp, phy, 1); bnx2x_warpcore_config_sfi(phy, params); bnx2x_warpcore_reset_lane(bp, phy, 0); @@ -12528,6 +12530,7 @@ int bnx2x_phy_init(struct link_params *params, struct link_vars *vars) vars->mac_type = MAC_TYPE_NONE; vars->phy_flags = 0; vars->check_kr2_recovery_cnt = 0; + params->link_flags = PHY_INITIALIZED; /* Driver opens NIG-BRB filters */ bnx2x_set_rx_filter(params, 1); /* Check if link flap can be avoided */ @@ -12692,6 +12695,7 @@ int bnx2x_lfa_reset(struct link_params *params, struct bnx2x *bp = params->bp; vars->link_up = 0; vars->phy_flags = 0; + params->link_flags &= ~PHY_INITIALIZED; if (!params->lfa_base) return bnx2x_link_reset(params, vars, 1); /* diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h index f92fcf71be04..56c2aae4e2c8 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h @@ -309,6 +309,7 @@ struct link_params { req_flow_ctrl is set to AUTO */ u16 link_flags; #define LINK_FLAGS_INT_DISABLED (1<<0) +#define PHY_INITIALIZED (1<<1) u32 lfa_base; }; -- cgit v1.2.3 From 2e85d67690cf3ea3f074a6e872f675226883fe7f Mon Sep 17 00:00:00 2001 From: Chen Gang Date: Thu, 7 Mar 2013 17:19:32 +0000 Subject: net/rds: zero last byte for strncpy A NUL-terminated string must always be sure to end with '\0'. Additional info: strncpy pads with zeroes to the end of the given buffer, and every bit of memory that is going to be copied to userland should be initialised. Signed-off-by: Chen Gang Signed-off-by: Ben Hutchings Signed-off-by: David S. Miller --- net/rds/stats.c | 1 + 1 file changed, 1 insertion(+) diff --git a/net/rds/stats.c b/net/rds/stats.c index 7be790d60b90..73be187d389e 100644 --- a/net/rds/stats.c +++ b/net/rds/stats.c @@ -87,6 +87,7 @@ void rds_stats_info_copy(struct rds_info_iterator *iter, for (i = 0; i < nr; i++) { BUG_ON(strlen(names[i]) >= sizeof(ctr.name)); strncpy(ctr.name, names[i], sizeof(ctr.name) - 1); + ctr.name[sizeof(ctr.name) - 1] = '\0'; ctr.value = values[i]; rds_info_copy(iter, &ctr, sizeof(ctr)); -- cgit v1.2.3 From f39479363e0361c8bb4397481c01a7c3a1a3c8ac Mon Sep 17 00:00:00 2001 From: Chen Gang Date: Thu, 7 Mar 2013 18:25:41 +0000 Subject: drivers/isdn: check length to avoid memory overflow sizeof (cmd.parm.cmsg.para) is 50 (MAX_CAPI_PARA_LEN). sizeof (cmd.parm) is 80+, but less than 100. strlen(msg) may be more than 80+ (Modem-Commandbuffer, less than 255). isdn_tty_send_msg is called by isdn_tty_parse_at; the relevant parameter is m->mdmcmd (atemu *m), and the relevant command may be "+M...", so we need to check the length to make sure there is no memory overflow. cmd.parm is a union, and the original valid buffer length must be kept untouched. Signed-off-by: Chen Gang Signed-off-by: David S.
Miller --- drivers/isdn/i4l/isdn_tty.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c index d8a7d8323414..ebaebdf30f98 100644 --- a/drivers/isdn/i4l/isdn_tty.c +++ b/drivers/isdn/i4l/isdn_tty.c @@ -902,7 +902,9 @@ isdn_tty_send_msg(modem_info *info, atemu *m, char *msg) int j; int l; - l = strlen(msg); + l = min(strlen(msg), sizeof(cmd.parm) - sizeof(cmd.parm.cmsg) + + sizeof(cmd.parm.cmsg.para) - 2); + if (!l) { isdn_tty_modem_result(RESULT_ERROR, info); return; -- cgit v1.2.3 From a6a8fe950e1b8596bb06f2c89c3a1a4bf2011ba9 Mon Sep 17 00:00:00 2001 From: Paul Moore Date: Fri, 8 Mar 2013 09:45:39 -0500 Subject: netlabel: fix build problems when CONFIG_IPV6=n My last patch to solve a problem where the static/fallback labels were not fully displayed resulted in build problems when IPv6 was disabled. This patch resolves the IPv6 build problems; sorry for the screw-up. Please queue for -stable or simply merge with the previous patch. Reported-by: Kbuild Test Robot Signed-off-by: Paul Moore Signed-off-by: David S. Miller --- net/netlabel/netlabel_unlabeled.c | 15 +++++---------- 1 file changed, 5 insertions(+), 10 deletions(-) diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c index 85742b76eb3e..8a6c6ea466d8 100644 --- a/net/netlabel/netlabel_unlabeled.c +++ b/net/netlabel/netlabel_unlabeled.c @@ -1189,8 +1189,6 @@ static int netlbl_unlabel_staticlist(struct sk_buff *skb, struct netlbl_unlhsh_walk_arg cb_arg; u32 skip_bkt = cb->args[0]; u32 skip_chain = cb->args[1]; - u32 skip_addr4 = cb->args[2]; - u32 skip_addr6 = cb->args[3]; u32 iter_bkt; u32 iter_chain = 0, iter_addr4 = 0, iter_addr6 = 0; struct netlbl_unlhsh_iface *iface; @@ -1215,7 +1213,7 @@ static int netlbl_unlabel_staticlist(struct sk_buff *skb, continue; netlbl_af4list_foreach_rcu(addr4, &iface->addr4_list) { - if (iter_addr4++ < skip_addr4) + if (iter_addr4++ < cb->args[2]) continue; if (netlbl_unlabel_staticlist_gen( NLBL_UNLABEL_C_STATICLIST, @@ -1231,7 +1229,7 @@ static int netlbl_unlabel_staticlist(struct sk_buff *skb, #if IS_ENABLED(CONFIG_IPV6) netlbl_af6list_foreach_rcu(addr6, &iface->addr6_list) { - if (iter_addr6++ < skip_addr6) + if (iter_addr6++ < cb->args[3]) continue; if (netlbl_unlabel_staticlist_gen( NLBL_UNLABEL_C_STATICLIST, @@ -1273,12 +1271,9 @@ static int netlbl_unlabel_staticlistdef(struct sk_buff *skb, { struct netlbl_unlhsh_walk_arg cb_arg; struct netlbl_unlhsh_iface *iface; - u32 skip_addr4 = cb->args[0]; - u32 skip_addr6 = cb->args[1]; - u32 iter_addr4 = 0; + u32 iter_addr4 = 0, iter_addr6 = 0; struct netlbl_af4list *addr4; #if IS_ENABLED(CONFIG_IPV6) - u32 iter_addr6 = 0; struct netlbl_af6list *addr6; #endif @@ -1292,7 +1287,7 @@ static int netlbl_unlabel_staticlistdef(struct sk_buff *skb, goto unlabel_staticlistdef_return; netlbl_af4list_foreach_rcu(addr4, &iface->addr4_list) { - if (iter_addr4++ < skip_addr4) + if (iter_addr4++ < cb->args[0]) continue; if (netlbl_unlabel_staticlist_gen(NLBL_UNLABEL_C_STATICLISTDEF, iface, @@ -1305,7 +1300,7 @@ static int netlbl_unlabel_staticlistdef(struct sk_buff *skb, } #if IS_ENABLED(CONFIG_IPV6) netlbl_af6list_foreach_rcu(addr6, &iface->addr6_list) { - if (iter_addr6++ < skip_addr6) + if (iter_addr6++ < cb->args[1]) continue; if (netlbl_unlabel_staticlist_gen(NLBL_UNLABEL_C_STATICLISTDEF, iface, -- cgit v1.2.3 From 3bc1b1add7a8484cc4a261c3e128dbe1528ce01f Mon Sep 17 00:00:00 2001 From: Cristian Bercaru Date: Fri, 8 Mar 2013 07:03:38 +0000 Subject: bridging: 
fix rx_handlers return code The frames for which rx_handlers return RX_HANDLER_CONSUMED are no longer counted as dropped. They are counted as successfully received by 'netif_receive_skb'. This allows network interface drivers to correctly update their RX-OK and RX-DRP counters based on the result of 'netif_receive_skb'. Signed-off-by: Cristian Bercaru Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- net/core/dev.c | 1 + 1 file changed, 1 insertion(+) diff --git a/net/core/dev.c b/net/core/dev.c index 8f152f904f70..dffbef70cd31 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -3444,6 +3444,7 @@ ncls: } switch (rx_handler(&skb)) { case RX_HANDLER_CONSUMED: + ret = NET_RX_SUCCESS; goto unlock; case RX_HANDLER_ANOTHER: goto another_round; -- cgit v1.2.3 From ddf64354af4a702ee0b85d0a285ba74c7278a460 Mon Sep 17 00:00:00 2001 From: Hannes Frederic Sowa Date: Fri, 8 Mar 2013 02:07:23 +0000 Subject: ipv6: stop multicast forwarding to process interface scoped addresses v2: a) used struct ipv6_addr_props v3: a) reverted changes for ipv6_addr_props v4: a) do not use __ipv6_addr_needs_scope_id Cc: YOSHIFUJI Hideaki Signed-off-by: Hannes Frederic Sowa Acked-by: YOSHIFUJI Hideaki Signed-off-by: David S. Miller --- net/ipv6/ip6_input.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c index b1876e52091e..e33fe0ab2568 100644 --- a/net/ipv6/ip6_input.c +++ b/net/ipv6/ip6_input.c @@ -281,7 +281,8 @@ int ip6_mc_input(struct sk_buff *skb) * IPv6 multicast router mode is now supported ;) */ if (dev_net(skb->dev)->ipv6.devconf_all->mc_forwarding && - !(ipv6_addr_type(&hdr->daddr) & IPV6_ADDR_LINKLOCAL) && + !(ipv6_addr_type(&hdr->daddr) & + (IPV6_ADDR_LOOPBACK|IPV6_ADDR_LINKLOCAL)) && likely(!(IP6CB(skb)->flags & IP6SKB_FORWARDED))) { /* * Okay, we try to forward - split and duplicate -- cgit v1.2.3 From 84421b99cedc3443e76d2a594f3c815d5cb9a8e1 Mon Sep 17 00:00:00 2001 From: Nithin Sujir Date: Fri, 8 Mar 2013 08:01:24 +0000 Subject: tg3: Update link_up flag for phylib devices Commit f4a46d1f46a8fece34edd2023e054072b02e110d introduced a bug where the ifconfig stats would remain 0 for phylib devices. This is due to tp->link_up flag never becoming true causing tg3_periodic_fetch_stats() to return. The link_up flag was being updated in tg3_test_and_report_link_chg() after setting up the phy. This function however, is not called for phylib devices since the driver does not do the phy setup. This patch moves the link_up flag update into the common tg3_link_report() function that gets called for phylib devices as well for non phylib devices when the link state changes. To avoid updating link_up twice, we replace tg3_carrier_...() calls that are followed by tg3_link_report(), with netif_carrier_...(). We can then remove the unused tg3_carrier_on() function. CC: Reported-by: OGAWA Hirofumi Signed-off-by: Nithin Nayak Sujir Signed-off-by: Michael Chan Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/tg3.c | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index fdb9b5655414..93729f942358 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -1869,6 +1869,8 @@ static void tg3_link_report(struct tg3 *tp) tg3_ump_link_report(tp); } + + tp->link_up = netif_carrier_ok(tp->dev); } static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl) @@ -2522,12 +2524,6 @@ static int tg3_phy_reset_5703_4_5(struct tg3 *tp) return err; } -static void tg3_carrier_on(struct tg3 *tp) -{ - netif_carrier_on(tp->dev); - tp->link_up = true; -} - static void tg3_carrier_off(struct tg3 *tp) { netif_carrier_off(tp->dev); @@ -2553,7 +2549,7 @@ static int tg3_phy_reset(struct tg3 *tp) return -EBUSY; if (netif_running(tp->dev) && tp->link_up) { - tg3_carrier_off(tp); + netif_carrier_off(tp->dev); tg3_link_report(tp); } @@ -4262,9 +4258,9 @@ static bool tg3_test_and_report_link_chg(struct tg3 *tp, int curr_link_up) { if (curr_link_up != tp->link_up) { if (curr_link_up) { - tg3_carrier_on(tp); + netif_carrier_on(tp->dev); } else { - tg3_carrier_off(tp); + netif_carrier_off(tp->dev); if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; } -- cgit v1.2.3 From c085c49920b2f900ba716b4ca1c1a55ece9872cc Mon Sep 17 00:00:00 2001 From: Mathias Krause Date: Sat, 9 Mar 2013 05:52:19 +0000 Subject: bridge: fix mdb info leaks The bridging code discloses heap and stack bytes via the RTM_GETMDB netlink interface and via the notify messages sent to group RTNLGRP_MDB after a successful add/del. Fix both cases by initializing all unset members/padding bytes with memset(0). Cc: Stephen Hemminger Signed-off-by: Mathias Krause Signed-off-by: David S. Miller --- net/bridge/br_mdb.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c index 9f97b850fc65..ee79f3f20383 100644 --- a/net/bridge/br_mdb.c +++ b/net/bridge/br_mdb.c @@ -80,6 +80,7 @@ static int br_mdb_fill_info(struct sk_buff *skb, struct netlink_callback *cb, port = p->port; if (port) { struct br_mdb_entry e; + memset(&e, 0, sizeof(e)); e.ifindex = port->dev->ifindex; e.state = p->state; if (p->addr.proto == htons(ETH_P_IP)) @@ -136,6 +137,7 @@ static int br_mdb_dump(struct sk_buff *skb, struct netlink_callback *cb) break; bpm = nlmsg_data(nlh); + memset(bpm, 0, sizeof(*bpm)); bpm->ifindex = dev->ifindex; if (br_mdb_fill_info(skb, cb, dev) < 0) goto out; @@ -171,6 +173,7 @@ static int nlmsg_populate_mdb_fill(struct sk_buff *skb, return -EMSGSIZE; bpm = nlmsg_data(nlh); + memset(bpm, 0, sizeof(*bpm)); bpm->family = AF_BRIDGE; bpm->ifindex = dev->ifindex; nest = nla_nest_start(skb, MDBA_MDB); @@ -228,6 +231,7 @@ void br_mdb_notify(struct net_device *dev, struct net_bridge_port *port, { struct br_mdb_entry entry; + memset(&entry, 0, sizeof(entry)); entry.ifindex = port->dev->ifindex; entry.addr.proto = group->proto; entry.addr.u.ip4 = group->u.ip4; -- cgit v1.2.3 From 84d73cd3fb142bf1298a8c13fd4ca50fd2432372 Mon Sep 17 00:00:00 2001 From: Mathias Krause Date: Sat, 9 Mar 2013 05:52:20 +0000 Subject: rtnl: fix info leak on RTM_GETLINK request for VF devices Initialize the MAC address buffer with 0 as the driver specific function will probably not fill the whole buffer. In fact, all in-kernel drivers fill only ETH_ALEN of the MAX_ADDR_LEN bytes, i.e. 6 of the 32 possible bytes.
Therefore we currently leak 26 bytes of stack memory to userland via the netlink interface. Signed-off-by: Mathias Krause Signed-off-by: David S. Miller --- net/core/rtnetlink.c | 1 + 1 file changed, 1 insertion(+) diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index b376410ff259..a585d45cc9d9 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -979,6 +979,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev, * report anything. */ ivi.spoofchk = -1; + memset(ivi.mac, 0, sizeof(ivi.mac)); if (dev->netdev_ops->ndo_get_vf_config(dev, i, &ivi)) break; vf_mac.vf = -- cgit v1.2.3 From 29cd8ae0e1a39e239a3a7b67da1986add1199fc0 Mon Sep 17 00:00:00 2001 From: Mathias Krause Date: Sat, 9 Mar 2013 05:52:21 +0000 Subject: dcbnl: fix various netlink info leaks The dcb netlink interface leaks stack memory in various places: * perm_addr[] buffer is only filled at max with 12 of the 32 bytes but copied completely, * no in-kernel driver fills all fields of an IEEE 802.1Qaz subcommand, so we're leaking up to 58 bytes for ieee_ets structs, up to 136 bytes for ieee_pfc structs, etc., * the same is true for CEE -- no in-kernel driver fills the whole struct, Prevent all of the above stack info leaks by properly initializing the buffers/structures involved. Signed-off-by: Mathias Krause Signed-off-by: David S. Miller --- net/dcb/dcbnl.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c index 1b588e23cf80..21291f1abcd6 100644 --- a/net/dcb/dcbnl.c +++ b/net/dcb/dcbnl.c @@ -284,6 +284,7 @@ static int dcbnl_getperm_hwaddr(struct net_device *netdev, struct nlmsghdr *nlh, if (!netdev->dcbnl_ops->getpermhwaddr) return -EOPNOTSUPP; + memset(perm_addr, 0, sizeof(perm_addr)); netdev->dcbnl_ops->getpermhwaddr(netdev, perm_addr); return nla_put(skb, DCB_ATTR_PERM_HWADDR, sizeof(perm_addr), perm_addr); @@ -1042,6 +1043,7 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev) if (ops->ieee_getets) { struct ieee_ets ets; + memset(&ets, 0, sizeof(ets)); err = ops->ieee_getets(netdev, &ets); if (!err && nla_put(skb, DCB_ATTR_IEEE_ETS, sizeof(ets), &ets)) @@ -1050,6 +1052,7 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev) if (ops->ieee_getmaxrate) { struct ieee_maxrate maxrate; + memset(&maxrate, 0, sizeof(maxrate)); err = ops->ieee_getmaxrate(netdev, &maxrate); if (!err) { err = nla_put(skb, DCB_ATTR_IEEE_MAXRATE, @@ -1061,6 +1064,7 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev) if (ops->ieee_getpfc) { struct ieee_pfc pfc; + memset(&pfc, 0, sizeof(pfc)); err = ops->ieee_getpfc(netdev, &pfc); if (!err && nla_put(skb, DCB_ATTR_IEEE_PFC, sizeof(pfc), &pfc)) @@ -1094,6 +1098,7 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev) /* get peer info if available */ if (ops->ieee_peer_getets) { struct ieee_ets ets; + memset(&ets, 0, sizeof(ets)); err = ops->ieee_peer_getets(netdev, &ets); if (!err && nla_put(skb, DCB_ATTR_IEEE_PEER_ETS, sizeof(ets), &ets)) @@ -1102,6 +1107,7 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev) if (ops->ieee_peer_getpfc) { struct ieee_pfc pfc; + memset(&pfc, 0, sizeof(pfc)); err = ops->ieee_peer_getpfc(netdev, &pfc); if (!err && nla_put(skb, DCB_ATTR_IEEE_PEER_PFC, sizeof(pfc), &pfc)) @@ -1280,6 +1286,7 @@ static int dcbnl_cee_fill(struct sk_buff *skb, struct net_device *netdev) /* peer info if available */ if (ops->cee_peer_getpg) { struct cee_pg pg; + memset(&pg, 0, sizeof(pg)); err = 
ops->cee_peer_getpg(netdev, &pg); if (!err && nla_put(skb, DCB_ATTR_CEE_PEER_PG, sizeof(pg), &pg)) @@ -1288,6 +1295,7 @@ static int dcbnl_cee_fill(struct sk_buff *skb, struct net_device *netdev) if (ops->cee_peer_getpfc) { struct cee_pfc pfc; + memset(&pfc, 0, sizeof(pfc)); err = ops->cee_peer_getpfc(netdev, &pfc); if (!err && nla_put(skb, DCB_ATTR_CEE_PEER_PFC, sizeof(pfc), &pfc)) -- cgit v1.2.3 From fef4c86e59a76f2ec1a77d5732f40752700bd5dd Mon Sep 17 00:00:00 2001 From: David Oostdyk Date: Fri, 8 Mar 2013 08:28:15 +0000 Subject: rrunner.c: fix possible memory leak in rr_init_one() In the event that register_netdev() failed, the rrpriv->evt_ring allocation would have not been freed. Signed-off-by: David Oostdyk Signed-off-by: David S. Miller --- drivers/net/hippi/rrunner.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/net/hippi/rrunner.c b/drivers/net/hippi/rrunner.c index e5b19b056909..3c4d6274bb9b 100644 --- a/drivers/net/hippi/rrunner.c +++ b/drivers/net/hippi/rrunner.c @@ -202,6 +202,9 @@ static int rr_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) return 0; out: + if (rrpriv->evt_ring) + pci_free_consistent(pdev, EVT_RING_SIZE, rrpriv->evt_ring, + rrpriv->evt_ring_dma); if (rrpriv->rx_ring) pci_free_consistent(pdev, RX_TOTAL_SIZE, rrpriv->rx_ring, rrpriv->rx_ring_dma); -- cgit v1.2.3 From 9026c4927254f5bea695cc3ef2e255280e6a3011 Mon Sep 17 00:00:00 2001 From: YOSHIFUJI Hideaki / 吉藤英明 Date: Sat, 9 Mar 2013 09:11:57 +0000 Subject: 6lowpan: Fix endianness issue in is_addr_link_local(). Signed-off-by: YOSHIFUJI Hideaki Signed-off-by: David S. Miller --- net/ieee802154/6lowpan.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/ieee802154/6lowpan.h b/net/ieee802154/6lowpan.h index 8c2251fb0a3f..bba5f8336317 100644 --- a/net/ieee802154/6lowpan.h +++ b/net/ieee802154/6lowpan.h @@ -84,7 +84,7 @@ (memcmp(addr1, addr2, length >> 3) == 0) /* local link, i.e. FE80::/10 */ -#define is_addr_link_local(a) (((a)->s6_addr16[0]) == 0x80FE) +#define is_addr_link_local(a) (((a)->s6_addr16[0]) == htons(0xFE80)) /* * check whether we can compress the IID to 16 bits, -- cgit v1.2.3
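The bnx2x KR2 change at the top of this series is, at its core, a plain cooldown counter: after disabling KR2, the periodic workaround check is skipped for a fixed number of runs so the link does not flap while the peer re-advertises. A hedged user-space sketch of that shape follows; the names, period length, and main() harness are illustrative, not the bnx2x code.

```c
#include <stdio.h>

#define CHECK_KR2_RECOVERY_CNT 5	/* periodic runs to skip after a disable */

struct link_state {
	int kr2_enabled;
	int check_kr2_recovery_cnt;
};

static void disable_kr2(struct link_state *st)
{
	st->kr2_enabled = 0;
	/* Arm the cooldown so the next few checks do not immediately re-enable. */
	st->check_kr2_recovery_cnt = CHECK_KR2_RECOVERY_CNT;
}

static void check_kr2_workaround(struct link_state *st)
{
	if (st->check_kr2_recovery_cnt > 0) {
		st->check_kr2_recovery_cnt--;	/* still cooling down, skip */
		return;
	}
	st->kr2_enabled = 1;			/* recovery path would run here */
}

int main(void)
{
	struct link_state st = { .kr2_enabled = 1 };

	disable_kr2(&st);
	for (int i = 0; i < 7; i++) {
		check_kr2_workaround(&st);
		printf("run %d: kr2_enabled=%d cnt=%d\n",
		       i, st.kr2_enabled, st.check_kr2_recovery_cnt);
	}
	return 0;
}
```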
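The net/rds change earlier in this series follows a general rule: strncpy() does not NUL-terminate when the source is as long as or longer than the destination, so the last byte has to be forced to '\0', and zeroing the whole buffer first keeps uninitialized bytes from reaching userland. A minimal user-space sketch of that pattern, with an illustrative struct rather than the rds one:

```c
#include <stdio.h>
#include <string.h>

struct counter {
	char name[32];
	unsigned long long value;
};

static void fill_counter(struct counter *ctr, const char *name,
			 unsigned long long value)
{
	/* Zero the whole struct so padding and unused name bytes never carry
	 * stale memory to whoever this struct is later copied to. */
	memset(ctr, 0, sizeof(*ctr));
	/* Copy at most sizeof(name) - 1 bytes ... */
	strncpy(ctr->name, name, sizeof(ctr->name) - 1);
	/* ... and force termination; strncpy() does not add '\0' when the
	 * source fills the destination. */
	ctr->name[sizeof(ctr->name) - 1] = '\0';
	ctr->value = value;
}

int main(void)
{
	struct counter c;

	fill_counter(&c, "a-counter-name-much-longer-than-thirty-one-bytes", 42);
	printf("%s = %llu\n", c.name, c.value);
	return 0;
}
```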
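The bridge mdb, rtnl, and dcbnl patches all apply the same rule: a stack structure that is only partially filled must be zeroed before the whole thing is copied out, otherwise padding and unset fields leak old memory. The sketch below illustrates the rule in user space; vf_info, copy_out(), and the 6-byte MAC length mirror the rtnetlink case but are assumptions for illustration, not the kernel definitions.

```c
#include <stdio.h>
#include <string.h>

struct vf_info {
	unsigned int vf;
	unsigned char mac[32];	/* drivers typically set only 6 of these bytes */
	int spoofchk;
};

/* Stand-in for nla_put()/copy_to_user(): the *whole* struct goes out. */
static void copy_out(const void *buf, size_t len)
{
	const unsigned char *p = buf;

	for (size_t i = 0; i < len; i++)
		printf("%02x", p[i]);
	printf("\n");
}

int main(void)
{
	static const unsigned char hw_mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	struct vf_info ivi;

	/* Without this memset, mac[6..31] and any struct padding would be
	 * copied out with whatever happened to be on the stack before. */
	memset(&ivi, 0, sizeof(ivi));
	ivi.vf = 0;
	ivi.spoofchk = -1;
	memcpy(ivi.mac, hw_mac, sizeof(hw_mac));
	copy_out(&ivi, sizeof(ivi));
	return 0;
}
```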
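The 6lowpan fix is the usual endianness rule: a field stored in network byte order has to be compared against htons() of the host-order constant, never against a byte-swapped literal. A small sketch of the check, simplified to a straight 16-bit compare; the helper name and the main() harness are illustrative, not the kernel macro.

```c
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* The first 16 bits of an IPv6 address sit in memory in network (big-endian)
 * order, so the link-local test compares against htons(0xFE80), which gives
 * the right answer on both little- and big-endian hosts. */
static int is_addr_link_local(const uint8_t addr[16])
{
	uint16_t first;

	memcpy(&first, addr, sizeof(first));
	return first == htons(0xFE80);
}

int main(void)
{
	uint8_t ll[16]    = { 0xfe, 0x80 };	/* fe80::... */
	uint8_t other[16] = { 0x20, 0x01 };	/* 2001::... */

	printf("%d %d\n", is_addr_link_local(ll), is_addr_link_local(other));
	return 0;
}
```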