Diffstat (limited to 'drivers/net/ethernet/sfc/tx_common.c')
 drivers/net/ethernet/sfc/tx_common.c | 46 +++++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 43 insertions(+), 3 deletions(-)
diff --git a/drivers/net/ethernet/sfc/tx_common.c b/drivers/net/ethernet/sfc/tx_common.c
index 70876df1da69..2a058b76d1f0 100644
--- a/drivers/net/ethernet/sfc/tx_common.c
+++ b/drivers/net/ethernet/sfc/tx_common.c
@@ -10,7 +10,7 @@
#include "net_driver.h"
#include "efx.h"
-#include "nic.h"
+#include "nic_common.h"
#include "tx_common.h"
static unsigned int efx_tx_cb_page_count(struct efx_tx_queue *tx_queue)
@@ -311,6 +311,20 @@ struct efx_tx_buffer *efx_tx_map_chunk(struct efx_tx_queue *tx_queue,
 	return buffer;
 }
 
+int efx_tx_tso_header_length(struct sk_buff *skb)
+{
+	size_t header_len;
+
+	if (skb->encapsulation)
+		header_len = skb_inner_transport_header(skb) -
+				skb->data +
+				(inner_tcp_hdr(skb)->doff << 2u);
+	else
+		header_len = skb_transport_header(skb) - skb->data +
+				(tcp_hdr(skb)->doff << 2u);
+	return header_len;
+}
+
 /* Map all data from an SKB for DMA and create descriptors on the queue. */
 int efx_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
 		    unsigned int segment_count)
@@ -339,8 +353,7 @@ int efx_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
 		/* For TSO we need to put the header into a separate
 		 * descriptor. Map this separately if necessary.
 		 */
-		size_t header_len = skb_transport_header(skb) - skb->data +
-				    (tcp_hdr(skb)->doff << 2u);
+		size_t header_len = efx_tx_tso_header_length(skb);
 
 		if (header_len != len) {
 			tx_queue->tso_long_headers++;
@@ -405,3 +418,30 @@ unsigned int efx_tx_max_skb_descs(struct efx_nic *efx)
 
 	return max_descs;
 }
+
+/*
+ * Fallback to software TSO.
+ *
+ * This is used if we are unable to send a GSO packet through hardware TSO.
+ * This should only ever happen due to per-queue restrictions - unsupported
+ * packets should first be filtered by the feature flags.
+ *
+ * Returns 0 on success, error code otherwise.
+ */
+int efx_tx_tso_fallback(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
+{
+	struct sk_buff *segments, *next;
+
+	segments = skb_gso_segment(skb, 0);
+	if (IS_ERR(segments))
+		return PTR_ERR(segments);
+
+	dev_consume_skb_any(skb);
+
+	skb_list_walk_safe(segments, skb, next) {
+		skb_mark_not_on_list(skb);
+		efx_enqueue_skb(tx_queue, skb);
+	}
+
+	return 0;
+}
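
For reference, the header length computed by the new efx_tx_tso_header_length() helper is the distance from the start of the packet data to the end of the (inner, for encapsulated traffic) TCP header; the TCP doff field counts 32-bit words, hence the shift left by 2. Below is a minimal userspace sketch of the same arithmetic; struct fake_pkt and its fields are hypothetical stand-ins for sk_buff and its accessors, not kernel API.

/* Minimal userspace sketch of the TSO header-length arithmetic in
 * efx_tx_tso_header_length(). Only the arithmetic mirrors the kernel
 * code; the types here are illustrative.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct fake_pkt {
	const uint8_t *data;             /* start of packet headers */
	const uint8_t *transport_header; /* start of the TCP header */
	unsigned int tcp_doff;           /* TCP data offset, in 32-bit words */
};

/* Bytes from the start of the packet to the end of the TCP header.
 * doff counts 32-bit words, so shift left by 2 to convert to bytes.
 */
static size_t tso_header_length(const struct fake_pkt *pkt)
{
	return (size_t)(pkt->transport_header - pkt->data) +
	       ((size_t)pkt->tcp_doff << 2);
}

int main(void)
{
	uint8_t buf[128] = { 0 };
	/* Ethernet (14) + option-less IPv4 (20) puts the TCP header at
	 * offset 34; an option-less TCP header has doff = 5 (20 bytes).
	 */
	struct fake_pkt pkt = {
		.data = buf,
		.transport_header = buf + 34,
		.tcp_doff = 5,
	};

	printf("header_len = %zu\n", tso_header_length(&pkt)); /* prints 54 */
	return 0;
}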
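The fallback itself leans on two properties of the core stack: skb_gso_segment() hands back a list of already-segmented packets (each one a normal, non-GSO skb, so re-enqueueing it does not hit the TSO path again), and skb_list_walk_safe() caches the next pointer before each iteration, so unlinking the current skb via skb_mark_not_on_list() inside the loop body is safe. Below is a small userspace sketch of that walk-and-detach pattern; all names are illustrative, and only the macro's shape mirrors the kernel's skb_list_walk_safe().

/* Userspace sketch of the walk-and-detach pattern behind
 * skb_list_walk_safe() + skb_mark_not_on_list(): the next pointer is
 * cached before the loop body runs, so the body may unlink (and even
 * free) the current node.
 */
#include <stdio.h>
#include <stdlib.h>

struct seg {
	struct seg *next;
	int id;
};

/* Same shape as the kernel's skb_list_walk_safe() macro. */
#define seg_list_walk_safe(first, cur, nxt)                       \
	for ((cur) = (first), (nxt) = (cur) ? (cur)->next : NULL; \
	     (cur);                                               \
	     (cur) = (nxt), (nxt) = (cur) ? (cur)->next : NULL)

/* Stand-in for efx_enqueue_skb(): takes ownership of the segment. */
static void enqueue_one(struct seg *s)
{
	printf("enqueue segment %d\n", s->id);
	free(s);
}

int main(void)
{
	struct seg *head = NULL, *cur, *next;
	int i;

	/* Build a small list: 2 -> 1 -> 0 */
	for (i = 0; i < 3; i++) {
		struct seg *s = malloc(sizeof(*s));

		if (!s)
			return 1;
		s->id = i;
		s->next = head;
		head = s;
	}

	seg_list_walk_safe(head, cur, next) {
		cur->next = NULL;  /* analogue of skb_mark_not_on_list() */
		enqueue_one(cur);  /* analogue of efx_enqueue_skb() */
	}
	return 0;
}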