author     David S. Miller   2014-08-22 12:31:24 -0700
committer  David S. Miller   2014-08-22 12:31:24 -0700
commit     0c32ec8f5ba885fda08b1b822158b6135e56a308 (patch)
tree       bf174e7c1545cfe4e560d9608e2966681c391adb
parent     c0b802367b05fa6342ab9ef07abdf446b9ba223f (diff)
parent     97539f1e4f1e4b53604970b2dfe7794794f57a76 (diff)
Merge branch 'bnx2x-next'
Yuval Mintz says:
====================
bnx2x: Start utilizing 7.10.51
This series will enable bnx2x to start utilizing its 7.10.51 FW.
In addition, it will also add timestamping support, as well as a couple
of routine semantic cleanups.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
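
[Editor's note: the following is not part of the patch. It is a minimal user-space
sketch of how the hardware timestamping that this series adds (SIOCSHWTSTAMP
handling plus the new .get_ts_info ethtool op) would typically be enabled; the
interface name "eth0" is an assumption, and the filter chosen is one of those the
driver reports as supported.]

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(void)
{
	struct hwtstamp_config cfg;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	memset(&cfg, 0, sizeof(cfg));
	cfg.tx_type = HWTSTAMP_TX_ON;                 /* timestamp outgoing PTP packets */
	cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; /* PTPv2 event packets, L2 or L4 */

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);  /* assumed interface name */
	ifr.ifr_data = (void *)&cfg;

	/* The driver may adjust the config; it is returned in cfg on success. */
	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0) {
		perror("SIOCSHWTSTAMP");
		return 1;
	}

	printf("tx_type=%d rx_filter=%d\n", cfg.tx_type, cfg.rx_filter);
	return 0;
}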
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x.h          |  40
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c      |  90
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h      |   5
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c      |   5
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c  |  42
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h  | 222
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h      | 221
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c     | 841
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h      | 178
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c       | 114
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h       |  62
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c    |  48
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h    |   3
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c    |   5
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c     |   9
15 files changed, 1476 insertions, 409 deletions
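
[Editor's note: an illustrative re-statement, not driver code. The new
bnx2x_ptp_adjfreq() further down in bnx2x_main.c works around a FW limitation by
approximating a requested frequency offset of |ppb| parts per billion with a pair
(val, period) such that val * 1000000 / period is as close to ppb as possible,
where val is 1..31 excluding multiples of 8. The boundary cases ppb == 0 and
ppb >= 31000000 are handled separately in the driver and are assumed away here.]

#define MAX_PHC_DRIFT 31000000	/* matches BNX2X_MAX_PHC_DRIFT in the patch */

/* Assumes 0 < ppb < MAX_PHC_DRIFT. */
static void pick_drift_params(int ppb, int *best_val, int *best_period)
{
	int val, best_dif = MAX_PHC_DRIFT;

	*best_val = 0;
	*best_period = 0;

	for (val = 1; val <= 31; val++) {
		int period1, period2, dif1, dif2, dif;

		if ((val & 0x7) == 0)	/* 8, 16, 24 not programmable by the workaround */
			continue;

		period1 = val * 1000000 / ppb;
		period2 = period1 + 1;

		/* residual error for the two candidate periods */
		dif1 = period1 ? ppb - val * 1000000 / period1 : MAX_PHC_DRIFT;
		if (dif1 < 0)
			dif1 = -dif1;
		dif2 = ppb - val * 1000000 / period2;
		if (dif2 < 0)
			dif2 = -dif2;

		dif = dif1 < dif2 ? dif1 : dif2;
		if (dif < best_dif) {
			best_dif = dif;
			*best_val = val;
			*best_period = dif1 < dif2 ? period1 : period2;
		}
	}
}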
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index d777fae86988..86e94517a536 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h @@ -20,13 +20,17 @@ #include <linux/types.h> #include <linux/pci_regs.h> +#include <linux/ptp_clock_kernel.h> +#include <linux/net_tstamp.h> +#include <linux/clocksource.h> + /* compilation time flags */ /* define this to make the driver freeze on error to allow getting debug info * (you will need to reboot afterwards) */ /* #define BNX2X_STOP_ON_ERROR */ -#define DRV_MODULE_VERSION "1.78.19-0" +#define DRV_MODULE_VERSION "1.710.51-0" #define DRV_MODULE_RELDATE "2014/02/10" #define BNX2X_BC_VER 0x040200 @@ -70,6 +74,7 @@ enum bnx2x_int_mode { #define BNX2X_MSG_SP 0x0100000 /* was: NETIF_MSG_INTR */ #define BNX2X_MSG_FP 0x0200000 /* was: NETIF_MSG_INTR */ #define BNX2X_MSG_IOV 0x0800000 +#define BNX2X_MSG_PTP 0x1000000 #define BNX2X_MSG_IDLE 0x2000000 /* used for idle check*/ #define BNX2X_MSG_ETHTOOL 0x4000000 #define BNX2X_MSG_DCB 0x8000000 @@ -1587,10 +1592,11 @@ struct bnx2x { #define USING_SINGLE_MSIX_FLAG (1 << 20) #define BC_SUPPORTS_DCBX_MSG_NON_PMF (1 << 21) #define IS_VF_FLAG (1 << 22) -#define INTERRUPTS_ENABLED_FLAG (1 << 23) -#define BC_SUPPORTS_RMMOD_CMD (1 << 24) -#define HAS_PHYS_PORT_ID (1 << 25) -#define AER_ENABLED (1 << 26) +#define BC_SUPPORTS_RMMOD_CMD (1 << 23) +#define HAS_PHYS_PORT_ID (1 << 24) +#define AER_ENABLED (1 << 25) +#define PTP_SUPPORTED (1 << 26) +#define TX_TIMESTAMPING_EN (1 << 27) #define BP_NOMCP(bp) ((bp)->flags & NO_MCP_FLAG) @@ -1684,13 +1690,9 @@ struct bnx2x { #define BNX2X_STATE_ERROR 0xf000 #define BNX2X_MAX_PRIORITY 8 -#define BNX2X_MAX_ENTRIES_PER_PRI 16 -#define BNX2X_MAX_COS 3 -#define BNX2X_MAX_TX_COS 2 int num_queues; uint num_ethernet_queues; uint num_cnic_queues; - int num_napi_queues; int disable_tpa; u32 rx_mode; @@ -1933,6 +1935,19 @@ struct bnx2x { u8 phys_port_id[ETH_ALEN]; + /* PTP related context */ + struct ptp_clock *ptp_clock; + struct ptp_clock_info ptp_clock_info; + struct work_struct ptp_task; + struct cyclecounter cyclecounter; + struct timecounter timecounter; + bool timecounter_init_done; + struct sk_buff *ptp_tx_skb; + unsigned long ptp_tx_start; + bool hwtstamp_ioctl_called; + u16 tx_type; + u16 rx_filter; + struct bnx2x_link_report_data vf_link_vars; }; @@ -2559,4 +2574,11 @@ void bnx2x_update_mng_version(struct bnx2x *bp); #define E1H_MAX_MF_SB_COUNT (HC_SB_MAX_SB_E1X/(E1HVN_MAX * PORT_MAX)) +void bnx2x_init_ptp(struct bnx2x *bp); +int bnx2x_configure_ptp_filters(struct bnx2x *bp); +void bnx2x_set_rx_ts(struct bnx2x *bp, struct sk_buff *skb); + +#define BNX2X_MAX_PHC_DRIFT 31000000 +#define BNX2X_PTP_TX_TIMEOUT + #endif /* bnx2x.h */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 4e6c82e20224..a1dd2e417a97 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -1067,6 +1067,11 @@ reuse_rx: skb_record_rx_queue(skb, fp->rx_queue); + /* Check if this packet was timestamped */ + if (unlikely(le16_to_cpu(cqe->fast_path_cqe.type_error_flags) & + (1 << ETH_FAST_PATH_RX_CQE_PTP_PKT_SHIFT))) + bnx2x_set_rx_ts(bp, skb); + if (le16_to_cpu(cqe_fp->pars_flags.flags) & PARSING_FLAGS_VLAN) __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), @@ -2082,6 +2087,10 @@ int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj, __set_bit(BNX2X_RSS_IPV4_UDP, 
¶ms.rss_flags); if (rss_obj->udp_rss_v6) __set_bit(BNX2X_RSS_IPV6_UDP, ¶ms.rss_flags); + + if (!CHIP_IS_E1x(bp)) + /* valid only for TUNN_MODE_GRE tunnel mode */ + __set_bit(BNX2X_RSS_GRE_INNER_HDRS, ¶ms.rss_flags); } else { __set_bit(BNX2X_RSS_MODE_DISABLED, ¶ms.rss_flags); } @@ -2804,7 +2813,11 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) /* Initialize Rx filter. */ bnx2x_set_rx_mode_inner(bp); - /* Start the Tx */ + if (bp->flags & PTP_SUPPORTED) { + bnx2x_init_ptp(bp); + bnx2x_configure_ptp_filters(bp); + } + /* Start Tx */ switch (load_mode) { case LOAD_NORMAL: /* Tx queue should be only re-enabled */ @@ -3441,26 +3454,6 @@ exit_lbl: } #endif -static void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data, - u32 xmit_type) -{ - struct ipv6hdr *ipv6; - - *parsing_data |= (skb_shinfo(skb)->gso_size << - ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) & - ETH_TX_PARSE_BD_E2_LSO_MSS; - - if (xmit_type & XMIT_GSO_ENC_V6) - ipv6 = inner_ipv6_hdr(skb); - else if (xmit_type & XMIT_GSO_V6) - ipv6 = ipv6_hdr(skb); - else - ipv6 = NULL; - - if (ipv6 && ipv6->nexthdr == NEXTHDR_IPV6) - *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR; -} - /** * bnx2x_set_pbd_gso - update PBD in GSO case. * @@ -3470,7 +3463,6 @@ static void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data, */ static void bnx2x_set_pbd_gso(struct sk_buff *skb, struct eth_tx_parse_bd_e1x *pbd, - struct eth_tx_start_bd *tx_start_bd, u32 xmit_type) { pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size); @@ -3483,9 +3475,6 @@ static void bnx2x_set_pbd_gso(struct sk_buff *skb, bswab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr, ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0)); - - /* GSO on 57710/57711 needs FW to calculate IP checksum */ - tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IP_CSUM; } else { pbd->tcp_pseudo_csum = bswab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, @@ -3657,18 +3646,23 @@ static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb, (__force u32)iph->tot_len - (__force u32)iph->frag_off; + outerip_len = iph->ihl << 1; + pbd2->fw_ip_csum_wo_len_flags_frag = bswab16(csum_fold((__force __wsum)csum)); } else { pbd2->fw_ip_hdr_to_payload_w = hlen_w - ((sizeof(struct ipv6hdr)) >> 1); + pbd_e2->data.tunnel_data.flags |= + 1 /*IPv6*/ << ETH_TUNNEL_DATA_IP_HDR_TYPE_OUTER; } pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq); pbd2->tcp_flags = pbd_tcp_flags(inner_tcp_hdr(skb)); - if (xmit_type & XMIT_GSO_V4) { + /* inner IP header info */ + if (xmit_type & XMIT_CSUM_ENC_V4) { pbd2->hw_ip_id = bswab16(inner_ip_hdr(skb)->id); pbd_e2->data.tunnel_data.pseudo_csum = @@ -3676,8 +3670,6 @@ static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb, inner_ip_hdr(skb)->saddr, inner_ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0)); - - outerip_len = ip_hdr(skb)->ihl << 1; } else { pbd_e2->data.tunnel_data.pseudo_csum = bswab16(~csum_ipv6_magic( @@ -3690,8 +3682,6 @@ static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb, *global_data |= outerip_off | - (!!(xmit_type & XMIT_CSUM_V6) << - ETH_TX_PARSE_2ND_BD_IP_HDR_TYPE_OUTER_SHIFT) | (outerip_len << ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT) | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) << @@ -3703,6 +3693,23 @@ static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb, } } +static inline void bnx2x_set_ipv6_ext_e2(struct sk_buff *skb, u32 *parsing_data, + u32 xmit_type) +{ + struct ipv6hdr *ipv6; + + if (!(xmit_type & (XMIT_GSO_ENC_V6 | XMIT_GSO_V6))) + return; + + if (xmit_type & XMIT_GSO_ENC_V6) + ipv6 = inner_ipv6_hdr(skb); + else /* XMIT_GSO_V6 */ + ipv6 = 
ipv6_hdr(skb); + + if (ipv6->nexthdr == NEXTHDR_IPV6) + *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR; +} + /* called with netif_tx_lock * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call * netif_wake_queue() @@ -3835,6 +3842,20 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD; + if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { + if (!(bp->flags & TX_TIMESTAMPING_EN)) { + BNX2X_ERR("Tx timestamping was not enabled, this packet will not be timestamped\n"); + } else if (bp->ptp_tx_skb) { + BNX2X_ERR("The device supports only a single outstanding packet to timestamp, this packet will not be timestamped\n"); + } else { + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + /* schedule check for Tx timestamp */ + bp->ptp_tx_skb = skb_get(skb); + bp->ptp_tx_start = jiffies; + schedule_work(&bp->ptp_task); + } + } + /* header nbd: indirectly zero other flags! */ tx_start_bd->general_data = 1 << ETH_TX_START_BD_HDR_NBDS_SHIFT; @@ -3919,6 +3940,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) xmit_type); } + bnx2x_set_ipv6_ext_e2(skb, &pbd_e2_parsing_data, xmit_type); /* Add the macs to the parsing BD if this is a vf or if * Tx Switching is enabled. */ @@ -3984,10 +4006,12 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) bd_prod); } if (!CHIP_IS_E1x(bp)) - bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data, - xmit_type); + pbd_e2_parsing_data |= + (skb_shinfo(skb)->gso_size << + ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) & + ETH_TX_PARSE_BD_E2_LSO_MSS; else - bnx2x_set_pbd_gso(skb, pbd_e1x, first_bd, xmit_type); + bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type); } /* Set the PBD's parsing_data field if not zero diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h index 571427c7226b..ac63e16829ef 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h @@ -932,8 +932,9 @@ static inline int bnx2x_func_start(struct bnx2x *bp) else /* CHIP_IS_E1X */ start_params->network_cos_mode = FW_WRR; - start_params->gre_tunnel_mode = L2GRE_TUNNEL; - start_params->gre_tunnel_rss = GRE_INNER_HEADERS_RSS; + start_params->tunnel_mode = TUNN_MODE_GRE; + start_params->gre_tunnel_type = IPGRE_TUNNEL; + start_params->inner_gre_rss_en = 1; return bnx2x_func_state_change(bp, &func_params); } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c index fb26bc4c42a1..6e4294ed1fc9 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c @@ -2092,7 +2092,6 @@ static void bnx2x_dcbnl_get_pfc_cfg(struct net_device *netdev, int prio, static u8 bnx2x_dcbnl_set_all(struct net_device *netdev) { struct bnx2x *bp = netdev_priv(netdev); - int rc = 0; DP(BNX2X_MSG_DCB, "SET-ALL\n"); @@ -2110,9 +2109,7 @@ static u8 bnx2x_dcbnl_set_all(struct net_device *netdev) 1); bnx2x_dcbx_init(bp, true); } - DP(BNX2X_MSG_DCB, "set_dcbx_params done (%d)\n", rc); - if (rc) - return 1; + DP(BNX2X_MSG_DCB, "set_dcbx_params done\n"); return 0; } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c index 92fee842f954..0b173ed20ae9 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c @@ -3481,6 +3481,46 @@ static int 
bnx2x_set_channels(struct net_device *dev, return bnx2x_nic_load(bp, LOAD_NORMAL); } +static int bnx2x_get_ts_info(struct net_device *dev, + struct ethtool_ts_info *info) +{ + struct bnx2x *bp = netdev_priv(dev); + + if (bp->flags & PTP_SUPPORTED) { + info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE | + SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + + if (bp->ptp_clock) + info->phc_index = ptp_clock_index(bp->ptp_clock); + else + info->phc_index = -1; + + info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | + (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) | + (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) | + (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | + (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) | + (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ); + + info->tx_types = (1 << HWTSTAMP_TX_OFF)|(1 << HWTSTAMP_TX_ON); + + return 0; + } + + return ethtool_op_get_ts_info(dev, info); +} + static const struct ethtool_ops bnx2x_ethtool_ops = { .get_settings = bnx2x_get_settings, .set_settings = bnx2x_set_settings, @@ -3522,7 +3562,7 @@ static const struct ethtool_ops bnx2x_ethtool_ops = { .get_module_eeprom = bnx2x_get_module_eeprom, .get_eee = bnx2x_get_eee, .set_eee = bnx2x_set_eee, - .get_ts_info = ethtool_op_get_ts_info, + .get_ts_info = bnx2x_get_ts_info, }; static const struct ethtool_ops bnx2x_vf_ethtool_ops = { diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h index 95dc36543548..7636e3c18771 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h @@ -10,170 +10,170 @@ #ifndef BNX2X_FW_DEFS_H #define BNX2X_FW_DEFS_H -#define CSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[148].base) +#define CSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[152].base) #define CSTORM_ASSERT_LIST_OFFSET(assertListEntry) \ - (IRO[147].base + ((assertListEntry) * IRO[147].m1)) + (IRO[151].base + ((assertListEntry) * IRO[151].m1)) #define CSTORM_EVENT_RING_DATA_OFFSET(pfId) \ - (IRO[153].base + (((pfId)>>1) * IRO[153].m1) + (((pfId)&1) * \ - IRO[153].m2)) + (IRO[157].base + (((pfId)>>1) * IRO[157].m1) + (((pfId)&1) * \ + IRO[157].m2)) #define CSTORM_EVENT_RING_PROD_OFFSET(pfId) \ - (IRO[154].base + (((pfId)>>1) * IRO[154].m1) + (((pfId)&1) * \ - IRO[154].m2)) + (IRO[158].base + (((pfId)>>1) * IRO[158].m1) + (((pfId)&1) * \ + IRO[158].m2)) #define CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(funcId) \ - (IRO[159].base + ((funcId) * IRO[159].m1)) + (IRO[163].base + ((funcId) * IRO[163].m1)) #define CSTORM_FUNC_EN_OFFSET(funcId) \ - (IRO[149].base + ((funcId) * IRO[149].m1)) + (IRO[153].base + ((funcId) * IRO[153].m1)) #define CSTORM_HC_SYNC_LINE_INDEX_E1X_OFFSET(hcIndex, sbId) \ - (IRO[139].base + ((hcIndex) * IRO[139].m1) + ((sbId) * IRO[139].m2)) + (IRO[143].base + ((hcIndex) * IRO[143].m1) + ((sbId) * IRO[143].m2)) #define CSTORM_HC_SYNC_LINE_INDEX_E2_OFFSET(hcIndex, sbId) \ - (IRO[138].base + (((hcIndex)>>2) * IRO[138].m1) + (((hcIndex)&3) \ - * IRO[138].m2) + ((sbId) * IRO[138].m3)) -#define CSTORM_IGU_MODE_OFFSET (IRO[157].base) + (IRO[142].base + (((hcIndex)>>2) * IRO[142].m1) + (((hcIndex)&3) \ + * IRO[142].m2) + ((sbId) * 
IRO[142].m3)) +#define CSTORM_IGU_MODE_OFFSET (IRO[161].base) #define CSTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \ - (IRO[317].base + ((pfId) * IRO[317].m1)) + (IRO[323].base + ((pfId) * IRO[323].m1)) #define CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \ - (IRO[318].base + ((pfId) * IRO[318].m1)) + (IRO[324].base + ((pfId) * IRO[324].m1)) #define CSTORM_ISCSI_EQ_CONS_OFFSET(pfId, iscsiEqId) \ - (IRO[310].base + ((pfId) * IRO[310].m1) + ((iscsiEqId) * IRO[310].m2)) + (IRO[316].base + ((pfId) * IRO[316].m1) + ((iscsiEqId) * IRO[316].m2)) #define CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfId, iscsiEqId) \ - (IRO[312].base + ((pfId) * IRO[312].m1) + ((iscsiEqId) * IRO[312].m2)) + (IRO[318].base + ((pfId) * IRO[318].m1) + ((iscsiEqId) * IRO[318].m2)) #define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfId, iscsiEqId) \ - (IRO[311].base + ((pfId) * IRO[311].m1) + ((iscsiEqId) * IRO[311].m2)) + (IRO[317].base + ((pfId) * IRO[317].m1) + ((iscsiEqId) * IRO[317].m2)) #define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfId, iscsiEqId) \ - (IRO[313].base + ((pfId) * IRO[313].m1) + ((iscsiEqId) * IRO[313].m2)) + (IRO[319].base + ((pfId) * IRO[319].m1) + ((iscsiEqId) * IRO[319].m2)) #define CSTORM_ISCSI_EQ_PROD_OFFSET(pfId, iscsiEqId) \ - (IRO[309].base + ((pfId) * IRO[309].m1) + ((iscsiEqId) * IRO[309].m2)) -#define CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfId, iscsiEqId) \ (IRO[315].base + ((pfId) * IRO[315].m1) + ((iscsiEqId) * IRO[315].m2)) +#define CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfId, iscsiEqId) \ + (IRO[321].base + ((pfId) * IRO[321].m1) + ((iscsiEqId) * IRO[321].m2)) #define CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfId, iscsiEqId) \ - (IRO[314].base + ((pfId) * IRO[314].m1) + ((iscsiEqId) * IRO[314].m2)) + (IRO[320].base + ((pfId) * IRO[320].m1) + ((iscsiEqId) * IRO[320].m2)) #define CSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \ - (IRO[316].base + ((pfId) * IRO[316].m1)) + (IRO[322].base + ((pfId) * IRO[322].m1)) #define CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \ - (IRO[308].base + ((pfId) * IRO[308].m1)) + (IRO[314].base + ((pfId) * IRO[314].m1)) #define CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \ - (IRO[307].base + ((pfId) * IRO[307].m1)) + (IRO[313].base + ((pfId) * IRO[313].m1)) #define CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \ - (IRO[306].base + ((pfId) * IRO[306].m1)) + (IRO[312].base + ((pfId) * IRO[312].m1)) #define CSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \ - (IRO[151].base + ((funcId) * IRO[151].m1)) + (IRO[155].base + ((funcId) * IRO[155].m1)) #define CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(pfId) \ - (IRO[142].base + ((pfId) * IRO[142].m1)) + (IRO[146].base + ((pfId) * IRO[146].m1)) #define CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(pfId) \ - (IRO[143].base + ((pfId) * IRO[143].m1)) + (IRO[147].base + ((pfId) * IRO[147].m1)) #define CSTORM_SP_STATUS_BLOCK_OFFSET(pfId) \ - (IRO[141].base + ((pfId) * IRO[141].m1)) -#define CSTORM_SP_STATUS_BLOCK_SIZE (IRO[141].size) + (IRO[145].base + ((pfId) * IRO[145].m1)) +#define CSTORM_SP_STATUS_BLOCK_SIZE (IRO[145].size) #define CSTORM_SP_SYNC_BLOCK_OFFSET(pfId) \ - (IRO[144].base + ((pfId) * IRO[144].m1)) -#define CSTORM_SP_SYNC_BLOCK_SIZE (IRO[144].size) + (IRO[148].base + ((pfId) * IRO[148].m1)) +#define CSTORM_SP_SYNC_BLOCK_SIZE (IRO[148].size) #define CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(sbId, hcIndex) \ - (IRO[136].base + ((sbId) * IRO[136].m1) + ((hcIndex) * IRO[136].m2)) + (IRO[140].base + ((sbId) * IRO[140].m1) + ((hcIndex) * IRO[140].m2)) #define CSTORM_STATUS_BLOCK_DATA_OFFSET(sbId) \ - (IRO[133].base + ((sbId) * IRO[133].m1)) + (IRO[137].base + ((sbId) * IRO[137].m1)) #define 
CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(sbId) \ - (IRO[134].base + ((sbId) * IRO[134].m1)) + (IRO[138].base + ((sbId) * IRO[138].m1)) #define CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(sbId, hcIndex) \ - (IRO[135].base + ((sbId) * IRO[135].m1) + ((hcIndex) * IRO[135].m2)) + (IRO[139].base + ((sbId) * IRO[139].m1) + ((hcIndex) * IRO[139].m2)) #define CSTORM_STATUS_BLOCK_OFFSET(sbId) \ - (IRO[132].base + ((sbId) * IRO[132].m1)) -#define CSTORM_STATUS_BLOCK_SIZE (IRO[132].size) + (IRO[136].base + ((sbId) * IRO[136].m1)) +#define CSTORM_STATUS_BLOCK_SIZE (IRO[136].size) #define CSTORM_SYNC_BLOCK_OFFSET(sbId) \ - (IRO[137].base + ((sbId) * IRO[137].m1)) -#define CSTORM_SYNC_BLOCK_SIZE (IRO[137].size) + (IRO[141].base + ((sbId) * IRO[141].m1)) +#define CSTORM_SYNC_BLOCK_SIZE (IRO[141].size) #define CSTORM_VF_PF_CHANNEL_STATE_OFFSET(vfId) \ - (IRO[155].base + ((vfId) * IRO[155].m1)) + (IRO[159].base + ((vfId) * IRO[159].m1)) #define CSTORM_VF_PF_CHANNEL_VALID_OFFSET(vfId) \ - (IRO[156].base + ((vfId) * IRO[156].m1)) + (IRO[160].base + ((vfId) * IRO[160].m1)) #define CSTORM_VF_TO_PF_OFFSET(funcId) \ - (IRO[150].base + ((funcId) * IRO[150].m1)) + (IRO[154].base + ((funcId) * IRO[154].m1)) #define TSTORM_APPROXIMATE_MATCH_MULTICAST_FILTERING_OFFSET(pfId) \ - (IRO[203].base + ((pfId) * IRO[203].m1)) + (IRO[207].base + ((pfId) * IRO[207].m1)) #define TSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[102].base) #define TSTORM_ASSERT_LIST_OFFSET(assertListEntry) \ (IRO[101].base + ((assertListEntry) * IRO[101].m1)) #define TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(pfId) \ - (IRO[201].base + ((pfId) * IRO[201].m1)) + (IRO[205].base + ((pfId) * IRO[205].m1)) #define TSTORM_FUNC_EN_OFFSET(funcId) \ - (IRO[103].base + ((funcId) * IRO[103].m1)) + (IRO[107].base + ((funcId) * IRO[107].m1)) #define TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \ - (IRO[272].base + ((pfId) * IRO[272].m1)) + (IRO[278].base + ((pfId) * IRO[278].m1)) #define TSTORM_ISCSI_L2_ISCSI_OOO_CID_TABLE_OFFSET(pfId) \ - (IRO[273].base + ((pfId) * IRO[273].m1)) + (IRO[279].base + ((pfId) * IRO[279].m1)) #define TSTORM_ISCSI_L2_ISCSI_OOO_CLIENT_ID_TABLE_OFFSET(pfId) \ - (IRO[274].base + ((pfId) * IRO[274].m1)) + (IRO[280].base + ((pfId) * IRO[280].m1)) #define TSTORM_ISCSI_L2_ISCSI_OOO_PROD_OFFSET(pfId) \ - (IRO[275].base + ((pfId) * IRO[275].m1)) + (IRO[281].base + ((pfId) * IRO[281].m1)) #define TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \ - (IRO[271].base + ((pfId) * IRO[271].m1)) + (IRO[277].base + ((pfId) * IRO[277].m1)) #define TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \ - (IRO[270].base + ((pfId) * IRO[270].m1)) + (IRO[276].base + ((pfId) * IRO[276].m1)) #define TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \ - (IRO[269].base + ((pfId) * IRO[269].m1)) + (IRO[275].base + ((pfId) * IRO[275].m1)) #define TSTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \ - (IRO[268].base + ((pfId) * IRO[268].m1)) + (IRO[274].base + ((pfId) * IRO[274].m1)) #define TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfId) \ - (IRO[278].base + ((pfId) * IRO[278].m1)) + (IRO[284].base + ((pfId) * IRO[284].m1)) #define TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \ - (IRO[264].base + ((pfId) * IRO[264].m1)) + (IRO[270].base + ((pfId) * IRO[270].m1)) #define TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfId) \ - (IRO[265].base + ((pfId) * IRO[265].m1)) + (IRO[271].base + ((pfId) * IRO[271].m1)) #define TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfId) \ - (IRO[266].base + ((pfId) * IRO[266].m1)) + (IRO[272].base + ((pfId) * IRO[272].m1)) #define TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfId) \ - (IRO[267].base + ((pfId) * 
IRO[267].m1)) + (IRO[273].base + ((pfId) * IRO[273].m1)) #define TSTORM_MAC_FILTER_CONFIG_OFFSET(pfId) \ - (IRO[202].base + ((pfId) * IRO[202].m1)) + (IRO[206].base + ((pfId) * IRO[206].m1)) #define TSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \ - (IRO[105].base + ((funcId) * IRO[105].m1)) + (IRO[109].base + ((funcId) * IRO[109].m1)) #define TSTORM_TCP_MAX_CWND_OFFSET(pfId) \ - (IRO[217].base + ((pfId) * IRO[217].m1)) + (IRO[223].base + ((pfId) * IRO[223].m1)) #define TSTORM_VF_TO_PF_OFFSET(funcId) \ - (IRO[104].base + ((funcId) * IRO[104].m1)) -#define USTORM_AGG_DATA_OFFSET (IRO[206].base) -#define USTORM_AGG_DATA_SIZE (IRO[206].size) -#define USTORM_ASSERT_LIST_INDEX_OFFSET (IRO[177].base) + (IRO[108].base + ((funcId) * IRO[108].m1)) +#define USTORM_AGG_DATA_OFFSET (IRO[212].base) +#define USTORM_AGG_DATA_SIZE (IRO[212].size) +#define USTORM_ASSERT_LIST_INDEX_OFFSET (IRO[181].base) #define USTORM_ASSERT_LIST_OFFSET(assertListEntry) \ - (IRO[176].base + ((assertListEntry) * IRO[176].m1)) + (IRO[180].base + ((assertListEntry) * IRO[180].m1)) #define USTORM_ETH_PAUSE_ENABLED_OFFSET(portId) \ - (IRO[183].base + ((portId) * IRO[183].m1)) + (IRO[187].base + ((portId) * IRO[187].m1)) #define USTORM_FCOE_EQ_PROD_OFFSET(pfId) \ - (IRO[319].base + ((pfId) * IRO[319].m1)) + (IRO[325].base + ((pfId) * IRO[325].m1)) #define USTORM_FUNC_EN_OFFSET(funcId) \ - (IRO[178].base + ((funcId) * IRO[178].m1)) + (IRO[182].base + ((funcId) * IRO[182].m1)) #define USTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \ - (IRO[283].base + ((pfId) * IRO[283].m1)) + (IRO[289].base + ((pfId) * IRO[289].m1)) #define USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \ - (IRO[284].base + ((pfId) * IRO[284].m1)) + (IRO[290].base + ((pfId) * IRO[290].m1)) #define USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \ - (IRO[288].base + ((pfId) * IRO[288].m1)) + (IRO[294].base + ((pfId) * IRO[294].m1)) #define USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfId) \ - (IRO[285].base + ((pfId) * IRO[285].m1)) + (IRO[291].base + ((pfId) * IRO[291].m1)) #define USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \ - (IRO[281].base + ((pfId) * IRO[281].m1)) + (IRO[287].base + ((pfId) * IRO[287].m1)) #define USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \ - (IRO[280].base + ((pfId) * IRO[280].m1)) + (IRO[286].base + ((pfId) * IRO[286].m1)) #define USTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \ - (IRO[279].base + ((pfId) * IRO[279].m1)) + (IRO[285].base + ((pfId) * IRO[285].m1)) #define USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \ - (IRO[282].base + ((pfId) * IRO[282].m1)) + (IRO[288].base + ((pfId) * IRO[288].m1)) #define USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfId) \ - (IRO[286].base + ((pfId) * IRO[286].m1)) + (IRO[292].base + ((pfId) * IRO[292].m1)) #define USTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \ - (IRO[287].base + ((pfId) * IRO[287].m1)) + (IRO[293].base + ((pfId) * IRO[293].m1)) #define USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(pfId) \ - (IRO[182].base + ((pfId) * IRO[182].m1)) + (IRO[186].base + ((pfId) * IRO[186].m1)) #define USTORM_RECORD_SLOW_PATH_OFFSET(funcId) \ - (IRO[180].base + ((funcId) * IRO[180].m1)) + (IRO[184].base + ((funcId) * IRO[184].m1)) #define USTORM_RX_PRODS_E1X_OFFSET(portId, clientId) \ - (IRO[209].base + ((portId) * IRO[209].m1) + ((clientId) * \ - IRO[209].m2)) + (IRO[215].base + ((portId) * IRO[215].m1) + ((clientId) * \ + IRO[215].m2)) #define USTORM_RX_PRODS_E2_OFFSET(qzoneId) \ - (IRO[210].base + ((qzoneId) * IRO[210].m1)) -#define USTORM_TPA_BTR_OFFSET (IRO[207].base) -#define USTORM_TPA_BTR_SIZE (IRO[207].size) + (IRO[216].base + ((qzoneId) * IRO[216].m1)) +#define USTORM_TPA_BTR_OFFSET 
(IRO[213].base) +#define USTORM_TPA_BTR_SIZE (IRO[213].size) #define USTORM_VF_TO_PF_OFFSET(funcId) \ - (IRO[179].base + ((funcId) * IRO[179].m1)) + (IRO[183].base + ((funcId) * IRO[183].m1)) #define XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE (IRO[67].base) #define XSTORM_AGG_INT_FINAL_CLEANUP_INDEX (IRO[66].base) #define XSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[51].base) @@ -186,39 +186,39 @@ #define XSTORM_FUNC_EN_OFFSET(funcId) \ (IRO[47].base + ((funcId) * IRO[47].m1)) #define XSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \ - (IRO[296].base + ((pfId) * IRO[296].m1)) + (IRO[302].base + ((pfId) * IRO[302].m1)) #define XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfId) \ - (IRO[299].base + ((pfId) * IRO[299].m1)) + (IRO[305].base + ((pfId) * IRO[305].m1)) #define XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfId) \ - (IRO[300].base + ((pfId) * IRO[300].m1)) + (IRO[306].base + ((pfId) * IRO[306].m1)) #define XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfId) \ - (IRO[301].base + ((pfId) * IRO[301].m1)) + (IRO[307].base + ((pfId) * IRO[307].m1)) #define XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfId) \ - (IRO[302].base + ((pfId) * IRO[302].m1)) + (IRO[308].base + ((pfId) * IRO[308].m1)) #define XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfId) \ - (IRO[303].base + ((pfId) * IRO[303].m1)) + (IRO[309].base + ((pfId) * IRO[309].m1)) #define XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfId) \ - (IRO[304].base + ((pfId) * IRO[304].m1)) + (IRO[310].base + ((pfId) * IRO[310].m1)) #define XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfId) \ - (IRO[305].base + ((pfId) * IRO[305].m1)) + (IRO[311].base + ((pfId) * IRO[311].m1)) #define XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \ - (IRO[295].base + ((pfId) * IRO[295].m1)) + (IRO[301].base + ((pfId) * IRO[301].m1)) #define XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \ - (IRO[294].base + ((pfId) * IRO[294].m1)) + (IRO[300].base + ((pfId) * IRO[300].m1)) #define XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \ - (IRO[293].base + ((pfId) * IRO[293].m1)) + (IRO[299].base + ((pfId) * IRO[299].m1)) #define XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \ - (IRO[298].base + ((pfId) * IRO[298].m1)) + (IRO[304].base + ((pfId) * IRO[304].m1)) #define XSTORM_ISCSI_SQ_SIZE_OFFSET(pfId) \ - (IRO[297].base + ((pfId) * IRO[297].m1)) + (IRO[303].base + ((pfId) * IRO[303].m1)) #define XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(pfId) \ - (IRO[292].base + ((pfId) * IRO[292].m1)) + (IRO[298].base + ((pfId) * IRO[298].m1)) #define XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \ - (IRO[291].base + ((pfId) * IRO[291].m1)) + (IRO[297].base + ((pfId) * IRO[297].m1)) #define XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(pfId) \ - (IRO[290].base + ((pfId) * IRO[290].m1)) + (IRO[296].base + ((pfId) * IRO[296].m1)) #define XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(pfId) \ - (IRO[289].base + ((pfId) * IRO[289].m1)) + (IRO[295].base + ((pfId) * IRO[295].m1)) #define XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(pfId) \ (IRO[44].base + ((pfId) * IRO[44].m1)) #define XSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \ @@ -231,16 +231,19 @@ #define XSTORM_SPQ_PROD_OFFSET(funcId) \ (IRO[31].base + ((funcId) * IRO[31].m1)) #define XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(portId) \ - (IRO[211].base + ((portId) * IRO[211].m1)) + (IRO[217].base + ((portId) * IRO[217].m1)) #define XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(portId) \ - (IRO[212].base + ((portId) * IRO[212].m1)) + (IRO[218].base + ((portId) * IRO[218].m1)) #define XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(pfId) \ - (IRO[214].base + (((pfId)>>1) * IRO[214].m1) + (((pfId)&1) * \ - IRO[214].m2)) + (IRO[220].base + (((pfId)>>1) * IRO[220].m1) + (((pfId)&1) * \ + IRO[220].m2)) 
#define XSTORM_VF_TO_PF_OFFSET(funcId) \ (IRO[48].base + ((funcId) * IRO[48].m1)) #define COMMON_ASM_INVALID_ASSERT_OPCODE 0x0 +/* eth hsi version */ +#define ETH_FP_HSI_VERSION (ETH_FP_HSI_VER_2) + /* Ethernet Ring parameters */ #define X_ETH_LOCAL_RING_SIZE 13 #define FIRST_BD_IN_PKT 0 @@ -356,6 +359,7 @@ #define XSEMI_CLK1_RESUL_CHIP (1e-3) #define SDM_TIMER_TICK_RESUL_CHIP (4 * (1e-6)) +#define TSDM_TIMER_TICK_RESUL_CHIP (1 * (1e-6)) /**** END DEFINES FOR TIMERS/CLOCKS RESOLUTIONS ****/ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h index 5ba8af50c84f..7ea04537ecbf 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h @@ -2876,8 +2876,8 @@ struct afex_stats { }; #define BCM_5710_FW_MAJOR_VERSION 7 -#define BCM_5710_FW_MINOR_VERSION 8 -#define BCM_5710_FW_REVISION_VERSION 19 +#define BCM_5710_FW_MINOR_VERSION 10 +#define BCM_5710_FW_REVISION_VERSION 51 #define BCM_5710_FW_ENGINEERING_VERSION 0 #define BCM_5710_FW_COMPILE_FLAGS 1 @@ -3446,6 +3446,7 @@ enum classify_rule { CLASSIFY_RULE_OPCODE_MAC, CLASSIFY_RULE_OPCODE_VLAN, CLASSIFY_RULE_OPCODE_PAIR, + CLASSIFY_RULE_OPCODE_VXLAN, MAX_CLASSIFY_RULE }; @@ -3475,7 +3476,8 @@ struct client_init_general_data { u8 func_id; u8 cos; u8 traffic_type; - u32 reserved0; + u8 fp_hsi_ver; + u8 reserved0[3]; }; @@ -3545,7 +3547,9 @@ struct client_init_rx_data { __le16 rx_cos_mask; __le16 silent_vlan_value; __le16 silent_vlan_mask; - __le32 reserved6[2]; + u8 handle_ptp_pkts_flg; + u8 reserved6[3]; + __le32 reserved7; }; /* @@ -3576,7 +3580,7 @@ struct client_init_tx_data { u8 tunnel_lso_inc_ip_id; u8 refuse_outband_vlan_flg; u8 tunnel_non_lso_pcsum_location; - u8 reserved1; + u8 tunnel_non_lso_outer_ip_csum_location; }; /* @@ -3614,7 +3618,9 @@ struct client_update_ramrod_data { u8 refuse_outband_vlan_change_flg; u8 tx_switching_flg; u8 tx_switching_change_flg; - __le32 reserved1; + u8 handle_ptp_pkts_flg; + u8 handle_ptp_pkts_change_flg; + __le16 reserved1; __le32 echo; }; @@ -3634,6 +3640,11 @@ struct double_regpair { u32 regpair1_hi; }; +/* 2nd parse bd type used in ethernet tx BDs */ +enum eth_2nd_parse_bd_type { + ETH_2ND_PARSE_BD_TYPE_LSO_TUNNEL, + MAX_ETH_2ND_PARSE_BD_TYPE +}; /* * Ethernet address typesm used in ethernet tx BDs @@ -3719,12 +3730,25 @@ struct eth_classify_vlan_cmd { }; /* + * Command for adding/removing a VXLAN classification rule + */ +struct eth_classify_vxlan_cmd { + struct eth_classify_cmd_header header; + __le32 vni; + __le16 inner_mac_lsb; + __le16 inner_mac_mid; + __le16 inner_mac_msb; + __le16 reserved1; +}; + +/* * union for eth classification rule */ union eth_classify_rule_cmd { struct eth_classify_mac_cmd mac; struct eth_classify_vlan_cmd vlan; struct eth_classify_pair_cmd pair; + struct eth_classify_vxlan_cmd vxlan; }; /* @@ -3830,8 +3854,10 @@ struct eth_fast_path_rx_cqe { #define ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG_SHIFT 4 #define ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG (0x1<<5) #define ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG_SHIFT 5 -#define ETH_FAST_PATH_RX_CQE_RESERVED0 (0x3<<6) -#define ETH_FAST_PATH_RX_CQE_RESERVED0_SHIFT 6 +#define ETH_FAST_PATH_RX_CQE_PTP_PKT (0x1<<6) +#define ETH_FAST_PATH_RX_CQE_PTP_PKT_SHIFT 6 +#define ETH_FAST_PATH_RX_CQE_RESERVED0 (0x1<<7) +#define ETH_FAST_PATH_RX_CQE_RESERVED0_SHIFT 7 u8 status_flags; #define ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE (0x7<<0) #define ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE_SHIFT 0 @@ -3902,6 +3928,13 @@ struct eth_filter_rules_ramrod_data { 
struct eth_filter_rules_cmd rules[FILTER_RULES_COUNT]; }; +/* Hsi version */ +enum eth_fp_hsi_ver { + ETH_FP_HSI_VER_0, + ETH_FP_HSI_VER_1, + ETH_FP_HSI_VER_2, + MAX_ETH_FP_HSI_VER +}; /* * parameters for eth classification configuration ramrod @@ -3958,20 +3991,28 @@ struct eth_tunnel_data { __le16 dst_mid; #endif #if defined(__BIG_ENDIAN) - __le16 reserved0; + __le16 fw_ip_hdr_csum; __le16 dst_hi; #elif defined(__LITTLE_ENDIAN) __le16 dst_hi; - __le16 reserved0; + __le16 fw_ip_hdr_csum; #endif #if defined(__BIG_ENDIAN) - u8 reserved1; + u8 flags; +#define ETH_TUNNEL_DATA_IP_HDR_TYPE_OUTER (0x1<<0) +#define ETH_TUNNEL_DATA_IP_HDR_TYPE_OUTER_SHIFT 0 +#define ETH_TUNNEL_DATA_RESERVED (0x7F<<1) +#define ETH_TUNNEL_DATA_RESERVED_SHIFT 1 u8 ip_hdr_start_inner_w; __le16 pseudo_csum; #elif defined(__LITTLE_ENDIAN) __le16 pseudo_csum; u8 ip_hdr_start_inner_w; - u8 reserved1; + u8 flags; +#define ETH_TUNNEL_DATA_IP_HDR_TYPE_OUTER (0x1<<0) +#define ETH_TUNNEL_DATA_IP_HDR_TYPE_OUTER_SHIFT 0 +#define ETH_TUNNEL_DATA_RESERVED (0x7F<<1) +#define ETH_TUNNEL_DATA_RESERVED_SHIFT 1 #endif }; @@ -4059,31 +4100,41 @@ enum eth_rss_mode { */ struct eth_rss_update_ramrod_data { u8 rss_engine_id; - u8 capabilities; + u8 rss_mode; + __le16 capabilities; #define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY (0x1<<0) #define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY_SHIFT 0 #define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY (0x1<<1) #define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY_SHIFT 1 #define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY (0x1<<2) #define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY_SHIFT 2 -#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY (0x1<<3) -#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY_SHIFT 3 -#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY (0x1<<4) -#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY_SHIFT 4 -#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY (0x1<<5) -#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY_SHIFT 5 -#define ETH_RSS_UPDATE_RAMROD_DATA_EN_5_TUPLE_CAPABILITY (0x1<<6) -#define ETH_RSS_UPDATE_RAMROD_DATA_EN_5_TUPLE_CAPABILITY_SHIFT 6 -#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY (0x1<<7) -#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY_SHIFT 7 +#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_VXLAN_CAPABILITY (0x1<<3) +#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_VXLAN_CAPABILITY_SHIFT 3 +#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY (0x1<<4) +#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY_SHIFT 4 +#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY (0x1<<5) +#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY_SHIFT 5 +#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY (0x1<<6) +#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY_SHIFT 6 +#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_VXLAN_CAPABILITY (0x1<<7) +#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_VXLAN_CAPABILITY_SHIFT 7 +#define ETH_RSS_UPDATE_RAMROD_DATA_EN_5_TUPLE_CAPABILITY (0x1<<8) +#define ETH_RSS_UPDATE_RAMROD_DATA_EN_5_TUPLE_CAPABILITY_SHIFT 8 +#define ETH_RSS_UPDATE_RAMROD_DATA_NVGRE_KEY_ENTROPY_CAPABILITY (0x1<<9) +#define ETH_RSS_UPDATE_RAMROD_DATA_NVGRE_KEY_ENTROPY_CAPABILITY_SHIFT 9 +#define ETH_RSS_UPDATE_RAMROD_DATA_GRE_INNER_HDRS_CAPABILITY (0x1<<10) +#define ETH_RSS_UPDATE_RAMROD_DATA_GRE_INNER_HDRS_CAPABILITY_SHIFT 10 +#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY (0x1<<11) +#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY_SHIFT 11 +#define ETH_RSS_UPDATE_RAMROD_DATA_RESERVED (0xF<<12) +#define ETH_RSS_UPDATE_RAMROD_DATA_RESERVED_SHIFT 12 u8 
rss_result_mask; - u8 rss_mode; - __le16 udp_4tuple_dst_port_mask; - __le16 udp_4tuple_dst_port_value; + u8 reserved3; + __le16 reserved4; u8 indirection_table[T_ETH_INDIRECTION_TABLE_SIZE]; __le32 rss_key[T_ETH_RSS_KEY]; __le32 echo; - __le32 reserved3; + __le32 reserved5; }; @@ -4255,10 +4306,10 @@ enum eth_tunnel_lso_inc_ip_id { /* In case tunnel exist and L4 checksum offload, * the pseudo checksum location, on packet or on BD. */ -enum eth_tunnel_non_lso_pcsum_location { - PCSUM_ON_PKT, - PCSUM_ON_BD, - MAX_ETH_TUNNEL_NON_LSO_PCSUM_LOCATION +enum eth_tunnel_non_lso_csum_location { + CSUM_ON_PKT, + CSUM_ON_BD, + MAX_ETH_TUNNEL_NON_LSO_CSUM_LOCATION }; /* @@ -4305,8 +4356,10 @@ struct eth_tx_start_bd { __le16 vlan_or_ethertype; struct eth_tx_bd_flags bd_flags; u8 general_data; -#define ETH_TX_START_BD_HDR_NBDS (0xF<<0) +#define ETH_TX_START_BD_HDR_NBDS (0x7<<0) #define ETH_TX_START_BD_HDR_NBDS_SHIFT 0 +#define ETH_TX_START_BD_NO_ADDED_TAGS (0x1<<3) +#define ETH_TX_START_BD_NO_ADDED_TAGS_SHIFT 3 #define ETH_TX_START_BD_FORCE_VLAN_MODE (0x1<<4) #define ETH_TX_START_BD_FORCE_VLAN_MODE_SHIFT 4 #define ETH_TX_START_BD_PARSE_NBDS (0x3<<5) @@ -4382,8 +4435,8 @@ struct eth_tx_parse_2nd_bd { __le16 global_data; #define ETH_TX_PARSE_2ND_BD_IP_HDR_START_OUTER_W (0xF<<0) #define ETH_TX_PARSE_2ND_BD_IP_HDR_START_OUTER_W_SHIFT 0 -#define ETH_TX_PARSE_2ND_BD_IP_HDR_TYPE_OUTER (0x1<<4) -#define ETH_TX_PARSE_2ND_BD_IP_HDR_TYPE_OUTER_SHIFT 4 +#define ETH_TX_PARSE_2ND_BD_RESERVED0 (0x1<<4) +#define ETH_TX_PARSE_2ND_BD_RESERVED0_SHIFT 4 #define ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN (0x1<<5) #define ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT 5 #define ETH_TX_PARSE_2ND_BD_NS_FLG (0x1<<6) @@ -4392,9 +4445,14 @@ struct eth_tx_parse_2nd_bd { #define ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST_SHIFT 7 #define ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W (0x1F<<8) #define ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT 8 -#define ETH_TX_PARSE_2ND_BD_RESERVED0 (0x7<<13) -#define ETH_TX_PARSE_2ND_BD_RESERVED0_SHIFT 13 - __le16 reserved1; +#define ETH_TX_PARSE_2ND_BD_RESERVED1 (0x7<<13) +#define ETH_TX_PARSE_2ND_BD_RESERVED1_SHIFT 13 + u8 bd_type; +#define ETH_TX_PARSE_2ND_BD_TYPE (0xF<<0) +#define ETH_TX_PARSE_2ND_BD_TYPE_SHIFT 0 +#define ETH_TX_PARSE_2ND_BD_RESERVED2 (0xF<<4) +#define ETH_TX_PARSE_2ND_BD_RESERVED2_SHIFT 4 + u8 reserved3; u8 tcp_flags; #define ETH_TX_PARSE_2ND_BD_FIN_FLG (0x1<<0) #define ETH_TX_PARSE_2ND_BD_FIN_FLG_SHIFT 0 @@ -4412,7 +4470,7 @@ struct eth_tx_parse_2nd_bd { #define ETH_TX_PARSE_2ND_BD_ECE_FLG_SHIFT 6 #define ETH_TX_PARSE_2ND_BD_CWR_FLG (0x1<<7) #define ETH_TX_PARSE_2ND_BD_CWR_FLG_SHIFT 7 - u8 reserved2; + u8 reserved4; u8 tunnel_udp_hdr_start_w; u8 fw_ip_hdr_to_payload_w; __le16 fw_ip_csum_wo_len_flags_frag; @@ -5200,10 +5258,18 @@ struct function_start_data { u8 path_id; u8 network_cos_mode; u8 dmae_cmd_id; - u8 gre_tunnel_mode; - u8 gre_tunnel_rss; - u8 nvgre_clss_en; - __le16 reserved1[2]; + u8 tunnel_mode; + u8 gre_tunnel_type; + u8 tunn_clss_en; + u8 inner_gre_rss_en; + u8 sd_accept_mf_clss_fail; + __le16 vxlan_dst_port; + __le16 sd_accept_mf_clss_fail_ethtype; + __le16 sd_vlan_eth_type; + u8 sd_vlan_force_pri_flg; + u8 sd_vlan_force_pri_val; + u8 sd_accept_mf_clss_fail_match_ethtype; + u8 no_added_tags; }; struct function_update_data { @@ -5220,12 +5286,20 @@ struct function_update_data { u8 tx_switch_suspend_change_flg; u8 tx_switch_suspend; u8 echo; + u8 update_tunn_cfg_flg; + u8 tunnel_mode; + u8 gre_tunnel_type; + u8 tunn_clss_en; + u8 inner_gre_rss_en; + __le16 vxlan_dst_port; + u8 
sd_vlan_force_pri_change_flg; + u8 sd_vlan_force_pri_flg; + u8 sd_vlan_force_pri_val; + u8 sd_vlan_tag_change_flg; + u8 sd_vlan_eth_type_change_flg; u8 reserved1; - u8 update_gre_cfg_flg; - u8 gre_tunnel_mode; - u8 gre_tunnel_rss; - u8 nvgre_clss_en; - u32 reserved3; + __le16 sd_vlan_tag; + __le16 sd_vlan_eth_type; }; /* @@ -5254,17 +5328,9 @@ struct fw_version { #define __FW_VERSION_RESERVED_SHIFT 4 }; -/* GRE RSS Mode */ -enum gre_rss_mode { - GRE_OUTER_HEADERS_RSS, - GRE_INNER_HEADERS_RSS, - NVGRE_KEY_ENTROPY_RSS, - MAX_GRE_RSS_MODE -}; /* GRE Tunnel Mode */ enum gre_tunnel_type { - NO_GRE_TUNNEL, NVGRE_TUNNEL, L2GRE_TUNNEL, IPGRE_TUNNEL, @@ -5437,6 +5503,7 @@ enum ip_ver { * Malicious VF error ID */ enum malicious_vf_error_id { + MALICIOUS_VF_NO_ERROR, VF_PF_CHANNEL_NOT_READY, ETH_ILLEGAL_BD_LENGTHS, ETH_PACKET_TOO_SHORT, @@ -5597,6 +5664,16 @@ struct protocol_common_spe { union protocol_common_specific_data data; }; +/* The data for the Set Timesync Ramrod */ +struct set_timesync_ramrod_data { + u8 drift_adjust_cmd; + u8 offset_cmd; + u8 add_sub_drift_adjust_value; + u8 drift_adjust_value; + u32 drift_adjust_period; + struct regpair offset_delta; +}; + /* * The send queue element */ @@ -5719,10 +5796,38 @@ struct tstorm_vf_zone_data { struct regpair reserved; }; +/* Add or Subtract Value for Set Timesync Ramrod */ +enum ts_add_sub_value { + TS_SUB_VALUE, + TS_ADD_VALUE, + MAX_TS_ADD_SUB_VALUE +}; -/* - * zone A per-queue data - */ +/* Drift-Adjust Commands for Set Timesync Ramrod */ +enum ts_drift_adjust_cmd { + TS_DRIFT_ADJUST_KEEP, + TS_DRIFT_ADJUST_SET, + TS_DRIFT_ADJUST_RESET, + MAX_TS_DRIFT_ADJUST_CMD +}; + +/* Offset Commands for Set Timesync Ramrod */ +enum ts_offset_cmd { + TS_OFFSET_KEEP, + TS_OFFSET_INC, + TS_OFFSET_DEC, + MAX_TS_OFFSET_CMD +}; + +/* Tunnel Mode */ +enum tunnel_mode { + TUNN_MODE_NONE, + TUNN_MODE_VXLAN, + TUNN_MODE_GRE, + MAX_TUNNEL_MODE +}; + + /* zone A per-queue data */ struct ustorm_queue_zone_data { struct ustorm_eth_rx_producers eth_rx_producers; struct regpair reserved[3]; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index c13364b6cc19..07cfe33798a4 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -63,7 +63,6 @@ #include "bnx2x_vfpf.h" #include "bnx2x_dcb.h" #include "bnx2x_sp.h" - #include <linux/firmware.h> #include "bnx2x_fw_file_hdr.h" /* FW files */ @@ -290,6 +289,8 @@ static int bnx2x_set_storm_rx_mode(struct bnx2x *bp); * General service functions ****************************************************************************/ +static int bnx2x_hwtstamp_ioctl(struct bnx2x *bp, struct ifreq *ifr); + static void __storm_memset_dma_mapping(struct bnx2x *bp, u32 addr, dma_addr_t mapping) { @@ -523,6 +524,7 @@ int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae, * as long as this code is called both from syscall context and * from ndo_set_rx_mode() flow that may be called from BH. 
*/ + spin_lock_bh(&bp->dmae_lock); /* reset completion */ @@ -551,7 +553,9 @@ int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae, } unlock: + spin_unlock_bh(&bp->dmae_lock); + return rc; } @@ -646,119 +650,98 @@ static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr, bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len); } +enum storms { + XSTORM, + TSTORM, + CSTORM, + USTORM, + MAX_STORMS +}; + +#define STORMS_NUM 4 +#define REGS_IN_ENTRY 4 + +static inline int bnx2x_get_assert_list_entry(struct bnx2x *bp, + enum storms storm, + int entry) +{ + switch (storm) { + case XSTORM: + return XSTORM_ASSERT_LIST_OFFSET(entry); + case TSTORM: + return TSTORM_ASSERT_LIST_OFFSET(entry); + case CSTORM: + return CSTORM_ASSERT_LIST_OFFSET(entry); + case USTORM: + return USTORM_ASSERT_LIST_OFFSET(entry); + case MAX_STORMS: + default: + BNX2X_ERR("unknown storm\n"); + } + return -EINVAL; +} + static int bnx2x_mc_assert(struct bnx2x *bp) { char last_idx; - int i, rc = 0; - u32 row0, row1, row2, row3; - - /* XSTORM */ - last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM + - XSTORM_ASSERT_LIST_INDEX_OFFSET); - if (last_idx) - BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx); - - /* print the asserts */ - for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) { - - row0 = REG_RD(bp, BAR_XSTRORM_INTMEM + - XSTORM_ASSERT_LIST_OFFSET(i)); - row1 = REG_RD(bp, BAR_XSTRORM_INTMEM + - XSTORM_ASSERT_LIST_OFFSET(i) + 4); - row2 = REG_RD(bp, BAR_XSTRORM_INTMEM + - XSTORM_ASSERT_LIST_OFFSET(i) + 8); - row3 = REG_RD(bp, BAR_XSTRORM_INTMEM + - XSTORM_ASSERT_LIST_OFFSET(i) + 12); - - if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { - BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n", - i, row3, row2, row1, row0); - rc++; - } else { - break; - } - } - - /* TSTORM */ - last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM + - TSTORM_ASSERT_LIST_INDEX_OFFSET); - if (last_idx) - BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx); - - /* print the asserts */ - for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) { - - row0 = REG_RD(bp, BAR_TSTRORM_INTMEM + - TSTORM_ASSERT_LIST_OFFSET(i)); - row1 = REG_RD(bp, BAR_TSTRORM_INTMEM + - TSTORM_ASSERT_LIST_OFFSET(i) + 4); - row2 = REG_RD(bp, BAR_TSTRORM_INTMEM + - TSTORM_ASSERT_LIST_OFFSET(i) + 8); - row3 = REG_RD(bp, BAR_TSTRORM_INTMEM + - TSTORM_ASSERT_LIST_OFFSET(i) + 12); - - if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { - BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n", - i, row3, row2, row1, row0); - rc++; - } else { - break; - } - } + int i, j, rc = 0; + enum storms storm; + u32 regs[REGS_IN_ENTRY]; + u32 bar_storm_intmem[STORMS_NUM] = { + BAR_XSTRORM_INTMEM, + BAR_TSTRORM_INTMEM, + BAR_CSTRORM_INTMEM, + BAR_USTRORM_INTMEM + }; + u32 storm_assert_list_index[STORMS_NUM] = { + XSTORM_ASSERT_LIST_INDEX_OFFSET, + TSTORM_ASSERT_LIST_INDEX_OFFSET, + CSTORM_ASSERT_LIST_INDEX_OFFSET, + USTORM_ASSERT_LIST_INDEX_OFFSET + }; + char *storms_string[STORMS_NUM] = { + "XSTORM", + "TSTORM", + "CSTORM", + "USTORM" + }; - /* CSTORM */ - last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM + - CSTORM_ASSERT_LIST_INDEX_OFFSET); - if (last_idx) - BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx); - - /* print the asserts */ - for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) { - - row0 = REG_RD(bp, BAR_CSTRORM_INTMEM + - CSTORM_ASSERT_LIST_OFFSET(i)); - row1 = REG_RD(bp, BAR_CSTRORM_INTMEM + - CSTORM_ASSERT_LIST_OFFSET(i) + 4); - row2 = REG_RD(bp, BAR_CSTRORM_INTMEM + - CSTORM_ASSERT_LIST_OFFSET(i) + 8); - row3 = REG_RD(bp, BAR_CSTRORM_INTMEM 
+ - CSTORM_ASSERT_LIST_OFFSET(i) + 12); - - if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { - BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n", - i, row3, row2, row1, row0); - rc++; - } else { - break; + for (storm = XSTORM; storm < MAX_STORMS; storm++) { + last_idx = REG_RD8(bp, bar_storm_intmem[storm] + + storm_assert_list_index[storm]); + if (last_idx) + BNX2X_ERR("%s_ASSERT_LIST_INDEX 0x%x\n", + storms_string[storm], last_idx); + + /* print the asserts */ + for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) { + /* read a single assert entry */ + for (j = 0; j < REGS_IN_ENTRY; j++) + regs[j] = REG_RD(bp, bar_storm_intmem[storm] + + bnx2x_get_assert_list_entry(bp, + storm, + i) + + sizeof(u32) * j); + + /* log entry if it contains a valid assert */ + if (regs[0] != COMMON_ASM_INVALID_ASSERT_OPCODE) { + BNX2X_ERR("%s_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n", + storms_string[storm], i, regs[3], + regs[2], regs[1], regs[0]); + rc++; + } else { + break; + } } } - /* USTORM */ - last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM + - USTORM_ASSERT_LIST_INDEX_OFFSET); - if (last_idx) - BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx); - - /* print the asserts */ - for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) { - - row0 = REG_RD(bp, BAR_USTRORM_INTMEM + - USTORM_ASSERT_LIST_OFFSET(i)); - row1 = REG_RD(bp, BAR_USTRORM_INTMEM + - USTORM_ASSERT_LIST_OFFSET(i) + 4); - row2 = REG_RD(bp, BAR_USTRORM_INTMEM + - USTORM_ASSERT_LIST_OFFSET(i) + 8); - row3 = REG_RD(bp, BAR_USTRORM_INTMEM + - USTORM_ASSERT_LIST_OFFSET(i) + 12); - - if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { - BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n", - i, row3, row2, row1, row0); - rc++; - } else { - break; - } - } + BNX2X_ERR("Chip Revision: %s, FW Version: %d_%d_%d\n", + CHIP_IS_E1(bp) ? "everest1" : + CHIP_IS_E1H(bp) ? "everest1h" : + CHIP_IS_E2(bp) ? 
"everest2" : "everest3", + BCM_5710_FW_MAJOR_VERSION, + BCM_5710_FW_MINOR_VERSION, + BCM_5710_FW_REVISION_VERSION); return rc; } @@ -983,6 +966,12 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int) u32 *sb_data_p; struct bnx2x_fp_txdata txdata; + if (!bp->fp) + break; + + if (!fp->rx_cons_sb) + continue; + /* Rx */ BNX2X_ERR("fp%d: rx_bd_prod(0x%x) rx_bd_cons(0x%x) rx_comp_prod(0x%x) rx_comp_cons(0x%x) *rx_cons_sb(0x%x)\n", i, fp->rx_bd_prod, fp->rx_bd_cons, @@ -995,7 +984,14 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int) /* Tx */ for_each_cos_in_tx_queue(fp, cos) { + if (!fp->txdata_ptr) + break; + txdata = *fp->txdata_ptr[cos]; + + if (!txdata.tx_cons_sb) + continue; + BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x) tx_bd_prod(0x%x) tx_bd_cons(0x%x) *tx_cons_sb(0x%x)\n", i, txdata.tx_pkt_prod, txdata.tx_pkt_cons, txdata.tx_bd_prod, @@ -1097,6 +1093,12 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int) for_each_valid_rx_queue(bp, i) { struct bnx2x_fastpath *fp = &bp->fp[i]; + if (!bp->fp) + break; + + if (!fp->rx_cons_sb) + continue; + start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10); end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503); for (j = start; j != end; j = RX_BD(j + 1)) { @@ -1130,9 +1132,19 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int) /* Tx */ for_each_valid_tx_queue(bp, i) { struct bnx2x_fastpath *fp = &bp->fp[i]; + + if (!bp->fp) + break; + for_each_cos_in_tx_queue(fp, cos) { struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos]; + if (!fp->txdata_ptr) + break; + + if (!txdata.tx_cons_sb) + continue; + start = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) - 10); end = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) + 245); for (j = start; j != end; j = TX_BD(j + 1)) { @@ -2071,8 +2083,6 @@ int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port) else value = 0; - DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value); - return value; } @@ -4678,7 +4688,7 @@ static bool bnx2x_check_blocks_with_parity2(struct bnx2x *bp, u32 sig, for (i = 0; sig; i++) { cur_bit = (0x1UL << i); if (sig & cur_bit) { - res |= true; /* Each bit is real error! */ + res = true; /* Each bit is real error! */ if (print) { switch (cur_bit) { case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR: @@ -4757,21 +4767,21 @@ static bool bnx2x_check_blocks_with_parity3(struct bnx2x *bp, u32 sig, _print_next_block((*par_num)++, "MCP ROM"); *global = true; - res |= true; + res = true; break; case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY: if (print) _print_next_block((*par_num)++, "MCP UMP RX"); *global = true; - res |= true; + res = true; break; case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY: if (print) _print_next_block((*par_num)++, "MCP UMP TX"); *global = true; - res |= true; + res = true; break; case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY: if (print) @@ -4803,7 +4813,7 @@ static bool bnx2x_check_blocks_with_parity4(struct bnx2x *bp, u32 sig, for (i = 0; sig; i++) { cur_bit = (0x1UL << i); if (sig & cur_bit) { - res |= true; /* Each bit is real error! */ + res = true; /* Each bit is real error! 
*/ if (print) { switch (cur_bit) { case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR: @@ -5452,6 +5462,14 @@ static void bnx2x_eq_int(struct bnx2x *bp) break; goto next_spqe; + + case EVENT_RING_OPCODE_SET_TIMESYNC: + DP(BNX2X_MSG_SP | BNX2X_MSG_PTP, + "got set_timesync ramrod completion\n"); + if (f_obj->complete_cmd(bp, f_obj, + BNX2X_F_CMD_SET_TIMESYNC)) + break; + goto next_spqe; } switch (opcode | bp->state) { @@ -6102,7 +6120,7 @@ static int bnx2x_fill_accept_flags(struct bnx2x *bp, u32 rx_mode, } /* Set ACCEPT_ANY_VLAN as we do not enable filtering by VLAN */ - if (bp->rx_mode != BNX2X_RX_MODE_NONE) { + if (rx_mode != BNX2X_RX_MODE_NONE) { __set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags); __set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags); } @@ -7647,7 +7665,11 @@ static inline int bnx2x_func_switch_update(struct bnx2x *bp, int suspend) func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE; /* Function parameters */ - switch_update_params->suspend = suspend; + __set_bit(BNX2X_F_UPDATE_TX_SWITCH_SUSPEND_CHNG, + &switch_update_params->changes); + if (suspend) + __set_bit(BNX2X_F_UPDATE_TX_SWITCH_SUSPEND, + &switch_update_params->changes); rc = bnx2x_func_state_change(bp, &func_params); @@ -9010,7 +9032,7 @@ static int bnx2x_func_wait_started(struct bnx2x *bp) struct bnx2x_func_state_params func_params = {NULL}; DP(NETIF_MSG_IFDOWN, - "Hmmm... Unexpected function state! Forcing STARTED-->TX_ST0PPED-->STARTED\n"); + "Hmmm... Unexpected function state! Forcing STARTED-->TX_STOPPED-->STARTED\n"); func_params.f_obj = &bp->func_obj; __set_bit(RAMROD_DRV_CLR_ONLY, @@ -9029,6 +9051,48 @@ static int bnx2x_func_wait_started(struct bnx2x *bp) return 0; } +static void bnx2x_disable_ptp(struct bnx2x *bp) +{ + int port = BP_PORT(bp); + + /* Disable sending PTP packets to host */ + REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_TO_HOST : + NIG_REG_P0_LLH_PTP_TO_HOST, 0x0); + + /* Reset PTP event detection rules */ + REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK : + NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x7FF); + REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK : + NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3FFF); + REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_PARAM_MASK : + NIG_REG_P0_TLLH_PTP_PARAM_MASK, 0x7FF); + REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_RULE_MASK : + NIG_REG_P0_TLLH_PTP_RULE_MASK, 0x3FFF); + + /* Disable the PTP feature */ + REG_WR(bp, port ? NIG_REG_P1_PTP_EN : + NIG_REG_P0_PTP_EN, 0x0); +} + +/* Called during unload, to stop PTP-related stuff */ +void bnx2x_stop_ptp(struct bnx2x *bp) +{ + /* Cancel PTP work queue. Should be done after the Tx queues are + * drained to prevent additional scheduling. + */ + cancel_work_sync(&bp->ptp_task); + + if (bp->ptp_tx_skb) { + dev_kfree_skb_any(bp->ptp_tx_skb); + bp->ptp_tx_skb = NULL; + } + + /* Disable PTP in HW */ + bnx2x_disable_ptp(bp); + + DP(BNX2X_MSG_PTP, "PTP stop ended successfully\n"); +} + void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link) { int port = BP_PORT(bp); @@ -9147,6 +9211,13 @@ unload_error: #endif } + /* stop_ptp should be after the Tx queues are drained to prevent + * scheduling to the cancelled PTP work queue. It should also be after + * function stop ramrod is sent, since as part of this ramrod FW access + * PTP registers. 
+ */ + bnx2x_stop_ptp(bp); + /* Disable HW interrupts, NAPI */ bnx2x_netif_stop(bp, 1); /* Delete all NAPI objects */ @@ -12019,6 +12090,9 @@ static int bnx2x_init_bp(struct bnx2x *bp) bp->dump_preset_idx = 1; + if (CHIP_IS_E3B0(bp)) + bp->flags |= PTP_SUPPORTED; + return rc; } @@ -12351,13 +12425,17 @@ static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) struct bnx2x *bp = netdev_priv(dev); struct mii_ioctl_data *mdio = if_mii(ifr); - DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n", - mdio->phy_id, mdio->reg_num, mdio->val_in); - if (!netif_running(dev)) return -EAGAIN; - return mdio_mii_ioctl(&bp->mdio, mdio, cmd); + switch (cmd) { + case SIOCSHWTSTAMP: + return bnx2x_hwtstamp_ioctl(bp, ifr); + default: + DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n", + mdio->phy_id, mdio->reg_num, mdio->val_in); + return mdio_mii_ioctl(&bp->mdio, mdio, cmd); + } } #ifdef CONFIG_NET_POLL_CONTROLLER @@ -13001,6 +13079,191 @@ static int set_is_vf(int chip_id) } } +/* nig_tsgen registers relative address */ +#define tsgen_ctrl 0x0 +#define tsgen_freecount 0x10 +#define tsgen_synctime_t0 0x20 +#define tsgen_offset_t0 0x28 +#define tsgen_drift_t0 0x30 +#define tsgen_synctime_t1 0x58 +#define tsgen_offset_t1 0x60 +#define tsgen_drift_t1 0x68 + +/* FW workaround for setting drift */ +static int bnx2x_send_update_drift_ramrod(struct bnx2x *bp, int drift_dir, + int best_val, int best_period) +{ + struct bnx2x_func_state_params func_params = {NULL}; + struct bnx2x_func_set_timesync_params *set_timesync_params = + &func_params.params.set_timesync; + + /* Prepare parameters for function state transitions */ + __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); + __set_bit(RAMROD_RETRY, &func_params.ramrod_flags); + + func_params.f_obj = &bp->func_obj; + func_params.cmd = BNX2X_F_CMD_SET_TIMESYNC; + + /* Function parameters */ + set_timesync_params->drift_adjust_cmd = TS_DRIFT_ADJUST_SET; + set_timesync_params->offset_cmd = TS_OFFSET_KEEP; + set_timesync_params->add_sub_drift_adjust_value = + drift_dir ? TS_ADD_VALUE : TS_SUB_VALUE; + set_timesync_params->drift_adjust_value = best_val; + set_timesync_params->drift_adjust_period = best_period; + + return bnx2x_func_state_change(bp, &func_params); +} + +static int bnx2x_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb) +{ + struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info); + int rc; + int drift_dir = 1; + int val, period, period1, period2, dif, dif1, dif2; + int best_dif = BNX2X_MAX_PHC_DRIFT, best_period = 0, best_val = 0; + + DP(BNX2X_MSG_PTP, "PTP adjfreq called, ppb = %d\n", ppb); + + if (!netif_running(bp->dev)) { + DP(BNX2X_MSG_PTP, + "PTP adjfreq called while the interface is down\n"); + return -EFAULT; + } + + if (ppb < 0) { + ppb = -ppb; + drift_dir = 0; + } + + if (ppb == 0) { + best_val = 1; + best_period = 0x1FFFFFF; + } else if (ppb >= BNX2X_MAX_PHC_DRIFT) { + best_val = 31; + best_period = 1; + } else { + /* Changed not to allow val = 8, 16, 24 as these values + * are not supported in workaround. + */ + for (val = 0; val <= 31; val++) { + if ((val & 0x7) == 0) + continue; + period1 = val * 1000000 / ppb; + period2 = period1 + 1; + if (period1 != 0) + dif1 = ppb - (val * 1000000 / period1); + else + dif1 = BNX2X_MAX_PHC_DRIFT; + if (dif1 < 0) + dif1 = -dif1; + dif2 = ppb - (val * 1000000 / period2); + if (dif2 < 0) + dif2 = -dif2; + dif = (dif1 < dif2) ? dif1 : dif2; + period = (dif1 < dif2) ? 
period1 : period2; + if (dif < best_dif) { + best_dif = dif; + best_val = val; + best_period = period; + } + } + } + + rc = bnx2x_send_update_drift_ramrod(bp, drift_dir, best_val, + best_period); + if (rc) { + BNX2X_ERR("Failed to set drift\n"); + return -EFAULT; + } + + DP(BNX2X_MSG_PTP, "Configured val = %d, period = %d\n", best_val, + best_period); + + return 0; +} + +static int bnx2x_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) +{ + struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info); + u64 now; + + DP(BNX2X_MSG_PTP, "PTP adjtime called, delta = %llx\n", delta); + + now = timecounter_read(&bp->timecounter); + now += delta; + /* Re-init the timecounter */ + timecounter_init(&bp->timecounter, &bp->cyclecounter, now); + + return 0; +} + +static int bnx2x_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts) +{ + struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info); + u64 ns; + u32 remainder; + + ns = timecounter_read(&bp->timecounter); + + DP(BNX2X_MSG_PTP, "PTP gettime called, ns = %llu\n", ns); + + ts->tv_sec = div_u64_rem(ns, 1000000000ULL, &remainder); + ts->tv_nsec = remainder; + + return 0; +} + +static int bnx2x_ptp_settime(struct ptp_clock_info *ptp, + const struct timespec *ts) +{ + struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info); + u64 ns; + + ns = ts->tv_sec * 1000000000ULL; + ns += ts->tv_nsec; + + DP(BNX2X_MSG_PTP, "PTP settime called, ns = %llu\n", ns); + + /* Re-init the timecounter */ + timecounter_init(&bp->timecounter, &bp->cyclecounter, ns); + + return 0; +} + +/* Enable (or disable) ancillary features of the phc subsystem */ +static int bnx2x_ptp_enable(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, int on) +{ + struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info); + + BNX2X_ERR("PHC ancillary features are not supported\n"); + return -ENOTSUPP; +} + +void bnx2x_register_phc(struct bnx2x *bp) +{ + /* Fill the ptp_clock_info struct and register PTP clock */ + bp->ptp_clock_info.owner = THIS_MODULE; + snprintf(bp->ptp_clock_info.name, 16, "%s", bp->dev->name); + bp->ptp_clock_info.max_adj = BNX2X_MAX_PHC_DRIFT; /* In PPB */ + bp->ptp_clock_info.n_alarm = 0; + bp->ptp_clock_info.n_ext_ts = 0; + bp->ptp_clock_info.n_per_out = 0; + bp->ptp_clock_info.pps = 0; + bp->ptp_clock_info.adjfreq = bnx2x_ptp_adjfreq; + bp->ptp_clock_info.adjtime = bnx2x_ptp_adjtime; + bp->ptp_clock_info.gettime = bnx2x_ptp_gettime; + bp->ptp_clock_info.settime = bnx2x_ptp_settime; + bp->ptp_clock_info.enable = bnx2x_ptp_enable; + + bp->ptp_clock = ptp_clock_register(&bp->ptp_clock_info, &bp->pdev->dev); + if (IS_ERR(bp->ptp_clock)) { + bp->ptp_clock = NULL; + BNX2X_ERR("PTP clock registration failed\n"); + } +} + static int bnx2x_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { @@ -13172,6 +13435,8 @@ static int bnx2x_init_one(struct pci_dev *pdev, "Unknown", dev->base_addr, bp->pdev->irq, dev->dev_addr); + bnx2x_register_phc(bp); + return 0; init_one_exit: @@ -13198,6 +13463,11 @@ static void __bnx2x_remove(struct pci_dev *pdev, struct bnx2x *bp, bool remove_netdev) { + if (bp->ptp_clock) { + ptp_clock_unregister(bp->ptp_clock); + bp->ptp_clock = NULL; + } + /* Delete storage MAC address */ if (!NO_FCOE(bp)) { rtnl_lock(); @@ -14173,3 +14443,332 @@ int bnx2x_pretend_func(struct bnx2x *bp, u16 pretend_func_val) REG_RD(bp, pretend_reg); return 0; } + +static void bnx2x_ptp_task(struct work_struct *work) +{ + struct bnx2x *bp = container_of(work, struct bnx2x, ptp_task); + int port = BP_PORT(bp); + 
u32 val_seq; + u64 timestamp, ns; + struct skb_shared_hwtstamps shhwtstamps; + + /* Read Tx timestamp registers */ + val_seq = REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_SEQID : + NIG_REG_P0_TLLH_PTP_BUF_SEQID); + if (val_seq & 0x10000) { + /* There is a valid timestamp value */ + timestamp = REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_TS_MSB : + NIG_REG_P0_TLLH_PTP_BUF_TS_MSB); + timestamp <<= 32; + timestamp |= REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_TS_LSB : + NIG_REG_P0_TLLH_PTP_BUF_TS_LSB); + /* Reset timestamp register to allow new timestamp */ + REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_SEQID : + NIG_REG_P0_TLLH_PTP_BUF_SEQID, 0x10000); + ns = timecounter_cyc2time(&bp->timecounter, timestamp); + + memset(&shhwtstamps, 0, sizeof(shhwtstamps)); + shhwtstamps.hwtstamp = ns_to_ktime(ns); + skb_tstamp_tx(bp->ptp_tx_skb, &shhwtstamps); + dev_kfree_skb_any(bp->ptp_tx_skb); + bp->ptp_tx_skb = NULL; + + DP(BNX2X_MSG_PTP, "Tx timestamp, timestamp cycles = %llu, ns = %llu\n", + timestamp, ns); + } else { + DP(BNX2X_MSG_PTP, "There is no valid Tx timestamp yet\n"); + /* Reschedule to keep checking for a valid timestamp value */ + schedule_work(&bp->ptp_task); + } +} + +void bnx2x_set_rx_ts(struct bnx2x *bp, struct sk_buff *skb) +{ + int port = BP_PORT(bp); + u64 timestamp, ns; + + timestamp = REG_RD(bp, port ? NIG_REG_P1_LLH_PTP_HOST_BUF_TS_MSB : + NIG_REG_P0_LLH_PTP_HOST_BUF_TS_MSB); + timestamp <<= 32; + timestamp |= REG_RD(bp, port ? NIG_REG_P1_LLH_PTP_HOST_BUF_TS_LSB : + NIG_REG_P0_LLH_PTP_HOST_BUF_TS_LSB); + + /* Reset timestamp register to allow new timestamp */ + REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_HOST_BUF_SEQID : + NIG_REG_P0_LLH_PTP_HOST_BUF_SEQID, 0x10000); + + ns = timecounter_cyc2time(&bp->timecounter, timestamp); + + skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns); + + DP(BNX2X_MSG_PTP, "Rx timestamp, timestamp cycles = %llu, ns = %llu\n", + timestamp, ns); +} + +/* Read the PHC */ +static cycle_t bnx2x_cyclecounter_read(const struct cyclecounter *cc) +{ + struct bnx2x *bp = container_of(cc, struct bnx2x, cyclecounter); + int port = BP_PORT(bp); + u32 wb_data[2]; + u64 phc_cycles; + + REG_RD_DMAE(bp, port ? 
NIG_REG_TIMESYNC_GEN_REG + tsgen_synctime_t1 : + NIG_REG_TIMESYNC_GEN_REG + tsgen_synctime_t0, wb_data, 2); + phc_cycles = wb_data[1]; + phc_cycles = (phc_cycles << 32) + wb_data[0]; + + DP(BNX2X_MSG_PTP, "PHC read cycles = %llu\n", phc_cycles); + + return phc_cycles; +} + +static void bnx2x_init_cyclecounter(struct bnx2x *bp) +{ + memset(&bp->cyclecounter, 0, sizeof(bp->cyclecounter)); + bp->cyclecounter.read = bnx2x_cyclecounter_read; + bp->cyclecounter.mask = CLOCKSOURCE_MASK(64); + bp->cyclecounter.shift = 1; + bp->cyclecounter.mult = 1; +} + +static int bnx2x_send_reset_timesync_ramrod(struct bnx2x *bp) +{ + struct bnx2x_func_state_params func_params = {NULL}; + struct bnx2x_func_set_timesync_params *set_timesync_params = + &func_params.params.set_timesync; + + /* Prepare parameters for function state transitions */ + __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); + __set_bit(RAMROD_RETRY, &func_params.ramrod_flags); + + func_params.f_obj = &bp->func_obj; + func_params.cmd = BNX2X_F_CMD_SET_TIMESYNC; + + /* Function parameters */ + set_timesync_params->drift_adjust_cmd = TS_DRIFT_ADJUST_RESET; + set_timesync_params->offset_cmd = TS_OFFSET_KEEP; + + return bnx2x_func_state_change(bp, &func_params); +} + +int bnx2x_enable_ptp_packets(struct bnx2x *bp) +{ + struct bnx2x_queue_state_params q_params; + int rc, i; + + /* send queue update ramrod to enable PTP packets */ + memset(&q_params, 0, sizeof(q_params)); + __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); + q_params.cmd = BNX2X_Q_CMD_UPDATE; + __set_bit(BNX2X_Q_UPDATE_PTP_PKTS_CHNG, + &q_params.params.update.update_flags); + __set_bit(BNX2X_Q_UPDATE_PTP_PKTS, + &q_params.params.update.update_flags); + + /* send the ramrod on all the queues of the PF */ + for_each_eth_queue(bp, i) { + struct bnx2x_fastpath *fp = &bp->fp[i]; + + /* Set the appropriate Queue object */ + q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj; + + /* Update the Queue state */ + rc = bnx2x_queue_state_change(bp, &q_params); + if (rc) { + BNX2X_ERR("Failed to enable PTP packets\n"); + return rc; + } + } + + return 0; +} + +int bnx2x_configure_ptp_filters(struct bnx2x *bp) +{ + int port = BP_PORT(bp); + int rc; + + if (!bp->hwtstamp_ioctl_called) + return 0; + + switch (bp->tx_type) { + case HWTSTAMP_TX_ON: + bp->flags |= TX_TIMESTAMPING_EN; + REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_PARAM_MASK : + NIG_REG_P0_TLLH_PTP_PARAM_MASK, 0x6AA); + REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_RULE_MASK : + NIG_REG_P0_TLLH_PTP_RULE_MASK, 0x3EEE); + break; + case HWTSTAMP_TX_ONESTEP_SYNC: + BNX2X_ERR("One-step timestamping is not supported\n"); + return -ERANGE; + } + + switch (bp->rx_filter) { + case HWTSTAMP_FILTER_NONE: + break; + case HWTSTAMP_FILTER_ALL: + case HWTSTAMP_FILTER_SOME: + bp->rx_filter = HWTSTAMP_FILTER_NONE; + break; + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + bp->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; + /* Initialize PTP detection for UDP/IPv4 events */ + REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK : + NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x7EE); + REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK : + NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3FFE); + break; + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT; + /* Initialize PTP detection for UDP/IPv4 or UDP/IPv6 events */ + REG_WR(bp, port ? 
NIG_REG_P1_LLH_PTP_PARAM_MASK : + NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x7EA); + REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK : + NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3FEE); + break; + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: + bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT; + /* Initialize PTP detection L2 events */ + REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK : + NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x6BF); + REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK : + NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3EFF); + + break; + case HWTSTAMP_FILTER_PTP_V2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; + /* Initialize PTP detection L2, UDP/IPv4 or UDP/IPv6 events */ + REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK : + NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x6AA); + REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK : + NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3EEE); + break; + } + + /* Indicate to FW that this PF expects recorded PTP packets */ + rc = bnx2x_enable_ptp_packets(bp); + if (rc) + return rc; + + /* Enable sending PTP packets to host */ + REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_TO_HOST : + NIG_REG_P0_LLH_PTP_TO_HOST, 0x1); + + return 0; +} + +static int bnx2x_hwtstamp_ioctl(struct bnx2x *bp, struct ifreq *ifr) +{ + struct hwtstamp_config config; + int rc; + + DP(BNX2X_MSG_PTP, "HWTSTAMP IOCTL called\n"); + + if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) + return -EFAULT; + + DP(BNX2X_MSG_PTP, "Requested tx_type: %d, requested rx_filters = %d\n", + config.tx_type, config.rx_filter); + + if (config.flags) { + BNX2X_ERR("config.flags is reserved for future use\n"); + return -EINVAL; + } + + bp->hwtstamp_ioctl_called = 1; + bp->tx_type = config.tx_type; + bp->rx_filter = config.rx_filter; + + rc = bnx2x_configure_ptp_filters(bp); + if (rc) + return rc; + + config.rx_filter = bp->rx_filter; + + return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? + -EFAULT : 0; +} + +/* Configrues HW for PTP */ +static int bnx2x_configure_ptp(struct bnx2x *bp) +{ + int rc, port = BP_PORT(bp); + u32 wb_data[2]; + + /* Reset PTP event detection rules - will be configured in the IOCTL */ + REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK : + NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x7FF); + REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK : + NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3FFF); + REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_PARAM_MASK : + NIG_REG_P0_TLLH_PTP_PARAM_MASK, 0x7FF); + REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_RULE_MASK : + NIG_REG_P0_TLLH_PTP_RULE_MASK, 0x3FFF); + + /* Disable PTP packets to host - will be configured in the IOCTL*/ + REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_TO_HOST : + NIG_REG_P0_LLH_PTP_TO_HOST, 0x0); + + /* Enable the PTP feature */ + REG_WR(bp, port ? NIG_REG_P1_PTP_EN : + NIG_REG_P0_PTP_EN, 0x3F); + + /* Enable the free-running counter */ + wb_data[0] = 0; + wb_data[1] = 0; + REG_WR_DMAE(bp, NIG_REG_TIMESYNC_GEN_REG + tsgen_ctrl, wb_data, 2); + + /* Reset drift register (offset register is not reset) */ + rc = bnx2x_send_reset_timesync_ramrod(bp); + if (rc) { + BNX2X_ERR("Failed to reset PHC drift register\n"); + return -EFAULT; + } + + /* Reset possibly old timestamps */ + REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_HOST_BUF_SEQID : + NIG_REG_P0_LLH_PTP_HOST_BUF_SEQID, 0x10000); + REG_WR(bp, port ? 
NIG_REG_P1_TLLH_PTP_BUF_SEQID : + NIG_REG_P0_TLLH_PTP_BUF_SEQID, 0x10000); + + return 0; +} + +/* Called during load, to initialize PTP-related stuff */ +void bnx2x_init_ptp(struct bnx2x *bp) +{ + int rc; + + /* Configure PTP in HW */ + rc = bnx2x_configure_ptp(bp); + if (rc) { + BNX2X_ERR("Stopping PTP initialization\n"); + return; + } + + /* Init work queue for Tx timestamping */ + INIT_WORK(&bp->ptp_task, bnx2x_ptp_task); + + /* Init cyclecounter and timecounter. This is done only in the first + * load. If done in every load, PTP application will fail when doing + * unload / load (e.g. MTU change) while it is running. + */ + if (!bp->timecounter_init_done) { + bnx2x_init_cyclecounter(bp); + timecounter_init(&bp->timecounter, &bp->cyclecounter, + ktime_to_ns(ktime_get_real())); + bp->timecounter_init_done = 1; + } + + DP(BNX2X_MSG_PTP, "PTP initialization ended successfully\n"); +} diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h index 2beb5430b876..b0779d773343 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h @@ -2182,6 +2182,45 @@ #define NIG_REG_P0_HWPFC_ENABLE 0x18078 #define NIG_REG_P0_LLH_FUNC_MEM2 0x18480 #define NIG_REG_P0_LLH_FUNC_MEM2_ENABLE 0x18440 +/* [RW 17] Packet TimeSync information that is buffered in 1-deep FIFOs for + * the host. Bits [15:0] return the sequence ID of the packet. Bit 16 + * indicates the validity of the data in the buffer. Writing a 1 to bit 16 + * will clear the buffer. + */ +#define NIG_REG_P0_LLH_PTP_HOST_BUF_SEQID 0x1875c +/* [R 32] Packet TimeSync information that is buffered in 1-deep FIFOs for + * the host. This location returns the lower 32 bits of timestamp value. + */ +#define NIG_REG_P0_LLH_PTP_HOST_BUF_TS_LSB 0x18754 +/* [R 32] Packet TimeSync information that is buffered in 1-deep FIFOs for + * the host. This location returns the upper 32 bits of timestamp value. + */ +#define NIG_REG_P0_LLH_PTP_HOST_BUF_TS_MSB 0x18758 +/* [RW 11] Mask register for the various parameters used in determining PTP + * packet presence. Set each bit to 1 to mask out the particular parameter. + * 0-IPv4 DA 0 of 224.0.1.129. 1-IPv4 DA 1 of 224.0.0.107. 2-IPv6 DA 0 of + * 0xFF0*:0:0:0:0:0:0:181. 3-IPv6 DA 1 of 0xFF02:0:0:0:0:0:0:6B. 4-UDP + * destination port 0 of 319. 5-UDP destination port 1 of 320. 6-MAC + * Ethertype 0 of 0x88F7. 7-configurable MAC Ethertype 1. 8-MAC DA 0 of + * 0x01-1B-19-00-00-00. 9-MAC DA 1 of 0x01-80-C2-00-00-0E. 10-configurable + * MAC DA 2. The reset default is set to mask out all parameters. + */ +#define NIG_REG_P0_LLH_PTP_PARAM_MASK 0x187a0 +/* [RW 14] Mask regiser for the rules used in detecting PTP packets. Set + * each bit to 1 to mask out that particular rule. 0-{IPv4 DA 0; UDP DP 0} . + * 1-{IPv4 DA 0; UDP DP 1} . 2-{IPv4 DA 1; UDP DP 0} . 3-{IPv4 DA 1; UDP DP + * 1} . 4-{IPv6 DA 0; UDP DP 0} . 5-{IPv6 DA 0; UDP DP 1} . 6-{IPv6 DA 1; + * UDP DP 0} . 7-{IPv6 DA 1; UDP DP 1} . 8-{MAC DA 0; Ethertype 0} . 9-{MAC + * DA 1; Ethertype 0} . 10-{MAC DA 0; Ethertype 1} . 11-{MAC DA 1; Ethertype + * 1} . 12-{MAC DA 2; Ethertype 0} . 13-{MAC DA 2; Ethertype 1} . The reset + * default is to mask out all of the rules. Note that rules 0-3 are for IPv4 + * packets only and require that the packet is IPv4 for the rules to match. + * Note that rules 4-7 are for IPv6 packets only and require that the packet + * is IPv6 for the rules to match. 
+ */ +#define NIG_REG_P0_LLH_PTP_RULE_MASK 0x187a4 +/* [RW 1] Set to 1 to enable PTP packets to be forwarded to the host. */ +#define NIG_REG_P0_LLH_PTP_TO_HOST 0x187ac /* [RW 1] Input enable for RX MAC interface. */ #define NIG_REG_P0_MAC_IN_EN 0x185ac /* [RW 1] Output enable for TX MAC interface */ @@ -2194,6 +2233,17 @@ * priority field is extracted from the outer-most VLAN in receive packet. * Only COS 0 and COS 1 are supported in E2. */ #define NIG_REG_P0_PKT_PRIORITY_TO_COS 0x18054 +/* [RW 6] Enable for TimeSync feature. Bits [2:0] are for RX side. Bits + * [5:3] are for TX side. Bit 0 enables TimeSync on RX side. Bit 1 enables + * V1 frame format in timesync event detection on RX side. Bit 2 enables V2 + * frame format in timesync event detection on RX side. Bit 3 enables + * TimeSync on TX side. Bit 4 enables V1 frame format in timesync event + * detection on TX side. Bit 5 enables V2 frame format in timesync event + * detection on TX side. Note that for HW to detect PTP packet and extract + * data from the packet, at least one of the version bits of that traffic + * direction has to be enabled. + */ +#define NIG_REG_P0_PTP_EN 0x18788 /* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 0. A * priority is mapped to COS 0 when the corresponding mask bit is 1. More * than one bit may be set; allowing multiple priorities to be mapped to one @@ -2300,7 +2350,46 @@ * Ethernet header. */ #define NIG_REG_P1_HDRS_AFTER_BASIC 0x1818c #define NIG_REG_P1_LLH_FUNC_MEM2 0x184c0 -#define NIG_REG_P1_LLH_FUNC_MEM2_ENABLE 0x18460 +#define NIG_REG_P1_LLH_FUNC_MEM2_ENABLE 0x18460a +/* [RW 17] Packet TimeSync information that is buffered in 1-deep FIFOs for + * the host. Bits [15:0] return the sequence ID of the packet. Bit 16 + * indicates the validity of the data in the buffer. Writing a 1 to bit 16 + * will clear the buffer. + */ +#define NIG_REG_P1_LLH_PTP_HOST_BUF_SEQID 0x18774 +/* [R 32] Packet TimeSync information that is buffered in 1-deep FIFOs for + * the host. This location returns the lower 32 bits of timestamp value. + */ +#define NIG_REG_P1_LLH_PTP_HOST_BUF_TS_LSB 0x1876c +/* [R 32] Packet TimeSync information that is buffered in 1-deep FIFOs for + * the host. This location returns the upper 32 bits of timestamp value. + */ +#define NIG_REG_P1_LLH_PTP_HOST_BUF_TS_MSB 0x18770 +/* [RW 11] Mask register for the various parameters used in determining PTP + * packet presence. Set each bit to 1 to mask out the particular parameter. + * 0-IPv4 DA 0 of 224.0.1.129. 1-IPv4 DA 1 of 224.0.0.107. 2-IPv6 DA 0 of + * 0xFF0*:0:0:0:0:0:0:181. 3-IPv6 DA 1 of 0xFF02:0:0:0:0:0:0:6B. 4-UDP + * destination port 0 of 319. 5-UDP destination port 1 of 320. 6-MAC + * Ethertype 0 of 0x88F7. 7-configurable MAC Ethertype 1. 8-MAC DA 0 of + * 0x01-1B-19-00-00-00. 9-MAC DA 1 of 0x01-80-C2-00-00-0E. 10-configurable + * MAC DA 2. The reset default is set to mask out all parameters. + */ +#define NIG_REG_P1_LLH_PTP_PARAM_MASK 0x187c8 +/* [RW 14] Mask regiser for the rules used in detecting PTP packets. Set + * each bit to 1 to mask out that particular rule. 0-{IPv4 DA 0; UDP DP 0} . + * 1-{IPv4 DA 0; UDP DP 1} . 2-{IPv4 DA 1; UDP DP 0} . 3-{IPv4 DA 1; UDP DP + * 1} . 4-{IPv6 DA 0; UDP DP 0} . 5-{IPv6 DA 0; UDP DP 1} . 6-{IPv6 DA 1; + * UDP DP 0} . 7-{IPv6 DA 1; UDP DP 1} . 8-{MAC DA 0; Ethertype 0} . 9-{MAC + * DA 1; Ethertype 0} . 10-{MAC DA 0; Ethertype 1} . 11-{MAC DA 1; Ethertype + * 1} . 12-{MAC DA 2; Ethertype 0} . 13-{MAC DA 2; Ethertype 1} . The reset + * default is to mask out all of the rules. 
Note that rules 0-3 are for IPv4 + * packets only and require that the packet is IPv4 for the rules to match. + * Note that rules 4-7 are for IPv6 packets only and require that the packet + * is IPv6 for the rules to match. + */ +#define NIG_REG_P1_LLH_PTP_RULE_MASK 0x187cc +/* [RW 1] Set to 1 to enable PTP packets to be forwarded to the host. */ +#define NIG_REG_P1_LLH_PTP_TO_HOST 0x187d4 /* [RW 32] Specify the client number to be assigned to each priority of the * strict priority arbiter. This register specifies bits 31:0 of the 36-bit * value. Priority 0 is the highest priority. Bits [3:0] are for priority 0 @@ -2342,6 +2431,17 @@ * priority field is extracted from the outer-most VLAN in receive packet. * Only COS 0 and COS 1 are supported in E2. */ #define NIG_REG_P1_PKT_PRIORITY_TO_COS 0x181a8 +/* [RW 6] Enable for TimeSync feature. Bits [2:0] are for RX side. Bits + * [5:3] are for TX side. Bit 0 enables TimeSync on RX side. Bit 1 enables + * V1 frame format in timesync event detection on RX side. Bit 2 enables V2 + * frame format in timesync event detection on RX side. Bit 3 enables + * TimeSync on TX side. Bit 4 enables V1 frame format in timesync event + * detection on TX side. Bit 5 enables V2 frame format in timesync event + * detection on TX side. Note that for HW to detect PTP packet and extract + * data from the packet, at least one of the version bits of that traffic + * direction has to be enabled. + */ +#define NIG_REG_P1_PTP_EN 0x187b0 /* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 0. A * priority is mapped to COS 0 when the corresponding mask bit is 1. More * than one bit may be set; allowing multiple priorities to be mapped to one @@ -2361,6 +2461,78 @@ #define NIG_REG_P1_RX_MACFIFO_EMPTY 0x1858c /* [R 1] TLLH FIFO is empty. */ #define NIG_REG_P1_TLLH_FIFO_EMPTY 0x18338 +/* [RW 19] Packet TimeSync information that is buffered in 1-deep FIFOs for + * TX side. Bits [15:0] reflect the sequence ID of the packet. Bit 16 + * indicates the validity of the data in the buffer. Bit 17 indicates that + * the sequence ID is valid and it is waiting for the TX timestamp value. + * Bit 18 indicates whether the timestamp is from a SW request (value of 1) + * or HW request (value of 0). Writing a 1 to bit 16 will clear the buffer. + */ +#define NIG_REG_P0_TLLH_PTP_BUF_SEQID 0x187e0 +/* [R 32] Packet TimeSync information that is buffered in 1-deep FIFOs for + * MCP. This location returns the lower 32 bits of timestamp value. + */ +#define NIG_REG_P0_TLLH_PTP_BUF_TS_LSB 0x187d8 +/* [R 32] Packet TimeSync information that is buffered in 1-deep FIFOs for + * MCP. This location returns the upper 32 bits of timestamp value. + */ +#define NIG_REG_P0_TLLH_PTP_BUF_TS_MSB 0x187dc +/* [RW 11] Mask register for the various parameters used in determining PTP + * packet presence. Set each bit to 1 to mask out the particular parameter. + * 0-IPv4 DA 0 of 224.0.1.129. 1-IPv4 DA 1 of 224.0.0.107. 2-IPv6 DA 0 of + * 0xFF0*:0:0:0:0:0:0:181. 3-IPv6 DA 1 of 0xFF02:0:0:0:0:0:0:6B. 4-UDP + * destination port 0 of 319. 5-UDP destination port 1 of 320. 6-MAC + * Ethertype 0 of 0x88F7. 7-configurable MAC Ethertype 1. 8-MAC DA 0 of + * 0x01-1B-19-00-00-00. 9-MAC DA 1 of 0x01-80-C2-00-00-0E. 10-configurable + * MAC DA 2. The reset default is set to mask out all parameters. + */ +#define NIG_REG_P0_TLLH_PTP_PARAM_MASK 0x187f0 +/* [RW 14] Mask regiser for the rules used in detecting PTP packets. Set + * each bit to 1 to mask out that particular rule. 0-{IPv4 DA 0; UDP DP 0} . 
+ * 1-{IPv4 DA 0; UDP DP 1} . 2-{IPv4 DA 1; UDP DP 0} . 3-{IPv4 DA 1; UDP DP + * 1} . 4-{IPv6 DA 0; UDP DP 0} . 5-{IPv6 DA 0; UDP DP 1} . 6-{IPv6 DA 1; + * UDP DP 0} . 7-{IPv6 DA 1; UDP DP 1} . 8-{MAC DA 0; Ethertype 0} . 9-{MAC + * DA 1; Ethertype 0} . 10-{MAC DA 0; Ethertype 1} . 11-{MAC DA 1; Ethertype + * 1} . 12-{MAC DA 2; Ethertype 0} . 13-{MAC DA 2; Ethertype 1} . The reset + * default is to mask out all of the rules. + */ +#define NIG_REG_P0_TLLH_PTP_RULE_MASK 0x187f4 +/* [RW 19] Packet TimeSync information that is buffered in 1-deep FIFOs for + * TX side. Bits [15:0] reflect the sequence ID of the packet. Bit 16 + * indicates the validity of the data in the buffer. Bit 17 indicates that + * the sequence ID is valid and it is waiting for the TX timestamp value. + * Bit 18 indicates whether the timestamp is from a SW request (value of 1) + * or HW request (value of 0). Writing a 1 to bit 16 will clear the buffer. + */ +#define NIG_REG_P1_TLLH_PTP_BUF_SEQID 0x187ec +/* [R 32] Packet TimeSync information that is buffered in 1-deep FIFOs for + * MCP. This location returns the lower 32 bits of timestamp value. + */ +#define NIG_REG_P1_TLLH_PTP_BUF_TS_LSB 0x187e4 +/* [R 32] Packet TimeSync information that is buffered in 1-deep FIFOs for + * MCP. This location returns the upper 32 bits of timestamp value. + */ +#define NIG_REG_P1_TLLH_PTP_BUF_TS_MSB 0x187e8 +/* [RW 11] Mask register for the various parameters used in determining PTP + * packet presence. Set each bit to 1 to mask out the particular parameter. + * 0-IPv4 DA 0 of 224.0.1.129. 1-IPv4 DA 1 of 224.0.0.107. 2-IPv6 DA 0 of + * 0xFF0*:0:0:0:0:0:0:181. 3-IPv6 DA 1 of 0xFF02:0:0:0:0:0:0:6B. 4-UDP + * destination port 0 of 319. 5-UDP destination port 1 of 320. 6-MAC + * Ethertype 0 of 0x88F7. 7-configurable MAC Ethertype 1. 8-MAC DA 0 of + * 0x01-1B-19-00-00-00. 9-MAC DA 1 of 0x01-80-C2-00-00-0E. 10-configurable + * MAC DA 2. The reset default is set to mask out all parameters. + */ +#define NIG_REG_P1_TLLH_PTP_PARAM_MASK 0x187f8 +/* [RW 14] Mask regiser for the rules used in detecting PTP packets. Set + * each bit to 1 to mask out that particular rule. 0-{IPv4 DA 0; UDP DP 0} . + * 1-{IPv4 DA 0; UDP DP 1} . 2-{IPv4 DA 1; UDP DP 0} . 3-{IPv4 DA 1; UDP DP + * 1} . 4-{IPv6 DA 0; UDP DP 0} . 5-{IPv6 DA 0; UDP DP 1} . 6-{IPv6 DA 1; + * UDP DP 0} . 7-{IPv6 DA 1; UDP DP 1} . 8-{MAC DA 0; Ethertype 0} . 9-{MAC + * DA 1; Ethertype 0} . 10-{MAC DA 0; Ethertype 1} . 11-{MAC DA 1; Ethertype + * 1} . 12-{MAC DA 2; Ethertype 0} . 13-{MAC DA 2; Ethertype 1} . The reset + * default is to mask out all of the rules. + */ +#define NIG_REG_P1_TLLH_PTP_RULE_MASK 0x187fc /* [RW 32] Specify which of the credit registers the client is to be mapped * to. This register specifies bits 31:0 of the 36-bit value. Bits[3:0] are * for client 0; bits [35:32] are for client 8. For clients that are not @@ -2513,6 +2685,10 @@ swap is equal to SPIO pin that inputs from ifmux_serdes_swap. If 1 then ort swap is equal to ~nig_registers_port_swap.port_swap */ #define NIG_REG_STRAP_OVERRIDE 0x10398 +/* [WB 64] Addresses for TimeSync related registers in the timesync + * generator sub-module. 
+ */ +#define NIG_REG_TIMESYNC_GEN_REG 0x18800 /* [RW 1] output enable for RX_XCM0 IF */ #define NIG_REG_XCM0_OUT_EN 0x100f0 /* [RW 1] output enable for RX_XCM1 IF */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c index b1936044767a..798e97fb63f7 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c @@ -4019,6 +4019,7 @@ static int bnx2x_setup_rss(struct bnx2x *bp, struct bnx2x_raw_obj *r = &o->raw; struct eth_rss_update_ramrod_data *data = (struct eth_rss_update_ramrod_data *)(r->rdata); + u16 caps = 0; u8 rss_mode = 0; int rc; @@ -4042,28 +4043,27 @@ static int bnx2x_setup_rss(struct bnx2x *bp, /* RSS capabilities */ if (test_bit(BNX2X_RSS_IPV4, &p->rss_flags)) - data->capabilities |= - ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY; + caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY; if (test_bit(BNX2X_RSS_IPV4_TCP, &p->rss_flags)) - data->capabilities |= - ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY; + caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY; if (test_bit(BNX2X_RSS_IPV4_UDP, &p->rss_flags)) - data->capabilities |= - ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY; + caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY; if (test_bit(BNX2X_RSS_IPV6, &p->rss_flags)) - data->capabilities |= - ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY; + caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY; if (test_bit(BNX2X_RSS_IPV6_TCP, &p->rss_flags)) - data->capabilities |= - ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY; + caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY; if (test_bit(BNX2X_RSS_IPV6_UDP, &p->rss_flags)) - data->capabilities |= - ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY; + caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY; + + if (test_bit(BNX2X_RSS_GRE_INNER_HDRS, &p->rss_flags)) + caps |= ETH_RSS_UPDATE_RAMROD_DATA_GRE_INNER_HDRS_CAPABILITY; + + data->capabilities = cpu_to_le16(caps); /* Hashing mask */ data->rss_result_mask = p->rss_result_mask; @@ -4336,6 +4336,8 @@ static void bnx2x_q_fill_init_general_data(struct bnx2x *bp, test_bit(BNX2X_Q_FLG_FCOE, flags) ? LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW; + gen_data->fp_hsi_ver = ETH_FP_HSI_VERSION; + DP(BNX2X_MSG_SP, "flags: active %d, cos %d, stats en %d\n", gen_data->activate_flg, gen_data->cos, gen_data->statistics_en_flg); } @@ -4357,12 +4359,13 @@ static void bnx2x_q_fill_init_tx_data(struct bnx2x_queue_sp_obj *o, test_bit(BNX2X_Q_FLG_ANTI_SPOOF, flags); tx_data->force_default_pri_flg = test_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, flags); - + tx_data->refuse_outband_vlan_flg = + test_bit(BNX2X_Q_FLG_REFUSE_OUTBAND_VLAN, flags); tx_data->tunnel_lso_inc_ip_id = test_bit(BNX2X_Q_FLG_TUN_INC_INNER_IP_ID, flags); tx_data->tunnel_non_lso_pcsum_location = - test_bit(BNX2X_Q_FLG_PCSUM_ON_PKT, flags) ? PCSUM_ON_PKT : - PCSUM_ON_BD; + test_bit(BNX2X_Q_FLG_PCSUM_ON_PKT, flags) ? 
CSUM_ON_PKT : + CSUM_ON_BD; tx_data->tx_status_block_id = params->fw_sb_id; tx_data->tx_sb_index_number = params->sb_cq_index; @@ -4722,6 +4725,12 @@ static void bnx2x_q_fill_update_data(struct bnx2x *bp, data->tx_switching_change_flg = test_bit(BNX2X_Q_UPDATE_TX_SWITCHING_CHNG, ¶ms->update_flags); + + /* PTP */ + data->handle_ptp_pkts_flg = + test_bit(BNX2X_Q_UPDATE_PTP_PKTS, ¶ms->update_flags); + data->handle_ptp_pkts_change_flg = + test_bit(BNX2X_Q_UPDATE_PTP_PKTS_CHNG, ¶ms->update_flags); } static inline int bnx2x_q_send_update(struct bnx2x *bp, @@ -5376,6 +5385,10 @@ static int bnx2x_func_chk_transition(struct bnx2x *bp, (!test_bit(BNX2X_F_CMD_STOP, &o->pending))) next_state = BNX2X_F_STATE_STARTED; + else if ((cmd == BNX2X_F_CMD_SET_TIMESYNC) && + (!test_bit(BNX2X_F_CMD_STOP, &o->pending))) + next_state = BNX2X_F_STATE_STARTED; + else if (cmd == BNX2X_F_CMD_TX_STOP) next_state = BNX2X_F_STATE_TX_STOPPED; @@ -5385,6 +5398,10 @@ static int bnx2x_func_chk_transition(struct bnx2x *bp, (!test_bit(BNX2X_F_CMD_STOP, &o->pending))) next_state = BNX2X_F_STATE_TX_STOPPED; + else if ((cmd == BNX2X_F_CMD_SET_TIMESYNC) && + (!test_bit(BNX2X_F_CMD_STOP, &o->pending))) + next_state = BNX2X_F_STATE_TX_STOPPED; + else if (cmd == BNX2X_F_CMD_TX_START) next_state = BNX2X_F_STATE_STARTED; @@ -5652,8 +5669,11 @@ static inline int bnx2x_func_send_start(struct bnx2x *bp, rdata->sd_vlan_tag = cpu_to_le16(start_params->sd_vlan_tag); rdata->path_id = BP_PATH(bp); rdata->network_cos_mode = start_params->network_cos_mode; - rdata->gre_tunnel_mode = start_params->gre_tunnel_mode; - rdata->gre_tunnel_rss = start_params->gre_tunnel_rss; + rdata->tunnel_mode = start_params->tunnel_mode; + rdata->gre_tunnel_type = start_params->gre_tunnel_type; + rdata->inner_gre_rss_en = start_params->inner_gre_rss_en; + rdata->vxlan_dst_port = cpu_to_le16(4789); + rdata->sd_vlan_eth_type = cpu_to_le16(0x8100); /* No need for an explicit memory barrier here as long we would * need to ensure the ordering of writing to the SPQ element @@ -5680,8 +5700,28 @@ static inline int bnx2x_func_send_switch_update(struct bnx2x *bp, memset(rdata, 0, sizeof(*rdata)); /* Fill the ramrod data with provided parameters */ - rdata->tx_switch_suspend_change_flg = 1; - rdata->tx_switch_suspend = switch_update_params->suspend; + if (test_bit(BNX2X_F_UPDATE_TX_SWITCH_SUSPEND_CHNG, + &switch_update_params->changes)) { + rdata->tx_switch_suspend_change_flg = 1; + rdata->tx_switch_suspend = + test_bit(BNX2X_F_UPDATE_TX_SWITCH_SUSPEND, + &switch_update_params->changes); + } + + if (test_bit(BNX2X_F_UPDATE_TUNNEL_CFG_CHNG, + &switch_update_params->changes)) { + rdata->update_tunn_cfg_flg = 1; + if (test_bit(BNX2X_F_UPDATE_TUNNEL_CLSS_EN, + &switch_update_params->changes)) + rdata->tunn_clss_en = 1; + if (test_bit(BNX2X_F_UPDATE_TUNNEL_INNER_GRE_RSS_EN, + &switch_update_params->changes)) + rdata->inner_gre_rss_en = 1; + rdata->tunnel_mode = switch_update_params->tunnel_mode; + rdata->gre_tunnel_type = switch_update_params->gre_tunnel_type; + rdata->vxlan_dst_port = cpu_to_le16(4789); + } + rdata->echo = SWITCH_UPDATE; /* No need for an explicit memory barrier here as long as we @@ -5817,6 +5857,40 @@ static inline int bnx2x_func_send_tx_start(struct bnx2x *bp, U64_LO(data_mapping), NONE_CONNECTION_TYPE); } +static inline +int bnx2x_func_send_set_timesync(struct bnx2x *bp, + struct bnx2x_func_state_params *params) +{ + struct bnx2x_func_sp_obj *o = params->f_obj; + struct set_timesync_ramrod_data *rdata = + (struct set_timesync_ramrod_data *)o->rdata; + 
dma_addr_t data_mapping = o->rdata_mapping; + struct bnx2x_func_set_timesync_params *set_timesync_params = + &params->params.set_timesync; + + memset(rdata, 0, sizeof(*rdata)); + + /* Fill the ramrod data with provided parameters */ + rdata->drift_adjust_cmd = set_timesync_params->drift_adjust_cmd; + rdata->offset_cmd = set_timesync_params->offset_cmd; + rdata->add_sub_drift_adjust_value = + set_timesync_params->add_sub_drift_adjust_value; + rdata->drift_adjust_value = set_timesync_params->drift_adjust_value; + rdata->drift_adjust_period = set_timesync_params->drift_adjust_period; + rdata->offset_delta.lo = U64_LO(set_timesync_params->offset_delta); + rdata->offset_delta.hi = U64_HI(set_timesync_params->offset_delta); + + DP(BNX2X_MSG_SP, "Set timesync command params: drift_cmd = %d, offset_cmd = %d, add_sub_drift = %d, drift_val = %d, drift_period = %d, offset_lo = %d, offset_hi = %d\n", + rdata->drift_adjust_cmd, rdata->offset_cmd, + rdata->add_sub_drift_adjust_value, rdata->drift_adjust_value, + rdata->drift_adjust_period, rdata->offset_delta.lo, + rdata->offset_delta.hi); + + return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_TIMESYNC, 0, + U64_HI(data_mapping), + U64_LO(data_mapping), NONE_CONNECTION_TYPE); +} + static int bnx2x_func_send_cmd(struct bnx2x *bp, struct bnx2x_func_state_params *params) { @@ -5839,6 +5913,8 @@ static int bnx2x_func_send_cmd(struct bnx2x *bp, return bnx2x_func_send_tx_start(bp, params); case BNX2X_F_CMD_SWITCH_UPDATE: return bnx2x_func_send_switch_update(bp, params); + case BNX2X_F_CMD_SET_TIMESYNC: + return bnx2x_func_send_set_timesync(bp, params); default: BNX2X_ERR("Unknown command: %d\n", params->cmd); return -EINVAL; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h index 718ecd294661..21c8f6fb89e5 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h @@ -711,6 +711,7 @@ enum { BNX2X_RSS_IPV6, BNX2X_RSS_IPV6_TCP, BNX2X_RSS_IPV6_UDP, + BNX2X_RSS_GRE_INNER_HDRS, }; struct bnx2x_config_rss_params { @@ -769,7 +770,9 @@ enum { BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG, BNX2X_Q_UPDATE_SILENT_VLAN_REM, BNX2X_Q_UPDATE_TX_SWITCHING_CHNG, - BNX2X_Q_UPDATE_TX_SWITCHING + BNX2X_Q_UPDATE_TX_SWITCHING, + BNX2X_Q_UPDATE_PTP_PKTS_CHNG, + BNX2X_Q_UPDATE_PTP_PKTS, }; /* Allowed Queue states */ @@ -831,6 +834,7 @@ enum { BNX2X_Q_FLG_ANTI_SPOOF, BNX2X_Q_FLG_SILENT_VLAN_REM, BNX2X_Q_FLG_FORCE_DEFAULT_PRI, + BNX2X_Q_FLG_REFUSE_OUTBAND_VLAN, BNX2X_Q_FLG_PCSUM_ON_PKT, BNX2X_Q_FLG_TUN_INC_INNER_IP_ID }; @@ -851,6 +855,10 @@ enum bnx2x_q_type { #define BNX2X_MULTI_TX_COS 3 /* Maximum possible */ #define MAC_PAD (ALIGN(ETH_ALEN, sizeof(u32)) - ETH_ALEN) +/* DMAE channel to be used by FW for timesync workaround. A driver that sends + * timesync-related ramrods must not use this DMAE command ID.
 + */ +#define FW_DMAE_CMD_ID 6 struct bnx2x_queue_init_params { struct { @@ -1085,6 +1093,16 @@ struct bnx2x_queue_sp_obj { }; /********************** Function state update *********************************/ + +/* UPDATE command options */ +enum { + BNX2X_F_UPDATE_TX_SWITCH_SUSPEND_CHNG, + BNX2X_F_UPDATE_TX_SWITCH_SUSPEND, + BNX2X_F_UPDATE_TUNNEL_CFG_CHNG, + BNX2X_F_UPDATE_TUNNEL_CLSS_EN, + BNX2X_F_UPDATE_TUNNEL_INNER_GRE_RSS_EN, +}; + /* Allowed Function states */ enum bnx2x_func_state { BNX2X_F_STATE_RESET, @@ -1105,6 +1123,7 @@ enum bnx2x_func_cmd { BNX2X_F_CMD_TX_STOP, BNX2X_F_CMD_TX_START, BNX2X_F_CMD_SWITCH_UPDATE, + BNX2X_F_CMD_SET_TIMESYNC, BNX2X_F_CMD_MAX, }; @@ -1146,18 +1165,25 @@ struct bnx2x_func_start_params { /* Function cos mode */ u8 network_cos_mode; - /* NVGRE classification enablement */ - u8 nvgre_clss_en; + /* TUNN_MODE_NONE/TUNN_MODE_VXLAN/TUNN_MODE_GRE */ + u8 tunnel_mode; - /* NO_GRE_TUNNEL/NVGRE_TUNNEL/L2GRE_TUNNEL/IPGRE_TUNNEL */ - u8 gre_tunnel_mode; + /* tunneling classification enablement */ + u8 tunn_clss_en; - /* GRE_OUTER_HEADERS_RSS/GRE_INNER_HEADERS_RSS/NVGRE_KEY_ENTROPY_RSS */ - u8 gre_tunnel_rss; + /* NVGRE_TUNNEL/L2GRE_TUNNEL/IPGRE_TUNNEL */ + u8 gre_tunnel_type; + + /* Enables Inner GRE RSS on the function, depends on the client RSS + * capabilities + */ + u8 inner_gre_rss_en; }; struct bnx2x_func_switch_update_params { - u8 suspend; + unsigned long changes; /* BNX2X_F_UPDATE_XX bits */ + u8 tunnel_mode; + u8 gre_tunnel_type; }; struct bnx2x_func_afex_update_params { @@ -1172,6 +1198,7 @@ struct bnx2x_func_afex_viflists_params { u8 afex_vif_list_command; u8 func_to_clear; }; + struct bnx2x_func_tx_start_params { struct priority_cos traffic_type_to_priority_cos[MAX_TRAFFIC_TYPES]; u8 dcb_enabled; @@ -1179,6 +1206,24 @@ struct bnx2x_func_tx_start_params { u8 dont_add_pri_0_en; }; + +struct bnx2x_func_set_timesync_params { + /* Reset, set or keep the current drift value */ + u8 drift_adjust_cmd; + + /* Dec, inc or keep the current offset */ + u8 offset_cmd; + + /* Drift value direction */ + u8 add_sub_drift_adjust_value; + + /* Drift, period and offset values to be used according to the commands + * above. 
+ */ + u8 drift_adjust_value; + u32 drift_adjust_period; + u64 offset_delta; +}; + struct bnx2x_func_state_params { struct bnx2x_func_sp_obj *f_obj; @@ -1197,6 +1242,7 @@ struct bnx2x_func_state_params { struct bnx2x_func_afex_update_params afex_update; struct bnx2x_func_afex_viflists_params afex_viflists; struct bnx2x_func_tx_start_params tx_start; + struct bnx2x_func_set_timesync_params set_timesync; } params; }; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c index 662310c5f4e9..c88b20af87df 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c @@ -1125,7 +1125,7 @@ static int bnx2x_ari_enabled(struct pci_dev *dev) return dev->bus->self && dev->bus->self->ari_enabled; } -static void +static int bnx2x_get_vf_igu_cam_info(struct bnx2x *bp) { int sb_id; @@ -1150,6 +1150,7 @@ bnx2x_get_vf_igu_cam_info(struct bnx2x *bp) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)); } DP(BNX2X_MSG_IOV, "vf_sbs_pool is %d\n", BP_VFDB(bp)->vf_sbs_pool); + return BP_VFDB(bp)->vf_sbs_pool; } static void __bnx2x_iov_free_vfdb(struct bnx2x *bp) @@ -1314,15 +1315,17 @@ int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param, } /* re-read the IGU CAM for VFs - index and abs_vfid must be set */ - bnx2x_get_vf_igu_cam_info(bp); + if (!bnx2x_get_vf_igu_cam_info(bp)) { + BNX2X_ERR("No entries in IGU CAM for vfs\n"); + err = -EINVAL; + goto failed; + } /* allocate the queue arrays for all VFs */ bp->vfdb->vfqs = kzalloc( BNX2X_MAX_NUM_VF_QUEUES * sizeof(struct bnx2x_vf_queue), GFP_KERNEL); - DP(BNX2X_MSG_IOV, "bp->vfdb->vfqs was %p\n", bp->vfdb->vfqs); - if (!bp->vfdb->vfqs) { BNX2X_ERR("failed to allocate vf queue array\n"); err = -ENOMEM; @@ -1349,9 +1352,7 @@ void bnx2x_iov_remove_one(struct bnx2x *bp) if (!IS_SRIOV(bp)) return; - DP(BNX2X_MSG_IOV, "about to call disable sriov\n"); - pci_disable_sriov(bp->pdev); - DP(BNX2X_MSG_IOV, "sriov disabled\n"); + bnx2x_disable_sriov(bp); /* disable access to all VFs */ for (vf_idx = 0; vf_idx < bp->vfdb->sriov.total; vf_idx++) { @@ -1985,21 +1986,6 @@ void bnx2x_iov_adjust_stats_req(struct bnx2x *bp) bp->fw_stats_req->hdr.cmd_num = bp->fw_stats_num + stats_count; } -static inline -struct bnx2x_virtf *__vf_from_stat_id(struct bnx2x *bp, u8 stat_id) -{ - int i; - struct bnx2x_virtf *vf = NULL; - - for_each_vf(bp, i) { - vf = BP_VF(bp, i); - if (stat_id >= vf->igu_base_id && - stat_id < vf->igu_base_id + vf_sb_count(vf)) - break; - } - return vf; -} - /* VF API helpers */ static void bnx2x_vf_qtbl_set_q(struct bnx2x *bp, u8 abs_vfid, u8 qid, u8 enable) @@ -2362,12 +2348,6 @@ int bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf) return rc; } -static inline void bnx2x_vf_get_sbdf(struct bnx2x *bp, - struct bnx2x_virtf *vf, u32 *sbdf) -{ - *sbdf = vf->devfn | (vf->bus << 8); -} - void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf, enum channel_tlvs tlv) { @@ -2416,7 +2396,7 @@ void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf, /* log the unlock */ DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel unlocked by %d\n", - vf->abs_vfid, vf->op_current); + vf->abs_vfid, current_tlv); } static int bnx2x_set_pf_tx_switching(struct bnx2x *bp, bool enable) @@ -2501,7 +2481,7 @@ int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param) bp->requested_nr_virtfn = num_vfs_param; if (num_vfs_param == 0) { bnx2x_set_pf_tx_switching(bp, false); - pci_disable_sriov(dev); + bnx2x_disable_sriov(bp); return 0; } else 
{ return bnx2x_enable_sriov(bp); @@ -2614,6 +2594,12 @@ void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp) void bnx2x_disable_sriov(struct bnx2x *bp) { + if (pci_vfs_assigned(bp->pdev)) { + DP(BNX2X_MSG_IOV, + "Unloading driver while VFs are assigned - VFs will not be deallocated\n"); + return; + } + pci_disable_sriov(bp->pdev); } @@ -2628,7 +2614,7 @@ static int bnx2x_vf_op_prep(struct bnx2x *bp, int vfidx, } if (!IS_SRIOV(bp)) { - BNX2X_ERR("sriov is disabled - can't utilize iov-realted functionality\n"); + BNX2X_ERR("sriov is disabled - can't utilize iov-related functionality\n"); return -EINVAL; } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h index ca1055f3d8af..01bafa4ac045 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h @@ -299,7 +299,8 @@ struct bnx2x_vfdb { #define BP_VFDB(bp) ((bp)->vfdb) /* vf array */ struct bnx2x_virtf *vfs; -#define BP_VF(bp, idx) (&((bp)->vfdb->vfs[idx])) +#define BP_VF(bp, idx) ((BP_VFDB(bp) && (bp)->vfdb->vfs) ? \ + &((bp)->vfdb->vfs[idx]) : NULL) #define bnx2x_vf(bp, idx, var) ((bp)->vfdb->vfs[idx].var) /* queue array - for all vfs */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c index ca47665f94bf..e1c819396d1a 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c @@ -137,7 +137,7 @@ static void bnx2x_storm_stats_post(struct bnx2x *bp) cpu_to_le16(bp->stats_counter++); DP(BNX2X_MSG_STATS, "Sending statistics ramrod %d\n", - bp->fw_stats_req->hdr.drv_stats_counter); + le16_to_cpu(bp->fw_stats_req->hdr.drv_stats_counter)); /* adjust the ramrod to include VF queues statistics */ bnx2x_iov_adjust_stats_req(bp); @@ -200,7 +200,7 @@ static void bnx2x_hw_stats_post(struct bnx2x *bp) } } -static int bnx2x_stats_comp(struct bnx2x *bp) +static void bnx2x_stats_comp(struct bnx2x *bp) { u32 *stats_comp = bnx2x_sp(bp, stats_comp); int cnt = 10; @@ -214,7 +214,6 @@ static int bnx2x_stats_comp(struct bnx2x *bp) cnt--; usleep_range(1000, 2000); } - return 1; } /* diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c index 54e0427a9ee6..b1d9c44aa56c 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c @@ -583,7 +583,6 @@ int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp, flags |= VFPF_QUEUE_FLG_STATS; flags |= VFPF_QUEUE_FLG_CACHE_ALIGN; flags |= VFPF_QUEUE_FLG_VLAN; - DP(NETIF_MSG_IFUP, "vlan removal enabled\n"); /* Common */ req->vf_qid = fp_idx; @@ -952,14 +951,6 @@ static void storm_memset_vf_mbx_valid(struct bnx2x *bp, u16 abs_fid) REG_WR8(bp, addr, 1); } -static inline void bnx2x_set_vf_mbxs_valid(struct bnx2x *bp) -{ - int i; - - for_each_vf(bp, i) - storm_memset_vf_mbx_valid(bp, bnx2x_vf(bp, i, abs_vfid)); -} - /* enable vf_pf mailbox (aka vf-pf-channel) */ void bnx2x_vf_enable_mbx(struct bnx2x *bp, u8 abs_vfid) { |
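
For reference, the SIOCSHWTSTAMP path added to bnx2x_ioctl() above is driven from user space through the standard struct hwtstamp_config interface from linux/net_tstamp.h. The sketch below is illustrative only and not part of the patch; the helper name and interface name are placeholders, and error handling is trimmed to the minimum.

#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

/* Illustrative helper: request Tx timestamping and PTPv2 event Rx filtering.
 * In the driver this request lands in bnx2x_hwtstamp_ioctl() and is applied
 * by bnx2x_configure_ptp_filters().
 */
static int enable_hw_timestamping(const char *ifname)
{
	struct hwtstamp_config cfg;
	struct ifreq ifr;
	int fd, rc;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return -1;

	memset(&cfg, 0, sizeof(cfg));
	cfg.tx_type = HWTSTAMP_TX_ON;
	cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&cfg;

	/* The driver may rewrite cfg.rx_filter with the mode it actually applied */
	rc = ioctl(fd, SIOCSHWTSTAMP, &ifr);
	close(fd);
	return rc;
}

As bnx2x_configure_ptp_filters() above shows, the more specific V1/V2 Rx filter requests are collapsed by the driver to the corresponding *_EVENT filters, and the filter actually in effect is reported back to the caller through the returned hwtstamp_config.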