author | Linus Torvalds | 2018-02-19 11:58:19 -0800
---|---|---
committer | Linus Torvalds | 2018-02-19 11:58:19 -0800
commit | 79c0ef3e85c015b0921a8fd5dd539d1480e9cd6c (patch) |
tree | 84c85e3aa7ff0d25ab62af3d3bfb896888d5219e /drivers |
parent | 91ab883eb21325ad80f3473633f794c78ac87f51 (diff) |
parent | 506b0a395f26e52b3f18827e0de1be051acb77ab (diff) |
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:
1) Prevent index integer overflow in ptr_ring, from Jason Wang. (See the
overflow-guard sketch after this list.)
2) Program mvpp2 multicast filter properly, from Mikulas Patocka.
3) The bridge brport attribute file is write only and doesn't have a
->show() method, so don't blindly invoke it. From Xin Long.
4) Inverted mask used in genphy_setup_forced(), from Ingo van Lil.
5) Fix multiple definition issue with if_ether.h UAPI header, from
Hauke Mehrtens.
6) Fix GFP_KERNEL usage in atomic context in RDS protocol code, from
Sowmini Varadhan.
7) Revert XDP redirect support from the thunderx driver; it is not
implemented properly. From Jesper Dangaard Brouer.
8) Fix missing RTNL protection across some tipc operations, from Ying
Xue.
9) Return the correct IV bytes in the TLS getsockopt code, from Boris
Pismenny.
10) Take tclassid into consideration properly when doing FIB rule
matching. From Stefano Brivio.
11) cxgb4 device needs more PCI VPD quirks, from Casey Leedom.
12) TUN driver doesn't align frags properly, and we can end up doing
unaligned atomics on misaligned metadata. From Eric Dumazet.
13) Fix various crashes found using DEBUG_PREEMPT in rmnet driver, from
Subash Abhinov Kasiviswanathan.
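For fix 1 above, the underlying problem class is an allocation-size
calculation that can wrap before it ever reaches the allocator. The
following is a minimal, self-contained userspace sketch of that kind of
guard; it is not the kernel's ptr_ring code or Jason Wang's actual patch,
and the helper name `alloc_slot_array` and the test sizes are purely
illustrative.

```c
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Allocate an array of 'entries' pointer slots, rejecting any request
 * whose byte count would overflow size_t instead of letting
 * entries * sizeof(void *) silently wrap to a small allocation. */
static void **alloc_slot_array(size_t entries)
{
	if (entries == 0 || entries > SIZE_MAX / sizeof(void *))
		return NULL;            /* nonsensical or would overflow */
	return calloc(entries, sizeof(void *));
}

int main(void)
{
	void **ok  = alloc_slot_array(1u << 20);      /* plausible ring size */
	void **bad = alloc_slot_array(SIZE_MAX / 2);  /* would wrap; rejected */

	printf("1M entries:   %s\n", ok  ? "allocated" : "rejected");
	printf("huge request: %s\n", bad ? "allocated" : "rejected");
	free(ok);
	free(bad);
	return 0;
}
```

The real fix bounds the requested ring size inside the ptr_ring code
itself; the sketch only shows the shape of the check.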
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (56 commits)
tg3: APE heartbeat changes
mlxsw: spectrum_router: Do not unconditionally clear route offload indication
net: qualcomm: rmnet: Fix possible null dereference in command processing
net: qualcomm: rmnet: Fix warning seen with 64 bit stats
net: qualcomm: rmnet: Fix crash on real dev unregistration
sctp: remove the left unnecessary check for chunk in sctp_renege_events
rxrpc: Work around usercopy check
tun: fix tun_napi_alloc_frags() frag allocator
udplite: fix partial checksum initialization
skbuff: Fix comment mis-spelling.
dn_getsockoptdecnet: move nf_{get/set}sockopt outside sock lock
PCI/cxgb4: Extend T3 PCI quirk to T4+ devices
cxgb4: fix trailing zero in CIM LA dump
cxgb4: free up resources of pf 0-3
fib_semantics: Don't match route with mismatching tclassid
NFC: llcp: Limit size of SDP URI
tls: getsockopt return record sequence number
tls: reset the crypto info if copy_from_user fails
tls: retrun the correct IV in getsockopt
docs: segmentation-offloads.txt: add SCTP info
...
Diffstat (limited to 'drivers')
22 files changed, 224 insertions, 257 deletions
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index a77ee2f8fb8d..c1841db1b500 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -820,7 +820,7 @@ static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us) tg3_ape_unlock(tp, TG3_APE_LOCK_MEM); - udelay(10); + usleep_range(10, 20); timeout_us -= (timeout_us > 10) ? 10 : timeout_us; } @@ -922,8 +922,8 @@ static int tg3_ape_send_event(struct tg3 *tp, u32 event) if (!(apedata & APE_FW_STATUS_READY)) return -EAGAIN; - /* Wait for up to 1 millisecond for APE to service previous event. */ - err = tg3_ape_event_lock(tp, 1000); + /* Wait for up to 20 millisecond for APE to service previous event. */ + err = tg3_ape_event_lock(tp, 20000); if (err) return err; @@ -946,6 +946,7 @@ static void tg3_ape_driver_state_change(struct tg3 *tp, int kind) switch (kind) { case RESET_KIND_INIT: + tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++); tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, APE_HOST_SEG_SIG_MAGIC); tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN, @@ -962,13 +963,6 @@ static void tg3_ape_driver_state_change(struct tg3 *tp, int kind) event = APE_EVENT_STATUS_STATE_START; break; case RESET_KIND_SHUTDOWN: - /* With the interface we are currently using, - * APE does not track driver state. Wiping - * out the HOST SEGMENT SIGNATURE forces - * the APE to assume OS absent status. - */ - tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0); - if (device_may_wakeup(&tp->pdev->dev) && tg3_flag(tp, WOL_ENABLE)) { tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED, @@ -990,6 +984,18 @@ static void tg3_ape_driver_state_change(struct tg3 *tp, int kind) tg3_ape_send_event(tp, event); } +static void tg3_send_ape_heartbeat(struct tg3 *tp, + unsigned long interval) +{ + /* Check if hb interval has exceeded */ + if (!tg3_flag(tp, ENABLE_APE) || + time_before(jiffies, tp->ape_hb_jiffies + interval)) + return; + + tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++); + tp->ape_hb_jiffies = jiffies; +} + static void tg3_disable_ints(struct tg3 *tp) { int i; @@ -7262,6 +7268,7 @@ static int tg3_poll_msix(struct napi_struct *napi, int budget) } } + tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1); return work_done; tx_recovery: @@ -7344,6 +7351,7 @@ static int tg3_poll(struct napi_struct *napi, int budget) } } + tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1); return work_done; tx_recovery: @@ -10732,7 +10740,7 @@ static int tg3_reset_hw(struct tg3 *tp, bool reset_phy) if (tg3_flag(tp, ENABLE_APE)) /* Write our heartbeat update interval to APE. 
*/ tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS, - APE_HOST_HEARTBEAT_INT_DISABLE); + APE_HOST_HEARTBEAT_INT_5SEC); tg3_write_sig_post_reset(tp, RESET_KIND_INIT); @@ -11077,6 +11085,9 @@ static void tg3_timer(struct timer_list *t) tp->asf_counter = tp->asf_multiplier; } + /* Update the APE heartbeat every 5 seconds.*/ + tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL); + spin_unlock(&tp->lock); restart_timer: @@ -16653,6 +16664,8 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent) pci_state_reg); tg3_ape_lock_init(tp); + tp->ape_hb_interval = + msecs_to_jiffies(APE_HOST_HEARTBEAT_INT_5SEC); } /* Set up tp->grc_local_ctrl before calling diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h index 47f51cc0566d..1d61aa3efda1 100644 --- a/drivers/net/ethernet/broadcom/tg3.h +++ b/drivers/net/ethernet/broadcom/tg3.h @@ -2508,6 +2508,7 @@ #define TG3_APE_LOCK_PHY3 5 #define TG3_APE_LOCK_GPIO 7 +#define TG3_APE_HB_INTERVAL (tp->ape_hb_interval) #define TG3_EEPROM_SB_F1R2_MBA_OFF 0x10 @@ -3423,6 +3424,10 @@ struct tg3 { struct device *hwmon_dev; bool link_up; bool pcierr_recovery; + + u32 ape_hb; + unsigned long ape_hb_interval; + unsigned long ape_hb_jiffies; }; /* Accessor macros for chip and asic attributes diff --git a/drivers/net/ethernet/cavium/common/cavium_ptp.c b/drivers/net/ethernet/cavium/common/cavium_ptp.c index c87c9c684a33..d59497a7bdce 100644 --- a/drivers/net/ethernet/cavium/common/cavium_ptp.c +++ b/drivers/net/ethernet/cavium/common/cavium_ptp.c @@ -75,6 +75,8 @@ EXPORT_SYMBOL(cavium_ptp_get); void cavium_ptp_put(struct cavium_ptp *ptp) { + if (!ptp) + return; pci_dev_put(ptp->pdev); } EXPORT_SYMBOL(cavium_ptp_put); diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c index b68cde9f17d2..7d9c5ffbd041 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c @@ -67,11 +67,6 @@ module_param(cpi_alg, int, S_IRUGO); MODULE_PARM_DESC(cpi_alg, "PFC algorithm (0=none, 1=VLAN, 2=VLAN16, 3=IP Diffserv)"); -struct nicvf_xdp_tx { - u64 dma_addr; - u8 qidx; -}; - static inline u8 nicvf_netdev_qidx(struct nicvf *nic, u8 qidx) { if (nic->sqs_mode) @@ -507,29 +502,14 @@ static int nicvf_init_resources(struct nicvf *nic) return 0; } -static void nicvf_unmap_page(struct nicvf *nic, struct page *page, u64 dma_addr) -{ - /* Check if it's a recycled page, if not unmap the DMA mapping. - * Recycled page holds an extra reference. 
- */ - if (page_ref_count(page) == 1) { - dma_addr &= PAGE_MASK; - dma_unmap_page_attrs(&nic->pdev->dev, dma_addr, - RCV_FRAG_LEN + XDP_HEADROOM, - DMA_FROM_DEVICE, - DMA_ATTR_SKIP_CPU_SYNC); - } -} - static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog, struct cqe_rx_t *cqe_rx, struct snd_queue *sq, struct rcv_queue *rq, struct sk_buff **skb) { struct xdp_buff xdp; struct page *page; - struct nicvf_xdp_tx *xdp_tx = NULL; u32 action; - u16 len, err, offset = 0; + u16 len, offset = 0; u64 dma_addr, cpu_addr; void *orig_data; @@ -543,7 +523,7 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog, cpu_addr = (u64)phys_to_virt(cpu_addr); page = virt_to_page((void *)cpu_addr); - xdp.data_hard_start = page_address(page) + RCV_BUF_HEADROOM; + xdp.data_hard_start = page_address(page); xdp.data = (void *)cpu_addr; xdp_set_data_meta_invalid(&xdp); xdp.data_end = xdp.data + len; @@ -563,7 +543,18 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog, switch (action) { case XDP_PASS: - nicvf_unmap_page(nic, page, dma_addr); + /* Check if it's a recycled page, if not + * unmap the DMA mapping. + * + * Recycled page holds an extra reference. + */ + if (page_ref_count(page) == 1) { + dma_addr &= PAGE_MASK; + dma_unmap_page_attrs(&nic->pdev->dev, dma_addr, + RCV_FRAG_LEN + XDP_PACKET_HEADROOM, + DMA_FROM_DEVICE, + DMA_ATTR_SKIP_CPU_SYNC); + } /* Build SKB and pass on packet to network stack */ *skb = build_skb(xdp.data, @@ -576,20 +567,6 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog, case XDP_TX: nicvf_xdp_sq_append_pkt(nic, sq, (u64)xdp.data, dma_addr, len); return true; - case XDP_REDIRECT: - /* Save DMA address for use while transmitting */ - xdp_tx = (struct nicvf_xdp_tx *)page_address(page); - xdp_tx->dma_addr = dma_addr; - xdp_tx->qidx = nicvf_netdev_qidx(nic, cqe_rx->rq_idx); - - err = xdp_do_redirect(nic->pnicvf->netdev, &xdp, prog); - if (!err) - return true; - - /* Free the page on error */ - nicvf_unmap_page(nic, page, dma_addr); - put_page(page); - break; default: bpf_warn_invalid_xdp_action(action); /* fall through */ @@ -597,7 +574,18 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog, trace_xdp_exception(nic->netdev, prog, action); /* fall through */ case XDP_DROP: - nicvf_unmap_page(nic, page, dma_addr); + /* Check if it's a recycled page, if not + * unmap the DMA mapping. + * + * Recycled page holds an extra reference. 
+ */ + if (page_ref_count(page) == 1) { + dma_addr &= PAGE_MASK; + dma_unmap_page_attrs(&nic->pdev->dev, dma_addr, + RCV_FRAG_LEN + XDP_PACKET_HEADROOM, + DMA_FROM_DEVICE, + DMA_ATTR_SKIP_CPU_SYNC); + } put_page(page); return true; } @@ -1864,50 +1852,6 @@ static int nicvf_xdp(struct net_device *netdev, struct netdev_bpf *xdp) } } -static int nicvf_xdp_xmit(struct net_device *netdev, struct xdp_buff *xdp) -{ - struct nicvf *nic = netdev_priv(netdev); - struct nicvf *snic = nic; - struct nicvf_xdp_tx *xdp_tx; - struct snd_queue *sq; - struct page *page; - int err, qidx; - - if (!netif_running(netdev) || !nic->xdp_prog) - return -EINVAL; - - page = virt_to_page(xdp->data); - xdp_tx = (struct nicvf_xdp_tx *)page_address(page); - qidx = xdp_tx->qidx; - - if (xdp_tx->qidx >= nic->xdp_tx_queues) - return -EINVAL; - - /* Get secondary Qset's info */ - if (xdp_tx->qidx >= MAX_SND_QUEUES_PER_QS) { - qidx = xdp_tx->qidx / MAX_SND_QUEUES_PER_QS; - snic = (struct nicvf *)nic->snicvf[qidx - 1]; - if (!snic) - return -EINVAL; - qidx = xdp_tx->qidx % MAX_SND_QUEUES_PER_QS; - } - - sq = &snic->qs->sq[qidx]; - err = nicvf_xdp_sq_append_pkt(snic, sq, (u64)xdp->data, - xdp_tx->dma_addr, - xdp->data_end - xdp->data); - if (err) - return -ENOMEM; - - nicvf_xdp_sq_doorbell(snic, sq, qidx); - return 0; -} - -static void nicvf_xdp_flush(struct net_device *dev) -{ - return; -} - static int nicvf_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr) { struct hwtstamp_config config; @@ -1986,8 +1930,6 @@ static const struct net_device_ops nicvf_netdev_ops = { .ndo_fix_features = nicvf_fix_features, .ndo_set_features = nicvf_set_features, .ndo_bpf = nicvf_xdp, - .ndo_xdp_xmit = nicvf_xdp_xmit, - .ndo_xdp_flush = nicvf_xdp_flush, .ndo_do_ioctl = nicvf_ioctl, }; diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c index 3eae9ff9b53a..d42704d07484 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c @@ -204,7 +204,7 @@ static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, struct rbdr *rbdr, /* Reserve space for header modifications by BPF program */ if (rbdr->is_xdp) - buf_len += XDP_HEADROOM; + buf_len += XDP_PACKET_HEADROOM; /* Check if it's recycled */ if (pgcache) @@ -224,9 +224,8 @@ ret: nic->rb_page = NULL; return -ENOMEM; } - if (pgcache) - pgcache->dma_addr = *rbuf + XDP_HEADROOM; + pgcache->dma_addr = *rbuf + XDP_PACKET_HEADROOM; nic->rb_page_offset += buf_len; } @@ -1244,7 +1243,7 @@ int nicvf_xdp_sq_append_pkt(struct nicvf *nic, struct snd_queue *sq, int qentry; if (subdesc_cnt > sq->xdp_free_cnt) - return -1; + return 0; qentry = nicvf_get_sq_desc(sq, subdesc_cnt); @@ -1255,7 +1254,7 @@ int nicvf_xdp_sq_append_pkt(struct nicvf *nic, struct snd_queue *sq, sq->xdp_desc_cnt += subdesc_cnt; - return 0; + return 1; } /* Calculate no of SQ subdescriptors needed to transmit all @@ -1656,7 +1655,7 @@ static void nicvf_unmap_rcv_buffer(struct nicvf *nic, u64 dma_addr, if (page_ref_count(page) != 1) return; - len += XDP_HEADROOM; + len += XDP_PACKET_HEADROOM; /* Receive buffers in XDP mode are mapped from page start */ dma_addr &= PAGE_MASK; } diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h index ce1eed7a6d63..5e9a03cf1b4d 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h +++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h @@ -11,7 +11,6 @@ #include <linux/netdevice.h> #include 
<linux/iommu.h> -#include <linux/bpf.h> #include <net/xdp.h> #include "q_struct.h" @@ -94,9 +93,6 @@ #define RCV_FRAG_LEN (SKB_DATA_ALIGN(DMA_BUFFER_LEN + NET_SKB_PAD) + \ SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) -#define RCV_BUF_HEADROOM 128 /* To store dma address for XDP redirect */ -#define XDP_HEADROOM (XDP_PACKET_HEADROOM + RCV_BUF_HEADROOM) - #define MAX_CQES_FOR_TX ((SND_QUEUE_LEN / MIN_SQ_DESC_PER_PKT_XMIT) * \ MAX_CQE_PER_PKT_XMIT) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c index 557fd8bfd54e..00a1d2d13169 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c @@ -472,7 +472,7 @@ int cudbg_collect_cim_la(struct cudbg_init *pdbg_init, if (is_t6(padap->params.chip)) { size = padap->params.cim_la_size / 10 + 1; - size *= 11 * sizeof(u32); + size *= 10 * sizeof(u32); } else { size = padap->params.cim_la_size / 8; size *= 8 * sizeof(u32); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c index 30485f9a598f..143686c60234 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c @@ -102,7 +102,7 @@ static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity) case CUDBG_CIM_LA: if (is_t6(adap->params.chip)) { len = adap->params.cim_la_size / 10 + 1; - len *= 11 * sizeof(u32); + len *= 10 * sizeof(u32); } else { len = adap->params.cim_la_size / 8; len *= 8 * sizeof(u32); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 56bc626ef006..7b452e85de2a 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -4982,9 +4982,10 @@ static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs) pcie_fw = readl(adap->regs + PCIE_FW_A); /* Check if cxgb4 is the MASTER and fw is initialized */ - if (!(pcie_fw & PCIE_FW_INIT_F) || + if (num_vfs && + (!(pcie_fw & PCIE_FW_INIT_F) || !(pcie_fw & PCIE_FW_MASTER_VLD_F) || - PCIE_FW_MASTER_G(pcie_fw) != CXGB4_UNIFIED_PF) { + PCIE_FW_MASTER_G(pcie_fw) != CXGB4_UNIFIED_PF)) { dev_warn(&pdev->dev, "cxgb4 driver needs to be MASTER to support SRIOV\n"); return -EOPNOTSUPP; @@ -5599,24 +5600,24 @@ static void remove_one(struct pci_dev *pdev) #if IS_ENABLED(CONFIG_IPV6) t4_cleanup_clip_tbl(adapter); #endif - iounmap(adapter->regs); if (!is_t4(adapter->params.chip)) iounmap(adapter->bar2); - pci_disable_pcie_error_reporting(pdev); - if ((adapter->flags & DEV_ENABLED)) { - pci_disable_device(pdev); - adapter->flags &= ~DEV_ENABLED; - } - pci_release_regions(pdev); - kfree(adapter->mbox_log); - synchronize_rcu(); - kfree(adapter); } #ifdef CONFIG_PCI_IOV else { cxgb4_iov_configure(adapter->pdev, 0); } #endif + iounmap(adapter->regs); + pci_disable_pcie_error_reporting(pdev); + if ((adapter->flags & DEV_ENABLED)) { + pci_disable_device(pdev); + adapter->flags &= ~DEV_ENABLED; + } + pci_release_regions(pdev); + kfree(adapter->mbox_log); + synchronize_rcu(); + kfree(adapter); } /* "Shutdown" quiesces the device, stopping Ingress Packet and Interrupt diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 047609ef0515..920bccd6bc40 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -2637,7 +2637,6 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size) } #define 
EEPROM_STAT_ADDR 0x7bfc -#define VPD_SIZE 0x800 #define VPD_BASE 0x400 #define VPD_BASE_OLD 0 #define VPD_LEN 1024 @@ -2704,15 +2703,6 @@ int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p) if (!vpd) return -ENOMEM; - /* We have two VPD data structures stored in the adapter VPD area. - * By default, Linux calculates the size of the VPD area by traversing - * the first VPD area at offset 0x0, so we need to tell the OS what - * our real VPD size is. - */ - ret = pci_set_vpd_size(adapter->pdev, VPD_SIZE); - if (ret < 0) - goto out; - /* Card information normally starts at VPD_BASE but early cards had * it at 0. */ diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 27447260215d..996f47568f9e 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -791,6 +791,18 @@ static int ibmvnic_login(struct net_device *netdev) return 0; } +static void release_login_buffer(struct ibmvnic_adapter *adapter) +{ + kfree(adapter->login_buf); + adapter->login_buf = NULL; +} + +static void release_login_rsp_buffer(struct ibmvnic_adapter *adapter) +{ + kfree(adapter->login_rsp_buf); + adapter->login_rsp_buf = NULL; +} + static void release_resources(struct ibmvnic_adapter *adapter) { int i; @@ -813,6 +825,10 @@ static void release_resources(struct ibmvnic_adapter *adapter) } } } + kfree(adapter->napi); + adapter->napi = NULL; + + release_login_rsp_buffer(adapter); } static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state) @@ -1057,6 +1073,35 @@ static int ibmvnic_open(struct net_device *netdev) return rc; } +static void clean_rx_pools(struct ibmvnic_adapter *adapter) +{ + struct ibmvnic_rx_pool *rx_pool; + u64 rx_entries; + int rx_scrqs; + int i, j; + + if (!adapter->rx_pool) + return; + + rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs); + rx_entries = adapter->req_rx_add_entries_per_subcrq; + + /* Free any remaining skbs in the rx buffer pools */ + for (i = 0; i < rx_scrqs; i++) { + rx_pool = &adapter->rx_pool[i]; + if (!rx_pool) + continue; + + netdev_dbg(adapter->netdev, "Cleaning rx_pool[%d]\n", i); + for (j = 0; j < rx_entries; j++) { + if (rx_pool->rx_buff[j].skb) { + dev_kfree_skb_any(rx_pool->rx_buff[j].skb); + rx_pool->rx_buff[j].skb = NULL; + } + } + } +} + static void clean_tx_pools(struct ibmvnic_adapter *adapter) { struct ibmvnic_tx_pool *tx_pool; @@ -1134,7 +1179,7 @@ static int __ibmvnic_close(struct net_device *netdev) } } } - + clean_rx_pools(adapter); clean_tx_pools(adapter); adapter->state = VNIC_CLOSED; return rc; @@ -1670,8 +1715,6 @@ static int do_reset(struct ibmvnic_adapter *adapter, return 0; } - netif_carrier_on(netdev); - /* kick napi */ for (i = 0; i < adapter->req_rx_queues; i++) napi_schedule(&adapter->napi[i]); @@ -1679,6 +1722,8 @@ static int do_reset(struct ibmvnic_adapter *adapter, if (adapter->reset_reason != VNIC_RESET_FAILOVER) netdev_notify_peers(netdev); + netif_carrier_on(netdev); + return 0; } @@ -1853,6 +1898,7 @@ restart_poll: be16_to_cpu(next->rx_comp.rc)); /* free the entry */ next->rx_comp.first = 0; + dev_kfree_skb_any(rx_buff->skb); remove_buff_from_pool(adapter, rx_buff); continue; } @@ -3013,6 +3059,7 @@ static void send_login(struct ibmvnic_adapter *adapter) struct vnic_login_client_data *vlcd; int i; + release_login_rsp_buffer(adapter); client_data_len = vnic_client_data_len(adapter); buffer_size = @@ -3708,6 +3755,7 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq, dma_unmap_single(dev, adapter->login_buf_token, 
adapter->login_buf_sz, DMA_BIDIRECTIONAL); + release_login_buffer(adapter); dma_unmap_single(dev, adapter->login_rsp_buf_token, adapter->login_rsp_buf_sz, DMA_BIDIRECTIONAL); diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c index a1d7b88cf083..5a1668cdb461 100644 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ b/drivers/net/ethernet/marvell/mvpp2.c @@ -7137,6 +7137,7 @@ static void mvpp2_set_rx_mode(struct net_device *dev) int id = port->id; bool allmulti = dev->flags & IFF_ALLMULTI; +retry: mvpp2_prs_mac_promisc_set(priv, id, dev->flags & IFF_PROMISC); mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_ALL, allmulti); mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_IP6, allmulti); @@ -7144,9 +7145,13 @@ static void mvpp2_set_rx_mode(struct net_device *dev) /* Remove all port->id's mcast enries */ mvpp2_prs_mcast_del_all(priv, id); - if (allmulti && !netdev_mc_empty(dev)) { - netdev_for_each_mc_addr(ha, dev) - mvpp2_prs_mac_da_accept(priv, id, ha->addr, true); + if (!allmulti) { + netdev_for_each_mc_addr(ha, dev) { + if (mvpp2_prs_mac_da_accept(priv, id, ha->addr, true)) { + allmulti = true; + goto retry; + } + } } } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index f0b25baba09a..f7948e983637 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -788,6 +788,9 @@ static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp, u32 tb_id, struct netlink_ext_ack *extack) { + struct mlxsw_sp_mr_table *mr4_table; + struct mlxsw_sp_fib *fib4; + struct mlxsw_sp_fib *fib6; struct mlxsw_sp_vr *vr; int err; @@ -796,29 +799,30 @@ static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp, NL_SET_ERR_MSG(extack, "spectrum: Exceeded number of supported virtual routers"); return ERR_PTR(-EBUSY); } - vr->fib4 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4); - if (IS_ERR(vr->fib4)) - return ERR_CAST(vr->fib4); - vr->fib6 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6); - if (IS_ERR(vr->fib6)) { - err = PTR_ERR(vr->fib6); + fib4 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4); + if (IS_ERR(fib4)) + return ERR_CAST(fib4); + fib6 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6); + if (IS_ERR(fib6)) { + err = PTR_ERR(fib6); goto err_fib6_create; } - vr->mr4_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id, - MLXSW_SP_L3_PROTO_IPV4); - if (IS_ERR(vr->mr4_table)) { - err = PTR_ERR(vr->mr4_table); + mr4_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id, + MLXSW_SP_L3_PROTO_IPV4); + if (IS_ERR(mr4_table)) { + err = PTR_ERR(mr4_table); goto err_mr_table_create; } + vr->fib4 = fib4; + vr->fib6 = fib6; + vr->mr4_table = mr4_table; vr->tb_id = tb_id; return vr; err_mr_table_create: - mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib6); - vr->fib6 = NULL; + mlxsw_sp_fib_destroy(mlxsw_sp, fib6); err_fib6_create: - mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib4); - vr->fib4 = NULL; + mlxsw_sp_fib_destroy(mlxsw_sp, fib4); return ERR_PTR(err); } @@ -3790,6 +3794,9 @@ mlxsw_sp_fib4_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry) struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group; int i; + if (!list_is_singular(&nh_grp->fib_list)) + return; + for (i = 0; i < nh_grp->count; i++) { struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i]; diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c index 
7e7704daf5f1..c4949183eef3 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c @@ -43,12 +43,6 @@ /* Local Definitions and Declarations */ -struct rmnet_walk_data { - struct net_device *real_dev; - struct list_head *head; - struct rmnet_port *port; -}; - static int rmnet_is_real_dev_registered(const struct net_device *real_dev) { return rcu_access_pointer(real_dev->rx_handler) == rmnet_rx_handler; @@ -112,17 +106,14 @@ static int rmnet_register_real_device(struct net_device *real_dev) static void rmnet_unregister_bridge(struct net_device *dev, struct rmnet_port *port) { - struct net_device *rmnet_dev, *bridge_dev; struct rmnet_port *bridge_port; + struct net_device *bridge_dev; if (port->rmnet_mode != RMNET_EPMODE_BRIDGE) return; /* bridge slave handling */ if (!port->nr_rmnet_devs) { - rmnet_dev = netdev_master_upper_dev_get_rcu(dev); - netdev_upper_dev_unlink(dev, rmnet_dev); - bridge_dev = port->bridge_ep; bridge_port = rmnet_get_port_rtnl(bridge_dev); @@ -132,9 +123,6 @@ static void rmnet_unregister_bridge(struct net_device *dev, bridge_dev = port->bridge_ep; bridge_port = rmnet_get_port_rtnl(bridge_dev); - rmnet_dev = netdev_master_upper_dev_get_rcu(bridge_dev); - netdev_upper_dev_unlink(bridge_dev, rmnet_dev); - rmnet_unregister_real_device(bridge_dev, bridge_port); } } @@ -173,10 +161,6 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev, if (err) goto err1; - err = netdev_master_upper_dev_link(dev, real_dev, NULL, NULL, extack); - if (err) - goto err2; - port->rmnet_mode = mode; hlist_add_head_rcu(&ep->hlnode, &port->muxed_ep[mux_id]); @@ -193,8 +177,6 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev, return 0; -err2: - rmnet_vnd_dellink(mux_id, port, ep); err1: rmnet_unregister_real_device(real_dev, port); err0: @@ -204,14 +186,13 @@ err0: static void rmnet_dellink(struct net_device *dev, struct list_head *head) { + struct rmnet_priv *priv = netdev_priv(dev); struct net_device *real_dev; struct rmnet_endpoint *ep; struct rmnet_port *port; u8 mux_id; - rcu_read_lock(); - real_dev = netdev_master_upper_dev_get_rcu(dev); - rcu_read_unlock(); + real_dev = priv->real_dev; if (!real_dev || !rmnet_is_real_dev_registered(real_dev)) return; @@ -219,7 +200,6 @@ static void rmnet_dellink(struct net_device *dev, struct list_head *head) port = rmnet_get_port_rtnl(real_dev); mux_id = rmnet_vnd_get_mux(dev); - netdev_upper_dev_unlink(dev, real_dev); ep = rmnet_get_endpoint(port, mux_id); if (ep) { @@ -233,30 +213,13 @@ static void rmnet_dellink(struct net_device *dev, struct list_head *head) unregister_netdevice_queue(dev, head); } -static int rmnet_dev_walk_unreg(struct net_device *rmnet_dev, void *data) -{ - struct rmnet_walk_data *d = data; - struct rmnet_endpoint *ep; - u8 mux_id; - - mux_id = rmnet_vnd_get_mux(rmnet_dev); - ep = rmnet_get_endpoint(d->port, mux_id); - if (ep) { - hlist_del_init_rcu(&ep->hlnode); - rmnet_vnd_dellink(mux_id, d->port, ep); - kfree(ep); - } - netdev_upper_dev_unlink(rmnet_dev, d->real_dev); - unregister_netdevice_queue(rmnet_dev, d->head); - - return 0; -} - static void rmnet_force_unassociate_device(struct net_device *dev) { struct net_device *real_dev = dev; - struct rmnet_walk_data d; + struct hlist_node *tmp_ep; + struct rmnet_endpoint *ep; struct rmnet_port *port; + unsigned long bkt_ep; LIST_HEAD(list); if (!rmnet_is_real_dev_registered(real_dev)) @@ -264,16 +227,19 @@ static void rmnet_force_unassociate_device(struct net_device *dev) 
ASSERT_RTNL(); - d.real_dev = real_dev; - d.head = &list; - port = rmnet_get_port_rtnl(dev); - d.port = port; rcu_read_lock(); rmnet_unregister_bridge(dev, port); - netdev_walk_all_lower_dev_rcu(real_dev, rmnet_dev_walk_unreg, &d); + hash_for_each_safe(port->muxed_ep, bkt_ep, tmp_ep, ep, hlnode) { + unregister_netdevice_queue(ep->egress_dev, &list); + rmnet_vnd_dellink(ep->mux_id, port, ep); + + hlist_del_init_rcu(&ep->hlnode); + kfree(ep); + } + rcu_read_unlock(); unregister_netdevice_many(&list); @@ -422,11 +388,6 @@ int rmnet_add_bridge(struct net_device *rmnet_dev, if (err) return -EBUSY; - err = netdev_master_upper_dev_link(slave_dev, rmnet_dev, NULL, NULL, - extack); - if (err) - return -EINVAL; - slave_port = rmnet_get_port(slave_dev); slave_port->rmnet_mode = RMNET_EPMODE_BRIDGE; slave_port->bridge_ep = real_dev; @@ -449,7 +410,6 @@ int rmnet_del_bridge(struct net_device *rmnet_dev, port->rmnet_mode = RMNET_EPMODE_VND; port->bridge_ep = NULL; - netdev_upper_dev_unlink(slave_dev, rmnet_dev); slave_port = rmnet_get_port(slave_dev); rmnet_unregister_real_device(slave_dev, slave_port); diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c index 6bc328fb88e1..b0dbca070c00 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c @@ -38,6 +38,11 @@ static u8 rmnet_map_do_flow_control(struct sk_buff *skb, } ep = rmnet_get_endpoint(port, mux_id); + if (!ep) { + kfree_skb(skb); + return RX_HANDLER_CONSUMED; + } + vnd = ep->egress_dev; ip_family = cmd->flow_control.ip_family; diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c index 570a227acdd8..346d310914df 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c @@ -121,7 +121,7 @@ static void rmnet_get_stats64(struct net_device *dev, memset(&total_stats, 0, sizeof(struct rmnet_vnd_stats)); for_each_possible_cpu(cpu) { - pcpu_ptr = this_cpu_ptr(priv->pcpu_stats); + pcpu_ptr = per_cpu_ptr(priv->pcpu_stats, cpu); do { start = u64_stats_fetch_begin_irq(&pcpu_ptr->syncp); diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index c87f57ca4437..a95fbd5510d9 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -2255,9 +2255,6 @@ static int ravb_wol_setup(struct net_device *ndev) /* Enable MagicPacket */ ravb_modify(ndev, ECMR, ECMR_MPDE, ECMR_MPDE); - /* Increased clock usage so device won't be suspended */ - clk_enable(priv->clk); - return enable_irq_wake(priv->emac_irq); } @@ -2276,9 +2273,6 @@ static int ravb_wol_restore(struct net_device *ndev) if (ret < 0) return ret; - /* Restore clock usage count */ - clk_disable(priv->clk); - return disable_irq_wake(priv->emac_irq); } diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index a197e11f3a56..92dcf8717fc6 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -40,7 +40,6 @@ #include <linux/slab.h> #include <linux/ethtool.h> #include <linux/if_vlan.h> -#include <linux/clk.h> #include <linux/sh_eth.h> #include <linux/of_mdio.h> @@ -2304,7 +2303,7 @@ static void sh_eth_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) wol->supported = 0; wol->wolopts = 0; - if (mdp->cd->magic && mdp->clk) { + if (mdp->cd->magic) { wol->supported = WAKE_MAGIC; 
wol->wolopts = mdp->wol_enabled ? WAKE_MAGIC : 0; } @@ -2314,7 +2313,7 @@ static int sh_eth_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) { struct sh_eth_private *mdp = netdev_priv(ndev); - if (!mdp->cd->magic || !mdp->clk || wol->wolopts & ~WAKE_MAGIC) + if (!mdp->cd->magic || wol->wolopts & ~WAKE_MAGIC) return -EOPNOTSUPP; mdp->wol_enabled = !!(wol->wolopts & WAKE_MAGIC); @@ -3153,11 +3152,6 @@ static int sh_eth_drv_probe(struct platform_device *pdev) goto out_release; } - /* Get clock, if not found that's OK but Wake-On-Lan is unavailable */ - mdp->clk = devm_clk_get(&pdev->dev, NULL); - if (IS_ERR(mdp->clk)) - mdp->clk = NULL; - ndev->base_addr = res->start; spin_lock_init(&mdp->lock); @@ -3278,7 +3272,7 @@ static int sh_eth_drv_probe(struct platform_device *pdev) if (ret) goto out_napi_del; - if (mdp->cd->magic && mdp->clk) + if (mdp->cd->magic) device_set_wakeup_capable(&pdev->dev, 1); /* print device information */ @@ -3331,9 +3325,6 @@ static int sh_eth_wol_setup(struct net_device *ndev) /* Enable MagicPacket */ sh_eth_modify(ndev, ECMR, ECMR_MPDE, ECMR_MPDE); - /* Increased clock usage so device won't be suspended */ - clk_enable(mdp->clk); - return enable_irq_wake(ndev->irq); } @@ -3359,9 +3350,6 @@ static int sh_eth_wol_restore(struct net_device *ndev) if (ret < 0) return ret; - /* Restore clock usage count */ - clk_disable(mdp->clk); - return disable_irq_wake(ndev->irq); } diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index b13eed21c87d..d39ae77707ef 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -1382,7 +1382,7 @@ int genphy_setup_forced(struct phy_device *phydev) ctl |= BMCR_FULLDPLX; return phy_modify(phydev, MII_BMCR, - BMCR_LOOPBACK | BMCR_ISOLATE | BMCR_PDOWN, ctl); + ~(BMCR_LOOPBACK | BMCR_ISOLATE | BMCR_PDOWN), ctl); } EXPORT_SYMBOL(genphy_setup_forced); diff --git a/drivers/net/thunderbolt.c b/drivers/net/thunderbolt.c index ca5e375de27c..e0d6760f3219 100644 --- a/drivers/net/thunderbolt.c +++ b/drivers/net/thunderbolt.c @@ -166,6 +166,8 @@ struct tbnet_ring { * @connected_work: Worker that finalizes the ThunderboltIP connection * setup and enables DMA paths for high speed data * transfers + * @disconnect_work: Worker that handles tearing down the ThunderboltIP + * connection * @rx_hdr: Copy of the currently processed Rx frame. Used when a * network packet consists of multiple Thunderbolt frames. * In host byte order. 
@@ -190,6 +192,7 @@ struct tbnet { int login_retries; struct delayed_work login_work; struct work_struct connected_work; + struct work_struct disconnect_work; struct thunderbolt_ip_frame_header rx_hdr; struct tbnet_ring rx_ring; atomic_t frame_id; @@ -445,7 +448,7 @@ static int tbnet_handle_packet(const void *buf, size_t size, void *data) case TBIP_LOGOUT: ret = tbnet_logout_response(net, route, sequence, command_id); if (!ret) - tbnet_tear_down(net, false); + queue_work(system_long_wq, &net->disconnect_work); break; default: @@ -659,6 +662,13 @@ static void tbnet_login_work(struct work_struct *work) } } +static void tbnet_disconnect_work(struct work_struct *work) +{ + struct tbnet *net = container_of(work, typeof(*net), disconnect_work); + + tbnet_tear_down(net, false); +} + static bool tbnet_check_frame(struct tbnet *net, const struct tbnet_frame *tf, const struct thunderbolt_ip_frame_header *hdr) { @@ -881,6 +891,7 @@ static int tbnet_stop(struct net_device *dev) napi_disable(&net->napi); + cancel_work_sync(&net->disconnect_work); tbnet_tear_down(net, true); tb_ring_free(net->rx_ring.ring); @@ -1195,6 +1206,7 @@ static int tbnet_probe(struct tb_service *svc, const struct tb_service_id *id) net = netdev_priv(dev); INIT_DELAYED_WORK(&net->login_work, tbnet_login_work); INIT_WORK(&net->connected_work, tbnet_connected_work); + INIT_WORK(&net->disconnect_work, tbnet_disconnect_work); mutex_init(&net->connection_lock); atomic_set(&net->command_id, 0); atomic_set(&net->frame_id, 0); @@ -1270,10 +1282,7 @@ static int __maybe_unused tbnet_suspend(struct device *dev) stop_login(net); if (netif_running(net->dev)) { netif_device_detach(net->dev); - tb_ring_stop(net->rx_ring.ring); - tb_ring_stop(net->tx_ring.ring); - tbnet_free_buffers(&net->rx_ring); - tbnet_free_buffers(&net->tx_ring); + tbnet_tear_down(net, true); } return 0; diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 81e6cc951e7f..b52258c327d2 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -1489,27 +1489,23 @@ static struct sk_buff *tun_napi_alloc_frags(struct tun_file *tfile, skb->truesize += skb->data_len; for (i = 1; i < it->nr_segs; i++) { + struct page_frag *pfrag = ¤t->task_frag; size_t fragsz = it->iov[i].iov_len; - unsigned long offset; - struct page *page; - void *data; if (fragsz == 0 || fragsz > PAGE_SIZE) { err = -EINVAL; goto free; } - local_bh_disable(); - data = napi_alloc_frag(fragsz); - local_bh_enable(); - if (!data) { + if (!skb_page_frag_refill(fragsz, pfrag, GFP_KERNEL)) { err = -ENOMEM; goto free; } - page = virt_to_head_page(data); - offset = data - page_address(page); - skb_fill_page_desc(skb, i - 1, page, offset, fragsz); + skb_fill_page_desc(skb, i - 1, pfrag->page, + pfrag->offset, fragsz); + page_ref_inc(pfrag->page); + pfrag->offset += fragsz; } return skb; diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index fc734014206f..8b14bd326d4a 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -3419,22 +3419,29 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PORT_RIDGE, static void quirk_chelsio_extend_vpd(struct pci_dev *dev) { - pci_set_vpd_size(dev, 8192); -} - -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x20, quirk_chelsio_extend_vpd); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x21, quirk_chelsio_extend_vpd); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x22, quirk_chelsio_extend_vpd); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x23, quirk_chelsio_extend_vpd); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x24, 
quirk_chelsio_extend_vpd); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x25, quirk_chelsio_extend_vpd); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x26, quirk_chelsio_extend_vpd); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x30, quirk_chelsio_extend_vpd); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x31, quirk_chelsio_extend_vpd); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x32, quirk_chelsio_extend_vpd); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x35, quirk_chelsio_extend_vpd); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x36, quirk_chelsio_extend_vpd); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x37, quirk_chelsio_extend_vpd); + int chip = (dev->device & 0xf000) >> 12; + int func = (dev->device & 0x0f00) >> 8; + int prod = (dev->device & 0x00ff) >> 0; + + /* + * If this is a T3-based adapter, there's a 1KB VPD area at offset + * 0xc00 which contains the preferred VPD values. If this is a T4 or + * later based adapter, the special VPD is at offset 0x400 for the + * Physical Functions (the SR-IOV Virtual Functions have no VPD + * Capabilities). The PCI VPD Access core routines will normally + * compute the size of the VPD by parsing the VPD Data Structure at + * offset 0x000. This will result in silent failures when attempting + * to accesses these other VPD areas which are beyond those computed + * limits. + */ + if (chip == 0x0 && prod >= 0x20) + pci_set_vpd_size(dev, 8192); + else if (chip >= 0x4 && func < 0x8) + pci_set_vpd_size(dev, 2048); +} + +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, PCI_ANY_ID, + quirk_chelsio_extend_vpd); #ifdef CONFIG_ACPI /* |