Diffstat (limited to 'drivers')
-rw-r--r--  drivers/bcma/main.c | 17
-rw-r--r--  drivers/cpufreq/cpufreq.c | 3
-rw-r--r--  drivers/cpufreq/intel_pstate.c | 4
-rw-r--r--  drivers/crypto/ccp/ccp-crypto-aes-cmac.c | 3
-rw-r--r--  drivers/crypto/ccp/ccp-crypto-sha.c | 3
-rw-r--r--  drivers/isdn/mISDN/socket.c | 3
-rw-r--r--  drivers/net/Kconfig | 1
-rw-r--r--  drivers/net/dsa/mv88e6xxx.c | 34
-rw-r--r--  drivers/net/ethernet/atheros/atlx/atl2.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bgmac.c | 5
-rw-r--r--  drivers/net/ethernet/broadcom/bgmac.h | 6
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet.c | 6
-rw-r--r--  drivers/net/ethernet/cavium/thunder/thunder_bgx.c | 5
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 3
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/sge.c | 20
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 43
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_pf.c | 30
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.c | 49
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.h | 10
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 49
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_txrx.h | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_ethtool.c | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_port.c | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_rx.c | 12
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/main.c | 76
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/port.c | 13
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_main.c | 157
-rw-r--r--  drivers/net/ethernet/renesas/ravb_main.c | 3
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.c | 6
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c | 16
-rw-r--r--  drivers/net/ethernet/ti/cpsw.c | 4
-rw-r--r--  drivers/net/ethernet/ti/davinci_emac.c | 3
-rw-r--r--  drivers/net/phy/spi_ks8995.c | 2
-rw-r--r--  drivers/net/usb/cdc_mbim.c | 9
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_drv.c | 12
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_int.h | 4
-rw-r--r--  drivers/net/vrf.c | 177
-rw-r--r--  drivers/net/wireless/broadcom/b43/main.c | 6
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | 2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/ops.c | 2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/trans.c | 4
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c | 6
-rw-r--r--  drivers/pci/access.c | 42
-rw-r--r--  drivers/pci/host/pci-imx6.c | 20
-rw-r--r--  drivers/pci/pci.h | 1
-rw-r--r--  drivers/platform/x86/hp_accel.c | 6
-rw-r--r--  drivers/platform/x86/intel-hid.c | 2
-rw-r--r--  drivers/platform/x86/intel_pmc_ipc.c | 48
-rw-r--r--  drivers/platform/x86/intel_punit_ipc.c | 48
-rw-r--r--  drivers/platform/x86/intel_telemetry_pltdrv.c | 2
-rw-r--r--  drivers/platform/x86/thinkpad_acpi.c | 4
-rw-r--r--  drivers/rtc/rtc-ds1307.c | 6
-rw-r--r--  drivers/s390/block/dcssblk.c | 5
-rw-r--r--  drivers/s390/block/scm_blk.c | 2
-rw-r--r--  drivers/tty/pty.c | 63
-rw-r--r--  drivers/video/fbdev/amba-clcd.c | 15
-rw-r--r--  drivers/video/fbdev/omap2/omapfb/displays/panel-sharp-ls037v7dw01.c | 12
59 files changed, 615 insertions, 494 deletions
diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c
index 786be8fed39e..1f635471f318 100644
--- a/drivers/bcma/main.c
+++ b/drivers/bcma/main.c
@@ -136,7 +136,6 @@ static bool bcma_is_core_needed_early(u16 core_id)
return false;
}
-#if defined(CONFIG_OF) && defined(CONFIG_OF_ADDRESS)
static struct device_node *bcma_of_find_child_device(struct platform_device *parent,
struct bcma_device *core)
{
@@ -184,7 +183,7 @@ static unsigned int bcma_of_get_irq(struct platform_device *parent,
struct of_phandle_args out_irq;
int ret;
- if (!parent || !parent->dev.of_node)
+ if (!IS_ENABLED(CONFIG_OF_IRQ) || !parent || !parent->dev.of_node)
return 0;
ret = bcma_of_irq_parse(parent, core, &out_irq, num);
@@ -202,23 +201,15 @@ static void bcma_of_fill_device(struct platform_device *parent,
{
struct device_node *node;
+ if (!IS_ENABLED(CONFIG_OF_IRQ))
+ return;
+
node = bcma_of_find_child_device(parent, core);
if (node)
core->dev.of_node = node;
core->irq = bcma_of_get_irq(parent, core, 0);
}
-#else
-static void bcma_of_fill_device(struct platform_device *parent,
- struct bcma_device *core)
-{
-}
-static inline unsigned int bcma_of_get_irq(struct platform_device *parent,
- struct bcma_device *core, int num)
-{
- return 0;
-}
-#endif /* CONFIG_OF */
unsigned int bcma_core_irq(struct bcma_device *core, int num)
{
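
The bcma hunks above replace a #if defined(CONFIG_OF) && defined(CONFIG_OF_ADDRESS) block (and its empty fallback stubs) with IS_ENABLED(CONFIG_OF_IRQ) checks, so the OF code is always parsed and type-checked and the disabled branches are folded away as compile-time dead code. A minimal standalone sketch of the mechanism, modelled on (but not copied from) the kernel's linux/kconfig.h:

#include <stdio.h>

#define CONFIG_OF_IRQ 1		/* pretend Kconfig set this; delete the line to get the 0 path */

#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(__ignored, val, ...) val
#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
#define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
#define __is_defined(x) ___is_defined(x)
#define MY_IS_ENABLED(option) __is_defined(option)	/* 1 if the symbol is set, else 0 */

static unsigned int demo_get_irq(void)
{
	if (!MY_IS_ENABLED(CONFIG_OF_IRQ))
		return 0;	/* dead code when the option is off; the compiler drops it */
	return 42;		/* stand-in for the real OF IRQ lookup */
}

int main(void)
{
	printf("MY_IS_ENABLED(CONFIG_OF_IRQ) = %d, irq = %u\n",
	       MY_IS_ENABLED(CONFIG_OF_IRQ), demo_get_irq());
	return 0;
}
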
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index b87596b591b3..e93405f0eac4 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1491,6 +1491,9 @@ static unsigned int cpufreq_update_current_freq(struct cpufreq_policy *policy)
{
unsigned int new_freq;
+ if (cpufreq_suspended)
+ return 0;
+
new_freq = cpufreq_driver->get(policy->cpu);
if (!new_freq)
return 0;
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 8b5a415ee14a..30fe323c4551 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -1130,6 +1130,10 @@ static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu)
sample_ratio = div_fp(int_tofp(pid_params.sample_rate_ns),
int_tofp(duration_ns));
core_busy = mul_fp(core_busy, sample_ratio);
+ } else {
+ sample_ratio = div_fp(100 * cpu->sample.mperf, cpu->sample.tsc);
+ if (sample_ratio < int_tofp(1))
+ core_busy = 0;
}
cpu->sample.busy_scaled = core_busy;
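
The intel_pstate hunk above adds an idle filter built on the driver's fixed-point helpers (int_tofp(), div_fp(), mul_fp()): when 100 * mperf / tsc shows the core was active for less than 1% of the sample interval, the busy estimate is forced to zero so an idle CPU is not driven to a high P-state. A hedged standalone sketch of that arithmetic, assuming an 8-bit fractional format (the real helpers have different signatures):

#include <stdint.h>
#include <stdio.h>

#define FRAC_BITS 8				/* assumption: 8 fractional bits */
#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)

static int64_t div_fp(int64_t x, int64_t y)
{
	return (x << FRAC_BITS) / y;		/* quotient keeps FRAC_BITS of fraction */
}

int main(void)
{
	int64_t mperf = 5000, tsc = 1000000;	/* core active for 0.5% of the sample */
	int64_t core_busy = int_tofp(30);
	int64_t sample_ratio = div_fp(100 * mperf, tsc);

	if (sample_ratio < int_tofp(1))		/* ran less than 1% of the interval */
		core_busy = 0;			/* treat the core as idle */

	printf("sample_ratio=%lld core_busy=%lld\n",
	       (long long)sample_ratio, (long long)core_busy);
	return 0;
}
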
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
index 3d9acc53d247..60fc0fa26fd3 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
@@ -225,6 +225,9 @@ static int ccp_aes_cmac_export(struct ahash_request *req, void *out)
struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
struct ccp_aes_cmac_exp_ctx state;
+ /* Don't let anything leak to 'out' */
+ memset(&state, 0, sizeof(state));
+
state.null_msg = rctx->null_msg;
memcpy(state.iv, rctx->iv, sizeof(state.iv));
state.buf_count = rctx->buf_count;
diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c b/drivers/crypto/ccp/ccp-crypto-sha.c
index b5ad72897dc2..8f36af62fe95 100644
--- a/drivers/crypto/ccp/ccp-crypto-sha.c
+++ b/drivers/crypto/ccp/ccp-crypto-sha.c
@@ -212,6 +212,9 @@ static int ccp_sha_export(struct ahash_request *req, void *out)
struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
struct ccp_sha_exp_ctx state;
+ /* Don't let anything leak to 'out' */
+ memset(&state, 0, sizeof(state));
+
state.type = rctx->type;
state.msg_bits = rctx->msg_bits;
state.first = rctx->first;
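
Both ccp export fixes above apply the same hardening pattern: zero the whole on-stack export structure before filling its fields, so compiler-inserted padding cannot carry stale stack bytes out through the final copy to 'out'. An illustrative sketch with placeholder struct and field names (not the driver's real layout):

#include <stdio.h>
#include <string.h>

struct demo_exp_ctx {
	unsigned char null_msg;			/* padding bytes likely follow here */
	unsigned int buf_count;
	unsigned char buf[8];
};

static void demo_export(const struct demo_exp_ctx *rctx, void *out)
{
	struct demo_exp_ctx state;

	/* Don't let padding or uninitialized bytes leak to 'out' */
	memset(&state, 0, sizeof(state));

	state.null_msg = rctx->null_msg;
	state.buf_count = rctx->buf_count;
	memcpy(state.buf, rctx->buf, sizeof(state.buf));

	memcpy(out, &state, sizeof(state));	/* driver-style export copy */
}

int main(void)
{
	struct demo_exp_ctx rctx = { .null_msg = 1, .buf_count = 3, .buf = "abc" };
	unsigned char out[sizeof(struct demo_exp_ctx)];

	demo_export(&rctx, out);
	printf("exported %zu bytes with no stale stack data\n", sizeof(out));
	return 0;
}
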
diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c
index 0d29b5a6356d..99e5f9751e8b 100644
--- a/drivers/isdn/mISDN/socket.c
+++ b/drivers/isdn/mISDN/socket.c
@@ -715,6 +715,9 @@ base_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
if (!maddr || maddr->family != AF_ISDN)
return -EINVAL;
+ if (addr_len < sizeof(struct sockaddr_mISDN))
+ return -EINVAL;
+
lock_sock(sk);
if (_pms(sk)->dev) {
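
The mISDN fix above adds the missing length validation on the sockaddr passed to bind(): addr_len must cover the whole struct sockaddr_mISDN before anything beyond the family field is trusted. A generic sketch of the same check, using a hypothetical address family and structure:

#include <errno.h>
#include <sys/socket.h>

#define AF_DEMO 46			/* hypothetical address family number */

struct sockaddr_demo {
	sa_family_t family;
	unsigned char channel;
	unsigned char id;
};

static int demo_bind(const struct sockaddr *addr, socklen_t addr_len)
{
	const struct sockaddr_demo *maddr = (const struct sockaddr_demo *)addr;

	if (!maddr || addr_len < sizeof(struct sockaddr_demo))
		return -EINVAL;	/* too short: channel/id would be garbage */
	if (maddr->family != AF_DEMO)
		return -EINVAL;

	/* safe to dereference maddr->channel and maddr->id from here on */
	return 0;
}

int main(void)
{
	struct sockaddr_demo sa = { .family = AF_DEMO, .channel = 1, .id = 2 };

	return demo_bind((const struct sockaddr *)&sa, sizeof(sa)) ? 1 : 0;
}
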
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 2a1ba62b7da2..a24c18eee598 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -195,6 +195,7 @@ config GENEVE
config MACSEC
tristate "IEEE 802.1AE MAC-level encryption (MACsec)"
+ select CRYPTO
select CRYPTO_AES
select CRYPTO_GCM
---help---
diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c
index 50454be86570..a2904029cccc 100644
--- a/drivers/net/dsa/mv88e6xxx.c
+++ b/drivers/net/dsa/mv88e6xxx.c
@@ -2181,27 +2181,10 @@ int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port,
struct net_device *bridge)
{
struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
- u16 fid;
int i, err;
mutex_lock(&ps->smi_mutex);
- /* Get or create the bridge FID and assign it to the port */
- for (i = 0; i < ps->num_ports; ++i)
- if (ps->ports[i].bridge_dev == bridge)
- break;
-
- if (i < ps->num_ports)
- err = _mv88e6xxx_port_fid_get(ds, i, &fid);
- else
- err = _mv88e6xxx_fid_new(ds, &fid);
- if (err)
- goto unlock;
-
- err = _mv88e6xxx_port_fid_set(ds, port, fid);
- if (err)
- goto unlock;
-
/* Assign the bridge and remap each port's VLANTable */
ps->ports[port].bridge_dev = bridge;
@@ -2213,7 +2196,6 @@ int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port,
}
}
-unlock:
mutex_unlock(&ps->smi_mutex);
return err;
@@ -2223,16 +2205,10 @@ void mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port)
{
struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
struct net_device *bridge = ps->ports[port].bridge_dev;
- u16 fid;
int i;
mutex_lock(&ps->smi_mutex);
- /* Give the port a fresh Filtering Information Database */
- if (_mv88e6xxx_fid_new(ds, &fid) ||
- _mv88e6xxx_port_fid_set(ds, port, fid))
- netdev_warn(ds->ports[port], "failed to assign a new FID\n");
-
/* Unassign the bridge and remap each port's VLANTable */
ps->ports[port].bridge_dev = NULL;
@@ -2476,9 +2452,9 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
* the other bits clear.
*/
reg = 1 << port;
- /* Disable learning for DSA and CPU ports */
- if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port))
- reg = PORT_ASSOC_VECTOR_LOCKED_PORT;
+ /* Disable learning for CPU port */
+ if (dsa_is_cpu_port(ds, port))
+ reg = 0;
ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_ASSOC_VECTOR, reg);
if (ret)
@@ -2558,11 +2534,11 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
if (ret)
goto abort;
- /* Port based VLAN map: give each port its own address
+ /* Port based VLAN map: give each port the same default address
* database, and allow bidirectional communication between the
* CPU and DSA port(s), and the other ports.
*/
- ret = _mv88e6xxx_port_fid_set(ds, port, port + 1);
+ ret = _mv88e6xxx_port_fid_set(ds, port, 0);
if (ret)
goto abort;
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
index 8f76f4558a88..2ff465848b65 100644
--- a/drivers/net/ethernet/atheros/atlx/atl2.c
+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
@@ -1412,7 +1412,7 @@ static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err = -EIO;
- netdev->hw_features = NETIF_F_SG | NETIF_F_HW_VLAN_CTAG_RX;
+ netdev->hw_features = NETIF_F_HW_VLAN_CTAG_RX;
netdev->features |= (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
/* Init PHY as early as possible due to power saving issue */
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 99b30a952b38..38db2e4d7d54 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -1572,6 +1572,11 @@ static int bgmac_probe(struct bcma_device *core)
dev_warn(&core->dev, "Using random MAC: %pM\n", mac);
}
+	/* This (reset &) enable is not present in specs or reference driver but
+ * Broadcom does it in arch PCI code when enabling fake PCI device.
+ */
+ bcma_core_enable(core, 0);
+
/* Allocation and references */
net_dev = alloc_etherdev(sizeof(*bgmac));
if (!net_dev)
diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h
index 4fbb093e0d84..9a03c142b742 100644
--- a/drivers/net/ethernet/broadcom/bgmac.h
+++ b/drivers/net/ethernet/broadcom/bgmac.h
@@ -199,9 +199,9 @@
#define BGMAC_CMDCFG_TAI 0x00000200
#define BGMAC_CMDCFG_HD 0x00000400 /* Set if in half duplex mode */
#define BGMAC_CMDCFG_HD_SHIFT 10
-#define BGMAC_CMDCFG_SR_REV0 0x00000800 /* Set to reset mode, for other revs */
-#define BGMAC_CMDCFG_SR_REV4 0x00002000 /* Set to reset mode, only for core rev 4 */
-#define BGMAC_CMDCFG_SR(rev) ((rev == 4) ? BGMAC_CMDCFG_SR_REV4 : BGMAC_CMDCFG_SR_REV0)
+#define BGMAC_CMDCFG_SR_REV0 0x00000800 /* Set to reset mode, for core rev 0-3 */
+#define BGMAC_CMDCFG_SR_REV4 0x00002000 /* Set to reset mode, for core rev >= 4 */
+#define BGMAC_CMDCFG_SR(rev) ((rev >= 4) ? BGMAC_CMDCFG_SR_REV4 : BGMAC_CMDCFG_SR_REV0)
#define BGMAC_CMDCFG_ML 0x00008000 /* Set to activate mac loopback mode */
#define BGMAC_CMDCFG_AE 0x00400000
#define BGMAC_CMDCFG_CFE 0x00800000
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index cf6445d148ca..44ad1490b472 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -878,7 +878,11 @@ static void bcmgenet_get_ethtool_stats(struct net_device *dev,
else
p = (char *)priv;
p += s->stat_offset;
- data[i] = *(u32 *)p;
+ if (sizeof(unsigned long) != sizeof(u32) &&
+ s->stat_sizeof == sizeof(unsigned long))
+ data[i] = *(unsigned long *)p;
+ else
+ data[i] = *(u32 *)p;
}
}
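
The bcmgenet hunk above makes the ethtool stat dump read each counter at its declared width: on 64-bit kernels some software counters are unsigned long (8 bytes), so copying them as u32 truncates the value (and reads the wrong half on big-endian). A hedged sketch of the width-aware read with illustrative names:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct demo_priv {
	unsigned long alloc_rx_buff_failed;	/* software counter, native word size */
	uint32_t mib_rx_crc_errors;		/* 32-bit hardware mirror */
};

struct demo_stat {
	const char *name;
	size_t stat_sizeof;			/* sizeof the field at stat_offset */
	size_t stat_offset;
};

static uint64_t read_stat(const struct demo_priv *priv, const struct demo_stat *s)
{
	const char *p = (const char *)priv + s->stat_offset;

	if (sizeof(unsigned long) != sizeof(uint32_t) &&
	    s->stat_sizeof == sizeof(unsigned long))
		return *(const unsigned long *)p;	/* 64-bit counter, read it whole */
	return *(const uint32_t *)p;
}

int main(void)
{
	struct demo_priv priv = { .alloc_rx_buff_failed = (unsigned long)-1,
				  .mib_rx_crc_errors = 7 };
	struct demo_stat s = { "alloc_rx_buff_failed",
			       sizeof(priv.alloc_rx_buff_failed),
			       offsetof(struct demo_priv, alloc_rx_buff_failed) };

	printf("%s = %llu\n", s.name, (unsigned long long)read_stat(&priv, &s));
	return 0;
}
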
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index 967951582e03..d20539a6d162 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -1011,10 +1011,11 @@ static int bgx_init_of_phy(struct bgx *bgx)
}
lmac++;
- if (lmac == MAX_LMAC_PER_BGX)
+ if (lmac == MAX_LMAC_PER_BGX) {
+ of_node_put(node);
break;
+ }
}
- of_node_put(node);
return 0;
defer:
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 984a3cc26f86..326d4009525e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -1451,6 +1451,9 @@ int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
unsigned int mmd, unsigned int reg, u16 *valp);
int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
unsigned int mmd, unsigned int reg, u16 val);
+int t4_iq_stop(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int iqtype, unsigned int iqid,
+ unsigned int fl0id, unsigned int fl1id);
int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
unsigned int vf, unsigned int iqtype, unsigned int iqid,
unsigned int fl0id, unsigned int fl1id);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 13b144bcf725..6278e5a74b74 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -2981,14 +2981,28 @@ void t4_free_ofld_rxqs(struct adapter *adap, int n, struct sge_ofld_rxq *q)
void t4_free_sge_resources(struct adapter *adap)
{
int i;
- struct sge_eth_rxq *eq = adap->sge.ethrxq;
- struct sge_eth_txq *etq = adap->sge.ethtxq;
+ struct sge_eth_rxq *eq;
+ struct sge_eth_txq *etq;
+
+ /* stop all Rx queues in order to start them draining */
+ for (i = 0; i < adap->sge.ethqsets; i++) {
+ eq = &adap->sge.ethrxq[i];
+ if (eq->rspq.desc)
+ t4_iq_stop(adap, adap->mbox, adap->pf, 0,
+ FW_IQ_TYPE_FL_INT_CAP,
+ eq->rspq.cntxt_id,
+ eq->fl.size ? eq->fl.cntxt_id : 0xffff,
+ 0xffff);
+ }
/* clean up Ethernet Tx/Rx queues */
- for (i = 0; i < adap->sge.ethqsets; i++, eq++, etq++) {
+ for (i = 0; i < adap->sge.ethqsets; i++) {
+ eq = &adap->sge.ethrxq[i];
if (eq->rspq.desc)
free_rspq_fl(adap, &eq->rspq,
eq->fl.size ? &eq->fl : NULL);
+
+ etq = &adap->sge.ethtxq[i];
if (etq->q.desc) {
t4_eth_eq_free(adap, adap->mbox, adap->pf, 0,
etq->q.cntxt_id);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index cc1736bece0f..71586a3e0f61 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -2557,6 +2557,7 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
}
#define EEPROM_STAT_ADDR 0x7bfc
+#define VPD_SIZE 0x800
#define VPD_BASE 0x400
#define VPD_BASE_OLD 0
#define VPD_LEN 1024
@@ -2594,6 +2595,15 @@ int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p)
if (!vpd)
return -ENOMEM;
+ /* We have two VPD data structures stored in the adapter VPD area.
+ * By default, Linux calculates the size of the VPD area by traversing
+ * the first VPD area at offset 0x0, so we need to tell the OS what
+ * our real VPD size is.
+ */
+ ret = pci_set_vpd_size(adapter->pdev, VPD_SIZE);
+ if (ret < 0)
+ goto out;
+
/* Card information normally starts at VPD_BASE but early cards had
* it at 0.
*/
@@ -6940,6 +6950,39 @@ int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
}
/**
+ * t4_iq_stop - stop an ingress queue and its FLs
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @pf: the PF owning the queues
+ * @vf: the VF owning the queues
+ * @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
+ * @iqid: ingress queue id
+ * @fl0id: FL0 queue id or 0xffff if no attached FL0
+ * @fl1id: FL1 queue id or 0xffff if no attached FL1
+ *
+ * Stops an ingress queue and its associated FLs, if any. This causes
+ * any current or future data/messages destined for these queues to be
+ * tossed.
+ */
+int t4_iq_stop(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int iqtype, unsigned int iqid,
+ unsigned int fl0id, unsigned int fl1id)
+{
+ struct fw_iq_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F |
+ FW_CMD_EXEC_F | FW_IQ_CMD_PFN_V(pf) |
+ FW_IQ_CMD_VFN_V(vf));
+ c.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_IQSTOP_F | FW_LEN16(c));
+ c.type_to_iqandstindex = cpu_to_be32(FW_IQ_CMD_TYPE_V(iqtype));
+ c.iqid = cpu_to_be16(iqid);
+ c.fl0id = cpu_to_be16(fl0id);
+ c.fl1id = cpu_to_be16(fl1id);
+ return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
* t4_iq_free - free an ingress queue and its FLs
* @adap: the adapter
* @mbox: mailbox to use for the FW command
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
index 62ccebc5f728..8cf943db5662 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
@@ -1223,18 +1223,32 @@ s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *hw, u32 **results,
if (err)
return err;
- /* verify upper 16 bits are zero */
- if (vid >> 16)
- return FM10K_ERR_PARAM;
-
set = !(vid & FM10K_VLAN_CLEAR);
vid &= ~FM10K_VLAN_CLEAR;
- err = fm10k_iov_select_vid(vf_info, (u16)vid);
- if (err < 0)
- return err;
+ /* if the length field has been set, this is a multi-bit
+ * update request. For multi-bit requests, simply disallow
+ * them when the pf_vid has been set. In this case, the PF
+ * should have already cleared the VLAN_TABLE, and if we
+ * allowed them, it could allow a rogue VF to receive traffic
+ * on a VLAN it was not assigned. In the single-bit case, we
+ * need to modify requests for VLAN 0 to use the default PF or
+ * SW vid when assigned.
+ */
- vid = err;
+ if (vid >> 16) {
+ /* prevent multi-bit requests when PF has
+ * administratively set the VLAN for this VF
+ */
+ if (vf_info->pf_vid)
+ return FM10K_ERR_PARAM;
+ } else {
+ err = fm10k_iov_select_vid(vf_info, (u16)vid);
+ if (err < 0)
+ return err;
+
+ vid = err;
+ }
/* update VSI info for VF in regards to VLAN table */
err = hw->mac.ops.update_vlan(hw, vid, vf_info->vsi, set);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 084d0ab316b7..6a49b7ae511c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -2594,35 +2594,34 @@ int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
}
/**
- * __i40e_chk_linearize - Check if there are more than 8 fragments per packet
+ * __i40e_chk_linearize - Check if there are more than 8 buffers per packet
* @skb: send buffer
*
- * Note: Our HW can't scatter-gather more than 8 fragments to build
- * a packet on the wire and so we need to figure out the cases where we
- * need to linearize the skb.
+ * Note: Our HW can't DMA more than 8 buffers to build a packet on the wire
+ * and so we need to figure out the cases where we need to linearize the skb.
+ *
+ * For TSO we need to count the TSO header and segment payload separately.
+ * As such we need to check cases where we have 7 fragments or more as we
+ * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
+ * the segment payload in the first descriptor, and another 7 for the
+ * fragments.
**/
bool __i40e_chk_linearize(struct sk_buff *skb)
{
const struct skb_frag_struct *frag, *stale;
- int gso_size, nr_frags, sum;
-
- /* check to see if TSO is enabled, if so we may get a repreive */
- gso_size = skb_shinfo(skb)->gso_size;
- if (unlikely(!gso_size))
- return true;
+ int nr_frags, sum;
- /* no need to check if number of frags is less than 8 */
+ /* no need to check if number of frags is less than 7 */
nr_frags = skb_shinfo(skb)->nr_frags;
- if (nr_frags < I40E_MAX_BUFFER_TXD)
+ if (nr_frags < (I40E_MAX_BUFFER_TXD - 1))
return false;
/* We need to walk through the list and validate that each group
* of 6 fragments totals at least gso_size. However we don't need
- * to perform such validation on the first or last 6 since the first
- * 6 cannot inherit any data from a descriptor before them, and the
- * last 6 cannot inherit any data from a descriptor after them.
+ * to perform such validation on the last 6 since the last 6 cannot
+ * inherit any data from a descriptor after them.
*/
- nr_frags -= I40E_MAX_BUFFER_TXD - 1;
+ nr_frags -= I40E_MAX_BUFFER_TXD - 2;
frag = &skb_shinfo(skb)->frags[0];
/* Initialize size to the negative value of gso_size minus 1. We
@@ -2631,21 +2630,21 @@ bool __i40e_chk_linearize(struct sk_buff *skb)
* descriptors for a single transmit as the header and previous
* fragment are already consuming 2 descriptors.
*/
- sum = 1 - gso_size;
+ sum = 1 - skb_shinfo(skb)->gso_size;
- /* Add size of frags 1 through 5 to create our initial sum */
- sum += skb_frag_size(++frag);
- sum += skb_frag_size(++frag);
- sum += skb_frag_size(++frag);
- sum += skb_frag_size(++frag);
- sum += skb_frag_size(++frag);
+ /* Add size of frags 0 through 4 to create our initial sum */
+ sum += skb_frag_size(frag++);
+ sum += skb_frag_size(frag++);
+ sum += skb_frag_size(frag++);
+ sum += skb_frag_size(frag++);
+ sum += skb_frag_size(frag++);
/* Walk through fragments adding latest fragment, testing it, and
* then removing stale fragments from the sum.
*/
stale = &skb_shinfo(skb)->frags[0];
for (;;) {
- sum += skb_frag_size(++frag);
+ sum += skb_frag_size(frag++);
/* if sum is negative we failed to make sufficient progress */
if (sum < 0)
@@ -2655,7 +2654,7 @@ bool __i40e_chk_linearize(struct sk_buff *skb)
if (!--nr_frags)
break;
- sum -= skb_frag_size(++stale);
+ sum -= skb_frag_size(stale++);
}
return false;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index cdd5dc00aec5..a9bd70537d65 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -413,10 +413,14 @@ static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
**/
static inline bool i40e_chk_linearize(struct sk_buff *skb, int count)
{
- /* we can only support up to 8 data buffers for a single send */
- if (likely(count <= I40E_MAX_BUFFER_TXD))
+ /* Both TSO and single send will work if count is less than 8 */
+ if (likely(count < I40E_MAX_BUFFER_TXD))
return false;
- return __i40e_chk_linearize(skb);
+ if (skb_is_gso(skb))
+ return __i40e_chk_linearize(skb);
+
+ /* we can support up to 8 data buffers for a single send */
+ return count != I40E_MAX_BUFFER_TXD;
}
#endif /* _I40E_TXRX_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index ebcc25c05796..cea97daa844c 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -1796,35 +1796,34 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
}
/**
- * __i40evf_chk_linearize - Check if there are more than 8 fragments per packet
+ * __i40evf_chk_linearize - Check if there are more than 8 buffers per packet
* @skb: send buffer
*
- * Note: Our HW can't scatter-gather more than 8 fragments to build
- * a packet on the wire and so we need to figure out the cases where we
- * need to linearize the skb.
+ * Note: Our HW can't DMA more than 8 buffers to build a packet on the wire
+ * and so we need to figure out the cases where we need to linearize the skb.
+ *
+ * For TSO we need to count the TSO header and segment payload separately.
+ * As such we need to check cases where we have 7 fragments or more as we
+ * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
+ * the segment payload in the first descriptor, and another 7 for the
+ * fragments.
**/
bool __i40evf_chk_linearize(struct sk_buff *skb)
{
const struct skb_frag_struct *frag, *stale;
- int gso_size, nr_frags, sum;
-
- /* check to see if TSO is enabled, if so we may get a repreive */
- gso_size = skb_shinfo(skb)->gso_size;
- if (unlikely(!gso_size))
- return true;
+ int nr_frags, sum;
- /* no need to check if number of frags is less than 8 */
+ /* no need to check if number of frags is less than 7 */
nr_frags = skb_shinfo(skb)->nr_frags;
- if (nr_frags < I40E_MAX_BUFFER_TXD)
+ if (nr_frags < (I40E_MAX_BUFFER_TXD - 1))
return false;
/* We need to walk through the list and validate that each group
* of 6 fragments totals at least gso_size. However we don't need
- * to perform such validation on the first or last 6 since the first
- * 6 cannot inherit any data from a descriptor before them, and the
- * last 6 cannot inherit any data from a descriptor after them.
+ * to perform such validation on the last 6 since the last 6 cannot
+ * inherit any data from a descriptor after them.
*/
- nr_frags -= I40E_MAX_BUFFER_TXD - 1;
+ nr_frags -= I40E_MAX_BUFFER_TXD - 2;
frag = &skb_shinfo(skb)->frags[0];
/* Initialize size to the negative value of gso_size minus 1. We
@@ -1833,21 +1832,21 @@ bool __i40evf_chk_linearize(struct sk_buff *skb)
* descriptors for a single transmit as the header and previous
* fragment are already consuming 2 descriptors.
*/
- sum = 1 - gso_size;
+ sum = 1 - skb_shinfo(skb)->gso_size;
- /* Add size of frags 1 through 5 to create our initial sum */
- sum += skb_frag_size(++frag);
- sum += skb_frag_size(++frag);
- sum += skb_frag_size(++frag);
- sum += skb_frag_size(++frag);
- sum += skb_frag_size(++frag);
+ /* Add size of frags 0 through 4 to create our initial sum */
+ sum += skb_frag_size(frag++);
+ sum += skb_frag_size(frag++);
+ sum += skb_frag_size(frag++);
+ sum += skb_frag_size(frag++);
+ sum += skb_frag_size(frag++);
/* Walk through fragments adding latest fragment, testing it, and
* then removing stale fragments from the sum.
*/
stale = &skb_shinfo(skb)->frags[0];
for (;;) {
- sum += skb_frag_size(++frag);
+ sum += skb_frag_size(frag++);
/* if sum is negative we failed to make sufficient progress */
if (sum < 0)
@@ -1857,7 +1856,7 @@ bool __i40evf_chk_linearize(struct sk_buff *skb)
if (!--nr_frags)
break;
- sum -= skb_frag_size(++stale);
+ sum -= skb_frag_size(stale++);
}
return false;
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
index c1dd8c5c9666..0429553fe887 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
@@ -395,10 +395,14 @@ static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
**/
static inline bool i40e_chk_linearize(struct sk_buff *skb, int count)
{
- /* we can only support up to 8 data buffers for a single send */
- if (likely(count <= I40E_MAX_BUFFER_TXD))
+ /* Both TSO and single send will work if count is less than 8 */
+ if (likely(count < I40E_MAX_BUFFER_TXD))
return false;
- return __i40evf_chk_linearize(skb);
+ if (skb_is_gso(skb))
+ return __i40evf_chk_linearize(skb);
+
+ /* we can support up to 8 data buffers for a single send */
+ return count != I40E_MAX_BUFFER_TXD;
}
#endif /* _I40E_TXRX_H_ */
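
The i40e and i40evf changes above rework the 8-descriptor budget check: a frame needing fewer than 8 buffers always fits, a non-TSO frame may use exactly 8, and only TSO frames with 7 or more fragments need the deeper walk, because the TSO header and the first payload chunk already consume two descriptors. A deliberately simplified, pessimistic sketch of that decision, which omits the gso_size fragment walk the real __i40e_chk_linearize() performs:

#include <stdbool.h>
#include <stdio.h>

#define I40E_MAX_BUFFER_TXD 8	/* HW limit: buffers chained per packet on the wire */

static bool demo_needs_linearize(int count, bool is_gso, int nr_frags)
{
	if (count < I40E_MAX_BUFFER_TXD)	/* fits either way */
		return false;
	if (!is_gso)				/* non-TSO may use exactly 8 */
		return count != I40E_MAX_BUFFER_TXD;
	/* TSO: header + first payload chunk + fragments per segment; the real
	 * code walks the fragment sizes before giving up, this sketch does not.
	 */
	return nr_frags >= I40E_MAX_BUFFER_TXD - 1;
}

int main(void)
{
	printf("%d\n", demo_needs_linearize(7, true, 6));	/* 0: fits */
	printf("%d\n", demo_needs_linearize(8, false, 8));	/* 0: exactly 8, no TSO */
	printf("%d\n", demo_needs_linearize(9, true, 7));	/* 1: may exceed 8 per segment */
	return 0;
}
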
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index f69584a9b47f..c761194bb323 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -337,7 +337,7 @@ static int mlx4_en_get_sset_count(struct net_device *dev, int sset)
case ETH_SS_STATS:
return bitmap_iterator_count(&it) +
(priv->tx_ring_num * 2) +
- (priv->rx_ring_num * 2);
+ (priv->rx_ring_num * 3);
case ETH_SS_TEST:
return MLX4_EN_NUM_SELF_TEST - !(priv->mdev->dev->caps.flags
& MLX4_DEV_CAP_FLAG_UC_LOOPBACK) * 2;
@@ -404,6 +404,7 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev,
for (i = 0; i < priv->rx_ring_num; i++) {
data[index++] = priv->rx_ring[i]->packets;
data[index++] = priv->rx_ring[i]->bytes;
+ data[index++] = priv->rx_ring[i]->dropped;
}
spin_unlock_bh(&priv->stats_lock);
@@ -477,6 +478,8 @@ static void mlx4_en_get_strings(struct net_device *dev,
"rx%d_packets", i);
sprintf(data + (index++) * ETH_GSTRING_LEN,
"rx%d_bytes", i);
+ sprintf(data + (index++) * ETH_GSTRING_LEN,
+ "rx%d_dropped", i);
}
break;
case ETH_SS_PRIV_FLAGS:
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c
index 3904b5fc0b7c..20b6c2e678b8 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c
@@ -158,6 +158,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
u64 in_mod = reset << 8 | port;
int err;
int i, counter_index;
+ unsigned long sw_rx_dropped = 0;
mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
if (IS_ERR(mailbox))
@@ -180,6 +181,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
for (i = 0; i < priv->rx_ring_num; i++) {
stats->rx_packets += priv->rx_ring[i]->packets;
stats->rx_bytes += priv->rx_ring[i]->bytes;
+ sw_rx_dropped += priv->rx_ring[i]->dropped;
priv->port_stats.rx_chksum_good += priv->rx_ring[i]->csum_ok;
priv->port_stats.rx_chksum_none += priv->rx_ring[i]->csum_none;
priv->port_stats.rx_chksum_complete += priv->rx_ring[i]->csum_complete;
@@ -236,7 +238,8 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
&mlx4_en_stats->MCAST_prio_1,
NUM_PRIORITIES);
stats->collisions = 0;
- stats->rx_dropped = be32_to_cpu(mlx4_en_stats->RDROP);
+ stats->rx_dropped = be32_to_cpu(mlx4_en_stats->RDROP) +
+ sw_rx_dropped;
stats->rx_length_errors = be32_to_cpu(mlx4_en_stats->RdropLength);
stats->rx_over_errors = 0;
stats->rx_crc_errors = be32_to_cpu(mlx4_en_stats->RCRC);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 86bcfe510e4e..b723e3bcab39 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -61,7 +61,7 @@ static int mlx4_alloc_pages(struct mlx4_en_priv *priv,
gfp_t gfp = _gfp;
if (order)
- gfp |= __GFP_COMP | __GFP_NOWARN;
+ gfp |= __GFP_COMP | __GFP_NOWARN | __GFP_NOMEMALLOC;
page = alloc_pages(gfp, order);
if (likely(page))
break;
@@ -126,7 +126,9 @@ out:
dma_unmap_page(priv->ddev, page_alloc[i].dma,
page_alloc[i].page_size, PCI_DMA_FROMDEVICE);
page = page_alloc[i].page;
- set_page_count(page, 1);
+ /* Revert changes done by mlx4_alloc_pages */
+ page_ref_sub(page, page_alloc[i].page_size /
+ priv->frag_info[i].frag_stride - 1);
put_page(page);
}
}
@@ -176,7 +178,9 @@ out:
dma_unmap_page(priv->ddev, page_alloc->dma,
page_alloc->page_size, PCI_DMA_FROMDEVICE);
page = page_alloc->page;
- set_page_count(page, 1);
+ /* Revert changes done by mlx4_alloc_pages */
+ page_ref_sub(page, page_alloc->page_size /
+ priv->frag_info[i].frag_stride - 1);
put_page(page);
page_alloc->page = NULL;
}
@@ -939,7 +943,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
/* GRO not possible, complete processing here */
skb = mlx4_en_rx_skb(priv, rx_desc, frags, length);
if (!skb) {
- priv->stats.rx_dropped++;
+ ring->dropped++;
goto next;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 358f7230da58..12c77a70abdb 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -3172,6 +3172,34 @@ static int mlx4_check_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap
return 0;
}
+static int mlx4_pci_enable_device(struct mlx4_dev *dev)
+{
+ struct pci_dev *pdev = dev->persist->pdev;
+ int err = 0;
+
+ mutex_lock(&dev->persist->pci_status_mutex);
+ if (dev->persist->pci_status == MLX4_PCI_STATUS_DISABLED) {
+ err = pci_enable_device(pdev);
+ if (!err)
+ dev->persist->pci_status = MLX4_PCI_STATUS_ENABLED;
+ }
+ mutex_unlock(&dev->persist->pci_status_mutex);
+
+ return err;
+}
+
+static void mlx4_pci_disable_device(struct mlx4_dev *dev)
+{
+ struct pci_dev *pdev = dev->persist->pdev;
+
+ mutex_lock(&dev->persist->pci_status_mutex);
+ if (dev->persist->pci_status == MLX4_PCI_STATUS_ENABLED) {
+ pci_disable_device(pdev);
+ dev->persist->pci_status = MLX4_PCI_STATUS_DISABLED;
+ }
+ mutex_unlock(&dev->persist->pci_status_mutex);
+}
+
static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data,
int total_vfs, int *nvfs, struct mlx4_priv *priv,
int reset_flow)
@@ -3582,7 +3610,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data,
pr_info(DRV_NAME ": Initializing %s\n", pci_name(pdev));
- err = pci_enable_device(pdev);
+ err = mlx4_pci_enable_device(&priv->dev);
if (err) {
dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
return err;
@@ -3715,7 +3743,7 @@ err_release_regions:
pci_release_regions(pdev);
err_disable_pdev:
- pci_disable_device(pdev);
+ mlx4_pci_disable_device(&priv->dev);
pci_set_drvdata(pdev, NULL);
return err;
}
@@ -3775,6 +3803,7 @@ static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
priv->pci_dev_data = id->driver_data;
mutex_init(&dev->persist->device_state_mutex);
mutex_init(&dev->persist->interface_state_mutex);
+ mutex_init(&dev->persist->pci_status_mutex);
ret = devlink_register(devlink, &pdev->dev);
if (ret)
@@ -3923,7 +3952,7 @@ static void mlx4_remove_one(struct pci_dev *pdev)
}
pci_release_regions(pdev);
- pci_disable_device(pdev);
+ mlx4_pci_disable_device(dev);
devlink_unregister(devlink);
kfree(dev->persist);
devlink_free(devlink);
@@ -4042,7 +4071,7 @@ static pci_ers_result_t mlx4_pci_err_detected(struct pci_dev *pdev,
if (state == pci_channel_io_perm_failure)
return PCI_ERS_RESULT_DISCONNECT;
- pci_disable_device(pdev);
+ mlx4_pci_disable_device(persist->dev);
return PCI_ERS_RESULT_NEED_RESET;
}
@@ -4050,45 +4079,53 @@ static pci_ers_result_t mlx4_pci_slot_reset(struct pci_dev *pdev)
{
struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
struct mlx4_dev *dev = persist->dev;
- struct mlx4_priv *priv = mlx4_priv(dev);
- int ret;
- int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
- int total_vfs;
+ int err;
mlx4_err(dev, "mlx4_pci_slot_reset was called\n");
- ret = pci_enable_device(pdev);
- if (ret) {
- mlx4_err(dev, "Can not re-enable device, ret=%d\n", ret);
+ err = mlx4_pci_enable_device(dev);
+ if (err) {
+ mlx4_err(dev, "Can not re-enable device, err=%d\n", err);
return PCI_ERS_RESULT_DISCONNECT;
}
pci_set_master(pdev);
pci_restore_state(pdev);
pci_save_state(pdev);
+ return PCI_ERS_RESULT_RECOVERED;
+}
+static void mlx4_pci_resume(struct pci_dev *pdev)
+{
+ struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
+ struct mlx4_dev *dev = persist->dev;
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
+ int total_vfs;
+ int err;
+
+ mlx4_err(dev, "%s was called\n", __func__);
total_vfs = dev->persist->num_vfs;
memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs));
mutex_lock(&persist->interface_state_mutex);
if (!(persist->interface_state & MLX4_INTERFACE_STATE_UP)) {
- ret = mlx4_load_one(pdev, priv->pci_dev_data, total_vfs, nvfs,
+ err = mlx4_load_one(pdev, priv->pci_dev_data, total_vfs, nvfs,
priv, 1);
- if (ret) {
- mlx4_err(dev, "%s: mlx4_load_one failed, ret=%d\n",
- __func__, ret);
+ if (err) {
+ mlx4_err(dev, "%s: mlx4_load_one failed, err=%d\n",
+ __func__, err);
goto end;
}
- ret = restore_current_port_types(dev, dev->persist->
+ err = restore_current_port_types(dev, dev->persist->
curr_port_type, dev->persist->
curr_port_poss_type);
- if (ret)
- mlx4_err(dev, "could not restore original port types (%d)\n", ret);
+ if (err)
+ mlx4_err(dev, "could not restore original port types (%d)\n", err);
}
end:
mutex_unlock(&persist->interface_state_mutex);
- return ret ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
}
static void mlx4_shutdown(struct pci_dev *pdev)
@@ -4105,6 +4142,7 @@ static void mlx4_shutdown(struct pci_dev *pdev)
static const struct pci_error_handlers mlx4_err_handler = {
.error_detected = mlx4_pci_err_detected,
.slot_reset = mlx4_pci_slot_reset,
+ .resume = mlx4_pci_resume,
};
static struct pci_driver mlx4_driver = {
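
The mlx4 rework above wraps pci_enable_device()/pci_disable_device() in helpers that track the enable state under a dedicated mutex, so the normal remove path and the new AER error_detected/slot_reset/resume handlers can each call them without double-enabling or double-disabling the function. A standalone sketch of that idempotent, mutex-guarded pattern (the pci_* calls are stand-ins, not the kernel API):

#include <pthread.h>
#include <stdio.h>

enum pci_status { PCI_STATUS_DISABLED, PCI_STATUS_ENABLED };

struct demo_dev {
	pthread_mutex_t pci_status_mutex;
	enum pci_status pci_status;
};

static int fake_pci_enable_device(struct demo_dev *dev)   { (void)dev; return 0; }
static void fake_pci_disable_device(struct demo_dev *dev) { (void)dev; }

static int demo_pci_enable_device(struct demo_dev *dev)
{
	int err = 0;

	pthread_mutex_lock(&dev->pci_status_mutex);
	if (dev->pci_status == PCI_STATUS_DISABLED) {
		err = fake_pci_enable_device(dev);
		if (!err)
			dev->pci_status = PCI_STATUS_ENABLED;
	}
	pthread_mutex_unlock(&dev->pci_status_mutex);
	return err;
}

static void demo_pci_disable_device(struct demo_dev *dev)
{
	pthread_mutex_lock(&dev->pci_status_mutex);
	if (dev->pci_status == PCI_STATUS_ENABLED) {
		fake_pci_disable_device(dev);
		dev->pci_status = PCI_STATUS_DISABLED;
	}
	pthread_mutex_unlock(&dev->pci_status_mutex);
}

int main(void)
{
	struct demo_dev dev = { .pci_status_mutex = PTHREAD_MUTEX_INITIALIZER,
				.pci_status = PCI_STATUS_DISABLED };

	demo_pci_enable_device(&dev);
	demo_pci_disable_device(&dev);
	demo_pci_disable_device(&dev);	/* second call is a harmless no-op */
	printf("status=%d\n", dev.pci_status);
	return 0;
}
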
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index ef9683101ead..c9d7fc5159f2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -586,6 +586,8 @@ struct mlx4_mfunc_master_ctx {
struct mlx4_master_qp0_state qp0_state[MLX4_MAX_PORTS + 1];
int init_port_ref[MLX4_MAX_PORTS + 1];
u16 max_mtu[MLX4_MAX_PORTS + 1];
+ u8 pptx;
+ u8 pprx;
int disable_mcast_ref[MLX4_MAX_PORTS + 1];
struct mlx4_resource_tracker res_tracker;
struct workqueue_struct *comm_wq;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index d12ab6a73344..63b1aeae2c03 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -323,6 +323,7 @@ struct mlx4_en_rx_ring {
unsigned long csum_ok;
unsigned long csum_none;
unsigned long csum_complete;
+ unsigned long dropped;
int hwtstamp_rx_filter;
cpumask_var_t affinity_mask;
};
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index 211c65087997..087b23b320cb 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -1317,6 +1317,19 @@ static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
}
gen_context->mtu = cpu_to_be16(master->max_mtu[port]);
+ /* Slave cannot change Global Pause configuration */
+ if (slave != mlx4_master_func_num(dev) &&
+ ((gen_context->pptx != master->pptx) ||
+ (gen_context->pprx != master->pprx))) {
+ gen_context->pptx = master->pptx;
+ gen_context->pprx = master->pprx;
+ mlx4_warn(dev,
+ "denying Global Pause change for slave:%d\n",
+ slave);
+ } else {
+ master->pptx = gen_context->pptx;
+ master->pprx = gen_context->pprx;
+ }
break;
case MLX4_SET_PORT_GID_TABLE:
/* change to MULTIPLE entries: number of guest's gids
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 518af329502d..7869465435fa 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -750,6 +750,12 @@ static bool qede_has_tx_work(struct qede_fastpath *fp)
return false;
}
+static inline void qede_rx_bd_ring_consume(struct qede_rx_queue *rxq)
+{
+ qed_chain_consume(&rxq->rx_bd_ring);
+ rxq->sw_rx_cons++;
+}
+
/* This function reuses the buffer(from an offset) from
* consumer index to producer index in the bd ring
*/
@@ -773,6 +779,21 @@ static inline void qede_reuse_page(struct qede_dev *edev,
curr_cons->data = NULL;
}
+/* In case of allocation failures reuse buffers
+ * from consumer index to produce buffers for firmware
+ */
+static void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq,
+ struct qede_dev *edev, u8 count)
+{
+ struct sw_rx_data *curr_cons;
+
+ for (; count > 0; count--) {
+ curr_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];
+ qede_reuse_page(edev, rxq, curr_cons);
+ qede_rx_bd_ring_consume(rxq);
+ }
+}
+
static inline int qede_realloc_rx_buffer(struct qede_dev *edev,
struct qede_rx_queue *rxq,
struct sw_rx_data *curr_cons)
@@ -781,8 +802,14 @@ static inline int qede_realloc_rx_buffer(struct qede_dev *edev,
curr_cons->page_offset += rxq->rx_buf_seg_size;
if (curr_cons->page_offset == PAGE_SIZE) {
- if (unlikely(qede_alloc_rx_buffer(edev, rxq)))
+ if (unlikely(qede_alloc_rx_buffer(edev, rxq))) {
+ /* Since we failed to allocate new buffer
+ * current buffer can be used again.
+ */
+ curr_cons->page_offset -= rxq->rx_buf_seg_size;
+
return -ENOMEM;
+ }
dma_unmap_page(&edev->pdev->dev, curr_cons->mapping,
PAGE_SIZE, DMA_FROM_DEVICE);
@@ -901,7 +928,10 @@ static int qede_fill_frag_skb(struct qede_dev *edev,
len_on_bd);
if (unlikely(qede_realloc_rx_buffer(edev, rxq, current_bd))) {
- tpa_info->agg_state = QEDE_AGG_STATE_ERROR;
+ /* Incr page ref count to reuse on allocation failure
+ * so that it doesn't get freed while freeing SKB.
+ */
+ atomic_inc(&current_bd->data->_count);
goto out;
}
@@ -915,6 +945,8 @@ static int qede_fill_frag_skb(struct qede_dev *edev,
return 0;
out:
+ tpa_info->agg_state = QEDE_AGG_STATE_ERROR;
+ qede_recycle_rx_bd_ring(rxq, edev, 1);
return -ENOMEM;
}
@@ -966,8 +998,9 @@ static void qede_tpa_start(struct qede_dev *edev,
tpa_info->skb = netdev_alloc_skb(edev->ndev,
le16_to_cpu(cqe->len_on_first_bd));
if (unlikely(!tpa_info->skb)) {
+ DP_NOTICE(edev, "Failed to allocate SKB for gro\n");
tpa_info->agg_state = QEDE_AGG_STATE_ERROR;
- return;
+ goto cons_buf;
}
skb_put(tpa_info->skb, le16_to_cpu(cqe->len_on_first_bd));
@@ -990,6 +1023,7 @@ static void qede_tpa_start(struct qede_dev *edev,
/* This is needed in order to enable forwarding support */
qede_set_gro_params(edev, tpa_info->skb, cqe);
+cons_buf: /* We still need to handle bd_len_list to consume buffers */
if (likely(cqe->ext_bd_len_list[0]))
qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
le16_to_cpu(cqe->ext_bd_len_list[0]));
@@ -1007,7 +1041,6 @@ static void qede_gro_ip_csum(struct sk_buff *skb)
const struct iphdr *iph = ip_hdr(skb);
struct tcphdr *th;
- skb_set_network_header(skb, 0);
skb_set_transport_header(skb, sizeof(struct iphdr));
th = tcp_hdr(skb);
@@ -1022,7 +1055,6 @@ static void qede_gro_ipv6_csum(struct sk_buff *skb)
struct ipv6hdr *iph = ipv6_hdr(skb);
struct tcphdr *th;
- skb_set_network_header(skb, 0);
skb_set_transport_header(skb, sizeof(struct ipv6hdr));
th = tcp_hdr(skb);
@@ -1037,8 +1069,21 @@ static void qede_gro_receive(struct qede_dev *edev,
struct sk_buff *skb,
u16 vlan_tag)
{
+ /* FW can send a single MTU sized packet from gro flow
+ * due to aggregation timeout/last segment etc. which
+ * is not expected to be a gro packet. If a skb has zero
+ * frags then simply push it in the stack as non gso skb.
+ */
+ if (unlikely(!skb->data_len)) {
+ skb_shinfo(skb)->gso_type = 0;
+ skb_shinfo(skb)->gso_size = 0;
+ goto send_skb;
+ }
+
#ifdef CONFIG_INET
if (skb_shinfo(skb)->gso_size) {
+ skb_set_network_header(skb, 0);
+
switch (skb->protocol) {
case htons(ETH_P_IP):
qede_gro_ip_csum(skb);
@@ -1053,6 +1098,8 @@ static void qede_gro_receive(struct qede_dev *edev,
}
}
#endif
+
+send_skb:
skb_record_rx_queue(skb, fp->rss_id);
qede_skb_receive(edev, fp, skb, vlan_tag);
}
@@ -1244,17 +1291,17 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
"CQE in CONS = %u has error, flags = %x, dropping incoming packet\n",
sw_comp_cons, parse_flag);
rxq->rx_hw_errors++;
- qede_reuse_page(edev, rxq, sw_rx_data);
- goto next_rx;
+ qede_recycle_rx_bd_ring(rxq, edev, fp_cqe->bd_num);
+ goto next_cqe;
}
skb = netdev_alloc_skb(edev->ndev, QEDE_RX_HDR_SIZE);
if (unlikely(!skb)) {
DP_NOTICE(edev,
"Build_skb failed, dropping incoming packet\n");
- qede_reuse_page(edev, rxq, sw_rx_data);
+ qede_recycle_rx_bd_ring(rxq, edev, fp_cqe->bd_num);
rxq->rx_alloc_errors++;
- goto next_rx;
+ goto next_cqe;
}
/* Copy data into SKB */
@@ -1288,11 +1335,22 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
if (unlikely(qede_realloc_rx_buffer(edev, rxq,
sw_rx_data))) {
DP_ERR(edev, "Failed to allocate rx buffer\n");
+ /* Incr page ref count to reuse on allocation
+ * failure so that it doesn't get freed while
+ * freeing SKB.
+ */
+
+ atomic_inc(&sw_rx_data->data->_count);
rxq->rx_alloc_errors++;
+ qede_recycle_rx_bd_ring(rxq, edev,
+ fp_cqe->bd_num);
+ dev_kfree_skb_any(skb);
goto next_cqe;
}
}
+ qede_rx_bd_ring_consume(rxq);
+
if (fp_cqe->bd_num != 1) {
u16 pkt_len = le16_to_cpu(fp_cqe->pkt_len);
u8 num_frags;
@@ -1303,18 +1361,27 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
num_frags--) {
u16 cur_size = pkt_len > rxq->rx_buf_size ?
rxq->rx_buf_size : pkt_len;
+ if (unlikely(!cur_size)) {
+ DP_ERR(edev,
+ "Still got %d BDs for mapping jumbo, but length became 0\n",
+ num_frags);
+ qede_recycle_rx_bd_ring(rxq, edev,
+ num_frags);
+ dev_kfree_skb_any(skb);
+ goto next_cqe;
+ }
- WARN_ONCE(!cur_size,
- "Still got %d BDs for mapping jumbo, but length became 0\n",
- num_frags);
-
- if (unlikely(qede_alloc_rx_buffer(edev, rxq)))
+ if (unlikely(qede_alloc_rx_buffer(edev, rxq))) {
+ qede_recycle_rx_bd_ring(rxq, edev,
+ num_frags);
+ dev_kfree_skb_any(skb);
goto next_cqe;
+ }
- rxq->sw_rx_cons++;
sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
sw_rx_data = &rxq->sw_rx_ring[sw_rx_index];
- qed_chain_consume(&rxq->rx_bd_ring);
+ qede_rx_bd_ring_consume(rxq);
+
dma_unmap_page(&edev->pdev->dev,
sw_rx_data->mapping,
PAGE_SIZE, DMA_FROM_DEVICE);
@@ -1330,7 +1397,7 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
pkt_len -= cur_size;
}
- if (pkt_len)
+ if (unlikely(pkt_len))
DP_ERR(edev,
"Mapped all BDs of jumbo, but still have %d bytes\n",
pkt_len);
@@ -1349,10 +1416,6 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
skb_record_rx_queue(skb, fp->rss_id);
qede_skb_receive(edev, fp, skb, le16_to_cpu(fp_cqe->vlan_tag));
-
- qed_chain_consume(&rxq->rx_bd_ring);
-next_rx:
- rxq->sw_rx_cons++;
next_rx_only:
rx_pkt++;
@@ -2257,7 +2320,7 @@ static void qede_free_sge_mem(struct qede_dev *edev,
struct qede_agg_info *tpa_info = &rxq->tpa_info[i];
struct sw_rx_data *replace_buf = &tpa_info->replace_buf;
- if (replace_buf) {
+ if (replace_buf->data) {
dma_unmap_page(&edev->pdev->dev,
dma_unmap_addr(replace_buf, mapping),
PAGE_SIZE, DMA_FROM_DEVICE);
@@ -2377,7 +2440,7 @@ err:
static int qede_alloc_mem_rxq(struct qede_dev *edev,
struct qede_rx_queue *rxq)
{
- int i, rc, size, num_allocated;
+ int i, rc, size;
rxq->num_rx_buffers = edev->q_num_rx_buffers;
@@ -2394,6 +2457,7 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev,
rxq->sw_rx_ring = kzalloc(size, GFP_KERNEL);
if (!rxq->sw_rx_ring) {
DP_ERR(edev, "Rx buffers ring allocation failed\n");
+ rc = -ENOMEM;
goto err;
}
@@ -2421,26 +2485,16 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev,
/* Allocate buffers for the Rx ring */
for (i = 0; i < rxq->num_rx_buffers; i++) {
rc = qede_alloc_rx_buffer(edev, rxq);
- if (rc)
- break;
- }
- num_allocated = i;
- if (!num_allocated) {
- DP_ERR(edev, "Rx buffers allocation failed\n");
- goto err;
- } else if (num_allocated < rxq->num_rx_buffers) {
- DP_NOTICE(edev,
- "Allocated less buffers than desired (%d allocated)\n",
- num_allocated);
+ if (rc) {
+ DP_ERR(edev,
+ "Rx buffers allocation failed at index %d\n", i);
+ goto err;
+ }
}
- qede_alloc_sge_mem(edev, rxq);
-
- return 0;
-
+ rc = qede_alloc_sge_mem(edev, rxq);
err:
- qede_free_mem_rxq(edev, rxq);
- return -ENOMEM;
+ return rc;
}
static void qede_free_mem_txq(struct qede_dev *edev,
@@ -2523,10 +2577,8 @@ static int qede_alloc_mem_fp(struct qede_dev *edev,
}
return 0;
-
err:
- qede_free_mem_fp(edev, fp);
- return -ENOMEM;
+ return rc;
}
static void qede_free_mem_load(struct qede_dev *edev)
@@ -2549,22 +2601,13 @@ static int qede_alloc_mem_load(struct qede_dev *edev)
struct qede_fastpath *fp = &edev->fp_array[rss_id];
rc = qede_alloc_mem_fp(edev, fp);
- if (rc)
- break;
- }
-
- if (rss_id != QEDE_RSS_CNT(edev)) {
- /* Failed allocating memory for all the queues */
- if (!rss_id) {
+ if (rc) {
DP_ERR(edev,
- "Failed to allocate memory for the leading queue\n");
- rc = -ENOMEM;
- } else {
- DP_NOTICE(edev,
- "Failed to allocate memory for all of RSS queues\n Desired: %d queues, allocated: %d queues\n",
- QEDE_RSS_CNT(edev), rss_id);
+ "Failed to allocate memory for fastpath - rss id = %d\n",
+ rss_id);
+ qede_free_mem_load(edev);
+ return rc;
}
- edev->num_rss = rss_id;
}
return 0;
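
The qede rework above centralizes buffer-descriptor consumption in qede_rx_bd_ring_consume() and adds qede_recycle_rx_bd_ring(), which reposts just-consumed buffers back to the producer side whenever a replacement page cannot be allocated, so the RX ring never shrinks on allocation failure. A simplified sketch of that recycle loop with an illustrative ring layout:

#include <stdint.h>
#include <stdio.h>

#define NUM_RX_BDS	8
#define NUM_RX_BDS_MAX	(NUM_RX_BDS - 1)	/* power-of-two index mask */

struct demo_rxq {
	void *sw_rx_ring[NUM_RX_BDS];
	uint16_t sw_rx_cons;
	uint16_t sw_rx_prod;
};

static void demo_reuse_page(struct demo_rxq *rxq, void *buf)
{
	rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX] = buf;
	rxq->sw_rx_prod++;
}

static void demo_recycle_rx_bd_ring(struct demo_rxq *rxq, uint8_t count)
{
	while (count--) {
		void *buf = rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];

		demo_reuse_page(rxq, buf);	/* repost the same buffer */
		rxq->sw_rx_cons++;		/* ...and consume its BD */
	}
}

int main(void)
{
	struct demo_rxq rxq = { .sw_rx_cons = 0, .sw_rx_prod = 4 };
	int i;

	for (i = 0; i < 4; i++)
		rxq.sw_rx_ring[i] = &rxq;	/* pretend these hold RX pages */

	demo_recycle_rx_bd_ring(&rxq, 2);	/* e.g. a CQE with bd_num == 2 */
	printf("cons=%u prod=%u\n", (unsigned)rxq.sw_rx_cons, (unsigned)rxq.sw_rx_prod);
	return 0;
}
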
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 087e14a3fba7..9e2a0bd8f5a8 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -1691,6 +1691,9 @@ static int ravb_set_gti(struct net_device *ndev)
rate = clk_get_rate(clk);
clk_put(clk);
+ if (!rate)
+ return -EINVAL;
+
inc = 1000000000ULL << 20;
do_div(inc, rate);
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 004e2d7560fd..ceea74cc2229 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -2194,17 +2194,13 @@ static int sh_eth_set_ringparam(struct net_device *ndev,
__func__);
return ret;
}
- ret = sh_eth_dev_init(ndev, false);
+ ret = sh_eth_dev_init(ndev, true);
if (ret < 0) {
netdev_err(ndev, "%s: sh_eth_dev_init failed.\n",
__func__);
return ret;
}
- mdp->irq_enabled = true;
- sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
- /* Setting the Rx mode will start the Rx process. */
- sh_eth_write(ndev, EDRRR_R, EDRRR);
netif_device_attach(ndev);
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
index f0d797ab74d8..44022b1845ce 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
@@ -34,6 +34,9 @@
#define SYSMGR_EMACGRP_CTRL_PHYSEL_MASK 0x00000003
#define SYSMGR_EMACGRP_CTRL_PTP_REF_CLK_MASK 0x00000010
+#define SYSMGR_FPGAGRP_MODULE_REG 0x00000028
+#define SYSMGR_FPGAGRP_MODULE_EMAC 0x00000004
+
#define EMAC_SPLITTER_CTRL_REG 0x0
#define EMAC_SPLITTER_CTRL_SPEED_MASK 0x3
#define EMAC_SPLITTER_CTRL_SPEED_10 0x2
@@ -148,7 +151,7 @@ static int socfpga_dwmac_setup(struct socfpga_dwmac *dwmac)
int phymode = dwmac->interface;
u32 reg_offset = dwmac->reg_offset;
u32 reg_shift = dwmac->reg_shift;
- u32 ctrl, val;
+ u32 ctrl, val, module;
switch (phymode) {
case PHY_INTERFACE_MODE_RGMII:
@@ -175,12 +178,19 @@ static int socfpga_dwmac_setup(struct socfpga_dwmac *dwmac)
ctrl &= ~(SYSMGR_EMACGRP_CTRL_PHYSEL_MASK << reg_shift);
ctrl |= val << reg_shift;
- if (dwmac->f2h_ptp_ref_clk)
+ if (dwmac->f2h_ptp_ref_clk) {
ctrl |= SYSMGR_EMACGRP_CTRL_PTP_REF_CLK_MASK << (reg_shift / 2);
- else
+ regmap_read(sys_mgr_base_addr, SYSMGR_FPGAGRP_MODULE_REG,
+ &module);
+ module |= (SYSMGR_FPGAGRP_MODULE_EMAC << (reg_shift / 2));
+ regmap_write(sys_mgr_base_addr, SYSMGR_FPGAGRP_MODULE_REG,
+ module);
+ } else {
ctrl &= ~(SYSMGR_EMACGRP_CTRL_PTP_REF_CLK_MASK << (reg_shift / 2));
+ }
regmap_write(sys_mgr_base_addr, reg_offset, ctrl);
+
return 0;
}
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 42fdfd4d9d4f..bbb77cd8ad67 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -1251,12 +1251,12 @@ static int cpsw_ndo_open(struct net_device *ndev)
int i, ret;
u32 reg;
+ pm_runtime_get_sync(&priv->pdev->dev);
+
if (!cpsw_common_res_usage_state(priv))
cpsw_intr_disable(priv);
netif_carrier_off(ndev);
- pm_runtime_get_sync(&priv->pdev->dev);
-
reg = priv->version;
dev_info(priv->dev, "initializing cpsw version %d.%d (%d)\n",
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 5d9abedd6b75..58d58f002559 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -1878,8 +1878,6 @@ davinci_emac_of_get_pdata(struct platform_device *pdev, struct emac_priv *priv)
pdata->hw_ram_addr = auxdata->hw_ram_addr;
}
- pdev->dev.platform_data = pdata;
-
return pdata;
}
@@ -2101,6 +2099,7 @@ static int davinci_emac_remove(struct platform_device *pdev)
cpdma_ctlr_destroy(priv->dma);
unregister_netdev(ndev);
+ pm_runtime_disable(&pdev->dev);
free_netdev(ndev);
return 0;
diff --git a/drivers/net/phy/spi_ks8995.c b/drivers/net/phy/spi_ks8995.c
index b5d50d458728..93ffedfa2994 100644
--- a/drivers/net/phy/spi_ks8995.c
+++ b/drivers/net/phy/spi_ks8995.c
@@ -441,7 +441,7 @@ static int ks8995_probe(struct spi_device *spi)
return -ENOMEM;
mutex_init(&ks->lock);
- ks->spi = spi_dev_get(spi);
+ ks->spi = spi;
ks->chip = &ks8995_chip[variant];
if (ks->spi->dev.of_node) {
diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
index bdd83d95ec0a..96a5028621c8 100644
--- a/drivers/net/usb/cdc_mbim.c
+++ b/drivers/net/usb/cdc_mbim.c
@@ -617,8 +617,13 @@ static const struct usb_device_id mbim_devs[] = {
{ USB_VENDOR_AND_INTERFACE_INFO(0x0bdb, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&cdc_mbim_info,
},
- /* Huawei E3372 fails unless NDP comes after the IP packets */
- { USB_DEVICE_AND_INTERFACE_INFO(0x12d1, 0x157d, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
+
+ /* Some Huawei devices, ME906s-158 (12d1:15c1) and E3372
+ * (12d1:157d), are known to fail unless the NDP is placed
+ * after the IP packets. Applying the quirk to all Huawei
+ * devices is broader than necessary, but harmless.
+ */
+ { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&cdc_mbim_info_ndp_to_end,
},
/* default entry */
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index b2348f67b00a..db8022ae415b 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -1152,12 +1152,16 @@ vmxnet3_rx_csum(struct vmxnet3_adapter *adapter,
union Vmxnet3_GenericDesc *gdesc)
{
if (!gdesc->rcd.cnc && adapter->netdev->features & NETIF_F_RXCSUM) {
- /* typical case: TCP/UDP over IP and both csums are correct */
- if ((le32_to_cpu(gdesc->dword[3]) & VMXNET3_RCD_CSUM_OK) ==
- VMXNET3_RCD_CSUM_OK) {
+ if (gdesc->rcd.v4 &&
+ (le32_to_cpu(gdesc->dword[3]) &
+ VMXNET3_RCD_CSUM_OK) == VMXNET3_RCD_CSUM_OK) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ BUG_ON(!(gdesc->rcd.tcp || gdesc->rcd.udp));
+ BUG_ON(gdesc->rcd.frg);
+ } else if (gdesc->rcd.v6 && (le32_to_cpu(gdesc->dword[3]) &
+ (1 << VMXNET3_RCD_TUC_SHIFT))) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
BUG_ON(!(gdesc->rcd.tcp || gdesc->rcd.udp));
- BUG_ON(!(gdesc->rcd.v4 || gdesc->rcd.v6));
BUG_ON(gdesc->rcd.frg);
} else {
if (gdesc->rcd.csum) {
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index 729c344e6774..c4825392d64b 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -69,10 +69,10 @@
/*
* Version numbers
*/
-#define VMXNET3_DRIVER_VERSION_STRING "1.4.6.0-k"
+#define VMXNET3_DRIVER_VERSION_STRING "1.4.7.0-k"
/* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */
-#define VMXNET3_DRIVER_VERSION_NUM 0x01040600
+#define VMXNET3_DRIVER_VERSION_NUM 0x01040700
#if defined(CONFIG_PCI_MSI)
/* RSS only makes sense if MSI-X is supported. */
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 9a9fabb900c1..8a8f1e58b415 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -60,41 +60,6 @@ struct pcpu_dstats {
struct u64_stats_sync syncp;
};
-static struct dst_entry *vrf_ip_check(struct dst_entry *dst, u32 cookie)
-{
- return dst;
-}
-
-static int vrf_ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
-{
- return ip_local_out(net, sk, skb);
-}
-
-static unsigned int vrf_v4_mtu(const struct dst_entry *dst)
-{
- /* TO-DO: return max ethernet size? */
- return dst->dev->mtu;
-}
-
-static void vrf_dst_destroy(struct dst_entry *dst)
-{
- /* our dst lives forever - or until the device is closed */
-}
-
-static unsigned int vrf_default_advmss(const struct dst_entry *dst)
-{
- return 65535 - 40;
-}
-
-static struct dst_ops vrf_dst_ops = {
- .family = AF_INET,
- .local_out = vrf_ip_local_out,
- .check = vrf_ip_check,
- .mtu = vrf_v4_mtu,
- .destroy = vrf_dst_destroy,
- .default_advmss = vrf_default_advmss,
-};
-
/* neighbor handling is done with actual device; do not want
* to flip skb->dev for those ndisc packets. This really fails
* for multiple next protocols (e.g., NEXTHDR_HOP). But it is
@@ -349,46 +314,6 @@ static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev)
}
#if IS_ENABLED(CONFIG_IPV6)
-static struct dst_entry *vrf_ip6_check(struct dst_entry *dst, u32 cookie)
-{
- return dst;
-}
-
-static struct dst_ops vrf_dst_ops6 = {
- .family = AF_INET6,
- .local_out = ip6_local_out,
- .check = vrf_ip6_check,
- .mtu = vrf_v4_mtu,
- .destroy = vrf_dst_destroy,
- .default_advmss = vrf_default_advmss,
-};
-
-static int init_dst_ops6_kmem_cachep(void)
-{
- vrf_dst_ops6.kmem_cachep = kmem_cache_create("vrf_ip6_dst_cache",
- sizeof(struct rt6_info),
- 0,
- SLAB_HWCACHE_ALIGN,
- NULL);
-
- if (!vrf_dst_ops6.kmem_cachep)
- return -ENOMEM;
-
- return 0;
-}
-
-static void free_dst_ops6_kmem_cachep(void)
-{
- kmem_cache_destroy(vrf_dst_ops6.kmem_cachep);
-}
-
-static int vrf_input6(struct sk_buff *skb)
-{
- skb->dev->stats.rx_errors++;
- kfree_skb(skb);
- return 0;
-}
-
/* modelled after ip6_finish_output2 */
static int vrf_finish_output6(struct net *net, struct sock *sk,
struct sk_buff *skb)
@@ -429,67 +354,34 @@ static int vrf_output6(struct net *net, struct sock *sk, struct sk_buff *skb)
!(IP6CB(skb)->flags & IP6SKB_REROUTED));
}
-static void vrf_rt6_destroy(struct net_vrf *vrf)
+static void vrf_rt6_release(struct net_vrf *vrf)
{
- dst_destroy(&vrf->rt6->dst);
- free_percpu(vrf->rt6->rt6i_pcpu);
+ dst_release(&vrf->rt6->dst);
vrf->rt6 = NULL;
}
static int vrf_rt6_create(struct net_device *dev)
{
struct net_vrf *vrf = netdev_priv(dev);
- struct dst_entry *dst;
+ struct net *net = dev_net(dev);
struct rt6_info *rt6;
- int cpu;
int rc = -ENOMEM;
- rt6 = dst_alloc(&vrf_dst_ops6, dev, 0,
- DST_OBSOLETE_NONE,
- (DST_HOST | DST_NOPOLICY | DST_NOXFRM));
+ rt6 = ip6_dst_alloc(net, dev,
+ DST_HOST | DST_NOPOLICY | DST_NOXFRM | DST_NOCACHE);
if (!rt6)
goto out;
- dst = &rt6->dst;
-
- rt6->rt6i_pcpu = alloc_percpu_gfp(struct rt6_info *, GFP_KERNEL);
- if (!rt6->rt6i_pcpu) {
- dst_destroy(dst);
- goto out;
- }
- for_each_possible_cpu(cpu) {
- struct rt6_info **p = per_cpu_ptr(rt6->rt6i_pcpu, cpu);
- *p = NULL;
- }
-
- memset(dst + 1, 0, sizeof(*rt6) - sizeof(*dst));
-
- INIT_LIST_HEAD(&rt6->rt6i_siblings);
- INIT_LIST_HEAD(&rt6->rt6i_uncached);
-
- rt6->dst.input = vrf_input6;
rt6->dst.output = vrf_output6;
-
- rt6->rt6i_table = fib6_get_table(dev_net(dev), vrf->tb_id);
-
- atomic_set(&rt6->dst.__refcnt, 2);
-
+ rt6->rt6i_table = fib6_get_table(net, vrf->tb_id);
+ dst_hold(&rt6->dst);
vrf->rt6 = rt6;
rc = 0;
out:
return rc;
}
#else
-static int init_dst_ops6_kmem_cachep(void)
-{
- return 0;
-}
-
-static void free_dst_ops6_kmem_cachep(void)
-{
-}
-
-static void vrf_rt6_destroy(struct net_vrf *vrf)
+static void vrf_rt6_release(struct net_vrf *vrf)
{
}
@@ -557,11 +449,11 @@ static int vrf_output(struct net *net, struct sock *sk, struct sk_buff *skb)
!(IPCB(skb)->flags & IPSKB_REROUTED));
}
-static void vrf_rtable_destroy(struct net_vrf *vrf)
+static void vrf_rtable_release(struct net_vrf *vrf)
{
struct dst_entry *dst = (struct dst_entry *)vrf->rth;
- dst_destroy(dst);
+ dst_release(dst);
vrf->rth = NULL;
}
@@ -570,22 +462,10 @@ static struct rtable *vrf_rtable_create(struct net_device *dev)
struct net_vrf *vrf = netdev_priv(dev);
struct rtable *rth;
- rth = dst_alloc(&vrf_dst_ops, dev, 2,
- DST_OBSOLETE_NONE,
- (DST_HOST | DST_NOPOLICY | DST_NOXFRM));
+ rth = rt_dst_alloc(dev, 0, RTN_UNICAST, 1, 1, 0);
if (rth) {
rth->dst.output = vrf_output;
- rth->rt_genid = rt_genid_ipv4(dev_net(dev));
- rth->rt_flags = 0;
- rth->rt_type = RTN_UNICAST;
- rth->rt_is_input = 0;
- rth->rt_iif = 0;
- rth->rt_pmtu = 0;
- rth->rt_gateway = 0;
- rth->rt_uses_gateway = 0;
rth->rt_table_id = vrf->tb_id;
- INIT_LIST_HEAD(&rth->rt_uncached);
- rth->rt_uncached_list = NULL;
}
return rth;
@@ -673,8 +553,8 @@ static void vrf_dev_uninit(struct net_device *dev)
struct net_device *port_dev;
struct list_head *iter;
- vrf_rtable_destroy(vrf);
- vrf_rt6_destroy(vrf);
+ vrf_rtable_release(vrf);
+ vrf_rt6_release(vrf);
netdev_for_each_lower_dev(dev, port_dev, iter)
vrf_del_slave(dev, port_dev);
@@ -704,7 +584,7 @@ static int vrf_dev_init(struct net_device *dev)
return 0;
out_rth:
- vrf_rtable_destroy(vrf);
+ vrf_rtable_release(vrf);
out_stats:
free_percpu(dev->dstats);
dev->dstats = NULL;
@@ -737,7 +617,7 @@ static struct rtable *vrf_get_rtable(const struct net_device *dev,
struct net_vrf *vrf = netdev_priv(dev);
rth = vrf->rth;
- atomic_inc(&rth->dst.__refcnt);
+ dst_hold(&rth->dst);
}
return rth;
@@ -788,7 +668,7 @@ static struct dst_entry *vrf_get_rt6_dst(const struct net_device *dev,
struct net_vrf *vrf = netdev_priv(dev);
rt = vrf->rt6;
- atomic_inc(&rt->dst.__refcnt);
+ dst_hold(&rt->dst);
}
return (struct dst_entry *)rt;
@@ -946,19 +826,6 @@ static int __init vrf_init_module(void)
{
int rc;
- vrf_dst_ops.kmem_cachep =
- kmem_cache_create("vrf_ip_dst_cache",
- sizeof(struct rtable), 0,
- SLAB_HWCACHE_ALIGN,
- NULL);
-
- if (!vrf_dst_ops.kmem_cachep)
- return -ENOMEM;
-
- rc = init_dst_ops6_kmem_cachep();
- if (rc != 0)
- goto error2;
-
register_netdevice_notifier(&vrf_notifier_block);
rc = rtnl_link_register(&vrf_link_ops);
@@ -969,22 +836,10 @@ static int __init vrf_init_module(void)
error:
unregister_netdevice_notifier(&vrf_notifier_block);
- free_dst_ops6_kmem_cachep();
-error2:
- kmem_cache_destroy(vrf_dst_ops.kmem_cachep);
return rc;
}
-static void __exit vrf_cleanup_module(void)
-{
- rtnl_link_unregister(&vrf_link_ops);
- unregister_netdevice_notifier(&vrf_notifier_block);
- kmem_cache_destroy(vrf_dst_ops.kmem_cachep);
- free_dst_ops6_kmem_cachep();
-}
-
module_init(vrf_init_module);
-module_exit(vrf_cleanup_module);
MODULE_AUTHOR("Shrijeet Mukherjee, David Ahern");
MODULE_DESCRIPTION("Device driver to instantiate VRF domains");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/wireless/broadcom/b43/main.c b/drivers/net/wireless/broadcom/b43/main.c
index 72380af9dc52..b0603e796ad8 100644
--- a/drivers/net/wireless/broadcom/b43/main.c
+++ b/drivers/net/wireless/broadcom/b43/main.c
@@ -5680,11 +5680,12 @@ static int b43_bcma_probe(struct bcma_device *core)
INIT_WORK(&wl->firmware_load, b43_request_firmware);
schedule_work(&wl->firmware_load);
-bcma_out:
return err;
bcma_err_wireless_exit:
ieee80211_free_hw(wl->hw);
+bcma_out:
+ kfree(dev);
return err;
}
@@ -5712,8 +5713,8 @@ static void b43_bcma_remove(struct bcma_device *core)
b43_rng_exit(wl);
b43_leds_unregister(wl);
-
ieee80211_free_hw(wl->hw);
+ kfree(wldev->dev);
}
static struct bcma_driver b43_bcma_driver = {
@@ -5796,6 +5797,7 @@ static void b43_ssb_remove(struct ssb_device *sdev)
b43_leds_unregister(wl);
b43_wireless_exit(dev, wl);
+ kfree(dev);
}
static struct ssb_driver b43_ssb_driver = {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 76e649c680a1..a50f4df7eae7 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -1147,6 +1147,8 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
/* the fw is stopped, the aux sta is dead: clean up driver state */
iwl_mvm_del_aux_sta(mvm);
+ iwl_free_fw_paging(mvm);
+
/*
* Clear IN_HW_RESTART flag when stopping the hw (as restart_complete()
* won't be called in this case).
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index 5e8ab796d5bc..d278399097dc 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -761,8 +761,6 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
for (i = 0; i < NVM_MAX_NUM_SECTIONS; i++)
kfree(mvm->nvm_sections[i].data);
- iwl_free_fw_paging(mvm);
-
iwl_mvm_tof_clean(mvm);
ieee80211_free_hw(mvm->hw);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index eb39c7e09781..b2b79354d5c0 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -732,8 +732,8 @@ static int iwl_pcie_rsa_race_bug_wa(struct iwl_trans *trans)
*/
val = iwl_read_prph(trans, PREG_AUX_BUS_WPROT_0);
if (val & (BIT(1) | BIT(17))) {
- IWL_INFO(trans,
- "can't access the RSA semaphore it is write protected\n");
+ IWL_DEBUG_INFO(trans,
+ "can't access the RSA semaphore it is write protected\n");
return 0;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c
index 95dcbff4673b..6a8245c4ea48 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c
@@ -2488,9 +2488,9 @@ void rtl8821ae_dm_txpower_tracking_callback_thermalmeter(
for (p = RF90_PATH_A; p < MAX_PATH_NUM_8821A; p++)
rtldm->swing_idx_ofdm_base[p] = rtldm->swing_idx_ofdm[p];
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "pDM_Odm->RFCalibrateInfo.ThermalValue = %d ThermalValue= %d\n",
- rtldm->thermalvalue, thermal_value);
+ RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "pDM_Odm->RFCalibrateInfo.ThermalValue = %d ThermalValue= %d\n",
+ rtldm->thermalvalue, thermal_value);
/*Record last Power Tracking Thermal Value*/
rtldm->thermalvalue = thermal_value;
}
diff --git a/drivers/pci/access.c b/drivers/pci/access.c
index 01b9d0a00abc..d11cdbb8fba3 100644
--- a/drivers/pci/access.c
+++ b/drivers/pci/access.c
@@ -275,6 +275,19 @@ ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void
}
EXPORT_SYMBOL(pci_write_vpd);
+/**
+ * pci_set_vpd_size - Set size of Vital Product Data space
+ * @dev: pci device struct
+ * @len: size of vpd space
+ */
+int pci_set_vpd_size(struct pci_dev *dev, size_t len)
+{
+ if (!dev->vpd || !dev->vpd->ops)
+ return -ENODEV;
+ return dev->vpd->ops->set_size(dev, len);
+}
+EXPORT_SYMBOL(pci_set_vpd_size);
+
#define PCI_VPD_MAX_SIZE (PCI_VPD_ADDR_MASK + 1)
/**
@@ -498,9 +511,23 @@ out:
return ret ? ret : count;
}
+static int pci_vpd_set_size(struct pci_dev *dev, size_t len)
+{
+ struct pci_vpd *vpd = dev->vpd;
+
+ if (len == 0 || len > PCI_VPD_MAX_SIZE)
+ return -EIO;
+
+ vpd->valid = 1;
+ vpd->len = len;
+
+ return 0;
+}
+
static const struct pci_vpd_ops pci_vpd_ops = {
.read = pci_vpd_read,
.write = pci_vpd_write,
+ .set_size = pci_vpd_set_size,
};
static ssize_t pci_vpd_f0_read(struct pci_dev *dev, loff_t pos, size_t count,
@@ -533,9 +560,24 @@ static ssize_t pci_vpd_f0_write(struct pci_dev *dev, loff_t pos, size_t count,
return ret;
}
+static int pci_vpd_f0_set_size(struct pci_dev *dev, size_t len)
+{
+ struct pci_dev *tdev = pci_get_slot(dev->bus,
+ PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
+ int ret;
+
+ if (!tdev)
+ return -ENODEV;
+
+ ret = pci_set_vpd_size(tdev, len);
+ pci_dev_put(tdev);
+ return ret;
+}
+
static const struct pci_vpd_ops pci_vpd_f0_ops = {
.read = pci_vpd_f0_read,
.write = pci_vpd_f0_write,
+ .set_size = pci_vpd_f0_set_size,
};
int pci_vpd_init(struct pci_dev *dev)
diff --git a/drivers/pci/host/pci-imx6.c b/drivers/pci/host/pci-imx6.c
index eb5a2755a164..2f817fa4c661 100644
--- a/drivers/pci/host/pci-imx6.c
+++ b/drivers/pci/host/pci-imx6.c
@@ -32,7 +32,7 @@
#define to_imx6_pcie(x) container_of(x, struct imx6_pcie, pp)
struct imx6_pcie {
- struct gpio_desc *reset_gpio;
+ int reset_gpio;
struct clk *pcie_bus;
struct clk *pcie_phy;
struct clk *pcie;
@@ -309,10 +309,10 @@ static int imx6_pcie_deassert_core_reset(struct pcie_port *pp)
usleep_range(200, 500);
/* Some boards don't have PCIe reset GPIO. */
- if (imx6_pcie->reset_gpio) {
- gpiod_set_value_cansleep(imx6_pcie->reset_gpio, 0);
+ if (gpio_is_valid(imx6_pcie->reset_gpio)) {
+ gpio_set_value_cansleep(imx6_pcie->reset_gpio, 0);
msleep(100);
- gpiod_set_value_cansleep(imx6_pcie->reset_gpio, 1);
+ gpio_set_value_cansleep(imx6_pcie->reset_gpio, 1);
}
return 0;
@@ -523,6 +523,7 @@ static int __init imx6_pcie_probe(struct platform_device *pdev)
{
struct imx6_pcie *imx6_pcie;
struct pcie_port *pp;
+ struct device_node *np = pdev->dev.of_node;
struct resource *dbi_base;
struct device_node *node = pdev->dev.of_node;
int ret;
@@ -544,8 +545,15 @@ static int __init imx6_pcie_probe(struct platform_device *pdev)
return PTR_ERR(pp->dbi_base);
/* Fetch GPIOs */
- imx6_pcie->reset_gpio = devm_gpiod_get_optional(&pdev->dev, "reset",
- GPIOD_OUT_LOW);
+ imx6_pcie->reset_gpio = of_get_named_gpio(np, "reset-gpio", 0);
+ if (gpio_is_valid(imx6_pcie->reset_gpio)) {
+ ret = devm_gpio_request_one(&pdev->dev, imx6_pcie->reset_gpio,
+ GPIOF_OUT_INIT_LOW, "PCIe reset");
+ if (ret) {
+ dev_err(&pdev->dev, "unable to get reset gpio\n");
+ return ret;
+ }
+ }
/* Fetch clocks */
imx6_pcie->pcie_phy = devm_clk_get(&pdev->dev, "pcie_phy");
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index d0fb93481573..a814bbb80fcb 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -97,6 +97,7 @@ static inline bool pci_has_subordinate(struct pci_dev *pci_dev)
struct pci_vpd_ops {
ssize_t (*read)(struct pci_dev *dev, loff_t pos, size_t count, void *buf);
ssize_t (*write)(struct pci_dev *dev, loff_t pos, size_t count, const void *buf);
+ int (*set_size)(struct pci_dev *dev, size_t len);
};
struct pci_vpd {
diff --git a/drivers/platform/x86/hp_accel.c b/drivers/platform/x86/hp_accel.c
index 10ce6cba4455..09356684c32f 100644
--- a/drivers/platform/x86/hp_accel.c
+++ b/drivers/platform/x86/hp_accel.c
@@ -127,8 +127,10 @@ static int lis3lv02d_acpi_read(struct lis3lv02d *lis3, int reg, u8 *ret)
arg0.integer.value = reg;
status = acpi_evaluate_integer(dev->handle, "ALRD", &args, &lret);
+ if (ACPI_FAILURE(status))
+ return -EINVAL;
*ret = lret;
- return (status != AE_OK) ? -EINVAL : 0;
+ return 0;
}
/**
@@ -173,6 +175,7 @@ static int lis3lv02d_dmi_matched(const struct dmi_system_id *dmi)
DEFINE_CONV(normal, 1, 2, 3);
DEFINE_CONV(y_inverted, 1, -2, 3);
DEFINE_CONV(x_inverted, -1, 2, 3);
+DEFINE_CONV(x_inverted_usd, -1, 2, -3);
DEFINE_CONV(z_inverted, 1, 2, -3);
DEFINE_CONV(xy_swap, 2, 1, 3);
DEFINE_CONV(xy_rotated_left, -2, 1, 3);
@@ -236,6 +239,7 @@ static const struct dmi_system_id lis3lv02d_dmi_ids[] = {
AXIS_DMI_MATCH("HP8710", "HP Compaq 8710", y_inverted),
AXIS_DMI_MATCH("HDX18", "HP HDX 18", x_inverted),
AXIS_DMI_MATCH("HPB432x", "HP ProBook 432", xy_rotated_left),
+ AXIS_DMI_MATCH("HPB440G3", "HP ProBook 440 G3", x_inverted_usd),
AXIS_DMI_MATCH("HPB442x", "HP ProBook 442", xy_rotated_left),
AXIS_DMI_MATCH("HPB452x", "HP ProBook 452", y_inverted),
AXIS_DMI_MATCH("HPB522x", "HP ProBook 522", xy_swap),
diff --git a/drivers/platform/x86/intel-hid.c b/drivers/platform/x86/intel-hid.c
index f93abc8c1424..a818db6aa08f 100644
--- a/drivers/platform/x86/intel-hid.c
+++ b/drivers/platform/x86/intel-hid.c
@@ -91,6 +91,8 @@ static int intel_hid_pl_resume_handler(struct device *device)
}
static const struct dev_pm_ops intel_hid_pl_pm_ops = {
+ .freeze = intel_hid_pl_suspend_handler,
+ .restore = intel_hid_pl_resume_handler,
.suspend = intel_hid_pl_suspend_handler,
.resume = intel_hid_pl_resume_handler,
};
diff --git a/drivers/platform/x86/intel_pmc_ipc.c b/drivers/platform/x86/intel_pmc_ipc.c
index 3fb1d85c70a8..6f497e80c9df 100644
--- a/drivers/platform/x86/intel_pmc_ipc.c
+++ b/drivers/platform/x86/intel_pmc_ipc.c
@@ -687,8 +687,8 @@ static int ipc_plat_get_res(struct platform_device *pdev)
ipcdev.acpi_io_size = size;
dev_info(&pdev->dev, "io res: %pR\n", res);
- /* This is index 0 to cover BIOS data register */
punit_res = punit_res_array;
+ /* This is index 0 to cover BIOS data register */
res = platform_get_resource(pdev, IORESOURCE_MEM,
PLAT_RESOURCE_BIOS_DATA_INDEX);
if (!res) {
@@ -698,55 +698,51 @@ static int ipc_plat_get_res(struct platform_device *pdev)
*punit_res = *res;
dev_info(&pdev->dev, "punit BIOS data res: %pR\n", res);
+ /* This is index 1 to cover BIOS interface register */
res = platform_get_resource(pdev, IORESOURCE_MEM,
PLAT_RESOURCE_BIOS_IFACE_INDEX);
if (!res) {
dev_err(&pdev->dev, "Failed to get res of punit BIOS iface\n");
return -ENXIO;
}
- /* This is index 1 to cover BIOS interface register */
*++punit_res = *res;
dev_info(&pdev->dev, "punit BIOS interface res: %pR\n", res);
+ /* This is index 2 to cover ISP data register, optional */
res = platform_get_resource(pdev, IORESOURCE_MEM,
PLAT_RESOURCE_ISP_DATA_INDEX);
- if (!res) {
- dev_err(&pdev->dev, "Failed to get res of punit ISP data\n");
- return -ENXIO;
+ ++punit_res;
+ if (res) {
+ *punit_res = *res;
+ dev_info(&pdev->dev, "punit ISP data res: %pR\n", res);
}
- /* This is index 2 to cover ISP data register */
- *++punit_res = *res;
- dev_info(&pdev->dev, "punit ISP data res: %pR\n", res);
+ /* This is index 3 to cover ISP interface register, optional */
res = platform_get_resource(pdev, IORESOURCE_MEM,
PLAT_RESOURCE_ISP_IFACE_INDEX);
- if (!res) {
- dev_err(&pdev->dev, "Failed to get res of punit ISP iface\n");
- return -ENXIO;
+ ++punit_res;
+ if (res) {
+ *punit_res = *res;
+ dev_info(&pdev->dev, "punit ISP interface res: %pR\n", res);
}
- /* This is index 3 to cover ISP interface register */
- *++punit_res = *res;
- dev_info(&pdev->dev, "punit ISP interface res: %pR\n", res);
+ /* This is index 4 to cover GTD data register, optional */
res = platform_get_resource(pdev, IORESOURCE_MEM,
PLAT_RESOURCE_GTD_DATA_INDEX);
- if (!res) {
- dev_err(&pdev->dev, "Failed to get res of punit GTD data\n");
- return -ENXIO;
+ ++punit_res;
+ if (res) {
+ *punit_res = *res;
+ dev_info(&pdev->dev, "punit GTD data res: %pR\n", res);
}
- /* This is index 4 to cover GTD data register */
- *++punit_res = *res;
- dev_info(&pdev->dev, "punit GTD data res: %pR\n", res);
+ /* This is index 5 to cover GTD interface register, optional */
res = platform_get_resource(pdev, IORESOURCE_MEM,
PLAT_RESOURCE_GTD_IFACE_INDEX);
- if (!res) {
- dev_err(&pdev->dev, "Failed to get res of punit GTD iface\n");
- return -ENXIO;
+ ++punit_res;
+ if (res) {
+ *punit_res = *res;
+ dev_info(&pdev->dev, "punit GTD interface res: %pR\n", res);
}
- /* This is index 5 to cover GTD interface register */
- *++punit_res = *res;
- dev_info(&pdev->dev, "punit GTD interface res: %pR\n", res);
res = platform_get_resource(pdev, IORESOURCE_MEM,
PLAT_RESOURCE_IPC_INDEX);
diff --git a/drivers/platform/x86/intel_punit_ipc.c b/drivers/platform/x86/intel_punit_ipc.c
index bd875409a02d..a47a41fc10ad 100644
--- a/drivers/platform/x86/intel_punit_ipc.c
+++ b/drivers/platform/x86/intel_punit_ipc.c
@@ -227,6 +227,11 @@ static int intel_punit_get_bars(struct platform_device *pdev)
struct resource *res;
void __iomem *addr;
+ /*
+ * The following resources are required
+ * - BIOS_IPC BASE_DATA
+ * - BIOS_IPC BASE_IFACE
+ */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
addr = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(addr))
@@ -239,29 +244,40 @@ static int intel_punit_get_bars(struct platform_device *pdev)
return PTR_ERR(addr);
punit_ipcdev->base[BIOS_IPC][BASE_IFACE] = addr;
+ /*
+ * The following resources are optional
+ * - ISPDRIVER_IPC BASE_DATA
+ * - ISPDRIVER_IPC BASE_IFACE
+ * - GTDRIVER_IPC BASE_DATA
+ * - GTDRIVER_IPC BASE_IFACE
+ */
res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
- addr = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(addr))
- return PTR_ERR(addr);
- punit_ipcdev->base[ISPDRIVER_IPC][BASE_DATA] = addr;
+ if (res) {
+ addr = devm_ioremap_resource(&pdev->dev, res);
+ if (!IS_ERR(addr))
+ punit_ipcdev->base[ISPDRIVER_IPC][BASE_DATA] = addr;
+ }
res = platform_get_resource(pdev, IORESOURCE_MEM, 3);
- addr = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(addr))
- return PTR_ERR(addr);
- punit_ipcdev->base[ISPDRIVER_IPC][BASE_IFACE] = addr;
+ if (res) {
+ addr = devm_ioremap_resource(&pdev->dev, res);
+ if (!IS_ERR(addr))
+ punit_ipcdev->base[ISPDRIVER_IPC][BASE_IFACE] = addr;
+ }
res = platform_get_resource(pdev, IORESOURCE_MEM, 4);
- addr = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(addr))
- return PTR_ERR(addr);
- punit_ipcdev->base[GTDRIVER_IPC][BASE_DATA] = addr;
+ if (res) {
+ addr = devm_ioremap_resource(&pdev->dev, res);
+ if (!IS_ERR(addr))
+ punit_ipcdev->base[GTDRIVER_IPC][BASE_DATA] = addr;
+ }
res = platform_get_resource(pdev, IORESOURCE_MEM, 5);
- addr = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(addr))
- return PTR_ERR(addr);
- punit_ipcdev->base[GTDRIVER_IPC][BASE_IFACE] = addr;
+ if (res) {
+ addr = devm_ioremap_resource(&pdev->dev, res);
+ if (!IS_ERR(addr))
+ punit_ipcdev->base[GTDRIVER_IPC][BASE_IFACE] = addr;
+ }
return 0;
}
diff --git a/drivers/platform/x86/intel_telemetry_pltdrv.c b/drivers/platform/x86/intel_telemetry_pltdrv.c
index 397119f83e82..781bd10ca7ac 100644
--- a/drivers/platform/x86/intel_telemetry_pltdrv.c
+++ b/drivers/platform/x86/intel_telemetry_pltdrv.c
@@ -659,7 +659,7 @@ static int telemetry_plt_update_events(struct telemetry_evtconfig pss_evtconfig,
static int telemetry_plt_set_sampling_period(u8 pss_period, u8 ioss_period)
{
u32 telem_ctrl = 0;
- int ret;
+ int ret = 0;
mutex_lock(&(telm_conf->telem_lock));
if (ioss_period) {
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index e305ab541a22..9255ff3ee81a 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -7972,10 +7972,12 @@ static int fan_get_status_safe(u8 *status)
fan_update_desired_level(s);
mutex_unlock(&fan_mutex);
+ if (rc)
+ return rc;
if (status)
*status = s;
- return rc;
+ return 0;
}
static int fan_get_speed(unsigned int *speed)
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
index b2156ee5bae1..ecb7dbae9be9 100644
--- a/drivers/rtc/rtc-ds1307.c
+++ b/drivers/rtc/rtc-ds1307.c
@@ -863,7 +863,7 @@ out:
* A user-initiated temperature conversion is not started by this function,
* so the temperature is updated once every 64 seconds.
*/
-static int ds3231_hwmon_read_temp(struct device *dev, s16 *mC)
+static int ds3231_hwmon_read_temp(struct device *dev, s32 *mC)
{
struct ds1307 *ds1307 = dev_get_drvdata(dev);
u8 temp_buf[2];
@@ -892,7 +892,7 @@ static ssize_t ds3231_hwmon_show_temp(struct device *dev,
struct device_attribute *attr, char *buf)
{
int ret;
- s16 temp;
+ s32 temp;
ret = ds3231_hwmon_read_temp(dev, &temp);
if (ret)
@@ -1531,7 +1531,7 @@ read_rtc:
return PTR_ERR(ds1307->rtc);
}
- if (ds1307_can_wakeup_device) {
+ if (ds1307_can_wakeup_device && ds1307->client->irq <= 0) {
/* Disable request for an IRQ */
want_irq = false;
dev_info(&client->dev, "'wakeup-source' is set, request for an IRQ is disabled!\n");
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 1bce9cf51b1e..b83908670a9a 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -756,15 +756,16 @@ dcssblk_remove_store(struct device *dev, struct device_attribute *attr, const ch
blk_cleanup_queue(dev_info->dcssblk_queue);
dev_info->gd->queue = NULL;
put_disk(dev_info->gd);
- device_unregister(&dev_info->dev);
/* unload all related segments */
list_for_each_entry(entry, &dev_info->seg_list, lh)
segment_unload(entry->segment_name);
- put_device(&dev_info->dev);
up_write(&dcssblk_devices_sem);
+ device_unregister(&dev_info->dev);
+ put_device(&dev_info->dev);
+
rc = count;
out_buf:
kfree(local_buf);
diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c
index 75d9896deccb..e6f54d3b8969 100644
--- a/drivers/s390/block/scm_blk.c
+++ b/drivers/s390/block/scm_blk.c
@@ -303,7 +303,7 @@ static void scm_blk_request(struct request_queue *rq)
if (req->cmd_type != REQ_TYPE_FS) {
blk_start_request(req);
blk_dump_rq_flags(req, KMSG_COMPONENT " bad request");
- blk_end_request_all(req, -EIO);
+ __blk_end_request_all(req, -EIO);
continue;
}
diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
index e16a49b507ef..0058d9fbf931 100644
--- a/drivers/tty/pty.c
+++ b/drivers/tty/pty.c
@@ -663,14 +663,14 @@ static int pty_unix98_install(struct tty_driver *driver, struct tty_struct *tty)
/* this is called once with whichever end is closed last */
static void pty_unix98_remove(struct tty_driver *driver, struct tty_struct *tty)
{
- struct inode *ptmx_inode;
+ struct pts_fs_info *fsi;
if (tty->driver->subtype == PTY_TYPE_MASTER)
- ptmx_inode = tty->driver_data;
+ fsi = tty->driver_data;
else
- ptmx_inode = tty->link->driver_data;
- devpts_kill_index(ptmx_inode, tty->index);
- devpts_del_ref(ptmx_inode);
+ fsi = tty->link->driver_data;
+ devpts_kill_index(fsi, tty->index);
+ devpts_put_ref(fsi);
}
static const struct tty_operations ptm_unix98_ops = {
@@ -720,6 +720,7 @@ static const struct tty_operations pty_unix98_ops = {
static int ptmx_open(struct inode *inode, struct file *filp)
{
+ struct pts_fs_info *fsi;
struct tty_struct *tty;
struct inode *slave_inode;
int retval;
@@ -734,47 +735,41 @@ static int ptmx_open(struct inode *inode, struct file *filp)
if (retval)
return retval;
+ fsi = devpts_get_ref(inode, filp);
+ retval = -ENODEV;
+ if (!fsi)
+ goto out_free_file;
+
/* find a device that is not in use. */
mutex_lock(&devpts_mutex);
- index = devpts_new_index(inode);
- if (index < 0) {
- retval = index;
- mutex_unlock(&devpts_mutex);
- goto err_file;
- }
-
+ index = devpts_new_index(fsi);
mutex_unlock(&devpts_mutex);
- mutex_lock(&tty_mutex);
- tty = tty_init_dev(ptm_driver, index);
+ retval = index;
+ if (index < 0)
+ goto out_put_ref;
- if (IS_ERR(tty)) {
- retval = PTR_ERR(tty);
- goto out;
- }
+ mutex_lock(&tty_mutex);
+ tty = tty_init_dev(ptm_driver, index);
/* The tty returned here is locked so we can safely
drop the mutex */
mutex_unlock(&tty_mutex);
- set_bit(TTY_PTY_LOCK, &tty->flags); /* LOCK THE SLAVE */
- tty->driver_data = inode;
+ retval = PTR_ERR(tty);
+ if (IS_ERR(tty))
+ goto out;
/*
- * In the case where all references to ptmx inode are dropped and we
- * still have /dev/tty opened pointing to the master/slave pair (ptmx
- * is closed/released before /dev/tty), we must make sure that the inode
- * is still valid when we call the final pty_unix98_shutdown, thus we
- * hold an additional reference to the ptmx inode. For the same /dev/tty
- * last close case, we also need to make sure the super_block isn't
- * destroyed (devpts instance unmounted), before /dev/tty is closed and
- * on its release devpts_kill_index is called.
+ * From here on out, the tty is "live", and the index and
+ * fsi will be killed/put by the tty_release()
*/
- devpts_add_ref(inode);
+ set_bit(TTY_PTY_LOCK, &tty->flags); /* LOCK THE SLAVE */
+ tty->driver_data = fsi;
tty_add_file(tty, filp);
- slave_inode = devpts_pty_new(inode,
+ slave_inode = devpts_pty_new(fsi,
MKDEV(UNIX98_PTY_SLAVE_MAJOR, index), index,
tty->link);
if (IS_ERR(slave_inode)) {
@@ -793,12 +788,14 @@ static int ptmx_open(struct inode *inode, struct file *filp)
return 0;
err_release:
tty_unlock(tty);
+ // This will also put-ref the fsi
tty_release(inode, filp);
return retval;
out:
- mutex_unlock(&tty_mutex);
- devpts_kill_index(inode, index);
-err_file:
+ devpts_kill_index(fsi, index);
+out_put_ref:
+ devpts_put_ref(fsi);
+out_free_file:
tty_free_file(filp);
return retval;
}
diff --git a/drivers/video/fbdev/amba-clcd.c b/drivers/video/fbdev/amba-clcd.c
index fe274b5851c7..93e66a9148b9 100644
--- a/drivers/video/fbdev/amba-clcd.c
+++ b/drivers/video/fbdev/amba-clcd.c
@@ -440,13 +440,14 @@ static int clcdfb_register(struct clcd_fb *fb)
fb->off_ienb = CLCD_PL111_IENB;
fb->off_cntl = CLCD_PL111_CNTL;
} else {
-#ifdef CONFIG_ARCH_VERSATILE
- fb->off_ienb = CLCD_PL111_IENB;
- fb->off_cntl = CLCD_PL111_CNTL;
-#else
- fb->off_ienb = CLCD_PL110_IENB;
- fb->off_cntl = CLCD_PL110_CNTL;
-#endif
+ if (of_machine_is_compatible("arm,versatile-ab") ||
+ of_machine_is_compatible("arm,versatile-pb")) {
+ fb->off_ienb = CLCD_PL111_IENB;
+ fb->off_cntl = CLCD_PL111_CNTL;
+ } else {
+ fb->off_ienb = CLCD_PL110_IENB;
+ fb->off_cntl = CLCD_PL110_CNTL;
+ }
}
fb->clk = clk_get(&fb->dev->dev, NULL);
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-sharp-ls037v7dw01.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-sharp-ls037v7dw01.c
index abfd1f6e3327..1954ec913ce5 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/panel-sharp-ls037v7dw01.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-sharp-ls037v7dw01.c
@@ -200,20 +200,16 @@ static struct omap_dss_driver sharp_ls_ops = {
static int sharp_ls_get_gpio(struct device *dev, int gpio, unsigned long flags,
char *desc, struct gpio_desc **gpiod)
{
- struct gpio_desc *gd;
int r;
- *gpiod = NULL;
-
r = devm_gpio_request_one(dev, gpio, flags, desc);
- if (r)
+ if (r) {
+ *gpiod = NULL;
return r == -ENOENT ? 0 : r;
+ }
- gd = gpio_to_desc(gpio);
- if (IS_ERR(gd))
- return PTR_ERR(gd) == -ENOENT ? 0 : PTR_ERR(gd);
+ *gpiod = gpio_to_desc(gpio);
- *gpiod = gd;
return 0;
}