author:    David S. Miller <davem@davemloft.net>  2016-08-30 00:54:02 -0400
committer: David S. Miller <davem@davemloft.net>  2016-08-30 00:54:02 -0400
commit:    6abdd5f5935fff978f950561f3c5175eb34dad73 (patch)
tree:      6f3ed3a4f4af9e74436ec9355ebf8201357f1c40 /drivers
parent:    0b498a52778368ff501557d68c7b50878ab1701e (diff)
parent:    e4e98c460ad38c78498622a164fd5ef09a2dc9cb (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
All three conflicts were cases of simple overlapping changes.

Signed-off-by: David S. Miller <davem@davemloft.net>
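For context: an "overlapping change" conflict means both parents of the merge
modified the same lines of a file. Git marks such a spot with conflict markers,
roughly like the following schematic (hypothetical code, not one of the three
actual conflicts in this merge):

	<<<<<<< HEAD
		err = setup_queues(dev, nr_queues);	/* net-next side */
	=======
		err = setup_queues(dev);		/* net side */
	>>>>>>> net

Resolving a "simple" overlap just means picking or combining the two versions
by hand; nothing outside the marked region needs to change.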
Diffstat (limited to 'drivers')
-rw-r--r-- drivers/block/floppy.c | 27
-rw-r--r-- drivers/block/xen-blkfront.c | 97
-rw-r--r-- drivers/clocksource/bcm_kona_timer.c | 16
-rw-r--r-- drivers/clocksource/mips-gic-timer.c | 2
-rw-r--r-- drivers/clocksource/pxa_timer.c | 2
-rw-r--r-- drivers/clocksource/sun4i_timer.c | 9
-rw-r--r-- drivers/clocksource/time-armada-370-xp.c | 1
-rw-r--r-- drivers/clocksource/time-pistachio.c | 8
-rw-r--r-- drivers/clocksource/timer-atmel-pit.c | 6
-rw-r--r-- drivers/dax/pmem.c | 3
-rw-r--r-- drivers/edac/Kconfig | 8
-rw-r--r-- drivers/edac/Makefile | 1
-rw-r--r-- drivers/edac/skx_edac.c | 1121
-rw-r--r-- drivers/gpio/Kconfig | 11
-rw-r--r-- drivers/gpio/gpio-max730x.c | 8
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 6
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c | 13
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c | 9
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 12
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 5
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c | 2
-rw-r--r-- drivers/gpu/drm/amd/scheduler/gpu_scheduler.c | 2
-rw-r--r-- drivers/gpu/drm/drm_atomic.c | 6
-rw-r--r-- drivers/gpu/drm/drm_crtc.c | 3
-rw-r--r-- drivers/gpu/drm/drm_fb_helper.c | 2
-rw-r--r-- drivers/gpu/drm/etnaviv/etnaviv_gpu.c | 10
-rw-r--r-- drivers/gpu/drm/i915/i915_drv.h | 14
-rw-r--r-- drivers/gpu/drm/i915/i915_gem.c | 10
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_execbuffer.c | 13
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_gtt.c | 1
-rw-r--r-- drivers/gpu/drm/i915/i915_reg.h | 14
-rw-r--r-- drivers/gpu/drm/i915/intel_audio.c | 6
-rw-r--r-- drivers/gpu/drm/i915/intel_csr.c | 8
-rw-r--r-- drivers/gpu/drm/i915/intel_ddi.c | 91
-rw-r--r-- drivers/gpu/drm/i915/intel_display.c | 181
-rw-r--r-- drivers/gpu/drm/i915/intel_drv.h | 3
-rw-r--r-- drivers/gpu/drm/i915/intel_fbc.c | 20
-rw-r--r-- drivers/gpu/drm/i915/intel_pm.c | 276
-rw-r--r-- drivers/gpu/drm/i915/intel_ringbuffer.c | 8
-rw-r--r-- drivers/gpu/drm/mediatek/Kconfig | 3
-rw-r--r-- drivers/gpu/drm/qxl/qxl_fb.c | 4
-rw-r--r-- drivers/gpu/drm/radeon/atombios_crtc.c | 4
-rw-r--r-- drivers/gpu/drm/radeon/radeon_atpx_handler.c | 9
-rw-r--r-- drivers/gpu/drm/radeon/radeon_ttm.c | 4
-rw-r--r-- drivers/gpu/drm/tegra/dsi.c | 43
-rw-r--r-- drivers/gpu/drm/udl/udl_fb.c | 4
-rw-r--r-- drivers/gpu/host1x/mipi.c | 63
-rw-r--r-- drivers/hwmon/it87.c | 3
-rw-r--r-- drivers/i2c/busses/i2c-at91.c | 24
-rw-r--r-- drivers/i2c/busses/i2c-bcm-iproc.c | 2
-rw-r--r-- drivers/i2c/busses/i2c-bcm-kona.c | 2
-rw-r--r-- drivers/i2c/busses/i2c-brcmstb.c | 2
-rw-r--r-- drivers/i2c/busses/i2c-cros-ec-tunnel.c | 2
-rw-r--r-- drivers/i2c/busses/i2c-meson.c | 6
-rw-r--r-- drivers/i2c/busses/i2c-ocores.c | 14
-rw-r--r-- drivers/i2c/muxes/i2c-demux-pinctrl.c | 4
-rw-r--r-- drivers/infiniband/core/cma.c | 18
-rw-r--r-- drivers/infiniband/hw/cxgb4/cm.c | 6
-rw-r--r-- drivers/infiniband/hw/cxgb4/cq.c | 10
-rw-r--r-- drivers/infiniband/hw/cxgb4/t4.h | 5
-rw-r--r-- drivers/infiniband/hw/hfi1/affinity.c | 21
-rw-r--r-- drivers/infiniband/hw/hfi1/debugfs.c | 14
-rw-r--r-- drivers/infiniband/hw/hfi1/driver.c | 11
-rw-r--r-- drivers/infiniband/hw/hfi1/file_ops.c | 4
-rw-r--r-- drivers/infiniband/hw/hfi1/hfi.h | 20
-rw-r--r-- drivers/infiniband/hw/hfi1/init.c | 2
-rw-r--r-- drivers/infiniband/hw/hfi1/mad.c | 14
-rw-r--r-- drivers/infiniband/hw/hfi1/qp.c | 4
-rw-r--r-- drivers/infiniband/hw/hfi1/qsfp.c | 32
-rw-r--r-- drivers/infiniband/hw/hfi1/qsfp.h | 3
-rw-r--r-- drivers/infiniband/hw/i40iw/i40iw.h | 4
-rw-r--r-- drivers/infiniband/hw/i40iw/i40iw_cm.c | 26
-rw-r--r-- drivers/infiniband/hw/i40iw/i40iw_main.c | 4
-rw-r--r-- drivers/infiniband/hw/i40iw/i40iw_utils.c | 5
-rw-r--r-- drivers/infiniband/hw/i40iw/i40iw_verbs.c | 4
-rw-r--r-- drivers/infiniband/hw/mlx4/cq.c | 20
-rw-r--r-- drivers/infiniband/hw/mlx5/main.c | 1
-rw-r--r-- drivers/infiniband/hw/ocrdma/ocrdma_hw.c | 14
-rw-r--r-- drivers/infiniband/hw/ocrdma/ocrdma_sli.h | 12
-rw-r--r-- drivers/infiniband/hw/ocrdma/ocrdma_verbs.c | 4
-rw-r--r-- drivers/infiniband/hw/qib/qib_debugfs.c | 12
-rw-r--r-- drivers/infiniband/hw/qib/qib_fs.c | 26
-rw-r--r-- drivers/infiniband/hw/qib/qib_qp.c | 4
-rw-r--r-- drivers/infiniband/hw/usnic/usnic_ib_main.c | 3
-rw-r--r-- drivers/infiniband/sw/rdmavt/qp.c | 3
-rw-r--r-- drivers/infiniband/ulp/isert/ib_isert.c | 2
-rw-r--r-- drivers/infiniband/ulp/srpt/ib_srpt.c | 9
-rw-r--r-- drivers/input/keyboard/tegra-kbc.c | 2
-rw-r--r-- drivers/input/rmi4/rmi_driver.c | 3
-rw-r--r-- drivers/input/serio/i8042.c | 1
-rw-r--r-- drivers/input/touchscreen/ads7846.c | 1
-rw-r--r-- drivers/input/touchscreen/silead.c | 2
-rw-r--r-- drivers/iommu/arm-smmu-v3.c | 7
-rw-r--r-- drivers/iommu/arm-smmu.c | 34
-rw-r--r-- drivers/iommu/io-pgtable-arm-v7s.c | 4
-rw-r--r-- drivers/irqchip/irq-gic-v3-its.c | 7
-rw-r--r-- drivers/irqchip/irq-gic-v3.c | 11
-rw-r--r-- drivers/irqchip/irq-gic.c | 7
-rw-r--r-- drivers/irqchip/irq-mips-gic.c | 18
-rw-r--r-- drivers/macintosh/ams/ams-i2c.c | 1
-rw-r--r-- drivers/macintosh/windfarm_pm112.c | 1
-rw-r--r-- drivers/macintosh/windfarm_pm72.c | 1
-rw-r--r-- drivers/macintosh/windfarm_rm31.c | 1
-rw-r--r-- drivers/md/bcache/super.c | 14
-rw-r--r-- drivers/md/dm-crypt.c | 2
-rw-r--r-- drivers/md/dm-flakey.c | 27
-rw-r--r-- drivers/md/dm-log.c | 11
-rw-r--r-- drivers/md/dm-raid.c | 82
-rw-r--r-- drivers/md/dm-round-robin.c | 7
-rw-r--r-- drivers/misc/cxl/vphb.c | 10
-rw-r--r-- drivers/mmc/card/block.c | 5
-rw-r--r-- drivers/mmc/card/queue.c | 7
-rw-r--r-- drivers/mmc/card/queue.h | 4
-rw-r--r-- drivers/net/dsa/bcm_sf2.h | 2
-rw-r--r-- drivers/net/dsa/mv88e6xxx/chip.c | 8
-rw-r--r-- drivers/net/ethernet/atheros/alx/main.c | 2
-rw-r--r-- drivers/net/ethernet/atheros/alx/reg.h | 1
-rw-r--r-- drivers/net/ethernet/broadcom/bgmac-bcma.c | 2
-rw-r--r-- drivers/net/ethernet/cavium/thunder/nic_reg.h | 1
-rw-r--r-- drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c | 5
-rw-r--r-- drivers/net/ethernet/freescale/gianfar.c | 20
-rw-r--r-- drivers/net/ethernet/freescale/gianfar.h | 3
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c | 3
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_client.c | 41
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_main.c | 1
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_common.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 85
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en.h | 21
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_common.c | 7
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c | 21
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 93
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 111
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 5
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 41
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_stats.h | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 68
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 16
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 26
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c | 11
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/main.c | 59
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/port.h | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 43
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c | 12
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 2
-rw-r--r-- drivers/net/ethernet/nxp/lpc_eth.c | 20
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed.h | 11
-rw-r--r-- drivers/net/ethernet/qlogic/qede/qede_main.c | 5
-rw-r--r-- drivers/net/ethernet/realtek/8139cp.c | 2
-rw-r--r-- drivers/net/ethernet/sfc/ef10.c | 3
-rw-r--r-- drivers/net/ethernet/smsc/smc91x.c | 7
-rw-r--r-- drivers/net/ethernet/smsc/smc91x.h | 65
-rw-r--r-- drivers/net/ethernet/synopsys/dwc_eth_qos.c | 17
-rw-r--r-- drivers/net/ethernet/tehuti/tehuti.c | 2
-rw-r--r-- drivers/net/ethernet/xilinx/xilinx_emaclite.c | 8
-rw-r--r-- drivers/net/phy/micrel.c | 2
-rw-r--r-- drivers/net/phy/phy.c | 6
-rw-r--r-- drivers/net/team/team_mode_loadbalance.c | 15
-rw-r--r-- drivers/net/tun.c | 6
-rw-r--r-- drivers/net/usb/kaweth.c | 10
-rw-r--r-- drivers/net/vmxnet3/vmxnet3_drv.c | 4
-rw-r--r-- drivers/net/vmxnet3/vmxnet3_int.h | 4
-rw-r--r-- drivers/nvme/host/core.c | 11
-rw-r--r-- drivers/of/base.c | 14
-rw-r--r-- drivers/of/fdt.c | 2
-rw-r--r-- drivers/of/irq.c | 5
-rw-r--r-- drivers/of/platform.c | 2
-rw-r--r-- drivers/pci/host-bridge.c | 1
-rw-r--r-- drivers/pci/msi.c | 18
-rw-r--r-- drivers/platform/olpc/olpc-ec.c | 8
-rw-r--r-- drivers/platform/x86/intel_pmic_gpio.c | 8
-rw-r--r-- drivers/scsi/aacraid/commctrl.c | 13
-rw-r--r-- drivers/scsi/fcoe/fcoe_ctlr.c | 2
-rw-r--r-- drivers/scsi/megaraid/megaraid_sas_base.c | 6
-rw-r--r-- drivers/scsi/megaraid/megaraid_sas_fusion.c | 2
-rw-r--r-- drivers/scsi/mpt3sas/mpt3sas_base.c | 22
-rw-r--r-- drivers/scsi/ses.c | 3
-rw-r--r-- drivers/thermal/cpu_cooling.c | 21
-rw-r--r-- drivers/thermal/imx_thermal.c | 4
-rw-r--r-- drivers/thermal/int340x_thermal/int3406_thermal.c | 1
-rw-r--r-- drivers/usb/class/cdc-acm.c | 5
-rw-r--r-- drivers/usb/class/cdc-acm.h | 1
-rw-r--r-- drivers/usb/core/config.c | 66
-rw-r--r-- drivers/usb/core/devio.c | 7
-rw-r--r-- drivers/usb/core/hub.c | 23
-rw-r--r-- drivers/usb/dwc3/dwc3-of-simple.c | 1
-rw-r--r-- drivers/usb/dwc3/dwc3-pci.c | 2
-rw-r--r-- drivers/usb/dwc3/gadget.c | 55
-rw-r--r-- drivers/usb/gadget/composite.c | 6
-rw-r--r-- drivers/usb/gadget/configfs.c | 2
-rw-r--r-- drivers/usb/gadget/function/rndis.c | 6
-rw-r--r-- drivers/usb/gadget/function/u_ether.c | 3
-rw-r--r-- drivers/usb/gadget/function/uvc_configfs.c | 2
-rw-r--r-- drivers/usb/gadget/legacy/inode.c | 4
-rw-r--r-- drivers/usb/gadget/udc/core.c | 5
-rw-r--r-- drivers/usb/gadget/udc/fsl_qe_udc.c | 2
-rw-r--r-- drivers/usb/host/ehci-hcd.c | 4
-rw-r--r-- drivers/usb/host/max3421-hcd.c | 2
-rw-r--r-- drivers/usb/host/xhci-hub.c | 3
-rw-r--r-- drivers/usb/host/xhci-pci.c | 3
-rw-r--r-- drivers/usb/host/xhci-ring.c | 16
-rw-r--r-- drivers/usb/misc/ftdi-elan.c | 10
-rw-r--r-- drivers/usb/misc/usbtest.c | 9
-rw-r--r-- drivers/usb/phy/phy-omap-otg.c | 2
-rw-r--r-- drivers/usb/renesas_usbhs/common.c | 3
-rw-r--r-- drivers/usb/renesas_usbhs/fifo.c | 4
-rw-r--r-- drivers/usb/renesas_usbhs/mod_gadget.c | 7
-rw-r--r-- drivers/usb/serial/ftdi_sio.c | 3
-rw-r--r-- drivers/usb/serial/ftdi_sio_ids.h | 12
-rw-r--r-- drivers/usb/serial/option.c | 22
-rw-r--r-- drivers/usb/serial/usb-serial.c | 4
-rw-r--r-- drivers/vhost/scsi.c | 6
-rw-r--r-- drivers/xen/xenbus/xenbus_dev_frontend.c | 2
219 files changed, 3170 insertions(+), 1016 deletions(-)
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index b71a9c767009..e3d8e4ced4a2 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -3706,22 +3706,21 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
if (UFDCS->rawcmd == 1)
UFDCS->rawcmd = 2;
- if (mode & (FMODE_READ|FMODE_WRITE)) {
- UDRS->last_checked = 0;
- clear_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags);
- check_disk_change(bdev);
- if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags))
- goto out;
- if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags))
+ if (!(mode & FMODE_NDELAY)) {
+ if (mode & (FMODE_READ|FMODE_WRITE)) {
+ UDRS->last_checked = 0;
+ clear_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags);
+ check_disk_change(bdev);
+ if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags))
+ goto out;
+ if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags))
+ goto out;
+ }
+ res = -EROFS;
+ if ((mode & FMODE_WRITE) &&
+ !test_bit(FD_DISK_WRITABLE_BIT, &UDRS->flags))
goto out;
}
-
- res = -EROFS;
-
- if ((mode & FMODE_WRITE) &&
- !test_bit(FD_DISK_WRITABLE_BIT, &UDRS->flags))
- goto out;
-
mutex_unlock(&open_lock);
mutex_unlock(&floppy_mutex);
return 0;
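The floppy_open() hunk above restores the O_NDELAY semantics: when the device
is opened with O_NDELAY, both the media-change revalidation and the
write-protect check are now skipped. Condensed, the post-patch open path
behaves like this (a sketch of the control flow, not the literal kernel code):

	if (!(mode & FMODE_NDELAY)) {
		if (mode & (FMODE_READ | FMODE_WRITE)) {
			/* revalidate the media; fail the open if the disk
			 * changed or a previous check already failed */
		}
		/* fail the open with -EROFS on a write open of
		 * write-protected media */
	}
	/* an O_NDELAY open reaches this point without either check, so
	 * utilities can still open and ioctl a drive with no disk in it */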
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index be4fea6a5dd3..88ef6d4729b4 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -189,6 +189,8 @@ struct blkfront_info
struct mutex mutex;
struct xenbus_device *xbdev;
struct gendisk *gd;
+ u16 sector_size;
+ unsigned int physical_sector_size;
int vdevice;
blkif_vdev_t handle;
enum blkif_state connected;
@@ -910,9 +912,45 @@ static struct blk_mq_ops blkfront_mq_ops = {
.map_queue = blk_mq_map_queue,
};
+static void blkif_set_queue_limits(struct blkfront_info *info)
+{
+ struct request_queue *rq = info->rq;
+ struct gendisk *gd = info->gd;
+ unsigned int segments = info->max_indirect_segments ? :
+ BLKIF_MAX_SEGMENTS_PER_REQUEST;
+
+ queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq);
+
+ if (info->feature_discard) {
+ queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, rq);
+ blk_queue_max_discard_sectors(rq, get_capacity(gd));
+ rq->limits.discard_granularity = info->discard_granularity;
+ rq->limits.discard_alignment = info->discard_alignment;
+ if (info->feature_secdiscard)
+ queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, rq);
+ }
+
+ /* Hard sector size and max sectors impersonate the equiv. hardware. */
+ blk_queue_logical_block_size(rq, info->sector_size);
+ blk_queue_physical_block_size(rq, info->physical_sector_size);
+ blk_queue_max_hw_sectors(rq, (segments * XEN_PAGE_SIZE) / 512);
+
+ /* Each segment in a request is up to an aligned page in size. */
+ blk_queue_segment_boundary(rq, PAGE_SIZE - 1);
+ blk_queue_max_segment_size(rq, PAGE_SIZE);
+
+ /* Ensure a merged request will fit in a single I/O ring slot. */
+ blk_queue_max_segments(rq, segments / GRANTS_PER_PSEG);
+
+ /* Make sure buffer addresses are sector-aligned. */
+ blk_queue_dma_alignment(rq, 511);
+
+ /* Make sure we don't use bounce buffers. */
+ blk_queue_bounce_limit(rq, BLK_BOUNCE_ANY);
+}
+
static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size,
- unsigned int physical_sector_size,
- unsigned int segments)
+ unsigned int physical_sector_size)
{
struct request_queue *rq;
struct blkfront_info *info = gd->private_data;
@@ -944,36 +982,11 @@ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size,
}
rq->queuedata = info;
- queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq);
-
- if (info->feature_discard) {
- queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, rq);
- blk_queue_max_discard_sectors(rq, get_capacity(gd));
- rq->limits.discard_granularity = info->discard_granularity;
- rq->limits.discard_alignment = info->discard_alignment;
- if (info->feature_secdiscard)
- queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, rq);
- }
-
- /* Hard sector size and max sectors impersonate the equiv. hardware. */
- blk_queue_logical_block_size(rq, sector_size);
- blk_queue_physical_block_size(rq, physical_sector_size);
- blk_queue_max_hw_sectors(rq, (segments * XEN_PAGE_SIZE) / 512);
-
- /* Each segment in a request is up to an aligned page in size. */
- blk_queue_segment_boundary(rq, PAGE_SIZE - 1);
- blk_queue_max_segment_size(rq, PAGE_SIZE);
-
- /* Ensure a merged request will fit in a single I/O ring slot. */
- blk_queue_max_segments(rq, segments / GRANTS_PER_PSEG);
-
- /* Make sure buffer addresses are sector-aligned. */
- blk_queue_dma_alignment(rq, 511);
-
- /* Make sure we don't use bounce buffers. */
- blk_queue_bounce_limit(rq, BLK_BOUNCE_ANY);
-
- gd->queue = rq;
+ info->rq = gd->queue = rq;
+ info->gd = gd;
+ info->sector_size = sector_size;
+ info->physical_sector_size = physical_sector_size;
+ blkif_set_queue_limits(info);
return 0;
}
@@ -1136,16 +1149,11 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
gd->private_data = info;
set_capacity(gd, capacity);
- if (xlvbd_init_blk_queue(gd, sector_size, physical_sector_size,
- info->max_indirect_segments ? :
- BLKIF_MAX_SEGMENTS_PER_REQUEST)) {
+ if (xlvbd_init_blk_queue(gd, sector_size, physical_sector_size)) {
del_gendisk(gd);
goto release;
}
- info->rq = gd->queue;
- info->gd = gd;
-
xlvbd_flush(info);
if (vdisk_info & VDISK_READONLY)
@@ -1315,7 +1323,7 @@ free_shadow:
rinfo->ring_ref[i] = GRANT_INVALID_REF;
}
}
- free_pages((unsigned long)rinfo->ring.sring, get_order(info->nr_ring_pages * PAGE_SIZE));
+ free_pages((unsigned long)rinfo->ring.sring, get_order(info->nr_ring_pages * XEN_PAGE_SIZE));
rinfo->ring.sring = NULL;
if (rinfo->irq)
@@ -2007,8 +2015,10 @@ static int blkif_recover(struct blkfront_info *info)
struct split_bio *split_bio;
blkfront_gather_backend_features(info);
+ /* Reset limits changed by blk_mq_update_nr_hw_queues(). */
+ blkif_set_queue_limits(info);
segs = info->max_indirect_segments ? : BLKIF_MAX_SEGMENTS_PER_REQUEST;
- blk_queue_max_segments(info->rq, segs);
+ blk_queue_max_segments(info->rq, segs / GRANTS_PER_PSEG);
for (r_index = 0; r_index < info->nr_rings; r_index++) {
struct blkfront_ring_info *rinfo = &info->rinfo[r_index];
@@ -2432,7 +2442,7 @@ static void blkfront_connect(struct blkfront_info *info)
if (err) {
xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s",
info->xbdev->otherend);
- return;
+ goto fail;
}
xenbus_switch_state(info->xbdev, XenbusStateConnected);
@@ -2445,6 +2455,11 @@ static void blkfront_connect(struct blkfront_info *info)
device_add_disk(&info->xbdev->dev, info->gd);
info->is_ready = 1;
+ return;
+
+fail:
+ blkif_free(info, 0);
+ return;
}
/**
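Two details in the xen-blkfront changes are worth spelling out. First, the
queue limits were factored into blkif_set_queue_limits() because, as the
blkif_recover() hunk notes, blk_mq_update_nr_hw_queues() resets them, so they
must be re-applied after recovery. Second, the per-request arithmetic: with
the usual constants (4 KiB pages and the default of 11 segments per blkif
request, assumptions that hold on common x86 configurations but are not
guaranteed everywhere), the limits work out to:

	/* BLKIF_MAX_SEGMENTS_PER_REQUEST = 11, XEN_PAGE_SIZE = 4096 */
	max_hw_sectors = (11 * 4096) / 512;	/* 88 sectors = 44 KiB/request */

	/* GRANTS_PER_PSEG = PAGE_SIZE / XEN_PAGE_SIZE = 1 with 4 KiB pages */
	max_segments = 11 / 1;			/* 11 segments per request */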
diff --git a/drivers/clocksource/bcm_kona_timer.c b/drivers/clocksource/bcm_kona_timer.c
index 7e3fd375a627..92f6e4deee74 100644
--- a/drivers/clocksource/bcm_kona_timer.c
+++ b/drivers/clocksource/bcm_kona_timer.c
@@ -66,10 +66,10 @@ static void kona_timer_disable_and_clear(void __iomem *base)
}
-static void
+static int
kona_timer_get_counter(void __iomem *timer_base, uint32_t *msw, uint32_t *lsw)
{
- int loop_limit = 4;
+ int loop_limit = 3;
/*
* Read 64-bit free running counter
@@ -83,18 +83,19 @@ kona_timer_get_counter(void __iomem *timer_base, uint32_t *msw, uint32_t *lsw)
* if new hi-word is equal to previously read hi-word then stop.
*/
- while (--loop_limit) {
+ do {
*msw = readl(timer_base + KONA_GPTIMER_STCHI_OFFSET);
*lsw = readl(timer_base + KONA_GPTIMER_STCLO_OFFSET);
if (*msw == readl(timer_base + KONA_GPTIMER_STCHI_OFFSET))
break;
- }
+ } while (--loop_limit);
if (!loop_limit) {
pr_err("bcm_kona_timer: getting counter failed.\n");
pr_err(" Timer will be impacted\n");
+ return -ETIMEDOUT;
}
- return;
+ return 0;
}
static int kona_timer_set_next_event(unsigned long clc,
@@ -112,8 +113,11 @@ static int kona_timer_set_next_event(unsigned long clc,
uint32_t lsw, msw;
uint32_t reg;
+ int ret;
- kona_timer_get_counter(timers.tmr_regs, &msw, &lsw);
+ ret = kona_timer_get_counter(timers.tmr_regs, &msw, &lsw);
+ if (ret)
+ return ret;
/* Load the "next" event tick value */
writel(lsw + clc, timers.tmr_regs + KONA_GPTIMER_STCM0_OFFSET);
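The rewritten loop is the standard hi/lo/hi sequence for sampling a 64-bit
free-running counter through two 32-bit registers: if the high word reads the
same before and after the low word, no carry propagated between the two reads
and the pair is consistent. As a standalone illustration (generic C; read32(),
REG_HI and REG_LO are hypothetical names, not the driver's API):

	static int read_counter64(uint32_t *msw, uint32_t *lsw)
	{
		int tries = 3;

		do {
			*msw = read32(REG_HI);		/* sample high word */
			*lsw = read32(REG_LO);		/* sample low word */
			if (*msw == read32(REG_HI))	/* no rollover between reads */
				return 0;
		} while (--tries);

		return -ETIMEDOUT;	/* counter never produced a stable pair */
	}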
diff --git a/drivers/clocksource/mips-gic-timer.c b/drivers/clocksource/mips-gic-timer.c
index d91e8725917c..b4b3ab5a11ad 100644
--- a/drivers/clocksource/mips-gic-timer.c
+++ b/drivers/clocksource/mips-gic-timer.c
@@ -164,7 +164,7 @@ void __init gic_clocksource_init(unsigned int frequency)
gic_start_count();
}
-static void __init gic_clocksource_of_init(struct device_node *node)
+static int __init gic_clocksource_of_init(struct device_node *node)
{
struct clk *clk;
int ret;
diff --git a/drivers/clocksource/pxa_timer.c b/drivers/clocksource/pxa_timer.c
index 937e10b84d58..3e1cb512f3ce 100644
--- a/drivers/clocksource/pxa_timer.c
+++ b/drivers/clocksource/pxa_timer.c
@@ -21,6 +21,8 @@
#include <linux/of_irq.h>
#include <linux/sched_clock.h>
+#include <clocksource/pxa.h>
+
#include <asm/div64.h>
#define OSMR0 0x00 /* OS Timer 0 Match Register */
diff --git a/drivers/clocksource/sun4i_timer.c b/drivers/clocksource/sun4i_timer.c
index 97669ee4df2a..c83452cacb41 100644
--- a/drivers/clocksource/sun4i_timer.c
+++ b/drivers/clocksource/sun4i_timer.c
@@ -123,12 +123,16 @@ static struct clock_event_device sun4i_clockevent = {
.set_next_event = sun4i_clkevt_next_event,
};
+static void sun4i_timer_clear_interrupt(void)
+{
+ writel(TIMER_IRQ_EN(0), timer_base + TIMER_IRQ_ST_REG);
+}
static irqreturn_t sun4i_timer_interrupt(int irq, void *dev_id)
{
struct clock_event_device *evt = (struct clock_event_device *)dev_id;
- writel(0x1, timer_base + TIMER_IRQ_ST_REG);
+ sun4i_timer_clear_interrupt();
evt->event_handler(evt);
return IRQ_HANDLED;
@@ -208,6 +212,9 @@ static int __init sun4i_timer_init(struct device_node *node)
/* Make sure timer is stopped before playing with interrupts */
sun4i_clkevt_time_stop(0);
+ /* clear timer0 interrupt */
+ sun4i_timer_clear_interrupt();
+
sun4i_clockevent.cpumask = cpu_possible_mask;
sun4i_clockevent.irq = irq;
diff --git a/drivers/clocksource/time-armada-370-xp.c b/drivers/clocksource/time-armada-370-xp.c
index 719b478d136e..3c39e6f45971 100644
--- a/drivers/clocksource/time-armada-370-xp.c
+++ b/drivers/clocksource/time-armada-370-xp.c
@@ -338,7 +338,6 @@ static int __init armada_xp_timer_init(struct device_node *np)
struct clk *clk = of_clk_get_by_name(np, "fixed");
int ret;
- clk = of_clk_get(np, 0);
if (IS_ERR(clk)) {
pr_err("Failed to get clock");
return PTR_ERR(clk);
diff --git a/drivers/clocksource/time-pistachio.c b/drivers/clocksource/time-pistachio.c
index a7d9a08e4b0e..a8e6c7df853d 100644
--- a/drivers/clocksource/time-pistachio.c
+++ b/drivers/clocksource/time-pistachio.c
@@ -202,10 +202,10 @@ static int __init pistachio_clksrc_of_init(struct device_node *node)
rate = clk_get_rate(fast_clk);
/* Disable irq's for clocksource usage */
- gpt_writel(&pcs_gpt.base, 0, TIMER_IRQ_MASK, 0);
- gpt_writel(&pcs_gpt.base, 0, TIMER_IRQ_MASK, 1);
- gpt_writel(&pcs_gpt.base, 0, TIMER_IRQ_MASK, 2);
- gpt_writel(&pcs_gpt.base, 0, TIMER_IRQ_MASK, 3);
+ gpt_writel(pcs_gpt.base, 0, TIMER_IRQ_MASK, 0);
+ gpt_writel(pcs_gpt.base, 0, TIMER_IRQ_MASK, 1);
+ gpt_writel(pcs_gpt.base, 0, TIMER_IRQ_MASK, 2);
+ gpt_writel(pcs_gpt.base, 0, TIMER_IRQ_MASK, 3);
/* Enable timer block */
writel(TIMER_ME_GLOBAL, pcs_gpt.base);
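The time-pistachio fix above belongs to a one-character class of bug worth
noting: pcs_gpt.base already is the void __iomem pointer, so passing
&pcs_gpt.base hands the function the address of the struct member rather than
the mapped register base. In miniature (a sketch):

	struct { void __iomem *base; } pcs;

	writel(0, pcs.base);	/* correct: write to the mapped registers */
	writel(0, &pcs.base);	/* bug: "writes" to the address of the
				 * struct member itself, not the hardware */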
diff --git a/drivers/clocksource/timer-atmel-pit.c b/drivers/clocksource/timer-atmel-pit.c
index 1ffac0cb0cb7..3494bc5a21d5 100644
--- a/drivers/clocksource/timer-atmel-pit.c
+++ b/drivers/clocksource/timer-atmel-pit.c
@@ -261,6 +261,12 @@ static int __init at91sam926x_pit_dt_init(struct device_node *node)
return PTR_ERR(data->mck);
}
+ ret = clk_prepare_enable(data->mck);
+ if (ret) {
+ pr_err("Unable to enable mck\n");
+ return ret;
+ }
+
/* Get the interrupts property */
data->irq = irq_of_parse_and_map(node, 0);
if (!data->irq) {
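The timer-atmel-pit change follows the common-clock-framework idiom: a clock
obtained from the device tree must be prepared and enabled, with the return
value checked, before the IP block behind it is touched. The canonical pattern
(a generic sketch of the same calls):

	struct clk *clk = of_clk_get(node, 0);
	int ret;

	if (IS_ERR(clk))
		return PTR_ERR(clk);	/* clock missing or not yet ready */

	ret = clk_prepare_enable(clk);	/* prepare + enable in one call */
	if (ret)
		return ret;		/* do not touch the hardware */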
diff --git a/drivers/dax/pmem.c b/drivers/dax/pmem.c
index dfb168568af1..1f01e98c83c7 100644
--- a/drivers/dax/pmem.c
+++ b/drivers/dax/pmem.c
@@ -116,6 +116,9 @@ static int dax_pmem_probe(struct device *dev)
if (rc)
return rc;
+ /* adjust the dax_region resource to the start of data */
+ res.start += le64_to_cpu(pfn_sb->dataoff);
+
nd_region = to_nd_region(dev->parent);
dax_region = alloc_dax_region(dev, nd_region->id, &res,
le32_to_cpu(pfn_sb->align), addr, PFN_DEV|PFN_MAP);
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index d0c1dab9b435..dff1a4a6dc1b 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -251,6 +251,14 @@ config EDAC_SBRIDGE
Support for error detection and correction on the Intel
Sandy Bridge, Ivy Bridge and Haswell Integrated Memory Controllers.
+config EDAC_SKX
+ tristate "Intel Skylake server Integrated MC"
+ depends on EDAC_MM_EDAC && PCI && X86_64 && X86_MCE_INTEL
+ depends on PCI_MMCONFIG
+ help
+ Support for error detection and correction on the Intel
+ Skylake server Integrated Memory Controllers.
+
config EDAC_MPC85XX
tristate "Freescale MPC83xx / MPC85xx"
depends on EDAC_MM_EDAC && FSL_SOC
diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile
index f9e4a3e0e6e9..986049925b08 100644
--- a/drivers/edac/Makefile
+++ b/drivers/edac/Makefile
@@ -31,6 +31,7 @@ obj-$(CONFIG_EDAC_I5400) += i5400_edac.o
obj-$(CONFIG_EDAC_I7300) += i7300_edac.o
obj-$(CONFIG_EDAC_I7CORE) += i7core_edac.o
obj-$(CONFIG_EDAC_SBRIDGE) += sb_edac.o
+obj-$(CONFIG_EDAC_SKX) += skx_edac.o
obj-$(CONFIG_EDAC_E7XXX) += e7xxx_edac.o
obj-$(CONFIG_EDAC_E752X) += e752x_edac.o
obj-$(CONFIG_EDAC_I82443BXGX) += i82443bxgx_edac.o
diff --git a/drivers/edac/skx_edac.c b/drivers/edac/skx_edac.c
new file mode 100644
index 000000000000..0ff4878c2aa1
--- /dev/null
+++ b/drivers/edac/skx_edac.c
@@ -0,0 +1,1121 @@
+/*
+ * EDAC driver for Intel(R) Xeon(R) Skylake processors
+ * Copyright (c) 2016, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/edac.h>
+#include <linux/mmzone.h>
+#include <linux/smp.h>
+#include <linux/bitmap.h>
+#include <linux/math64.h>
+#include <linux/mod_devicetable.h>
+#include <asm/cpu_device_id.h>
+#include <asm/processor.h>
+#include <asm/mce.h>
+
+#include "edac_core.h"
+
+#define SKX_REVISION " Ver: 1.0 "
+
+/*
+ * Debug macros
+ */
+#define skx_printk(level, fmt, arg...) \
+ edac_printk(level, "skx", fmt, ##arg)
+
+#define skx_mc_printk(mci, level, fmt, arg...) \
+ edac_mc_chipset_printk(mci, level, "skx", fmt, ##arg)
+
+/*
+ * Get a bit field at register value <v>, from bit <lo> to bit <hi>
+ */
+#define GET_BITFIELD(v, lo, hi) \
+ (((v) & GENMASK_ULL((hi), (lo))) >> (lo))
+
+static LIST_HEAD(skx_edac_list);
+
+static u64 skx_tolm, skx_tohm;
+
+#define NUM_IMC 2 /* memory controllers per socket */
+#define NUM_CHANNELS 3 /* channels per memory controller */
+#define NUM_DIMMS 2 /* Max DIMMS per channel */
+
+#define MASK26 0x3FFFFFF /* Mask for 2^26 */
+#define MASK29 0x1FFFFFFF /* Mask for 2^29 */
+
+/*
+ * Each cpu socket contains some pci devices that provide global
+ * information, and also some that are local to each of the two
+ * memory controllers on the die.
+ */
+struct skx_dev {
+ struct list_head list;
+ u8 bus[4];
+ struct pci_dev *sad_all;
+ struct pci_dev *util_all;
+ u32 mcroute;
+ struct skx_imc {
+ struct mem_ctl_info *mci;
+ u8 mc; /* system wide mc# */
+ u8 lmc; /* socket relative mc# */
+ u8 src_id, node_id;
+ struct skx_channel {
+ struct pci_dev *cdev;
+ struct skx_dimm {
+ u8 close_pg;
+ u8 bank_xor_enable;
+ u8 fine_grain_bank;
+ u8 rowbits;
+ u8 colbits;
+ } dimms[NUM_DIMMS];
+ } chan[NUM_CHANNELS];
+ } imc[NUM_IMC];
+};
+static int skx_num_sockets;
+
+struct skx_pvt {
+ struct skx_imc *imc;
+};
+
+struct decoded_addr {
+ struct skx_dev *dev;
+ u64 addr;
+ int socket;
+ int imc;
+ int channel;
+ u64 chan_addr;
+ int sktways;
+ int chanways;
+ int dimm;
+ int rank;
+ int channel_rank;
+ u64 rank_address;
+ int row;
+ int column;
+ int bank_address;
+ int bank_group;
+};
+
+static struct skx_dev *get_skx_dev(u8 bus, u8 idx)
+{
+ struct skx_dev *d;
+
+ list_for_each_entry(d, &skx_edac_list, list) {
+ if (d->bus[idx] == bus)
+ return d;
+ }
+
+ return NULL;
+}
+
+enum munittype {
+ CHAN0, CHAN1, CHAN2, SAD_ALL, UTIL_ALL, SAD
+};
+
+struct munit {
+ u16 did;
+ u16 devfn[NUM_IMC];
+ u8 busidx;
+ u8 per_socket;
+ enum munittype mtype;
+};
+
+/*
+ * List of PCI device ids that we need together with some device
+ * number and function numbers to tell which memory controller the
+ * device belongs to.
+ */
+static const struct munit skx_all_munits[] = {
+ { 0x2054, { }, 1, 1, SAD_ALL },
+ { 0x2055, { }, 1, 1, UTIL_ALL },
+ { 0x2040, { PCI_DEVFN(10, 0), PCI_DEVFN(12, 0) }, 2, 2, CHAN0 },
+ { 0x2044, { PCI_DEVFN(10, 4), PCI_DEVFN(12, 4) }, 2, 2, CHAN1 },
+ { 0x2048, { PCI_DEVFN(11, 0), PCI_DEVFN(13, 0) }, 2, 2, CHAN2 },
+ { 0x208e, { }, 1, 0, SAD },
+ { }
+};
+
+/*
+ * We use the per-socket device 0x2016 to count how many sockets are present,
+ * and to determine which PCI buses are associated with each socket. Allocate
+ * and build the full list of all the skx_dev structures that we need here.
+ */
+static int get_all_bus_mappings(void)
+{
+ struct pci_dev *pdev, *prev;
+ struct skx_dev *d;
+ u32 reg;
+ int ndev = 0;
+
+ prev = NULL;
+ for (;;) {
+ pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x2016, prev);
+ if (!pdev)
+ break;
+ ndev++;
+ d = kzalloc(sizeof(*d), GFP_KERNEL);
+ if (!d) {
+ pci_dev_put(pdev);
+ return -ENOMEM;
+ }
+ pci_read_config_dword(pdev, 0xCC, &reg);
+ d->bus[0] = GET_BITFIELD(reg, 0, 7);
+ d->bus[1] = GET_BITFIELD(reg, 8, 15);
+ d->bus[2] = GET_BITFIELD(reg, 16, 23);
+ d->bus[3] = GET_BITFIELD(reg, 24, 31);
+ edac_dbg(2, "busses: %x, %x, %x, %x\n",
+ d->bus[0], d->bus[1], d->bus[2], d->bus[3]);
+ list_add_tail(&d->list, &skx_edac_list);
+ skx_num_sockets++;
+ prev = pdev;
+ }
+
+ return ndev;
+}
+
+static int get_all_munits(const struct munit *m)
+{
+ struct pci_dev *pdev, *prev;
+ struct skx_dev *d;
+ u32 reg;
+ int i = 0, ndev = 0;
+
+ prev = NULL;
+ for (;;) {
+ pdev = pci_get_device(PCI_VENDOR_ID_INTEL, m->did, prev);
+ if (!pdev)
+ break;
+ ndev++;
+ if (m->per_socket == NUM_IMC) {
+ for (i = 0; i < NUM_IMC; i++)
+ if (m->devfn[i] == pdev->devfn)
+ break;
+ if (i == NUM_IMC)
+ goto fail;
+ }
+ d = get_skx_dev(pdev->bus->number, m->busidx);
+ if (!d)
+ goto fail;
+
+ /* Be sure that the device is enabled */
+ if (unlikely(pci_enable_device(pdev) < 0)) {
+ skx_printk(KERN_ERR,
+ "Couldn't enable %04x:%04x\n", PCI_VENDOR_ID_INTEL, m->did);
+ goto fail;
+ }
+
+ switch (m->mtype) {
+ case CHAN0: case CHAN1: case CHAN2:
+ pci_dev_get(pdev);
+ d->imc[i].chan[m->mtype].cdev = pdev;
+ break;
+ case SAD_ALL:
+ pci_dev_get(pdev);
+ d->sad_all = pdev;
+ break;
+ case UTIL_ALL:
+ pci_dev_get(pdev);
+ d->util_all = pdev;
+ break;
+ case SAD:
+ /*
+ * one of these devices per core, including cores
+ * that don't exist on this SKU. Ignore any that
+ * read a route table of zero, make sure all the
+ * non-zero values match.
+ */
+ pci_read_config_dword(pdev, 0xB4, &reg);
+ if (reg != 0) {
+ if (d->mcroute == 0)
+ d->mcroute = reg;
+ else if (d->mcroute != reg) {
+ skx_printk(KERN_ERR,
+ "mcroute mismatch\n");
+ goto fail;
+ }
+ }
+ ndev--;
+ break;
+ }
+
+ prev = pdev;
+ }
+
+ return ndev;
+fail:
+ pci_dev_put(pdev);
+ return -ENODEV;
+}
+
+const struct x86_cpu_id skx_cpuids[] = {
+ { X86_VENDOR_INTEL, 6, 0x55, 0, 0 }, /* Skylake */
+ { }
+};
+MODULE_DEVICE_TABLE(x86cpu, skx_cpuids);
+
+static u8 get_src_id(struct skx_dev *d)
+{
+ u32 reg;
+
+ pci_read_config_dword(d->util_all, 0xF0, &reg);
+
+ return GET_BITFIELD(reg, 12, 14);
+}
+
+static u8 skx_get_node_id(struct skx_dev *d)
+{
+ u32 reg;
+
+ pci_read_config_dword(d->util_all, 0xF4, &reg);
+
+ return GET_BITFIELD(reg, 0, 2);
+}
+
+static int get_dimm_attr(u32 reg, int lobit, int hibit, int add, int minval,
+ int maxval, char *name)
+{
+ u32 val = GET_BITFIELD(reg, lobit, hibit);
+
+ if (val < minval || val > maxval) {
+ edac_dbg(2, "bad %s = %d (raw=%x)\n", name, val, reg);
+ return -EINVAL;
+ }
+ return val + add;
+}
+
+#define IS_DIMM_PRESENT(mtr) GET_BITFIELD((mtr), 15, 15)
+
+#define numrank(reg) get_dimm_attr((reg), 12, 13, 0, 1, 2, "ranks")
+#define numrow(reg) get_dimm_attr((reg), 2, 4, 12, 1, 6, "rows")
+#define numcol(reg) get_dimm_attr((reg), 0, 1, 10, 0, 2, "cols")
+
+static int get_width(u32 mtr)
+{
+ switch (GET_BITFIELD(mtr, 8, 9)) {
+ case 0:
+ return DEV_X4;
+ case 1:
+ return DEV_X8;
+ case 2:
+ return DEV_X16;
+ }
+ return DEV_UNKNOWN;
+}
+
+static int skx_get_hi_lo(void)
+{
+ struct pci_dev *pdev;
+ u32 reg;
+
+ pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x2034, NULL);
+ if (!pdev) {
+ edac_dbg(0, "Can't get tolm/tohm\n");
+ return -ENODEV;
+ }
+
+ pci_read_config_dword(pdev, 0xD0, &reg);
+ skx_tolm = reg;
+ pci_read_config_dword(pdev, 0xD4, &reg);
+ skx_tohm = reg;
+ pci_read_config_dword(pdev, 0xD8, &reg);
+ skx_tohm |= (u64)reg << 32;
+
+ pci_dev_put(pdev);
+ edac_dbg(2, "tolm=%llx tohm=%llx\n", skx_tolm, skx_tohm);
+
+ return 0;
+}
+
+static int get_dimm_info(u32 mtr, u32 amap, struct dimm_info *dimm,
+ struct skx_imc *imc, int chan, int dimmno)
+{
+ int banks = 16, ranks, rows, cols, npages;
+ u64 size;
+
+ if (!IS_DIMM_PRESENT(mtr))
+ return 0;
+ ranks = numrank(mtr);
+ rows = numrow(mtr);
+ cols = numcol(mtr);
+
+ /*
+ * Compute size in 8-byte (2^3) words, then shift to MiB (2^20)
+ */
+ size = ((1ull << (rows + cols + ranks)) * banks) >> (20 - 3);
+ npages = MiB_TO_PAGES(size);
+
+ edac_dbg(0, "mc#%d: channel %d, dimm %d, %lld Mb (%d pages) bank: %d, rank: %d, row: %#x, col: %#x\n",
+ imc->mc, chan, dimmno, size, npages,
+ banks, ranks, rows, cols);
+
+ imc->chan[chan].dimms[dimmno].close_pg = GET_BITFIELD(mtr, 0, 0);
+ imc->chan[chan].dimms[dimmno].bank_xor_enable = GET_BITFIELD(mtr, 9, 9);
+ imc->chan[chan].dimms[dimmno].fine_grain_bank = GET_BITFIELD(amap, 0, 0);
+ imc->chan[chan].dimms[dimmno].rowbits = rows;
+ imc->chan[chan].dimms[dimmno].colbits = cols;
+
+ dimm->nr_pages = npages;
+ dimm->grain = 32;
+ dimm->dtype = get_width(mtr);
+ dimm->mtype = MEM_DDR4;
+ dimm->edac_mode = EDAC_SECDED; /* likely better than this */
+ snprintf(dimm->label, sizeof(dimm->label), "CPU_SrcID#%u_MC#%u_Chan#%u_DIMM#%u",
+ imc->src_id, imc->lmc, chan, dimmno);
+
+ return 1;
+}
+
+#define SKX_GET_MTMTR(dev, reg) \
+ pci_read_config_dword((dev), 0x87c, &reg)
+
+static bool skx_check_ecc(struct pci_dev *pdev)
+{
+ u32 mtmtr;
+
+ SKX_GET_MTMTR(pdev, mtmtr);
+
+ return !!GET_BITFIELD(mtmtr, 2, 2);
+}
+
+static int skx_get_dimm_config(struct mem_ctl_info *mci)
+{
+ struct skx_pvt *pvt = mci->pvt_info;
+ struct skx_imc *imc = pvt->imc;
+ struct dimm_info *dimm;
+ int i, j;
+ u32 mtr, amap;
+ int ndimms;
+
+ for (i = 0; i < NUM_CHANNELS; i++) {
+ ndimms = 0;
+ pci_read_config_dword(imc->chan[i].cdev, 0x8C, &amap);
+ for (j = 0; j < NUM_DIMMS; j++) {
+ dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms,
+ mci->n_layers, i, j, 0);
+ pci_read_config_dword(imc->chan[i].cdev,
+ 0x80 + 4*j, &mtr);
+ ndimms += get_dimm_info(mtr, amap, dimm, imc, i, j);
+ }
+ if (ndimms && !skx_check_ecc(imc->chan[0].cdev)) {
+ skx_printk(KERN_ERR, "ECC is disabled on imc %d\n", imc->mc);
+ return -ENODEV;
+ }
+ }
+
+ return 0;
+}
+
+static void skx_unregister_mci(struct skx_imc *imc)
+{
+ struct mem_ctl_info *mci = imc->mci;
+
+ if (!mci)
+ return;
+
+ edac_dbg(0, "MC%d: mci = %p\n", imc->mc, mci);
+
+ /* Remove MC sysfs nodes */
+ edac_mc_del_mc(mci->pdev);
+
+ edac_dbg(1, "%s: free mci struct\n", mci->ctl_name);
+ kfree(mci->ctl_name);
+ edac_mc_free(mci);
+}
+
+static int skx_register_mci(struct skx_imc *imc)
+{
+ struct mem_ctl_info *mci;
+ struct edac_mc_layer layers[2];
+ struct pci_dev *pdev = imc->chan[0].cdev;
+ struct skx_pvt *pvt;
+ int rc;
+
+ /* allocate a new MC control structure */
+ layers[0].type = EDAC_MC_LAYER_CHANNEL;
+ layers[0].size = NUM_CHANNELS;
+ layers[0].is_virt_csrow = false;
+ layers[1].type = EDAC_MC_LAYER_SLOT;
+ layers[1].size = NUM_DIMMS;
+ layers[1].is_virt_csrow = true;
+ mci = edac_mc_alloc(imc->mc, ARRAY_SIZE(layers), layers,
+ sizeof(struct skx_pvt));
+
+ if (unlikely(!mci))
+ return -ENOMEM;
+
+ edac_dbg(0, "MC#%d: mci = %p\n", imc->mc, mci);
+
+ /* Associate skx_dev and mci for future usage */
+ imc->mci = mci;
+ pvt = mci->pvt_info;
+ pvt->imc = imc;
+
+ mci->ctl_name = kasprintf(GFP_KERNEL, "Skylake Socket#%d IMC#%d",
+ imc->node_id, imc->lmc);
+ mci->mtype_cap = MEM_FLAG_DDR4;
+ mci->edac_ctl_cap = EDAC_FLAG_NONE;
+ mci->edac_cap = EDAC_FLAG_NONE;
+ mci->mod_name = "skx_edac.c";
+ mci->dev_name = pci_name(imc->chan[0].cdev);
+ mci->mod_ver = SKX_REVISION;
+ mci->ctl_page_to_phys = NULL;
+
+ rc = skx_get_dimm_config(mci);
+ if (rc < 0)
+ goto fail;
+
+ /* record ptr to the generic device */
+ mci->pdev = &pdev->dev;
+
+ /* add this new MC control structure to EDAC's list of MCs */
+ if (unlikely(edac_mc_add_mc(mci))) {
+ edac_dbg(0, "MC: failed edac_mc_add_mc()\n");
+ rc = -EINVAL;
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ kfree(mci->ctl_name);
+ edac_mc_free(mci);
+ imc->mci = NULL;
+ return rc;
+}
+
+#define SKX_MAX_SAD 24
+
+#define SKX_GET_SAD(d, i, reg) \
+ pci_read_config_dword((d)->sad_all, 0x60 + 8 * (i), &reg)
+#define SKX_GET_ILV(d, i, reg) \
+ pci_read_config_dword((d)->sad_all, 0x64 + 8 * (i), &reg)
+
+#define SKX_SAD_MOD3MODE(sad) GET_BITFIELD((sad), 30, 31)
+#define SKX_SAD_MOD3(sad) GET_BITFIELD((sad), 27, 27)
+#define SKX_SAD_LIMIT(sad) (((u64)GET_BITFIELD((sad), 7, 26) << 26) | MASK26)
+#define SKX_SAD_MOD3ASMOD2(sad) GET_BITFIELD((sad), 5, 6)
+#define SKX_SAD_ATTR(sad) GET_BITFIELD((sad), 3, 4)
+#define SKX_SAD_INTERLEAVE(sad) GET_BITFIELD((sad), 1, 2)
+#define SKX_SAD_ENABLE(sad) GET_BITFIELD((sad), 0, 0)
+
+#define SKX_ILV_REMOTE(tgt) (((tgt) & 8) == 0)
+#define SKX_ILV_TARGET(tgt) ((tgt) & 7)
+
+static bool skx_sad_decode(struct decoded_addr *res)
+{
+ struct skx_dev *d = list_first_entry(&skx_edac_list, typeof(*d), list);
+ u64 addr = res->addr;
+ int i, idx, tgt, lchan, shift;
+ u32 sad, ilv;
+ u64 limit, prev_limit;
+ int remote = 0;
+
+ /* Simple sanity check for I/O space or out of range */
+ if (addr >= skx_tohm || (addr >= skx_tolm && addr < BIT_ULL(32))) {
+ edac_dbg(0, "Address %llx out of range\n", addr);
+ return false;
+ }
+
+restart:
+ prev_limit = 0;
+ for (i = 0; i < SKX_MAX_SAD; i++) {
+ SKX_GET_SAD(d, i, sad);
+ limit = SKX_SAD_LIMIT(sad);
+ if (SKX_SAD_ENABLE(sad)) {
+ if (addr >= prev_limit && addr <= limit)
+ goto sad_found;
+ }
+ prev_limit = limit + 1;
+ }
+ edac_dbg(0, "No SAD entry for %llx\n", addr);
+ return false;
+
+sad_found:
+ SKX_GET_ILV(d, i, ilv);
+
+ switch (SKX_SAD_INTERLEAVE(sad)) {
+ case 0:
+ idx = GET_BITFIELD(addr, 6, 8);
+ break;
+ case 1:
+ idx = GET_BITFIELD(addr, 8, 10);
+ break;
+ case 2:
+ idx = GET_BITFIELD(addr, 12, 14);
+ break;
+ case 3:
+ idx = GET_BITFIELD(addr, 30, 32);
+ break;
+ }
+
+ tgt = GET_BITFIELD(ilv, 4 * idx, 4 * idx + 3);
+
+ /* If point to another node, find it and start over */
+ if (SKX_ILV_REMOTE(tgt)) {
+ if (remote) {
+ edac_dbg(0, "Double remote!\n");
+ return false;
+ }
+ remote = 1;
+ list_for_each_entry(d, &skx_edac_list, list) {
+ if (d->imc[0].src_id == SKX_ILV_TARGET(tgt))
+ goto restart;
+ }
+ edac_dbg(0, "Can't find node %d\n", SKX_ILV_TARGET(tgt));
+ return false;
+ }
+
+ if (SKX_SAD_MOD3(sad) == 0)
+ lchan = SKX_ILV_TARGET(tgt);
+ else {
+ switch (SKX_SAD_MOD3MODE(sad)) {
+ case 0:
+ shift = 6;
+ break;
+ case 1:
+ shift = 8;
+ break;
+ case 2:
+ shift = 12;
+ break;
+ default:
+ edac_dbg(0, "illegal mod3mode\n");
+ return false;
+ }
+ switch (SKX_SAD_MOD3ASMOD2(sad)) {
+ case 0:
+ lchan = (addr >> shift) % 3;
+ break;
+ case 1:
+ lchan = (addr >> shift) % 2;
+ break;
+ case 2:
+ lchan = (addr >> shift) % 2;
+ lchan = (lchan << 1) | ~lchan;
+ break;
+ case 3:
+ lchan = ((addr >> shift) % 2) << 1;
+ break;
+ }
+ lchan = (lchan << 1) | (SKX_ILV_TARGET(tgt) & 1);
+ }
+
+ res->dev = d;
+ res->socket = d->imc[0].src_id;
+ res->imc = GET_BITFIELD(d->mcroute, lchan * 3, lchan * 3 + 2);
+ res->channel = GET_BITFIELD(d->mcroute, lchan * 2 + 18, lchan * 2 + 19);
+
+ edac_dbg(2, "%llx: socket=%d imc=%d channel=%d\n",
+ res->addr, res->socket, res->imc, res->channel);
+ return true;
+}
+
+#define SKX_MAX_TAD 8
+
+#define SKX_GET_TADBASE(d, mc, i, reg) \
+ pci_read_config_dword((d)->imc[mc].chan[0].cdev, 0x850 + 4 * (i), &reg)
+#define SKX_GET_TADWAYNESS(d, mc, i, reg) \
+ pci_read_config_dword((d)->imc[mc].chan[0].cdev, 0x880 + 4 * (i), &reg)
+#define SKX_GET_TADCHNILVOFFSET(d, mc, ch, i, reg) \
+ pci_read_config_dword((d)->imc[mc].chan[ch].cdev, 0x90 + 4 * (i), &reg)
+
+#define SKX_TAD_BASE(b) ((u64)GET_BITFIELD((b), 12, 31) << 26)
+#define SKX_TAD_SKT_GRAN(b) GET_BITFIELD((b), 4, 5)
+#define SKX_TAD_CHN_GRAN(b) GET_BITFIELD((b), 6, 7)
+#define SKX_TAD_LIMIT(b) (((u64)GET_BITFIELD((b), 12, 31) << 26) | MASK26)
+#define SKX_TAD_OFFSET(b) ((u64)GET_BITFIELD((b), 4, 23) << 26)
+#define SKX_TAD_SKTWAYS(b) (1 << GET_BITFIELD((b), 10, 11))
+#define SKX_TAD_CHNWAYS(b) (GET_BITFIELD((b), 8, 9) + 1)
+
+/* which bit used for both socket and channel interleave */
+static int skx_granularity[] = { 6, 8, 12, 30 };
+
+static u64 skx_do_interleave(u64 addr, int shift, int ways, u64 lowbits)
+{
+ addr >>= shift;
+ addr /= ways;
+ addr <<= shift;
+
+ return addr | (lowbits & ((1ull << shift) - 1));
+}
+
+static bool skx_tad_decode(struct decoded_addr *res)
+{
+ int i;
+ u32 base, wayness, chnilvoffset;
+ int skt_interleave_bit, chn_interleave_bit;
+ u64 channel_addr;
+
+ for (i = 0; i < SKX_MAX_TAD; i++) {
+ SKX_GET_TADBASE(res->dev, res->imc, i, base);
+ SKX_GET_TADWAYNESS(res->dev, res->imc, i, wayness);
+ if (SKX_TAD_BASE(base) <= res->addr && res->addr <= SKX_TAD_LIMIT(wayness))
+ goto tad_found;
+ }
+ edac_dbg(0, "No TAD entry for %llx\n", res->addr);
+ return false;
+
+tad_found:
+ res->sktways = SKX_TAD_SKTWAYS(wayness);
+ res->chanways = SKX_TAD_CHNWAYS(wayness);
+ skt_interleave_bit = skx_granularity[SKX_TAD_SKT_GRAN(base)];
+ chn_interleave_bit = skx_granularity[SKX_TAD_CHN_GRAN(base)];
+
+ SKX_GET_TADCHNILVOFFSET(res->dev, res->imc, res->channel, i, chnilvoffset);
+ channel_addr = res->addr - SKX_TAD_OFFSET(chnilvoffset);
+
+ if (res->chanways == 3 && skt_interleave_bit > chn_interleave_bit) {
+ /* Must handle channel first, then socket */
+ channel_addr = skx_do_interleave(channel_addr, chn_interleave_bit,
+ res->chanways, channel_addr);
+ channel_addr = skx_do_interleave(channel_addr, skt_interleave_bit,
+ res->sktways, channel_addr);
+ } else {
+ /* Handle socket then channel. Preserve low bits from original address */
+ channel_addr = skx_do_interleave(channel_addr, skt_interleave_bit,
+ res->sktways, res->addr);
+ channel_addr = skx_do_interleave(channel_addr, chn_interleave_bit,
+ res->chanways, res->addr);
+ }
+
+ res->chan_addr = channel_addr;
+
+ edac_dbg(2, "%llx: chan_addr=%llx sktways=%d chanways=%d\n",
+ res->addr, res->chan_addr, res->sktways, res->chanways);
+ return true;
+}
+
+#define SKX_MAX_RIR 4
+
+#define SKX_GET_RIRWAYNESS(d, mc, ch, i, reg) \
+ pci_read_config_dword((d)->imc[mc].chan[ch].cdev, \
+ 0x108 + 4 * (i), &reg)
+#define SKX_GET_RIRILV(d, mc, ch, idx, i, reg) \
+ pci_read_config_dword((d)->imc[mc].chan[ch].cdev, \
+ 0x120 + 16 * idx + 4 * (i), &reg)
+
+#define SKX_RIR_VALID(b) GET_BITFIELD((b), 31, 31)
+#define SKX_RIR_LIMIT(b) (((u64)GET_BITFIELD((b), 1, 11) << 29) | MASK29)
+#define SKX_RIR_WAYS(b) (1 << GET_BITFIELD((b), 28, 29))
+#define SKX_RIR_CHAN_RANK(b) GET_BITFIELD((b), 16, 19)
+#define SKX_RIR_OFFSET(b) ((u64)(GET_BITFIELD((b), 2, 15) << 26))
+
+static bool skx_rir_decode(struct decoded_addr *res)
+{
+ int i, idx, chan_rank;
+ int shift;
+ u32 rirway, rirlv;
+ u64 rank_addr, prev_limit = 0, limit;
+
+ if (res->dev->imc[res->imc].chan[res->channel].dimms[0].close_pg)
+ shift = 6;
+ else
+ shift = 13;
+
+ for (i = 0; i < SKX_MAX_RIR; i++) {
+ SKX_GET_RIRWAYNESS(res->dev, res->imc, res->channel, i, rirway);
+ limit = SKX_RIR_LIMIT(rirway);
+ if (SKX_RIR_VALID(rirway)) {
+ if (prev_limit <= res->chan_addr &&
+ res->chan_addr <= limit)
+ goto rir_found;
+ }
+ prev_limit = limit;
+ }
+ edac_dbg(0, "No RIR entry for %llx\n", res->addr);
+ return false;
+
+rir_found:
+ rank_addr = res->chan_addr >> shift;
+ rank_addr /= SKX_RIR_WAYS(rirway);
+ rank_addr <<= shift;
+ rank_addr |= res->chan_addr & GENMASK_ULL(shift - 1, 0);
+
+ res->rank_address = rank_addr;
+ idx = (res->chan_addr >> shift) % SKX_RIR_WAYS(rirway);
+
+ SKX_GET_RIRILV(res->dev, res->imc, res->channel, idx, i, rirlv);
+ res->rank_address = rank_addr - SKX_RIR_OFFSET(rirlv);
+ chan_rank = SKX_RIR_CHAN_RANK(rirlv);
+ res->channel_rank = chan_rank;
+ res->dimm = chan_rank / 4;
+ res->rank = chan_rank % 4;
+
+ edac_dbg(2, "%llx: dimm=%d rank=%d chan_rank=%d rank_addr=%llx\n",
+ res->addr, res->dimm, res->rank,
+ res->channel_rank, res->rank_address);
+ return true;
+}
+
+static u8 skx_close_row[] = {
+ 15, 16, 17, 18, 20, 21, 22, 28, 10, 11, 12, 13, 29, 30, 31, 32, 33
+};
+static u8 skx_close_column[] = {
+ 3, 4, 5, 14, 19, 23, 24, 25, 26, 27
+};
+static u8 skx_open_row[] = {
+ 14, 15, 16, 20, 28, 21, 22, 23, 24, 25, 26, 27, 29, 30, 31, 32, 33
+};
+static u8 skx_open_column[] = {
+ 3, 4, 5, 6, 7, 8, 9, 10, 11, 12
+};
+static u8 skx_open_fine_column[] = {
+ 3, 4, 5, 7, 8, 9, 10, 11, 12, 13
+};
+
+static int skx_bits(u64 addr, int nbits, u8 *bits)
+{
+ int i, res = 0;
+
+ for (i = 0; i < nbits; i++)
+ res |= ((addr >> bits[i]) & 1) << i;
+ return res;
+}
+
+static int skx_bank_bits(u64 addr, int b0, int b1, int do_xor, int x0, int x1)
+{
+ int ret = GET_BITFIELD(addr, b0, b0) | (GET_BITFIELD(addr, b1, b1) << 1);
+
+ if (do_xor)
+ ret ^= GET_BITFIELD(addr, x0, x0) | (GET_BITFIELD(addr, x1, x1) << 1);
+
+ return ret;
+}
+
+static bool skx_mad_decode(struct decoded_addr *r)
+{
+ struct skx_dimm *dimm = &r->dev->imc[r->imc].chan[r->channel].dimms[r->dimm];
+ int bg0 = dimm->fine_grain_bank ? 6 : 13;
+
+ if (dimm->close_pg) {
+ r->row = skx_bits(r->rank_address, dimm->rowbits, skx_close_row);
+ r->column = skx_bits(r->rank_address, dimm->colbits, skx_close_column);
+ r->column |= 0x400; /* C10 is autoprecharge, always set */
+ r->bank_address = skx_bank_bits(r->rank_address, 8, 9, dimm->bank_xor_enable, 22, 28);
+ r->bank_group = skx_bank_bits(r->rank_address, 6, 7, dimm->bank_xor_enable, 20, 21);
+ } else {
+ r->row = skx_bits(r->rank_address, dimm->rowbits, skx_open_row);
+ if (dimm->fine_grain_bank)
+ r->column = skx_bits(r->rank_address, dimm->colbits, skx_open_fine_column);
+ else
+ r->column = skx_bits(r->rank_address, dimm->colbits, skx_open_column);
+ r->bank_address = skx_bank_bits(r->rank_address, 18, 19, dimm->bank_xor_enable, 22, 23);
+ r->bank_group = skx_bank_bits(r->rank_address, bg0, 17, dimm->bank_xor_enable, 20, 21);
+ }
+ r->row &= (1u << dimm->rowbits) - 1;
+
+ edac_dbg(2, "%llx: row=%x col=%x bank_addr=%d bank_group=%d\n",
+ r->addr, r->row, r->column, r->bank_address,
+ r->bank_group);
+ return true;
+}
+
+static bool skx_decode(struct decoded_addr *res)
+{
+
+ return skx_sad_decode(res) && skx_tad_decode(res) &&
+ skx_rir_decode(res) && skx_mad_decode(res);
+}
+
+#ifdef CONFIG_EDAC_DEBUG
+/*
+ * Debug feature. Make /sys/kernel/debug/skx_edac_test/addr.
+ * Write an address to this file to exercise the address decode
+ * logic in this driver.
+ */
+static struct dentry *skx_test;
+static u64 skx_fake_addr;
+
+static int debugfs_u64_set(void *data, u64 val)
+{
+ struct decoded_addr res;
+
+ res.addr = val;
+ skx_decode(&res);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(fops_u64_wo, NULL, debugfs_u64_set, "%llu\n");
+
+static struct dentry *mydebugfs_create(const char *name, umode_t mode,
+ struct dentry *parent, u64 *value)
+{
+ return debugfs_create_file(name, mode, parent, value, &fops_u64_wo);
+}
+
+static void setup_skx_debug(void)
+{
+ skx_test = debugfs_create_dir("skx_edac_test", NULL);
+ mydebugfs_create("addr", S_IWUSR, skx_test, &skx_fake_addr);
+}
+
+static void teardown_skx_debug(void)
+{
+ debugfs_remove_recursive(skx_test);
+}
+#else
+static void setup_skx_debug(void)
+{
+}
+
+static void teardown_skx_debug(void)
+{
+}
+#endif /*CONFIG_EDAC_DEBUG*/
+
+static void skx_mce_output_error(struct mem_ctl_info *mci,
+ const struct mce *m,
+ struct decoded_addr *res)
+{
+ enum hw_event_mc_err_type tp_event;
+ char *type, *optype, msg[256];
+ bool ripv = GET_BITFIELD(m->mcgstatus, 0, 0);
+ bool overflow = GET_BITFIELD(m->status, 62, 62);
+ bool uncorrected_error = GET_BITFIELD(m->status, 61, 61);
+ bool recoverable;
+ u32 core_err_cnt = GET_BITFIELD(m->status, 38, 52);
+ u32 mscod = GET_BITFIELD(m->status, 16, 31);
+ u32 errcode = GET_BITFIELD(m->status, 0, 15);
+ u32 optypenum = GET_BITFIELD(m->status, 4, 6);
+
+ recoverable = GET_BITFIELD(m->status, 56, 56);
+
+ if (uncorrected_error) {
+ if (ripv) {
+ type = "FATAL";
+ tp_event = HW_EVENT_ERR_FATAL;
+ } else {
+ type = "NON_FATAL";
+ tp_event = HW_EVENT_ERR_UNCORRECTED;
+ }
+ } else {
+ type = "CORRECTED";
+ tp_event = HW_EVENT_ERR_CORRECTED;
+ }
+
+ /*
+ * According to Table 15-9 of the Intel Architecture spec vol 3A,
+ * memory errors should fit in this mask:
+ * 000f 0000 1mmm cccc (binary)
+ * where:
+ * f = Correction Report Filtering Bit. If 1, subsequent errors
+ * won't be shown
+ * mmm = error type
+ * cccc = channel
+ * If the mask doesn't match, report an error to the parsing logic
+ */
+ if (!((errcode & 0xef80) == 0x80)) {
+ optype = "Can't parse: it is not a mem";
+ } else {
+ switch (optypenum) {
+ case 0:
+ optype = "generic undef request error";
+ break;
+ case 1:
+ optype = "memory read error";
+ break;
+ case 2:
+ optype = "memory write error";
+ break;
+ case 3:
+ optype = "addr/cmd error";
+ break;
+ case 4:
+ optype = "memory scrubbing error";
+ break;
+ default:
+ optype = "reserved";
+ break;
+ }
+ }
+
+ snprintf(msg, sizeof(msg),
+ "%s%s err_code:%04x:%04x socket:%d imc:%d rank:%d bg:%d ba:%d row:%x col:%x",
+ overflow ? " OVERFLOW" : "",
+ (uncorrected_error && recoverable) ? " recoverable" : "",
+ mscod, errcode,
+ res->socket, res->imc, res->rank,
+ res->bank_group, res->bank_address, res->row, res->column);
+
+ edac_dbg(0, "%s\n", msg);
+
+ /* Call the helper to output message */
+ edac_mc_handle_error(tp_event, mci, core_err_cnt,
+ m->addr >> PAGE_SHIFT, m->addr & ~PAGE_MASK, 0,
+ res->channel, res->dimm, -1,
+ optype, msg);
+}
+
+static int skx_mce_check_error(struct notifier_block *nb, unsigned long val,
+ void *data)
+{
+ struct mce *mce = (struct mce *)data;
+ struct decoded_addr res;
+ struct mem_ctl_info *mci;
+ char *type;
+
+ if (get_edac_report_status() == EDAC_REPORTING_DISABLED)
+ return NOTIFY_DONE;
+
+ /* ignore unless this is memory related with an address */
+ if ((mce->status & 0xefff) >> 7 != 1 || !(mce->status & MCI_STATUS_ADDRV))
+ return NOTIFY_DONE;
+
+ res.addr = mce->addr;
+ if (!skx_decode(&res))
+ return NOTIFY_DONE;
+ mci = res.dev->imc[res.imc].mci;
+
+ if (mce->mcgstatus & MCG_STATUS_MCIP)
+ type = "Exception";
+ else
+ type = "Event";
+
+ skx_mc_printk(mci, KERN_DEBUG, "HANDLING MCE MEMORY ERROR\n");
+
+ skx_mc_printk(mci, KERN_DEBUG, "CPU %d: Machine Check %s: %Lx "
+ "Bank %d: %016Lx\n", mce->extcpu, type,
+ mce->mcgstatus, mce->bank, mce->status);
+ skx_mc_printk(mci, KERN_DEBUG, "TSC %llx ", mce->tsc);
+ skx_mc_printk(mci, KERN_DEBUG, "ADDR %llx ", mce->addr);
+ skx_mc_printk(mci, KERN_DEBUG, "MISC %llx ", mce->misc);
+
+ skx_mc_printk(mci, KERN_DEBUG, "PROCESSOR %u:%x TIME %llu SOCKET "
+ "%u APIC %x\n", mce->cpuvendor, mce->cpuid,
+ mce->time, mce->socketid, mce->apicid);
+
+ skx_mce_output_error(mci, mce, &res);
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block skx_mce_dec = {
+ .notifier_call = skx_mce_check_error,
+};
+
+static void skx_remove(void)
+{
+ int i, j;
+ struct skx_dev *d, *tmp;
+
+ edac_dbg(0, "\n");
+
+ list_for_each_entry_safe(d, tmp, &skx_edac_list, list) {
+ list_del(&d->list);
+ for (i = 0; i < NUM_IMC; i++) {
+ skx_unregister_mci(&d->imc[i]);
+ for (j = 0; j < NUM_CHANNELS; j++)
+ pci_dev_put(d->imc[i].chan[j].cdev);
+ }
+ pci_dev_put(d->util_all);
+ pci_dev_put(d->sad_all);
+
+ kfree(d);
+ }
+}
+
+/*
+ * skx_init:
+ * make sure we are running on the correct cpu model
+ * search for all the devices we need
+ * check which DIMMs are present.
+ */
+int __init skx_init(void)
+{
+ const struct x86_cpu_id *id;
+ const struct munit *m;
+ int rc = 0, i;
+ u8 mc = 0, src_id, node_id;
+ struct skx_dev *d;
+
+ edac_dbg(2, "\n");
+
+ id = x86_match_cpu(skx_cpuids);
+ if (!id)
+ return -ENODEV;
+
+ rc = skx_get_hi_lo();
+ if (rc)
+ return rc;
+
+ rc = get_all_bus_mappings();
+ if (rc < 0)
+ goto fail;
+ if (rc == 0) {
+ edac_dbg(2, "No memory controllers found\n");
+ return -ENODEV;
+ }
+
+ for (m = skx_all_munits; m->did; m++) {
+ rc = get_all_munits(m);
+ if (rc < 0)
+ goto fail;
+ if (rc != m->per_socket * skx_num_sockets) {
+ edac_dbg(2, "Expected %d, got %d of %x\n",
+ m->per_socket * skx_num_sockets, rc, m->did);
+ rc = -ENODEV;
+ goto fail;
+ }
+ }
+
+ list_for_each_entry(d, &skx_edac_list, list) {
+ src_id = get_src_id(d);
+ node_id = skx_get_node_id(d);
+ edac_dbg(2, "src_id=%d node_id=%d\n", src_id, node_id);
+ for (i = 0; i < NUM_IMC; i++) {
+ d->imc[i].mc = mc++;
+ d->imc[i].lmc = i;
+ d->imc[i].src_id = src_id;
+ d->imc[i].node_id = node_id;
+ rc = skx_register_mci(&d->imc[i]);
+ if (rc < 0)
+ goto fail;
+ }
+ }
+
+ /* Ensure that the OPSTATE is set correctly for POLL or NMI */
+ opstate_init();
+
+ setup_skx_debug();
+
+ mce_register_decode_chain(&skx_mce_dec);
+
+ return 0;
+fail:
+ skx_remove();
+ return rc;
+}
+
+static void __exit skx_exit(void)
+{
+ edac_dbg(2, "\n");
+ mce_unregister_decode_chain(&skx_mce_dec);
+ skx_remove();
+ teardown_skx_debug();
+}
+
+module_init(skx_init);
+module_exit(skx_exit);
+
+module_param(edac_op_state, int, 0444);
+MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Tony Luck");
+MODULE_DESCRIPTION("MC Driver for Intel Skylake server processors");
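The decode chain above runs SAD, then TAD, then RIR, then bank/row/column
mapping, and each stage strips one level of interleaving with the
shift/divide/shift pattern seen in skx_do_interleave() and skx_rir_decode().
A worked numeric sketch (standalone, made-up values) shows what one step does:

	/* Undo a 2-way interleave decided at bit 8 of the address. */
	u64 addr = 0x12345, lowbits = 0x12345;
	int shift = 8, ways = 2;

	addr >>= shift;		/* 0x123: which 256-byte chunk */
	addr /= ways;		/* 0x91: chunk index on this target
				 * (the discarded remainder, 1, is the way) */
	addr <<= shift;		/* 0x9100: base of the local chunk */
	addr |= lowbits & ((1ull << shift) - 1);	/* 0x9145: keep offset */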
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 98dd47a30fc7..66a94103798b 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -50,6 +50,7 @@ config GPIO_DEVRES
config OF_GPIO
def_bool y
depends on OF
+ depends on HAS_IOMEM
config GPIO_ACPI
def_bool y
@@ -188,7 +189,7 @@ config GPIO_EP93XX
config GPIO_ETRAXFS
bool "Axis ETRAX FS General I/O"
depends on CRIS || COMPILE_TEST
- depends on OF
+ depends on OF_GPIO
select GPIO_GENERIC
select GPIOLIB_IRQCHIP
help
@@ -214,7 +215,7 @@ config GPIO_GENERIC_PLATFORM
config GPIO_GRGPIO
tristate "Aeroflex Gaisler GRGPIO support"
- depends on OF
+ depends on OF_GPIO
select GPIO_GENERIC
select IRQ_DOMAIN
help
@@ -312,7 +313,7 @@ config GPIO_MPC8XXX
config GPIO_MVEBU
def_bool y
depends on PLAT_ORION
- depends on OF
+ depends on OF_GPIO
select GENERIC_IRQ_CHIP
config GPIO_MXC
@@ -405,7 +406,7 @@ config GPIO_TEGRA
bool "NVIDIA Tegra GPIO support"
default ARCH_TEGRA
depends on ARCH_TEGRA || COMPILE_TEST
- depends on OF
+ depends on OF_GPIO
help
Say yes here to support GPIO pins on NVIDIA Tegra SoCs.
@@ -1099,7 +1100,7 @@ menu "SPI GPIO expanders"
config GPIO_74X164
tristate "74x164 serial-in/parallel-out 8-bits shift register"
- depends on OF
+ depends on OF_GPIO
help
Driver for 74x164 compatible serial-in/parallel-out 8-outputs
shift registers. This driver can be used to provide access
diff --git a/drivers/gpio/gpio-max730x.c b/drivers/gpio/gpio-max730x.c
index 08807368f007..946d09195598 100644
--- a/drivers/gpio/gpio-max730x.c
+++ b/drivers/gpio/gpio-max730x.c
@@ -192,6 +192,10 @@ int __max730x_probe(struct max7301 *ts)
ts->chip.parent = dev;
ts->chip.owner = THIS_MODULE;
+ ret = gpiochip_add_data(&ts->chip, ts);
+ if (ret)
+ goto exit_destroy;
+
/*
* initialize pullups according to platform data and cache the
* register values for later use.
@@ -213,10 +217,6 @@ int __max730x_probe(struct max7301 *ts)
}
}
- ret = gpiochip_add_data(&ts->chip, ts);
- if (ret)
- goto exit_destroy;
-
return ret;
exit_destroy:
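The gpio-max730x reorder matters because the pull-up initialization between
the two moved hunks ends up invoking the chip's direction_input() hook, which
relies on the per-chip data pointer that gpiochip_add_data() installs.
Schematically (a sketch of the ordering requirement, not the driver's exact
code):

	ret = gpiochip_add_data(&ts->chip, ts);	/* installs ts as chip data */
	if (ret)
		goto exit_destroy;

	/* only from here on may anything call hooks that use
	 * gpiochip_get_data(), such as the direction_input() calls
	 * made while applying the platform pull-up configuration */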
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 8ebc5f1eb4c0..700c56baf2de 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -426,6 +426,8 @@ struct amdgpu_mman {
/* custom LRU management */
struct amdgpu_mman_lru log2_size[AMDGPU_TTM_LRU_SIZE];
+ /* guard for log2_size array, don't add anything in between */
+ struct amdgpu_mman_lru guard;
};
int amdgpu_copy_buffer(struct amdgpu_ring *ring,
@@ -646,9 +648,9 @@ int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev);
void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev);
int amdgpu_gart_init(struct amdgpu_device *adev);
void amdgpu_gart_fini(struct amdgpu_device *adev);
-void amdgpu_gart_unbind(struct amdgpu_device *adev, unsigned offset,
+void amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
int pages);
-int amdgpu_gart_bind(struct amdgpu_device *adev, unsigned offset,
+int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
int pages, struct page **pagelist,
dma_addr_t *dma_addr, uint32_t flags);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
index 983175363b06..fe872b82e619 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
@@ -321,6 +321,19 @@ bool amdgpu_atombios_get_connector_info_from_object_table(struct amdgpu_device *
(le16_to_cpu(path->usConnObjectId) &
OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT;
+ /* Skip TV/CV support */
+ if ((le16_to_cpu(path->usDeviceTag) ==
+ ATOM_DEVICE_TV1_SUPPORT) ||
+ (le16_to_cpu(path->usDeviceTag) ==
+ ATOM_DEVICE_CV_SUPPORT))
+ continue;
+
+ if (con_obj_id >= ARRAY_SIZE(object_connector_convert)) {
+ DRM_ERROR("invalid con_obj_id %d for device tag 0x%04x\n",
+ con_obj_id, le16_to_cpu(path->usDeviceTag));
+ continue;
+ }
+
connector_type =
object_connector_convert[con_obj_id];
connector_object_id = con_obj_id;
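
The hunk above pairs two guards: skip TV/CV device tags, and bounds-check con_obj_id against ARRAY_SIZE() before using it to index object_connector_convert[]. A minimal standalone sketch of the bounds-check pattern; the connector_type() helper and the table contents here are made up for illustration:

	#include <stdio.h>

	#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

	/* hypothetical stand-in for object_connector_convert[] */
	static const int object_connector_convert[] = { 0, 14, 14, 11 };

	static int connector_type(unsigned int con_obj_id)
	{
		/* reject out-of-range ids instead of reading past the array */
		if (con_obj_id >= ARRAY_SIZE(object_connector_convert)) {
			fprintf(stderr, "invalid con_obj_id %u\n", con_obj_id);
			return -1;
		}

		return object_connector_convert[con_obj_id];
	}

	int main(void)
	{
		printf("%d\n", connector_type(1));	/* valid index */
		printf("%d\n", connector_type(42));	/* rejected */
		return 0;
	}
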
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
index 49de92600074..10b5ddf2c588 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
@@ -200,16 +200,7 @@ static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx)
atpx->is_hybrid = false;
if (valid_bits & ATPX_MS_HYBRID_GFX_SUPPORTED) {
printk("ATPX Hybrid Graphics\n");
-#if 1
- /* This is a temporary hack until the D3 cold support
- * makes it upstream. The ATPX power_control method seems
- * to still work on even if the system should be using
- * the new standardized hybrid D3 cold ACPI interface.
- */
- atpx->functions.power_cntl = true;
-#else
atpx->functions.power_cntl = false;
-#endif
atpx->is_hybrid = true;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index 921bce2df0b0..0feea347f680 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -221,7 +221,7 @@ void amdgpu_gart_table_vram_free(struct amdgpu_device *adev)
* Unbinds the requested pages from the gart page table and
* replaces them with the dummy page (all asics).
*/
-void amdgpu_gart_unbind(struct amdgpu_device *adev, unsigned offset,
+void amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
int pages)
{
unsigned t;
@@ -268,7 +268,7 @@ void amdgpu_gart_unbind(struct amdgpu_device *adev, unsigned offset,
* (all asics).
* Returns 0 for success, -EINVAL for failure.
*/
-int amdgpu_gart_bind(struct amdgpu_device *adev, unsigned offset,
+int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
int pages, struct page **pagelist, dma_addr_t *dma_addr,
uint32_t flags)
{
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 9b61c8ba7aaf..716f2afeb6a9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -251,8 +251,8 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
adev = amdgpu_get_adev(bo->bdev);
ring = adev->mman.buffer_funcs_ring;
- old_start = old_mem->start << PAGE_SHIFT;
- new_start = new_mem->start << PAGE_SHIFT;
+ old_start = (u64)old_mem->start << PAGE_SHIFT;
+ new_start = (u64)new_mem->start << PAGE_SHIFT;
switch (old_mem->mem_type) {
case TTM_PL_VRAM:
@@ -950,6 +950,8 @@ static struct list_head *amdgpu_ttm_lru_tail(struct ttm_buffer_object *tbo)
struct list_head *res = lru->lru[tbo->mem.mem_type];
lru->lru[tbo->mem.mem_type] = &tbo->lru;
+ while ((++lru)->lru[tbo->mem.mem_type] == res)
+ lru->lru[tbo->mem.mem_type] = &tbo->lru;
return res;
}
@@ -960,6 +962,8 @@ static struct list_head *amdgpu_ttm_swap_lru_tail(struct ttm_buffer_object *tbo)
struct list_head *res = lru->swap_lru;
lru->swap_lru = &tbo->swap;
+ while ((++lru)->swap_lru == res)
+ lru->swap_lru = &tbo->swap;
return res;
}
@@ -1011,6 +1015,10 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
lru->swap_lru = &adev->mman.bdev.glob->swap_lru;
}
+ for (j = 0; j < TTM_NUM_MEM_TYPES; ++j)
+ adev->mman.guard.lru[j] = NULL;
+ adev->mman.guard.swap_lru = NULL;
+
adev->mman.initialized = true;
r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_VRAM,
adev->mc.real_vram_size >> PAGE_SHIFT);
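
The (u64) casts in amdgpu_move_blit() matter because mem->start holds a page number in a type that is 32 bits wide on 32-bit builds, so the unpatched shift wraps for offsets at or above 4 GiB (radeon_move_blit() gets the same fix further down). A standalone sketch, using uint32_t to model the 32-bit case:

	#include <stdint.h>
	#include <stdio.h>

	#define PAGE_SHIFT 12

	int main(void)
	{
		uint32_t start = 0x100000;	/* page number for a 4 GiB offset */

		/* shift performed in 32 bits: wraps to 0 */
		uint64_t bad = start << PAGE_SHIFT;

		/* widen to 64 bits first, as the patch does */
		uint64_t good = (uint64_t)start << PAGE_SHIFT;

		printf("bad=0x%llx good=0x%llx\n",
		       (unsigned long long)bad, (unsigned long long)good);
		return 0;
	}
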
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index b11f4e8868d7..4aa993d19018 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -1187,7 +1187,8 @@ int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout)
r = 0;
}
-error:
fence_put(fence);
+
+error:
return r;
}
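
Moving fence_put() above the error: label makes the put run only on the path where a fence was actually obtained; earlier failures now jump straight to the return. A minimal sketch of the rule, with malloc()/free() standing in for fence acquire/release:

	#include <stdio.h>
	#include <stdlib.h>

	static int do_test(void)
	{
		int r = -1;
		void *fence;

		fence = malloc(16);	/* stand-in for acquiring the fence */
		if (!fence)
			goto error;	/* nothing to release yet */

		/* ... use the fence ... */
		r = 0;

		free(fence);		/* release only what was acquired */

	error:
		return r;
	}

	int main(void)
	{
		printf("r=%d\n", do_test());
		return 0;
	}
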
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 8e642fc48df4..80120fa4092c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -1535,7 +1535,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
r = amd_sched_entity_init(&ring->sched, &vm->entity,
rq, amdgpu_sched_jobs);
if (r)
- return r;
+ goto err;
vm->page_directory_fence = NULL;
@@ -1565,6 +1565,9 @@ error_free_page_directory:
error_free_sched_entity:
amd_sched_entity_fini(&ring->sched, &vm->entity);
+err:
+ drm_free_large(vm->page_tables);
+
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index 1351c7e834a2..a64715d90503 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -714,7 +714,7 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring, long timeout)
DRM_ERROR("amdgpu: IB test timed out\n");
r = -ETIMEDOUT;
goto err1;
- } else if (r) {
+ } else if (r < 0) {
DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
goto err1;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
index e621eba63126..a7d3cb3fead0 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
@@ -184,7 +184,7 @@ u32 __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd,
sizeof(u32)) + inx;
pr_debug("kfd: get kernel queue doorbell\n"
- " doorbell offset == 0x%08d\n"
+ " doorbell offset == 0x%08X\n"
" kernel address == 0x%08lX\n",
*doorbell_off, (uintptr_t)(kfd->doorbell_kernel_ptr + inx));
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index ef312bb75fda..963a24d46a93 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -405,7 +405,7 @@ void amd_sched_job_recovery(struct amd_gpu_scheduler *sched)
spin_lock(&sched->job_list_lock);
s_job = list_first_entry_or_null(&sched->ring_mirror_list,
struct amd_sched_job, node);
- if (s_job)
+ if (s_job && sched->timeout != MAX_SCHEDULE_TIMEOUT)
schedule_delayed_work(&s_job->work_tdr, sched->timeout);
list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index fa3930757972..2a3ded44cf2a 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -475,7 +475,7 @@ int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
val,
-1,
&replaced);
- state->color_mgmt_changed = replaced;
+ state->color_mgmt_changed |= replaced;
return ret;
} else if (property == config->ctm_property) {
ret = drm_atomic_replace_property_blob_from_id(crtc,
@@ -483,7 +483,7 @@ int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
val,
sizeof(struct drm_color_ctm),
&replaced);
- state->color_mgmt_changed = replaced;
+ state->color_mgmt_changed |= replaced;
return ret;
} else if (property == config->gamma_lut_property) {
ret = drm_atomic_replace_property_blob_from_id(crtc,
@@ -491,7 +491,7 @@ int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
val,
-1,
&replaced);
- state->color_mgmt_changed = replaced;
+ state->color_mgmt_changed |= replaced;
return ret;
} else if (crtc->funcs->atomic_set_property)
return crtc->funcs->atomic_set_property(crtc, state, property, val);
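
Plain assignment meant that whichever color-management property was processed last could clear a color_mgmt_changed flag set earlier on the same state; OR-ing keeps the flag sticky once any blob has been replaced. A toy sketch:

	#include <stdbool.h>
	#include <stdio.h>

	int main(void)
	{
		bool color_mgmt_changed = false;
		bool replaced;

		replaced = true;			/* degamma blob swapped */
		color_mgmt_changed |= replaced;

		replaced = false;			/* ctm left untouched */
		color_mgmt_changed |= replaced;		/* '=' would clear it here */

		printf("color_mgmt_changed=%d\n", color_mgmt_changed);
		return 0;
	}
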
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index b1dbb60af99f..ddebe54cd5ca 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -5404,6 +5404,9 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
struct drm_pending_vblank_event *e = NULL;
int ret = -EINVAL;
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
if (page_flip->flags & ~DRM_MODE_PAGE_FLIP_FLAGS ||
page_flip->reserved != 0)
return -EINVAL;
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index ce54e985d91b..0a06f9120b5a 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -464,7 +464,7 @@ static bool drm_fb_helper_is_bound(struct drm_fb_helper *fb_helper)
/* Sometimes user space wants everything disabled, so don't steal the
* display if there's a master. */
- if (lockless_dereference(dev->master))
+ if (READ_ONCE(dev->master))
return false;
drm_for_each_crtc(crtc, dev) {
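
lockless_dereference() also implies the data-dependency barrier needed when an RCU-protected pointer is dereferenced and followed; here dev->master is only tested against NULL, so a plain READ_ONCE() is enough. A userspace sketch with a local stand-in for the kernel macro:

	#include <stdio.h>

	/* stand-in for the kernel's READ_ONCE(): one untorn load, no re-reads */
	#define READ_ONCE(x) (*(volatile __typeof__(x) *)&(x))

	struct drm_master;
	static struct drm_master *master;	/* may be written concurrently */

	int main(void)
	{
		if (READ_ONCE(master))
			puts("master held, don't steal the display");
		else
			puts("no master");
		return 0;
	}
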
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index 87ef34150d46..b382cf505262 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -1333,8 +1333,6 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
if (ret < 0)
return ret;
- mutex_lock(&gpu->lock);
-
/*
* TODO
*
@@ -1348,16 +1346,18 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
if (unlikely(event == ~0U)) {
DRM_ERROR("no free event\n");
ret = -EBUSY;
- goto out_unlock;
+ goto out_pm_put;
}
fence = etnaviv_gpu_fence_alloc(gpu);
if (!fence) {
event_free(gpu, event);
ret = -ENOMEM;
- goto out_unlock;
+ goto out_pm_put;
}
+ mutex_lock(&gpu->lock);
+
gpu->event[event].fence = fence;
submit->fence = fence->seqno;
gpu->active_fence = submit->fence;
@@ -1395,9 +1395,9 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
hangcheck_timer_reset(gpu);
ret = 0;
-out_unlock:
mutex_unlock(&gpu->lock);
+out_pm_put:
etnaviv_gpu_pm_put(gpu);
return ret;
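
The reshuffle above takes gpu->lock only after every allocation that can fail, so no error path has to unlock a mutex it may not hold, and the out_unlock label splits cleanly into out_pm_put. A userspace sketch of the shape; the fence allocation and the queueing work are placeholders:

	#include <pthread.h>
	#include <stdio.h>
	#include <stdlib.h>

	static pthread_mutex_t gpu_lock = PTHREAD_MUTEX_INITIALIZER;

	static int submit(void)
	{
		void *fence;
		int ret;

		fence = malloc(16);	/* fallible setup, done unlocked */
		if (!fence) {
			ret = -1;
			goto out_pm_put;	/* no lock to drop */
		}

		pthread_mutex_lock(&gpu_lock);
		/* ... publish the fence, queue the buffer ... */
		pthread_mutex_unlock(&gpu_lock);

		free(fence);
		ret = 0;

	out_pm_put:
		/* runtime-PM reference would be dropped here */
		return ret;
	}

	int main(void)
	{
		printf("submit=%d\n", submit());
		return 0;
	}
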
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 21f939074abc..f68c78918d63 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -882,11 +882,12 @@ struct i915_gem_context {
struct i915_ctx_hang_stats hang_stats;
- /* Unique identifier for this context, used by the hw for tracking */
unsigned long flags;
#define CONTEXT_NO_ZEROMAP BIT(0)
#define CONTEXT_NO_ERROR_CAPTURE BIT(1)
- unsigned hw_id;
+
+ /* Unique identifier for this context, used by the hw for tracking */
+ unsigned int hw_id;
u32 user_handle;
u32 ggtt_alignment;
@@ -1854,6 +1855,7 @@ struct drm_i915_private {
enum modeset_restore modeset_restore;
struct mutex modeset_restore_lock;
struct drm_atomic_state *modeset_restore_state;
+ struct drm_modeset_acquire_ctx reset_ctx;
struct list_head vm_list; /* Global list of all address spaces */
struct i915_ggtt ggtt; /* VM representing the global address space */
@@ -1962,6 +1964,13 @@ struct drm_i915_private {
struct i915_suspend_saved_registers regfile;
struct vlv_s0ix_state vlv_s0ix_state;
+ enum {
+ I915_SKL_SAGV_UNKNOWN = 0,
+ I915_SKL_SAGV_DISABLED,
+ I915_SKL_SAGV_ENABLED,
+ I915_SKL_SAGV_NOT_CONTROLLED
+ } skl_sagv_status;
+
struct {
/*
* Raw watermark latency values:
@@ -3590,6 +3599,7 @@ int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
/* belongs in i915_gem_gtt.h */
static inline void i915_gem_chipset_flush(struct drm_i915_private *dev_priv)
{
+ wmb();
if (INTEL_GEN(dev_priv) < 6)
intel_gtt_chipset_flush();
}
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 11681501d7b1..a77ce9983f69 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -879,9 +879,12 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
ret = i915_gem_shmem_pread(dev, obj, args, file);
/* pread for non shmem backed objects */
- if (ret == -EFAULT || ret == -ENODEV)
+ if (ret == -EFAULT || ret == -ENODEV) {
+ intel_runtime_pm_get(to_i915(dev));
ret = i915_gem_gtt_pread(dev, obj, args->size,
args->offset, args->data_ptr);
+ intel_runtime_pm_put(to_i915(dev));
+ }
out:
drm_gem_object_unreference(&obj->base);
@@ -1306,7 +1309,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
* textures). Fallback to the shmem path in that case. */
}
- if (ret == -EFAULT) {
+ if (ret == -EFAULT || ret == -ENOSPC) {
if (obj->phys_handle)
ret = i915_gem_phys_pwrite(obj, args, file);
else if (i915_gem_object_has_struct_page(obj))
@@ -3169,6 +3172,8 @@ static void i915_gem_reset_engine_cleanup(struct intel_engine_cs *engine)
}
intel_ring_init_seqno(engine, engine->last_submitted_seqno);
+
+ engine->i915->gt.active_engines &= ~intel_engine_flag(engine);
}
void i915_gem_reset(struct drm_device *dev)
@@ -3186,6 +3191,7 @@ void i915_gem_reset(struct drm_device *dev)
for_each_engine(engine, dev_priv)
i915_gem_reset_engine_cleanup(engine);
+ mod_delayed_work(dev_priv->wq, &dev_priv->gt.idle_work, 0);
i915_gem_context_reset(dev);
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 1978633e7549..b35e5b6475b2 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -943,8 +943,6 @@ i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
{
const unsigned other_rings = ~intel_engine_flag(req->engine);
struct i915_vma *vma;
- uint32_t flush_domains = 0;
- bool flush_chipset = false;
int ret;
list_for_each_entry(vma, vmas, exec_list) {
@@ -957,16 +955,11 @@ i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
}
if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
- flush_chipset |= i915_gem_clflush_object(obj, false);
-
- flush_domains |= obj->base.write_domain;
+ i915_gem_clflush_object(obj, false);
}
- if (flush_chipset)
- i915_gem_chipset_flush(req->engine->i915);
-
- if (flush_domains & I915_GEM_DOMAIN_GTT)
- wmb();
+ /* Unconditionally flush any chipset caches (for streaming writes). */
+ i915_gem_chipset_flush(req->engine->i915);
/* Unconditionally invalidate gpu caches and ensure that we do flush
* any residual writes from the previous batch.
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 10f1e32767e6..7a30af79d799 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -2873,6 +2873,7 @@ void i915_ggtt_cleanup_hw(struct drm_device *dev)
struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
ppgtt->base.cleanup(&ppgtt->base);
+ kfree(ppgtt);
}
i915_gem_cleanup_stolen(dev);
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index ce14fe09d962..bf2cad3f9e1f 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -1536,6 +1536,7 @@ enum skl_disp_power_wells {
#define BALANCE_LEG_MASK(port) (7<<(8+3*(port)))
/* Balance leg disable bits */
#define BALANCE_LEG_DISABLE_SHIFT 23
+#define BALANCE_LEG_DISABLE(port) (1 << (23 + (port)))
/*
* Fence registers
@@ -7144,6 +7145,15 @@ enum {
#define GEN6_PCODE_MAILBOX _MMIO(0x138124)
#define GEN6_PCODE_READY (1<<31)
+#define GEN6_PCODE_ERROR_MASK 0xFF
+#define GEN6_PCODE_SUCCESS 0x0
+#define GEN6_PCODE_ILLEGAL_CMD 0x1
+#define GEN6_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE 0x2
+#define GEN6_PCODE_TIMEOUT 0x3
+#define GEN6_PCODE_UNIMPLEMENTED_CMD 0xFF
+#define GEN7_PCODE_TIMEOUT 0x2
+#define GEN7_PCODE_ILLEGAL_DATA 0x3
+#define GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE 0x10
#define GEN6_PCODE_WRITE_RC6VIDS 0x4
#define GEN6_PCODE_READ_RC6VIDS 0x5
#define GEN6_ENCODE_RC6_VID(mv) (((mv) - 245) / 5)
@@ -7165,6 +7175,10 @@ enum {
#define HSW_PCODE_DE_WRITE_FREQ_REQ 0x17
#define DISPLAY_IPS_CONTROL 0x19
#define HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL 0x1A
+#define GEN9_PCODE_SAGV_CONTROL 0x21
+#define GEN9_SAGV_DISABLE 0x0
+#define GEN9_SAGV_IS_DISABLED 0x1
+#define GEN9_SAGV_ENABLE 0x3
#define GEN6_PCODE_DATA _MMIO(0x138128)
#define GEN6_PCODE_FREQ_IA_RATIO_SHIFT 8
#define GEN6_PCODE_FREQ_RING_RATIO_SHIFT 16
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
index 6700a7be7f78..d32f586f9c05 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -600,6 +600,8 @@ static void i915_audio_component_codec_wake_override(struct device *dev,
if (!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv))
return;
+ i915_audio_component_get_power(dev);
+
/*
* Enable/disable generating the codec wake signal, overriding the
* internal logic to generate the codec wake to controller.
@@ -615,6 +617,8 @@ static void i915_audio_component_codec_wake_override(struct device *dev,
I915_WRITE(HSW_AUD_CHICKENBIT, tmp);
usleep_range(1000, 1500);
}
+
+ i915_audio_component_put_power(dev);
}
/* Get CDCLK in kHz */
@@ -648,6 +652,7 @@ static int i915_audio_component_sync_audio_rate(struct device *dev,
!IS_HASWELL(dev_priv))
return 0;
+ i915_audio_component_get_power(dev);
mutex_lock(&dev_priv->av_mutex);
/* 1. get the pipe */
intel_encoder = dev_priv->dig_port_map[port];
@@ -698,6 +703,7 @@ static int i915_audio_component_sync_audio_rate(struct device *dev,
unlock:
mutex_unlock(&dev_priv->av_mutex);
+ i915_audio_component_put_power(dev);
return err;
}
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c
index 3edb9580928e..c3b33a10c15c 100644
--- a/drivers/gpu/drm/i915/intel_csr.c
+++ b/drivers/gpu/drm/i915/intel_csr.c
@@ -41,15 +41,15 @@
* be moved to FW_FAILED.
*/
-#define I915_CSR_KBL "i915/kbl_dmc_ver1.bin"
+#define I915_CSR_KBL "i915/kbl_dmc_ver1_01.bin"
MODULE_FIRMWARE(I915_CSR_KBL);
#define KBL_CSR_VERSION_REQUIRED CSR_VERSION(1, 1)
-#define I915_CSR_SKL "i915/skl_dmc_ver1.bin"
+#define I915_CSR_SKL "i915/skl_dmc_ver1_26.bin"
MODULE_FIRMWARE(I915_CSR_SKL);
-#define SKL_CSR_VERSION_REQUIRED CSR_VERSION(1, 23)
+#define SKL_CSR_VERSION_REQUIRED CSR_VERSION(1, 26)
-#define I915_CSR_BXT "i915/bxt_dmc_ver1.bin"
+#define I915_CSR_BXT "i915/bxt_dmc_ver1_07.bin"
MODULE_FIRMWARE(I915_CSR_BXT);
#define BXT_CSR_VERSION_REQUIRED CSR_VERSION(1, 7)
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index dd1d6fe12297..1a7efac65fd5 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -145,7 +145,7 @@ static const struct ddi_buf_trans skl_ddi_translations_dp[] = {
static const struct ddi_buf_trans skl_u_ddi_translations_dp[] = {
{ 0x0000201B, 0x000000A2, 0x0 },
{ 0x00005012, 0x00000088, 0x0 },
- { 0x80007011, 0x000000CD, 0x0 },
+ { 0x80007011, 0x000000CD, 0x1 },
{ 0x80009010, 0x000000C0, 0x1 },
{ 0x0000201B, 0x0000009D, 0x0 },
{ 0x80005012, 0x000000C0, 0x1 },
@@ -158,7 +158,7 @@ static const struct ddi_buf_trans skl_u_ddi_translations_dp[] = {
static const struct ddi_buf_trans skl_y_ddi_translations_dp[] = {
{ 0x00000018, 0x000000A2, 0x0 },
{ 0x00005012, 0x00000088, 0x0 },
- { 0x80007011, 0x000000CD, 0x0 },
+ { 0x80007011, 0x000000CD, 0x3 },
{ 0x80009010, 0x000000C0, 0x3 },
{ 0x00000018, 0x0000009D, 0x0 },
{ 0x80005012, 0x000000C0, 0x3 },
@@ -388,6 +388,40 @@ skl_get_buf_trans_hdmi(struct drm_i915_private *dev_priv, int *n_entries)
}
}
+static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port port)
+{
+ int n_hdmi_entries;
+ int hdmi_level;
+ int hdmi_default_entry;
+
+ hdmi_level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift;
+
+ if (IS_BROXTON(dev_priv))
+ return hdmi_level;
+
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+ skl_get_buf_trans_hdmi(dev_priv, &n_hdmi_entries);
+ hdmi_default_entry = 8;
+ } else if (IS_BROADWELL(dev_priv)) {
+ n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
+ hdmi_default_entry = 7;
+ } else if (IS_HASWELL(dev_priv)) {
+ n_hdmi_entries = ARRAY_SIZE(hsw_ddi_translations_hdmi);
+ hdmi_default_entry = 6;
+ } else {
+ WARN(1, "ddi translation table missing\n");
+ n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
+ hdmi_default_entry = 7;
+ }
+
+ /* Choose a good default if VBT is badly populated */
+ if (hdmi_level == HDMI_LEVEL_SHIFT_UNKNOWN ||
+ hdmi_level >= n_hdmi_entries)
+ hdmi_level = hdmi_default_entry;
+
+ return hdmi_level;
+}
+
/*
* Starting with Haswell, DDI port buffers must be programmed with correct
* values in advance. The buffer values are different for FDI and DP modes,
@@ -399,7 +433,7 @@ void intel_prepare_ddi_buffer(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 iboost_bit = 0;
- int i, n_hdmi_entries, n_dp_entries, n_edp_entries, hdmi_default_entry,
+ int i, n_hdmi_entries, n_dp_entries, n_edp_entries,
size;
int hdmi_level;
enum port port;
@@ -410,7 +444,7 @@ void intel_prepare_ddi_buffer(struct intel_encoder *encoder)
const struct ddi_buf_trans *ddi_translations;
port = intel_ddi_get_encoder_port(encoder);
- hdmi_level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift;
+ hdmi_level = intel_ddi_hdmi_level(dev_priv, port);
if (IS_BROXTON(dev_priv)) {
if (encoder->type != INTEL_OUTPUT_HDMI)
@@ -430,7 +464,6 @@ void intel_prepare_ddi_buffer(struct intel_encoder *encoder)
skl_get_buf_trans_edp(dev_priv, &n_edp_entries);
ddi_translations_hdmi =
skl_get_buf_trans_hdmi(dev_priv, &n_hdmi_entries);
- hdmi_default_entry = 8;
/* If we're boosting the current, set bit 31 of trans1 */
if (dev_priv->vbt.ddi_port_info[port].hdmi_boost_level ||
dev_priv->vbt.ddi_port_info[port].dp_boost_level)
@@ -456,7 +489,6 @@ void intel_prepare_ddi_buffer(struct intel_encoder *encoder)
n_dp_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
- hdmi_default_entry = 7;
} else if (IS_HASWELL(dev_priv)) {
ddi_translations_fdi = hsw_ddi_translations_fdi;
ddi_translations_dp = hsw_ddi_translations_dp;
@@ -464,7 +496,6 @@ void intel_prepare_ddi_buffer(struct intel_encoder *encoder)
ddi_translations_hdmi = hsw_ddi_translations_hdmi;
n_dp_entries = n_edp_entries = ARRAY_SIZE(hsw_ddi_translations_dp);
n_hdmi_entries = ARRAY_SIZE(hsw_ddi_translations_hdmi);
- hdmi_default_entry = 6;
} else {
WARN(1, "ddi translation table missing\n");
ddi_translations_edp = bdw_ddi_translations_dp;
@@ -474,7 +505,6 @@ void intel_prepare_ddi_buffer(struct intel_encoder *encoder)
n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_edp);
n_dp_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
- hdmi_default_entry = 7;
}
switch (encoder->type) {
@@ -505,11 +535,6 @@ void intel_prepare_ddi_buffer(struct intel_encoder *encoder)
if (encoder->type != INTEL_OUTPUT_HDMI)
return;
- /* Choose a good default if VBT is badly populated */
- if (hdmi_level == HDMI_LEVEL_SHIFT_UNKNOWN ||
- hdmi_level >= n_hdmi_entries)
- hdmi_level = hdmi_default_entry;
-
/* Entry 9 is for HDMI: */
I915_WRITE(DDI_BUF_TRANS_LO(port, i),
ddi_translations_hdmi[hdmi_level].trans1 | iboost_bit);
@@ -1379,14 +1404,30 @@ void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc)
TRANS_CLK_SEL_DISABLED);
}
-static void skl_ddi_set_iboost(struct drm_i915_private *dev_priv,
- u32 level, enum port port, int type)
+static void _skl_ddi_set_iboost(struct drm_i915_private *dev_priv,
+ enum port port, uint8_t iboost)
{
+ u32 tmp;
+
+ tmp = I915_READ(DISPIO_CR_TX_BMU_CR0);
+ tmp &= ~(BALANCE_LEG_MASK(port) | BALANCE_LEG_DISABLE(port));
+ if (iboost)
+ tmp |= iboost << BALANCE_LEG_SHIFT(port);
+ else
+ tmp |= BALANCE_LEG_DISABLE(port);
+ I915_WRITE(DISPIO_CR_TX_BMU_CR0, tmp);
+}
+
+static void skl_ddi_set_iboost(struct intel_encoder *encoder, u32 level)
+{
+ struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base);
+ struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
+ enum port port = intel_dig_port->port;
+ int type = encoder->type;
const struct ddi_buf_trans *ddi_translations;
uint8_t iboost;
uint8_t dp_iboost, hdmi_iboost;
int n_entries;
- u32 reg;
/* VBT may override standard boost values */
dp_iboost = dev_priv->vbt.ddi_port_info[port].dp_boost_level;
@@ -1428,16 +1469,10 @@ static void skl_ddi_set_iboost(struct drm_i915_private *dev_priv,
return;
}
- reg = I915_READ(DISPIO_CR_TX_BMU_CR0);
- reg &= ~BALANCE_LEG_MASK(port);
- reg &= ~(1 << (BALANCE_LEG_DISABLE_SHIFT + port));
-
- if (iboost)
- reg |= iboost << BALANCE_LEG_SHIFT(port);
- else
- reg |= 1 << (BALANCE_LEG_DISABLE_SHIFT + port);
+ _skl_ddi_set_iboost(dev_priv, port, iboost);
- I915_WRITE(DISPIO_CR_TX_BMU_CR0, reg);
+ if (port == PORT_A && intel_dig_port->max_lanes == 4)
+ _skl_ddi_set_iboost(dev_priv, PORT_E, iboost);
}
static void bxt_ddi_vswing_sequence(struct drm_i915_private *dev_priv,
@@ -1568,7 +1603,7 @@ uint32_t ddi_signal_levels(struct intel_dp *intel_dp)
level = translate_signal_level(signal_levels);
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
- skl_ddi_set_iboost(dev_priv, level, port, encoder->type);
+ skl_ddi_set_iboost(encoder, level);
else if (IS_BROXTON(dev_priv))
bxt_ddi_vswing_sequence(dev_priv, level, port, encoder->type);
@@ -1637,6 +1672,10 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
intel_dp_stop_link_train(intel_dp);
} else if (type == INTEL_OUTPUT_HDMI) {
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ int level = intel_ddi_hdmi_level(dev_priv, port);
+
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
+ skl_ddi_set_iboost(intel_encoder, level);
intel_hdmi->set_infoframes(encoder,
crtc->config->has_hdmi_sink,
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index dcf93b3d4fb6..175595fc3e45 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -3093,40 +3093,110 @@ static void intel_update_primary_planes(struct drm_device *dev)
for_each_crtc(dev, crtc) {
struct intel_plane *plane = to_intel_plane(crtc->primary);
- struct intel_plane_state *plane_state;
-
- drm_modeset_lock_crtc(crtc, &plane->base);
- plane_state = to_intel_plane_state(plane->base.state);
+ struct intel_plane_state *plane_state =
+ to_intel_plane_state(plane->base.state);
if (plane_state->visible)
plane->update_plane(&plane->base,
to_intel_crtc_state(crtc->state),
plane_state);
+ }
+}
+
+static int
+__intel_display_resume(struct drm_device *dev,
+ struct drm_atomic_state *state)
+{
+ struct drm_crtc_state *crtc_state;
+ struct drm_crtc *crtc;
+ int i, ret;
+
+ intel_modeset_setup_hw_state(dev);
+ i915_redisable_vga(dev);
- drm_modeset_unlock_crtc(crtc);
+ if (!state)
+ return 0;
+
+ for_each_crtc_in_state(state, crtc, crtc_state, i) {
+ /*
+ * Force recalculation even if we restore
+ * current state. With fast modeset this may not result
+ * in a modeset when the state is compatible.
+ */
+ crtc_state->mode_changed = true;
}
+
+ /* ignore any reset values/BIOS leftovers in the WM registers */
+ to_intel_atomic_state(state)->skip_intermediate_wm = true;
+
+ ret = drm_atomic_commit(state);
+
+ WARN_ON(ret == -EDEADLK);
+ return ret;
}
void intel_prepare_reset(struct drm_i915_private *dev_priv)
{
+ struct drm_device *dev = &dev_priv->drm;
+ struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
+ struct drm_atomic_state *state;
+ int ret;
+
/* no reset support for gen2 */
if (IS_GEN2(dev_priv))
return;
- /* reset doesn't touch the display */
+ /*
+ * Need mode_config.mutex so that we don't
+ * trample ongoing ->detect() and whatnot.
+ */
+ mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_acquire_init(ctx, 0);
+ while (1) {
+ ret = drm_modeset_lock_all_ctx(dev, ctx);
+ if (ret != -EDEADLK)
+ break;
+
+ drm_modeset_backoff(ctx);
+ }
+
+	/* reset doesn't touch the display, but flips might get nuked anyway */
if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
return;
- drm_modeset_lock_all(&dev_priv->drm);
/*
* Disabling the crtcs gracefully seems nicer. Also the
* g33 docs say we should at least disable all the planes.
*/
- intel_display_suspend(&dev_priv->drm);
+ state = drm_atomic_helper_duplicate_state(dev, ctx);
+ if (IS_ERR(state)) {
+ ret = PTR_ERR(state);
+ state = NULL;
+ DRM_ERROR("Duplicating state failed with %i\n", ret);
+ goto err;
+ }
+
+ ret = drm_atomic_helper_disable_all(dev, ctx);
+ if (ret) {
+		DRM_ERROR("Suspending crtcs failed with %i\n", ret);
+ goto err;
+ }
+
+ dev_priv->modeset_restore_state = state;
+ state->acquire_ctx = ctx;
+ return;
+
+err:
+ drm_atomic_state_free(state);
}
void intel_finish_reset(struct drm_i915_private *dev_priv)
{
+ struct drm_device *dev = &dev_priv->drm;
+ struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
+ struct drm_atomic_state *state = dev_priv->modeset_restore_state;
+ int ret;
+
/*
* Flips in the rings will be nuked by the reset,
* so complete all pending flips so that user space
@@ -3138,6 +3208,8 @@ void intel_finish_reset(struct drm_i915_private *dev_priv)
if (IS_GEN2(dev_priv))
return;
+ dev_priv->modeset_restore_state = NULL;
+
/* reset doesn't touch the display */
if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) {
/*
@@ -3149,29 +3221,32 @@ void intel_finish_reset(struct drm_i915_private *dev_priv)
* FIXME: Atomic will make this obsolete since we won't schedule
* CS-based flips (which might get lost in gpu resets) any more.
*/
- intel_update_primary_planes(&dev_priv->drm);
- return;
- }
-
- /*
- * The display has been reset as well,
- * so need a full re-initialization.
- */
- intel_runtime_pm_disable_interrupts(dev_priv);
- intel_runtime_pm_enable_interrupts(dev_priv);
+ intel_update_primary_planes(dev);
+ } else {
+ /*
+ * The display has been reset as well,
+ * so need a full re-initialization.
+ */
+ intel_runtime_pm_disable_interrupts(dev_priv);
+ intel_runtime_pm_enable_interrupts(dev_priv);
- intel_modeset_init_hw(&dev_priv->drm);
+ intel_modeset_init_hw(dev);
- spin_lock_irq(&dev_priv->irq_lock);
- if (dev_priv->display.hpd_irq_setup)
- dev_priv->display.hpd_irq_setup(dev_priv);
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&dev_priv->irq_lock);
+ if (dev_priv->display.hpd_irq_setup)
+ dev_priv->display.hpd_irq_setup(dev_priv);
+ spin_unlock_irq(&dev_priv->irq_lock);
- intel_display_resume(&dev_priv->drm);
+ ret = __intel_display_resume(dev, state);
+ if (ret)
+ DRM_ERROR("Restoring old state failed with %i\n", ret);
- intel_hpd_init(dev_priv);
+ intel_hpd_init(dev_priv);
+ }
- drm_modeset_unlock_all(&dev_priv->drm);
+ drm_modeset_drop_locks(ctx);
+ drm_modeset_acquire_fini(ctx);
+ mutex_unlock(&dev->mode_config.mutex);
}
static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
@@ -13684,6 +13759,13 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
intel_state->cdclk_pll_vco != dev_priv->cdclk_pll.vco))
dev_priv->display.modeset_commit_cdclk(state);
+ /*
+ * SKL workaround: bspec recommends we disable the SAGV when we
+	 * have more than one pipe enabled
+ */
+ if (IS_SKYLAKE(dev_priv) && !skl_can_enable_sagv(state))
+ skl_disable_sagv(dev_priv);
+
intel_modeset_verify_disabled(dev);
}
@@ -13757,6 +13839,10 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
intel_modeset_verify_crtc(crtc, old_crtc_state, crtc->state);
}
+ if (IS_SKYLAKE(dev_priv) && intel_state->modeset &&
+ skl_can_enable_sagv(state))
+ skl_enable_sagv(dev_priv);
+
drm_atomic_helper_commit_hw_done(state);
if (intel_state->modeset)
@@ -16156,9 +16242,10 @@ void intel_display_resume(struct drm_device *dev)
struct drm_atomic_state *state = dev_priv->modeset_restore_state;
struct drm_modeset_acquire_ctx ctx;
int ret;
- bool setup = false;
dev_priv->modeset_restore_state = NULL;
+ if (state)
+ state->acquire_ctx = &ctx;
/*
* This is a cludge because with real atomic modeset mode_config.mutex
@@ -16169,43 +16256,17 @@ void intel_display_resume(struct drm_device *dev)
mutex_lock(&dev->mode_config.mutex);
drm_modeset_acquire_init(&ctx, 0);
-retry:
- ret = drm_modeset_lock_all_ctx(dev, &ctx);
-
- if (ret == 0 && !setup) {
- setup = true;
-
- intel_modeset_setup_hw_state(dev);
- i915_redisable_vga(dev);
- }
-
- if (ret == 0 && state) {
- struct drm_crtc_state *crtc_state;
- struct drm_crtc *crtc;
- int i;
-
- state->acquire_ctx = &ctx;
-
- /* ignore any reset values/BIOS leftovers in the WM registers */
- to_intel_atomic_state(state)->skip_intermediate_wm = true;
-
- for_each_crtc_in_state(state, crtc, crtc_state, i) {
- /*
- * Force recalculation even if we restore
- * current state. With fast modeset this may not result
- * in a modeset when the state is compatible.
- */
- crtc_state->mode_changed = true;
- }
-
- ret = drm_atomic_commit(state);
- }
+ while (1) {
+ ret = drm_modeset_lock_all_ctx(dev, &ctx);
+ if (ret != -EDEADLK)
+ break;
- if (ret == -EDEADLK) {
drm_modeset_backoff(&ctx);
- goto retry;
}
+ if (!ret)
+ ret = __intel_display_resume(dev, state);
+
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
mutex_unlock(&dev->mode_config.mutex);
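
Both intel_prepare_reset() and the rewritten intel_display_resume() now loop on drm_modeset_lock_all_ctx(), backing off on -EDEADLK instead of open-coding a retry per caller. A userspace analog of the drop-everything-and-retry idea, using a trylock on the second lock:

	#include <pthread.h>
	#include <sched.h>
	#include <stdio.h>

	static pthread_mutex_t a = PTHREAD_MUTEX_INITIALIZER;
	static pthread_mutex_t b = PTHREAD_MUTEX_INITIALIZER;

	static void lock_both(void)
	{
		for (;;) {
			pthread_mutex_lock(&a);
			if (pthread_mutex_trylock(&b) == 0)
				return;			/* got both locks */
			pthread_mutex_unlock(&a);	/* back off, then retry */
			sched_yield();
		}
	}

	int main(void)
	{
		lock_both();
		puts("both locks held");
		pthread_mutex_unlock(&b);
		pthread_mutex_unlock(&a);
		return 0;
	}
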
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index cc937a19b1ba..ff399b9a5c1f 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -1716,6 +1716,9 @@ void ilk_wm_get_hw_state(struct drm_device *dev);
void skl_wm_get_hw_state(struct drm_device *dev);
void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
struct skl_ddb_allocation *ddb /* out */);
+bool skl_can_enable_sagv(struct drm_atomic_state *state);
+int skl_enable_sagv(struct drm_i915_private *dev_priv);
+int skl_disable_sagv(struct drm_i915_private *dev_priv);
uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config);
bool ilk_disable_lp_wm(struct drm_device *dev);
int sanitize_rc6_option(struct drm_i915_private *dev_priv, int enable_rc6);
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index 6a7ad3ed1463..3836a1c79714 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -1230,12 +1230,29 @@ static int intel_sanitize_fbc_option(struct drm_i915_private *dev_priv)
if (i915.enable_fbc >= 0)
return !!i915.enable_fbc;
+ if (!HAS_FBC(dev_priv))
+ return 0;
+
if (IS_BROADWELL(dev_priv))
return 1;
return 0;
}
+static bool need_fbc_vtd_wa(struct drm_i915_private *dev_priv)
+{
+#ifdef CONFIG_INTEL_IOMMU
+ /* WaFbcTurnOffFbcWhenHyperVisorIsUsed:skl,bxt */
+ if (intel_iommu_gfx_mapped &&
+ (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))) {
+ DRM_INFO("Disabling framebuffer compression (FBC) to prevent screen flicker with VT-d enabled\n");
+ return true;
+ }
+#endif
+
+ return false;
+}
+
/**
* intel_fbc_init - Initialize FBC
* @dev_priv: the i915 device
@@ -1253,6 +1270,9 @@ void intel_fbc_init(struct drm_i915_private *dev_priv)
fbc->active = false;
fbc->work.scheduled = false;
+ if (need_fbc_vtd_wa(dev_priv))
+ mkwrite_device_info(dev_priv)->has_fbc = false;
+
i915.enable_fbc = intel_sanitize_fbc_option(dev_priv);
DRM_DEBUG_KMS("Sanitized enable_fbc value: %d\n", i915.enable_fbc);
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 97ba6c8cf907..53e13c10e4ea 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2852,6 +2852,7 @@ bool ilk_disable_lp_wm(struct drm_device *dev)
#define SKL_DDB_SIZE 896 /* in blocks */
#define BXT_DDB_SIZE 512
+#define SKL_SAGV_BLOCK_TIME 30 /* µs */
/*
* Return the index of a plane in the SKL DDB and wm result arrays. Primary
@@ -2875,6 +2876,153 @@ skl_wm_plane_id(const struct intel_plane *plane)
}
}
+/*
+ * SAGV dynamically adjusts the system agent voltage and clock frequencies
+ * depending on power and performance requirements. The display engine access
+ * to system memory is blocked during the adjustment time. Because of the
+ * blocking time, having this enabled can cause full system hangs and/or pipe
+ * underruns if we don't meet all of the following requirements:
+ *
+ * - <= 1 pipe enabled
+ * - All planes can enable watermarks for latencies >= SAGV engine block time
+ * - We're not using an interlaced display configuration
+ */
+int
+skl_enable_sagv(struct drm_i915_private *dev_priv)
+{
+ int ret;
+
+ if (dev_priv->skl_sagv_status == I915_SKL_SAGV_NOT_CONTROLLED ||
+ dev_priv->skl_sagv_status == I915_SKL_SAGV_ENABLED)
+ return 0;
+
+ DRM_DEBUG_KMS("Enabling the SAGV\n");
+ mutex_lock(&dev_priv->rps.hw_lock);
+
+ ret = sandybridge_pcode_write(dev_priv, GEN9_PCODE_SAGV_CONTROL,
+ GEN9_SAGV_ENABLE);
+
+ /* We don't need to wait for the SAGV when enabling */
+ mutex_unlock(&dev_priv->rps.hw_lock);
+
+ /*
+ * Some skl systems, pre-release machines in particular,
+ * don't actually have an SAGV.
+ */
+ if (ret == -ENXIO) {
+ DRM_DEBUG_DRIVER("No SAGV found on system, ignoring\n");
+ dev_priv->skl_sagv_status = I915_SKL_SAGV_NOT_CONTROLLED;
+ return 0;
+ } else if (ret < 0) {
+ DRM_ERROR("Failed to enable the SAGV\n");
+ return ret;
+ }
+
+ dev_priv->skl_sagv_status = I915_SKL_SAGV_ENABLED;
+ return 0;
+}
+
+static int
+skl_do_sagv_disable(struct drm_i915_private *dev_priv)
+{
+ int ret;
+ uint32_t temp = GEN9_SAGV_DISABLE;
+
+ ret = sandybridge_pcode_read(dev_priv, GEN9_PCODE_SAGV_CONTROL,
+ &temp);
+ if (ret)
+ return ret;
+ else
+ return temp & GEN9_SAGV_IS_DISABLED;
+}
+
+int
+skl_disable_sagv(struct drm_i915_private *dev_priv)
+{
+ int ret, result;
+
+ if (dev_priv->skl_sagv_status == I915_SKL_SAGV_NOT_CONTROLLED ||
+ dev_priv->skl_sagv_status == I915_SKL_SAGV_DISABLED)
+ return 0;
+
+ DRM_DEBUG_KMS("Disabling the SAGV\n");
+ mutex_lock(&dev_priv->rps.hw_lock);
+
+ /* bspec says to keep retrying for at least 1 ms */
+ ret = wait_for(result = skl_do_sagv_disable(dev_priv), 1);
+ mutex_unlock(&dev_priv->rps.hw_lock);
+
+ if (ret == -ETIMEDOUT) {
+ DRM_ERROR("Request to disable SAGV timed out\n");
+ return -ETIMEDOUT;
+ }
+
+ /*
+ * Some skl systems, pre-release machines in particular,
+ * don't actually have an SAGV.
+ */
+ if (result == -ENXIO) {
+ DRM_DEBUG_DRIVER("No SAGV found on system, ignoring\n");
+ dev_priv->skl_sagv_status = I915_SKL_SAGV_NOT_CONTROLLED;
+ return 0;
+ } else if (result < 0) {
+ DRM_ERROR("Failed to disable the SAGV\n");
+ return result;
+ }
+
+ dev_priv->skl_sagv_status = I915_SKL_SAGV_DISABLED;
+ return 0;
+}
+
+bool skl_can_enable_sagv(struct drm_atomic_state *state)
+{
+ struct drm_device *dev = state->dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
+ struct drm_crtc *crtc;
+ enum pipe pipe;
+ int level, plane;
+
+ /*
+ * SKL workaround: bspec recommends we disable the SAGV when we have
+	 * more than one pipe enabled
+ *
+ * If there are no active CRTCs, no additional checks need be performed
+ */
+ if (hweight32(intel_state->active_crtcs) == 0)
+ return true;
+ else if (hweight32(intel_state->active_crtcs) > 1)
+ return false;
+
+ /* Since we're now guaranteed to only have one active CRTC... */
+ pipe = ffs(intel_state->active_crtcs) - 1;
+ crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+
+ if (crtc->state->mode.flags & DRM_MODE_FLAG_INTERLACE)
+ return false;
+
+ for_each_plane(dev_priv, pipe, plane) {
+ /* Skip this plane if it's not enabled */
+ if (intel_state->wm_results.plane[pipe][plane][0] == 0)
+ continue;
+
+ /* Find the highest enabled wm level for this plane */
+ for (level = ilk_wm_max_level(dev);
+ intel_state->wm_results.plane[pipe][plane][level] == 0; --level)
+ { }
+
+ /*
+ * If any of the planes on this pipe don't enable wm levels
+	 * that incur memory latencies higher than 30µs, we can't enable
+ * the SAGV
+ */
+ if (dev_priv->wm.skl_latency[level] < SKL_SAGV_BLOCK_TIME)
+ return false;
+ }
+
+ return true;
+}
+
static void
skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
const struct intel_crtc_state *cstate,
@@ -3107,8 +3255,6 @@ skl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate)
total_data_rate += intel_cstate->wm.skl.plane_y_data_rate[id];
}
- WARN_ON(cstate->plane_mask && total_data_rate == 0);
-
return total_data_rate;
}
@@ -3344,6 +3490,8 @@ static uint32_t skl_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
plane_bytes_per_line *= 4;
plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
plane_blocks_per_line /= 4;
+ } else if (tiling == DRM_FORMAT_MOD_NONE) {
+ plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512) + 1;
} else {
plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
}
@@ -3910,9 +4058,24 @@ skl_compute_ddb(struct drm_atomic_state *state)
* pretend that all pipes switched active status so that we'll
* ensure a full DDB recompute.
*/
- if (dev_priv->wm.distrust_bios_wm)
+ if (dev_priv->wm.distrust_bios_wm) {
+ ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
+ state->acquire_ctx);
+ if (ret)
+ return ret;
+
intel_state->active_pipe_changes = ~0;
+ /*
+	 * We usually only initialize intel_state->active_crtcs if
+ * we're doing a modeset; make sure this field is always
+ * initialized during the sanitization process that happens
+ * on the first commit too.
+ */
+ if (!intel_state->modeset)
+ intel_state->active_crtcs = dev_priv->active_crtcs;
+ }
+
/*
* If the modeset changes which CRTC's are active, we need to
* recompute the DDB allocation for *all* active pipes, even
@@ -3941,11 +4104,33 @@ skl_compute_ddb(struct drm_atomic_state *state)
ret = skl_allocate_pipe_ddb(cstate, ddb);
if (ret)
return ret;
+
+ ret = drm_atomic_add_affected_planes(state, &intel_crtc->base);
+ if (ret)
+ return ret;
}
return 0;
}
+static void
+skl_copy_wm_for_pipe(struct skl_wm_values *dst,
+ struct skl_wm_values *src,
+ enum pipe pipe)
+{
+ dst->wm_linetime[pipe] = src->wm_linetime[pipe];
+ memcpy(dst->plane[pipe], src->plane[pipe],
+ sizeof(dst->plane[pipe]));
+ memcpy(dst->plane_trans[pipe], src->plane_trans[pipe],
+ sizeof(dst->plane_trans[pipe]));
+
+ dst->ddb.pipe[pipe] = src->ddb.pipe[pipe];
+ memcpy(dst->ddb.y_plane[pipe], src->ddb.y_plane[pipe],
+ sizeof(dst->ddb.y_plane[pipe]));
+ memcpy(dst->ddb.plane[pipe], src->ddb.plane[pipe],
+ sizeof(dst->ddb.plane[pipe]));
+}
+
static int
skl_compute_wm(struct drm_atomic_state *state)
{
@@ -4018,8 +4203,10 @@ static void skl_update_wm(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct skl_wm_values *results = &dev_priv->wm.skl_results;
+ struct skl_wm_values *hw_vals = &dev_priv->wm.skl_hw;
struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
struct skl_pipe_wm *pipe_wm = &cstate->wm.skl.optimal;
+ int pipe;
if ((results->dirty_pipes & drm_crtc_mask(crtc)) == 0)
return;
@@ -4031,8 +4218,12 @@ static void skl_update_wm(struct drm_crtc *crtc)
skl_write_wm_values(dev_priv, results);
skl_flush_wm_values(dev_priv, results);
- /* store the new configuration */
- dev_priv->wm.skl_hw = *results;
+ /*
+ * Store the new configuration (but only for the pipes that have
+ * changed; the other values weren't recomputed).
+ */
+ for_each_pipe_masked(dev_priv, pipe, results->dirty_pipes)
+ skl_copy_wm_for_pipe(hw_vals, results, pipe);
mutex_unlock(&dev_priv->wm.wm_mutex);
}
@@ -6574,9 +6765,7 @@ void intel_init_gt_powersave(struct drm_i915_private *dev_priv)
void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
{
- if (IS_CHERRYVIEW(dev_priv))
- return;
- else if (IS_VALLEYVIEW(dev_priv))
+ if (IS_VALLEYVIEW(dev_priv))
valleyview_cleanup_gt_powersave(dev_priv);
if (!i915.enable_rc6)
@@ -7658,8 +7847,53 @@ void intel_init_pm(struct drm_device *dev)
}
}
+static inline int gen6_check_mailbox_status(struct drm_i915_private *dev_priv)
+{
+ uint32_t flags =
+ I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_ERROR_MASK;
+
+ switch (flags) {
+ case GEN6_PCODE_SUCCESS:
+ return 0;
+ case GEN6_PCODE_UNIMPLEMENTED_CMD:
+ case GEN6_PCODE_ILLEGAL_CMD:
+ return -ENXIO;
+ case GEN6_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
+ return -EOVERFLOW;
+ case GEN6_PCODE_TIMEOUT:
+ return -ETIMEDOUT;
+ default:
+		MISSING_CASE(flags);
+ return 0;
+ }
+}
+
+static inline int gen7_check_mailbox_status(struct drm_i915_private *dev_priv)
+{
+ uint32_t flags =
+ I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_ERROR_MASK;
+
+ switch (flags) {
+ case GEN6_PCODE_SUCCESS:
+ return 0;
+ case GEN6_PCODE_ILLEGAL_CMD:
+ return -ENXIO;
+ case GEN7_PCODE_TIMEOUT:
+ return -ETIMEDOUT;
+ case GEN7_PCODE_ILLEGAL_DATA:
+ return -EINVAL;
+ case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
+ return -EOVERFLOW;
+ default:
+ MISSING_CASE(flags);
+ return 0;
+ }
+}
+
int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val)
{
+ int status;
+
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
/* GEN6_PCODE_* are outside of the forcewake domain, we can
@@ -7686,12 +7920,25 @@ int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val
*val = I915_READ_FW(GEN6_PCODE_DATA);
I915_WRITE_FW(GEN6_PCODE_DATA, 0);
+ if (INTEL_GEN(dev_priv) > 6)
+ status = gen7_check_mailbox_status(dev_priv);
+ else
+ status = gen6_check_mailbox_status(dev_priv);
+
+ if (status) {
+ DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed: %d\n",
+ status);
+ return status;
+ }
+
return 0;
}
int sandybridge_pcode_write(struct drm_i915_private *dev_priv,
- u32 mbox, u32 val)
+ u32 mbox, u32 val)
{
+ int status;
+
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
/* GEN6_PCODE_* are outside of the forcewake domain, we can
@@ -7716,6 +7963,17 @@ int sandybridge_pcode_write(struct drm_i915_private *dev_priv,
I915_WRITE_FW(GEN6_PCODE_DATA, 0);
+ if (INTEL_GEN(dev_priv) > 6)
+ status = gen7_check_mailbox_status(dev_priv);
+ else
+ status = gen6_check_mailbox_status(dev_priv);
+
+ if (status) {
+ DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed: %d\n",
+ status);
+ return status;
+ }
+
return 0;
}
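
With the mailbox error flags decoded into errnos, the SAGV helpers earlier in this file can tell "this part has no SAGV" (-ENXIO from an unimplemented/illegal command) apart from real failures. A toy rendering of that caller-side split; pcode_write() is a made-up stand-in for sandybridge_pcode_write():

	#include <errno.h>
	#include <stdio.h>

	/* stand-in for sandybridge_pcode_write(); pretend pcode answered
	 * "unimplemented command", which the new checks map to -ENXIO */
	static int pcode_write(unsigned int mbox, unsigned int val)
	{
		(void)mbox;
		(void)val;
		return -ENXIO;
	}

	int main(void)
	{
		int ret = pcode_write(0x21, 0x3);	/* SAGV control, enable */

		if (ret == -ENXIO)
			puts("No SAGV found on system, ignoring");
		else if (ret < 0)
			fprintf(stderr, "Failed to enable the SAGV: %d\n", ret);
		else
			puts("SAGV enabled");
		return 0;
	}
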
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index cca7792f26d5..1d3161bbea24 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1178,8 +1178,8 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine)
I915_WRITE(GEN8_L3SQCREG1, L3_GENERAL_PRIO_CREDITS(62) |
L3_HIGH_PRIO_CREDITS(2));
- /* WaInsertDummyPushConstPs:bxt */
- if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0))
+ /* WaToEnableHwFixForPushConstHWBug:bxt */
+ if (IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER))
WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
@@ -1222,8 +1222,8 @@ static int kbl_init_workarounds(struct intel_engine_cs *engine)
I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
GEN8_LQSC_RO_PERF_DIS);
- /* WaInsertDummyPushConstPs:kbl */
- if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
+ /* WaToEnableHwFixForPushConstHWBug:kbl */
+ if (IS_KBL_REVID(dev_priv, KBL_REVID_C0, REVID_FOREVER))
WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
diff --git a/drivers/gpu/drm/mediatek/Kconfig b/drivers/gpu/drm/mediatek/Kconfig
index 23ac8041c562..294de4549922 100644
--- a/drivers/gpu/drm/mediatek/Kconfig
+++ b/drivers/gpu/drm/mediatek/Kconfig
@@ -2,6 +2,9 @@ config DRM_MEDIATEK
tristate "DRM Support for Mediatek SoCs"
depends on DRM
depends on ARCH_MEDIATEK || (ARM && COMPILE_TEST)
+ depends on COMMON_CLK
+ depends on HAVE_ARM_SMCCC
+ depends on OF
select DRM_GEM_CMA_HELPER
select DRM_KMS_HELPER
select DRM_MIPI_DSI
diff --git a/drivers/gpu/drm/qxl/qxl_fb.c b/drivers/gpu/drm/qxl/qxl_fb.c
index df2657051afd..28c1423049c5 100644
--- a/drivers/gpu/drm/qxl/qxl_fb.c
+++ b/drivers/gpu/drm/qxl/qxl_fb.c
@@ -73,10 +73,12 @@ static void qxl_fb_image_init(struct qxl_fb_image *qxl_fb_image,
}
}
+#ifdef CONFIG_DRM_FBDEV_EMULATION
static struct fb_deferred_io qxl_defio = {
.delay = QXL_DIRTY_DELAY,
.deferred_io = drm_fb_helper_deferred_io,
};
+#endif
static struct fb_ops qxlfb_ops = {
.owner = THIS_MODULE,
@@ -313,8 +315,10 @@ static int qxlfb_create(struct qxl_fbdev *qfbdev,
goto out_destroy_fbi;
}
+#ifdef CONFIG_DRM_FBDEV_EMULATION
info->fbdefio = &qxl_defio;
fb_deferred_io_init(info);
+#endif
qdev->fbdev_info = info;
qdev->fbdev_qfb = &qfbdev->qfb;
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index a97abc8af657..1dcf39084555 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -627,7 +627,9 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
if (radeon_crtc->ss.refdiv) {
radeon_crtc->pll_flags |= RADEON_PLL_USE_REF_DIV;
radeon_crtc->pll_reference_div = radeon_crtc->ss.refdiv;
- if (rdev->family >= CHIP_RV770)
+ if (ASIC_IS_AVIVO(rdev) &&
+ rdev->family != CHIP_RS780 &&
+ rdev->family != CHIP_RS880)
radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
}
}
diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
index 6de342861202..ddef0d494084 100644
--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
@@ -198,16 +198,7 @@ static int radeon_atpx_validate(struct radeon_atpx *atpx)
atpx->is_hybrid = false;
if (valid_bits & ATPX_MS_HYBRID_GFX_SUPPORTED) {
printk("ATPX Hybrid Graphics\n");
-#if 1
- /* This is a temporary hack until the D3 cold support
- * makes it upstream. The ATPX power_control method seems
- * to still work on even if the system should be using
- * the new standardized hybrid D3 cold ACPI interface.
- */
- atpx->functions.power_cntl = true;
-#else
atpx->functions.power_cntl = false;
-#endif
atpx->is_hybrid = true;
}
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 0c00e192c845..c2e0a1ccdfbc 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -263,8 +263,8 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
rdev = radeon_get_rdev(bo->bdev);
ridx = radeon_copy_ring_index(rdev);
- old_start = old_mem->start << PAGE_SHIFT;
- new_start = new_mem->start << PAGE_SHIFT;
+ old_start = (u64)old_mem->start << PAGE_SHIFT;
+ new_start = (u64)new_mem->start << PAGE_SHIFT;
switch (old_mem->mem_type) {
case TTM_PL_VRAM:
diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
index 3d228ad90e0f..3dea1216bafd 100644
--- a/drivers/gpu/drm/tegra/dsi.c
+++ b/drivers/gpu/drm/tegra/dsi.c
@@ -840,6 +840,21 @@ static const struct drm_encoder_funcs tegra_dsi_encoder_funcs = {
.destroy = tegra_output_encoder_destroy,
};
+static void tegra_dsi_unprepare(struct tegra_dsi *dsi)
+{
+ int err;
+
+ if (dsi->slave)
+ tegra_dsi_unprepare(dsi->slave);
+
+ err = tegra_mipi_disable(dsi->mipi);
+ if (err < 0)
+ dev_err(dsi->dev, "failed to disable MIPI calibration: %d\n",
+ err);
+
+ pm_runtime_put(dsi->dev);
+}
+
static void tegra_dsi_encoder_disable(struct drm_encoder *encoder)
{
struct tegra_output *output = encoder_to_output(encoder);
@@ -876,7 +891,26 @@ static void tegra_dsi_encoder_disable(struct drm_encoder *encoder)
tegra_dsi_disable(dsi);
- pm_runtime_put(dsi->dev);
+ tegra_dsi_unprepare(dsi);
+}
+
+static void tegra_dsi_prepare(struct tegra_dsi *dsi)
+{
+ int err;
+
+ pm_runtime_get_sync(dsi->dev);
+
+ err = tegra_mipi_enable(dsi->mipi);
+ if (err < 0)
+ dev_err(dsi->dev, "failed to enable MIPI calibration: %d\n",
+ err);
+
+ err = tegra_dsi_pad_calibrate(dsi);
+ if (err < 0)
+ dev_err(dsi->dev, "MIPI calibration failed: %d\n", err);
+
+ if (dsi->slave)
+ tegra_dsi_prepare(dsi->slave);
}
static void tegra_dsi_encoder_enable(struct drm_encoder *encoder)
@@ -887,13 +921,8 @@ static void tegra_dsi_encoder_enable(struct drm_encoder *encoder)
struct tegra_dsi *dsi = to_dsi(output);
struct tegra_dsi_state *state;
u32 value;
- int err;
-
- pm_runtime_get_sync(dsi->dev);
- err = tegra_dsi_pad_calibrate(dsi);
- if (err < 0)
- dev_err(dsi->dev, "MIPI calibration failed: %d\n", err);
+ tegra_dsi_prepare(dsi);
state = tegra_dsi_get_state(dsi);
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
index d5df555aeba0..9688bfa92ccd 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -203,6 +203,7 @@ static int udl_fb_open(struct fb_info *info, int user)
ufbdev->fb_count++;
+#ifdef CONFIG_DRM_FBDEV_EMULATION
if (fb_defio && (info->fbdefio == NULL)) {
/* enable defio at last moment if not disabled by client */
@@ -218,6 +219,7 @@ static int udl_fb_open(struct fb_info *info, int user)
info->fbdefio = fbdefio;
fb_deferred_io_init(info);
}
+#endif
pr_notice("open /dev/fb%d user=%d fb_info=%p count=%d\n",
info->node, user, info, ufbdev->fb_count);
@@ -235,12 +237,14 @@ static int udl_fb_release(struct fb_info *info, int user)
ufbdev->fb_count--;
+#ifdef CONFIG_DRM_FBDEV_EMULATION
if ((ufbdev->fb_count == 0) && (info->fbdefio)) {
fb_deferred_io_cleanup(info);
kfree(info->fbdefio);
info->fbdefio = NULL;
info->fbops->fb_mmap = udl_fb_mmap;
}
+#endif
pr_warn("released /dev/fb%d user=%d count=%d\n",
info->node, user, ufbdev->fb_count);
diff --git a/drivers/gpu/host1x/mipi.c b/drivers/gpu/host1x/mipi.c
index 52a6fd224127..e00809d996a2 100644
--- a/drivers/gpu/host1x/mipi.c
+++ b/drivers/gpu/host1x/mipi.c
@@ -242,20 +242,6 @@ struct tegra_mipi_device *tegra_mipi_request(struct device *device)
dev->pads = args.args[0];
dev->device = device;
- mutex_lock(&dev->mipi->lock);
-
- if (dev->mipi->usage_count++ == 0) {
- err = tegra_mipi_power_up(dev->mipi);
- if (err < 0) {
- dev_err(dev->mipi->dev,
- "failed to power up MIPI bricks: %d\n",
- err);
- return ERR_PTR(err);
- }
- }
-
- mutex_unlock(&dev->mipi->lock);
-
return dev;
put:
@@ -270,29 +256,42 @@ EXPORT_SYMBOL(tegra_mipi_request);
void tegra_mipi_free(struct tegra_mipi_device *device)
{
- int err;
+ platform_device_put(device->pdev);
+ kfree(device);
+}
+EXPORT_SYMBOL(tegra_mipi_free);
- mutex_lock(&device->mipi->lock);
+int tegra_mipi_enable(struct tegra_mipi_device *dev)
+{
+ int err = 0;
- if (--device->mipi->usage_count == 0) {
- err = tegra_mipi_power_down(device->mipi);
- if (err < 0) {
- /*
- * Not much that can be done here, so an error message
- * will have to do.
- */
- dev_err(device->mipi->dev,
- "failed to power down MIPI bricks: %d\n",
- err);
- }
- }
+ mutex_lock(&dev->mipi->lock);
- mutex_unlock(&device->mipi->lock);
+ if (dev->mipi->usage_count++ == 0)
+ err = tegra_mipi_power_up(dev->mipi);
+
+ mutex_unlock(&dev->mipi->lock);
+
+ return err;
- platform_device_put(device->pdev);
- kfree(device);
}
-EXPORT_SYMBOL(tegra_mipi_free);
+EXPORT_SYMBOL(tegra_mipi_enable);
+
+int tegra_mipi_disable(struct tegra_mipi_device *dev)
+{
+ int err = 0;
+
+ mutex_lock(&dev->mipi->lock);
+
+ if (--dev->mipi->usage_count == 0)
+ err = tegra_mipi_power_down(dev->mipi);
+
+ mutex_unlock(&dev->mipi->lock);
+
+ return err;
+
+}
+EXPORT_SYMBOL(tegra_mipi_disable);
static int tegra_mipi_wait(struct tegra_mipi *mipi)
{
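
tegra_mipi_enable()/tegra_mipi_disable() move the brick power handling out of request/free and behind usage_count: the first user powers up, the last one powers down, all under the mutex. A userspace sketch of the idiom; power_up()/power_down() are placeholders:

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static unsigned int usage_count;

	static int power_up(void)   { puts("power up");   return 0; }
	static int power_down(void) { puts("power down"); return 0; }

	static int mipi_enable(void)
	{
		int err = 0;

		pthread_mutex_lock(&lock);
		if (usage_count++ == 0)		/* first user */
			err = power_up();
		pthread_mutex_unlock(&lock);

		return err;
	}

	static int mipi_disable(void)
	{
		int err = 0;

		pthread_mutex_lock(&lock);
		if (--usage_count == 0)		/* last user */
			err = power_down();
		pthread_mutex_unlock(&lock);

		return err;
	}

	int main(void)
	{
		mipi_enable();	/* powers up */
		mipi_enable();	/* count only */
		mipi_disable();	/* count only */
		mipi_disable();	/* powers down */
		return 0;
	}
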
diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c
index 730d84028260..4667012b46b7 100644
--- a/drivers/hwmon/it87.c
+++ b/drivers/hwmon/it87.c
@@ -491,7 +491,7 @@ struct it87_sio_data {
struct it87_data {
const struct attribute_group *groups[7];
enum chips type;
- u16 features;
+ u32 features;
u8 peci_mask;
u8 old_peci_mask;
@@ -2015,6 +2015,7 @@ static struct attribute *it87_attributes_in[] = {
&sensor_dev_attr_in10_input.dev_attr.attr, /* 41 */
&sensor_dev_attr_in11_input.dev_attr.attr, /* 41 */
&sensor_dev_attr_in12_input.dev_attr.attr, /* 41 */
+ NULL
};
static const struct attribute_group it87_group_in = {
diff --git a/drivers/i2c/busses/i2c-at91.c b/drivers/i2c/busses/i2c-at91.c
index f23372669f77..1bb97f658b47 100644
--- a/drivers/i2c/busses/i2c-at91.c
+++ b/drivers/i2c/busses/i2c-at91.c
@@ -38,6 +38,7 @@
#define AT91_I2C_TIMEOUT msecs_to_jiffies(100) /* transfer timeout */
#define AT91_I2C_DMA_THRESHOLD 8 /* enable DMA if transfer size is bigger than this threshold */
#define AUTOSUSPEND_TIMEOUT 2000
+#define AT91_I2C_MAX_ALT_CMD_DATA_SIZE 256
/* AT91 TWI register definitions */
#define AT91_TWI_CR 0x0000 /* Control Register */
@@ -141,6 +142,7 @@ struct at91_twi_dev {
unsigned twi_cwgr_reg;
struct at91_twi_pdata *pdata;
bool use_dma;
+ bool use_alt_cmd;
bool recv_len_abort;
u32 fifo_size;
struct at91_twi_dma dma;
@@ -269,7 +271,7 @@ static void at91_twi_write_next_byte(struct at91_twi_dev *dev)
/* send stop when last byte has been written */
if (--dev->buf_len == 0)
- if (!dev->pdata->has_alt_cmd)
+ if (!dev->use_alt_cmd)
at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP);
dev_dbg(dev->dev, "wrote 0x%x, to go %d\n", *dev->buf, dev->buf_len);
@@ -292,7 +294,7 @@ static void at91_twi_write_data_dma_callback(void *data)
* we just have to enable TXCOMP one.
*/
at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_TXCOMP);
- if (!dev->pdata->has_alt_cmd)
+ if (!dev->use_alt_cmd)
at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP);
}
@@ -410,7 +412,7 @@ static void at91_twi_read_next_byte(struct at91_twi_dev *dev)
}
/* send stop if second but last byte has been read */
- if (!dev->pdata->has_alt_cmd && dev->buf_len == 1)
+ if (!dev->use_alt_cmd && dev->buf_len == 1)
at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP);
dev_dbg(dev->dev, "read 0x%x, to go %d\n", *dev->buf, dev->buf_len);
@@ -426,7 +428,7 @@ static void at91_twi_read_data_dma_callback(void *data)
dma_unmap_single(dev->dev, sg_dma_address(&dev->dma.sg[0]),
dev->buf_len, DMA_FROM_DEVICE);
- if (!dev->pdata->has_alt_cmd) {
+ if (!dev->use_alt_cmd) {
/* The last two bytes have to be read without using dma */
dev->buf += dev->buf_len - 2;
dev->buf_len = 2;
@@ -443,7 +445,7 @@ static void at91_twi_read_data_dma(struct at91_twi_dev *dev)
struct dma_chan *chan_rx = dma->chan_rx;
size_t buf_len;
- buf_len = (dev->pdata->has_alt_cmd) ? dev->buf_len : dev->buf_len - 2;
+ buf_len = (dev->use_alt_cmd) ? dev->buf_len : dev->buf_len - 2;
dma->direction = DMA_FROM_DEVICE;
/* Keep in mind that we won't use dma to read the last two bytes */
@@ -651,7 +653,7 @@ static int at91_do_twi_transfer(struct at91_twi_dev *dev)
unsigned start_flags = AT91_TWI_START;
/* if only one byte is to be read, immediately stop transfer */
- if (!has_alt_cmd && dev->buf_len <= 1 &&
+ if (!dev->use_alt_cmd && dev->buf_len <= 1 &&
!(dev->msg->flags & I2C_M_RECV_LEN))
start_flags |= AT91_TWI_STOP;
at91_twi_write(dev, AT91_TWI_CR, start_flags);
@@ -745,7 +747,7 @@ static int at91_twi_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, int num)
int ret;
unsigned int_addr_flag = 0;
struct i2c_msg *m_start = msg;
- bool is_read, use_alt_cmd = false;
+ bool is_read;
dev_dbg(&adap->dev, "at91_xfer: processing %d messages:\n", num);
@@ -768,14 +770,16 @@ static int at91_twi_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, int num)
at91_twi_write(dev, AT91_TWI_IADR, internal_address);
}
+ dev->use_alt_cmd = false;
is_read = (m_start->flags & I2C_M_RD);
if (dev->pdata->has_alt_cmd) {
- if (m_start->len > 0) {
+ if (m_start->len > 0 &&
+ m_start->len < AT91_I2C_MAX_ALT_CMD_DATA_SIZE) {
at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_ACMEN);
at91_twi_write(dev, AT91_TWI_ACR,
AT91_TWI_ACR_DATAL(m_start->len) |
((is_read) ? AT91_TWI_ACR_DIR : 0));
- use_alt_cmd = true;
+ dev->use_alt_cmd = true;
} else {
at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_ACMDIS);
}
@@ -784,7 +788,7 @@ static int at91_twi_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, int num)
at91_twi_write(dev, AT91_TWI_MMR,
(m_start->addr << 16) |
int_addr_flag |
- ((!use_alt_cmd && is_read) ? AT91_TWI_MREAD : 0));
+ ((!dev->use_alt_cmd && is_read) ? AT91_TWI_MREAD : 0));
dev->buf_len = m_start->len;
dev->buf = m_start->buf;
diff --git a/drivers/i2c/busses/i2c-bcm-iproc.c b/drivers/i2c/busses/i2c-bcm-iproc.c
index 19c843828fe2..95f7cac76f89 100644
--- a/drivers/i2c/busses/i2c-bcm-iproc.c
+++ b/drivers/i2c/busses/i2c-bcm-iproc.c
@@ -158,7 +158,7 @@ static irqreturn_t bcm_iproc_i2c_isr(int irq, void *data)
if (status & BIT(IS_M_START_BUSY_SHIFT)) {
iproc_i2c->xfer_is_done = 1;
- complete_all(&iproc_i2c->done);
+ complete(&iproc_i2c->done);
}
writel(status, iproc_i2c->base + IS_OFFSET);
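The complete_all() -> complete() conversions in this and the following i2c drivers matter because each driver reuses a single struct completion across transfers: complete_all() leaves the completion permanently "done", so a later wait returns immediately even though no interrupt has fired. A minimal sketch of the intended reuse pattern, assuming a hypothetical foo_i2c driver (the completion and IRQ APIs are real):

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>

/* Hypothetical driver state; one completion is reused for every transfer. */
struct foo_i2c_dev {
	struct completion done;
};

static irqreturn_t foo_i2c_isr(int irq, void *data)
{
	struct foo_i2c_dev *dev = data;

	/* Wake exactly one waiter without leaving the completion "done". */
	complete(&dev->done);
	return IRQ_HANDLED;
}

static int foo_i2c_xfer_one(struct foo_i2c_dev *dev)
{
	/* Clear any state left over from the previous transfer. */
	reinit_completion(&dev->done);

	/* ... kick off the hardware transfer here ... */

	if (!wait_for_completion_timeout(&dev->done, msecs_to_jiffies(100)))
		return -ETIMEDOUT;
	return 0;
}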
diff --git a/drivers/i2c/busses/i2c-bcm-kona.c b/drivers/i2c/busses/i2c-bcm-kona.c
index ac9f47679c3a..f98743277e3c 100644
--- a/drivers/i2c/busses/i2c-bcm-kona.c
+++ b/drivers/i2c/busses/i2c-bcm-kona.c
@@ -229,7 +229,7 @@ static irqreturn_t bcm_kona_i2c_isr(int irq, void *devid)
dev->base + TXFCR_OFFSET);
writel(status & ~ISR_RESERVED_MASK, dev->base + ISR_OFFSET);
- complete_all(&dev->done);
+ complete(&dev->done);
return IRQ_HANDLED;
}
diff --git a/drivers/i2c/busses/i2c-brcmstb.c b/drivers/i2c/busses/i2c-brcmstb.c
index 3f5a4d71d3bf..385b57bfcb38 100644
--- a/drivers/i2c/busses/i2c-brcmstb.c
+++ b/drivers/i2c/busses/i2c-brcmstb.c
@@ -228,7 +228,7 @@ static irqreturn_t brcmstb_i2c_isr(int irq, void *devid)
return IRQ_NONE;
brcmstb_i2c_enable_disable_irq(dev, INT_DISABLE);
- complete_all(&dev->done);
+ complete(&dev->done);
dev_dbg(dev->device, "isr handled");
return IRQ_HANDLED;
diff --git a/drivers/i2c/busses/i2c-cros-ec-tunnel.c b/drivers/i2c/busses/i2c-cros-ec-tunnel.c
index a0d95ff682ae..2d5ff86398d0 100644
--- a/drivers/i2c/busses/i2c-cros-ec-tunnel.c
+++ b/drivers/i2c/busses/i2c-cros-ec-tunnel.c
@@ -215,7 +215,7 @@ static int ec_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg i2c_msgs[],
msg->outsize = request_len;
msg->insize = response_len;
- result = cros_ec_cmd_xfer(bus->ec, msg);
+ result = cros_ec_cmd_xfer_status(bus->ec, msg);
if (result < 0) {
dev_err(dev, "Error transferring EC i2c message %d\n", result);
goto exit;
diff --git a/drivers/i2c/busses/i2c-meson.c b/drivers/i2c/busses/i2c-meson.c
index 71d3929adf54..76e28980904f 100644
--- a/drivers/i2c/busses/i2c-meson.c
+++ b/drivers/i2c/busses/i2c-meson.c
@@ -211,7 +211,7 @@ static void meson_i2c_stop(struct meson_i2c *i2c)
meson_i2c_add_token(i2c, TOKEN_STOP);
} else {
i2c->state = STATE_IDLE;
- complete_all(&i2c->done);
+ complete(&i2c->done);
}
}
@@ -238,7 +238,7 @@ static irqreturn_t meson_i2c_irq(int irqno, void *dev_id)
dev_dbg(i2c->dev, "error bit set\n");
i2c->error = -ENXIO;
i2c->state = STATE_IDLE;
- complete_all(&i2c->done);
+ complete(&i2c->done);
goto out;
}
@@ -269,7 +269,7 @@ static irqreturn_t meson_i2c_irq(int irqno, void *dev_id)
break;
case STATE_STOP:
i2c->state = STATE_IDLE;
- complete_all(&i2c->done);
+ complete(&i2c->done);
break;
case STATE_IDLE:
break;
diff --git a/drivers/i2c/busses/i2c-ocores.c b/drivers/i2c/busses/i2c-ocores.c
index dfa7a4b4a91d..ac88a524143e 100644
--- a/drivers/i2c/busses/i2c-ocores.c
+++ b/drivers/i2c/busses/i2c-ocores.c
@@ -379,6 +379,7 @@ static int ocores_i2c_of_probe(struct platform_device *pdev,
if (!clock_frequency_present) {
dev_err(&pdev->dev,
"Missing required parameter 'opencores,ip-clock-frequency'\n");
+ clk_disable_unprepare(i2c->clk);
return -ENODEV;
}
i2c->ip_clock_khz = clock_frequency / 1000;
@@ -467,20 +468,21 @@ static int ocores_i2c_probe(struct platform_device *pdev)
default:
dev_err(&pdev->dev, "Unsupported I/O width (%d)\n",
i2c->reg_io_width);
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_clk;
}
}
ret = ocores_init(&pdev->dev, i2c);
if (ret)
- return ret;
+ goto err_clk;
init_waitqueue_head(&i2c->wait);
ret = devm_request_irq(&pdev->dev, irq, ocores_isr, 0,
pdev->name, i2c);
if (ret) {
dev_err(&pdev->dev, "Cannot claim IRQ\n");
- return ret;
+ goto err_clk;
}
/* hook up driver to tree */
@@ -494,7 +496,7 @@ static int ocores_i2c_probe(struct platform_device *pdev)
ret = i2c_add_adapter(&i2c->adap);
if (ret) {
dev_err(&pdev->dev, "Failed to add adapter\n");
- return ret;
+ goto err_clk;
}
/* add in known devices to the bus */
@@ -504,6 +506,10 @@ static int ocores_i2c_probe(struct platform_device *pdev)
}
return 0;
+
+err_clk:
+ clk_disable_unprepare(i2c->clk);
+ return ret;
}
static int ocores_i2c_remove(struct platform_device *pdev)
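The ocores hunks above convert every early return after the clock has been prepared into a goto to a single err_clk label, so clk_disable_unprepare() runs on all failure paths. A sketch of that unwind shape, with hypothetical foo_* names (the clk API is real):

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int foo_hw_init(struct platform_device *pdev)
{
	return 0;	/* hypothetical stand-in for later probe steps */
}

static int foo_probe(struct platform_device *pdev)
{
	struct clk *clk;
	int ret;

	clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_prepare_enable(clk);		/* first non-devm resource */
	if (ret)
		return ret;

	ret = foo_hw_init(pdev);
	if (ret)
		goto err_clk;			/* all later failures unwind here */

	return 0;

err_clk:
	clk_disable_unprepare(clk);
	return ret;
}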
diff --git a/drivers/i2c/muxes/i2c-demux-pinctrl.c b/drivers/i2c/muxes/i2c-demux-pinctrl.c
index 8de073aed001..215ac87f606d 100644
--- a/drivers/i2c/muxes/i2c-demux-pinctrl.c
+++ b/drivers/i2c/muxes/i2c-demux-pinctrl.c
@@ -68,7 +68,7 @@ static int i2c_demux_activate_master(struct i2c_demux_pinctrl_priv *priv, u32 ne
adap = of_find_i2c_adapter_by_node(priv->chan[new_chan].parent_np);
if (!adap) {
ret = -ENODEV;
- goto err;
+ goto err_with_revert;
}
p = devm_pinctrl_get_select(adap->dev.parent, priv->bus_name);
@@ -103,6 +103,8 @@ static int i2c_demux_activate_master(struct i2c_demux_pinctrl_priv *priv, u32 ne
err_with_put:
i2c_put_adapter(adap);
+ err_with_revert:
+ of_changeset_revert(&priv->chan[new_chan].chgset);
err:
dev_err(priv->dev, "failed to setup demux-adapter %d (%d)\n", new_chan, ret);
return ret;
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index e6dfa1bd3def..5f65a78b27c9 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -2462,18 +2462,24 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
if (addr->dev_addr.bound_dev_if) {
ndev = dev_get_by_index(&init_net, addr->dev_addr.bound_dev_if);
- if (!ndev)
- return -ENODEV;
+ if (!ndev) {
+ ret = -ENODEV;
+ goto err2;
+ }
if (ndev->flags & IFF_LOOPBACK) {
dev_put(ndev);
- if (!id_priv->id.device->get_netdev)
- return -EOPNOTSUPP;
+ if (!id_priv->id.device->get_netdev) {
+ ret = -EOPNOTSUPP;
+ goto err2;
+ }
ndev = id_priv->id.device->get_netdev(id_priv->id.device,
id_priv->id.port_num);
- if (!ndev)
- return -ENODEV;
+ if (!ndev) {
+ ret = -ENODEV;
+ goto err2;
+ }
}
route->path_rec->net = &init_net;
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 3aca7f6171b4..b6a953aed7e8 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -1827,8 +1827,12 @@ static int process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
(ep->mpa_pkt + sizeof(*mpa));
ep->ird = ntohs(mpa_v2_params->ird) &
MPA_V2_IRD_ORD_MASK;
+ ep->ird = min_t(u32, ep->ird,
+ cur_max_read_depth(ep->com.dev));
ep->ord = ntohs(mpa_v2_params->ord) &
MPA_V2_IRD_ORD_MASK;
+ ep->ord = min_t(u32, ep->ord,
+ cur_max_read_depth(ep->com.dev));
PDBG("%s initiator ird %u ord %u\n", __func__, ep->ird,
ep->ord);
if (ntohs(mpa_v2_params->ird) & MPA_V2_PEER2PEER_MODEL)
@@ -3136,7 +3140,7 @@ int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
if (conn_param->ord > ep->ird) {
if (RELAXED_IRD_NEGOTIATION) {
- ep->ord = ep->ird;
+ conn_param->ord = ep->ird;
} else {
ep->ird = conn_param->ird;
ep->ord = conn_param->ord;
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index 812ab7278b8e..ac926c942fee 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -1016,15 +1016,15 @@ int c4iw_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
struct c4iw_cq *chp;
- int ret;
+ int ret = 0;
unsigned long flag;
chp = to_c4iw_cq(ibcq);
spin_lock_irqsave(&chp->lock, flag);
- ret = t4_arm_cq(&chp->cq,
- (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED);
+ t4_arm_cq(&chp->cq,
+ (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED);
+ if (flags & IB_CQ_REPORT_MISSED_EVENTS)
+ ret = t4_cq_notempty(&chp->cq);
spin_unlock_irqrestore(&chp->lock, flag);
- if (ret && !(flags & IB_CQ_REPORT_MISSED_EVENTS))
- ret = 0;
return ret;
}
diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h
index 6126bbe36095..02173f4315fa 100644
--- a/drivers/infiniband/hw/cxgb4/t4.h
+++ b/drivers/infiniband/hw/cxgb4/t4.h
@@ -634,6 +634,11 @@ static inline int t4_valid_cqe(struct t4_cq *cq, struct t4_cqe *cqe)
return (CQE_GENBIT(cqe) == cq->gen);
}
+static inline int t4_cq_notempty(struct t4_cq *cq)
+{
+ return cq->sw_in_use || t4_valid_cqe(cq, &cq->queue[cq->cidx]);
+}
+
static inline int t4_next_hw_cqe(struct t4_cq *cq, struct t4_cqe **cqe)
{
int ret;
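The cxgb4 change above makes c4iw_arm_cq() follow the ib_req_notify_cq() convention: with IB_CQ_REPORT_MISSED_EVENTS set, the verb should return a value greater than zero when completions may already be pending on the CQ, so the consumer re-polls instead of sleeping through them. A sketch of the consumer side, assuming a hypothetical drain loop (ib_poll_cq() and ib_req_notify_cq() are the real verbs):

#include <rdma/ib_verbs.h>

/* Hypothetical drain loop: poll until empty, re-arm, re-poll on a race. */
static void foo_drain_cq(struct ib_cq *cq)
{
	struct ib_wc wc;

again:
	while (ib_poll_cq(cq, 1, &wc) > 0) {
		/* ... handle the work completion ... */
	}

	/*
	 * A return value > 0 means a completion may have slipped in while
	 * re-arming; poll again instead of waiting for an event.
	 */
	if (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS) > 0)
		goto again;
}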
diff --git a/drivers/infiniband/hw/hfi1/affinity.c b/drivers/infiniband/hw/hfi1/affinity.c
index 79575ee873f2..0566393e5aba 100644
--- a/drivers/infiniband/hw/hfi1/affinity.c
+++ b/drivers/infiniband/hw/hfi1/affinity.c
@@ -47,7 +47,6 @@
#include <linux/topology.h>
#include <linux/cpumask.h>
#include <linux/module.h>
-#include <linux/cpumask.h>
#include "hfi.h"
#include "affinity.h"
@@ -682,7 +681,7 @@ int hfi1_set_sdma_affinity(struct hfi1_devdata *dd, const char *buf,
size_t count)
{
struct hfi1_affinity_node *entry;
- struct cpumask mask;
+ cpumask_var_t mask;
int ret, i;
spin_lock(&node_affinity.lock);
@@ -692,19 +691,24 @@ int hfi1_set_sdma_affinity(struct hfi1_devdata *dd, const char *buf,
if (!entry)
return -EINVAL;
- ret = cpulist_parse(buf, &mask);
+ ret = zalloc_cpumask_var(&mask, GFP_KERNEL);
+ if (!ret)
+ return -ENOMEM;
+
+ ret = cpulist_parse(buf, mask);
if (ret)
- return ret;
+ goto out;
- if (!cpumask_subset(&mask, cpu_online_mask) || cpumask_empty(&mask)) {
+ if (!cpumask_subset(mask, cpu_online_mask) || cpumask_empty(mask)) {
dd_dev_warn(dd, "Invalid CPU mask\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
mutex_lock(&sdma_affinity_mutex);
/* reset the SDMA interrupt affinity details */
init_cpu_mask_set(&entry->def_intr);
- cpumask_copy(&entry->def_intr.mask, &mask);
+ cpumask_copy(&entry->def_intr.mask, mask);
/*
* Reassign the affinity for each SDMA interrupt.
*/
@@ -720,8 +724,9 @@ int hfi1_set_sdma_affinity(struct hfi1_devdata *dd, const char *buf,
if (ret)
break;
}
-
mutex_unlock(&sdma_affinity_mutex);
+out:
+ free_cpumask_var(mask);
return ret ? ret : strnlen(buf, PAGE_SIZE);
}
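The hfi1 change above replaces an on-stack struct cpumask with cpumask_var_t: with CONFIG_CPUMASK_OFFSTACK and a large NR_CPUS, a struct cpumask can be kilobytes, too big for the kernel stack. A minimal sketch of the standard pattern (the cpumask API is real; foo_parse_cpus is hypothetical):

#include <linux/cpumask.h>
#include <linux/slab.h>

static int foo_parse_cpus(const char *buf)
{
	cpumask_var_t mask;
	int ret;

	/* Heap-allocates only when CONFIG_CPUMASK_OFFSTACK=y. */
	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	ret = cpulist_parse(buf, mask);
	if (!ret && !cpumask_subset(mask, cpu_online_mask))
		ret = -EINVAL;

	free_cpumask_var(mask);	/* always pairs with the allocation */
	return ret;
}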
diff --git a/drivers/infiniband/hw/hfi1/debugfs.c b/drivers/infiniband/hw/hfi1/debugfs.c
index dbab9d9cc288..a49cc88f08a2 100644
--- a/drivers/infiniband/hw/hfi1/debugfs.c
+++ b/drivers/infiniband/hw/hfi1/debugfs.c
@@ -223,28 +223,32 @@ DEBUGFS_SEQ_FILE_OPEN(ctx_stats)
DEBUGFS_FILE_OPS(ctx_stats);
static void *_qp_stats_seq_start(struct seq_file *s, loff_t *pos)
-__acquires(RCU)
+ __acquires(RCU)
{
struct qp_iter *iter;
loff_t n = *pos;
- rcu_read_lock();
iter = qp_iter_init(s->private);
+
+ /* stop calls rcu_read_unlock */
+ rcu_read_lock();
+
if (!iter)
return NULL;
- while (n--) {
+ do {
if (qp_iter_next(iter)) {
kfree(iter);
return NULL;
}
- }
+ } while (n--);
return iter;
}
static void *_qp_stats_seq_next(struct seq_file *s, void *iter_ptr,
loff_t *pos)
+ __must_hold(RCU)
{
struct qp_iter *iter = iter_ptr;
@@ -259,7 +263,7 @@ static void *_qp_stats_seq_next(struct seq_file *s, void *iter_ptr,
}
static void _qp_stats_seq_stop(struct seq_file *s, void *iter_ptr)
-__releases(RCU)
+ __releases(RCU)
{
rcu_read_unlock();
}
diff --git a/drivers/infiniband/hw/hfi1/driver.c b/drivers/infiniband/hw/hfi1/driver.c
index 8246dc7d0573..303f10555729 100644
--- a/drivers/infiniband/hw/hfi1/driver.c
+++ b/drivers/infiniband/hw/hfi1/driver.c
@@ -888,14 +888,15 @@ void set_all_slowpath(struct hfi1_devdata *dd)
}
static inline int set_armed_to_active(struct hfi1_ctxtdata *rcd,
- struct hfi1_packet packet,
+ struct hfi1_packet *packet,
struct hfi1_devdata *dd)
{
struct work_struct *lsaw = &rcd->ppd->linkstate_active_work;
- struct hfi1_message_header *hdr = hfi1_get_msgheader(packet.rcd->dd,
- packet.rhf_addr);
+ struct hfi1_message_header *hdr = hfi1_get_msgheader(packet->rcd->dd,
+ packet->rhf_addr);
+ u8 etype = rhf_rcv_type(packet->rhf);
- if (hdr2sc(hdr, packet.rhf) != 0xf) {
+ if (etype == RHF_RCV_TYPE_IB && hdr2sc(hdr, packet->rhf) != 0xf) {
int hwstate = read_logical_state(dd);
if (hwstate != LSTATE_ACTIVE) {
@@ -979,7 +980,7 @@ int handle_receive_interrupt(struct hfi1_ctxtdata *rcd, int thread)
/* Auto activate link on non-SC15 packet receive */
if (unlikely(rcd->ppd->host_link_state ==
HLS_UP_ARMED) &&
- set_armed_to_active(rcd, packet, dd))
+ set_armed_to_active(rcd, &packet, dd))
goto bail;
last = process_rcv_packet(&packet, thread);
}
diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c
index 1ecbec192358..7e03ccd2554d 100644
--- a/drivers/infiniband/hw/hfi1/file_ops.c
+++ b/drivers/infiniband/hw/hfi1/file_ops.c
@@ -183,6 +183,7 @@ static int hfi1_file_open(struct inode *inode, struct file *fp)
if (fd) {
fd->rec_cpu_num = -1; /* no cpu affinity by default */
fd->mm = current->mm;
+ atomic_inc(&fd->mm->mm_count);
}
fp->private_data = fd;
@@ -222,7 +223,7 @@ static long hfi1_file_ioctl(struct file *fp, unsigned int cmd,
ret = assign_ctxt(fp, &uinfo);
if (ret < 0)
return ret;
- setup_ctxt(fp);
+ ret = setup_ctxt(fp);
if (ret)
return ret;
ret = user_init(fp);
@@ -779,6 +780,7 @@ static int hfi1_file_close(struct inode *inode, struct file *fp)
mutex_unlock(&hfi1_mutex);
hfi1_free_ctxtdata(dd, uctxt);
done:
+ mmdrop(fdata->mm);
kobject_put(&dd->kobj);
kfree(fdata);
return 0;
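The file_ops change above pins the opener's mm_struct for the life of the file descriptor: atomic_inc(&mm->mm_count) takes a reference on the mm_struct itself (not its address space) and mmdrop() releases it at close, so fd->mm cannot vanish if the opening task exits first. In this 4.8-era tree the grab is open-coded; later kernels spell it mmgrab(). A sketch of the pairing, with a hypothetical holder struct:

#include <linux/sched.h>

/* Hypothetical per-open state that may outlive the task that opened it. */
struct foo_filedata {
	struct mm_struct *mm;
};

static void foo_open_grab_mm(struct foo_filedata *fd)
{
	fd->mm = current->mm;
	atomic_inc(&fd->mm->mm_count);	/* keep the mm_struct itself alive */
}

static void foo_close_drop_mm(struct foo_filedata *fd)
{
	mmdrop(fd->mm);			/* pairs with the atomic_inc above */
}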
diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h
index 1000e0fd96d9..a021e660d482 100644
--- a/drivers/infiniband/hw/hfi1/hfi.h
+++ b/drivers/infiniband/hw/hfi1/hfi.h
@@ -1272,9 +1272,26 @@ static inline int hdr2sc(struct hfi1_message_header *hdr, u64 rhf)
((!!(rhf_dc_info(rhf))) << 4);
}
+#define HFI1_JKEY_WIDTH 16
+#define HFI1_JKEY_MASK (BIT(16) - 1)
+#define HFI1_ADMIN_JKEY_RANGE 32
+
+/*
+ * J_KEYs are split and allocated in the following groups:
+ * 0 - 31 - users with administrator privileges
+ * 32 - 63 - kernel protocols using KDETH packets
+ * 64 - 65535 - all other users using KDETH packets
+ */
static inline u16 generate_jkey(kuid_t uid)
{
- return from_kuid(current_user_ns(), uid) & 0xffff;
+ u16 jkey = from_kuid(current_user_ns(), uid) & HFI1_JKEY_MASK;
+
+ if (capable(CAP_SYS_ADMIN))
+ jkey &= HFI1_ADMIN_JKEY_RANGE - 1;
+ else if (jkey < 64)
+ jkey |= BIT(HFI1_JKEY_WIDTH - 1);
+
+ return jkey;
}
/*
@@ -1656,7 +1673,6 @@ struct cc_state *get_cc_state_protected(struct hfi1_pportdata *ppd)
struct hfi1_devdata *hfi1_init_dd(struct pci_dev *,
const struct pci_device_id *);
void hfi1_free_devdata(struct hfi1_devdata *);
-void cc_state_reclaim(struct rcu_head *rcu);
struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev, size_t extra);
/* LED beaconing functions */
diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
index a358d23ecd54..b7935451093c 100644
--- a/drivers/infiniband/hw/hfi1/init.c
+++ b/drivers/infiniband/hw/hfi1/init.c
@@ -1333,7 +1333,7 @@ static void cleanup_device_data(struct hfi1_devdata *dd)
spin_unlock(&ppd->cc_state_lock);
if (cc_state)
- call_rcu(&cc_state->rcu, cc_state_reclaim);
+ kfree_rcu(cc_state, rcu);
}
free_credit_return(dd);
diff --git a/drivers/infiniband/hw/hfi1/mad.c b/drivers/infiniband/hw/hfi1/mad.c
index 1263abe01999..39e42c373a01 100644
--- a/drivers/infiniband/hw/hfi1/mad.c
+++ b/drivers/infiniband/hw/hfi1/mad.c
@@ -1819,6 +1819,11 @@ static int __subn_get_opa_cable_info(struct opa_smp *smp, u32 am, u8 *data,
u32 len = OPA_AM_CI_LEN(am) + 1;
int ret;
+ if (dd->pport->port_type != PORT_TYPE_QSFP) {
+ smp->status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)smp);
+ }
+
#define __CI_PAGE_SIZE BIT(7) /* 128 bytes */
#define __CI_PAGE_MASK ~(__CI_PAGE_SIZE - 1)
#define __CI_PAGE_NUM(a) ((a) & __CI_PAGE_MASK)
@@ -3398,7 +3403,7 @@ static void apply_cc_state(struct hfi1_pportdata *ppd)
spin_unlock(&ppd->cc_state_lock);
- call_rcu(&old_cc_state->rcu, cc_state_reclaim);
+ kfree_rcu(old_cc_state, rcu);
}
static int __subn_set_opa_cong_setting(struct opa_smp *smp, u32 am, u8 *data,
@@ -3553,13 +3558,6 @@ static int __subn_get_opa_cc_table(struct opa_smp *smp, u32 am, u8 *data,
return reply((struct ib_mad_hdr *)smp);
}
-void cc_state_reclaim(struct rcu_head *rcu)
-{
- struct cc_state *cc_state = container_of(rcu, struct cc_state, rcu);
-
- kfree(cc_state);
-}
-
static int __subn_set_opa_cc_table(struct opa_smp *smp, u32 am, u8 *data,
struct ib_device *ibdev, u8 port,
u32 *resp_len)
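The mad.c/init.c change above drops the hand-written cc_state_reclaim() callback: when an RCU callback does nothing but container_of() plus kfree(), kfree_rcu() expresses the same deferred free without the helper. A minimal sketch (the RCU API is real; foo_state is hypothetical):

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo_state {
	int data;
	struct rcu_head rcu;	/* embedded head for deferred freeing */
};

static void foo_replace(struct foo_state __rcu **slot, struct foo_state *new)
{
	struct foo_state *old;

	old = rcu_dereference_protected(*slot, 1);	/* caller holds the lock */
	rcu_assign_pointer(*slot, new);
	if (old)
		kfree_rcu(old, rcu);	/* freed after a grace period elapses */
}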
diff --git a/drivers/infiniband/hw/hfi1/qp.c b/drivers/infiniband/hw/hfi1/qp.c
index a5aa3517e7d5..4e4d8317c281 100644
--- a/drivers/infiniband/hw/hfi1/qp.c
+++ b/drivers/infiniband/hw/hfi1/qp.c
@@ -656,10 +656,6 @@ struct qp_iter *qp_iter_init(struct hfi1_ibdev *dev)
iter->dev = dev;
iter->specials = dev->rdi.ibdev.phys_port_cnt * 2;
- if (qp_iter_next(iter)) {
- kfree(iter);
- return NULL;
- }
return iter;
}
diff --git a/drivers/infiniband/hw/hfi1/qsfp.c b/drivers/infiniband/hw/hfi1/qsfp.c
index a207717ade2a..4e95ad810847 100644
--- a/drivers/infiniband/hw/hfi1/qsfp.c
+++ b/drivers/infiniband/hw/hfi1/qsfp.c
@@ -706,8 +706,8 @@ int get_cable_info(struct hfi1_devdata *dd, u32 port_num, u32 addr, u32 len,
u8 *data)
{
struct hfi1_pportdata *ppd;
- u32 excess_len = 0;
- int ret = 0;
+ u32 excess_len = len;
+ int ret = 0, offset = 0;
if (port_num > dd->num_pports || port_num < 1) {
dd_dev_info(dd, "%s: Invalid port number %d\n",
@@ -740,6 +740,34 @@ int get_cable_info(struct hfi1_devdata *dd, u32 port_num, u32 addr, u32 len,
}
memcpy(data, &ppd->qsfp_info.cache[addr], len);
+
+ if (addr <= QSFP_MONITOR_VAL_END &&
+ (addr + len) >= QSFP_MONITOR_VAL_START) {
+ /* Overlap with the dynamic channel monitor range */
+ if (addr < QSFP_MONITOR_VAL_START) {
+ if (addr + len <= QSFP_MONITOR_VAL_END)
+ len = addr + len - QSFP_MONITOR_VAL_START;
+ else
+ len = QSFP_MONITOR_RANGE;
+ offset = QSFP_MONITOR_VAL_START - addr;
+ addr = QSFP_MONITOR_VAL_START;
+ } else if (addr == QSFP_MONITOR_VAL_START) {
+ offset = 0;
+ if (addr + len > QSFP_MONITOR_VAL_END)
+ len = QSFP_MONITOR_RANGE;
+ } else {
+ offset = 0;
+ if (addr + len > QSFP_MONITOR_VAL_END)
+ len = QSFP_MONITOR_VAL_END - addr + 1;
+ }
+ /* Refresh the values of the dynamic monitors from the cable */
+ ret = one_qsfp_read(ppd, dd->hfi1_id, addr, data + offset, len);
+ if (ret != len) {
+ ret = -EAGAIN;
+ goto set_zeroes;
+ }
+ }
+
return 0;
set_zeroes:
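The qsfp.c hunk above intersects the caller's window [addr, addr + len) with the dynamic-monitor byte range [QSFP_MONITOR_VAL_START, QSFP_MONITOR_VAL_END] and refreshes only the overlapping bytes from the cable. The three branches are ordinary interval intersection; a compact generic equivalent (a sketch with hypothetical names, not the driver's code):

/*
 * Intersect [addr, addr + len) with the inclusive range [lo, hi].
 * Returns the overlap length (0 if none), and stores the overlap start
 * and its offset into the caller's buffer.
 */
static unsigned int foo_overlap(unsigned int addr, unsigned int len,
				unsigned int lo, unsigned int hi,
				unsigned int *start, unsigned int *off)
{
	unsigned int end = addr + len;	/* exclusive */

	if (addr > hi || end <= lo)
		return 0;
	*start = addr > lo ? addr : lo;
	*off = *start - addr;
	if (end - 1 > hi)
		end = hi + 1;
	return end - *start;
}

For example, addr = 20, len = 10 against [22, 81] yields start 22, offset 2, length 8, matching the first branch in the hunk (len = addr + len - QSFP_MONITOR_VAL_START, offset = QSFP_MONITOR_VAL_START - addr).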
diff --git a/drivers/infiniband/hw/hfi1/qsfp.h b/drivers/infiniband/hw/hfi1/qsfp.h
index 69275ebd9597..36cf52359848 100644
--- a/drivers/infiniband/hw/hfi1/qsfp.h
+++ b/drivers/infiniband/hw/hfi1/qsfp.h
@@ -74,6 +74,9 @@
/* Defined fields that Intel requires of qualified cables */
/* Byte 0 is Identifier, not checked */
/* Byte 1 is reserved "status MSB" */
+#define QSFP_MONITOR_VAL_START 22
+#define QSFP_MONITOR_VAL_END 81
+#define QSFP_MONITOR_RANGE (QSFP_MONITOR_VAL_END - QSFP_MONITOR_VAL_START + 1)
#define QSFP_TX_CTRL_BYTE_OFFS 86
#define QSFP_PWR_CTRL_BYTE_OFFS 93
#define QSFP_CDR_CTRL_BYTE_OFFS 98
diff --git a/drivers/infiniband/hw/i40iw/i40iw.h b/drivers/infiniband/hw/i40iw/i40iw.h
index b738acdb9b02..8ec09e470f84 100644
--- a/drivers/infiniband/hw/i40iw/i40iw.h
+++ b/drivers/infiniband/hw/i40iw/i40iw.h
@@ -232,7 +232,7 @@ struct i40iw_device {
struct i40e_client *client;
struct i40iw_hw hw;
struct i40iw_cm_core cm_core;
- unsigned long *mem_resources;
+ u8 *mem_resources;
unsigned long *allocated_qps;
unsigned long *allocated_cqs;
unsigned long *allocated_mrs;
@@ -435,8 +435,8 @@ static inline int i40iw_alloc_resource(struct i40iw_device *iwdev,
*next = resource_num + 1;
if (*next == max_resources)
*next = 0;
- spin_unlock_irqrestore(&iwdev->resource_lock, flags);
*req_resource_num = resource_num;
+ spin_unlock_irqrestore(&iwdev->resource_lock, flags);
return 0;
}
diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c
index 5026dc79978a..7ca0638579c0 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_cm.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c
@@ -535,8 +535,8 @@ static struct i40iw_puda_buf *i40iw_form_cm_frame(struct i40iw_cm_node *cm_node,
buf += hdr_len;
}
- if (pd_len)
- memcpy(buf, pdata->addr, pd_len);
+ if (pdata && pdata->addr)
+ memcpy(buf, pdata->addr, pdata->size);
atomic_set(&sqbuf->refcount, 1);
@@ -3347,26 +3347,6 @@ int i40iw_cm_disconn(struct i40iw_qp *iwqp)
}
/**
- * i40iw_loopback_nop - Send a nop
- * @qp: associated hw qp
- */
-static void i40iw_loopback_nop(struct i40iw_sc_qp *qp)
-{
- u64 *wqe;
- u64 header;
-
- wqe = qp->qp_uk.sq_base->elem;
- set_64bit_val(wqe, 0, 0);
- set_64bit_val(wqe, 8, 0);
- set_64bit_val(wqe, 16, 0);
-
- header = LS_64(I40IWQP_OP_NOP, I40IWQPSQ_OPCODE) |
- LS_64(0, I40IWQPSQ_SIGCOMPL) |
- LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID);
- set_64bit_val(wqe, 24, header);
-}
-
-/**
* i40iw_qp_disconnect - free qp and close cm
* @iwqp: associate qp for the connection
*/
@@ -3638,7 +3618,7 @@ int i40iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
} else {
if (iwqp->page)
iwqp->sc_qp.qp_uk.sq_base = kmap(iwqp->page);
- i40iw_loopback_nop(&iwqp->sc_qp);
+ dev->iw_priv_qp_ops->qp_send_lsmm(&iwqp->sc_qp, NULL, 0, 0);
}
if (iwqp->page)
diff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c b/drivers/infiniband/hw/i40iw/i40iw_main.c
index 6e9081380a27..0cbbe4038298 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_main.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_main.c
@@ -1558,6 +1558,10 @@ static int i40iw_open(struct i40e_info *ldev, struct i40e_client *client)
enum i40iw_status_code status;
struct i40iw_handler *hdl;
+ hdl = i40iw_find_netdev(ldev->netdev);
+ if (hdl)
+ return 0;
+
hdl = kzalloc(sizeof(*hdl), GFP_KERNEL);
if (!hdl)
return -ENOMEM;
diff --git a/drivers/infiniband/hw/i40iw/i40iw_utils.c b/drivers/infiniband/hw/i40iw/i40iw_utils.c
index 0e8db0a35141..6fd043b1d714 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_utils.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_utils.c
@@ -673,8 +673,11 @@ enum i40iw_status_code i40iw_free_virt_mem(struct i40iw_hw *hw,
{
if (!mem)
return I40IW_ERR_PARAM;
+ /*
+ * mem->va points to the parent of mem, so both mem and mem->va
+ * cannot be touched once mem->va is freed
+ */
kfree(mem->va);
- mem->va = NULL;
return 0;
}
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
index 2360338877bf..6329c971c22f 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
@@ -794,7 +794,6 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
return &iwqp->ibqp;
error:
i40iw_free_qp_resources(iwdev, iwqp, qp_num);
- kfree(mem);
return ERR_PTR(err_code);
}
@@ -1926,8 +1925,7 @@ static int i40iw_dereg_mr(struct ib_mr *ib_mr)
}
if (iwpbl->pbl_allocated)
i40iw_free_pble(iwdev->pble_rsrc, palloc);
- kfree(iwpbl->iwmr);
- iwpbl->iwmr = NULL;
+ kfree(iwmr);
return 0;
}
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index d6fc8a6e8c33..006db6436e3b 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -576,8 +576,8 @@ static int mlx4_ib_ipoib_csum_ok(__be16 status, __be16 checksum)
checksum == cpu_to_be16(0xffff);
}
-static int use_tunnel_data(struct mlx4_ib_qp *qp, struct mlx4_ib_cq *cq, struct ib_wc *wc,
- unsigned tail, struct mlx4_cqe *cqe, int is_eth)
+static void use_tunnel_data(struct mlx4_ib_qp *qp, struct mlx4_ib_cq *cq, struct ib_wc *wc,
+ unsigned tail, struct mlx4_cqe *cqe, int is_eth)
{
struct mlx4_ib_proxy_sqp_hdr *hdr;
@@ -600,8 +600,6 @@ static int use_tunnel_data(struct mlx4_ib_qp *qp, struct mlx4_ib_cq *cq, struct
wc->slid = be16_to_cpu(hdr->tun.slid_mac_47_32);
wc->sl = (u8) (be16_to_cpu(hdr->tun.sl_vid) >> 12);
}
-
- return 0;
}
static void mlx4_ib_qp_sw_comp(struct mlx4_ib_qp *qp, int num_entries,
@@ -692,7 +690,7 @@ repoll:
if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_OPCODE_NOP &&
is_send)) {
pr_warn("Completion for NOP opcode detected!\n");
- return -EINVAL;
+ return -EAGAIN;
}
/* Resize CQ in progress */
@@ -723,7 +721,7 @@ repoll:
if (unlikely(!mqp)) {
pr_warn("CQ %06x with entry for unknown QPN %06x\n",
cq->mcq.cqn, be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK);
- return -EINVAL;
+ return -EAGAIN;
}
*cur_qp = to_mibqp(mqp);
@@ -741,7 +739,7 @@ repoll:
if (unlikely(!msrq)) {
pr_warn("CQ %06x with entry for unknown SRQN %06x\n",
cq->mcq.cqn, srq_num);
- return -EINVAL;
+ return -EAGAIN;
}
}
@@ -852,9 +850,11 @@ repoll:
if (mlx4_is_mfunc(to_mdev(cq->ibcq.device)->dev)) {
if ((*cur_qp)->mlx4_ib_qp_type &
(MLX4_IB_QPT_PROXY_SMI_OWNER |
- MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI))
- return use_tunnel_data(*cur_qp, cq, wc, tail,
- cqe, is_eth);
+ MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI)) {
+ use_tunnel_data(*cur_qp, cq, wc, tail, cqe,
+ is_eth);
+ return 0;
+ }
}
wc->slid = be16_to_cpu(cqe->rlid);
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index f02a975320bd..8db6fdff092f 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -37,7 +37,6 @@
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
-#include <linux/io-mapping.h>
#if defined(CONFIG_X86)
#include <asm/pat.h>
#endif
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
index 16740dcb876b..67fc0b6857e1 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
@@ -1156,18 +1156,18 @@ static void ocrdma_get_attr(struct ocrdma_dev *dev,
attr->max_srq =
(rsp->max_srq_rpir_qps & OCRDMA_MBX_QUERY_CFG_MAX_SRQ_MASK) >>
OCRDMA_MBX_QUERY_CFG_MAX_SRQ_OFFSET;
- attr->max_send_sge = ((rsp->max_write_send_sge &
+ attr->max_send_sge = ((rsp->max_recv_send_sge &
OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_MASK) >>
OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_SHIFT);
- attr->max_recv_sge = (rsp->max_write_send_sge &
- OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_MASK) >>
- OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_SHIFT;
+ attr->max_recv_sge = (rsp->max_recv_send_sge &
+ OCRDMA_MBX_QUERY_CFG_MAX_RECV_SGE_MASK) >>
+ OCRDMA_MBX_QUERY_CFG_MAX_RECV_SGE_SHIFT;
attr->max_srq_sge = (rsp->max_srq_rqe_sge &
OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_MASK) >>
OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_OFFSET;
- attr->max_rdma_sge = (rsp->max_write_send_sge &
- OCRDMA_MBX_QUERY_CFG_MAX_WRITE_SGE_MASK) >>
- OCRDMA_MBX_QUERY_CFG_MAX_WRITE_SGE_SHIFT;
+ attr->max_rdma_sge = (rsp->max_wr_rd_sge &
+ OCRDMA_MBX_QUERY_CFG_MAX_RD_SGE_MASK) >>
+ OCRDMA_MBX_QUERY_CFG_MAX_RD_SGE_SHIFT;
attr->max_ord_per_qp = (rsp->max_ird_ord_per_qp &
OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_MASK) >>
OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_SHIFT;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
index 0efc9662c6d8..37df4481bb8f 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
@@ -554,9 +554,9 @@ enum {
OCRDMA_MBX_QUERY_CFG_L3_TYPE_MASK = 0x18,
OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_SHIFT = 0,
OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_MASK = 0xFFFF,
- OCRDMA_MBX_QUERY_CFG_MAX_WRITE_SGE_SHIFT = 16,
- OCRDMA_MBX_QUERY_CFG_MAX_WRITE_SGE_MASK = 0xFFFF <<
- OCRDMA_MBX_QUERY_CFG_MAX_WRITE_SGE_SHIFT,
+ OCRDMA_MBX_QUERY_CFG_MAX_RECV_SGE_SHIFT = 16,
+ OCRDMA_MBX_QUERY_CFG_MAX_RECV_SGE_MASK = 0xFFFF <<
+ OCRDMA_MBX_QUERY_CFG_MAX_RECV_SGE_SHIFT,
OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_SHIFT = 0,
OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_MASK = 0xFFFF,
@@ -612,6 +612,8 @@ enum {
OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_OFFSET = 0,
OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_MASK = 0xFFFF <<
OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_OFFSET,
+ OCRDMA_MBX_QUERY_CFG_MAX_RD_SGE_SHIFT = 0,
+ OCRDMA_MBX_QUERY_CFG_MAX_RD_SGE_MASK = 0xFFFF,
};
struct ocrdma_mbx_query_config {
@@ -619,7 +621,7 @@ struct ocrdma_mbx_query_config {
struct ocrdma_mbx_rsp rsp;
u32 qp_srq_cq_ird_ord;
u32 max_pd_ca_ack_delay;
- u32 max_write_send_sge;
+ u32 max_recv_send_sge;
u32 max_ird_ord_per_qp;
u32 max_shared_ird_ord;
u32 max_mr;
@@ -639,6 +641,8 @@ struct ocrdma_mbx_query_config {
u32 max_wqes_rqes_per_q;
u32 max_cq_cqes_per_cq;
u32 max_srq_rqe_sge;
+ u32 max_wr_rd_sge;
+ u32 ird_pgsz_num_pages;
};
struct ocrdma_fw_ver_rsp {
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index b1a3d91fe8b9..0aa854737e74 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -125,8 +125,8 @@ int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr,
IB_DEVICE_SYS_IMAGE_GUID |
IB_DEVICE_LOCAL_DMA_LKEY |
IB_DEVICE_MEM_MGT_EXTENSIONS;
- attr->max_sge = dev->attr.max_send_sge;
- attr->max_sge_rd = attr->max_sge;
+ attr->max_sge = min(dev->attr.max_send_sge, dev->attr.max_recv_sge);
+ attr->max_sge_rd = dev->attr.max_rdma_sge;
attr->max_cq = dev->attr.max_cq;
attr->max_cqe = dev->attr.max_cqe;
attr->max_mr = dev->attr.max_mr;
diff --git a/drivers/infiniband/hw/qib/qib_debugfs.c b/drivers/infiniband/hw/qib/qib_debugfs.c
index 5e75b43c596b..5bad8e3b40bb 100644
--- a/drivers/infiniband/hw/qib/qib_debugfs.c
+++ b/drivers/infiniband/hw/qib/qib_debugfs.c
@@ -189,27 +189,32 @@ static int _ctx_stats_seq_show(struct seq_file *s, void *v)
DEBUGFS_FILE(ctx_stats)
static void *_qp_stats_seq_start(struct seq_file *s, loff_t *pos)
+ __acquires(RCU)
{
struct qib_qp_iter *iter;
loff_t n = *pos;
- rcu_read_lock();
iter = qib_qp_iter_init(s->private);
+
+ /* stop calls rcu_read_unlock */
+ rcu_read_lock();
+
if (!iter)
return NULL;
- while (n--) {
+ do {
if (qib_qp_iter_next(iter)) {
kfree(iter);
return NULL;
}
- }
+ } while (n--);
return iter;
}
static void *_qp_stats_seq_next(struct seq_file *s, void *iter_ptr,
loff_t *pos)
+ __must_hold(RCU)
{
struct qib_qp_iter *iter = iter_ptr;
@@ -224,6 +229,7 @@ static void *_qp_stats_seq_next(struct seq_file *s, void *iter_ptr,
}
static void _qp_stats_seq_stop(struct seq_file *s, void *iter_ptr)
+ __releases(RCU)
{
rcu_read_unlock();
}
diff --git a/drivers/infiniband/hw/qib/qib_fs.c b/drivers/infiniband/hw/qib/qib_fs.c
index fcdf37913a26..c3edc033f7c4 100644
--- a/drivers/infiniband/hw/qib/qib_fs.c
+++ b/drivers/infiniband/hw/qib/qib_fs.c
@@ -328,26 +328,12 @@ static ssize_t flash_write(struct file *file, const char __user *buf,
pos = *ppos;
- if (pos != 0) {
- ret = -EINVAL;
- goto bail;
- }
-
- if (count != sizeof(struct qib_flash)) {
- ret = -EINVAL;
- goto bail;
- }
-
- tmp = kmalloc(count, GFP_KERNEL);
- if (!tmp) {
- ret = -ENOMEM;
- goto bail;
- }
+ if (pos != 0 || count != sizeof(struct qib_flash))
+ return -EINVAL;
- if (copy_from_user(tmp, buf, count)) {
- ret = -EFAULT;
- goto bail_tmp;
- }
+ tmp = memdup_user(buf, count);
+ if (IS_ERR(tmp))
+ return PTR_ERR(tmp);
dd = private2dd(file);
if (qib_eeprom_write(dd, pos, tmp, count)) {
@@ -361,8 +347,6 @@ static ssize_t flash_write(struct file *file, const char __user *buf,
bail_tmp:
kfree(tmp);
-
-bail:
return ret;
}
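The qib_fs change above collapses the kmalloc() + copy_from_user() + error-label dance into memdup_user(), which returns either the freshly allocated copy or an ERR_PTR. A sketch of the pattern (memdup_user() is real; foo_write is hypothetical):

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/uaccess.h>

static ssize_t foo_write(const char __user *buf, size_t count)
{
	void *tmp;

	tmp = memdup_user(buf, count);	/* kmalloc + copy_from_user in one */
	if (IS_ERR(tmp))
		return PTR_ERR(tmp);

	/* ... consume tmp ... */

	kfree(tmp);
	return count;
}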
diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c
index 9cc0aae1d781..f9b8cd2354d1 100644
--- a/drivers/infiniband/hw/qib/qib_qp.c
+++ b/drivers/infiniband/hw/qib/qib_qp.c
@@ -573,10 +573,6 @@ struct qib_qp_iter *qib_qp_iter_init(struct qib_ibdev *dev)
return NULL;
iter->dev = dev;
- if (qib_qp_iter_next(iter)) {
- kfree(iter);
- return NULL;
- }
return iter;
}
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_main.c b/drivers/infiniband/hw/usnic/usnic_ib_main.c
index c229b9f4a52d..0a89a955550b 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_main.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_main.c
@@ -664,7 +664,8 @@ static int __init usnic_ib_init(void)
return err;
}
- if (pci_register_driver(&usnic_ib_pci_driver)) {
+ err = pci_register_driver(&usnic_ib_pci_driver);
+ if (err) {
usnic_err("Unable to register with PCI\n");
goto out_umem_fini;
}
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index bdb540f25a88..870b4f212fbc 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -873,7 +873,8 @@ bail_qpn:
free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num);
bail_rq_wq:
- vfree(qp->r_rq.wq);
+ if (!qp->ip)
+ vfree(qp->r_rq.wq);
bail_driver_priv:
rdi->driver_f.qp_priv_free(rdi, qp);
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index ba6be060a476..7914c14478cd 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -448,7 +448,7 @@ isert_alloc_login_buf(struct isert_conn *isert_conn,
isert_conn->login_rsp_buf = kzalloc(ISER_RX_PAYLOAD_SIZE, GFP_KERNEL);
if (!isert_conn->login_rsp_buf) {
- isert_err("Unable to allocate isert_conn->login_rspbuf\n");
+ ret = -ENOMEM;
goto out_unmap_login_req_buf;
}
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index dfa23b075a88..883bbfe08e0e 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -522,6 +522,11 @@ static int srpt_refresh_port(struct srpt_port *sport)
if (ret)
goto err_query_port;
+ snprintf(sport->port_guid, sizeof(sport->port_guid),
+ "0x%016llx%016llx",
+ be64_to_cpu(sport->gid.global.subnet_prefix),
+ be64_to_cpu(sport->gid.global.interface_id));
+
if (!sport->mad_agent) {
memset(&reg_req, 0, sizeof(reg_req));
reg_req.mgmt_class = IB_MGMT_CLASS_DEVICE_MGMT;
@@ -2548,10 +2553,6 @@ static void srpt_add_one(struct ib_device *device)
sdev->device->name, i);
goto err_ring;
}
- snprintf(sport->port_guid, sizeof(sport->port_guid),
- "0x%016llx%016llx",
- be64_to_cpu(sport->gid.global.subnet_prefix),
- be64_to_cpu(sport->gid.global.interface_id));
}
spin_lock(&srpt_dev_lock);
diff --git a/drivers/input/keyboard/tegra-kbc.c b/drivers/input/keyboard/tegra-kbc.c
index 7d61439be5f2..0c07e1023a46 100644
--- a/drivers/input/keyboard/tegra-kbc.c
+++ b/drivers/input/keyboard/tegra-kbc.c
@@ -376,7 +376,7 @@ static int tegra_kbc_start(struct tegra_kbc *kbc)
/* Reset the KBC controller to clear all previous status. */
reset_control_assert(kbc->rst);
udelay(100);
- reset_control_assert(kbc->rst);
+ reset_control_deassert(kbc->rst);
udelay(100);
tegra_kbc_config_pins(kbc);
diff --git a/drivers/input/rmi4/rmi_driver.c b/drivers/input/rmi4/rmi_driver.c
index faa295ec4f31..c83bce89028b 100644
--- a/drivers/input/rmi4/rmi_driver.c
+++ b/drivers/input/rmi4/rmi_driver.c
@@ -553,7 +553,6 @@ int rmi_read_register_desc(struct rmi_device *d, u16 addr,
goto free_struct_buff;
reg = find_first_bit(rdesc->presense_map, RMI_REG_DESC_PRESENSE_BITS);
- map_offset = 0;
for (i = 0; i < rdesc->num_registers; i++) {
struct rmi_register_desc_item *item = &rdesc->registers[i];
int reg_size = struct_buf[offset];
@@ -576,6 +575,8 @@ int rmi_read_register_desc(struct rmi_device *d, u16 addr,
item->reg = reg;
item->reg_size = reg_size;
+ map_offset = 0;
+
do {
for (b = 0; b < 7; b++) {
if (struct_buf[offset] & (0x1 << b))
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index b4d34086e73f..405252a884dd 100644
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -1305,6 +1305,7 @@ static int __init i8042_create_aux_port(int idx)
serio->write = i8042_aux_write;
serio->start = i8042_start;
serio->stop = i8042_stop;
+ serio->ps2_cmd_mutex = &i8042_mutex;
serio->port_data = port;
serio->dev.parent = &i8042_platform_device->dev;
if (idx < 0) {
diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c
index a61b2153ab8c..1ce3ecbe37f8 100644
--- a/drivers/input/touchscreen/ads7846.c
+++ b/drivers/input/touchscreen/ads7846.c
@@ -1473,7 +1473,6 @@ static int ads7846_remove(struct spi_device *spi)
ads784x_hwmon_unregister(spi, ts);
- regulator_disable(ts->reg);
regulator_put(ts->reg);
if (!ts->get_pendown_state) {
diff --git a/drivers/input/touchscreen/silead.c b/drivers/input/touchscreen/silead.c
index 7379fe153cf9..b2744a64e933 100644
--- a/drivers/input/touchscreen/silead.c
+++ b/drivers/input/touchscreen/silead.c
@@ -464,7 +464,7 @@ static int silead_ts_probe(struct i2c_client *client,
return -ENODEV;
/* Power GPIO pin */
- data->gpio_power = gpiod_get_optional(dev, "power", GPIOD_OUT_LOW);
+ data->gpio_power = devm_gpiod_get_optional(dev, "power", GPIOD_OUT_LOW);
if (IS_ERR(data->gpio_power)) {
if (PTR_ERR(data->gpio_power) != -EPROBE_DEFER)
dev_err(dev, "Shutdown GPIO request failed\n");
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index ce801170d5f2..641e88761319 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -879,7 +879,7 @@ static void arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu)
* We may have concurrent producers, so we need to be careful
* not to touch any of the shadow cmdq state.
*/
- queue_read(cmd, Q_ENT(q, idx), q->ent_dwords);
+ queue_read(cmd, Q_ENT(q, cons), q->ent_dwords);
dev_err(smmu->dev, "skipping command in error state:\n");
for (i = 0; i < ARRAY_SIZE(cmd); ++i)
dev_err(smmu->dev, "\t0x%016llx\n", (unsigned long long)cmd[i]);
@@ -890,7 +890,7 @@ static void arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu)
return;
}
- queue_write(cmd, Q_ENT(q, idx), q->ent_dwords);
+ queue_write(Q_ENT(q, cons), cmd, q->ent_dwords);
}
static void arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu,
@@ -1034,6 +1034,9 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
case STRTAB_STE_0_CFG_S2_TRANS:
ste_live = true;
break;
+ case STRTAB_STE_0_CFG_ABORT:
+ if (disable_bypass)
+ break;
default:
BUG(); /* STE corruption */
}
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 4f49fe29f202..2db74ebc3240 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -686,8 +686,7 @@ static struct iommu_gather_ops arm_smmu_gather_ops = {
static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
{
- int flags, ret;
- u32 fsr, fsynr, resume;
+ u32 fsr, fsynr;
unsigned long iova;
struct iommu_domain *domain = dev;
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
@@ -701,34 +700,15 @@ static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
if (!(fsr & FSR_FAULT))
return IRQ_NONE;
- if (fsr & FSR_IGN)
- dev_err_ratelimited(smmu->dev,
- "Unexpected context fault (fsr 0x%x)\n",
- fsr);
-
fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
- flags = fsynr & FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;
-
iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR);
- if (!report_iommu_fault(domain, smmu->dev, iova, flags)) {
- ret = IRQ_HANDLED;
- resume = RESUME_RETRY;
- } else {
- dev_err_ratelimited(smmu->dev,
- "Unhandled context fault: iova=0x%08lx, fsynr=0x%x, cb=%d\n",
- iova, fsynr, cfg->cbndx);
- ret = IRQ_NONE;
- resume = RESUME_TERMINATE;
- }
-
- /* Clear the faulting FSR */
- writel(fsr, cb_base + ARM_SMMU_CB_FSR);
- /* Retry or terminate any stalled transactions */
- if (fsr & FSR_SS)
- writel_relaxed(resume, cb_base + ARM_SMMU_CB_RESUME);
+ dev_err_ratelimited(smmu->dev,
+ "Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cb=%d\n",
+ fsr, iova, fsynr, cfg->cbndx);
- return ret;
+ writel(fsr, cb_base + ARM_SMMU_CB_FSR);
+ return IRQ_HANDLED;
}
static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
@@ -837,7 +817,7 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
}
/* SCTLR */
- reg = SCTLR_CFCFG | SCTLR_CFIE | SCTLR_CFRE | SCTLR_M | SCTLR_EAE_SBOP;
+ reg = SCTLR_CFIE | SCTLR_CFRE | SCTLR_M | SCTLR_EAE_SBOP;
if (stage1)
reg |= SCTLR_S1_ASIDPNE;
#ifdef __BIG_ENDIAN
diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c
index 8c6139986d7d..def8ca1c982d 100644
--- a/drivers/iommu/io-pgtable-arm-v7s.c
+++ b/drivers/iommu/io-pgtable-arm-v7s.c
@@ -286,12 +286,14 @@ static int arm_v7s_pte_to_prot(arm_v7s_iopte pte, int lvl)
int prot = IOMMU_READ;
arm_v7s_iopte attr = pte >> ARM_V7S_ATTR_SHIFT(lvl);
- if (attr & ARM_V7S_PTE_AP_RDONLY)
+ if (!(attr & ARM_V7S_PTE_AP_RDONLY))
prot |= IOMMU_WRITE;
if ((attr & (ARM_V7S_TEX_MASK << ARM_V7S_TEX_SHIFT)) == 0)
prot |= IOMMU_MMIO;
else if (pte & ARM_V7S_ATTR_C)
prot |= IOMMU_CACHE;
+ if (pte & ARM_V7S_ATTR_XN(lvl))
+ prot |= IOMMU_NOEXEC;
return prot;
}
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 7ceaba81efb4..36b9c28a5c91 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -1545,7 +1545,12 @@ static int its_force_quiescent(void __iomem *base)
u32 val;
val = readl_relaxed(base + GITS_CTLR);
- if (val & GITS_CTLR_QUIESCENT)
+ /*
+ * The GIC architecture specification requires the ITS to be both
+ * disabled and quiescent for writes to GITS_BASER<n> or
+ * GITS_CBASER to not have UNPREDICTABLE results.
+ */
+ if ((val & GITS_CTLR_QUIESCENT) && !(val & GITS_CTLR_ENABLE))
return 0;
/* Disable the generation of all interrupts to this ITS */
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 6fc56c3466b0..ede5672ab34d 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -667,13 +667,20 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
#endif
#ifdef CONFIG_CPU_PM
+/* Check whether the distributor has a single security state (GICD_CTLR.DS) */
+static bool gic_dist_security_disabled(void)
+{
+ return readl_relaxed(gic_data.dist_base + GICD_CTLR) & GICD_CTLR_DS;
+}
+
static int gic_cpu_pm_notifier(struct notifier_block *self,
unsigned long cmd, void *v)
{
if (cmd == CPU_PM_EXIT) {
- gic_enable_redist(true);
+ if (gic_dist_security_disabled())
+ gic_enable_redist(true);
gic_cpu_sys_reg_init();
- } else if (cmd == CPU_PM_ENTER) {
+ } else if (cmd == CPU_PM_ENTER && gic_dist_security_disabled()) {
gic_write_grpen1(0);
gic_enable_redist(false);
}
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index c2cab572c511..390fac59c6bc 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -769,6 +769,13 @@ static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
int cpu;
unsigned long flags, map = 0;
+ if (unlikely(nr_cpu_ids == 1)) {
+ /* Only one CPU? let's do a self-IPI... */
+ writel_relaxed(2 << 24 | irq,
+ gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);
+ return;
+ }
+
raw_spin_lock_irqsave(&irq_controller_lock, flags);
/* Convert our logical CPU mask into a physical one. */
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index c5f33c3bd228..83f498393a7f 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -713,9 +713,6 @@ static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq,
unsigned long flags;
int i;
- irq_set_chip_and_handler(virq, &gic_level_irq_controller,
- handle_level_irq);
-
spin_lock_irqsave(&gic_lock, flags);
gic_map_to_pin(intr, gic_cpu_pin);
gic_map_to_vpe(intr, mips_cm_vp_id(vpe));
@@ -732,6 +729,10 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
{
if (GIC_HWIRQ_TO_LOCAL(hw) < GIC_NUM_LOCAL_INTRS)
return gic_local_irq_domain_map(d, virq, hw);
+
+ irq_set_chip_and_handler(virq, &gic_level_irq_controller,
+ handle_level_irq);
+
return gic_shared_irq_domain_map(d, virq, hw, 0);
}
@@ -771,11 +772,13 @@ static int gic_irq_domain_alloc(struct irq_domain *d, unsigned int virq,
hwirq = GIC_SHARED_TO_HWIRQ(base_hwirq + i);
ret = irq_domain_set_hwirq_and_chip(d, virq + i, hwirq,
- &gic_edge_irq_controller,
+ &gic_level_irq_controller,
NULL);
if (ret)
goto error;
+ irq_set_handler(virq + i, handle_level_irq);
+
ret = gic_shared_irq_domain_map(d, virq + i, hwirq, cpu);
if (ret)
goto error;
@@ -890,10 +893,17 @@ void gic_dev_domain_free(struct irq_domain *d, unsigned int virq,
return;
}
+static void gic_dev_domain_activate(struct irq_domain *domain,
+ struct irq_data *d)
+{
+ gic_shared_irq_domain_map(domain, d->irq, d->hwirq, 0);
+}
+
static struct irq_domain_ops gic_dev_domain_ops = {
.xlate = gic_dev_domain_xlate,
.alloc = gic_dev_domain_alloc,
.free = gic_dev_domain_free,
+ .activate = gic_dev_domain_activate,
};
static int gic_ipi_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
diff --git a/drivers/macintosh/ams/ams-i2c.c b/drivers/macintosh/ams/ams-i2c.c
index 978eda8d6678..8a3ba565106f 100644
--- a/drivers/macintosh/ams/ams-i2c.c
+++ b/drivers/macintosh/ams/ams-i2c.c
@@ -73,7 +73,6 @@ MODULE_DEVICE_TABLE(i2c, ams_id);
static struct i2c_driver ams_i2c_driver = {
.driver = {
.name = "ams",
- .owner = THIS_MODULE,
},
.probe = ams_i2c_probe,
.remove = ams_i2c_remove,
diff --git a/drivers/macintosh/windfarm_pm112.c b/drivers/macintosh/windfarm_pm112.c
index 3024685e4cca..96d16fca68b2 100644
--- a/drivers/macintosh/windfarm_pm112.c
+++ b/drivers/macintosh/windfarm_pm112.c
@@ -668,7 +668,6 @@ static struct platform_driver wf_pm112_driver = {
.remove = wf_pm112_remove,
.driver = {
.name = "windfarm",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/macintosh/windfarm_pm72.c b/drivers/macintosh/windfarm_pm72.c
index 2f506b9d5a52..e88cfb36a74d 100644
--- a/drivers/macintosh/windfarm_pm72.c
+++ b/drivers/macintosh/windfarm_pm72.c
@@ -789,7 +789,6 @@ static struct platform_driver wf_pm72_driver = {
.remove = wf_pm72_remove,
.driver = {
.name = "windfarm",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/macintosh/windfarm_rm31.c b/drivers/macintosh/windfarm_rm31.c
index 82fc86a90c1a..bdfcb8a8bfbb 100644
--- a/drivers/macintosh/windfarm_rm31.c
+++ b/drivers/macintosh/windfarm_rm31.c
@@ -682,7 +682,6 @@ static struct platform_driver wf_rm31_driver = {
.remove = wf_rm31_remove,
.driver = {
.name = "windfarm",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 95a4ca6ce6ff..849ad441cd76 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -760,7 +760,8 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size,
if (!d->nr_stripes ||
d->nr_stripes > INT_MAX ||
d->nr_stripes > SIZE_MAX / sizeof(atomic_t)) {
- pr_err("nr_stripes too large");
+ pr_err("nr_stripes too large or invalid: %u (start sector beyond end of disk?)",
+ (unsigned)d->nr_stripes);
return -ENOMEM;
}
@@ -1820,7 +1821,7 @@ static int cache_alloc(struct cache *ca)
free = roundup_pow_of_two(ca->sb.nbuckets) >> 10;
if (!init_fifo(&ca->free[RESERVE_BTREE], 8, GFP_KERNEL) ||
- !init_fifo(&ca->free[RESERVE_PRIO], prio_buckets(ca), GFP_KERNEL) ||
+ !init_fifo_exact(&ca->free[RESERVE_PRIO], prio_buckets(ca), GFP_KERNEL) ||
!init_fifo(&ca->free[RESERVE_MOVINGGC], free, GFP_KERNEL) ||
!init_fifo(&ca->free[RESERVE_NONE], free, GFP_KERNEL) ||
!init_fifo(&ca->free_inc, free << 2, GFP_KERNEL) ||
@@ -1844,7 +1845,7 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page,
struct block_device *bdev, struct cache *ca)
{
char name[BDEVNAME_SIZE];
- const char *err = NULL;
+ const char *err = NULL; /* must be set for any error case */
int ret = 0;
memcpy(&ca->sb, sb, sizeof(struct cache_sb));
@@ -1861,8 +1862,13 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page,
ca->discard = CACHE_DISCARD(&ca->sb);
ret = cache_alloc(ca);
- if (ret != 0)
+ if (ret != 0) {
+ if (ret == -ENOMEM)
+ err = "cache_alloc(): -ENOMEM";
+ else
+ err = "cache_alloc(): unknown error";
goto err;
+ }
if (kobject_add(&ca->kobj, &part_to_dev(bdev->bd_part)->kobj, "bcache")) {
err = "error calling kobject_add";
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 4e9784b4e0ac..eedba67b0e3e 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -181,7 +181,7 @@ struct crypt_config {
u8 key[0];
};
-#define MIN_IOS 16
+#define MIN_IOS 64
static void clone_init(struct dm_crypt_io *, struct bio *);
static void kcryptd_queue_crypt(struct dm_crypt_io *io);
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
index 97e446d54a15..6a2e8dd44a1b 100644
--- a/drivers/md/dm-flakey.c
+++ b/drivers/md/dm-flakey.c
@@ -289,15 +289,13 @@ static int flakey_map(struct dm_target *ti, struct bio *bio)
pb->bio_submitted = true;
/*
- * Map reads as normal only if corrupt_bio_byte set.
+ * Error reads if neither corrupt_bio_byte nor drop_writes is set.
+ * Otherwise, flakey_end_io() will decide if the reads should be modified.
*/
if (bio_data_dir(bio) == READ) {
- /* If flags were specified, only corrupt those that match. */
- if (fc->corrupt_bio_byte && (fc->corrupt_bio_rw == READ) &&
- all_corrupt_bio_flags_match(bio, fc))
- goto map_bio;
- else
+ if (!fc->corrupt_bio_byte && !test_bit(DROP_WRITES, &fc->flags))
return -EIO;
+ goto map_bio;
}
/*
@@ -334,14 +332,21 @@ static int flakey_end_io(struct dm_target *ti, struct bio *bio, int error)
struct flakey_c *fc = ti->private;
struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));
- /*
- * Corrupt successful READs while in down state.
- */
if (!error && pb->bio_submitted && (bio_data_dir(bio) == READ)) {
- if (fc->corrupt_bio_byte)
+ if (fc->corrupt_bio_byte && (fc->corrupt_bio_rw == READ) &&
+ all_corrupt_bio_flags_match(bio, fc)) {
+ /*
+ * Corrupt successful matching READs while in down state.
+ */
corrupt_bio_data(bio, fc);
- else
+
+ } else if (!test_bit(DROP_WRITES, &fc->flags)) {
+ /*
+ * Error read during the down_interval if drop_writes
+ * wasn't configured.
+ */
return -EIO;
+ }
}
return error;
diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c
index 4ca2d1df5b44..07fc1ad42ec5 100644
--- a/drivers/md/dm-log.c
+++ b/drivers/md/dm-log.c
@@ -291,9 +291,10 @@ static void header_from_disk(struct log_header_core *core, struct log_header_dis
core->nr_regions = le64_to_cpu(disk->nr_regions);
}
-static int rw_header(struct log_c *lc, int rw)
+static int rw_header(struct log_c *lc, int op)
{
- lc->io_req.bi_op = rw;
+ lc->io_req.bi_op = op;
+ lc->io_req.bi_op_flags = 0;
return dm_io(&lc->io_req, 1, &lc->header_location, NULL);
}
@@ -316,7 +317,7 @@ static int read_header(struct log_c *log)
{
int r;
- r = rw_header(log, READ);
+ r = rw_header(log, REQ_OP_READ);
if (r)
return r;
@@ -630,7 +631,7 @@ static int disk_resume(struct dm_dirty_log *log)
header_to_disk(&lc->header, lc->disk_header);
/* write the new header */
- r = rw_header(lc, WRITE);
+ r = rw_header(lc, REQ_OP_WRITE);
if (!r) {
r = flush_header(lc);
if (r)
@@ -698,7 +699,7 @@ static int disk_flush(struct dm_dirty_log *log)
log_clear_bit(lc, lc->clean_bits, i);
}
- r = rw_header(lc, WRITE);
+ r = rw_header(lc, REQ_OP_WRITE);
if (r)
fail_log_device(lc);
else {
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 1b9795d75ef8..8abde6b8cedc 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -191,7 +191,6 @@ struct raid_dev {
#define RT_FLAG_RS_BITMAP_LOADED 2
#define RT_FLAG_UPDATE_SBS 3
#define RT_FLAG_RESHAPE_RS 4
-#define RT_FLAG_KEEP_RS_FROZEN 5
/* Array elements of 64 bit needed for rebuild/failed disk bits */
#define DISKS_ARRAY_ELEMS ((MAX_RAID_DEVICES + (sizeof(uint64_t) * 8 - 1)) / sizeof(uint64_t) / 8)
@@ -861,6 +860,9 @@ static int validate_region_size(struct raid_set *rs, unsigned long region_size)
{
unsigned long min_region_size = rs->ti->len / (1 << 21);
+ if (rs_is_raid0(rs))
+ return 0;
+
if (!region_size) {
/*
* Choose a reasonable default. All figures in sectors.
@@ -930,6 +932,8 @@ static int validate_raid_redundancy(struct raid_set *rs)
rebuild_cnt++;
switch (rs->raid_type->level) {
+ case 0:
+ break;
case 1:
if (rebuild_cnt >= rs->md.raid_disks)
goto too_many;
@@ -2335,6 +2339,13 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
case 0:
break;
default:
+ /*
+ * We have to keep any raid0 data/metadata device pairs or
+ * the MD raid0 personality will fail to start the array.
+ */
+ if (rs_is_raid0(rs))
+ continue;
+
dev = container_of(rdev, struct raid_dev, rdev);
if (dev->meta_dev)
dm_put_device(ti, dev->meta_dev);
@@ -2579,7 +2590,6 @@ static int rs_prepare_reshape(struct raid_set *rs)
} else {
/* Process raid1 without delta_disks */
mddev->raid_disks = rs->raid_disks;
- set_bit(RT_FLAG_KEEP_RS_FROZEN, &rs->runtime_flags);
reshape = false;
}
} else {
@@ -2590,7 +2600,6 @@ static int rs_prepare_reshape(struct raid_set *rs)
if (reshape) {
set_bit(RT_FLAG_RESHAPE_RS, &rs->runtime_flags);
set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
- set_bit(RT_FLAG_KEEP_RS_FROZEN, &rs->runtime_flags);
} else if (mddev->raid_disks < rs->raid_disks)
/* Create new superblocks and bitmaps, if any new disks */
set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
@@ -2902,7 +2911,6 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto bad;
set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
- set_bit(RT_FLAG_KEEP_RS_FROZEN, &rs->runtime_flags);
/* Takeover ain't recovery, so disable recovery */
rs_setup_recovery(rs, MaxSector);
rs_set_new(rs);
@@ -3386,21 +3394,28 @@ static void raid_postsuspend(struct dm_target *ti)
{
struct raid_set *rs = ti->private;
- if (test_and_clear_bit(RT_FLAG_RS_RESUMED, &rs->runtime_flags)) {
- if (!rs->md.suspended)
- mddev_suspend(&rs->md);
- rs->md.ro = 1;
- }
+ if (!rs->md.suspended)
+ mddev_suspend(&rs->md);
+
+ rs->md.ro = 1;
}
static void attempt_restore_of_faulty_devices(struct raid_set *rs)
{
int i;
- uint64_t failed_devices, cleared_failed_devices = 0;
+ uint64_t cleared_failed_devices[DISKS_ARRAY_ELEMS];
unsigned long flags;
+ bool cleared = false;
struct dm_raid_superblock *sb;
+ struct mddev *mddev = &rs->md;
struct md_rdev *r;
+ /* RAID personalities have to provide hot add/remove methods or we need to bail out. */
+ if (!mddev->pers || !mddev->pers->hot_add_disk || !mddev->pers->hot_remove_disk)
+ return;
+
+ memset(cleared_failed_devices, 0, sizeof(cleared_failed_devices));
+
for (i = 0; i < rs->md.raid_disks; i++) {
r = &rs->dev[i].rdev;
if (test_bit(Faulty, &r->flags) && r->sb_page &&
@@ -3420,7 +3435,7 @@ static void attempt_restore_of_faulty_devices(struct raid_set *rs)
* ourselves.
*/
if ((r->raid_disk >= 0) &&
- (r->mddev->pers->hot_remove_disk(r->mddev, r) != 0))
+ (mddev->pers->hot_remove_disk(mddev, r) != 0))
/* Failed to revive this device, try next */
continue;
@@ -3430,22 +3445,30 @@ static void attempt_restore_of_faulty_devices(struct raid_set *rs)
clear_bit(Faulty, &r->flags);
clear_bit(WriteErrorSeen, &r->flags);
clear_bit(In_sync, &r->flags);
- if (r->mddev->pers->hot_add_disk(r->mddev, r)) {
+ if (mddev->pers->hot_add_disk(mddev, r)) {
r->raid_disk = -1;
r->saved_raid_disk = -1;
r->flags = flags;
} else {
r->recovery_offset = 0;
- cleared_failed_devices |= 1 << i;
+ set_bit(i, (void *) cleared_failed_devices);
+ cleared = true;
}
}
}
- if (cleared_failed_devices) {
+
+ /* If any failed devices could be cleared, update all sbs failed_devices bits */
+ if (cleared) {
+ uint64_t failed_devices[DISKS_ARRAY_ELEMS];
+
rdev_for_each(r, &rs->md) {
sb = page_address(r->sb_page);
- failed_devices = le64_to_cpu(sb->failed_devices);
- failed_devices &= ~cleared_failed_devices;
- sb->failed_devices = cpu_to_le64(failed_devices);
+ sb_retrieve_failed_devices(sb, failed_devices);
+
+ for (i = 0; i < DISKS_ARRAY_ELEMS; i++)
+ failed_devices[i] &= ~cleared_failed_devices[i];
+
+ sb_update_failed_devices(sb, failed_devices);
}
}
}
@@ -3610,26 +3633,15 @@ static void raid_resume(struct dm_target *ti)
* devices are reachable again.
*/
attempt_restore_of_faulty_devices(rs);
- } else {
- mddev->ro = 0;
- mddev->in_sync = 0;
+ }
- /*
- * When passing in flags to the ctr, we expect userspace
- * to reset them because they made it to the superblocks
- * and reload the mapping anyway.
- *
- * -> only unfreeze recovery in case of a table reload or
- * we'll have a bogus recovery/reshape position
- * retrieved from the superblock by the ctr because
- * the ongoing recovery/reshape will change it after read.
- */
- if (!test_bit(RT_FLAG_KEEP_RS_FROZEN, &rs->runtime_flags))
- clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+ mddev->ro = 0;
+ mddev->in_sync = 0;
- if (mddev->suspended)
- mddev_resume(mddev);
- }
+ clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+
+ if (mddev->suspended)
+ mddev_resume(mddev);
}
static struct target_type raid_target = {
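The dm-raid hunks above widen the failed-devices mask from a single uint64_t to an array of words so sets with more than 64 legs can be tracked, clearing bits word by word when devices are revived. A minimal userspace sketch of that multi-word bitmap pattern, with hypothetical names standing in for the kernel helpers (a sketch, not the dm-raid implementation):

#include <stdint.h>
#include <stdbool.h>

#define MAX_DISKS 253                                /* assumption: > 64 legs */
#define DISKS_ARRAY_ELEMS ((MAX_DISKS + 63) / 64)

static void set_failed(uint64_t *map, int i)   { map[i / 64] |= 1ULL << (i % 64); }
static bool test_failed(const uint64_t *map, int i)
{
        return map[i / 64] & (1ULL << (i % 64));
}

/* Clear every bit in 'failed' that is set in 'cleared', word by word,
 * mirroring the sb_retrieve/sb_update loop in the patch. */
static void apply_cleared(uint64_t *failed, const uint64_t *cleared)
{
        for (int i = 0; i < DISKS_ARRAY_ELEMS; i++)
                failed[i] &= ~cleared[i];
}

int main(void)
{
        uint64_t failed[DISKS_ARRAY_ELEMS]  = { 0 };
        uint64_t cleared[DISKS_ARRAY_ELEMS] = { 0 };

        set_failed(failed, 70);          /* leg 70 lives in word 1 */
        set_failed(cleared, 70);
        apply_cleared(failed, cleared);
        return test_failed(failed, 70);  /* 0: bit was cleared */
}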
diff --git a/drivers/md/dm-round-robin.c b/drivers/md/dm-round-robin.c
index 4ace1da17db8..6c25213ab38c 100644
--- a/drivers/md/dm-round-robin.c
+++ b/drivers/md/dm-round-robin.c
@@ -210,14 +210,17 @@ static struct dm_path *rr_select_path(struct path_selector *ps, size_t nr_bytes)
struct path_info *pi = NULL;
struct dm_path *current_path = NULL;
+ local_irq_save(flags);
current_path = *this_cpu_ptr(s->current_path);
if (current_path) {
percpu_counter_dec(&s->repeat_count);
- if (percpu_counter_read_positive(&s->repeat_count) > 0)
+ if (percpu_counter_read_positive(&s->repeat_count) > 0) {
+ local_irq_restore(flags);
return current_path;
+ }
}
- spin_lock_irqsave(&s->lock, flags);
+ spin_lock(&s->lock);
if (!list_empty(&s->valid_paths)) {
pi = list_entry(s->valid_paths.next, struct path_info, list);
list_move_tail(&pi->list, &s->valid_paths);
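The dm-round-robin hunk above hoists interrupt disabling to the top of rr_select_path() so the common case, reusing the cached per-CPU path while repeat_count is still positive, never takes the spinlock; only path rotation pays for the lock. local_irq_save() has no userspace analogue, so the pthread sketch below keeps only the shape: a lock-free per-thread fast path and a locked rotation slow path (all names and the circular list are local stand-ins):

#include <pthread.h>
#include <stdatomic.h>

struct path { struct path *next; int id; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct path *valid_paths;                 /* circular list, set up by caller */
static _Thread_local struct path *current_path;  /* per-thread, like this_cpu_ptr */
static atomic_int repeat_count = 1000;

struct path *rr_select_path(void)
{
        /* Fast path: reuse the cached path without taking the lock. */
        if (current_path && atomic_fetch_sub(&repeat_count, 1) > 1)
                return current_path;

        /* Slow path: rotate under the lock, like list_move_tail(). */
        pthread_mutex_lock(&lock);
        if (valid_paths)
                valid_paths = valid_paths->next;
        current_path = valid_paths;
        atomic_store(&repeat_count, 1000);
        pthread_mutex_unlock(&lock);
        return current_path;
}

int main(void)
{
        struct path p = { .next = &p, .id = 0 };  /* one-node circular list */

        valid_paths = &p;
        return rr_select_path()->id;
}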
diff --git a/drivers/misc/cxl/vphb.c b/drivers/misc/cxl/vphb.c
index 7ada5f1b7bb6..3519acebfdab 100644
--- a/drivers/misc/cxl/vphb.c
+++ b/drivers/misc/cxl/vphb.c
@@ -230,6 +230,11 @@ int cxl_pci_vphb_add(struct cxl_afu *afu)
if (phb->bus == NULL)
return -ENXIO;
+ /* Set release hook on root bus */
+ pci_set_host_bridge_release(to_pci_host_bridge(phb->bus->bridge),
+ pcibios_free_controller_deferred,
+ (void *) phb);
+
/* Claim resources. This might need some rework as well depending on
* whether we are doing probe-only or not, like assigning unassigned
* resources etc...
@@ -256,7 +261,10 @@ void cxl_pci_vphb_remove(struct cxl_afu *afu)
afu->phb = NULL;
pci_remove_root_bus(phb->bus);
- pcibios_free_controller(phb);
+ /*
+ * We don't free phb here - that's handled by
+ * pcibios_free_controller_deferred()
+ */
}
static bool _cxl_pci_is_vphb_device(struct pci_controller *phb)
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 48a5dd740f3b..2206d4477dbb 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -1726,6 +1726,7 @@ static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request *req)
break;
if (req_op(next) == REQ_OP_DISCARD ||
+ req_op(next) == REQ_OP_SECURE_ERASE ||
req_op(next) == REQ_OP_FLUSH)
break;
@@ -2150,6 +2151,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
struct mmc_card *card = md->queue.card;
struct mmc_host *host = card->host;
unsigned long flags;
+ bool req_is_special = mmc_req_is_special(req);
if (req && !mq->mqrq_prev->req)
/* claim host only for the first request */
@@ -2190,8 +2192,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
}
out:
- if ((!req && !(mq->flags & MMC_QUEUE_NEW_REQUEST)) ||
- mmc_req_is_special(req))
+ if ((!req && !(mq->flags & MMC_QUEUE_NEW_REQUEST)) || req_is_special)
/*
* Release host when there are no more requests
* and after a special request (discard, flush) is done.
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index bf14642a576a..708057261b38 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -33,7 +33,8 @@ static int mmc_prep_request(struct request_queue *q, struct request *req)
/*
* We only like normal block requests and discards.
*/
- if (req->cmd_type != REQ_TYPE_FS && req_op(req) != REQ_OP_DISCARD) {
+ if (req->cmd_type != REQ_TYPE_FS && req_op(req) != REQ_OP_DISCARD &&
+ req_op(req) != REQ_OP_SECURE_ERASE) {
blk_dump_rq_flags(req, "MMC bad request");
return BLKPREP_KILL;
}
@@ -64,6 +65,8 @@ static int mmc_queue_thread(void *d)
spin_unlock_irq(q->queue_lock);
if (req || mq->mqrq_prev->req) {
+ bool req_is_special = mmc_req_is_special(req);
+
set_current_state(TASK_RUNNING);
mq->issue_fn(mq, req);
cond_resched();
@@ -79,7 +82,7 @@ static int mmc_queue_thread(void *d)
* has been finished. Do not assign it to previous
* request.
*/
- if (mmc_req_is_special(req))
+ if (req_is_special)
mq->mqrq_cur->req = NULL;
mq->mqrq_prev->brq.mrq.data = NULL;
diff --git a/drivers/mmc/card/queue.h b/drivers/mmc/card/queue.h
index d62531124d54..fee5e1271465 100644
--- a/drivers/mmc/card/queue.h
+++ b/drivers/mmc/card/queue.h
@@ -4,7 +4,9 @@
static inline bool mmc_req_is_special(struct request *req)
{
return req &&
- (req_op(req) == REQ_OP_FLUSH || req_op(req) == REQ_OP_DISCARD);
+ (req_op(req) == REQ_OP_FLUSH ||
+ req_op(req) == REQ_OP_DISCARD ||
+ req_op(req) == REQ_OP_SECURE_ERASE);
}
struct request;
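Both mmc hunks snapshot mmc_req_is_special(req) into a local bool before the request is issued, because issuing may complete and free the request, after which reading req would be a use-after-free. A small sketch of that snapshot-before-handoff pattern (hypothetical types, not the mmc code):

#include <stdbool.h>
#include <stdlib.h>

struct request { int op; };

static bool req_is_flush(const struct request *req)
{
        return req && req->op == 1;        /* hypothetical op code */
}

/* issue() may complete and free the request internally. */
static void issue(struct request *req) { free(req); }

static void handle(struct request *req)
{
        bool special = req_is_flush(req);  /* snapshot before issue() */

        issue(req);                        /* req may be freed here */

        if (special) {
                /* safe: no dereference of req after the handoff;
                 * release the host / reset the current slot here */
        }
}

int main(void)
{
        struct request *r = malloc(sizeof(*r));

        if (r) {
                r->op = 1;
                handle(r);
        }
        return 0;
}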
diff --git a/drivers/net/dsa/bcm_sf2.h b/drivers/net/dsa/bcm_sf2.h
index c4c12d9d6956..afe56686b3d7 100644
--- a/drivers/net/dsa/bcm_sf2.h
+++ b/drivers/net/dsa/bcm_sf2.h
@@ -145,8 +145,8 @@ static inline void name##_writeq(struct bcm_sf2_priv *priv, u64 val, \
static inline void intrl2_##which##_mask_clear(struct bcm_sf2_priv *priv, \
u32 mask) \
{ \
- intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_CLEAR); \
priv->irq##which##_mask &= ~(mask); \
+ intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_CLEAR); \
} \
static inline void intrl2_##which##_mask_set(struct bcm_sf2_priv *priv, \
u32 mask) \
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 750d01d775e0..4e697eea6e0f 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -2646,15 +2646,19 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
return ret;
}
+ /* Rate Control: disable ingress rate limiting. */
if (mv88e6xxx_6352_family(chip) || mv88e6xxx_6351_family(chip) ||
mv88e6xxx_6165_family(chip) || mv88e6xxx_6097_family(chip) ||
- mv88e6xxx_6185_family(chip) || mv88e6xxx_6095_family(chip) ||
mv88e6xxx_6320_family(chip)) {
- /* Rate Control: disable ingress rate limiting. */
ret = _mv88e6xxx_reg_write(chip, REG_PORT(port),
PORT_RATE_CONTROL, 0x0001);
if (ret)
return ret;
+ } else if (mv88e6xxx_6185_family(chip) || mv88e6xxx_6095_family(chip)) {
+ ret = _mv88e6xxx_reg_write(chip, REG_PORT(port),
+ PORT_RATE_CONTROL, 0x0000);
+ if (ret)
+ return ret;
}
/* Port Control 1: disable trunking, disable sending
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index c4d0026a684a..d29a4f3102d6 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -1612,6 +1612,8 @@ static const struct pci_device_id alx_pci_tbl[] = {
.driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
{ PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_E2400),
.driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
+ { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_E2500),
+ .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
{ PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8162),
.driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
{ PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8171) },
diff --git a/drivers/net/ethernet/atheros/alx/reg.h b/drivers/net/ethernet/atheros/alx/reg.h
index 0959e6824cb6..1fc2d852249f 100644
--- a/drivers/net/ethernet/atheros/alx/reg.h
+++ b/drivers/net/ethernet/atheros/alx/reg.h
@@ -38,6 +38,7 @@
#define ALX_DEV_ID_AR8161 0x1091
#define ALX_DEV_ID_E2200 0xe091
#define ALX_DEV_ID_E2400 0xe0a1
+#define ALX_DEV_ID_E2500 0xe0b1
#define ALX_DEV_ID_AR8162 0x1090
#define ALX_DEV_ID_AR8171 0x10A1
#define ALX_DEV_ID_AR8172 0x10A0
diff --git a/drivers/net/ethernet/broadcom/bgmac-bcma.c b/drivers/net/ethernet/broadcom/bgmac-bcma.c
index 3bc0a04df107..c16ec3a51876 100644
--- a/drivers/net/ethernet/broadcom/bgmac-bcma.c
+++ b/drivers/net/ethernet/broadcom/bgmac-bcma.c
@@ -161,7 +161,7 @@ static int bgmac_probe(struct bcma_device *core)
if (!bgmac_is_bcm4707_family(core) &&
!(ci->id == BCMA_CHIP_ID_BCM53573 && core->core_unit == 1)) {
mii_bus = bcma_mdio_mii_register(core, bgmac->phyaddr);
- if (!IS_ERR(mii_bus)) {
+ if (IS_ERR(mii_bus)) {
err = PTR_ERR(mii_bus);
goto err;
}
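The bgmac one-liner above fixes an inverted error test: bcma_mdio_mii_register() returns an ERR_PTR on failure, so the error branch must run when IS_ERR() is true, not false. A self-contained sketch of the ERR_PTR convention, with simplified re-implementations of the <linux/err.h> helpers (assuming the kernel's MAX_ERRNO of 4095):

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long err) { return (void *)err; }
static inline long PTR_ERR(const void *p) { return (long)p; }
static inline int IS_ERR(const void *p)
{
        return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

/* Stand-in for a registration call that fails with -ENODEV. */
static void *register_mii(void) { return ERR_PTR(-ENODEV); }

int main(void)
{
        void *mii = register_mii();

        if (IS_ERR(mii)) {              /* correct: branch on failure */
                fprintf(stderr, "mdio register failed: %ld\n", PTR_ERR(mii));
                return 1;
        }
        return 0;
}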
diff --git a/drivers/net/ethernet/cavium/thunder/nic_reg.h b/drivers/net/ethernet/cavium/thunder/nic_reg.h
index db9c632d0157..edf779f5a227 100644
--- a/drivers/net/ethernet/cavium/thunder/nic_reg.h
+++ b/drivers/net/ethernet/cavium/thunder/nic_reg.h
@@ -185,7 +185,6 @@
#define NIC_QSET_SQ_0_7_DOOR (0x010838)
#define NIC_QSET_SQ_0_7_STATUS (0x010840)
#define NIC_QSET_SQ_0_7_DEBUG (0x010848)
-#define NIC_QSET_SQ_0_7_CNM_CHG (0x010860)
#define NIC_QSET_SQ_0_7_STAT_0_1 (0x010900)
#define NIC_QSET_RBDR_0_1_CFG (0x010C00)
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
index d2d8ef270142..ad4fddb55421 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
@@ -382,7 +382,10 @@ static void nicvf_get_regs(struct net_device *dev,
p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_DOOR, q);
p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_STATUS, q);
p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_DEBUG, q);
- p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CNM_CHG, q);
+ /* Padding, was NIC_QSET_SQ_0_7_CNM_CHG, which
+ * produces bus errors when read
+ */
+ p[i++] = 0;
p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_STAT_0_1, q);
reg_offset = NIC_QSET_SQ_0_7_STAT_0_1 | (1 << 3);
p[i++] = nicvf_queue_reg_read(nic, reg_offset, q);
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index d20935dc8399..4b4f5bc0e279 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -2922,17 +2922,25 @@ static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus,
{
unsigned int size = lstatus & BD_LENGTH_MASK;
struct page *page = rxb->page;
+ bool last = !!(lstatus & BD_LFLAG(RXBD_LAST));
/* Remove the FCS from the packet length */
- if (likely(lstatus & BD_LFLAG(RXBD_LAST)))
+ if (last)
size -= ETH_FCS_LEN;
- if (likely(first))
+ if (likely(first)) {
skb_put(skb, size);
- else
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
- rxb->page_offset + RXBUF_ALIGNMENT,
- size, GFAR_RXB_TRUESIZE);
+ } else {
+ /* the last fragment's length contains the full frame length */
+ if (last)
+ size -= skb->len;
+
+ /* in case the last fragment consisted only of the FCS */
+ if (size > 0)
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
+ rxb->page_offset + RXBUF_ALIGNMENT,
+ size, GFAR_RXB_TRUESIZE);
+ }
/* try reuse page */
if (unlikely(page_count(page) != 1))
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
index 373fd094f2f3..6e8a9c8467b9 100644
--- a/drivers/net/ethernet/freescale/gianfar.h
+++ b/drivers/net/ethernet/freescale/gianfar.h
@@ -100,7 +100,8 @@ extern const char gfar_driver_version[];
#define DEFAULT_RX_LFC_THR 16
#define DEFAULT_LFC_PTVVAL 4
-#define GFAR_RXB_SIZE 1536
+/* prevent fragmentation by HW in DSA environments */
+#define GFAR_RXB_SIZE roundup(1536 + 8, 64)
#define GFAR_SKBFRAG_SIZE (RXBUF_ALIGNMENT + GFAR_RXB_SIZE \
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#define GFAR_RXB_TRUESIZE 2048
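In the gianfar hunks the last buffer descriptor reports the full frame length rather than the final fragment's own length, so the driver subtracts the bytes already accumulated in the skb plus the FCS, and skips the fragment entirely when only the FCS remained. The arithmetic, as a standalone sketch (names are local, not the driver's):

#include <stdio.h>

#define ETH_FCS_LEN 4

/* 'reported' is what the last BD carries: the total frame length.
 * 'accumulated' is skb->len so far from the earlier fragments. */
static int last_frag_payload(unsigned int reported, unsigned int accumulated)
{
        int size = (int)reported - ETH_FCS_LEN;  /* strip FCS */

        size -= (int)accumulated;                /* already in the skb */
        return size;                             /* 0: FCS-only fragment */
}

int main(void)
{
        printf("%d\n", last_frag_payload(1538, 1534));  /* -> 0    */
        printf("%d\n", last_frag_payload(3000, 1536));  /* -> 1460 */
        return 0;
}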
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
index ff8b6a468b24..6ea872287307 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
@@ -328,9 +328,10 @@ static void hns_ppe_init_hw(struct hns_ppe_cb *ppe_cb)
static void hns_ppe_uninit_hw(struct hns_ppe_cb *ppe_cb)
{
u32 port;
- struct dsaf_device *dsaf_dev = ppe_cb->ppe_common_cb->dsaf_dev;
if (ppe_cb->ppe_common_cb) {
+ struct dsaf_device *dsaf_dev = ppe_cb->ppe_common_cb->dsaf_dev;
+
port = ppe_cb->index;
dsaf_dev->misc_op->ppe_srst(dsaf_dev, port, 0);
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c
index 5404b32c9adf..250db0b244b7 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_client.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_client.c
@@ -209,6 +209,7 @@ void i40e_notify_client_of_l2_param_changes(struct i40e_vsi *vsi)
void i40e_notify_client_of_netdev_open(struct i40e_vsi *vsi)
{
struct i40e_client_instance *cdev;
+ int ret = 0;
if (!vsi)
return;
@@ -221,7 +222,14 @@ void i40e_notify_client_of_netdev_open(struct i40e_vsi *vsi)
"Cannot locate client instance open routine\n");
continue;
}
- cdev->client->ops->open(&cdev->lan_info, cdev->client);
+ if (!(test_bit(__I40E_CLIENT_INSTANCE_OPENED,
+ &cdev->state))) {
+ ret = cdev->client->ops->open(&cdev->lan_info,
+ cdev->client);
+ if (!ret)
+ set_bit(__I40E_CLIENT_INSTANCE_OPENED,
+ &cdev->state);
+ }
}
}
mutex_unlock(&i40e_client_instance_mutex);
@@ -432,12 +440,14 @@ struct i40e_vsi *i40e_vsi_lookup(struct i40e_pf *pf,
* i40e_client_add_instance - add a client instance struct to the instance list
* @pf: pointer to the board struct
* @client: pointer to a client struct in the client list.
+ * @existing: if there was already an existing instance
*
- * Returns cdev ptr on success, NULL on failure
+ * Returns cdev ptr on success or if already exists, NULL on failure
**/
static
struct i40e_client_instance *i40e_client_add_instance(struct i40e_pf *pf,
- struct i40e_client *client)
+ struct i40e_client *client,
+ bool *existing)
{
struct i40e_client_instance *cdev;
struct netdev_hw_addr *mac = NULL;
@@ -446,7 +456,7 @@ struct i40e_client_instance *i40e_client_add_instance(struct i40e_pf *pf,
mutex_lock(&i40e_client_instance_mutex);
list_for_each_entry(cdev, &i40e_client_instances, list) {
if ((cdev->lan_info.pf == pf) && (cdev->client == client)) {
- cdev = NULL;
+ *existing = true;
goto out;
}
}
@@ -530,6 +540,7 @@ void i40e_client_subtask(struct i40e_pf *pf)
{
struct i40e_client_instance *cdev;
struct i40e_client *client;
+ bool existing = false;
int ret = 0;
if (!(pf->flags & I40E_FLAG_SERVICE_CLIENT_REQUESTED))
@@ -553,18 +564,25 @@ void i40e_client_subtask(struct i40e_pf *pf)
/* check if L2 VSI is up, if not we are not ready */
if (test_bit(__I40E_DOWN, &pf->vsi[pf->lan_vsi]->state))
continue;
+ } else {
+ dev_warn(&pf->pdev->dev, "This client %s is being instantiated at probe\n",
+ client->name);
}
/* Add the client instance to the instance list */
- cdev = i40e_client_add_instance(pf, client);
+ cdev = i40e_client_add_instance(pf, client, &existing);
if (!cdev)
continue;
- /* Also up the ref_cnt of no. of instances of this client */
- atomic_inc(&client->ref_cnt);
- dev_info(&pf->pdev->dev, "Added instance of Client %s to PF%d bus=0x%02x func=0x%02x\n",
- client->name, pf->hw.pf_id,
- pf->hw.bus.device, pf->hw.bus.func);
+ if (!existing) {
+ /* Also up the ref_cnt for no. of instances of this
+ * client.
+ */
+ atomic_inc(&client->ref_cnt);
+ dev_info(&pf->pdev->dev, "Added instance of Client %s to PF%d bus=0x%02x func=0x%02x\n",
+ client->name, pf->hw.pf_id,
+ pf->hw.bus.device, pf->hw.bus.func);
+ }
mutex_lock(&i40e_client_instance_mutex);
/* Send an Open request to the client */
@@ -616,7 +634,8 @@ int i40e_lan_add_device(struct i40e_pf *pf)
pf->hw.pf_id, pf->hw.bus.device, pf->hw.bus.func);
/* Since in some cases register may have happened before a device gets
- * added, we can schedule a subtask to go initiate the clients.
+ * added, we can schedule a subtask to go initiate the clients if
+ * they can be launched at probe time.
*/
pf->flags |= I40E_FLAG_SERVICE_CLIENT_REQUESTED;
i40e_service_event_schedule(pf);
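The i40e_client hunks make opening a client instance idempotent: __I40E_CLIENT_INSTANCE_OPENED is tested before calling ops->open() and set only on success, so a repeated subtask pass will not re-open an instance and a failed open can be retried. The kernel does this with test_bit/set_bit under a mutex; the sketch below expresses the same open-at-most-once guard with a C11 compare-and-swap (an analogue, not the driver's locking):

#include <stdatomic.h>
#include <stdbool.h>

struct client_instance {
        atomic_bool opened;
};

static int client_open(struct client_instance *cdev) { (void)cdev; return 0; }

/* Open at most once; on failure leave the flag clear so a later pass retries. */
static void maybe_open(struct client_instance *cdev)
{
        bool expected = false;

        if (!atomic_compare_exchange_strong(&cdev->opened, &expected, true))
                return;                              /* already open */

        if (client_open(cdev) != 0)
                atomic_store(&cdev->opened, false);  /* allow retry */
}

int main(void)
{
        struct client_instance c = { 0 };

        maybe_open(&c);   /* opens */
        maybe_open(&c);   /* no-op: already open */
        return 0;
}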
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index f355c04ee4f7..fcdea29be4ee 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -5430,7 +5430,6 @@ int i40e_open(struct net_device *netdev)
wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16);
udp_tunnel_get_rx_info(netdev);
- i40e_notify_client_of_netdev_open(vsi);
return 0;
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 3095c24e80a9..77d3039283f6 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -2959,8 +2959,10 @@ s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
}
/* was that the last pool using this rar? */
- if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0)
+ if (mpsar_lo == 0 && mpsar_hi == 0 &&
+ rar != 0 && rar != hw->mac.san_mac_rar_index)
hw->mac.ops.clear_rar(hw, rar);
+
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index bb89f046fa27..1e639f886021 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -143,13 +143,14 @@ static struct mlx5_cmd_layout *get_inst(struct mlx5_cmd *cmd, int idx)
return cmd->cmd_buf + (idx << cmd->log_stride);
}
-static u8 xor8_buf(void *buf, int len)
+static u8 xor8_buf(void *buf, size_t offset, int len)
{
u8 *ptr = buf;
u8 sum = 0;
int i;
+ int end = len + offset;
- for (i = 0; i < len; i++)
+ for (i = offset; i < end; i++)
sum ^= ptr[i];
return sum;
@@ -157,41 +158,49 @@ static u8 xor8_buf(void *buf, int len)
static int verify_block_sig(struct mlx5_cmd_prot_block *block)
{
- if (xor8_buf(block->rsvd0, sizeof(*block) - sizeof(block->data) - 1) != 0xff)
+ size_t rsvd0_off = offsetof(struct mlx5_cmd_prot_block, rsvd0);
+ int xor_len = sizeof(*block) - sizeof(block->data) - 1;
+
+ if (xor8_buf(block, rsvd0_off, xor_len) != 0xff)
return -EINVAL;
- if (xor8_buf(block, sizeof(*block)) != 0xff)
+ if (xor8_buf(block, 0, sizeof(*block)) != 0xff)
return -EINVAL;
return 0;
}
-static void calc_block_sig(struct mlx5_cmd_prot_block *block, u8 token,
- int csum)
+static void calc_block_sig(struct mlx5_cmd_prot_block *block)
{
- block->token = token;
- if (csum) {
- block->ctrl_sig = ~xor8_buf(block->rsvd0, sizeof(*block) -
- sizeof(block->data) - 2);
- block->sig = ~xor8_buf(block, sizeof(*block) - 1);
- }
+ int ctrl_xor_len = sizeof(*block) - sizeof(block->data) - 2;
+ size_t rsvd0_off = offsetof(struct mlx5_cmd_prot_block, rsvd0);
+
+ block->ctrl_sig = ~xor8_buf(block, rsvd0_off, ctrl_xor_len);
+ block->sig = ~xor8_buf(block, 0, sizeof(*block) - 1);
}
-static void calc_chain_sig(struct mlx5_cmd_msg *msg, u8 token, int csum)
+static void calc_chain_sig(struct mlx5_cmd_msg *msg)
{
struct mlx5_cmd_mailbox *next = msg->next;
-
- while (next) {
- calc_block_sig(next->buf, token, csum);
+ int size = msg->len;
+ int blen = size - min_t(int, sizeof(msg->first.data), size);
+ int n = (blen + MLX5_CMD_DATA_BLOCK_SIZE - 1)
+ / MLX5_CMD_DATA_BLOCK_SIZE;
+ int i = 0;
+
+ for (i = 0; i < n && next; i++) {
+ calc_block_sig(next->buf);
next = next->next;
}
}
static void set_signature(struct mlx5_cmd_work_ent *ent, int csum)
{
- ent->lay->sig = ~xor8_buf(ent->lay, sizeof(*ent->lay));
- calc_chain_sig(ent->in, ent->token, csum);
- calc_chain_sig(ent->out, ent->token, csum);
+ ent->lay->sig = ~xor8_buf(ent->lay, 0, sizeof(*ent->lay));
+ if (csum) {
+ calc_chain_sig(ent->in);
+ calc_chain_sig(ent->out);
+ }
}
static void poll_timeout(struct mlx5_cmd_work_ent *ent)
@@ -222,12 +231,17 @@ static int verify_signature(struct mlx5_cmd_work_ent *ent)
struct mlx5_cmd_mailbox *next = ent->out->next;
int err;
u8 sig;
+ int size = ent->out->len;
+ int blen = size - min_t(int, sizeof(ent->out->first.data), size);
+ int n = (blen + MLX5_CMD_DATA_BLOCK_SIZE - 1)
+ / MLX5_CMD_DATA_BLOCK_SIZE;
+ int i = 0;
- sig = xor8_buf(ent->lay, sizeof(*ent->lay));
+ sig = xor8_buf(ent->lay, 0, sizeof(*ent->lay));
if (sig != 0xff)
return -EINVAL;
- while (next) {
+ for (i = 0; i < n && next; i++) {
err = verify_block_sig(next->buf);
if (err)
return err;
@@ -783,7 +797,6 @@ static void cmd_work_handler(struct work_struct *work)
spin_unlock_irqrestore(&cmd->alloc_lock, flags);
}
- ent->token = alloc_token(cmd);
cmd->ent_arr[ent->idx] = ent;
lay = get_inst(cmd, ent->idx);
ent->lay = lay;
@@ -883,7 +896,8 @@ static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
struct mlx5_cmd_msg *out, void *uout, int uout_size,
mlx5_cmd_cbk_t callback,
- void *context, int page_queue, u8 *status)
+ void *context, int page_queue, u8 *status,
+ u8 token)
{
struct mlx5_cmd *cmd = &dev->cmd;
struct mlx5_cmd_work_ent *ent;
@@ -900,6 +914,8 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
if (IS_ERR(ent))
return PTR_ERR(ent);
+ ent->token = token;
+
if (!callback)
init_completion(&ent->done);
@@ -971,7 +987,8 @@ static const struct file_operations fops = {
.write = dbg_write,
};
-static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, int size)
+static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, int size,
+ u8 token)
{
struct mlx5_cmd_prot_block *block;
struct mlx5_cmd_mailbox *next;
@@ -997,6 +1014,7 @@ static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, int size)
memcpy(block->data, from, copy);
from += copy;
size -= copy;
+ block->token = token;
next = next->next;
}
@@ -1066,7 +1084,8 @@ static void free_cmd_box(struct mlx5_core_dev *dev,
}
static struct mlx5_cmd_msg *mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev,
- gfp_t flags, int size)
+ gfp_t flags, int size,
+ u8 token)
{
struct mlx5_cmd_mailbox *tmp, *head = NULL;
struct mlx5_cmd_prot_block *block;
@@ -1095,6 +1114,7 @@ static struct mlx5_cmd_msg *mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev,
tmp->next = head;
block->next = cpu_to_be64(tmp->next ? tmp->next->dma : 0);
block->block_num = cpu_to_be32(n - i - 1);
+ block->token = token;
head = tmp;
}
msg->next = head;
@@ -1463,7 +1483,7 @@ static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size,
}
if (IS_ERR(msg))
- msg = mlx5_alloc_cmd_msg(dev, gfp, in_size);
+ msg = mlx5_alloc_cmd_msg(dev, gfp, in_size, 0);
return msg;
}
@@ -1483,6 +1503,7 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
int err;
u8 status = 0;
u32 drv_synd;
+ u8 token;
if (pci_channel_offline(dev->pdev) ||
dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
@@ -1503,20 +1524,22 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
return err;
}
- err = mlx5_copy_to_msg(inb, in, in_size);
+ token = alloc_token(&dev->cmd);
+
+ err = mlx5_copy_to_msg(inb, in, in_size, token);
if (err) {
mlx5_core_warn(dev, "err %d\n", err);
goto out_in;
}
- outb = mlx5_alloc_cmd_msg(dev, gfp, out_size);
+ outb = mlx5_alloc_cmd_msg(dev, gfp, out_size, token);
if (IS_ERR(outb)) {
err = PTR_ERR(outb);
goto out_in;
}
err = mlx5_cmd_invoke(dev, inb, outb, out, out_size, callback, context,
- pages_queue, &status);
+ pages_queue, &status, token);
if (err)
goto out_out;
@@ -1587,7 +1610,7 @@ static int create_msg_cache(struct mlx5_core_dev *dev)
INIT_LIST_HEAD(&cmd->cache.med.head);
for (i = 0; i < NUM_LONG_LISTS; i++) {
- msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, LONG_LIST_SIZE);
+ msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, LONG_LIST_SIZE, 0);
if (IS_ERR(msg)) {
err = PTR_ERR(msg);
goto ex_err;
@@ -1597,7 +1620,7 @@ static int create_msg_cache(struct mlx5_core_dev *dev)
}
for (i = 0; i < NUM_MED_LISTS; i++) {
- msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, MED_LIST_SIZE);
+ msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, MED_LIST_SIZE, 0);
if (IS_ERR(msg)) {
err = PTR_ERR(msg);
goto ex_err;
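The mlx5 command hunks rework xor8_buf() to take an explicit byte offset so the control signature can cover the region starting at rsvd0 without pointer arithmetic into the struct, and move token assignment to message allocation/copy time. The invariant verify_block_sig() checks is that XOR-ing every byte of a signed region, stored signature included, yields 0xff. A standalone sketch of that sign/verify round trip:

#include <stdint.h>
#include <stddef.h>
#include <assert.h>

static uint8_t xor8_buf(const void *buf, size_t offset, size_t len)
{
        const uint8_t *p = buf;
        uint8_t sum = 0;

        for (size_t i = offset; i < offset + len; i++)
                sum ^= p[i];
        return sum;
}

int main(void)
{
        uint8_t block[16] = { 1, 2, 3, 4, 5, 6, 7, 8 };
        size_t sig = sizeof(block) - 1;

        /* Sign: store the complement so the full XOR becomes 0xff. */
        block[sig] = (uint8_t)~xor8_buf(block, 0, sig);

        /* Verify: whole region, signature included, must XOR to 0xff. */
        assert(xor8_buf(block, 0, sizeof(block)) == 0xff);
        return 0;
}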
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 61902b147339..96995609f205 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -73,8 +73,12 @@
#define MLX5_MPWRQ_PAGES_PER_WQE BIT(MLX5_MPWRQ_WQE_PAGE_ORDER)
#define MLX5_MPWRQ_STRIDES_PER_PAGE (MLX5_MPWRQ_NUM_STRIDES >> \
MLX5_MPWRQ_WQE_PAGE_ORDER)
-#define MLX5_CHANNEL_MAX_NUM_MTTS (ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8) * \
- BIT(MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW))
+
+#define MLX5_MTT_OCTW(npages) (ALIGN(npages, 8) / 2)
+#define MLX5E_REQUIRED_MTTS(rqs, wqes)\
+ (rqs * wqes * ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8))
+#define MLX5E_VALID_NUM_MTTS(num_mtts) (MLX5_MTT_OCTW(num_mtts) <= U16_MAX)
+
#define MLX5_UMR_ALIGN (2048)
#define MLX5_MPWRQ_SMALL_PACKET_THRESHOLD (128)
@@ -219,9 +223,8 @@ struct mlx5e_tstamp {
};
enum {
- MLX5E_RQ_STATE_POST_WQES_ENABLE,
+ MLX5E_RQ_STATE_FLUSH,
MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS,
- MLX5E_RQ_STATE_FLUSH_TIMEOUT,
MLX5E_RQ_STATE_AM,
};
@@ -304,6 +307,7 @@ struct mlx5e_rq {
unsigned long state;
int ix;
+ u32 mpwqe_mtt_offset;
struct mlx5e_rx_am am; /* Adaptive Moderation */
@@ -365,9 +369,8 @@ struct mlx5e_sq_dma {
};
enum {
- MLX5E_SQ_STATE_WAKE_TXQ_ENABLE,
+ MLX5E_SQ_STATE_FLUSH,
MLX5E_SQ_STATE_BF_ENABLE,
- MLX5E_SQ_STATE_TX_TIMEOUT,
};
struct mlx5e_ico_wqe_info {
@@ -664,7 +667,6 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget);
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget);
int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget);
void mlx5e_free_tx_descs(struct mlx5e_sq *sq);
-void mlx5e_free_rx_descs(struct mlx5e_rq *rq);
void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
@@ -780,11 +782,6 @@ static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
MLX5E_MAX_NUM_CHANNELS);
}
-static inline int mlx5e_get_mtt_octw(int npages)
-{
- return ALIGN(npages, 8) / 2;
-}
-
extern const struct ethtool_ops mlx5e_ethtool_ops;
#ifdef CONFIG_MLX5_CORE_EN_DCB
extern const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
index 3247c3c543a0..029e856f72a0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
@@ -142,7 +142,7 @@ int mlx5e_refresh_tirs_self_loopback_enable(struct mlx5_core_dev *mdev)
struct mlx5e_tir *tir;
void *in;
int inlen;
- int err;
+ int err = 0;
inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
in = mlx5_vzalloc(inlen);
@@ -154,10 +154,11 @@ int mlx5e_refresh_tirs_self_loopback_enable(struct mlx5_core_dev *mdev)
list_for_each_entry(tir, &mdev->mlx5e_res.td.tirs_list, list) {
err = mlx5_core_modify_tir(mdev, tir->tirn, in, inlen);
if (err)
- return err;
+ goto out;
}
+out:
kvfree(in);
- return 0;
+ return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
index caa9a3ccc3f3..762af16ed021 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
@@ -127,29 +127,40 @@ int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets)
return mlx5_set_port_tc_bw_alloc(mdev, tc_tx_bw);
}
-static int mlx5e_dbcnl_validate_ets(struct ieee_ets *ets)
+static int mlx5e_dbcnl_validate_ets(struct net_device *netdev,
+ struct ieee_ets *ets)
{
int bw_sum = 0;
int i;
/* Validate Priority */
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
- if (ets->prio_tc[i] >= MLX5E_MAX_PRIORITY)
+ if (ets->prio_tc[i] >= MLX5E_MAX_PRIORITY) {
+ netdev_err(netdev,
+ "Failed to validate ETS: priority value greater than max(%d)\n",
+ MLX5E_MAX_PRIORITY);
return -EINVAL;
+ }
}
/* Validate Bandwidth Sum */
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) {
- if (!ets->tc_tx_bw[i])
+ if (!ets->tc_tx_bw[i]) {
+ netdev_err(netdev,
+ "Failed to validate ETS: BW 0 is illegal\n");
return -EINVAL;
+ }
bw_sum += ets->tc_tx_bw[i];
}
}
- if (bw_sum != 0 && bw_sum != 100)
+ if (bw_sum != 0 && bw_sum != 100) {
+ netdev_err(netdev,
+ "Failed to validate ETS: BW sum is illegal\n");
return -EINVAL;
+ }
return 0;
}
@@ -159,7 +170,7 @@ static int mlx5e_dcbnl_ieee_setets(struct net_device *netdev,
struct mlx5e_priv *priv = netdev_priv(netdev);
int err;
- err = mlx5e_dbcnl_validate_ets(ets);
+ err = mlx5e_dbcnl_validate_ets(netdev, ets);
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 9561fca494bf..d1cd1564e9b9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -352,15 +352,61 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev,
sq_stats_desc, j);
}
+static u32 mlx5e_rx_wqes_to_packets(struct mlx5e_priv *priv, int rq_wq_type,
+ int num_wqe)
+{
+ int packets_per_wqe;
+ int stride_size;
+ int num_strides;
+ int wqe_size;
+
+ if (rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
+ return num_wqe;
+
+ stride_size = 1 << priv->params.mpwqe_log_stride_sz;
+ num_strides = 1 << priv->params.mpwqe_log_num_strides;
+ wqe_size = stride_size * num_strides;
+
+ packets_per_wqe = wqe_size /
+ ALIGN(ETH_DATA_LEN, stride_size);
+ return (1 << (order_base_2(num_wqe * packets_per_wqe) - 1));
+}
+
+static u32 mlx5e_packets_to_rx_wqes(struct mlx5e_priv *priv, int rq_wq_type,
+ int num_packets)
+{
+ int packets_per_wqe;
+ int stride_size;
+ int num_strides;
+ int wqe_size;
+ int num_wqes;
+
+ if (rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
+ return num_packets;
+
+ stride_size = 1 << priv->params.mpwqe_log_stride_sz;
+ num_strides = 1 << priv->params.mpwqe_log_num_strides;
+ wqe_size = stride_size * num_strides;
+
+ num_packets = (1 << order_base_2(num_packets));
+
+ packets_per_wqe = wqe_size /
+ ALIGN(ETH_DATA_LEN, stride_size);
+ num_wqes = DIV_ROUND_UP(num_packets, packets_per_wqe);
+ return 1 << (order_base_2(num_wqes));
+}
+
static void mlx5e_get_ringparam(struct net_device *dev,
struct ethtool_ringparam *param)
{
struct mlx5e_priv *priv = netdev_priv(dev);
int rq_wq_type = priv->params.rq_wq_type;
- param->rx_max_pending = 1 << mlx5_max_log_rq_size(rq_wq_type);
+ param->rx_max_pending = mlx5e_rx_wqes_to_packets(priv, rq_wq_type,
+ 1 << mlx5_max_log_rq_size(rq_wq_type));
param->tx_max_pending = 1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE;
- param->rx_pending = 1 << priv->params.log_rq_size;
+ param->rx_pending = mlx5e_rx_wqes_to_packets(priv, rq_wq_type,
+ 1 << priv->params.log_rq_size);
param->tx_pending = 1 << priv->params.log_sq_size;
}
@@ -370,9 +416,13 @@ static int mlx5e_set_ringparam(struct net_device *dev,
struct mlx5e_priv *priv = netdev_priv(dev);
bool was_opened;
int rq_wq_type = priv->params.rq_wq_type;
+ u32 rx_pending_wqes;
+ u32 min_rq_size;
+ u32 max_rq_size;
u16 min_rx_wqes;
u8 log_rq_size;
u8 log_sq_size;
+ u32 num_mtts;
int err = 0;
if (param->rx_jumbo_pending) {
@@ -385,18 +435,36 @@ static int mlx5e_set_ringparam(struct net_device *dev,
__func__);
return -EINVAL;
}
- if (param->rx_pending < (1 << mlx5_min_log_rq_size(rq_wq_type))) {
+
+ min_rq_size = mlx5e_rx_wqes_to_packets(priv, rq_wq_type,
+ 1 << mlx5_min_log_rq_size(rq_wq_type));
+ max_rq_size = mlx5e_rx_wqes_to_packets(priv, rq_wq_type,
+ 1 << mlx5_max_log_rq_size(rq_wq_type));
+ rx_pending_wqes = mlx5e_packets_to_rx_wqes(priv, rq_wq_type,
+ param->rx_pending);
+
+ if (param->rx_pending < min_rq_size) {
netdev_info(dev, "%s: rx_pending (%d) < min (%d)\n",
__func__, param->rx_pending,
- 1 << mlx5_min_log_rq_size(rq_wq_type));
+ min_rq_size);
return -EINVAL;
}
- if (param->rx_pending > (1 << mlx5_max_log_rq_size(rq_wq_type))) {
+ if (param->rx_pending > max_rq_size) {
netdev_info(dev, "%s: rx_pending (%d) > max (%d)\n",
__func__, param->rx_pending,
- 1 << mlx5_max_log_rq_size(rq_wq_type));
+ max_rq_size);
return -EINVAL;
}
+
+ num_mtts = MLX5E_REQUIRED_MTTS(priv->params.num_channels,
+ rx_pending_wqes);
+ if (priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ &&
+ !MLX5E_VALID_NUM_MTTS(num_mtts)) {
+ netdev_info(dev, "%s: rx_pending (%d) request can't be satisfied, try to reduce.\n",
+ __func__, param->rx_pending);
+ return -EINVAL;
+ }
+
if (param->tx_pending < (1 << MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)) {
netdev_info(dev, "%s: tx_pending (%d) < min (%d)\n",
__func__, param->tx_pending,
@@ -410,9 +478,9 @@ static int mlx5e_set_ringparam(struct net_device *dev,
return -EINVAL;
}
- log_rq_size = order_base_2(param->rx_pending);
+ log_rq_size = order_base_2(rx_pending_wqes);
log_sq_size = order_base_2(param->tx_pending);
- min_rx_wqes = mlx5_min_rx_wqes(rq_wq_type, param->rx_pending);
+ min_rx_wqes = mlx5_min_rx_wqes(rq_wq_type, rx_pending_wqes);
if (log_rq_size == priv->params.log_rq_size &&
log_sq_size == priv->params.log_sq_size &&
@@ -454,6 +522,7 @@ static int mlx5e_set_channels(struct net_device *dev,
unsigned int count = ch->combined_count;
bool arfs_enabled;
bool was_opened;
+ u32 num_mtts;
int err = 0;
if (!count) {
@@ -472,6 +541,14 @@ static int mlx5e_set_channels(struct net_device *dev,
return -EINVAL;
}
+ num_mtts = MLX5E_REQUIRED_MTTS(count, BIT(priv->params.log_rq_size));
+ if (priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ &&
+ !MLX5E_VALID_NUM_MTTS(num_mtts)) {
+ netdev_info(dev, "%s: rx count (%d) request can't be satisfied, try to reduce.\n",
+ __func__, count);
+ return -EINVAL;
+ }
+
if (priv->params.num_channels == count)
return 0;
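For striding RQs the en_ethtool.c hunks convert between ring sizes in WQEs and in packets using packets_per_wqe = wqe_size / ALIGN(ETH_DATA_LEN, stride_size), rounding through powers of two so ethtool keeps reporting packet counts. The same arithmetic as a standalone sketch (ALIGN and order_base_2 are simplified local versions of the kernel macros):

#include <stdio.h>

#define ETH_DATA_LEN 1500
#define ALIGN(x, a)  (((x) + (a) - 1) / (a) * (a))

static unsigned int order_base_2(unsigned int n)  /* ceil(log2(n)) */
{
        unsigned int order = 0;

        while ((1u << order) < n)
                order++;
        return order;
}

int main(void)
{
        unsigned int stride_size = 1 << 6;       /* 64B strides */
        unsigned int num_strides = 1 << 11;      /* 2048 strides per WQE */
        unsigned int wqe_size = stride_size * num_strides;
        unsigned int per_wqe = wqe_size / ALIGN(ETH_DATA_LEN, stride_size);
        unsigned int num_wqe = 1 << 6;

        /* wqes -> packets, as in mlx5e_rx_wqes_to_packets() above */
        unsigned int pkts = 1u << (order_base_2(num_wqe * per_wqe) - 1);

        printf("%u wqes ~ %u packets (%u pkts/wqe)\n", num_wqe, pkts, per_wqe);
        return 0;
}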
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index fa404142320d..03586ee68fc4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -39,13 +39,6 @@
#include "eswitch.h"
#include "vxlan.h"
-enum {
- MLX5_EN_QP_FLUSH_TIMEOUT_MS = 5000,
- MLX5_EN_QP_FLUSH_MSLEEP_QUANT = 20,
- MLX5_EN_QP_FLUSH_MAX_ITER = MLX5_EN_QP_FLUSH_TIMEOUT_MS /
- MLX5_EN_QP_FLUSH_MSLEEP_QUANT,
-};
-
struct mlx5e_rq_param {
u32 rqc[MLX5_ST_SZ_DW(rqc)];
struct mlx5_wq_param wq;
@@ -162,6 +155,7 @@ static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
s->tx_queue_stopped += sq_stats->stopped;
s->tx_queue_wake += sq_stats->wake;
s->tx_queue_dropped += sq_stats->dropped;
+ s->tx_xmit_more += sq_stats->xmit_more;
s->tx_csum_partial_inner += sq_stats->csum_partial_inner;
tx_offload_none += sq_stats->csum_none;
}
@@ -337,6 +331,9 @@ static int mlx5e_create_rq(struct mlx5e_channel *c,
rq->alloc_wqe = mlx5e_alloc_rx_mpwqe;
rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;
+ rq->mpwqe_mtt_offset = c->ix *
+ MLX5E_REQUIRED_MTTS(1, BIT(priv->params.log_rq_size));
+
rq->mpwqe_stride_sz = BIT(priv->params.mpwqe_log_stride_sz);
rq->mpwqe_num_strides = BIT(priv->params.mpwqe_log_num_strides);
rq->wqe_sz = rq->mpwqe_stride_sz * rq->mpwqe_num_strides;
@@ -425,7 +422,6 @@ static int mlx5e_enable_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
MLX5_SET(rqc, rqc, cqn, rq->cq.mcq.cqn);
MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);
- MLX5_SET(rqc, rqc, flush_in_error_en, 1);
MLX5_SET(rqc, rqc, vsd, priv->params.vlan_strip_disable);
MLX5_SET(wq, wq, log_wq_pg_sz, rq->wq_ctrl.buf.page_shift -
MLX5_ADAPTER_PAGE_SHIFT);
@@ -523,6 +519,27 @@ static int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq)
return -ETIMEDOUT;
}
+static void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
+{
+ struct mlx5_wq_ll *wq = &rq->wq;
+ struct mlx5e_rx_wqe *wqe;
+ __be16 wqe_ix_be;
+ u16 wqe_ix;
+
+ /* UMR WQE (if in progress) is always at wq->head */
+ if (test_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state))
+ mlx5e_free_rx_fragmented_mpwqe(rq, &rq->wqe_info[wq->head]);
+
+ while (!mlx5_wq_ll_is_empty(wq)) {
+ wqe_ix_be = *wq->tail_next;
+ wqe_ix = be16_to_cpu(wqe_ix_be);
+ wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_ix);
+ rq->dealloc_wqe(rq, wqe_ix);
+ mlx5_wq_ll_pop(&rq->wq, wqe_ix_be,
+ &wqe->next.next_wqe_index);
+ }
+}
+
static int mlx5e_open_rq(struct mlx5e_channel *c,
struct mlx5e_rq_param *param,
struct mlx5e_rq *rq)
@@ -546,8 +563,6 @@ static int mlx5e_open_rq(struct mlx5e_channel *c,
if (param->am_enabled)
set_bit(MLX5E_RQ_STATE_AM, &c->rq.state);
- set_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state);
-
sq->ico_wqe_info[pi].opcode = MLX5_OPCODE_NOP;
sq->ico_wqe_info[pi].num_wqebbs = 1;
mlx5e_send_nop(sq, true); /* trigger mlx5e_post_rx_wqes() */
@@ -564,23 +579,8 @@ err_destroy_rq:
static void mlx5e_close_rq(struct mlx5e_rq *rq)
{
- int tout = 0;
- int err;
-
- clear_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state);
+ set_bit(MLX5E_RQ_STATE_FLUSH, &rq->state);
napi_synchronize(&rq->channel->napi); /* prevent mlx5e_post_rx_wqes */
-
- err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR);
- while (!mlx5_wq_ll_is_empty(&rq->wq) && !err &&
- tout++ < MLX5_EN_QP_FLUSH_MAX_ITER)
- msleep(MLX5_EN_QP_FLUSH_MSLEEP_QUANT);
-
- if (err || tout == MLX5_EN_QP_FLUSH_MAX_ITER)
- set_bit(MLX5E_RQ_STATE_FLUSH_TIMEOUT, &rq->state);
-
- /* avoid destroying rq before mlx5e_poll_rx_cq() is done with it */
- napi_synchronize(&rq->channel->napi);
-
cancel_work_sync(&rq->am.work);
mlx5e_disable_rq(rq);
@@ -819,7 +819,6 @@ static int mlx5e_open_sq(struct mlx5e_channel *c,
goto err_disable_sq;
if (sq->txq) {
- set_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state);
netdev_tx_reset_queue(sq->txq);
netif_tx_start_queue(sq->txq);
}
@@ -843,38 +842,20 @@ static inline void netif_tx_disable_queue(struct netdev_queue *txq)
static void mlx5e_close_sq(struct mlx5e_sq *sq)
{
- int tout = 0;
- int err;
+ set_bit(MLX5E_SQ_STATE_FLUSH, &sq->state);
+ /* prevent netif_tx_wake_queue */
+ napi_synchronize(&sq->channel->napi);
if (sq->txq) {
- clear_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state);
- /* prevent netif_tx_wake_queue */
- napi_synchronize(&sq->channel->napi);
netif_tx_disable_queue(sq->txq);
- /* ensure hw is notified of all pending wqes */
+ /* last doorbell out, godspeed .. */
if (mlx5e_sq_has_room_for(sq, 1))
mlx5e_send_nop(sq, true);
-
- err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY,
- MLX5_SQC_STATE_ERR, false, 0);
- if (err)
- set_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state);
- }
-
- /* wait till sq is empty, unless a TX timeout occurred on this SQ */
- while (sq->cc != sq->pc &&
- !test_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state)) {
- msleep(MLX5_EN_QP_FLUSH_MSLEEP_QUANT);
- if (tout++ > MLX5_EN_QP_FLUSH_MAX_ITER)
- set_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state);
}
- /* avoid destroying sq before mlx5e_poll_tx_cq() is done with it */
- napi_synchronize(&sq->channel->napi);
-
- mlx5e_free_tx_descs(sq);
mlx5e_disable_sq(sq);
+ mlx5e_free_tx_descs(sq);
mlx5e_destroy_sq(sq);
}
@@ -1824,10 +1805,6 @@ int mlx5e_open_locked(struct net_device *netdev)
netif_set_real_num_tx_queues(netdev, num_txqs);
netif_set_real_num_rx_queues(netdev, priv->params.num_channels);
- err = mlx5e_set_dev_port_mtu(netdev);
- if (err)
- goto err_clear_state_opened_flag;
-
err = mlx5e_open_channels(priv);
if (err) {
netdev_err(netdev, "%s: mlx5e_open_channels failed, %d\n",
@@ -2572,6 +2549,7 @@ static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
u16 max_mtu;
u16 min_mtu;
int err = 0;
+ bool reset;
mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
@@ -2587,13 +2565,18 @@ static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
mutex_lock(&priv->state_lock);
+ reset = !priv->params.lro_en &&
+ (priv->params.rq_wq_type !=
+ MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ);
+
was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
- if (was_opened)
+ if (was_opened && reset)
mlx5e_close_locked(netdev);
netdev->mtu = new_mtu;
+ mlx5e_set_dev_port_mtu(netdev);
- if (was_opened)
+ if (was_opened && reset)
err = mlx5e_open_locked(netdev);
mutex_unlock(&priv->state_lock);
@@ -2793,7 +2776,7 @@ static void mlx5e_tx_timeout(struct net_device *dev)
if (!netif_xmit_stopped(netdev_get_tx_queue(dev, i)))
continue;
sched_work = true;
- set_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state);
+ set_bit(MLX5E_SQ_STATE_FLUSH, &sq->state);
netdev_err(dev, "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x\n",
i, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc);
}
@@ -3227,7 +3210,8 @@ static void mlx5e_destroy_q_counter(struct mlx5e_priv *priv)
static int mlx5e_create_umr_mkey(struct mlx5e_priv *priv)
{
struct mlx5_core_dev *mdev = priv->mdev;
- u64 npages = priv->profile->max_nch(mdev) * MLX5_CHANNEL_MAX_NUM_MTTS;
+ u64 npages = MLX5E_REQUIRED_MTTS(priv->profile->max_nch(mdev),
+ BIT(MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW));
int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
void *mkc;
u32 *in;
@@ -3239,6 +3223,8 @@ static int mlx5e_create_umr_mkey(struct mlx5e_priv *priv)
mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
+ npages = min_t(u32, ALIGN(U16_MAX, 4) * 2, npages);
+
MLX5_SET(mkc, mkc, free, 1);
MLX5_SET(mkc, mkc, umr_en, 1);
MLX5_SET(mkc, mkc, lw, 1);
@@ -3249,7 +3235,7 @@ static int mlx5e_create_umr_mkey(struct mlx5e_priv *priv)
MLX5_SET(mkc, mkc, pd, mdev->mlx5e_res.pdn);
MLX5_SET64(mkc, mkc, len, npages << PAGE_SHIFT);
MLX5_SET(mkc, mkc, translations_octword_size,
- mlx5e_get_mtt_octw(npages));
+ MLX5_MTT_OCTW(npages));
MLX5_SET(mkc, mkc, log_page_size, PAGE_SHIFT);
err = mlx5_core_create_mkey(mdev, &priv->umr_mkey, in, inlen);
@@ -3385,6 +3371,7 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
queue_work(priv->wq, &priv->set_rx_mode_work);
if (MLX5_CAP_GEN(mdev, vport_group_manager)) {
+ mlx5_query_nic_vport_mac_address(mdev, 0, rep.hw_id);
rep.load = mlx5e_nic_rep_load;
rep.unload = mlx5e_nic_rep_unload;
rep.vport = 0;
@@ -3464,6 +3451,8 @@ void *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
mlx5e_init_l2_addr(priv);
+ mlx5e_set_dev_port_mtu(netdev);
+
err = register_netdev(netdev);
if (err) {
mlx5_core_err(mdev, "register_netdev failed, %d\n", err);
@@ -3502,16 +3491,20 @@ static void mlx5e_register_vport_rep(struct mlx5_core_dev *mdev)
struct mlx5_eswitch *esw = mdev->priv.eswitch;
int total_vfs = MLX5_TOTAL_VPORTS(mdev);
int vport;
+ u8 mac[ETH_ALEN];
if (!MLX5_CAP_GEN(mdev, vport_group_manager))
return;
+ mlx5_query_nic_vport_mac_address(mdev, 0, mac);
+
for (vport = 1; vport < total_vfs; vport++) {
struct mlx5_eswitch_rep rep;
rep.load = mlx5e_vport_rep_load;
rep.unload = mlx5e_vport_rep_unload;
rep.vport = vport;
+ ether_addr_copy(rep.hw_id, mac);
mlx5_eswitch_register_vport_rep(esw, &rep);
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 681c12c14005..29db4735182a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -135,17 +135,16 @@ static const struct ethtool_ops mlx5e_rep_ethtool_ops = {
int mlx5e_attr_get(struct net_device *dev, struct switchdev_attr *attr)
{
struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5_eswitch_rep *rep = priv->ppriv;
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- u8 mac[ETH_ALEN];
if (esw->mode == SRIOV_NONE)
return -EOPNOTSUPP;
switch (attr->id) {
case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
- mlx5_query_nic_vport_mac_address(priv->mdev, 0, mac);
attr->u.ppid.id_len = ETH_ALEN;
- memcpy(&attr->u.ppid.id, &mac, ETH_ALEN);
+ ether_addr_copy(attr->u.ppid.id, rep->hw_id);
break;
default:
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 9f2a16a507e0..b6f8ebbdb487 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -324,9 +324,9 @@ mlx5e_copy_skb_header_fragmented_mpwqe(struct device *pdev,
}
}
-static u16 mlx5e_get_wqe_mtt_offset(u16 rq_ix, u16 wqe_ix)
+static u32 mlx5e_get_wqe_mtt_offset(struct mlx5e_rq *rq, u16 wqe_ix)
{
- return rq_ix * MLX5_CHANNEL_MAX_NUM_MTTS +
+ return rq->mpwqe_mtt_offset +
wqe_ix * ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8);
}
@@ -340,7 +340,7 @@ static void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
struct mlx5_wqe_data_seg *dseg = &wqe->data;
struct mlx5e_mpw_info *wi = &rq->wqe_info[ix];
u8 ds_cnt = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS);
- u16 umr_wqe_mtt_offset = mlx5e_get_wqe_mtt_offset(rq->ix, ix);
+ u32 umr_wqe_mtt_offset = mlx5e_get_wqe_mtt_offset(rq, ix);
memset(wqe, 0, sizeof(*wqe));
cseg->opmod_idx_opcode =
@@ -353,9 +353,9 @@ static void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
ucseg->flags = MLX5_UMR_TRANSLATION_OFFSET_EN;
ucseg->klm_octowords =
- cpu_to_be16(mlx5e_get_mtt_octw(MLX5_MPWRQ_PAGES_PER_WQE));
+ cpu_to_be16(MLX5_MTT_OCTW(MLX5_MPWRQ_PAGES_PER_WQE));
ucseg->bsf_octowords =
- cpu_to_be16(mlx5e_get_mtt_octw(umr_wqe_mtt_offset));
+ cpu_to_be16(MLX5_MTT_OCTW(umr_wqe_mtt_offset));
ucseg->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
dseg->lkey = sq->mkey_be;
@@ -423,7 +423,7 @@ static int mlx5e_alloc_rx_fragmented_mpwqe(struct mlx5e_rq *rq,
{
struct mlx5e_mpw_info *wi = &rq->wqe_info[ix];
int mtt_sz = mlx5e_get_wqe_mtt_sz();
- u32 dma_offset = mlx5e_get_wqe_mtt_offset(rq->ix, ix) << PAGE_SHIFT;
+ u64 dma_offset = (u64)mlx5e_get_wqe_mtt_offset(rq, ix) << PAGE_SHIFT;
int i;
wi->umr.dma_info = kmalloc(sizeof(*wi->umr.dma_info) *
@@ -506,6 +506,12 @@ void mlx5e_post_rx_fragmented_mpwqe(struct mlx5e_rq *rq)
struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(wq, wq->head);
clear_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state);
+
+ if (unlikely(test_bit(MLX5E_RQ_STATE_FLUSH, &rq->state))) {
+ mlx5e_free_rx_fragmented_mpwqe(rq, &rq->wqe_info[wq->head]);
+ return;
+ }
+
mlx5_wq_ll_push(wq, be16_to_cpu(wqe->next.next_wqe_index));
rq->stats.mpwqe_frag++;
@@ -595,26 +601,9 @@ void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
wi->free_wqe(rq, wi);
}
-void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
-{
- struct mlx5_wq_ll *wq = &rq->wq;
- struct mlx5e_rx_wqe *wqe;
- __be16 wqe_ix_be;
- u16 wqe_ix;
-
- while (!mlx5_wq_ll_is_empty(wq)) {
- wqe_ix_be = *wq->tail_next;
- wqe_ix = be16_to_cpu(wqe_ix_be);
- wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_ix);
- rq->dealloc_wqe(rq, wqe_ix);
- mlx5_wq_ll_pop(&rq->wq, wqe_ix_be,
- &wqe->next.next_wqe_index);
- }
-}
-
#define RQ_CANNOT_POST(rq) \
- (!test_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state) || \
- test_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state))
+ (test_bit(MLX5E_RQ_STATE_FLUSH, &rq->state) || \
+ test_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state))
bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
{
@@ -916,7 +905,7 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq);
int work_done = 0;
- if (unlikely(test_bit(MLX5E_RQ_STATE_FLUSH_TIMEOUT, &rq->state)))
+ if (unlikely(test_bit(MLX5E_RQ_STATE_FLUSH, &rq->state)))
return 0;
if (cq->decmprs_left)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index 7b9d8a989b52..499487ce3b53 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -70,6 +70,7 @@ struct mlx5e_sw_stats {
u64 tx_queue_stopped;
u64 tx_queue_wake;
u64 tx_queue_dropped;
+ u64 tx_xmit_more;
u64 rx_wqe_err;
u64 rx_mpwqe_filler;
u64 rx_mpwqe_frag;
@@ -101,6 +102,7 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_stopped) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_dropped) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xmit_more) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_wqe_err) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_frag) },
@@ -298,6 +300,7 @@ struct mlx5e_sq_stats {
/* commonly accessed in data path */
u64 packets;
u64 bytes;
+ u64 xmit_more;
u64 tso_packets;
u64 tso_bytes;
u64 tso_inner_packets;
@@ -324,6 +327,7 @@ static const struct counter_desc sq_stats_desc[] = {
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, stopped) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, wake) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, dropped) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, xmit_more) },
};
#define NUM_SW_COUNTERS ARRAY_SIZE(sw_stats_desc)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index dc8b1cb0fdc8..22cfc4ac1837 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -170,7 +170,7 @@ static int parse_cls_flower(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec
if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
struct flow_dissector_key_control *key =
skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_BASIC,
+ FLOW_DISSECTOR_KEY_CONTROL,
f->key);
addr_type = key->addr_type;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index e073bf59890d..988eca99ee0f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -375,6 +375,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
sq->stats.packets++;
sq->stats.bytes += num_bytes;
+ sq->stats.xmit_more += skb->xmit_more;
return NETDEV_TX_OK;
dma_unmap_wqe_err:
@@ -394,35 +395,6 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
return mlx5e_sq_xmit(sq, skb);
}
-void mlx5e_free_tx_descs(struct mlx5e_sq *sq)
-{
- struct mlx5e_tx_wqe_info *wi;
- struct sk_buff *skb;
- u16 ci;
- int i;
-
- while (sq->cc != sq->pc) {
- ci = sq->cc & sq->wq.sz_m1;
- skb = sq->skb[ci];
- wi = &sq->wqe_info[ci];
-
- if (!skb) { /* nop */
- sq->cc++;
- continue;
- }
-
- for (i = 0; i < wi->num_dma; i++) {
- struct mlx5e_sq_dma *dma =
- mlx5e_dma_get(sq, sq->dma_fifo_cc++);
-
- mlx5e_tx_dma_unmap(sq->pdev, dma);
- }
-
- dev_kfree_skb_any(skb);
- sq->cc += wi->num_wqebbs;
- }
-}
-
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
{
struct mlx5e_sq *sq;
@@ -434,7 +406,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
sq = container_of(cq, struct mlx5e_sq, cq);
- if (unlikely(test_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state)))
+ if (unlikely(test_bit(MLX5E_SQ_STATE_FLUSH, &sq->state)))
return false;
npkts = 0;
@@ -512,11 +484,39 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
netdev_tx_completed_queue(sq->txq, npkts, nbytes);
if (netif_tx_queue_stopped(sq->txq) &&
- mlx5e_sq_has_room_for(sq, MLX5E_SQ_STOP_ROOM) &&
- likely(test_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state))) {
- netif_tx_wake_queue(sq->txq);
- sq->stats.wake++;
+ mlx5e_sq_has_room_for(sq, MLX5E_SQ_STOP_ROOM)) {
+ netif_tx_wake_queue(sq->txq);
+ sq->stats.wake++;
}
return (i == MLX5E_TX_CQ_POLL_BUDGET);
}
+
+void mlx5e_free_tx_descs(struct mlx5e_sq *sq)
+{
+ struct mlx5e_tx_wqe_info *wi;
+ struct sk_buff *skb;
+ u16 ci;
+ int i;
+
+ while (sq->cc != sq->pc) {
+ ci = sq->cc & sq->wq.sz_m1;
+ skb = sq->skb[ci];
+ wi = &sq->wqe_info[ci];
+
+ if (!skb) { /* nop */
+ sq->cc++;
+ continue;
+ }
+
+ for (i = 0; i < wi->num_dma; i++) {
+ struct mlx5e_sq_dma *dma =
+ mlx5e_dma_get(sq, sq->dma_fifo_cc++);
+
+ mlx5e_tx_dma_unmap(sq->pdev, dma);
+ }
+
+ dev_kfree_skb_any(skb);
+ sq->cc += wi->num_wqebbs;
+ }
+}
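The relocated mlx5e_free_tx_descs() drains everything between the consumer counter (cc) and producer counter (pc) once the SQ is disabled: NOP slots advance by one, real entries unmap their DMA fragments and advance by the number of WQE basic blocks they occupied. A minimal sketch of that counter-and-mask ring drain (sizes and fields are illustrative):

#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 8                       /* power of two */
#define SZ_M1     (RING_SIZE - 1)

struct wqe_info { int num_wqebbs; int is_nop; };

static struct wqe_info wqe[RING_SIZE];

static void drain(uint16_t *cc, uint16_t pc)
{
        while (*cc != pc) {
                struct wqe_info *wi = &wqe[*cc & SZ_M1];

                if (wi->is_nop) {         /* nop: a single slot */
                        (*cc)++;
                        continue;
                }
                /* real entry: unmap/free here, then skip its blocks */
                printf("free entry at %u (%d wqebbs)\n",
                       *cc & SZ_M1, wi->num_wqebbs);
                *cc += wi->num_wqebbs;
        }
}

int main(void)
{
        uint16_t cc = 0, pc = 4;

        wqe[0] = (struct wqe_info){ .num_wqebbs = 2 };
        wqe[2] = (struct wqe_info){ .is_nop = 1 };
        wqe[3] = (struct wqe_info){ .num_wqebbs = 1 };
        drain(&cc, pc);
        return 0;
}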
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
index 64ae2e800daa..9bf33bb69210 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
@@ -51,16 +51,18 @@ struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq)
static void mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
{
+ struct mlx5e_sq *sq = container_of(cq, struct mlx5e_sq, cq);
struct mlx5_wq_cyc *wq;
struct mlx5_cqe64 *cqe;
- struct mlx5e_sq *sq;
u16 sqcc;
+ if (unlikely(test_bit(MLX5E_SQ_STATE_FLUSH, &sq->state)))
+ return;
+
cqe = mlx5e_get_cqe(cq);
if (likely(!cqe))
return;
- sq = container_of(cq, struct mlx5e_sq, cq);
wq = &sq->wq;
/* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 7c4935993b99..101430571d6d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -1421,7 +1421,8 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num,
esw_debug(esw->dev, "Enabling VPORT(%d)\n", vport_num);
- if (vport_num) { /* Only VFs need ACLs for VST and spoofchk filtering */
+ /* Only VFs need ACLs for VST and spoofchk filtering */
+ if (vport_num && esw->mode == SRIOV_LEGACY) {
esw_vport_ingress_config(esw, vport);
esw_vport_egress_config(esw, vport);
}
@@ -1472,7 +1473,7 @@ static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num)
*/
esw_vport_change_handle_locked(vport);
vport->enabled_events = 0;
- if (vport_num) {
+ if (vport_num && esw->mode == SRIOV_LEGACY) {
esw_vport_disable_egress_acl(esw, vport);
esw_vport_disable_ingress_acl(esw, vport);
}
@@ -1737,7 +1738,7 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
vport, err);
mutex_lock(&esw->state_lock);
- if (evport->enabled)
+ if (evport->enabled && esw->mode == SRIOV_LEGACY)
err = esw_vport_ingress_config(esw, evport);
mutex_unlock(&esw->state_lock);
return err;
@@ -1809,7 +1810,7 @@ int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
mutex_lock(&esw->state_lock);
evport->vlan = vlan;
evport->qos = qos;
- if (evport->enabled) {
+ if (evport->enabled && esw->mode == SRIOV_LEGACY) {
err = esw_vport_ingress_config(esw, evport);
if (err)
goto out;
@@ -1838,10 +1839,11 @@ int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
mutex_lock(&esw->state_lock);
pschk = evport->spoofchk;
evport->spoofchk = spoofchk;
- if (evport->enabled)
+ if (evport->enabled && esw->mode == SRIOV_LEGACY) {
err = esw_vport_ingress_config(esw, evport);
- if (err)
- evport->spoofchk = pschk;
+ if (err)
+ evport->spoofchk = pschk;
+ }
mutex_unlock(&esw->state_lock);
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index c0b05603fc31..a96140971d77 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -174,6 +174,7 @@ struct mlx5_eswitch_rep {
void *priv_data;
struct list_head vport_sqs_list;
bool valid;
+ u8 hw_id[ETH_ALEN];
};
struct mlx5_esw_offload {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index a357e8eeeed8..3dc83a9459a4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -113,7 +113,7 @@ mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn
dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
dest.vport_num = vport;
- flow_rule = mlx5_add_flow_rule(esw->fdb_table.fdb, spec,
+ flow_rule = mlx5_add_flow_rule(esw->fdb_table.offloads.fdb, spec,
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
0, &dest);
if (IS_ERR(flow_rule))
@@ -535,7 +535,7 @@ void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports)
esw_destroy_offloads_fdb_table(esw);
}
-static int mlx5_esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
+static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
{
switch (mode) {
case DEVLINK_ESWITCH_MODE_LEGACY:
@@ -551,6 +551,22 @@ static int mlx5_esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
return 0;
}
+static int esw_mode_to_devlink(u16 mlx5_mode, u16 *mode)
+{
+ switch (mlx5_mode) {
+ case SRIOV_LEGACY:
+ *mode = DEVLINK_ESWITCH_MODE_LEGACY;
+ break;
+ case SRIOV_OFFLOADS:
+ *mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode)
{
struct mlx5_core_dev *dev;
@@ -566,7 +582,7 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode)
if (cur_mlx5_mode == SRIOV_NONE)
return -EOPNOTSUPP;
- if (mlx5_esw_mode_from_devlink(mode, &mlx5_mode))
+ if (esw_mode_from_devlink(mode, &mlx5_mode))
return -EINVAL;
if (cur_mlx5_mode == mlx5_mode)
@@ -592,9 +608,7 @@ int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
if (dev->priv.eswitch->mode == SRIOV_NONE)
return -EOPNOTSUPP;
- *mode = dev->priv.eswitch->mode;
-
- return 0;
+ return esw_mode_to_devlink(dev->priv.eswitch->mode, mode);
}
void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index f5571a5a57c5..5da2cc878582 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -80,7 +80,7 @@
LEFTOVERS_NUM_PRIOS)
#define ETHTOOL_PRIO_NUM_LEVELS 1
-#define ETHTOOL_NUM_PRIOS 10
+#define ETHTOOL_NUM_PRIOS 11
#define ETHTOOL_MIN_LEVEL (KERNEL_MIN_LEVEL + ETHTOOL_NUM_PRIOS)
/* Vlan, mac, ttc, aRFS */
#define KERNEL_NIC_PRIO_NUM_LEVELS 4
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
index c2877e9de8a1..3a9195b4169d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
@@ -126,12 +126,21 @@ static struct rb_node *mlx5_fc_stats_query(struct mlx5_core_dev *dev,
for (node = &first->node; node; node = rb_next(node)) {
struct mlx5_fc *counter = rb_entry(node, struct mlx5_fc, node);
struct mlx5_fc_cache *c = &counter->cache;
+ u64 packets;
+ u64 bytes;
if (counter->id > last_id)
break;
mlx5_cmd_fc_bulk_get(dev, b,
- counter->id, &c->packets, &c->bytes);
+ counter->id, &packets, &bytes);
+
+ if (c->packets == packets)
+ continue;
+
+ c->packets = packets;
+ c->bytes = bytes;
+ c->lastuse = jiffies;
}
out:
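
The counter-cache hunk only bumps lastuse when the packet count actually changed, so consumers (e.g. flow aging) see how long a rule has truly been idle. A small userspace model of that update rule, with a plain integer standing in for jiffies and illustrative names:

#include <stdint.h>
#include <stdio.h>

struct fc_cache {
	uint64_t packets;
	uint64_t bytes;
	uint64_t lastuse;	/* stand-in for jiffies */
};

/* Refresh the cache only when traffic was seen since the last query. */
static void fc_cache_update(struct fc_cache *c, uint64_t packets,
			    uint64_t bytes, uint64_t now)
{
	if (c->packets == packets)
		return;		/* no new traffic: keep the old lastuse */

	c->packets = packets;
	c->bytes = bytes;
	c->lastuse = now;
}

int main(void)
{
	struct fc_cache c = { 0 };

	fc_cache_update(&c, 10, 1000, 5);	/* traffic seen at t=5 */
	fc_cache_update(&c, 10, 1000, 9);	/* idle: lastuse stays 5 */
	printf("lastuse=%llu\n", (unsigned long long)c.lastuse);
	return 0;
}
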
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 28e3ce690e3f..c132ef1faefe 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -1417,36 +1417,12 @@ static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,
dev_info(&pdev->dev, "%s was called\n", __func__);
mlx5_enter_error_state(dev);
mlx5_unload_one(dev, priv);
+ pci_save_state(pdev);
mlx5_pci_disable_device(dev);
return state == pci_channel_io_perm_failure ?
PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
}
-static pci_ers_result_t mlx5_pci_slot_reset(struct pci_dev *pdev)
-{
- struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
- int err = 0;
-
- dev_info(&pdev->dev, "%s was called\n", __func__);
-
- err = mlx5_pci_enable_device(dev);
- if (err) {
- dev_err(&pdev->dev, "%s: mlx5_pci_enable_device failed with error code: %d\n"
- , __func__, err);
- return PCI_ERS_RESULT_DISCONNECT;
- }
- pci_set_master(pdev);
- pci_set_power_state(pdev, PCI_D0);
- pci_restore_state(pdev);
-
- return err ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
-}
-
-void mlx5_disable_device(struct mlx5_core_dev *dev)
-{
- mlx5_pci_err_detected(dev->pdev, 0);
-}
-
/* wait for the device to show vital signs by waiting
* for the health counter to start counting.
*/
@@ -1474,21 +1450,44 @@ static int wait_vital(struct pci_dev *pdev)
return -ETIMEDOUT;
}
-static void mlx5_pci_resume(struct pci_dev *pdev)
+static pci_ers_result_t mlx5_pci_slot_reset(struct pci_dev *pdev)
{
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
- struct mlx5_priv *priv = &dev->priv;
int err;
dev_info(&pdev->dev, "%s was called\n", __func__);
- pci_save_state(pdev);
- err = wait_vital(pdev);
+ err = mlx5_pci_enable_device(dev);
if (err) {
+ dev_err(&pdev->dev, "%s: mlx5_pci_enable_device failed with error code: %d\n"
+ , __func__, err);
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ pci_set_master(pdev);
+ pci_restore_state(pdev);
+
+ if (wait_vital(pdev)) {
dev_err(&pdev->dev, "%s: wait_vital timed out\n", __func__);
- return;
+ return PCI_ERS_RESULT_DISCONNECT;
}
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+void mlx5_disable_device(struct mlx5_core_dev *dev)
+{
+ mlx5_pci_err_detected(dev->pdev, 0);
+}
+
+static void mlx5_pci_resume(struct pci_dev *pdev)
+{
+ struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
+ struct mlx5_priv *priv = &dev->priv;
+ int err;
+
+ dev_info(&pdev->dev, "%s was called\n", __func__);
+
err = mlx5_load_one(dev, priv);
if (err)
dev_err(&pdev->dev, "%s: mlx5_load_one failed with error code: %d\n"
diff --git a/drivers/net/ethernet/mellanox/mlxsw/port.h b/drivers/net/ethernet/mellanox/mlxsw/port.h
index f33b997f2b61..af371a82c35b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/port.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/port.h
@@ -56,6 +56,7 @@
#define MLXSW_PORT_PHY_BITS_MASK (MLXSW_PORT_MAX_PHY_PORTS - 1)
#define MLXSW_PORT_CPU_PORT 0x0
+#define MLXSW_PORT_ROUTER_PORT (MLXSW_PORT_MAX_PHY_PORTS + 2)
#define MLXSW_PORT_DONT_CARE (MLXSW_PORT_MAX_PORTS)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 8c9c124724a6..6c6b726c4897 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -3248,6 +3248,39 @@ static struct mlxsw_sp_fid *mlxsw_sp_bridge_fid_get(struct mlxsw_sp *mlxsw_sp,
return mlxsw_sp_fid_find(mlxsw_sp, fid);
}
+static enum mlxsw_flood_table_type mlxsw_sp_flood_table_type_get(u16 fid)
+{
+ return mlxsw_sp_fid_is_vfid(fid) ? MLXSW_REG_SFGC_TABLE_TYPE_FID :
+ MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;
+}
+
+static u16 mlxsw_sp_flood_table_index_get(u16 fid)
+{
+ return mlxsw_sp_fid_is_vfid(fid) ? mlxsw_sp_fid_to_vfid(fid) : fid;
+}
+
+static int mlxsw_sp_router_port_flood_set(struct mlxsw_sp *mlxsw_sp, u16 fid,
+ bool set)
+{
+ enum mlxsw_flood_table_type table_type;
+ char *sftr_pl;
+ u16 index;
+ int err;
+
+ sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
+ if (!sftr_pl)
+ return -ENOMEM;
+
+ table_type = mlxsw_sp_flood_table_type_get(fid);
+ index = mlxsw_sp_flood_table_index_get(fid);
+ mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BM, index, table_type,
+ 1, MLXSW_PORT_ROUTER_PORT, set);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
+
+ kfree(sftr_pl);
+ return err;
+}
+
static enum mlxsw_reg_ritr_if_type mlxsw_sp_rif_type_get(u16 fid)
{
if (mlxsw_sp_fid_is_vfid(fid))
@@ -3284,10 +3317,14 @@ static int mlxsw_sp_rif_bridge_create(struct mlxsw_sp *mlxsw_sp,
if (rif == MLXSW_SP_RIF_MAX)
return -ERANGE;
- err = mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, true);
+ err = mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, true);
if (err)
return err;
+ err = mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, true);
+ if (err)
+ goto err_rif_bridge_op;
+
err = mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, true);
if (err)
goto err_rif_fdb_op;
@@ -3309,6 +3346,8 @@ err_rif_alloc:
mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, false);
err_rif_fdb_op:
mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, false);
+err_rif_bridge_op:
+ mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, false);
return err;
}
@@ -3328,6 +3367,8 @@ void mlxsw_sp_rif_bridge_destroy(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, false);
+ mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, false);
+
netdev_dbg(l3_dev, "RIF=%d destroyed\n", rif);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
index 237418a0e6e0..953b214f38d0 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
@@ -717,22 +717,18 @@ int mlxsw_sp_sb_tc_pool_bind_set(struct mlxsw_core_port *mlxsw_core_port,
u8 local_port = mlxsw_sp_port->local_port;
u8 pg_buff = tc_index;
enum mlxsw_reg_sbxx_dir dir = pool_type;
- u8 pool = pool_index;
+ u8 pool = pool_get(pool_index);
u32 max_buff;
int err;
+ if (dir != dir_get(pool_index))
+ return -EINVAL;
+
err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool, dir,
threshold, &max_buff);
if (err)
return err;
- if (pool_type == DEVLINK_SB_POOL_TYPE_EGRESS) {
- if (pool < MLXSW_SP_SB_POOL_COUNT)
- return -EINVAL;
- pool -= MLXSW_SP_SB_POOL_COUNT;
- } else if (pool >= MLXSW_SP_SB_POOL_COUNT) {
- return -EINVAL;
- }
return mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, pg_buff, dir,
0, max_buff, pool);
}
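
pool_get() and dir_get() (helpers defined earlier in this file) decode the combined pool_index that devlink hands in, replacing the open-coded ingress/egress arithmetic removed above. An illustrative model of such an encoding, assuming ingress pools occupy indices 0..N-1 and egress pools N..2N-1 (the real layout lives in spectrum_buffers.c):

#include <stdio.h>

#define POOL_COUNT 4	/* assumed per-direction pool count */

enum dir { DIR_INGRESS, DIR_EGRESS };

/* Pack: egress pools sit after all ingress pools. */
static unsigned int pool_index(enum dir dir, unsigned int pool)
{
	return (dir == DIR_EGRESS ? POOL_COUNT : 0) + pool;
}

static unsigned int pool_get(unsigned int index) { return index % POOL_COUNT; }
static enum dir dir_get(unsigned int index)
{
	return index < POOL_COUNT ? DIR_INGRESS : DIR_EGRESS;
}

int main(void)
{
	unsigned int idx = pool_index(DIR_EGRESS, 2);

	/* The bind handler above rejects a pool whose direction does
	 * not match the requested pool type. */
	printf("index=%u pool=%u egress=%d\n",
	       idx, pool_get(idx), dir_get(idx) == DIR_EGRESS);
	return 0;
}
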
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 90bb93b037ec..917ddd1e422f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -657,7 +657,7 @@ int mlxsw_sp_router_neigh_construct(struct net_device *dev,
return 0;
}
- r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
+ r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev);
if (WARN_ON(!r))
return -EINVAL;
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index 4d4ecba0aad9..8e13ec84c538 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -475,14 +475,6 @@ static void __lpc_get_mac(struct netdata_local *pldat, u8 *mac)
mac[5] = tmp >> 8;
}
-static void __lpc_eth_clock_enable(struct netdata_local *pldat, bool enable)
-{
- if (enable)
- clk_prepare_enable(pldat->clk);
- else
- clk_disable_unprepare(pldat->clk);
-}
-
static void __lpc_params_setup(struct netdata_local *pldat)
{
u32 tmp;
@@ -1056,7 +1048,7 @@ static int lpc_eth_close(struct net_device *ndev)
writel(0, LPC_ENET_MAC2(pldat->net_base));
spin_unlock_irqrestore(&pldat->lock, flags);
- __lpc_eth_clock_enable(pldat, false);
+ clk_disable_unprepare(pldat->clk);
return 0;
}
@@ -1197,11 +1189,14 @@ static int lpc_eth_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)
static int lpc_eth_open(struct net_device *ndev)
{
struct netdata_local *pldat = netdev_priv(ndev);
+ int ret;
if (netif_msg_ifup(pldat))
dev_dbg(&pldat->pdev->dev, "enabling %s\n", ndev->name);
- __lpc_eth_clock_enable(pldat, true);
+ ret = clk_prepare_enable(pldat->clk);
+ if (ret)
+ return ret;
/* Suspended PHY makes LPC ethernet core block, so resume now */
phy_resume(ndev->phydev);
@@ -1320,7 +1315,9 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
}
/* Enable network clock */
- __lpc_eth_clock_enable(pldat, true);
+ ret = clk_prepare_enable(pldat->clk);
+ if (ret)
+ goto err_out_clk_put;
/* Map IO space */
pldat->net_base = ioremap(res->start, resource_size(res));
@@ -1454,6 +1451,7 @@ err_out_iounmap:
iounmap(pldat->net_base);
err_out_disable_clocks:
clk_disable_unprepare(pldat->clk);
+err_out_clk_put:
clk_put(pldat->clk);
err_out_free_dev:
free_netdev(ndev);
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index 9ba7a7ee3357..2d67469eb8f6 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -563,9 +563,18 @@ struct qed_dev {
static inline u8 qed_concrete_to_sw_fid(struct qed_dev *cdev,
u32 concrete_fid)
{
+ u8 vfid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFID);
u8 pfid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_PFID);
+ u8 vf_valid = GET_FIELD(concrete_fid,
+ PXP_CONCRETE_FID_VFVALID);
+ u8 sw_fid;
- return pfid;
+ if (vf_valid)
+ sw_fid = vfid + MAX_NUM_PFS;
+ else
+ sw_fid = pfid;
+
+ return sw_fid;
}
#define PURE_LB_TC 8
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 4056219591c9..b4a56e61631a 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -722,11 +722,14 @@ static netdev_tx_t qede_start_xmit(struct sk_buff *skb,
txq->tx_db.data.bd_prod =
cpu_to_le16(qed_chain_get_prod_idx(&txq->tx_pbl));
- if (!skb->xmit_more || netif_tx_queue_stopped(netdev_txq))
+ if (!skb->xmit_more || netif_xmit_stopped(netdev_txq))
qede_update_tx_producer(txq);
if (unlikely(qed_chain_get_elem_left(&txq->tx_pbl)
< (MAX_SKB_FRAGS + 1))) {
+ if (skb->xmit_more)
+ qede_update_tx_producer(txq);
+
netif_tx_stop_queue(netdev_txq);
txq->stopped_cnt++;
DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
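
The qede change follows the usual xmit_more contract: defer the doorbell while the stack promises more packets, but flush if the queue stopped under us, and flush before stopping the queue ourselves so deferred descriptors are never stranded. A compact model of that decision (names are illustrative, not the driver's):

#include <stdbool.h>
#include <stdio.h>

static void ring_doorbell(void) { puts("doorbell"); }

/* Called at the end of the xmit path for each packet. */
static void tx_complete_bd(bool xmit_more, bool queue_stopped,
			   bool ring_nearly_full)
{
	/* Flush whenever no further packets are promised, or the
	 * queue already stopped behind our back. */
	if (!xmit_more || queue_stopped)
		ring_doorbell();

	/* About to stop the queue: a deferred producer update would
	 * never be flushed, so ring now even with xmit_more set. */
	if (ring_nearly_full && xmit_more)
		ring_doorbell();
}

int main(void)
{
	tx_complete_bd(true, false, true);	/* rings once, then stops */
	return 0;
}
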
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index deae10d7426d..5297bf77211c 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -467,8 +467,8 @@ static int cp_rx_poll(struct napi_struct *napi, int budget)
unsigned int rx_tail = cp->rx_tail;
int rx;
-rx_status_loop:
rx = 0;
+rx_status_loop:
cpw16(IntrStatus, cp_rx_intr_mask);
while (rx < budget) {
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 6fe1d0ba78b2..00279da6a1e8 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -1616,13 +1616,14 @@ static void efx_ef10_get_stat_mask(struct efx_nic *efx, unsigned long *mask)
}
#if BITS_PER_LONG == 64
+ BUILD_BUG_ON(BITS_TO_LONGS(EF10_STAT_COUNT) != 2);
mask[0] = raw_mask[0];
mask[1] = raw_mask[1];
#else
+ BUILD_BUG_ON(BITS_TO_LONGS(EF10_STAT_COUNT) != 3);
mask[0] = raw_mask[0] & 0xffffffff;
mask[1] = raw_mask[0] >> 32;
mask[2] = raw_mask[1] & 0xffffffff;
- mask[3] = raw_mask[1] >> 32;
#endif
}
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 726b80f45906..503a3b6dce91 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -2275,6 +2275,13 @@ static int smc_drv_probe(struct platform_device *pdev)
if (pd) {
memcpy(&lp->cfg, pd, sizeof(lp->cfg));
lp->io_shift = SMC91X_IO_SHIFT(lp->cfg.flags);
+
+ if (!SMC_8BIT(lp) && !SMC_16BIT(lp)) {
+ dev_err(&pdev->dev,
+ "at least one of 8-bit or 16-bit access support is required.\n");
+ ret = -ENXIO;
+ goto out_free_netdev;
+ }
}
#if IS_BUILTIN(CONFIG_OF)
diff --git a/drivers/net/ethernet/smsc/smc91x.h b/drivers/net/ethernet/smsc/smc91x.h
index 1a55c7976df0..e17671c9d1b0 100644
--- a/drivers/net/ethernet/smsc/smc91x.h
+++ b/drivers/net/ethernet/smsc/smc91x.h
@@ -37,6 +37,27 @@
#include <linux/smc91x.h>
/*
+ * Any 16-bit access is performed with two 8-bit accesses if the hardware
+ * can't do it directly. Most registers are 16-bit so those are mandatory.
+ */
+#define SMC_outw_b(x, a, r) \
+ do { \
+ unsigned int __val16 = (x); \
+ unsigned int __reg = (r); \
+ SMC_outb(__val16, a, __reg); \
+ SMC_outb(__val16 >> 8, a, __reg + (1 << SMC_IO_SHIFT)); \
+ } while (0)
+
+#define SMC_inw_b(a, r) \
+ ({ \
+ unsigned int __val16; \
+ unsigned int __reg = r; \
+ __val16 = SMC_inb(a, __reg); \
+ __val16 |= SMC_inb(a, __reg + (1 << SMC_IO_SHIFT)) << 8; \
+ __val16; \
+ })
+
+/*
* Define your architecture specific bus configuration parameters here.
*/
@@ -55,10 +76,30 @@
#define SMC_IO_SHIFT (lp->io_shift)
#define SMC_inb(a, r) readb((a) + (r))
-#define SMC_inw(a, r) readw((a) + (r))
+#define SMC_inw(a, r) \
+ ({ \
+ unsigned int __smc_r = r; \
+ SMC_16BIT(lp) ? readw((a) + __smc_r) : \
+ SMC_8BIT(lp) ? SMC_inw_b(a, __smc_r) : \
+ ({ BUG(); 0; }); \
+ })
+
#define SMC_inl(a, r) readl((a) + (r))
#define SMC_outb(v, a, r) writeb(v, (a) + (r))
+#define SMC_outw(v, a, r) \
+ do { \
+ unsigned int __v = v, __smc_r = r; \
+ if (SMC_16BIT(lp)) \
+ __SMC_outw(__v, a, __smc_r); \
+ else if (SMC_8BIT(lp)) \
+ SMC_outw_b(__v, a, __smc_r); \
+ else \
+ BUG(); \
+ } while (0)
+
#define SMC_outl(v, a, r) writel(v, (a) + (r))
+#define SMC_insb(a, r, p, l) readsb((a) + (r), p, l)
+#define SMC_outsb(a, r, p, l) writesb((a) + (r), p, l)
#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l)
#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l)
#define SMC_insl(a, r, p, l) readsl((a) + (r), p, l)
@@ -66,7 +107,7 @@
#define SMC_IRQ_FLAGS (-1) /* from resource */
/* We actually can't write halfwords properly if not word aligned */
-static inline void SMC_outw(u16 val, void __iomem *ioaddr, int reg)
+static inline void __SMC_outw(u16 val, void __iomem *ioaddr, int reg)
{
if ((machine_is_mainstone() || machine_is_stargate2() ||
machine_is_pxa_idp()) && reg & 2) {
@@ -416,24 +457,8 @@ smc_pxa_dma_insw(void __iomem *ioaddr, struct smc_local *lp, int reg, int dma,
#if ! SMC_CAN_USE_16BIT
-/*
- * Any 16-bit access is performed with two 8-bit accesses if the hardware
- * can't do it directly. Most registers are 16-bit so those are mandatory.
- */
-#define SMC_outw(x, ioaddr, reg) \
- do { \
- unsigned int __val16 = (x); \
- SMC_outb( __val16, ioaddr, reg ); \
- SMC_outb( __val16 >> 8, ioaddr, reg + (1 << SMC_IO_SHIFT));\
- } while (0)
-#define SMC_inw(ioaddr, reg) \
- ({ \
- unsigned int __val16; \
- __val16 = SMC_inb( ioaddr, reg ); \
- __val16 |= SMC_inb( ioaddr, reg + (1 << SMC_IO_SHIFT)) << 8; \
- __val16; \
- })
-
+#define SMC_outw(x, ioaddr, reg) SMC_outw_b(x, ioaddr, reg)
+#define SMC_inw(ioaddr, reg) SMC_inw_b(ioaddr, reg)
#define SMC_insw(a, r, p, l) BUG()
#define SMC_outsw(a, r, p, l) BUG()
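
SMC_outw_b()/SMC_inw_b() split a 16-bit access into two little-endian byte accesses so the same helpers back both the compile-time 8-bit-only fallback and the new runtime dispatch. The real macros space the second access by (1 << SMC_IO_SHIFT); this standalone sketch flattens that to +1 over a plain byte buffer:

#include <stdint.h>
#include <stdio.h>

/* Write a 16-bit value as two byte accesses, low byte first. */
static void outw_b(uint8_t *base, unsigned int reg, uint16_t val)
{
	base[reg] = val & 0xff;
	base[reg + 1] = val >> 8;
}

/* Read it back the same way. */
static uint16_t inw_b(const uint8_t *base, unsigned int reg)
{
	return base[reg] | (uint16_t)(base[reg + 1] << 8);
}

int main(void)
{
	uint8_t regs[4] = { 0 };

	outw_b(regs, 2, 0xbeef);
	printf("0x%04x\n", inw_b(regs, 2));	/* prints 0xbeef */
	return 0;
}
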
diff --git a/drivers/net/ethernet/synopsys/dwc_eth_qos.c b/drivers/net/ethernet/synopsys/dwc_eth_qos.c
index 9f159a775af3..5a3941bf250f 100644
--- a/drivers/net/ethernet/synopsys/dwc_eth_qos.c
+++ b/drivers/net/ethernet/synopsys/dwc_eth_qos.c
@@ -1622,13 +1622,7 @@ static void dwceqos_init_hw(struct net_local *lp)
DWCEQOS_MMC_CTRL_RSTONRD);
dwceqos_enable_mmc_interrupt(lp);
- /* Enable Interrupts */
- dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE,
- DWCEQOS_DMA_CH0_IE_NIE |
- DWCEQOS_DMA_CH0_IE_RIE | DWCEQOS_DMA_CH0_IE_TIE |
- DWCEQOS_DMA_CH0_IE_AIE |
- DWCEQOS_DMA_CH0_IE_FBEE);
-
+ dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE, 0);
dwceqos_write(lp, REG_DWCEQOS_MAC_IE, 0);
dwceqos_write(lp, REG_DWCEQOS_MAC_CFG, DWCEQOS_MAC_CFG_IPC |
@@ -1905,6 +1899,15 @@ static int dwceqos_open(struct net_device *ndev)
netif_start_queue(ndev);
tasklet_enable(&lp->tx_bdreclaim_tasklet);
+ /* Enable Interrupts -- do this only after we enable NAPI and the
+ * tasklet.
+ */
+ dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE,
+ DWCEQOS_DMA_CH0_IE_NIE |
+ DWCEQOS_DMA_CH0_IE_RIE | DWCEQOS_DMA_CH0_IE_TIE |
+ DWCEQOS_DMA_CH0_IE_AIE |
+ DWCEQOS_DMA_CH0_IE_FBEE);
+
return 0;
}
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index 7452b5f9d024..7108c68f16d3 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -1987,7 +1987,7 @@ bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if ((readl(nic->regs + FPGA_VER) & 0xFFF) >= 378) {
err = pci_enable_msi(pdev);
if (err)
- pr_err("Can't eneble msi. error is %d\n", err);
+ pr_err("Can't enable msi. error is %d\n", err);
else
nic->irq_type = IRQ_MSI;
} else
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index 3cee84a24815..93dc10b10c09 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -1131,11 +1131,13 @@ static int xemaclite_of_probe(struct platform_device *ofdev)
lp->rx_ping_pong = get_bool(ofdev, "xlnx,rx-ping-pong");
mac_address = of_get_mac_address(ofdev->dev.of_node);
- if (mac_address)
+ if (mac_address) {
/* Set the MAC address. */
memcpy(ndev->dev_addr, mac_address, ETH_ALEN);
- else
- dev_warn(dev, "No MAC address found\n");
+ } else {
+ dev_warn(dev, "No MAC address found, using random\n");
+ eth_hw_addr_random(ndev);
+ }
/* Clear the Tx CSR's in case this is a restart */
__raw_writel(0, lp->base_addr + XEL_TSR_OFFSET);
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 053e87905b94..885ac9cbab5a 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -964,7 +964,7 @@ static struct phy_driver ksphy_driver[] = {
.get_strings = kszphy_get_strings,
.get_stats = kszphy_get_stats,
.suspend = genphy_suspend,
- .resume = genphy_resume,
+ .resume = kszphy_resume,
}, {
.phy_id = PHY_ID_KSZ8873MLL,
.phy_id_mask = MICREL_PHY_ID_MASK,
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index c5dc2c363f96..c6f66832a1a6 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -722,8 +722,10 @@ phy_err:
int phy_start_interrupts(struct phy_device *phydev)
{
atomic_set(&phydev->irq_disable, 0);
- if (request_irq(phydev->irq, phy_interrupt, 0, "phy_interrupt",
- phydev) < 0) {
+ if (request_irq(phydev->irq, phy_interrupt,
+ IRQF_SHARED,
+ "phy_interrupt",
+ phydev) < 0) {
pr_warn("%s: Can't get IRQ %d (PHY)\n",
phydev->mdio.bus->name, phydev->irq);
phydev->irq = PHY_POLL;
diff --git a/drivers/net/team/team_mode_loadbalance.c b/drivers/net/team/team_mode_loadbalance.c
index cdb19b385d42..b228bea7931f 100644
--- a/drivers/net/team/team_mode_loadbalance.c
+++ b/drivers/net/team/team_mode_loadbalance.c
@@ -14,9 +14,23 @@
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
#include <linux/filter.h>
#include <linux/if_team.h>
+static rx_handler_result_t lb_receive(struct team *team, struct team_port *port,
+ struct sk_buff *skb)
+{
+ if (unlikely(skb->protocol == htons(ETH_P_SLOW))) {
+ /* LACPDU packets should go to exact delivery */
+ const unsigned char *dest = eth_hdr(skb)->h_dest;
+
+ if (is_link_local_ether_addr(dest) && dest[5] == 0x02)
+ return RX_HANDLER_EXACT;
+ }
+ return RX_HANDLER_ANOTHER;
+}
+
struct lb_priv;
typedef struct team_port *lb_select_tx_port_func_t(struct team *,
@@ -652,6 +666,7 @@ static const struct team_mode_ops lb_mode_ops = {
.port_enter = lb_port_enter,
.port_leave = lb_port_leave,
.port_disabled = lb_port_disabled,
+ .receive = lb_receive,
.transmit = lb_transmit,
};
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index e249428f2d83..8093e39ae263 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -888,11 +888,7 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
goto drop;
- if (skb->sk && sk_fullsock(skb->sk)) {
- sock_tx_timestamp(skb->sk, skb->sk->sk_tsflags,
- &skb_shinfo(skb)->tx_flags);
- sw_tx_timestamp(skb);
- }
+ skb_tx_timestamp(skb);
/* Orphan the skb - required as we might hang on to it
* for indefinite time.
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
index 770212baaf05..528b9c9c4e60 100644
--- a/drivers/net/usb/kaweth.c
+++ b/drivers/net/usb/kaweth.c
@@ -1009,6 +1009,7 @@ static int kaweth_probe(
struct net_device *netdev;
const eth_addr_t bcast_addr = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
int result = 0;
+ int rv = -EIO;
dev_dbg(dev,
"Kawasaki Device Probe (Device number:%d): 0x%4.4x:0x%4.4x:0x%4.4x\n",
@@ -1029,6 +1030,7 @@ static int kaweth_probe(
kaweth = netdev_priv(netdev);
kaweth->dev = udev;
kaweth->net = netdev;
+ kaweth->intf = intf;
spin_lock_init(&kaweth->device_lock);
init_waitqueue_head(&kaweth->term_wait);
@@ -1048,6 +1050,10 @@ static int kaweth_probe(
/* Download the firmware */
dev_info(dev, "Downloading firmware...\n");
kaweth->firmware_buf = (__u8 *)__get_free_page(GFP_KERNEL);
+ if (!kaweth->firmware_buf) {
+ rv = -ENOMEM;
+ goto err_free_netdev;
+ }
if ((result = kaweth_download_firmware(kaweth,
"kaweth/new_code.bin",
100,
@@ -1139,8 +1145,6 @@ err_fw:
dev_dbg(dev, "Initializing net device.\n");
- kaweth->intf = intf;
-
kaweth->tx_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!kaweth->tx_urb)
goto err_free_netdev;
@@ -1204,7 +1208,7 @@ err_only_tx:
err_free_netdev:
free_netdev(netdev);
- return -EIO;
+ return rv;
}
/****************************************************************
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index fc68dd4aec8f..2fd93b4c759a 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -914,7 +914,9 @@ vmxnet3_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
{
struct Vmxnet3_TxDataDesc *tdd;
- tdd = tq->data_ring.base + tq->tx_ring.next2fill;
+ tdd = (struct Vmxnet3_TxDataDesc *)((u8 *)tq->data_ring.base +
+ tq->tx_ring.next2fill *
+ tq->txdata_desc_size);
memcpy(tdd->data, skb->data, ctx->copy_size);
netdev_dbg(adapter->netdev,
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index 74fc03072b87..7dc37a090549 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -69,10 +69,10 @@
/*
* Version numbers
*/
-#define VMXNET3_DRIVER_VERSION_STRING "1.4.9.0-k"
+#define VMXNET3_DRIVER_VERSION_STRING "1.4.a.0-k"
/* a 32-bit int, each byte encodes a version number in VMXNET3_DRIVER_VERSION */
-#define VMXNET3_DRIVER_VERSION_NUM 0x01040900
+#define VMXNET3_DRIVER_VERSION_NUM 0x01040a00
#if defined(CONFIG_PCI_MSI)
/* RSS only makes sense if MSI-X is supported. */
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 7ff2e820bbf4..2feacc70bf61 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -81,10 +81,12 @@ EXPORT_SYMBOL_GPL(nvme_cancel_request);
bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
enum nvme_ctrl_state new_state)
{
- enum nvme_ctrl_state old_state = ctrl->state;
+ enum nvme_ctrl_state old_state;
bool changed = false;
spin_lock_irq(&ctrl->lock);
+
+ old_state = ctrl->state;
switch (new_state) {
case NVME_CTRL_LIVE:
switch (old_state) {
@@ -140,11 +142,12 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
default:
break;
}
- spin_unlock_irq(&ctrl->lock);
if (changed)
ctrl->state = new_state;
+ spin_unlock_irq(&ctrl->lock);
+
return changed;
}
EXPORT_SYMBOL_GPL(nvme_change_ctrl_state);
@@ -608,7 +611,7 @@ int nvme_get_features(struct nvme_ctrl *dev, unsigned fid, unsigned nsid,
ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &cqe, NULL, 0, 0,
NVME_QID_ANY, 0, 0);
- if (ret >= 0)
+ if (ret >= 0 && result)
*result = le32_to_cpu(cqe.result);
return ret;
}
@@ -628,7 +631,7 @@ int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11,
ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &cqe, NULL, 0, 0,
NVME_QID_ANY, 0, 0);
- if (ret >= 0)
+ if (ret >= 0 && result)
*result = le32_to_cpu(cqe.result);
return ret;
}
diff --git a/drivers/of/base.c b/drivers/of/base.c
index 7792266db259..3ce69536a7b3 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -1631,8 +1631,7 @@ static int __of_parse_phandle_with_args(const struct device_node *np,
*/
err:
- if (it.node)
- of_node_put(it.node);
+ of_node_put(it.node);
return rc;
}
@@ -2343,20 +2342,13 @@ struct device_node *of_graph_get_endpoint_by_regs(
const struct device_node *parent, int port_reg, int reg)
{
struct of_endpoint endpoint;
- struct device_node *node, *prev_node = NULL;
-
- while (1) {
- node = of_graph_get_next_endpoint(parent, prev_node);
- of_node_put(prev_node);
- if (!node)
- break;
+ struct device_node *node = NULL;
+ for_each_endpoint_of_node(parent, node) {
of_graph_parse_endpoint(node, &endpoint);
if (((port_reg == -1) || (endpoint.port == port_reg)) &&
((reg == -1) || (endpoint.id == reg)))
return node;
-
- prev_node = node;
}
return NULL;
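
for_each_endpoint_of_node() internalizes the get/put bookkeeping the removed loop did by hand: each step drops the reference on the previous endpoint and holds one on the current, so breaking out leaves exactly one reference on the returned node. A userspace model of that iterator contract (the kernel macro itself lives in of_graph.h):

#include <stdio.h>

struct node { const char *name; int refs; struct node *next; };

static struct node *node_get(struct node *n) { if (n) n->refs++; return n; }
static void node_put(struct node *n)         { if (n) n->refs--; }

/* Model of of_graph_get_next_endpoint(): drops the reference on
 * @prev and returns the next node with a reference held. */
static struct node *next_endpoint(struct node *head, struct node *prev)
{
	struct node *n = prev ? prev->next : head;

	node_put(prev);
	return node_get(n);
}

/* The for_each_* pattern: whoever breaks out owns one reference. */
int main(void)
{
	struct node c = { "ep1", 0, NULL }, b = { "ep0", 0, &c };
	struct node *it = NULL;

	while ((it = next_endpoint(&b, it))) {
		if (it == &c)
			break;		/* keep the reference, return it */
	}
	printf("%s refs=%d\n", it->name, it->refs);	/* ep1 refs=1 */
	return 0;
}
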
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index 55f1b8391149..085c6389afd1 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -517,7 +517,7 @@ static void *__unflatten_device_tree(const void *blob,
pr_warning("End of tree marker overwritten: %08x\n",
be32_to_cpup(mem + size));
- if (detached) {
+ if (detached && mynodes) {
of_node_set_flag(*mynodes, OF_DETACHED);
pr_debug("unflattened tree is detached\n");
}
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index 89a71c6074fc..a2e68f740eda 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -544,12 +544,15 @@ void __init of_irq_init(const struct of_device_id *matches)
list_del(&desc->list);
+ of_node_set_flag(desc->dev, OF_POPULATED);
+
pr_debug("of_irq_init: init %s (%p), parent %p\n",
desc->dev->full_name,
desc->dev, desc->interrupt_parent);
ret = desc->irq_init_cb(desc->dev,
desc->interrupt_parent);
if (ret) {
+ of_node_clear_flag(desc->dev, OF_POPULATED);
kfree(desc);
continue;
}
@@ -559,8 +562,6 @@ void __init of_irq_init(const struct of_device_id *matches)
* its children can get processed in a subsequent pass.
*/
list_add_tail(&desc->list, &intc_parent_list);
-
- of_node_set_flag(desc->dev, OF_POPULATED);
}
/* Get the next pending parent that might have children */
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index 8aa197691074..f39ccd5aa701 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -497,6 +497,7 @@ int of_platform_default_populate(struct device_node *root,
}
EXPORT_SYMBOL_GPL(of_platform_default_populate);
+#ifndef CONFIG_PPC
static int __init of_platform_default_populate_init(void)
{
struct device_node *node;
@@ -521,6 +522,7 @@ static int __init of_platform_default_populate_init(void)
return 0;
}
arch_initcall_sync(of_platform_default_populate_init);
+#endif
static int of_platform_device_destroy(struct device *dev, void *data)
{
diff --git a/drivers/pci/host-bridge.c b/drivers/pci/host-bridge.c
index 5f4a2e04c8d7..add66236215c 100644
--- a/drivers/pci/host-bridge.c
+++ b/drivers/pci/host-bridge.c
@@ -44,6 +44,7 @@ void pci_set_host_bridge_release(struct pci_host_bridge *bridge,
bridge->release_fn = release_fn;
bridge->release_data = release_data;
}
+EXPORT_SYMBOL_GPL(pci_set_host_bridge_release);
void pcibios_resource_to_bus(struct pci_bus *bus, struct pci_bus_region *region,
struct resource *res)
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index eafa6138a6b8..98f12223c734 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -1069,7 +1069,7 @@ static int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec,
nvec = maxvec;
for (;;) {
- if (!(flags & PCI_IRQ_NOAFFINITY)) {
+ if (flags & PCI_IRQ_AFFINITY) {
dev->irq_affinity = irq_create_affinity_mask(&nvec);
if (nvec < minvec)
return -ENOSPC;
@@ -1105,7 +1105,7 @@ static int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec,
**/
int pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec)
{
- return __pci_enable_msi_range(dev, minvec, maxvec, PCI_IRQ_NOAFFINITY);
+ return __pci_enable_msi_range(dev, minvec, maxvec, 0);
}
EXPORT_SYMBOL(pci_enable_msi_range);
@@ -1120,7 +1120,7 @@ static int __pci_enable_msix_range(struct pci_dev *dev,
return -ERANGE;
for (;;) {
- if (!(flags & PCI_IRQ_NOAFFINITY)) {
+ if (flags & PCI_IRQ_AFFINITY) {
dev->irq_affinity = irq_create_affinity_mask(&nvec);
if (nvec < minvec)
return -ENOSPC;
@@ -1160,8 +1160,7 @@ static int __pci_enable_msix_range(struct pci_dev *dev,
int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries,
int minvec, int maxvec)
{
- return __pci_enable_msix_range(dev, entries, minvec, maxvec,
- PCI_IRQ_NOAFFINITY);
+ return __pci_enable_msix_range(dev, entries, minvec, maxvec, 0);
}
EXPORT_SYMBOL(pci_enable_msix_range);
@@ -1187,22 +1186,25 @@ int pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
{
int vecs = -ENOSPC;
- if (!(flags & PCI_IRQ_NOMSIX)) {
+ if (flags & PCI_IRQ_MSIX) {
vecs = __pci_enable_msix_range(dev, NULL, min_vecs, max_vecs,
flags);
if (vecs > 0)
return vecs;
}
- if (!(flags & PCI_IRQ_NOMSI)) {
+ if (flags & PCI_IRQ_MSI) {
vecs = __pci_enable_msi_range(dev, min_vecs, max_vecs, flags);
if (vecs > 0)
return vecs;
}
/* use legacy irq if allowed */
- if (!(flags & PCI_IRQ_NOLEGACY) && min_vecs == 1)
+ if ((flags & PCI_IRQ_LEGACY) && min_vecs == 1) {
+ pci_intx(dev, 1);
return 1;
+ }
+
return vecs;
}
EXPORT_SYMBOL(pci_alloc_irq_vectors);
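
After the polarity flip, callers opt in to each vector type and the core falls back MSI-X -> MSI -> single legacy INTx (now also enabled via pci_intx() in the hunk above). A userspace model of that fallback chain, with stubbed enable functions and illustrative flag values:

#include <stdio.h>

#define IRQ_LEGACY 0x1
#define IRQ_MSI    0x2
#define IRQ_MSIX   0x4

static int enable_msix(int min, int max) { (void)min; (void)max; return -1; }
static int enable_msi(int min, int max)  { return (min <= 2 && 2 <= max) ? 2 : -1; }

/* Fallback chain modelled after the reworked pci_alloc_irq_vectors():
 * try MSI-X, then MSI, then a single legacy line if min == 1. */
static int alloc_irq_vectors(int min, int max, unsigned int flags)
{
	int vecs;

	if ((flags & IRQ_MSIX) && (vecs = enable_msix(min, max)) > 0)
		return vecs;
	if ((flags & IRQ_MSI) && (vecs = enable_msi(min, max)) > 0)
		return vecs;
	if ((flags & IRQ_LEGACY) && min == 1)
		return 1;	/* kernel version also calls pci_intx() here */
	return -1;
}

int main(void)
{
	printf("%d vectors\n",
	       alloc_irq_vectors(1, 4, IRQ_MSIX | IRQ_MSI | IRQ_LEGACY));
	return 0;
}
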
diff --git a/drivers/platform/olpc/olpc-ec.c b/drivers/platform/olpc/olpc-ec.c
index f99b183d5296..374a8028fec7 100644
--- a/drivers/platform/olpc/olpc-ec.c
+++ b/drivers/platform/olpc/olpc-ec.c
@@ -1,6 +1,8 @@
/*
* Generic driver for the OLPC Embedded Controller.
*
+ * Author: Andres Salomon <dilinger@queued.net>
+ *
* Copyright (C) 2011-2012 One Laptop per Child Foundation.
*
* Licensed under the GPL v2 or later.
@@ -12,7 +14,7 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/list.h>
#include <linux/olpc-ec.h>
#include <asm/olpc.h>
@@ -326,8 +328,4 @@ static int __init olpc_ec_init_module(void)
{
return platform_driver_register(&olpc_ec_plat_driver);
}
-
arch_initcall(olpc_ec_init_module);
-
-MODULE_AUTHOR("Andres Salomon <dilinger@queued.net>");
-MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/intel_pmic_gpio.c b/drivers/platform/x86/intel_pmic_gpio.c
index 63b371d6ee55..91ae58510d92 100644
--- a/drivers/platform/x86/intel_pmic_gpio.c
+++ b/drivers/platform/x86/intel_pmic_gpio.c
@@ -1,6 +1,8 @@
/* Moorestown PMIC GPIO (access through IPC) driver
* Copyright (c) 2008 - 2009, Intel Corporation.
*
+ * Author: Alek Du <alek.du@intel.com>
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
@@ -21,7 +23,6 @@
#define pr_fmt(fmt) "%s: " fmt, __func__
-#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
@@ -322,9 +323,4 @@ static int __init platform_pmic_gpio_init(void)
{
return platform_driver_register(&platform_pmic_gpio_driver);
}
-
subsys_initcall(platform_pmic_gpio_init);
-
-MODULE_AUTHOR("Alek Du <alek.du@intel.com>");
-MODULE_DESCRIPTION("Intel Moorestown PMIC GPIO driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
index b381b3718a98..5648b715fed9 100644
--- a/drivers/scsi/aacraid/commctrl.c
+++ b/drivers/scsi/aacraid/commctrl.c
@@ -63,7 +63,7 @@ static int ioctl_send_fib(struct aac_dev * dev, void __user *arg)
struct fib *fibptr;
struct hw_fib * hw_fib = (struct hw_fib *)0;
dma_addr_t hw_fib_pa = (dma_addr_t)0LL;
- unsigned size;
+ unsigned int size, osize;
int retval;
if (dev->in_reset) {
@@ -87,7 +87,8 @@ static int ioctl_send_fib(struct aac_dev * dev, void __user *arg)
* will not overrun the buffer when we copy the memory. Return
* an error if we would.
*/
- size = le16_to_cpu(kfib->header.Size) + sizeof(struct aac_fibhdr);
+ osize = size = le16_to_cpu(kfib->header.Size) +
+ sizeof(struct aac_fibhdr);
if (size < le16_to_cpu(kfib->header.SenderSize))
size = le16_to_cpu(kfib->header.SenderSize);
if (size > dev->max_fib_size) {
@@ -118,6 +119,14 @@ static int ioctl_send_fib(struct aac_dev * dev, void __user *arg)
goto cleanup;
}
+ /* Sanity check the second copy */
+ if ((osize != le16_to_cpu(kfib->header.Size) +
+ sizeof(struct aac_fibhdr))
+ || (size < le16_to_cpu(kfib->header.SenderSize))) {
+ retval = -EINVAL;
+ goto cleanup;
+ }
+
if (kfib->header.Command == cpu_to_le16(TakeABreakPt)) {
aac_adapter_interrupt(dev);
/*
diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
index a569c65f22b1..dcf36537a767 100644
--- a/drivers/scsi/fcoe/fcoe_ctlr.c
+++ b/drivers/scsi/fcoe/fcoe_ctlr.c
@@ -2923,7 +2923,7 @@ static int fcoe_ctlr_vlan_recv(struct fcoe_ctlr *fip, struct sk_buff *skb)
mutex_unlock(&fip->ctlr_mutex);
drop:
- kfree(skb);
+ kfree_skb(skb);
return rc;
}
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 2dab3dc2aa69..c1ed25adb17e 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -5037,7 +5037,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
/* Find first memory bar */
bar_list = pci_select_bars(instance->pdev, IORESOURCE_MEM);
instance->bar = find_first_bit(&bar_list, sizeof(unsigned long));
- if (pci_request_selected_regions(instance->pdev, instance->bar,
+ if (pci_request_selected_regions(instance->pdev, 1<<instance->bar,
"megasas: LSI")) {
dev_printk(KERN_DEBUG, &instance->pdev->dev, "IO memory region busy!\n");
return -EBUSY;
@@ -5339,7 +5339,7 @@ fail_ready_state:
iounmap(instance->reg_set);
fail_ioremap:
- pci_release_selected_regions(instance->pdev, instance->bar);
+ pci_release_selected_regions(instance->pdev, 1<<instance->bar);
return -EINVAL;
}
@@ -5360,7 +5360,7 @@ static void megasas_release_mfi(struct megasas_instance *instance)
iounmap(instance->reg_set);
- pci_release_selected_regions(instance->pdev, instance->bar);
+ pci_release_selected_regions(instance->pdev, 1<<instance->bar);
}
/**
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index ec837544f784..52d8bbf7feb5 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -2603,7 +2603,7 @@ megasas_release_fusion(struct megasas_instance *instance)
iounmap(instance->reg_set);
- pci_release_selected_regions(instance->pdev, instance->bar);
+ pci_release_selected_regions(instance->pdev, 1<<instance->bar);
}
/**
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 751f13edece0..750f82c339d4 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -2188,6 +2188,17 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
} else
ioc->msix96_vector = 0;
+ if (ioc->is_warpdrive) {
+ ioc->reply_post_host_index[0] = (resource_size_t __iomem *)
+ &ioc->chip->ReplyPostHostIndex;
+
+ for (i = 1; i < ioc->cpu_msix_table_sz; i++)
+ ioc->reply_post_host_index[i] =
+ (resource_size_t __iomem *)
+ ((u8 __iomem *)&ioc->chip->Doorbell + (0x4000 + ((i - 1)
+ * 4)));
+ }
+
list_for_each_entry(reply_q, &ioc->reply_queue_list, list)
pr_info(MPT3SAS_FMT "%s: IRQ %d\n",
reply_q->name, ((ioc->msix_enable) ? "PCI-MSI-X enabled" :
@@ -5280,17 +5291,6 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
if (r)
goto out_free_resources;
- if (ioc->is_warpdrive) {
- ioc->reply_post_host_index[0] = (resource_size_t __iomem *)
- &ioc->chip->ReplyPostHostIndex;
-
- for (i = 1; i < ioc->cpu_msix_table_sz; i++)
- ioc->reply_post_host_index[i] =
- (resource_size_t __iomem *)
- ((u8 __iomem *)&ioc->chip->Doorbell + (0x4000 + ((i - 1)
- * 4)));
- }
-
pci_set_drvdata(ioc->pdev, ioc->shost);
r = _base_get_ioc_facts(ioc, CAN_SLEEP);
if (r)
diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c
index 53ef1cb6418e..0e8601aa877a 100644
--- a/drivers/scsi/ses.c
+++ b/drivers/scsi/ses.c
@@ -778,6 +778,8 @@ static void ses_intf_remove_enclosure(struct scsi_device *sdev)
if (!edev)
return;
+ enclosure_unregister(edev);
+
ses_dev = edev->scratch;
edev->scratch = NULL;
@@ -789,7 +791,6 @@ static void ses_intf_remove_enclosure(struct scsi_device *sdev)
kfree(edev->component[0].scratch);
put_device(&edev->edev);
- enclosure_unregister(edev);
}
static void ses_intf_remove(struct device *cdev,
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
index 3788ed74c9ab..a32b41783b77 100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpu_cooling.c
@@ -740,12 +740,22 @@ static int cpufreq_power2state(struct thermal_cooling_device *cdev,
}
/* Bind cpufreq callbacks to thermal cooling device ops */
+
static struct thermal_cooling_device_ops cpufreq_cooling_ops = {
.get_max_state = cpufreq_get_max_state,
.get_cur_state = cpufreq_get_cur_state,
.set_cur_state = cpufreq_set_cur_state,
};
+static struct thermal_cooling_device_ops cpufreq_power_cooling_ops = {
+ .get_max_state = cpufreq_get_max_state,
+ .get_cur_state = cpufreq_get_cur_state,
+ .set_cur_state = cpufreq_set_cur_state,
+ .get_requested_power = cpufreq_get_requested_power,
+ .state2power = cpufreq_state2power,
+ .power2state = cpufreq_power2state,
+};
+
/* Notifier for cpufreq policy change */
static struct notifier_block thermal_cpufreq_notifier_block = {
.notifier_call = cpufreq_thermal_notifier,
@@ -795,6 +805,7 @@ __cpufreq_cooling_register(struct device_node *np,
struct cpumask temp_mask;
unsigned int freq, i, num_cpus;
int ret;
+ struct thermal_cooling_device_ops *cooling_ops;
cpumask_and(&temp_mask, clip_cpus, cpu_online_mask);
policy = cpufreq_cpu_get(cpumask_first(&temp_mask));
@@ -850,10 +861,6 @@ __cpufreq_cooling_register(struct device_node *np,
cpumask_copy(&cpufreq_dev->allowed_cpus, clip_cpus);
if (capacitance) {
- cpufreq_cooling_ops.get_requested_power =
- cpufreq_get_requested_power;
- cpufreq_cooling_ops.state2power = cpufreq_state2power;
- cpufreq_cooling_ops.power2state = cpufreq_power2state;
cpufreq_dev->plat_get_static_power = plat_static_func;
ret = build_dyn_power_table(cpufreq_dev, capacitance);
@@ -861,6 +868,10 @@ __cpufreq_cooling_register(struct device_node *np,
cool_dev = ERR_PTR(ret);
goto free_table;
}
+
+ cooling_ops = &cpufreq_power_cooling_ops;
+ } else {
+ cooling_ops = &cpufreq_cooling_ops;
}
ret = get_idr(&cpufreq_idr, &cpufreq_dev->id);
@@ -885,7 +896,7 @@ __cpufreq_cooling_register(struct device_node *np,
cpufreq_dev->id);
cool_dev = thermal_of_cooling_device_register(np, dev_name, cpufreq_dev,
- &cpufreq_cooling_ops);
+ cooling_ops);
if (IS_ERR(cool_dev))
goto remove_idr;
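
Keeping two fixed ops tables and choosing one per registration removes the write to the shared cpufreq_cooling_ops, which was racy when devices with and without a capacitance value registered concurrently. The pattern, reduced to a runnable sketch with invented types:

#include <stdio.h>

struct ops { const char *name; int (*get_power)(void); };

static int get_power(void) { return 42; }

/* Two fixed tables; nothing is ever written to them at runtime. */
static const struct ops plain_ops = { "plain", NULL };
static const struct ops power_ops = { "power", get_power };

static const struct ops *pick_ops(int capacitance)
{
	return capacitance ? &power_ops : &plain_ops;
}

int main(void)
{
	/* Each registration makes its own choice; no shared mutation. */
	printf("%s\n", pick_ops(0)->name);	/* plain */
	printf("%s\n", pick_ops(5)->name);	/* power */
	return 0;
}
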
diff --git a/drivers/thermal/imx_thermal.c b/drivers/thermal/imx_thermal.c
index c5547bd711db..e473548b5d28 100644
--- a/drivers/thermal/imx_thermal.c
+++ b/drivers/thermal/imx_thermal.c
@@ -471,8 +471,6 @@ MODULE_DEVICE_TABLE(of, of_imx_thermal_match);
static int imx_thermal_probe(struct platform_device *pdev)
{
- const struct of_device_id *of_id =
- of_match_device(of_imx_thermal_match, &pdev->dev);
struct imx_thermal_data *data;
struct regmap *map;
int measure_freq;
@@ -490,7 +488,7 @@ static int imx_thermal_probe(struct platform_device *pdev)
}
data->tempmon = map;
- data->socdata = of_id->data;
+ data->socdata = of_device_get_match_data(&pdev->dev);
/* make sure the IRQ flag is clear before enabling irq on i.MX6SX */
if (data->socdata->version == TEMPMON_IMX6SX) {
diff --git a/drivers/thermal/int340x_thermal/int3406_thermal.c b/drivers/thermal/int340x_thermal/int3406_thermal.c
index a578cd257db4..1891f34ab7fc 100644
--- a/drivers/thermal/int340x_thermal/int3406_thermal.c
+++ b/drivers/thermal/int340x_thermal/int3406_thermal.c
@@ -225,7 +225,6 @@ static struct platform_driver int3406_thermal_driver = {
.remove = int3406_thermal_remove,
.driver = {
.name = "int3406 thermal",
- .owner = THIS_MODULE,
.acpi_match_table = int3406_thermal_match,
},
};
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 71912301ef7f..0f3f62e81e5b 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1354,7 +1354,6 @@ made_compressed_probe:
spin_lock_init(&acm->write_lock);
spin_lock_init(&acm->read_lock);
mutex_init(&acm->mutex);
- acm->rx_endpoint = usb_rcvbulkpipe(usb_dev, epread->bEndpointAddress);
acm->is_int_ep = usb_endpoint_xfer_int(epread);
if (acm->is_int_ep)
acm->bInterval = epread->bInterval;
@@ -1394,14 +1393,14 @@ made_compressed_probe:
urb->transfer_dma = rb->dma;
if (acm->is_int_ep) {
usb_fill_int_urb(urb, acm->dev,
- acm->rx_endpoint,
+ usb_rcvintpipe(usb_dev, epread->bEndpointAddress),
rb->base,
acm->readsize,
acm_read_bulk_callback, rb,
acm->bInterval);
} else {
usb_fill_bulk_urb(urb, acm->dev,
- acm->rx_endpoint,
+ usb_rcvbulkpipe(usb_dev, epread->bEndpointAddress),
rb->base,
acm->readsize,
acm_read_bulk_callback, rb);
diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h
index 05ce308d5d2a..1f1eabfd8462 100644
--- a/drivers/usb/class/cdc-acm.h
+++ b/drivers/usb/class/cdc-acm.h
@@ -96,7 +96,6 @@ struct acm {
struct acm_rb read_buffers[ACM_NR];
struct acm_wb *putbuffer; /* for acm_tty_put_char() */
int rx_buflimit;
- int rx_endpoint;
spinlock_t read_lock;
int write_used; /* number of non-empty write buffers */
int transmitting;
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index 31ccdccd7a04..051163189810 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -171,6 +171,31 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
ep, buffer, size);
}
+static const unsigned short low_speed_maxpacket_maxes[4] = {
+ [USB_ENDPOINT_XFER_CONTROL] = 8,
+ [USB_ENDPOINT_XFER_ISOC] = 0,
+ [USB_ENDPOINT_XFER_BULK] = 0,
+ [USB_ENDPOINT_XFER_INT] = 8,
+};
+static const unsigned short full_speed_maxpacket_maxes[4] = {
+ [USB_ENDPOINT_XFER_CONTROL] = 64,
+ [USB_ENDPOINT_XFER_ISOC] = 1023,
+ [USB_ENDPOINT_XFER_BULK] = 64,
+ [USB_ENDPOINT_XFER_INT] = 64,
+};
+static const unsigned short high_speed_maxpacket_maxes[4] = {
+ [USB_ENDPOINT_XFER_CONTROL] = 64,
+ [USB_ENDPOINT_XFER_ISOC] = 1024,
+ [USB_ENDPOINT_XFER_BULK] = 512,
+ [USB_ENDPOINT_XFER_INT] = 1023,
+};
+static const unsigned short super_speed_maxpacket_maxes[4] = {
+ [USB_ENDPOINT_XFER_CONTROL] = 512,
+ [USB_ENDPOINT_XFER_ISOC] = 1024,
+ [USB_ENDPOINT_XFER_BULK] = 1024,
+ [USB_ENDPOINT_XFER_INT] = 1024,
+};
+
static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
int asnum, struct usb_host_interface *ifp, int num_ep,
unsigned char *buffer, int size)
@@ -179,6 +204,8 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
struct usb_endpoint_descriptor *d;
struct usb_host_endpoint *endpoint;
int n, i, j, retval;
+ unsigned int maxp;
+ const unsigned short *maxpacket_maxes;
d = (struct usb_endpoint_descriptor *) buffer;
buffer += d->bLength;
@@ -286,6 +313,42 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
endpoint->desc.wMaxPacketSize = cpu_to_le16(8);
}
+ /* Validate the wMaxPacketSize field */
+ maxp = usb_endpoint_maxp(&endpoint->desc);
+
+ /* Find the highest legal maxpacket size for this endpoint */
+ i = 0; /* additional transactions per microframe */
+ switch (to_usb_device(ddev)->speed) {
+ case USB_SPEED_LOW:
+ maxpacket_maxes = low_speed_maxpacket_maxes;
+ break;
+ case USB_SPEED_FULL:
+ maxpacket_maxes = full_speed_maxpacket_maxes;
+ break;
+ case USB_SPEED_HIGH:
+ /* Bits 12..11 are allowed only for HS periodic endpoints */
+ if (usb_endpoint_xfer_int(d) || usb_endpoint_xfer_isoc(d)) {
+ i = maxp & (BIT(12) | BIT(11));
+ maxp &= ~i;
+ }
+ /* fallthrough */
+ default:
+ maxpacket_maxes = high_speed_maxpacket_maxes;
+ break;
+ case USB_SPEED_SUPER:
+ case USB_SPEED_SUPER_PLUS:
+ maxpacket_maxes = super_speed_maxpacket_maxes;
+ break;
+ }
+ j = maxpacket_maxes[usb_endpoint_type(&endpoint->desc)];
+
+ if (maxp > j) {
+ dev_warn(ddev, "config %d interface %d altsetting %d endpoint 0x%X has invalid maxpacket %d, setting to %d\n",
+ cfgno, inum, asnum, d->bEndpointAddress, maxp, j);
+ maxp = j;
+ endpoint->desc.wMaxPacketSize = cpu_to_le16(i | maxp);
+ }
+
/*
* Some buggy high speed devices have bulk endpoints using
* maxpacket sizes other than 512. High speed HCDs may not
@@ -293,9 +356,6 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
*/
if (to_usb_device(ddev)->speed == USB_SPEED_HIGH
&& usb_endpoint_xfer_bulk(d)) {
- unsigned maxp;
-
- maxp = usb_endpoint_maxp(&endpoint->desc) & 0x07ff;
if (maxp != 512)
dev_warn(ddev, "config %d interface %d altsetting %d "
"bulk endpoint 0x%X has invalid maxpacket %d\n",
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index e9f5043a2167..e6a6d67c8705 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -241,7 +241,8 @@ static int usbdev_mmap(struct file *file, struct vm_area_struct *vma)
goto error_decrease_mem;
}
- mem = usb_alloc_coherent(ps->dev, size, GFP_USER, &dma_handle);
+ mem = usb_alloc_coherent(ps->dev, size, GFP_USER | __GFP_NOWARN,
+ &dma_handle);
if (!mem) {
ret = -ENOMEM;
goto error_free_usbm;
@@ -2582,7 +2583,9 @@ static unsigned int usbdev_poll(struct file *file,
if (file->f_mode & FMODE_WRITE && !list_empty(&ps->async_completed))
mask |= POLLOUT | POLLWRNORM;
if (!connected(ps))
- mask |= POLLERR | POLLHUP;
+ mask |= POLLHUP;
+ if (list_empty(&ps->list))
+ mask |= POLLERR;
return mask;
}
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index bee13517676f..1d5fc32d06d0 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -1052,14 +1052,11 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
/* Continue a partial initialization */
if (type == HUB_INIT2 || type == HUB_INIT3) {
- device_lock(hub->intfdev);
+ device_lock(&hdev->dev);
/* Was the hub disconnected while we were waiting? */
- if (hub->disconnected) {
- device_unlock(hub->intfdev);
- kref_put(&hub->kref, hub_release);
- return;
- }
+ if (hub->disconnected)
+ goto disconnected;
if (type == HUB_INIT2)
goto init2;
goto init3;
@@ -1262,7 +1259,7 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
queue_delayed_work(system_power_efficient_wq,
&hub->init_work,
msecs_to_jiffies(delay));
- device_unlock(hub->intfdev);
+ device_unlock(&hdev->dev);
return; /* Continues at init3: below */
} else {
msleep(delay);
@@ -1281,12 +1278,12 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
/* Scan all ports that need attention */
kick_hub_wq(hub);
- /* Allow autosuspend if it was suppressed */
- if (type <= HUB_INIT3)
+ if (type == HUB_INIT2 || type == HUB_INIT3) {
+ /* Allow autosuspend if it was suppressed */
+ disconnected:
usb_autopm_put_interface_async(to_usb_interface(hub->intfdev));
-
- if (type == HUB_INIT2 || type == HUB_INIT3)
- device_unlock(hub->intfdev);
+ device_unlock(&hdev->dev);
+ }
kref_put(&hub->kref, hub_release);
}
@@ -1315,8 +1312,6 @@ static void hub_quiesce(struct usb_hub *hub, enum hub_quiescing_type type)
struct usb_device *hdev = hub->hdev;
int i;
- cancel_delayed_work_sync(&hub->init_work);
-
/* hub_wq and related activity won't re-trigger */
hub->quiescing = 1;
diff --git a/drivers/usb/dwc3/dwc3-of-simple.c b/drivers/usb/dwc3/dwc3-of-simple.c
index 974335377d9f..e56d59b19a0e 100644
--- a/drivers/usb/dwc3/dwc3-of-simple.c
+++ b/drivers/usb/dwc3/dwc3-of-simple.c
@@ -61,6 +61,7 @@ static int dwc3_of_simple_probe(struct platform_device *pdev)
if (!simple->clks)
return -ENOMEM;
+ platform_set_drvdata(pdev, simple);
simple->dev = dev;
for (i = 0; i < simple->num_clocks; i++) {
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index 45f5a232d9fb..2eb84d6c24a6 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -37,6 +37,7 @@
#define PCI_DEVICE_ID_INTEL_BXT 0x0aaa
#define PCI_DEVICE_ID_INTEL_BXT_M 0x1aaa
#define PCI_DEVICE_ID_INTEL_APL 0x5aaa
+#define PCI_DEVICE_ID_INTEL_KBP 0xa2b0
static const struct acpi_gpio_params reset_gpios = { 0, 0, false };
static const struct acpi_gpio_params cs_gpios = { 1, 0, false };
@@ -227,6 +228,7 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BXT), },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BXT_M), },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_APL), },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBP), },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB), },
{ } /* Terminating Entry */
};
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 8f8c2157910e..1f5597ef945d 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -829,7 +829,7 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
if (!req->request.no_interrupt && !chain)
trb->ctrl |= DWC3_TRB_CTRL_IOC | DWC3_TRB_CTRL_ISP_IMI;
- if (last)
+ if (last && !usb_endpoint_xfer_isoc(dep->endpoint.desc))
trb->ctrl |= DWC3_TRB_CTRL_LST;
if (chain)
@@ -1955,7 +1955,8 @@ static void dwc3_gadget_free_endpoints(struct dwc3 *dwc)
static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep,
struct dwc3_request *req, struct dwc3_trb *trb,
- const struct dwc3_event_depevt *event, int status)
+ const struct dwc3_event_depevt *event, int status,
+ int chain)
{
unsigned int count;
unsigned int s_pkt = 0;
@@ -1964,17 +1965,22 @@ static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep,
dep->queued_requests--;
trace_dwc3_complete_trb(dep, trb);
+ /*
+ * If we're in the middle of a series of chained TRBs and we
+ * receive a short transfer along the way, DWC3 will skip
+ * through all TRBs including the last TRB in the chain (the
+ * one where the CHN bit is zero). DWC3 will also avoid
+ * clearing the HWO bit, and SW has to do it manually.
+ *
+ * We're going to do that here to avoid problems of HW trying
+ * to use bogus TRBs for transfers.
+ */
+ if (chain && (trb->ctrl & DWC3_TRB_CTRL_HWO))
+ trb->ctrl &= ~DWC3_TRB_CTRL_HWO;
+
if ((trb->ctrl & DWC3_TRB_CTRL_HWO) && status != -ESHUTDOWN)
- /*
- * We continue despite the error. There is not much we
- * can do. If we don't clean it up we loop forever. If
- * we skip the TRB then it gets overwritten after a
- * while since we use them in a ring buffer. A BUG()
- * would help. Lets hope that if this occurs, someone
- * fixes the root cause instead of looking away :)
- */
- dev_err(dwc->dev, "%s's TRB (%p) still owned by HW\n",
- dep->name, trb);
+ return 1;
+
count = trb->size & DWC3_TRB_SIZE_MASK;
if (dep->direction) {
@@ -2013,15 +2019,7 @@ static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep,
s_pkt = 1;
}
- /*
- * We assume here we will always receive the entire data block
- * which we should receive. Meaning, if we program RX to
- * receive 4K but we receive only 2K, we assume that's all we
- * should receive and we simply bounce the request back to the
- * gadget driver for further processing.
- */
- req->request.actual += req->request.length - count;
- if (s_pkt)
+ if (s_pkt && !chain)
return 1;
if ((event->status & DEPEVT_STATUS_LST) &&
(trb->ctrl & (DWC3_TRB_CTRL_LST |
@@ -2040,13 +2038,17 @@ static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
struct dwc3_trb *trb;
unsigned int slot;
unsigned int i;
+ int count = 0;
int ret;
do {
+ int chain;
+
req = next_request(&dep->started_list);
if (WARN_ON_ONCE(!req))
return 1;
+ chain = req->request.num_mapped_sgs > 0;
i = 0;
do {
slot = req->first_trb_index + i;
@@ -2054,13 +2056,22 @@ static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
slot++;
slot %= DWC3_TRB_NUM;
trb = &dep->trb_pool[slot];
+ count += trb->size & DWC3_TRB_SIZE_MASK;
ret = __dwc3_cleanup_done_trbs(dwc, dep, req, trb,
- event, status);
+ event, status, chain);
if (ret)
break;
} while (++i < req->request.num_mapped_sgs);
+ /*
+ * We assume here we will always receive the entire data block
+ * which we should receive. Meaning, if we program RX to
+ * receive 4K but we receive only 2K, we assume that's all we
+ * should receive and we simply bounce the request back to the
+ * gadget driver for further processing.
+ */
+ req->request.actual += req->request.length - count;
dwc3_gadget_giveback(dep, req, status);
if (ret)
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index eb648485a58c..5ebe6af7976e 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -1913,6 +1913,8 @@ unknown:
break;
case USB_RECIP_ENDPOINT:
+ if (!cdev->config)
+ break;
endp = ((w_index & 0x80) >> 3) | (w_index & 0x0f);
list_for_each_entry(f, &cdev->config->functions, list) {
if (test_bit(endp, f->endpoints))
@@ -2124,14 +2126,14 @@ int composite_os_desc_req_prepare(struct usb_composite_dev *cdev,
cdev->os_desc_req = usb_ep_alloc_request(ep0, GFP_KERNEL);
if (!cdev->os_desc_req) {
- ret = PTR_ERR(cdev->os_desc_req);
+ ret = -ENOMEM;
goto end;
}
/* OS feature descriptor length <= 4kB */
cdev->os_desc_req->buf = kmalloc(4096, GFP_KERNEL);
if (!cdev->os_desc_req->buf) {
- ret = PTR_ERR(cdev->os_desc_req->buf);
+ ret = -ENOMEM;
kfree(cdev->os_desc_req);
goto end;
}
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
index 70cf3477f951..f9237fe2be05 100644
--- a/drivers/usb/gadget/configfs.c
+++ b/drivers/usb/gadget/configfs.c
@@ -1490,7 +1490,9 @@ void unregister_gadget_item(struct config_item *item)
{
struct gadget_info *gi = to_gadget_info(item);
+ mutex_lock(&gi->lock);
unregister_gadget(gi);
+ mutex_unlock(&gi->lock);
}
EXPORT_SYMBOL_GPL(unregister_gadget_item);
diff --git a/drivers/usb/gadget/function/rndis.c b/drivers/usb/gadget/function/rndis.c
index 943c21aafd3b..ab6ac1b74ac0 100644
--- a/drivers/usb/gadget/function/rndis.c
+++ b/drivers/usb/gadget/function/rndis.c
@@ -680,6 +680,12 @@ static int rndis_reset_response(struct rndis_params *params,
{
rndis_reset_cmplt_type *resp;
rndis_resp_t *r;
+ u8 *xbuf;
+ u32 length;
+
+ /* drain the response queue */
+ while ((xbuf = rndis_get_next_response(params, &length)))
+ rndis_free_response(params, xbuf);
r = rndis_add_response(params, sizeof(rndis_reset_cmplt_type));
if (!r)
diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c
index a3f7e7c55ebb..5f562c1ec795 100644
--- a/drivers/usb/gadget/function/u_ether.c
+++ b/drivers/usb/gadget/function/u_ether.c
@@ -556,7 +556,8 @@ static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
/* Multi frame CDC protocols may store the frame for
* later which is not a dropped frame.
*/
- if (dev->port_usb->supports_multi_frame)
+ if (dev->port_usb &&
+ dev->port_usb->supports_multi_frame)
goto multiframe;
goto drop;
}
diff --git a/drivers/usb/gadget/function/uvc_configfs.c b/drivers/usb/gadget/function/uvc_configfs.c
index 66753ba7a42e..31125a4a2658 100644
--- a/drivers/usb/gadget/function/uvc_configfs.c
+++ b/drivers/usb/gadget/function/uvc_configfs.c
@@ -2023,7 +2023,7 @@ static int uvcg_streaming_class_allow_link(struct config_item *src,
if (!data) {
kfree(*class_array);
*class_array = NULL;
- ret = PTR_ERR(data);
+ ret = -ENOMEM;
goto unlock;
}
cl_arr = *class_array;
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
index aa3707bdebb4..16104b5ebdcb 100644
--- a/drivers/usb/gadget/legacy/inode.c
+++ b/drivers/usb/gadget/legacy/inode.c
@@ -542,7 +542,7 @@ static ssize_t ep_aio(struct kiocb *iocb,
*/
spin_lock_irq(&epdata->dev->lock);
value = -ENODEV;
- if (unlikely(epdata->ep))
+ if (unlikely(epdata->ep == NULL))
goto fail;
req = usb_ep_alloc_request(epdata->ep, GFP_ATOMIC);
@@ -606,7 +606,7 @@ ep_read_iter(struct kiocb *iocb, struct iov_iter *to)
}
if (is_sync_kiocb(iocb)) {
value = ep_io(epdata, buf, len);
- if (value >= 0 && copy_to_iter(buf, value, to))
+ if (value >= 0 && (copy_to_iter(buf, value, to) != value))
value = -EFAULT;
} else {
struct kiocb_priv *priv = kzalloc(sizeof *priv, GFP_KERNEL);
diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
index ff8685ea7219..934f83881c30 100644
--- a/drivers/usb/gadget/udc/core.c
+++ b/drivers/usb/gadget/udc/core.c
@@ -1145,7 +1145,7 @@ int usb_add_gadget_udc_release(struct device *parent, struct usb_gadget *gadget,
if (ret != -EPROBE_DEFER)
list_del(&driver->pending);
if (ret)
- goto err4;
+ goto err5;
break;
}
}
@@ -1154,6 +1154,9 @@ int usb_add_gadget_udc_release(struct device *parent, struct usb_gadget *gadget,
return 0;
+err5:
+ device_del(&udc->dev);
+
err4:
list_del(&udc->list);
mutex_unlock(&udc_lock);
diff --git a/drivers/usb/gadget/udc/fsl_qe_udc.c b/drivers/usb/gadget/udc/fsl_qe_udc.c
index 93d28cb00b76..cf8819a5c5b2 100644
--- a/drivers/usb/gadget/udc/fsl_qe_udc.c
+++ b/drivers/usb/gadget/udc/fsl_qe_udc.c
@@ -2053,7 +2053,7 @@ static void setup_received_handle(struct qe_udc *udc,
struct qe_ep *ep;
if (wValue != 0 || wLength != 0
- || pipe > USB_MAX_ENDPOINTS)
+ || pipe >= USB_MAX_ENDPOINTS)
break;
ep = &udc->eps[pipe];
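
/*
 * Aside: a classic off-by-one.  udc->eps[] has USB_MAX_ENDPOINTS entries, so
 * the last valid index is USB_MAX_ENDPOINTS - 1; the old `>` comparison let
 * pipe == USB_MAX_ENDPOINTS through and indexed one element past the array.
 * Minimal sketch (the MAX_ENDPOINTS value is assumed for illustration):
 */
#include <stddef.h>

#define MAX_ENDPOINTS 16

struct ep { int state; };
static struct ep eps[MAX_ENDPOINTS];

struct ep *get_ep(unsigned int pipe)
{
	if (pipe >= MAX_ENDPOINTS)	/* was `>`: admitted index 16 */
		return NULL;
	return &eps[pipe];
}
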
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index a962b89b65a6..1e5f529d51a2 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -332,11 +332,11 @@ static void ehci_turn_off_all_ports(struct ehci_hcd *ehci)
int port = HCS_N_PORTS(ehci->hcs_params);
while (port--) {
- ehci_writel(ehci, PORT_RWC_BITS,
- &ehci->regs->port_status[port]);
spin_unlock_irq(&ehci->lock);
ehci_port_power(ehci, port, false);
spin_lock_irq(&ehci->lock);
+ ehci_writel(ehci, PORT_RWC_BITS,
+ &ehci->regs->port_status[port]);
}
}
diff --git a/drivers/usb/host/max3421-hcd.c b/drivers/usb/host/max3421-hcd.c
index c369c29e496d..2f7690092a7f 100644
--- a/drivers/usb/host/max3421-hcd.c
+++ b/drivers/usb/host/max3421-hcd.c
@@ -1675,7 +1675,7 @@ max3421_gpout_set_value(struct usb_hcd *hcd, u8 pin_number, u8 value)
if (pin_number > 7)
return;
- mask = 1u << pin_number;
+ mask = 1u << (pin_number % 4);
idx = pin_number / 4;
if (value)
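
/*
 * Aside: the MAX3421E packs its GPOUT pins several to a register, so the
 * register index comes from pin / 4 and the bit position from pin % 4;
 * shifting by the raw pin number, as before the fix, computed a mask for a
 * bit the upper register does not have.  A simplified sketch of the
 * divide/modulo addressing (register layout reduced for illustration; the
 * real IOPINS registers differ in detail):
 */
#include <stdint.h>

static uint8_t iopins[2];	/* two hypothetical 4-bit-wide banks */

void gpout_set(uint8_t pin, int value)
{
	if (pin > 7)
		return;

	uint8_t idx  = pin / 4;		/* which register */
	uint8_t mask = 1u << (pin % 4);	/* bit inside that register */

	if (value)
		iopins[idx] |= mask;
	else
		iopins[idx] &= ~mask;
}
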
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index d61fcc48099e..730b9fd26685 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -386,6 +386,9 @@ static int xhci_stop_device(struct xhci_hcd *xhci, int slot_id, int suspend)
ret = 0;
virt_dev = xhci->devs[slot_id];
+ if (!virt_dev)
+ return -ENODEV;
+
cmd = xhci_alloc_command(xhci, false, true, GFP_NOIO);
if (!cmd) {
xhci_dbg(xhci, "Couldn't allocate command structure.\n");
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 4fd041bec332..d7b0f97abbad 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -314,11 +314,12 @@ static void xhci_pci_remove(struct pci_dev *dev)
usb_remove_hcd(xhci->shared_hcd);
usb_put_hcd(xhci->shared_hcd);
}
- usb_hcd_pci_remove(dev);
/* Workaround for spurious wakeups at shutdown with HSW */
if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
pci_set_power_state(dev, PCI_D3hot);
+
+ usb_hcd_pci_remove(dev);
}
#ifdef CONFIG_PM
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 918e0c739b79..fd9fd12e4861 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -1334,12 +1334,6 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
cmd = list_entry(xhci->cmd_list.next, struct xhci_command, cmd_list);
- if (cmd->command_trb != xhci->cmd_ring->dequeue) {
- xhci_err(xhci,
- "Command completion event does not match command\n");
- return;
- }
-
del_timer(&xhci->cmd_timer);
trace_xhci_cmd_completion(cmd_trb, (struct xhci_generic_trb *) event);
@@ -1351,6 +1345,13 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
xhci_handle_stopped_cmd_ring(xhci, cmd);
return;
}
+
+ if (cmd->command_trb != xhci->cmd_ring->dequeue) {
+ xhci_err(xhci,
+ "Command completion event does not match command\n");
+ return;
+ }
+
/*
* Host aborted the command ring, check if the current command was
* supposed to be aborted, otherwise continue normally.
@@ -3243,7 +3244,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
send_addr = addr;
/* Queue the TRBs, even if they are zero-length */
- for (enqd_len = 0; enqd_len < full_len; enqd_len += trb_buff_len) {
+ for (enqd_len = 0; first_trb || enqd_len < full_len;
+ enqd_len += trb_buff_len) {
field = TRB_TYPE(TRB_NORMAL);
/* TRB buffer should not cross 64KB boundaries */
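
/*
 * Aside: with `enqd_len < full_len` alone, a zero-length bulk transfer would
 * queue no TRB at all; OR-ing in `first_trb` guarantees at least one pass,
 * giving the loop do/while semantics.  A runnable sketch of just that loop
 * shape (chunking logic simplified):
 */
#include <stdbool.h>
#include <stdio.h>

static unsigned int queue_transfer(unsigned int full_len, unsigned int chunk)
{
	unsigned int enqd_len, trbs = 0;
	bool first_trb = true;

	for (enqd_len = 0; first_trb || enqd_len < full_len;
			enqd_len += chunk) {
		first_trb = false;
		trbs++;					/* "queue one TRB" */
		if (full_len - enqd_len < chunk)
			chunk = full_len - enqd_len;	/* final short chunk */
	}
	return trbs;
}

int main(void)
{
	printf("%u\n", queue_transfer(0, 512));		/* 1: zero-length TRB */
	printf("%u\n", queue_transfer(1024, 512));	/* 2 */
	return 0;
}
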
diff --git a/drivers/usb/misc/ftdi-elan.c b/drivers/usb/misc/ftdi-elan.c
index 52c27cab78c3..9b5b3b2281ca 100644
--- a/drivers/usb/misc/ftdi-elan.c
+++ b/drivers/usb/misc/ftdi-elan.c
@@ -665,7 +665,7 @@ static ssize_t ftdi_elan_read(struct file *file, char __user *buffer,
{
char data[30 *3 + 4];
char *d = data;
- int m = (sizeof(data) - 1) / 3;
+ int m = (sizeof(data) - 1) / 3 - 1;
int bytes_read = 0;
int retry_on_empty = 10;
int retry_on_timeout = 5;
@@ -1684,7 +1684,7 @@ wait:if (ftdi->disconnected > 0) {
int i = 0;
char data[30 *3 + 4];
char *d = data;
- int m = (sizeof(data) - 1) / 3;
+ int m = (sizeof(data) - 1) / 3 - 1;
int l = 0;
struct u132_target *target = &ftdi->target[ed];
struct u132_command *command = &ftdi->command[
@@ -1876,7 +1876,7 @@ more:{
if (packet_bytes > 2) {
char diag[30 *3 + 4];
char *d = diag;
- int m = (sizeof(diag) - 1) / 3;
+ int m = (sizeof(diag) - 1) / 3 - 1;
char *b = ftdi->bulk_in_buffer;
int bytes_read = 0;
diag[0] = 0;
@@ -2053,7 +2053,7 @@ static int ftdi_elan_synchronize(struct usb_ftdi *ftdi)
if (packet_bytes > 2) {
char diag[30 *3 + 4];
char *d = diag;
- int m = (sizeof(diag) - 1) / 3;
+ int m = (sizeof(diag) - 1) / 3 - 1;
char *b = ftdi->bulk_in_buffer;
int bytes_read = 0;
unsigned char c = 0;
@@ -2155,7 +2155,7 @@ more:{
if (packet_bytes > 2) {
char diag[30 *3 + 4];
char *d = diag;
- int m = (sizeof(diag) - 1) / 3;
+ int m = (sizeof(diag) - 1) / 3 - 1;
char *b = ftdi->bulk_in_buffer;
int bytes_read = 0;
diag[0] = 0;
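
/*
 * Aside: the five identical ftdi-elan changes shrink the hex-dump budget by
 * one triple.  Each byte costs three characters, and when the dump is cut
 * short a truncation marker of the same width still has to fit, so sizing m
 * as (sizeof(buf) - 1) / 3 left no room for it.  A reconstruction of the
 * pattern under that assumption (format details simplified):
 */
#include <stdio.h>
#include <string.h>

static void hexdump(char *out, size_t outsz, const unsigned char *b, int len)
{
	char *d = out;
	int m = ((int)outsz - 1) / 3 - 1;	/* reserve one triple */
	int i;

	out[0] = '\0';
	for (i = 0; i < len; i++) {
		if (i >= m) {
			d += sprintf(d, " ..");	/* 3-char marker */
			break;
		}
		d += sprintf(d, " %02X", b[i]);	/* 3 chars per byte */
	}
}

int main(void)
{
	char diag[30 * 3 + 4];
	unsigned char data[64];

	memset(data, 0xAB, sizeof(data));
	hexdump(diag, sizeof(diag), data, (int)sizeof(data));
	puts(diag);
	return 0;
}
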
diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c
index 6b978f04b8d7..5c8210dc6fd9 100644
--- a/drivers/usb/misc/usbtest.c
+++ b/drivers/usb/misc/usbtest.c
@@ -585,7 +585,6 @@ static void sg_timeout(unsigned long _req)
{
struct usb_sg_request *req = (struct usb_sg_request *) _req;
- req->status = -ETIMEDOUT;
usb_sg_cancel(req);
}
@@ -616,8 +615,10 @@ static int perform_sglist(
mod_timer(&sg_timer, jiffies +
msecs_to_jiffies(SIMPLE_IO_TIMEOUT));
usb_sg_wait(req);
- del_timer_sync(&sg_timer);
- retval = req->status;
+ if (!del_timer_sync(&sg_timer))
+ retval = -ETIMEDOUT;
+ else
+ retval = req->status;
/* FIXME check resulting data pattern */
@@ -2602,7 +2603,7 @@ usbtest_ioctl(struct usb_interface *intf, unsigned int code, void *buf)
ktime_get_ts64(&start);
retval = usbtest_do_ioctl(intf, param_32);
- if (retval)
+ if (retval < 0)
goto free_mutex;
ktime_get_ts64(&end);
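
/*
 * Aside: the usbtest change removes a race -- the timer callback used to
 * write req->status while the completion path could still be using it.
 * Instead, del_timer_sync()'s return value decides: it returns 0 when the
 * timer had already fired, which here is exactly the timeout case.  A loose
 * userspace analogue of "whoever flips the flag first wins" (names
 * hypothetical):
 */
#include <errno.h>
#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool done;

/* Would run from a timer thread. */
void on_timeout(void)
{
	if (atomic_exchange(&done, true))
		return;		/* canceller got there first */
	/* cancel the in-flight request here; do NOT write its status */
}

/* Returns true iff we beat the timer, like del_timer_sync() != 0. */
static bool cancel_timer(void)
{
	return !atomic_exchange(&done, true);
}

int wait_result(int req_status)
{
	if (!cancel_timer())
		return -ETIMEDOUT;	/* timer ran first: a real timeout */
	return req_status;		/* otherwise the request's own status */
}
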
diff --git a/drivers/usb/phy/phy-omap-otg.c b/drivers/usb/phy/phy-omap-otg.c
index 6f6d2a7fd5a0..6523af4f8f93 100644
--- a/drivers/usb/phy/phy-omap-otg.c
+++ b/drivers/usb/phy/phy-omap-otg.c
@@ -140,6 +140,8 @@ static int omap_otg_probe(struct platform_device *pdev)
(rev >> 4) & 0xf, rev & 0xf, config->extcon, otg_dev->id,
otg_dev->vbus);
+ platform_set_drvdata(pdev, otg_dev);
+
return 0;
}
diff --git a/drivers/usb/renesas_usbhs/common.c b/drivers/usb/renesas_usbhs/common.c
index 8fbbc2d32371..ac67bab9124c 100644
--- a/drivers/usb/renesas_usbhs/common.c
+++ b/drivers/usb/renesas_usbhs/common.c
@@ -514,7 +514,8 @@ static struct renesas_usbhs_platform_info *usbhs_parse_dt(struct device *dev)
if (gpio > 0)
dparam->enable_gpio = gpio;
- if (dparam->type == USBHS_TYPE_RCAR_GEN2)
+ if (dparam->type == USBHS_TYPE_RCAR_GEN2 ||
+ dparam->type == USBHS_TYPE_RCAR_GEN3)
dparam->has_usb_dmac = 1;
return info;
diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c
index 280ed5ff021b..857e78337324 100644
--- a/drivers/usb/renesas_usbhs/fifo.c
+++ b/drivers/usb/renesas_usbhs/fifo.c
@@ -871,7 +871,7 @@ static int usbhsf_dma_prepare_push(struct usbhs_pkt *pkt, int *is_done)
/* use PIO if packet is less than pio_dma_border or pipe is DCP */
if ((len < usbhs_get_dparam(priv, pio_dma_border)) ||
- usbhs_pipe_is_dcp(pipe))
+ usbhs_pipe_type_is(pipe, USB_ENDPOINT_XFER_ISOC))
goto usbhsf_pio_prepare_push;
/* check data length if this driver don't use USB-DMAC */
@@ -976,7 +976,7 @@ static int usbhsf_dma_prepare_pop_with_usb_dmac(struct usbhs_pkt *pkt,
/* use PIO if packet is less than pio_dma_border or pipe is DCP */
if ((pkt->length < usbhs_get_dparam(priv, pio_dma_border)) ||
- usbhs_pipe_is_dcp(pipe))
+ usbhs_pipe_type_is(pipe, USB_ENDPOINT_XFER_ISOC))
goto usbhsf_pio_prepare_pop;
fifo = usbhsf_get_dma_fifo(priv, pkt);
diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c
index 50f3363cc382..92bc83b92d10 100644
--- a/drivers/usb/renesas_usbhs/mod_gadget.c
+++ b/drivers/usb/renesas_usbhs/mod_gadget.c
@@ -617,10 +617,13 @@ static int usbhsg_ep_enable(struct usb_ep *ep,
* use dmaengine if possible.
* It will use pio handler if impossible.
*/
- if (usb_endpoint_dir_in(desc))
+ if (usb_endpoint_dir_in(desc)) {
pipe->handler = &usbhs_fifo_dma_push_handler;
- else
+ } else {
pipe->handler = &usbhs_fifo_dma_pop_handler;
+ usbhs_xxxsts_clear(priv, BRDYSTS,
+ usbhs_pipe_number(pipe));
+ }
ret = 0;
}
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 00820809139a..b2d767e743fc 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -648,6 +648,8 @@ static const struct usb_device_id id_table_combined[] = {
{ USB_DEVICE(FTDI_VID, FTDI_ELV_TFD128_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ELV_FM3RX_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ELV_WS777_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_PALMSENS_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_IVIUM_XSTAT_PID) },
{ USB_DEVICE(FTDI_VID, LINX_SDMUSBQSS_PID) },
{ USB_DEVICE(FTDI_VID, LINX_MASTERDEVEL2_PID) },
{ USB_DEVICE(FTDI_VID, LINX_FUTURE_0_PID) },
@@ -1008,6 +1010,7 @@ static const struct usb_device_id id_table_combined[] = {
{ USB_DEVICE(ICPDAS_VID, ICPDAS_I7560U_PID) },
{ USB_DEVICE(ICPDAS_VID, ICPDAS_I7561U_PID) },
{ USB_DEVICE(ICPDAS_VID, ICPDAS_I7563U_PID) },
+ { USB_DEVICE(WICED_VID, WICED_USB20706V2_PID) },
{ } /* Terminating entry */
};
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index c5d6c1e73e8e..f87a938cf005 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -406,6 +406,12 @@
#define FTDI_4N_GALAXY_DE_3_PID 0xF3C2
/*
+ * Ivium Technologies product IDs
+ */
+#define FTDI_PALMSENS_PID 0xf440
+#define FTDI_IVIUM_XSTAT_PID 0xf441
+
+/*
* Linx Technologies product ids
*/
#define LINX_SDMUSBQSS_PID 0xF448 /* Linx SDM-USB-QS-S */
@@ -673,6 +679,12 @@
#define INTREPID_NEOVI_PID 0x0701
/*
+ * WICED USB UART
+ */
+#define WICED_VID 0x0A5C
+#define WICED_USB20706V2_PID 0x6422
+
+/*
* Definitions for ID TECH (www.idt-net.com) devices
*/
#define IDTECH_VID 0x0ACD /* ID TECH Vendor ID */
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 8e07536c233a..bc472584a229 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -274,6 +274,12 @@ static void option_instat_callback(struct urb *urb);
#define TELIT_PRODUCT_LE920 0x1200
#define TELIT_PRODUCT_LE910 0x1201
#define TELIT_PRODUCT_LE910_USBCFG4 0x1206
+#define TELIT_PRODUCT_LE920A4_1207 0x1207
+#define TELIT_PRODUCT_LE920A4_1208 0x1208
+#define TELIT_PRODUCT_LE920A4_1211 0x1211
+#define TELIT_PRODUCT_LE920A4_1212 0x1212
+#define TELIT_PRODUCT_LE920A4_1213 0x1213
+#define TELIT_PRODUCT_LE920A4_1214 0x1214
/* ZTE PRODUCTS */
#define ZTE_VENDOR_ID 0x19d2
@@ -628,6 +634,11 @@ static const struct option_blacklist_info telit_le920_blacklist = {
.reserved = BIT(1) | BIT(5),
};
+static const struct option_blacklist_info telit_le920a4_blacklist_1 = {
+ .sendsetup = BIT(0),
+ .reserved = BIT(1),
+};
+
static const struct option_blacklist_info telit_le922_blacklist_usbcfg0 = {
.sendsetup = BIT(2),
.reserved = BIT(0) | BIT(1) | BIT(3),
@@ -1203,6 +1214,16 @@ static const struct usb_device_id option_ids[] = {
.driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920),
.driver_info = (kernel_ulong_t)&telit_le920_blacklist },
+ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1207) },
+ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1208),
+ .driver_info = (kernel_ulong_t)&telit_le920a4_blacklist_1 },
+ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1211),
+ .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
+ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1212),
+ .driver_info = (kernel_ulong_t)&telit_le920a4_blacklist_1 },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1213, 0xff) },
+ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1214),
+ .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&net_intf1_blacklist },
@@ -1966,6 +1987,7 @@ static const struct usb_device_id option_ids[] = {
.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/A3 */
{ USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) }, /* OLICARD300 - MT6225 */
{ USB_DEVICE(INOVIA_VENDOR_ID, INOVIA_SEW858) },
{ USB_DEVICE(VIATELECOM_VENDOR_ID, VIATELECOM_PRODUCT_CDS7) },
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index b1b9bac44016..d213cf44a7e4 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -1433,7 +1433,7 @@ int usb_serial_register_drivers(struct usb_serial_driver *const serial_drivers[]
rc = usb_register(udriver);
if (rc)
- return rc;
+ goto failed_usb_register;
for (sd = serial_drivers; *sd; ++sd) {
(*sd)->usb_driver = udriver;
@@ -1451,6 +1451,8 @@ int usb_serial_register_drivers(struct usb_serial_driver *const serial_drivers[]
while (sd-- > serial_drivers)
usb_serial_deregister(*sd);
usb_deregister(udriver);
+failed_usb_register:
+ kfree(udriver);
return rc;
}
EXPORT_SYMBOL_GPL(usb_serial_register_drivers);
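
/*
 * Aside: the usb-serial fix plugs a leak -- on usb_register() failure the
 * function returned directly, abandoning the driver structure allocated a
 * few lines earlier.  Jumping to a label placed after the existing unwind
 * code frees it on every failure path.  Sketch with stand-in functions:
 */
#include <errno.h>
#include <stdlib.h>

struct driver { int dummy; };

static int usb_register_stub(struct driver *d) { (void)d; return -ENODEV; }

int register_drivers(void)
{
	int rc;
	struct driver *udriver = calloc(1, sizeof(*udriver));

	if (!udriver)
		return -ENOMEM;

	rc = usb_register_stub(udriver);
	if (rc)
		goto failed_usb_register;	/* was: return rc (leaked) */

	return 0;

failed_usb_register:
	free(udriver);
	return rc;
}
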
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 9d6320e8ff3e..6e29d053843d 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -88,7 +88,7 @@ struct vhost_scsi_cmd {
struct scatterlist *tvc_prot_sgl;
struct page **tvc_upages;
/* Pointer to response header iovec */
- struct iovec *tvc_resp_iov;
+ struct iovec tvc_resp_iov;
/* Pointer to vhost_scsi for our device */
struct vhost_scsi *tvc_vhost;
/* Pointer to vhost_virtqueue for the cmd */
@@ -547,7 +547,7 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
memcpy(v_rsp.sense, cmd->tvc_sense_buf,
se_cmd->scsi_sense_length);
- iov_iter_init(&iov_iter, READ, cmd->tvc_resp_iov,
+ iov_iter_init(&iov_iter, READ, &cmd->tvc_resp_iov,
cmd->tvc_in_iovs, sizeof(v_rsp));
ret = copy_to_iter(&v_rsp, sizeof(v_rsp), &iov_iter);
if (likely(ret == sizeof(v_rsp))) {
@@ -1044,7 +1044,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
}
cmd->tvc_vhost = vs;
cmd->tvc_vq = vq;
- cmd->tvc_resp_iov = &vq->iov[out];
+ cmd->tvc_resp_iov = vq->iov[out];
cmd->tvc_in_iovs = in;
pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n",
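
/*
 * Aside: the vhost-scsi change swaps a pointer for a by-value copy.
 * tvc_resp_iov used to point into vq->iov[], a ring that later requests
 * reuse, so by the time the deferred completion work ran the iovec could
 * have been overwritten.  Minimal sketch of the lifetime fix (types are
 * stand-ins):
 */
#include <stddef.h>

struct iovec_s {
	void  *base;
	size_t len;
};

static struct iovec_s ring_iov[8];	/* shared, reused; like vq->iov[] */

struct cmd {
	struct iovec_s resp_iov;	/* embedded copy, not a pointer */
};

void setup_cmd(struct cmd *c, unsigned int slot)
{
	c->resp_iov = ring_iov[slot];	/* snapshot at submit time */
}
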
diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c
index 7487971f9f78..c1010f018bd8 100644
--- a/drivers/xen/xenbus/xenbus_dev_frontend.c
+++ b/drivers/xen/xenbus/xenbus_dev_frontend.c
@@ -316,7 +316,7 @@ static int xenbus_write_transaction(unsigned msg_type,
rc = -ENOMEM;
goto out;
}
- } else {
+ } else if (msg_type == XS_TRANSACTION_END) {
list_for_each_entry(trans, &u->transactions, list)
if (trans->handle.id == u->u.msg.tx_id)
break;