Diffstat (limited to 'drivers')
130 files changed, 1415 insertions, 1032 deletions
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 2a55380ad730..60bf04b8f103 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -21,6 +21,7 @@
  *
  */
 
+#include <linux/dmi.h>
 #include <linux/module.h>
 #include <linux/usb.h>
 #include <linux/usb/quirks.h>
@@ -379,6 +380,21 @@ static const struct usb_device_id blacklist_table[] = {
 	{ }	/* Terminating entry */
 };
 
+/* The Bluetooth USB module build into some devices needs to be reset on resume,
+ * this is a problem with the platform (likely shutting off all power) not with
+ * the module itself. So we use a DMI list to match known broken platforms.
+ */
+static const struct dmi_system_id btusb_needs_reset_resume_table[] = {
+	{
+		/* Lenovo Yoga 920 (QCA Rome device 0cf3:e300) */
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+			DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo YOGA 920"),
+		},
+	},
+	{}
+};
+
 #define BTUSB_MAX_ISOC_FRAMES	10
 
 #define BTUSB_INTR_RUNNING	0
@@ -2945,6 +2961,9 @@ static int btusb_probe(struct usb_interface *intf,
 	hdev->send   = btusb_send_frame;
 	hdev->notify = btusb_notify;
 
+	if (dmi_check_system(btusb_needs_reset_resume_table))
+		interface_to_usbdev(intf)->quirks |= USB_QUIRK_RESET_RESUME;
+
 #ifdef CONFIG_PM
 	err = btusb_config_oob_wake(hdev);
 	if (err)
@@ -3031,12 +3050,6 @@ static int btusb_probe(struct usb_interface *intf,
 	if (id->driver_info & BTUSB_QCA_ROME) {
 		data->setup_on_usb = btusb_setup_qca;
 		hdev->set_bdaddr = btusb_set_bdaddr_ath3012;
-
-		/* QCA Rome devices lose their updated firmware over suspend,
-		 * but the USB hub doesn't notice any status change.
-		 * explicitly request a device reset on resume.
-		 */
-		interface_to_usbdev(intf)->quirks |= USB_QUIRK_RESET_RESUME;
 	}
 
 #ifdef CONFIG_BT_HCIBTUSB_RTL
diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c
index 0438a64b8185..6314dfb02969 100644
--- a/drivers/bluetooth/hci_bcm.c
+++ b/drivers/bluetooth/hci_bcm.c
@@ -922,12 +922,13 @@ static int bcm_get_resources(struct bcm_device *dev)
 
 	dev->clk = devm_clk_get(dev->dev, NULL);
 
-	dev->device_wakeup = devm_gpiod_get(dev->dev, "device-wakeup",
-					    GPIOD_OUT_LOW);
+	dev->device_wakeup = devm_gpiod_get_optional(dev->dev, "device-wakeup",
+						     GPIOD_OUT_LOW);
 	if (IS_ERR(dev->device_wakeup))
 		return PTR_ERR(dev->device_wakeup);
 
-	dev->shutdown = devm_gpiod_get(dev->dev, "shutdown", GPIOD_OUT_LOW);
+	dev->shutdown = devm_gpiod_get_optional(dev->dev, "shutdown",
+						GPIOD_OUT_LOW);
 	if (IS_ERR(dev->shutdown))
 		return PTR_ERR(dev->shutdown);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index 57afad79f55d..8fa850a070e0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -540,6 +540,9 @@ int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev,
 	size_t size;
 	u32 retry = 3;
 
+	if (amdgpu_acpi_pcie_notify_device_ready(adev))
+		return -EINVAL;
+
 	/* Get the device handle */
 	handle = ACPI_HANDLE(&adev->pdev->dev);
 	if (!handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 13044e66dcaf..561d3312af32 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -481,7 +481,7 @@ static ssize_t amdgpu_debugfs_ring_read(struct file *f, char __user *buf,
 	result = 0;
 
 	if (*pos < 12) {
-		early[0] = amdgpu_ring_get_rptr(ring);
+		early[0] = amdgpu_ring_get_rptr(ring) & ring->buf_mask;
 		early[1] = amdgpu_ring_get_wptr(ring) & ring->buf_mask;
 		early[2] = ring->wptr & ring->buf_mask;
 		for (i = *pos / 4; i < 3 && size; i++) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index b2eae86bf906..5c26a8e806b9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -299,12 +299,15 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev)
 
 	cancel_delayed_work_sync(&adev->uvd.idle_work);
 
-	for (i = 0; i < adev->uvd.max_handles; ++i)
-		if (atomic_read(&adev->uvd.handles[i]))
-			break;
+	/* only valid for physical mode */
+	if (adev->asic_type < CHIP_POLARIS10) {
+		for (i = 0; i < adev->uvd.max_handles; ++i)
+			if (atomic_read(&adev->uvd.handles[i]))
+				break;
 
-	if (i == AMDGPU_MAX_UVD_HANDLES)
-		return 0;
+		if (i == adev->uvd.max_handles)
+			return 0;
+	}
 
 	size = amdgpu_bo_size(adev->uvd.vcpu_bo);
 	ptr = adev->uvd.cpu_addr;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index bd2c4f727df6..a712f4b285f6 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -3093,7 +3093,7 @@ static int dce_v6_0_hpd_irq(struct amdgpu_device *adev,
 			tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK;
 			WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
 			schedule_work(&adev->hotplug_work);
-			DRM_INFO("IH: HPD%d\n", hpd + 1);
+			DRM_DEBUG("IH: HPD%d\n", hpd + 1);
 		}
 
 	return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index a066c5eda135..a4309698e76c 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -4384,34 +4384,8 @@ static void gfx_v7_0_gpu_early_init(struct amdgpu_device *adev)
 	case CHIP_KAVERI:
 		adev->gfx.config.max_shader_engines = 1;
 		adev->gfx.config.max_tile_pipes = 4;
-		if ((adev->pdev->device == 0x1304) ||
-		    (adev->pdev->device == 0x1305) ||
-		    (adev->pdev->device == 0x130C) ||
-		    (adev->pdev->device == 0x130F) ||
-		    (adev->pdev->device == 0x1310) ||
-		    (adev->pdev->device == 0x1311) ||
-		    (adev->pdev->device == 0x131C)) {
-			adev->gfx.config.max_cu_per_sh = 8;
-			adev->gfx.config.max_backends_per_se = 2;
-		} else if ((adev->pdev->device == 0x1309) ||
-			   (adev->pdev->device == 0x130A) ||
-			   (adev->pdev->device == 0x130D) ||
-			   (adev->pdev->device == 0x1313) ||
-			   (adev->pdev->device == 0x131D)) {
-			adev->gfx.config.max_cu_per_sh = 6;
-			adev->gfx.config.max_backends_per_se = 2;
-		} else if ((adev->pdev->device == 0x1306) ||
-			   (adev->pdev->device == 0x1307) ||
-			   (adev->pdev->device == 0x130B) ||
-			   (adev->pdev->device == 0x130E) ||
-			   (adev->pdev->device == 0x1315) ||
-			   (adev->pdev->device == 0x131B)) {
-			adev->gfx.config.max_cu_per_sh = 4;
-			adev->gfx.config.max_backends_per_se = 1;
-		} else {
-			adev->gfx.config.max_cu_per_sh = 3;
-			adev->gfx.config.max_backends_per_se = 1;
-		}
+		adev->gfx.config.max_cu_per_sh = 8;
+		adev->gfx.config.max_backends_per_se = 2;
 		adev->gfx.config.max_sh_per_se = 1;
 		adev->gfx.config.max_texture_channel_caches = 4;
 		adev->gfx.config.max_gprs = 256;
diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
index 543101d5a5ed..2095173aaabf 100644
--- a/drivers/gpu/drm/amd/amdgpu/si.c
+++ b/drivers/gpu/drm/amd/amdgpu/si.c
@@ -31,6 +31,7 @@
 #include "amdgpu_uvd.h"
 #include "amdgpu_vce.h"
 #include "atom.h"
+#include "amd_pcie.h"
 #include "amdgpu_powerplay.h"
 #include "sid.h"
 #include "si_ih.h"
@@ -1461,8 +1462,8 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
 {
 	struct pci_dev *root = adev->pdev->bus->self;
 	int bridge_pos, gpu_pos;
-	u32 speed_cntl, mask, current_data_rate;
-	int ret, i;
+	u32 speed_cntl, current_data_rate;
+	int i;
 	u16 tmp16;
 
 	if (pci_is_root_bus(adev->pdev->bus))
@@ -1474,23 +1475,20 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
 	if (adev->flags & AMD_IS_APU)
 		return;
 
-	ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
-	if (ret != 0)
-		return;
-
-	if (!(mask & (DRM_PCIE_SPEED_50 | DRM_PCIE_SPEED_80)))
+	if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
+					CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
 		return;
 
 	speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
 	current_data_rate = (speed_cntl & LC_CURRENT_DATA_RATE_MASK) >>
 		LC_CURRENT_DATA_RATE_SHIFT;
-	if (mask & DRM_PCIE_SPEED_80) {
+	if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) {
 		if (current_data_rate == 2) {
 			DRM_INFO("PCIE gen 3 link speeds already enabled\n");
 			return;
 		}
 		DRM_INFO("enabling PCIE gen 3 link speeds, disable with amdgpu.pcie_gen2=0\n");
-	} else if (mask & DRM_PCIE_SPEED_50) {
+	} else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2) {
 		if (current_data_rate == 1) {
 			DRM_INFO("PCIE gen 2 link speeds already enabled\n");
 			return;
@@ -1506,7 +1504,7 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
 	if (!gpu_pos)
 		return;
 
-	if (mask & DRM_PCIE_SPEED_80) {
+	if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) {
 		if (current_data_rate != 2) {
 			u16 bridge_cfg, gpu_cfg;
 			u16 bridge_cfg2, gpu_cfg2;
@@ -1589,9 +1587,9 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
 
 	pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
 	tmp16 &= ~0xf;
-	if (mask & DRM_PCIE_SPEED_80)
+	if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
 		tmp16 |= 3;
-	else if (mask & DRM_PCIE_SPEED_50)
+	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
 		tmp16 |= 2;
 	else
 		tmp16 |= 1;
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
index ce675a7f179a..22f0b7ff3ac9 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
@@ -26,6 +26,7 @@
 #include "amdgpu_pm.h"
 #include "amdgpu_dpm.h"
 #include "amdgpu_atombios.h"
+#include "amd_pcie.h"
 #include "sid.h"
 #include "r600_dpm.h"
 #include "si_dpm.h"
@@ -3331,29 +3332,6 @@ static void btc_apply_voltage_delta_rules(struct amdgpu_device *adev,
 	}
 }
 
-static enum amdgpu_pcie_gen r600_get_pcie_gen_support(struct amdgpu_device *adev,
-						      u32 sys_mask,
-						      enum amdgpu_pcie_gen asic_gen,
-						      enum amdgpu_pcie_gen default_gen)
-{
-	switch (asic_gen) {
-	case AMDGPU_PCIE_GEN1:
-		return AMDGPU_PCIE_GEN1;
-	case AMDGPU_PCIE_GEN2:
-		return AMDGPU_PCIE_GEN2;
-	case AMDGPU_PCIE_GEN3:
-		return AMDGPU_PCIE_GEN3;
-	default:
-		if ((sys_mask & DRM_PCIE_SPEED_80) && (default_gen == AMDGPU_PCIE_GEN3))
-			return AMDGPU_PCIE_GEN3;
-		else if ((sys_mask & DRM_PCIE_SPEED_50) && (default_gen == AMDGPU_PCIE_GEN2))
-			return AMDGPU_PCIE_GEN2;
-		else
-			return AMDGPU_PCIE_GEN1;
-	}
-	return AMDGPU_PCIE_GEN1;
-}
-
 static void r600_calculate_u_and_p(u32 i, u32 r_c, u32 p_b,
 				   u32 *p, u32 *u)
 {
@@ -5028,10 +5006,11 @@ static int si_populate_smc_acpi_state(struct amdgpu_device *adev,
 							    table->ACPIState.levels[0].vddc.index,
 							    &table->ACPIState.levels[0].std_vddc);
 	}
-	table->ACPIState.levels[0].gen2PCIE = (u8)r600_get_pcie_gen_support(adev,
-									    si_pi->sys_pcie_mask,
-									    si_pi->boot_pcie_gen,
-									    AMDGPU_PCIE_GEN1);
+	table->ACPIState.levels[0].gen2PCIE =
+		(u8)amdgpu_get_pcie_gen_support(adev,
+						si_pi->sys_pcie_mask,
+						si_pi->boot_pcie_gen,
+						AMDGPU_PCIE_GEN1);
 
 	if (si_pi->vddc_phase_shed_control)
 		si_populate_phase_shedding_value(adev,
@@ -7168,10 +7147,10 @@ static void si_parse_pplib_clock_info(struct amdgpu_device *adev,
 	pl->vddc = le16_to_cpu(clock_info->si.usVDDC);
 	pl->vddci = le16_to_cpu(clock_info->si.usVDDCI);
 	pl->flags = le32_to_cpu(clock_info->si.ulFlags);
-	pl->pcie_gen = r600_get_pcie_gen_support(adev,
-						 si_pi->sys_pcie_mask,
-						 si_pi->boot_pcie_gen,
-						 clock_info->si.ucPCIEGen);
+	pl->pcie_gen = amdgpu_get_pcie_gen_support(adev,
+						   si_pi->sys_pcie_mask,
+						   si_pi->boot_pcie_gen,
+						   clock_info->si.ucPCIEGen);
 
 	/* patch up vddc if necessary */
 	ret = si_get_leakage_voltage_from_leakage_index(adev, pl->vddc,
@@ -7326,7 +7305,6 @@ static int si_dpm_init(struct amdgpu_device *adev)
 	struct si_power_info *si_pi;
 	struct atom_clock_dividers dividers;
 	int ret;
-	u32 mask;
 
 	si_pi = kzalloc(sizeof(struct si_power_info), GFP_KERNEL);
 	if (si_pi == NULL)
@@ -7336,11 +7314,9 @@ static int si_dpm_init(struct amdgpu_device *adev)
 	eg_pi = &ni_pi->eg;
 	pi = &eg_pi->rv7xx;
 
-	ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
-	if (ret)
-		si_pi->sys_pcie_mask = 0;
-	else
-		si_pi->sys_pcie_mask = mask;
+	si_pi->sys_pcie_mask =
+		(adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_MASK) >>
+		CAIL_PCIE_LINK_SPEED_SUPPORT_SHIFT;
 	si_pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID;
 	si_pi->boot_pcie_gen = si_get_current_pcie_speed(adev);
 
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 862835dc054e..c345e645f1d7 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -1037,6 +1037,10 @@ static void handle_hpd_rx_irq(void *param)
 			!is_mst_root_connector) {
 		/* Downstream Port status changed. */
 		if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
+
+			if (aconnector->fake_enable)
+				aconnector->fake_enable = false;
+
 			amdgpu_dm_update_connector_after_detect(aconnector);
 
@@ -2012,30 +2016,32 @@ static void update_stream_scaling_settings(const struct drm_display_mode *mode,
 	dst.width = stream->timing.h_addressable;
 	dst.height = stream->timing.v_addressable;
 
-	rmx_type = dm_state->scaling;
-	if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
-		if (src.width * dst.height <
-				src.height * dst.width) {
-			/* height needs less upscaling/more downscaling */
-			dst.width = src.width *
-					dst.height / src.height;
-		} else {
-			/* width needs less upscaling/more downscaling */
-			dst.height = src.height *
-					dst.width / src.width;
+	if (dm_state) {
+		rmx_type = dm_state->scaling;
+		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
+			if (src.width * dst.height <
+					src.height * dst.width) {
+				/* height needs less upscaling/more downscaling */
+				dst.width = src.width *
+						dst.height / src.height;
+			} else {
+				/* width needs less upscaling/more downscaling */
+				dst.height = src.height *
+						dst.width / src.width;
+			}
+		} else if (rmx_type == RMX_CENTER) {
+			dst = src;
 		}
-	} else if (rmx_type == RMX_CENTER) {
-		dst = src;
-	}
 
-	dst.x = (stream->timing.h_addressable - dst.width) / 2;
-	dst.y = (stream->timing.v_addressable - dst.height) / 2;
+		dst.x = (stream->timing.h_addressable - dst.width) / 2;
+		dst.y = (stream->timing.v_addressable - dst.height) / 2;
 
-	if (dm_state->underscan_enable) {
-		dst.x += dm_state->underscan_hborder / 2;
-		dst.y += dm_state->underscan_vborder / 2;
-		dst.width -= dm_state->underscan_hborder;
-		dst.height -= dm_state->underscan_vborder;
+		if (dm_state->underscan_enable) {
+			dst.x += dm_state->underscan_hborder / 2;
+			dst.y += dm_state->underscan_vborder / 2;
+			dst.width -= dm_state->underscan_hborder;
+			dst.height -= dm_state->underscan_vborder;
+		}
 	}
 
 	stream->src = src;
@@ -2360,12 +2366,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
 
 	if (aconnector == NULL) {
 		DRM_ERROR("aconnector is NULL!\n");
-		goto drm_connector_null;
-	}
-
-	if (dm_state == NULL) {
-		DRM_ERROR("dm_state is NULL!\n");
-		goto dm_state_null;
+		return stream;
 	}
 
 	drm_connector = &aconnector->base;
@@ -2377,18 +2378,18 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
 		 */
 		if (aconnector->mst_port) {
 			dm_dp_mst_dc_sink_create(drm_connector);
-			goto mst_dc_sink_create_done;
+			return stream;
 		}
 
 		if (create_fake_sink(aconnector))
-			goto stream_create_fail;
+			return stream;
 	}
 
 	stream = dc_create_stream_for_sink(aconnector->dc_sink);
 
 	if (stream == NULL) {
 		DRM_ERROR("Failed to create stream for sink!\n");
-		goto stream_create_fail;
+		return stream;
 	}
 
 	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
@@ -2414,9 +2415,12 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
 	} else {
 		decide_crtc_timing_for_drm_display_mode(
 				&mode, preferred_mode,
-				dm_state->scaling != RMX_OFF);
+				dm_state ? (dm_state->scaling != RMX_OFF) : false);
 	}
 
+	if (!dm_state)
+		drm_mode_set_crtcinfo(&mode, 0);
+
 	fill_stream_properties_from_drm_display_mode(stream,
 			&mode, &aconnector->base);
 	update_stream_scaling_settings(&mode, dm_state, stream);
@@ -2426,10 +2430,8 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
 		drm_connector,
 		aconnector->dc_sink);
 
-stream_create_fail:
-dm_state_null:
-drm_connector_null:
-mst_dc_sink_create_done:
+	update_stream_signal(stream);
+
 	return stream;
 }
 
@@ -2497,6 +2499,27 @@ dm_crtc_duplicate_state(struct drm_crtc *crtc)
 	return &state->base;
 }
 
+
+static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
+{
+	enum dc_irq_source irq_source;
+	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+	struct amdgpu_device *adev = crtc->dev->dev_private;
+
+	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
+	return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
+}
+
+static int dm_enable_vblank(struct drm_crtc *crtc)
+{
+	return dm_set_vblank(crtc, true);
+}
+
+static void dm_disable_vblank(struct drm_crtc *crtc)
+{
+	dm_set_vblank(crtc, false);
+}
+
 /* Implemented only the options currently availible for the driver */
 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
 	.reset = dm_crtc_reset_state,
@@ -2506,6 +2529,8 @@ static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
 	.page_flip = drm_atomic_helper_page_flip,
 	.atomic_duplicate_state = dm_crtc_duplicate_state,
 	.atomic_destroy_state = dm_crtc_destroy_state,
+	.enable_vblank = dm_enable_vblank,
+	.disable_vblank = dm_disable_vblank,
 };
 
 static enum drm_connector_status
@@ -2800,7 +2825,7 @@ int amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
 		goto fail;
 	}
 
-	stream = dc_create_stream_for_sink(dc_sink);
+	stream = create_stream_for_sink(aconnector, mode, NULL);
 	if (stream == NULL) {
 		DRM_ERROR("Failed to create stream for sink!\n");
 		goto fail;
@@ -3060,6 +3085,9 @@ static int dm_plane_atomic_check(struct drm_plane *plane,
 	if (!dm_plane_state->dc_state)
 		return 0;
 
+	if (!fill_rects_from_plane_state(state, dm_plane_state->dc_state))
+		return -EINVAL;
+
 	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
 		return 0;
 
@@ -4632,8 +4660,6 @@ static int dm_update_planes_state(struct dc *dc,
 	bool pflip_needed  = !state->allow_modeset;
 	int ret = 0;
 
-	if (pflip_needed)
-		return ret;
 
 	/* Add new planes */
 	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
@@ -4648,6 +4674,8 @@ static int dm_update_planes_state(struct dc *dc,
 
 		/* Remove any changed/removed planes */
 		if (!enable) {
+			if (pflip_needed)
+				continue;
 
 			if (!old_plane_crtc)
 				continue;
@@ -4679,6 +4707,7 @@ static int dm_update_planes_state(struct dc *dc,
 			*lock_and_validation_needed = true;
 
 		} else { /* Add new planes */
+			struct dc_plane_state *dc_new_plane_state;
 
 			if (drm_atomic_plane_disabling(plane->state, new_plane_state))
 				continue;
@@ -4692,38 +4721,50 @@ static int dm_update_planes_state(struct dc *dc,
 			if (!dm_new_crtc_state->stream)
 				continue;
 
+			if (pflip_needed)
+				continue;
 
 			WARN_ON(dm_new_plane_state->dc_state);
 
-			dm_new_plane_state->dc_state = dc_create_plane_state(dc);
-
-			DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
-					plane->base.id, new_plane_crtc->base.id);
-
-			if (!dm_new_plane_state->dc_state) {
+			dc_new_plane_state = dc_create_plane_state(dc);
+			if (!dc_new_plane_state) {
 				ret = -EINVAL;
 				return ret;
 			}
 
+			DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
+					plane->base.id, new_plane_crtc->base.id);
+
 			ret = fill_plane_attributes(
 				new_plane_crtc->dev->dev_private,
-				dm_new_plane_state->dc_state,
+				dc_new_plane_state,
 				new_plane_state,
 				new_crtc_state);
-			if (ret)
+			if (ret) {
+				dc_plane_state_release(dc_new_plane_state);
 				return ret;
+			}
 
-
+			/*
+			 * Any atomic check errors that occur after this will
+			 * not need a release. The plane state will be attached
+			 * to the stream, and therefore part of the atomic
+			 * state. It'll be released when the atomic state is
+			 * cleaned.
+			 */
 			if (!dc_add_plane_to_context(
 					dc,
 					dm_new_crtc_state->stream,
-					dm_new_plane_state->dc_state,
+					dc_new_plane_state,
 					dm_state->context)) {
 
+				dc_plane_state_release(dc_new_plane_state);
 				ret = -EINVAL;
 				return ret;
 			}
 
+			dm_new_plane_state->dc_state = dc_new_plane_state;
+
 			/* Tell DC to do a full surface update every time there
 			 * is a plane change. Inefficient, but works for now.
 			 */
@@ -4737,6 +4778,30 @@ static int dm_update_planes_state(struct dc *dc,
 	return ret;
 }
 
+static int dm_atomic_check_plane_state_fb(struct drm_atomic_state *state,
+					  struct drm_crtc *crtc)
+{
+	struct drm_plane *plane;
+	struct drm_crtc_state *crtc_state;
+
+	WARN_ON(!drm_atomic_get_new_crtc_state(state, crtc));
+
+	drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
+		struct drm_plane_state *plane_state =
+			drm_atomic_get_plane_state(state, plane);
+
+		if (IS_ERR(plane_state))
+			return -EDEADLK;
+
+		crtc_state = drm_atomic_get_crtc_state(plane_state->state, crtc);
+		if (crtc->primary == plane && crtc_state->active) {
+			if (!plane_state->fb)
+				return -EINVAL;
+		}
+	}
+	return 0;
+}
+
 static int amdgpu_dm_atomic_check(struct drm_device *dev,
 				  struct drm_atomic_state *state)
 {
@@ -4760,6 +4825,10 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
 		goto fail;
 
 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+		ret = dm_atomic_check_plane_state_fb(state, crtc);
+		if (ret)
+			goto fail;
+
 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
 		    !new_crtc_state->color_mgmt_changed)
 			continue;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
index 1874b6cee6af..422055080df4 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
@@ -683,10 +683,8 @@ static const struct amdgpu_irq_src_funcs dm_hpd_irq_funcs = {
 
 void amdgpu_dm_set_irq_funcs(struct amdgpu_device *adev)
 {
-	if (adev->mode_info.num_crtc > 0)
-		adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_VLINE1 + adev->mode_info.num_crtc;
-	else
-		adev->crtc_irq.num_types = 0;
+
+	adev->crtc_irq.num_types = adev->mode_info.num_crtc;
 	adev->crtc_irq.funcs = &dm_crtc_irq_funcs;
 
 	adev->pageflip_irq.num_types = adev->mode_info.num_crtc;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index f3d87f418d2e..93421dad21bd 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -189,6 +189,12 @@ void dm_dp_mst_dc_sink_create(struct drm_connector *connector)
 		.link = aconnector->dc_link,
 		.sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST };
 
+	/*
+	 * TODO: Need to further figure out why ddc.algo is NULL while MST port exists
+	 */
+	if (!aconnector->port || !aconnector->port->aux.ddc.algo)
+		return;
+
 	edid = drm_dp_mst_get_edid(connector, &aconnector->mst_port->mst_mgr, aconnector->port);
 
 	if (!edid) {
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 35e84ed031de..12868c769606 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -1358,13 +1358,13 @@ enum dc_irq_source dc_interrupt_to_irq_source(
 	return dal_irq_service_to_irq_source(dc->res_pool->irqs, src_id, ext_id);
 }
 
-void dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable)
+bool dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable)
 {
 
 	if (dc == NULL)
-		return;
+		return false;
 
-	dal_irq_service_set(dc->res_pool->irqs, src, enable);
+	return dal_irq_service_set(dc->res_pool->irqs, src, enable);
 }
 
 void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src)
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index a37428271573..be5546181fa8 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -1749,8 +1749,7 @@ static void enable_link_hdmi(struct pipe_ctx *pipe_ctx)
 		link->link_enc,
 		pipe_ctx->clock_source->id,
 		display_color_depth,
-		pipe_ctx->stream->signal == SIGNAL_TYPE_HDMI_TYPE_A,
-		pipe_ctx->stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK,
+		pipe_ctx->stream->signal,
 		stream->phy_pix_clk);
 
 	if (pipe_ctx->stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index 95b8dd0e53c6..4d07ffebfd31 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -1360,9 +1360,6 @@ bool dc_is_stream_scaling_unchanged(
 	return true;
 }
 
-/* Maximum TMDS single link pixel clock 165MHz */
-#define TMDS_MAX_PIXEL_CLOCK_IN_KHZ 165000
-
 static void update_stream_engine_usage(
 		struct resource_context *res_ctx,
 		const struct resource_pool *pool,
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index 539c3e0a6292..cd5819789d76 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -33,8 +33,7 @@
 /*******************************************************************************
  * Private functions
  ******************************************************************************/
-#define TMDS_MAX_PIXEL_CLOCK_IN_KHZ_UPMOST 297000
-static void update_stream_signal(struct dc_stream_state *stream)
+void update_stream_signal(struct dc_stream_state *stream)
 {
 
 	struct dc_sink *dc_sink = stream->sink;
@@ -45,8 +44,9 @@ static void update_stream_signal(struct dc_stream_state *stream)
 		stream->signal = dc_sink->sink_signal;
 
 	if (dc_is_dvi_signal(stream->signal)) {
-		if (stream->timing.pix_clk_khz > TMDS_MAX_PIXEL_CLOCK_IN_KHZ_UPMOST &&
-			stream->sink->sink_signal != SIGNAL_TYPE_DVI_SINGLE_LINK)
+		if (stream->ctx->dc->caps.dual_link_dvi &&
+		    stream->timing.pix_clk_khz > TMDS_MAX_PIXEL_CLOCK &&
+		    stream->sink->sink_signal != SIGNAL_TYPE_DVI_SINGLE_LINK)
 			stream->signal = SIGNAL_TYPE_DVI_DUAL_LINK;
 		else
 			stream->signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
@@ -193,6 +193,7 @@ bool dc_stream_set_cursor_attributes(
 
 	core_dc = stream->ctx->dc;
 	res_ctx = &core_dc->current_state->res_ctx;
+	stream->cursor_attributes = *attributes;
 
 	for (i = 0; i < MAX_PIPES; i++) {
 		struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
@@ -204,34 +205,8 @@ bool dc_stream_set_cursor_attributes(
 			continue;
 
-		if (pipe_ctx->plane_res.ipp->funcs->ipp_cursor_set_attributes != NULL)
-			pipe_ctx->plane_res.ipp->funcs->ipp_cursor_set_attributes(
-					pipe_ctx->plane_res.ipp, attributes);
-
-		if (pipe_ctx->plane_res.hubp != NULL &&
-				pipe_ctx->plane_res.hubp->funcs->set_cursor_attributes != NULL)
-			pipe_ctx->plane_res.hubp->funcs->set_cursor_attributes(
-					pipe_ctx->plane_res.hubp, attributes);
-
-		if (pipe_ctx->plane_res.mi != NULL &&
-				pipe_ctx->plane_res.mi->funcs->set_cursor_attributes != NULL)
-			pipe_ctx->plane_res.mi->funcs->set_cursor_attributes(
-					pipe_ctx->plane_res.mi, attributes);
-
-
-		if (pipe_ctx->plane_res.xfm != NULL &&
-				pipe_ctx->plane_res.xfm->funcs->set_cursor_attributes != NULL)
-			pipe_ctx->plane_res.xfm->funcs->set_cursor_attributes(
-					pipe_ctx->plane_res.xfm, attributes);
-
-		if (pipe_ctx->plane_res.dpp != NULL &&
-				pipe_ctx->plane_res.dpp->funcs->set_cursor_attributes != NULL)
-			pipe_ctx->plane_res.dpp->funcs->set_cursor_attributes(
-					pipe_ctx->plane_res.dpp, attributes->color_format);
+		core_dc->hwss.set_cursor_attribute(pipe_ctx);
 	}
-
-	stream->cursor_attributes = *attributes;
-
 	return true;
 }
 
@@ -255,21 +230,10 @@ bool dc_stream_set_cursor_position(
 
 	core_dc = stream->ctx->dc;
 	res_ctx = &core_dc->current_state->res_ctx;
+	stream->cursor_position = *position;
 
 	for (i = 0; i < MAX_PIPES; i++) {
 		struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
-		struct input_pixel_processor *ipp = pipe_ctx->plane_res.ipp;
-		struct mem_input *mi = pipe_ctx->plane_res.mi;
-		struct hubp *hubp = pipe_ctx->plane_res.hubp;
-		struct dpp *dpp = pipe_ctx->plane_res.dpp;
-		struct dc_cursor_position pos_cpy = *position;
-		struct dc_cursor_mi_param param = {
-			.pixel_clk_khz = stream->timing.pix_clk_khz,
-			.ref_clk_khz = core_dc->res_pool->ref_clock_inKhz,
-			.viewport_x_start = pipe_ctx->plane_res.scl_data.viewport.x,
-			.viewport_width = pipe_ctx->plane_res.scl_data.viewport.width,
-			.h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz
-		};
 
 		if (pipe_ctx->stream != stream ||
 				(!pipe_ctx->plane_res.mi  && !pipe_ctx->plane_res.hubp) ||
@@ -278,33 +242,9 @@ bool dc_stream_set_cursor_position(
 				!pipe_ctx->plane_res.ipp)
 			continue;
 
-		if (pipe_ctx->plane_state->address.type
-				== PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
-			pos_cpy.enable = false;
-
-		if (pipe_ctx->top_pipe && pipe_ctx->plane_state != pipe_ctx->top_pipe->plane_state)
-			pos_cpy.enable = false;
-
-
-		if (ipp != NULL && ipp->funcs->ipp_cursor_set_position != NULL)
-			ipp->funcs->ipp_cursor_set_position(ipp, &pos_cpy, &param);
-
-		if (mi != NULL && mi->funcs->set_cursor_position != NULL)
-			mi->funcs->set_cursor_position(mi, &pos_cpy, &param);
-
-		if (!hubp)
-			continue;
-
-		if (hubp->funcs->set_cursor_position != NULL)
-			hubp->funcs->set_cursor_position(hubp, &pos_cpy, &param);
-
-		if (dpp != NULL && dpp->funcs->set_cursor_position != NULL)
-			dpp->funcs->set_cursor_position(dpp, &pos_cpy, &param, hubp->curs_attr.width);
-
+		core_dc->hwss.set_cursor_position(pipe_ctx);
 	}
 
-	stream->cursor_position = *position;
-
 	return true;
 }
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index e2e3c9df79ea..d6d56611604e 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -62,6 +62,7 @@ struct dc_caps {
 	bool dcc_const_color;
 	bool dynamic_audio;
 	bool is_apu;
+	bool dual_link_dvi;
 };
 
 struct dc_dcc_surface_param {
@@ -672,7 +673,7 @@ enum dc_irq_source dc_interrupt_to_irq_source(
 		struct dc *dc,
 		uint32_t src_id,
 		uint32_t ext_id);
-void dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable);
+bool dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable);
 void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src);
 enum dc_irq_source dc_get_hpd_irq_source_at_index(
 		struct dc *dc, uint32_t link_index);
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index 01c60f11b2bd..456e4d29eadd 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -237,6 +237,8 @@ enum surface_update_type dc_check_update_surfaces_for_stream(
  */
 struct dc_stream_state *dc_create_stream_for_sink(struct dc_sink *dc_sink);
 
+void update_stream_signal(struct dc_stream_state *stream);
+
 void dc_stream_retain(struct dc_stream_state *dc_stream);
 void dc_stream_release(struct dc_stream_state *dc_stream);
 
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
index b73db9e78437..a993279a8f2d 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
@@ -236,6 +236,7 @@
 	SR(D2VGA_CONTROL), \
 	SR(D3VGA_CONTROL), \
 	SR(D4VGA_CONTROL), \
+	SR(VGA_TEST_CONTROL), \
 	SR(DC_IP_REQUEST_CNTL), \
 	BL_REG_LIST()
 
@@ -337,6 +338,7 @@ struct dce_hwseq_registers {
 	uint32_t D2VGA_CONTROL;
 	uint32_t D3VGA_CONTROL;
 	uint32_t D4VGA_CONTROL;
+	uint32_t VGA_TEST_CONTROL;
 	/* MMHUB registers. read only. temporary hack */
 	uint32_t VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32;
 	uint32_t VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32;
@@ -493,6 +495,9 @@ struct dce_hwseq_registers {
 	HWS_SF(, DOMAIN6_PG_STATUS, DOMAIN6_PGFSM_PWR_STATUS, mask_sh), \
 	HWS_SF(, DOMAIN7_PG_STATUS, DOMAIN7_PGFSM_PWR_STATUS, mask_sh), \
 	HWS_SF(, DC_IP_REQUEST_CNTL, IP_REQUEST_EN, mask_sh), \
+	HWS_SF(, D1VGA_CONTROL, D1VGA_MODE_ENABLE, mask_sh),\
+	HWS_SF(, VGA_TEST_CONTROL, VGA_TEST_ENABLE, mask_sh),\
+	HWS_SF(, VGA_TEST_CONTROL, VGA_TEST_RENDER_START, mask_sh),\
 	HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_BLON, mask_sh), \
 	HWS_SF(, LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, mask_sh)
 
@@ -583,7 +588,10 @@ struct dce_hwseq_registers {
 	type DCFCLK_GATE_DIS; \
 	type DCHUBBUB_GLOBAL_TIMER_REFDIV; \
 	type DENTIST_DPPCLK_WDIVIDER; \
-	type DENTIST_DISPCLK_WDIVIDER;
+	type DENTIST_DISPCLK_WDIVIDER; \
+	type VGA_TEST_ENABLE; \
+	type VGA_TEST_RENDER_START; \
+	type D1VGA_MODE_ENABLE;
 
 struct dce_hwseq_shift {
 	HWSEQ_REG_FIELD_LIST(uint8_t)
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
index a266e3f5e75f..e4741f1a2b01 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
@@ -82,13 +82,6 @@
 #define DCE110_DIG_FE_SOURCE_SELECT_DIGF 0x20
 #define DCE110_DIG_FE_SOURCE_SELECT_DIGG 0x40
 
-/* Minimum pixel clock, in KHz. For TMDS signal is 25.00 MHz */
-#define TMDS_MIN_PIXEL_CLOCK 25000
-/* Maximum pixel clock, in KHz. For TMDS signal is 165.00 MHz */
-#define TMDS_MAX_PIXEL_CLOCK 165000
-/* For current ASICs pixel clock - 600MHz */
-#define MAX_ENCODER_CLOCK 600000
-
 enum {
 	DP_MST_UPDATE_MAX_RETRY = 50
 };
@@ -683,6 +676,7 @@ void dce110_link_encoder_construct(
 {
 	struct bp_encoder_cap_info bp_cap_info = {0};
 	const struct dc_vbios_funcs *bp_funcs = init_data->ctx->dc_bios->funcs;
+	enum bp_result result = BP_RESULT_OK;
 
 	enc110->base.funcs = &dce110_lnk_enc_funcs;
 	enc110->base.ctx = init_data->ctx;
@@ -757,15 +751,24 @@ void dce110_link_encoder_construct(
 		enc110->base.preferred_engine = ENGINE_ID_UNKNOWN;
 	}
 
+	/* default to one to mirror Windows behavior */
+	enc110->base.features.flags.bits.HDMI_6GB_EN = 1;
+
+	result = bp_funcs->get_encoder_cap_info(enc110->base.ctx->dc_bios,
+						enc110->base.id, &bp_cap_info);
+
 	/* Override features with DCE-specific values */
-	if (BP_RESULT_OK == bp_funcs->get_encoder_cap_info(
-			enc110->base.ctx->dc_bios, enc110->base.id,
-			&bp_cap_info)) {
+	if (BP_RESULT_OK == result) {
 		enc110->base.features.flags.bits.IS_HBR2_CAPABLE =
 				bp_cap_info.DP_HBR2_EN;
 		enc110->base.features.flags.bits.IS_HBR3_CAPABLE =
 				bp_cap_info.DP_HBR3_EN;
 		enc110->base.features.flags.bits.HDMI_6GB_EN = bp_cap_info.HDMI_6GB_EN;
+	} else {
+		dm_logger_write(enc110->base.ctx->logger, LOG_WARNING,
+				"%s: Failed to get encoder_cap_info from VBIOS with error code %d!\n",
+				__func__,
+				result);
 	}
 }
 
@@ -904,8 +907,7 @@ void dce110_link_encoder_enable_tmds_output(
 	struct link_encoder *enc,
 	enum clock_source_id clock_source,
 	enum dc_color_depth color_depth,
-	bool hdmi,
-	bool dual_link,
+	enum signal_type signal,
 	uint32_t pixel_clock)
 {
 	struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
@@ -919,16 +921,12 @@ void dce110_link_encoder_enable_tmds_output(
 	cntl.engine_id = enc->preferred_engine;
 	cntl.transmitter = enc110->base.transmitter;
 	cntl.pll_id = clock_source;
-	if (hdmi) {
-		cntl.signal = SIGNAL_TYPE_HDMI_TYPE_A;
-		cntl.lanes_number = 4;
-	} else if (dual_link) {
-		cntl.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
+	cntl.signal = signal;
+	if (cntl.signal == SIGNAL_TYPE_DVI_DUAL_LINK)
 		cntl.lanes_number = 8;
-	} else {
-		cntl.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
+	else
 		cntl.lanes_number = 4;
-	}
+
 	cntl.hpd_sel = enc110->base.hpd_source;
 
 	cntl.pixel_clock = pixel_clock;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h
index 8ca9afe47a2b..0ec3433d34b6 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h
@@ -210,8 +210,7 @@ void dce110_link_encoder_enable_tmds_output(
 	struct link_encoder *enc,
 	enum clock_source_id clock_source,
 	enum dc_color_depth color_depth,
-	bool hdmi,
-	bool dual_link,
+	enum signal_type signal,
 	uint32_t pixel_clock);
 
 /* enables DP PHY output */
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
index 3ea43e2a9450..442dd2d93618 100644
--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
@@ -852,6 +852,7 @@ static bool construct(
 	dc->caps.max_downscale_ratio = 200;
 	dc->caps.i2c_speed_in_khz = 40;
 	dc->caps.max_cursor_size = 128;
+	dc->caps.dual_link_dvi = true;
 
 	for (i = 0; i < pool->base.pipe_count; i++) {
 		pool->base.timing_generators[i] =
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 86cdd7b4811f..6f382a3ac90f 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -688,15 +688,22 @@ void dce110_enable_stream(struct pipe_ctx *pipe_ctx)
 	struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
 	struct dc_link *link = pipe_ctx->stream->sink->link;
 
-	/* 1. update AVI info frame (HDMI, DP)
-	 * we always need to update info frame
-	*/
+
 	uint32_t active_total_with_borders;
 	uint32_t early_control = 0;
 	struct timing_generator *tg = pipe_ctx->stream_res.tg;
 
-	/* TODOFPGA may change to hwss.update_info_frame */
+	/* For MST, there are multiply stream go to only one link.
+	 * connect DIG back_end to front_end while enable_stream and
+	 * disconnect them during disable_stream
+	 * BY this, it is logic clean to separate stream and link */
+	link->link_enc->funcs->connect_dig_be_to_fe(link->link_enc,
+						pipe_ctx->stream_res.stream_enc->id, true);
+
+	/* update AVI info frame (HDMI, DP)*/
+	/* TODO: FPGA may change to hwss.update_info_frame */
 	dce110_update_info_frame(pipe_ctx);
+
 	/* enable early control to avoid corruption on DP monitor*/
 	active_total_with_borders =
 			timing->h_addressable
@@ -717,12 +724,8 @@ void dce110_enable_stream(struct pipe_ctx *pipe_ctx)
 		pipe_ctx->stream_res.stream_enc->funcs->dp_audio_enable(pipe_ctx->stream_res.stream_enc);
 	}
 
-	/* For MST, there are multiply stream go to only one link.
-	 * connect DIG back_end to front_end while enable_stream and
-	 * disconnect them during disable_stream
-	 * BY this, it is logic clean to separate stream and link */
-	link->link_enc->funcs->connect_dig_be_to_fe(link->link_enc,
-		pipe_ctx->stream_res.stream_enc->id, true);
+
+
 }
 
@@ -1690,9 +1693,13 @@ static void apply_min_clocks(
  *  Check if FBC can be enabled
  */
 static bool should_enable_fbc(struct dc *dc,
-			      struct dc_state *context)
+			      struct dc_state *context,
+			      uint32_t *pipe_idx)
 {
-	struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[0];
+	uint32_t i;
+	struct pipe_ctx *pipe_ctx = NULL;
+	struct resource_context *res_ctx = &context->res_ctx;
+
 
 	ASSERT(dc->fbc_compressor);
 
@@ -1704,6 +1711,14 @@ static bool should_enable_fbc(struct dc *dc,
 	if (context->stream_count != 1)
 		return false;
 
+	for (i = 0; i < dc->res_pool->pipe_count; i++) {
+		if (res_ctx->pipe_ctx[i].stream) {
+			pipe_ctx = &res_ctx->pipe_ctx[i];
+			*pipe_idx = i;
+			break;
+		}
+	}
+
 	/* Only supports eDP */
 	if (pipe_ctx->stream->sink->link->connector_signal != SIGNAL_TYPE_EDP)
 		return false;
@@ -1729,11 +1744,14 @@ static void enable_fbc(struct dc *dc,
 		       struct dc_state *context)
 {
-	if (should_enable_fbc(dc, context)) {
+	uint32_t pipe_idx = 0;
+
+	if (should_enable_fbc(dc, context, &pipe_idx)) {
 		/* Program GRPH COMPRESSED ADDRESS and PITCH */
 		struct compr_addr_and_pitch_params params = {0, 0, 0};
 		struct compressor *compr = dc->fbc_compressor;
-		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[0];
+		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[pipe_idx];
+
 		params.source_view_width = pipe_ctx->stream->timing.h_addressable;
 		params.source_view_height = pipe_ctx->stream->timing.v_addressable;
@@ -2915,6 +2933,49 @@ static void program_csc_matrix(struct pipe_ctx *pipe_ctx,
 	}
 }
 
+void dce110_set_cursor_position(struct pipe_ctx *pipe_ctx)
+{
+	struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position;
+	struct input_pixel_processor *ipp = pipe_ctx->plane_res.ipp;
+	struct mem_input *mi = pipe_ctx->plane_res.mi;
+	struct dc_cursor_mi_param param = {
+		.pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_khz,
+		.ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clock_inKhz,
+		.viewport_x_start = pipe_ctx->plane_res.scl_data.viewport.x,
+		.viewport_width = pipe_ctx->plane_res.scl_data.viewport.width,
+		.h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz
+	};
+
+	if (pipe_ctx->plane_state->address.type
+			== PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
+		pos_cpy.enable = false;
+
+	if (pipe_ctx->top_pipe && pipe_ctx->plane_state != pipe_ctx->top_pipe->plane_state)
+		pos_cpy.enable = false;
+
+	if (ipp->funcs->ipp_cursor_set_position)
+		ipp->funcs->ipp_cursor_set_position(ipp, &pos_cpy, &param);
+	if (mi->funcs->set_cursor_position)
+		mi->funcs->set_cursor_position(mi, &pos_cpy, &param);
+}
+
+void dce110_set_cursor_attribute(struct pipe_ctx *pipe_ctx)
+{
+	struct dc_cursor_attributes *attributes = &pipe_ctx->stream->cursor_attributes;
+
+	if (pipe_ctx->plane_res.ipp->funcs->ipp_cursor_set_attributes)
+		pipe_ctx->plane_res.ipp->funcs->ipp_cursor_set_attributes(
+				pipe_ctx->plane_res.ipp, attributes);
+
+	if (pipe_ctx->plane_res.mi->funcs->set_cursor_attributes)
+		pipe_ctx->plane_res.mi->funcs->set_cursor_attributes(
+				pipe_ctx->plane_res.mi, attributes);
+
+	if (pipe_ctx->plane_res.xfm->funcs->set_cursor_attributes)
+		pipe_ctx->plane_res.xfm->funcs->set_cursor_attributes(
+				pipe_ctx->plane_res.xfm, attributes);
+}
+
 static void ready_shared_resources(struct dc *dc, struct dc_state *context) {}
 
 static void optimize_shared_resources(struct dc *dc) {}
@@ -2957,6 +3018,8 @@ static const struct hw_sequencer_funcs dce110_funcs = {
 	.edp_backlight_control = hwss_edp_backlight_control,
 	.edp_power_control = hwss_edp_power_control,
 	.edp_wait_for_hpd_ready = hwss_edp_wait_for_hpd_ready,
+	.set_cursor_position = dce110_set_cursor_position,
+	.set_cursor_attribute = dce110_set_cursor_attribute
 };
 
 void dce110_hw_sequencer_construct(struct dc *dc)
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
index 7c4779578fb7..00f18c485e1e 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
@@ -846,6 +846,16 @@ static bool dce110_validate_bandwidth(
 	return result;
 }
 
+enum dc_status dce110_validate_plane(const struct dc_plane_state *plane_state,
+				     struct dc_caps *caps)
+{
+	if (((plane_state->dst_rect.width * 2) < plane_state->src_rect.width) ||
+	    ((plane_state->dst_rect.height * 2) < plane_state->src_rect.height))
+		return DC_FAIL_SURFACE_VALIDATE;
+
+	return DC_OK;
+}
+
 static bool dce110_validate_surface_sets(
 		struct dc_state *context)
 {
@@ -869,6 +879,13 @@ static bool dce110_validate_surface_sets(
 				    plane->src_rect.height > 1080))
 					return false;
 
+				/* we don't have the logic to support underlay
+				 * only yet so block the use case where we get
+				 * NV12 plane as top layer
+				 */
+				if (j == 0)
+					return false;
+
 				/* irrespective of plane format,
 				 * stream should be RGB encoded
 				 */
@@ -1021,6 +1038,7 @@ static const struct resource_funcs dce110_res_pool_funcs = {
 	.link_enc_create = dce110_link_encoder_create,
 	.validate_guaranteed = dce110_validate_guaranteed,
 	.validate_bandwidth = dce110_validate_bandwidth,
+	.validate_plane = dce110_validate_plane,
 	.acquire_idle_pipe_for_layer = dce110_acquire_underlay,
 	.add_stream_to_ctx = dce110_add_stream_to_ctx,
 	.validate_global = dce110_validate_global
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
index 663e0a047a4b..98d9cd0109e1 100644
--- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
@@ -1103,6 +1103,8 @@ static bool construct(
 	dc->caps.max_downscale_ratio = 200;
 	dc->caps.i2c_speed_in_khz = 100;
 	dc->caps.max_cursor_size = 128;
+	dc->caps.dual_link_dvi = true;
+
 
 	/*************************************************
 	 *  Create resources                             *
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
index 57cd67359567..5aab01db28ee 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
@@ -835,6 +835,8 @@ static bool construct(
 	dc->caps.max_downscale_ratio = 200;
 	dc->caps.i2c_speed_in_khz = 100;
 	dc->caps.max_cursor_size = 128;
+	dc->caps.dual_link_dvi = true;
+
 	dc->debug = debug_defaults;
 
 	/*************************************************
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
index 8f2bd56f3461..25d7eb1567ae 100644
--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
@@ -793,6 +793,7 @@ static bool dce80_construct(
 	dc->caps.max_downscale_ratio = 200;
 	dc->caps.i2c_speed_in_khz = 40;
 	dc->caps.max_cursor_size = 128;
+	dc->caps.dual_link_dvi = true;
 
 	/*************************************************
 	 *  Create resources                             *
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 82572863acab..072e4485e85e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -238,10 +238,24 @@ static void enable_power_gating_plane(
 static void disable_vga(
 	struct dce_hwseq *hws)
 {
+	unsigned int in_vga_mode = 0;
+
+	REG_GET(D1VGA_CONTROL, D1VGA_MODE_ENABLE, &in_vga_mode);
+
+	if (in_vga_mode == 0)
+		return;
+
 	REG_WRITE(D1VGA_CONTROL, 0);
-	REG_WRITE(D2VGA_CONTROL, 0);
-	REG_WRITE(D3VGA_CONTROL, 0);
-	REG_WRITE(D4VGA_CONTROL, 0);
+
+	/* HW Engineer's Notes:
+	 *  During switch from vga->extended, if we set the VGA_TEST_ENABLE and
+	 *  then hit the VGA_TEST_RENDER_START, then the DCHUBP timing gets updated correctly.
+	 *
+	 *  Then vBIOS will have it poll for the VGA_TEST_RENDER_DONE and unset
+	 *  VGA_TEST_ENABLE, to leave it in the same state as before.
+	 */
+	REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_ENABLE, 1);
+	REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_RENDER_START, 1);
 }
 
 static void dpp_pg_control(
@@ -1761,6 +1775,11 @@ static void update_dchubp_dpp(
 			&pipe_ctx->plane_res.scl_data.viewport_c);
 	}
 
+	if (pipe_ctx->stream->cursor_attributes.address.quad_part != 0) {
+		dc->hwss.set_cursor_position(pipe_ctx);
+		dc->hwss.set_cursor_attribute(pipe_ctx);
+	}
+
 	if (plane_state->update_flags.bits.full_update) {
 		/*gamut remap*/
 		program_gamut_remap(pipe_ctx);
@@ -2296,7 +2315,7 @@ static bool dcn10_dummy_display_power_gating(
 	return true;
 }
 
-void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx)
+static void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx)
 {
 	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
 	struct timing_generator *tg = pipe_ctx->stream_res.tg;
@@ -2316,12 +2335,46 @@ void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx)
 	}
 }
 
-void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data)
+static void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data)
 {
 	if (hws->ctx->dc->res_pool->hubbub != NULL)
 		hubbub1_update_dchub(hws->ctx->dc->res_pool->hubbub, dh_data);
 }
 
+static void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
+{
+	struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position;
+	struct hubp *hubp = pipe_ctx->plane_res.hubp;
+	struct dpp *dpp = pipe_ctx->plane_res.dpp;
+	struct dc_cursor_mi_param param = {
+		.pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_khz,
+		.ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clock_inKhz,
+		.viewport_x_start = pipe_ctx->plane_res.scl_data.viewport.x,
+		.viewport_width = pipe_ctx->plane_res.scl_data.viewport.width,
+		.h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz
+	};
+
+	if (pipe_ctx->plane_state->address.type
+			== PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
+		pos_cpy.enable = false;
+
+	if (pipe_ctx->top_pipe && pipe_ctx->plane_state != pipe_ctx->top_pipe->plane_state)
+		pos_cpy.enable = false;
+
+	hubp->funcs->set_cursor_position(hubp, &pos_cpy, &param);
+	dpp->funcs->set_cursor_position(dpp, &pos_cpy, &param, hubp->curs_attr.width);
+}
+
+static void dcn10_set_cursor_attribute(struct pipe_ctx *pipe_ctx)
+{
+	struct dc_cursor_attributes *attributes = &pipe_ctx->stream->cursor_attributes;
+
+	pipe_ctx->plane_res.hubp->funcs->set_cursor_attributes(
+			pipe_ctx->plane_res.hubp, attributes);
+	pipe_ctx->plane_res.dpp->funcs->set_cursor_attributes(
+			pipe_ctx->plane_res.dpp, attributes->color_format);
+}
+
 static const struct hw_sequencer_funcs dcn10_funcs = {
 	.program_gamut_remap = program_gamut_remap,
 	.program_csc_matrix = program_csc_matrix,
@@ -2362,6 +2415,8 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
 	.edp_backlight_control = hwss_edp_backlight_control,
 	.edp_power_control = hwss_edp_power_control,
 	.edp_wait_for_hpd_ready = hwss_edp_wait_for_hpd_ready,
+	.set_cursor_position = dcn10_set_cursor_position,
+	.set_cursor_attribute = dcn10_set_cursor_attribute
 };
 
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
index 0fd329deacd8..54d8a1386142 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
@@ -123,8 +123,7 @@ struct link_encoder_funcs {
 	void (*enable_tmds_output)(struct link_encoder *enc,
 		enum clock_source_id clock_source,
 		enum dc_color_depth color_depth,
-		bool hdmi,
-		bool dual_link,
+		enum signal_type signal,
 		uint32_t pixel_clock);
 	void (*enable_dp_output)(struct link_encoder *enc,
 		const struct dc_link_settings *link_settings,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
index 4c0aa56f7bae..379c6ecd271a 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
@@ -198,6 +198,9 @@ struct hw_sequencer_funcs {
 			bool enable);
 	void (*edp_wait_for_hpd_ready)(struct dc_link *link, bool power_up);
 
+	void (*set_cursor_position)(struct pipe_ctx *pipe);
+	void (*set_cursor_attribute)(struct pipe_ctx *pipe);
+
 };
 
 void color_space_to_black_color(
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c b/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c
index f7e40b292dfb..d3e1923b01a8 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c
@@ -217,7 +217,7 @@ bool dce110_vblank_set(
 			core_dc->current_state->res_ctx.pipe_ctx[pipe_offset].stream_res.tg;
 
 	if (enable) {
-		if (!tg->funcs->arm_vert_intr(tg, 2)) {
+		if (!tg || !tg->funcs->arm_vert_intr(tg, 2)) {
 			DC_ERROR("Failed to get VBLANK!\n");
 			return false;
 		}
diff --git a/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_encoder.c b/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_encoder.c
index 57a54a7b89e5..1c079ba37c30 100644
--- a/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_encoder.c
@@ -42,8 +42,7 @@ static void virtual_link_encoder_enable_tmds_output(
 	struct link_encoder *enc,
 	enum clock_source_id clock_source,
 	enum dc_color_depth color_depth,
-	bool hdmi,
-	bool dual_link,
+	enum signal_type signal,
 	uint32_t pixel_clock) {}
 
 static void virtual_link_encoder_enable_dp_output(
diff --git a/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h b/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h
index 7a9b43f84a31..36bbad594267 100644
--- a/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h
+++ b/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h
@@ -419,11 +419,6 @@ struct bios_event_info {
 	bool backlight_changed;
 };
 
-enum {
-	HDMI_PIXEL_CLOCK_IN_KHZ_297 = 297000,
-	TMDS_PIXEL_CLOCK_IN_KHZ_165 = 165000
-};
-
 /*
  * DFS-bypass flag
  */
diff --git a/drivers/gpu/drm/amd/display/include/signal_types.h b/drivers/gpu/drm/amd/display/include/signal_types.h
index b5ebde642207..199c5db67cbc 100644
--- a/drivers/gpu/drm/amd/display/include/signal_types.h
+++ b/drivers/gpu/drm/amd/display/include/signal_types.h
@@ -26,6 +26,11 @@
 #ifndef __DC_SIGNAL_TYPES_H__
 #define __DC_SIGNAL_TYPES_H__
 
+/* Minimum pixel clock, in KHz. For TMDS signal is 25.00 MHz */
+#define TMDS_MIN_PIXEL_CLOCK 25000
+/* Maximum pixel clock, in KHz. For TMDS signal is 165.00 MHz */
+#define TMDS_MAX_PIXEL_CLOCK 165000
+
 enum signal_type {
 	SIGNAL_TYPE_NONE		= 0L,	/* no signal */
 	SIGNAL_TYPE_DVI_SINGLE_LINK	= (1 << 0),
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index dd89abd2263d..66ee9d888d16 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3205,8 +3205,10 @@ void i915_gem_set_wedged(struct drm_i915_private *i915)
 	 * rolling the global seqno forward (since this would complete requests
	 * for which we haven't set the fence error to EIO yet).
	 */
-	for_each_engine(engine, i915, id)
+	for_each_engine(engine, i915, id) {
+		i915_gem_reset_prepare_engine(engine);
 		engine->submit_request = nop_submit_request;
+	}
 
 	/*
 	 * Make sure no one is running the old callback before we proceed with
@@ -3244,6 +3246,8 @@ void i915_gem_set_wedged(struct drm_i915_private *i915)
 		intel_engine_init_global_seqno(engine,
 					       intel_engine_last_submit(engine));
 		spin_unlock_irqrestore(&engine->timeline->lock, flags);
+
+		i915_gem_reset_finish_engine(engine);
 	}
 
 	set_bit(I915_WEDGED, &i915->gpu_error.flags);
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 0be50e43507d..f8fe5ffcdcff 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -1303,9 +1303,8 @@ static void i915_oa_stream_destroy(struct i915_perf_stream *stream)
 	 */
 	mutex_lock(&dev_priv->drm.struct_mutex);
 	dev_priv->perf.oa.exclusive_stream = NULL;
-	mutex_unlock(&dev_priv->drm.struct_mutex);
-
 	dev_priv->perf.oa.ops.disable_metric_set(dev_priv);
+	mutex_unlock(&dev_priv->drm.struct_mutex);
 
 	free_oa_buffer(dev_priv);
 
@@ -1756,22 +1755,13 @@ static int gen8_switch_to_updated_kernel_context(struct drm_i915_private *dev_pr
  * Note: it's only the RCS/Render context that has any OA state.
 */
 static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
-				       const struct i915_oa_config *oa_config,
-				       bool interruptible)
+				       const struct i915_oa_config *oa_config)
 {
 	struct i915_gem_context *ctx;
 	int ret;
 	unsigned int wait_flags = I915_WAIT_LOCKED;
 
-	if (interruptible) {
-		ret = i915_mutex_lock_interruptible(&dev_priv->drm);
-		if (ret)
-			return ret;
-
-		wait_flags |= I915_WAIT_INTERRUPTIBLE;
-	} else {
-		mutex_lock(&dev_priv->drm.struct_mutex);
-	}
+	lockdep_assert_held(&dev_priv->drm.struct_mutex);
 
 	/* Switch away from any user context. */
 	ret = gen8_switch_to_updated_kernel_context(dev_priv, oa_config);
@@ -1819,8 +1809,6 @@ static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
 	}
 
  out:
-	mutex_unlock(&dev_priv->drm.struct_mutex);
-
 	return ret;
 }
 
@@ -1863,7 +1851,7 @@ static int gen8_enable_metric_set(struct drm_i915_private *dev_priv,
	 * to make sure all slices/subslices are ON before writing to NOA
	 * registers.
	 */
-	ret = gen8_configure_all_contexts(dev_priv, oa_config, true);
+	ret = gen8_configure_all_contexts(dev_priv, oa_config);
 	if (ret)
 		return ret;
 
@@ -1878,7 +1866,7 @@ static int gen8_enable_metric_set(struct drm_i915_private *dev_priv,
 static void gen8_disable_metric_set(struct drm_i915_private *dev_priv)
 {
 	/* Reset all contexts' slices/subslices configurations. */
-	gen8_configure_all_contexts(dev_priv, NULL, false);
+	gen8_configure_all_contexts(dev_priv, NULL);
 
 	I915_WRITE(GDT_CHICKEN_BITS, (I915_READ(GDT_CHICKEN_BITS) &
 				      ~GT_NOA_ENABLE));
@@ -1888,7 +1876,7 @@ static void gen8_disable_metric_set(struct drm_i915_private *dev_priv)
 static void gen10_disable_metric_set(struct drm_i915_private *dev_priv)
 {
 	/* Reset all contexts' slices/subslices configurations. */
-	gen8_configure_all_contexts(dev_priv, NULL, false);
+	gen8_configure_all_contexts(dev_priv, NULL);
 
 	/* Make sure we disable noa to save power. */
 	I915_WRITE(RPM_CONFIG1,
@@ -2138,6 +2126,10 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
 	if (ret)
 		goto err_oa_buf_alloc;
 
+	ret = i915_mutex_lock_interruptible(&dev_priv->drm);
+	if (ret)
+		goto err_lock;
+
 	ret = dev_priv->perf.oa.ops.enable_metric_set(dev_priv,
 						      stream->oa_config);
 	if (ret)
@@ -2145,23 +2137,17 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
 
 	stream->ops = &i915_oa_stream_ops;
 
-	/* Lock device for exclusive_stream access late because
-	 * enable_metric_set() might lock as well on gen8+.
-	 */
-	ret = i915_mutex_lock_interruptible(&dev_priv->drm);
-	if (ret)
-		goto err_lock;
-
 	dev_priv->perf.oa.exclusive_stream = stream;
 
 	mutex_unlock(&dev_priv->drm.struct_mutex);
 
 	return 0;
 
-err_lock:
+err_enable:
 	dev_priv->perf.oa.ops.disable_metric_set(dev_priv);
+	mutex_unlock(&dev_priv->drm.struct_mutex);
 
-err_enable:
+err_lock:
 	free_oa_buffer(dev_priv);
 
 err_oa_buf_alloc:
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 7ece2f061b9e..e0fca035ff78 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -719,6 +719,8 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
 	struct rb_node *rb;
 	unsigned long flags;
 
+	GEM_TRACE("%s\n", engine->name);
+
 	spin_lock_irqsave(&engine->timeline->lock, flags);
 
 	/* Cancel the requests on the HW and clear the ELSP tracker. */
@@ -765,6 +767,9 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
 	 */
 	clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
 
+	/* Mark all CS interrupts as complete */
+	execlists->active = 0;
+
 	spin_unlock_irqrestore(&engine->timeline->lock, flags);
 }
 
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index d3045a371a55..7c73bc7e2f85 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -3221,35 +3221,8 @@ static void cik_gpu_init(struct radeon_device *rdev)
 	case CHIP_KAVERI:
 		rdev->config.cik.max_shader_engines = 1;
 		rdev->config.cik.max_tile_pipes = 4;
-		if ((rdev->pdev->device == 0x1304) ||
-		    (rdev->pdev->device == 0x1305) ||
-		    (rdev->pdev->device == 0x130C) ||
-		    (rdev->pdev->device == 0x130F) ||
-		    (rdev->pdev->device == 0x1310) ||
-		    (rdev->pdev->device == 0x1311) ||
-		    (rdev->pdev->device == 0x131C)) {
-			rdev->config.cik.max_cu_per_sh = 8;
-			rdev->config.cik.max_backends_per_se = 2;
-		} else if ((rdev->pdev->device == 0x1309) ||
-			   (rdev->pdev->device == 0x130A) ||
-			   (rdev->pdev->device == 0x130D) ||
-			   (rdev->pdev->device == 0x1313) ||
-			   (rdev->pdev->device == 0x131D)) {
-			rdev->config.cik.max_cu_per_sh = 6;
-			rdev->config.cik.max_backends_per_se = 2;
-		} else if ((rdev->pdev->device == 0x1306) ||
-			   (rdev->pdev->device == 0x1307) ||
-			   (rdev->pdev->device == 0x130B) ||
-			   (rdev->pdev->device == 0x130E) ||
-			   (rdev->pdev->device == 0x1315) ||
-			   (rdev->pdev->device == 0x1318) ||
-			   (rdev->pdev->device == 0x131B)) {
-			rdev->config.cik.max_cu_per_sh = 4;
-			rdev->config.cik.max_backends_per_se = 1;
-		} else {
-			rdev->config.cik.max_cu_per_sh = 3;
-			rdev->config.cik.max_backends_per_se = 1;
-		}
+		rdev->config.cik.max_cu_per_sh = 8;
+		rdev->config.cik.max_backends_per_se = 2;
 		rdev->config.cik.max_sh_per_se = 1;
 		rdev->config.cik.max_texture_channel_caches = 4;
 		rdev->config.cik.max_gprs = 256;
diff --git a/drivers/gpu/drm/sun4i/sun4i_crtc.c b/drivers/gpu/drm/sun4i/sun4i_crtc.c
index 5decae0069d0..78cbc3145e44 100644
--- a/drivers/gpu/drm/sun4i/sun4i_crtc.c
+++ b/drivers/gpu/drm/sun4i/sun4i_crtc.c
@@ -93,6 +93,8 @@ static void sun4i_crtc_atomic_disable(struct drm_crtc *crtc,
 
 	DRM_DEBUG_DRIVER("Disabling the CRTC\n");
 
+	drm_crtc_vblank_off(crtc);
+
 	sun4i_tcon_set_status(scrtc->tcon, encoder, false);
 
 	if (crtc->state->event && !crtc->state->active) {
@@ -113,6 +115,8 @@ static void sun4i_crtc_atomic_enable(struct drm_crtc *crtc,
 	DRM_DEBUG_DRIVER("Enabling the CRTC\n");
 
 	sun4i_tcon_set_status(scrtc->tcon, encoder, true);
+
+	drm_crtc_vblank_on(crtc);
 }
 
 static void sun4i_crtc_mode_set_nofb(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/sun4i/sun4i_dotclock.c b/drivers/gpu/drm/sun4i/sun4i_dotclock.c
index 023f39bda633..e36004fbe453 100644
--- a/drivers/gpu/drm/sun4i/sun4i_dotclock.c
+++ b/drivers/gpu/drm/sun4i/sun4i_dotclock.c
@@ -132,10 +132,13 @@ static int sun4i_dclk_get_phase(struct clk_hw *hw)
 static int sun4i_dclk_set_phase(struct clk_hw *hw, int degrees)
 {
 	struct sun4i_dclk *dclk = hw_to_dclk(hw);
+	u32 val = degrees / 120;
+
+	val <<= 28;
 
 	regmap_update_bits(dclk->regmap, SUN4I_TCON0_IO_POL_REG,
 			   GENMASK(29, 28),
-			   degrees / 120);
+			   val);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/sun4i/sun4i_rgb.c b/drivers/gpu/drm/sun4i/sun4i_rgb.c
index 832f8f9bc47f..b8da5a50a61d 100644
--- a/drivers/gpu/drm/sun4i/sun4i_rgb.c
+++ b/drivers/gpu/drm/sun4i/sun4i_rgb.c
@@ -92,6 +92,8 @@ static int sun4i_rgb_mode_valid(struct drm_connector *connector,
 
 	DRM_DEBUG_DRIVER("Vertical parameters OK\n");
 
+	tcon->dclk_min_div = 6;
+	tcon->dclk_max_div = 127;
 	rounded_rate = clk_round_rate(tcon->dclk, rate);
 	if (rounded_rate < rate)
 		return MODE_CLOCK_LOW;
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
index b3960118deb9..2de586b7c98b 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
@@ -101,10 +101,12 @@ static void sun4i_tcon_channel_set_status(struct sun4i_tcon *tcon, int channel,
 		return;
 	}
 
-	if (enabled)
+	if (enabled) {
 		clk_prepare_enable(clk);
-	else
+	} else {
+		clk_rate_exclusive_put(clk);
 		clk_disable_unprepare(clk);
+	}
 }
 
 static void sun4i_tcon_lvds_set_status(struct sun4i_tcon *tcon,
@@ -873,52 +875,56 @@ static int sun4i_tcon_bind(struct device *dev, struct device *master,
 		return ret;
 	}
 
-	/*
-	 * This can only be made optional since we've had DT nodes
-	 * without the LVDS reset properties.
-	 *
-	 * If the property is missing, just disable LVDS, and print a
-	 * warning.
-	 */
-	tcon->lvds_rst = devm_reset_control_get_optional(dev, "lvds");
-	if (IS_ERR(tcon->lvds_rst)) {
-		dev_err(dev, "Couldn't get our reset line\n");
-		return PTR_ERR(tcon->lvds_rst);
-	} else if (tcon->lvds_rst) {
-		has_lvds_rst = true;
-		reset_control_reset(tcon->lvds_rst);
-	} else {
-		has_lvds_rst = false;
-	}
+	if (tcon->quirks->supports_lvds) {
+		/*
+		 * This can only be made optional since we've had DT
+		 * nodes without the LVDS reset properties.
+		 *
+		 * If the property is missing, just disable LVDS, and
+		 * print a warning.
+		 */
+		tcon->lvds_rst = devm_reset_control_get_optional(dev, "lvds");
+		if (IS_ERR(tcon->lvds_rst)) {
+			dev_err(dev, "Couldn't get our reset line\n");
+			return PTR_ERR(tcon->lvds_rst);
+		} else if (tcon->lvds_rst) {
+			has_lvds_rst = true;
+			reset_control_reset(tcon->lvds_rst);
+		} else {
+			has_lvds_rst = false;
+		}
 
-	/*
-	 * This can only be made optional since we've had DT nodes
-	 * without the LVDS reset properties.
-	 *
-	 * If the property is missing, just disable LVDS, and print a
-	 * warning.
-	 */
-	if (tcon->quirks->has_lvds_alt) {
-		tcon->lvds_pll = devm_clk_get(dev, "lvds-alt");
-		if (IS_ERR(tcon->lvds_pll)) {
-			if (PTR_ERR(tcon->lvds_pll) == -ENOENT) {
-				has_lvds_alt = false;
+		/*
+		 * This can only be made optional since we've had DT
+		 * nodes without the LVDS reset properties.
+		 *
+		 * If the property is missing, just disable LVDS, and
+		 * print a warning.
+		 */
+		if (tcon->quirks->has_lvds_alt) {
+			tcon->lvds_pll = devm_clk_get(dev, "lvds-alt");
+			if (IS_ERR(tcon->lvds_pll)) {
+				if (PTR_ERR(tcon->lvds_pll) == -ENOENT) {
+					has_lvds_alt = false;
+				} else {
+					dev_err(dev, "Couldn't get the LVDS PLL\n");
+					return PTR_ERR(tcon->lvds_pll);
+				}
 			} else {
-				dev_err(dev, "Couldn't get the LVDS PLL\n");
-				return PTR_ERR(tcon->lvds_pll);
+				has_lvds_alt = true;
 			}
-		} else {
-			has_lvds_alt = true;
 		}
-	}
 
-	if (!has_lvds_rst || (tcon->quirks->has_lvds_alt && !has_lvds_alt)) {
-		dev_warn(dev,
-			 "Missing LVDS properties, Please upgrade your DT\n");
-		dev_warn(dev, "LVDS output disabled\n");
-		can_lvds = false;
+		if (!has_lvds_rst ||
+		    (tcon->quirks->has_lvds_alt && !has_lvds_alt)) {
+			dev_warn(dev, "Missing LVDS properties, Please upgrade your DT\n");
+			dev_warn(dev, "LVDS output disabled\n");
+			can_lvds = false;
+		} else {
+			can_lvds = true;
+		}
 	} else {
-		can_lvds = true;
+		can_lvds = false;
 	}
 
 	ret = sun4i_tcon_init_clocks(dev, tcon);
@@ -1137,7 +1143,7 @@ static const struct sun4i_tcon_quirks sun8i_a33_quirks = {
 };
 
 static const struct sun4i_tcon_quirks sun8i_a83t_lcd_quirks = {
-	/* nothing is supported */
+	.supports_lvds		= true,
 };
 
 static const struct sun4i_tcon_quirks sun8i_v3s_quirks = {
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.h b/drivers/gpu/drm/sun4i/sun4i_tcon.h
index b761c7b823c5..278700c7bf9f 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.h
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.h
@@ -175,6 +175,7 @@ struct sun4i_tcon_quirks {
 	bool	has_channel_1;	/* a33 does not have channel 1 */
 	bool	has_lvds_alt;	/* Does the LVDS clock have a parent other than the TCON clock? */
 	bool	needs_de_be_mux; /* sun6i needs mux to select backend */
+	bool	supports_lvds;   /* Does the TCON support an LVDS output? */
 
 	/* callback to handle tcon muxing options */
 	int	(*set_mux)(struct sun4i_tcon *, const struct drm_encoder *);
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index a5b4cf030c11..9183d148d644 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -550,18 +550,13 @@ static int addr_resolve(struct sockaddr *src_in,
 		dst_release(dst);
 	}
 
-	if (ndev->flags & IFF_LOOPBACK) {
-		ret = rdma_translate_ip(dst_in, addr);
-		/*
-		 * Put the loopback device and get the translated
-		 * device instead.
- */ + if (ndev) { + if (ndev->flags & IFF_LOOPBACK) + ret = rdma_translate_ip(dst_in, addr); + else + addr->bound_dev_if = ndev->ifindex; dev_put(ndev); - ndev = dev_get_by_index(addr->net, addr->bound_dev_if); - } else { - addr->bound_dev_if = ndev->ifindex; } - dev_put(ndev); return ret; } diff --git a/drivers/infiniband/core/cq.c b/drivers/infiniband/core/cq.c index bc79ca8215d7..af5ad6a56ae4 100644 --- a/drivers/infiniband/core/cq.c +++ b/drivers/infiniband/core/cq.c @@ -17,6 +17,7 @@ /* # of WCs to poll for with a single call to ib_poll_cq */ #define IB_POLL_BATCH 16 +#define IB_POLL_BATCH_DIRECT 8 /* # of WCs to iterate over before yielding */ #define IB_POLL_BUDGET_IRQ 256 @@ -25,18 +26,18 @@ #define IB_POLL_FLAGS \ (IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS) -static int __ib_process_cq(struct ib_cq *cq, int budget, struct ib_wc *poll_wc) +static int __ib_process_cq(struct ib_cq *cq, int budget, struct ib_wc *wcs, + int batch) { int i, n, completed = 0; - struct ib_wc *wcs = poll_wc ? : cq->wc; /* * budget might be (-1) if the caller does not * want to bound this call, thus we need unsigned * minimum here. */ - while ((n = ib_poll_cq(cq, min_t(u32, IB_POLL_BATCH, - budget - completed), wcs)) > 0) { + while ((n = ib_poll_cq(cq, min_t(u32, batch, + budget - completed), wcs)) > 0) { for (i = 0; i < n; i++) { struct ib_wc *wc = &wcs[i]; @@ -48,8 +49,7 @@ static int __ib_process_cq(struct ib_cq *cq, int budget, struct ib_wc *poll_wc) completed += n; - if (n != IB_POLL_BATCH || - (budget != -1 && completed >= budget)) + if (n != batch || (budget != -1 && completed >= budget)) break; } @@ -72,9 +72,9 @@ static int __ib_process_cq(struct ib_cq *cq, int budget, struct ib_wc *poll_wc) */ int ib_process_cq_direct(struct ib_cq *cq, int budget) { - struct ib_wc wcs[IB_POLL_BATCH]; + struct ib_wc wcs[IB_POLL_BATCH_DIRECT]; - return __ib_process_cq(cq, budget, wcs); + return __ib_process_cq(cq, budget, wcs, IB_POLL_BATCH_DIRECT); } EXPORT_SYMBOL(ib_process_cq_direct); @@ -88,7 +88,7 @@ static int ib_poll_handler(struct irq_poll *iop, int budget) struct ib_cq *cq = container_of(iop, struct ib_cq, iop); int completed; - completed = __ib_process_cq(cq, budget, NULL); + completed = __ib_process_cq(cq, budget, cq->wc, IB_POLL_BATCH); if (completed < budget) { irq_poll_complete(&cq->iop); if (ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0) @@ -108,7 +108,8 @@ static void ib_cq_poll_work(struct work_struct *work) struct ib_cq *cq = container_of(work, struct ib_cq, work); int completed; - completed = __ib_process_cq(cq, IB_POLL_BUDGET_WORKQUEUE, NULL); + completed = __ib_process_cq(cq, IB_POLL_BUDGET_WORKQUEUE, cq->wc, + IB_POLL_BATCH); if (completed >= IB_POLL_BUDGET_WORKQUEUE || ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0) queue_work(ib_comp_wq, &cq->work); diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c index e8010e73a1cf..bb065c9449be 100644 --- a/drivers/infiniband/core/device.c +++ b/drivers/infiniband/core/device.c @@ -536,14 +536,14 @@ int ib_register_device(struct ib_device *device, ret = device->query_device(device, &device->attrs, &uhw); if (ret) { pr_warn("Couldn't query the device attributes\n"); - goto cache_cleanup; + goto cg_cleanup; } ret = ib_device_register_sysfs(device, port_callback); if (ret) { pr_warn("Couldn't register device %s with driver model\n", device->name); - goto cache_cleanup; + goto cg_cleanup; } device->reg_state = IB_DEV_REGISTERED; @@ -559,6 +559,8 @@ int ib_register_device(struct ib_device *device, mutex_unlock(&device_mutex); return 0; 
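/*
 * Aside (not part of the patch): the ib_register_device() change above
 * retargets the error gotos so that the rdmacg registration done earlier
 * in the function is also unwound on failure. The underlying idiom is a
 * cleanup-label ladder whose teardown order mirrors setup order in
 * reverse. A minimal, self-contained sketch of that idiom follows; the
 * setup_*/teardown_* names are hypothetical stand-ins, not kernel APIs.
 */
#include <stdio.h>

static int setup_a(void) { puts("a up"); return 0; }
static void teardown_a(void) { puts("a down"); }
static int setup_b(void) { puts("b up"); return 0; }
static void teardown_b(void) { puts("b down"); }
static int setup_c(void) { return -1; } /* simulated late failure */

static int register_all(void)
{
	int ret;

	ret = setup_a();
	if (ret)
		return ret;

	ret = setup_b();
	if (ret)
		goto err_a;

	ret = setup_c();
	if (ret)
		goto err_b; /* unwind b, then a: the ordering this kind of fix restores */

	return 0;

err_b:
	teardown_b();
err_a:
	teardown_a();
	return ret;
}

int main(void)
{
	return register_all() ? 1 : 0;
}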
+cg_cleanup: + ib_device_unregister_rdmacg(device); cache_cleanup: ib_cache_cleanup_one(device); ib_cache_release_one(device); diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c index 8cf15d4a8ac4..9f029a1ca5ea 100644 --- a/drivers/infiniband/core/sa_query.c +++ b/drivers/infiniband/core/sa_query.c @@ -1291,10 +1291,9 @@ int ib_init_ah_attr_from_path(struct ib_device *device, u8 port_num, resolved_dev = dev_get_by_index(dev_addr.net, dev_addr.bound_dev_if); - if (resolved_dev->flags & IFF_LOOPBACK) { - dev_put(resolved_dev); - resolved_dev = idev; - dev_hold(resolved_dev); + if (!resolved_dev) { + dev_put(idev); + return -ENODEV; } ndev = ib_get_ndev_from_path(rec); rcu_read_lock(); diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c index f015f1bf88c9..3a9d0f5b5881 100644 --- a/drivers/infiniband/core/ucma.c +++ b/drivers/infiniband/core/ucma.c @@ -1149,6 +1149,9 @@ static ssize_t ucma_init_qp_attr(struct ucma_file *file, if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; + if (cmd.qp_state > IB_QPS_ERR) + return -EINVAL; + ctx = ucma_get_ctx(file, cmd.id); if (IS_ERR(ctx)) return PTR_ERR(ctx); @@ -1294,6 +1297,9 @@ static ssize_t ucma_set_option(struct ucma_file *file, const char __user *inbuf, if (IS_ERR(ctx)) return PTR_ERR(ctx); + if (unlikely(cmd.optval > KMALLOC_MAX_SIZE)) + return -EINVAL; + optval = memdup_user((void __user *) (unsigned long) cmd.optval, cmd.optlen); if (IS_ERR(optval)) { diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c index 643174d949a8..0dd75f449872 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c @@ -785,7 +785,7 @@ int bnxt_re_query_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr) return 0; } -static unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp) +unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp) __acquires(&qp->scq->cq_lock) __acquires(&qp->rcq->cq_lock) { unsigned long flags; @@ -799,8 +799,8 @@ static unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp) return flags; } -static void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp, - unsigned long flags) +void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp, + unsigned long flags) __releases(&qp->scq->cq_lock) __releases(&qp->rcq->cq_lock) { if (qp->rcq != qp->scq) @@ -1606,6 +1606,7 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr, int status; union ib_gid sgid; struct ib_gid_attr sgid_attr; + unsigned int flags; u8 nw_type; qp->qplib_qp.modify_flags = 0; @@ -1634,14 +1635,18 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr, dev_dbg(rdev_to_dev(rdev), "Move QP = %p to flush list\n", qp); + flags = bnxt_re_lock_cqs(qp); bnxt_qplib_add_flush_qp(&qp->qplib_qp); + bnxt_re_unlock_cqs(qp, flags); } if (!qp->sumem && qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_RESET) { dev_dbg(rdev_to_dev(rdev), "Move QP = %p out of flush list\n", qp); + flags = bnxt_re_lock_cqs(qp); bnxt_qplib_clean_qp(&qp->qplib_qp); + bnxt_re_unlock_cqs(qp, flags); } } if (qp_attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) { @@ -2227,10 +2232,13 @@ static int bnxt_re_build_inv_wqe(struct ib_send_wr *wr, wqe->type = BNXT_QPLIB_SWQE_TYPE_LOCAL_INV; wqe->local_inv.inv_l_key = wr->ex.invalidate_rkey; + /* Need unconditional fence for local invalidate + * opcode to work as expected. 
+ */ + wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE; + if (wr->send_flags & IB_SEND_SIGNALED) wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP; - if (wr->send_flags & IB_SEND_FENCE) - wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE; if (wr->send_flags & IB_SEND_SOLICITED) wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT; @@ -2251,8 +2259,12 @@ static int bnxt_re_build_reg_wqe(struct ib_reg_wr *wr, wqe->frmr.levels = qplib_frpl->hwq.level + 1; wqe->type = BNXT_QPLIB_SWQE_TYPE_REG_MR; - if (wr->wr.send_flags & IB_SEND_FENCE) - wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE; + /* Need unconditional fence for reg_mr + * opcode to function as expected. + */ + + wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE; + if (wr->wr.send_flags & IB_SEND_SIGNALED) wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP; diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.h b/drivers/infiniband/hw/bnxt_re/ib_verbs.h index b88a48d43a9d..e62b7c2c7da6 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.h +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.h @@ -222,4 +222,7 @@ struct ib_ucontext *bnxt_re_alloc_ucontext(struct ib_device *ibdev, struct ib_udata *udata); int bnxt_re_dealloc_ucontext(struct ib_ucontext *context); int bnxt_re_mmap(struct ib_ucontext *context, struct vm_area_struct *vma); + +unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp); +void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp, unsigned long flags); #endif /* __BNXT_RE_IB_VERBS_H__ */ diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c index 33a448036c2e..f6e361750466 100644 --- a/drivers/infiniband/hw/bnxt_re/main.c +++ b/drivers/infiniband/hw/bnxt_re/main.c @@ -730,6 +730,13 @@ static int bnxt_re_handle_qp_async_event(struct creq_qp_event *qp_event, struct bnxt_re_qp *qp) { struct ib_event event; + unsigned int flags; + + if (qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR) { + flags = bnxt_re_lock_cqs(qp); + bnxt_qplib_add_flush_qp(&qp->qplib_qp); + bnxt_re_unlock_cqs(qp, flags); + } memset(&event, 0, sizeof(event)); if (qp->qplib_qp.srq) { @@ -1416,9 +1423,12 @@ static void bnxt_re_task(struct work_struct *work) switch (re_work->event) { case NETDEV_REGISTER: rc = bnxt_re_ib_reg(rdev); - if (rc) + if (rc) { dev_err(rdev_to_dev(rdev), "Failed to register with IB: %#x", rc); + bnxt_re_remove_one(rdev); + bnxt_re_dev_unreg(rdev); + } break; case NETDEV_UP: bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1, diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c index 3ea5b9624f6b..06b42c880fd4 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c @@ -88,75 +88,35 @@ static void __bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp) } } -void bnxt_qplib_acquire_cq_locks(struct bnxt_qplib_qp *qp, - unsigned long *flags) - __acquires(&qp->scq->hwq.lock) __acquires(&qp->rcq->hwq.lock) +static void bnxt_qplib_acquire_cq_flush_locks(struct bnxt_qplib_qp *qp, + unsigned long *flags) + __acquires(&qp->scq->flush_lock) __acquires(&qp->rcq->flush_lock) { - spin_lock_irqsave(&qp->scq->hwq.lock, *flags); + spin_lock_irqsave(&qp->scq->flush_lock, *flags); if (qp->scq == qp->rcq) - __acquire(&qp->rcq->hwq.lock); + __acquire(&qp->rcq->flush_lock); else - spin_lock(&qp->rcq->hwq.lock); + spin_lock(&qp->rcq->flush_lock); } -void bnxt_qplib_release_cq_locks(struct bnxt_qplib_qp *qp, - unsigned long *flags) - __releases(&qp->scq->hwq.lock) __releases(&qp->rcq->hwq.lock) +static void bnxt_qplib_release_cq_flush_locks(struct bnxt_qplib_qp *qp, + unsigned long 
*flags) + __releases(&qp->scq->flush_lock) __releases(&qp->rcq->flush_lock) { if (qp->scq == qp->rcq) - __release(&qp->rcq->hwq.lock); + __release(&qp->rcq->flush_lock); else - spin_unlock(&qp->rcq->hwq.lock); - spin_unlock_irqrestore(&qp->scq->hwq.lock, *flags); -} - -static struct bnxt_qplib_cq *bnxt_qplib_find_buddy_cq(struct bnxt_qplib_qp *qp, - struct bnxt_qplib_cq *cq) -{ - struct bnxt_qplib_cq *buddy_cq = NULL; - - if (qp->scq == qp->rcq) - buddy_cq = NULL; - else if (qp->scq == cq) - buddy_cq = qp->rcq; - else - buddy_cq = qp->scq; - return buddy_cq; -} - -static void bnxt_qplib_lock_buddy_cq(struct bnxt_qplib_qp *qp, - struct bnxt_qplib_cq *cq) - __acquires(&buddy_cq->hwq.lock) -{ - struct bnxt_qplib_cq *buddy_cq = NULL; - - buddy_cq = bnxt_qplib_find_buddy_cq(qp, cq); - if (!buddy_cq) - __acquire(&cq->hwq.lock); - else - spin_lock(&buddy_cq->hwq.lock); -} - -static void bnxt_qplib_unlock_buddy_cq(struct bnxt_qplib_qp *qp, - struct bnxt_qplib_cq *cq) - __releases(&buddy_cq->hwq.lock) -{ - struct bnxt_qplib_cq *buddy_cq = NULL; - - buddy_cq = bnxt_qplib_find_buddy_cq(qp, cq); - if (!buddy_cq) - __release(&cq->hwq.lock); - else - spin_unlock(&buddy_cq->hwq.lock); + spin_unlock(&qp->rcq->flush_lock); + spin_unlock_irqrestore(&qp->scq->flush_lock, *flags); } void bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp) { unsigned long flags; - bnxt_qplib_acquire_cq_locks(qp, &flags); + bnxt_qplib_acquire_cq_flush_locks(qp, &flags); __bnxt_qplib_add_flush_qp(qp); - bnxt_qplib_release_cq_locks(qp, &flags); + bnxt_qplib_release_cq_flush_locks(qp, &flags); } static void __bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp) @@ -177,7 +137,7 @@ void bnxt_qplib_clean_qp(struct bnxt_qplib_qp *qp) { unsigned long flags; - bnxt_qplib_acquire_cq_locks(qp, &flags); + bnxt_qplib_acquire_cq_flush_locks(qp, &flags); __clean_cq(qp->scq, (u64)(unsigned long)qp); qp->sq.hwq.prod = 0; qp->sq.hwq.cons = 0; @@ -186,7 +146,7 @@ void bnxt_qplib_clean_qp(struct bnxt_qplib_qp *qp) qp->rq.hwq.cons = 0; __bnxt_qplib_del_flush_qp(qp); - bnxt_qplib_release_cq_locks(qp, &flags); + bnxt_qplib_release_cq_flush_locks(qp, &flags); } static void bnxt_qpn_cqn_sched_task(struct work_struct *work) @@ -2107,9 +2067,6 @@ void bnxt_qplib_mark_qp_error(void *qp_handle) /* Must block new posting of SQ and RQ */ qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR; bnxt_qplib_cancel_phantom_processing(qp); - - /* Add qp to flush list of the CQ */ - __bnxt_qplib_add_flush_qp(qp); } /* Note: SQE is valid from sw_sq_cons up to cqe_sq_cons (exclusive) @@ -2285,9 +2242,9 @@ static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq, sw_sq_cons, cqe->wr_id, cqe->status); cqe++; (*budget)--; - bnxt_qplib_lock_buddy_cq(qp, cq); bnxt_qplib_mark_qp_error(qp); - bnxt_qplib_unlock_buddy_cq(qp, cq); + /* Add qp to flush list of the CQ */ + bnxt_qplib_add_flush_qp(qp); } else { if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) { /* Before we complete, do WA 9060 */ @@ -2403,9 +2360,7 @@ static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq, if (hwcqe->status != CQ_RES_RC_STATUS_OK) { qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR; /* Add qp to flush list of the CQ */ - bnxt_qplib_lock_buddy_cq(qp, cq); - __bnxt_qplib_add_flush_qp(qp); - bnxt_qplib_unlock_buddy_cq(qp, cq); + bnxt_qplib_add_flush_qp(qp); } } @@ -2489,9 +2444,7 @@ static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq, if (hwcqe->status != CQ_RES_RC_STATUS_OK) { qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR; /* Add qp to flush list of the CQ */ - bnxt_qplib_lock_buddy_cq(qp, cq); - 
__bnxt_qplib_add_flush_qp(qp); - bnxt_qplib_unlock_buddy_cq(qp, cq); + bnxt_qplib_add_flush_qp(qp); } } done: @@ -2501,11 +2454,9 @@ done: bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq) { struct cq_base *hw_cqe, **hw_cqe_ptr; - unsigned long flags; u32 sw_cons, raw_cons; bool rc = true; - spin_lock_irqsave(&cq->hwq.lock, flags); raw_cons = cq->hwq.cons; sw_cons = HWQ_CMP(raw_cons, &cq->hwq); hw_cqe_ptr = (struct cq_base **)cq->hwq.pbl_ptr; @@ -2513,7 +2464,6 @@ bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq) /* Check for Valid bit. If the CQE is valid, return false */ rc = !CQE_CMP_VALID(hw_cqe, raw_cons, cq->hwq.max_elements); - spin_unlock_irqrestore(&cq->hwq.lock, flags); return rc; } @@ -2602,9 +2552,7 @@ static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq, if (hwcqe->status != CQ_RES_RC_STATUS_OK) { qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR; /* Add qp to flush list of the CQ */ - bnxt_qplib_lock_buddy_cq(qp, cq); - __bnxt_qplib_add_flush_qp(qp); - bnxt_qplib_unlock_buddy_cq(qp, cq); + bnxt_qplib_add_flush_qp(qp); } } @@ -2719,9 +2667,7 @@ do_rq: */ /* Add qp to flush list of the CQ */ - bnxt_qplib_lock_buddy_cq(qp, cq); - __bnxt_qplib_add_flush_qp(qp); - bnxt_qplib_unlock_buddy_cq(qp, cq); + bnxt_qplib_add_flush_qp(qp); done: return rc; } @@ -2750,7 +2696,7 @@ int bnxt_qplib_process_flush_list(struct bnxt_qplib_cq *cq, u32 budget = num_cqes; unsigned long flags; - spin_lock_irqsave(&cq->hwq.lock, flags); + spin_lock_irqsave(&cq->flush_lock, flags); list_for_each_entry(qp, &cq->sqf_head, sq_flush) { dev_dbg(&cq->hwq.pdev->dev, "QPLIB: FP: Flushing SQ QP= %p", @@ -2764,7 +2710,7 @@ int bnxt_qplib_process_flush_list(struct bnxt_qplib_cq *cq, qp); __flush_rq(&qp->rq, qp, &cqe, &budget); } - spin_unlock_irqrestore(&cq->hwq.lock, flags); + spin_unlock_irqrestore(&cq->flush_lock, flags); return num_cqes - budget; } @@ -2773,11 +2719,9 @@ int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe, int num_cqes, struct bnxt_qplib_qp **lib_qp) { struct cq_base *hw_cqe, **hw_cqe_ptr; - unsigned long flags; u32 sw_cons, raw_cons; int budget, rc = 0; - spin_lock_irqsave(&cq->hwq.lock, flags); raw_cons = cq->hwq.cons; budget = num_cqes; @@ -2853,20 +2797,15 @@ int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe, bnxt_qplib_arm_cq(cq, DBR_DBR_TYPE_CQ); } exit: - spin_unlock_irqrestore(&cq->hwq.lock, flags); return num_cqes - budget; } void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type) { - unsigned long flags; - - spin_lock_irqsave(&cq->hwq.lock, flags); if (arm_type) bnxt_qplib_arm_cq(cq, arm_type); /* Using cq->arm_state variable to track whether to issue cq handler */ atomic_set(&cq->arm_state, 1); - spin_unlock_irqrestore(&cq->hwq.lock, flags); } void bnxt_qplib_flush_cqn_wq(struct bnxt_qplib_qp *qp) diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.h b/drivers/infiniband/hw/bnxt_re/qplib_fp.h index ca0a2ffa3509..ade9f13c0fd1 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_fp.h +++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.h @@ -389,6 +389,18 @@ struct bnxt_qplib_cq { struct list_head sqf_head, rqf_head; atomic_t arm_state; spinlock_t compl_lock; /* synch CQ handlers */ +/* Locking Notes: + * QP can move to error state from modify_qp, async error event or error + * CQE as part of poll_cq. When QP is moved to error state, it gets added + * to two flush lists, one each for SQ and RQ. + * Each flush list is protected by qplib_cq->flush_lock. 
Both scq and rcq + * flush_locks should be acquired when QP is moved to error. The control path + * operations(modify_qp and async error events) are synchronized with poll_cq + * using upper level CQ locks (bnxt_re_cq->cq_lock) of both SCQ and RCQ. + * The qplib_cq->flush_lock is required to synchronize two instances of poll_cq + * of the same QP while manipulating the flush list. + */ + spinlock_t flush_lock; /* QP flush management */ }; #define BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE sizeof(struct xrrq_irrq) diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c index 8329ec6a7946..80027a494730 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c @@ -305,9 +305,8 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw, err_event->res_err_state_reason); if (!qp) break; - bnxt_qplib_acquire_cq_locks(qp, &flags); bnxt_qplib_mark_qp_error(qp); - bnxt_qplib_release_cq_locks(qp, &flags); + rcfw->aeq_handler(rcfw, qp_event, qp); break; default: /* Command Response */ @@ -460,7 +459,11 @@ int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw, int rc; RCFW_CMD_PREP(req, INITIALIZE_FW, cmd_flags); - + /* Supply (log-base-2-of-host-page-size - base-page-shift) + * to bono to adjust the doorbell page sizes. + */ + req.log2_dbr_pg_size = cpu_to_le16(PAGE_SHIFT - + RCFW_DBR_BASE_PAGE_SHIFT); /* * VFs need not setup the HW context area, PF * shall setup this area for VF. Skipping the diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h index 6bee6e3636ea..c7cce2e4185e 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h +++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h @@ -49,6 +49,7 @@ #define RCFW_COMM_SIZE 0x104 #define RCFW_DBR_PCI_BAR_REGION 2 +#define RCFW_DBR_BASE_PAGE_SHIFT 12 #define RCFW_CMD_PREP(req, CMD, cmd_flags) \ do { \ diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c index 03057983341f..ee98e5efef84 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c @@ -139,7 +139,8 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw, attr->max_pkey = le32_to_cpu(sb->max_pkeys); attr->max_inline_data = le32_to_cpu(sb->max_inline_data); - attr->l2_db_size = (sb->l2_db_space_size + 1) * PAGE_SIZE; + attr->l2_db_size = (sb->l2_db_space_size + 1) * + (0x01 << RCFW_DBR_BASE_PAGE_SHIFT); attr->max_sgid = le32_to_cpu(sb->max_gid); bnxt_qplib_query_version(rcfw, attr->fw_ver); diff --git a/drivers/infiniband/hw/bnxt_re/roce_hsi.h b/drivers/infiniband/hw/bnxt_re/roce_hsi.h index 2d7ea096a247..3e5a4f760d0e 100644 --- a/drivers/infiniband/hw/bnxt_re/roce_hsi.h +++ b/drivers/infiniband/hw/bnxt_re/roce_hsi.h @@ -1761,7 +1761,30 @@ struct cmdq_initialize_fw { #define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_PG_2M (0x3UL << 4) #define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_PG_8M (0x4UL << 4) #define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_PG_1G (0x5UL << 4) - __le16 reserved16; + /* This value is (log-base-2-of-DBR-page-size - 12). + * 0 for 4KB. HW supported values are enumerated below. 
+ */ + __le16 log2_dbr_pg_size; + #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_MASK 0xfUL + #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_SFT 0 + #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_4K 0x0UL + #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_8K 0x1UL + #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_16K 0x2UL + #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_32K 0x3UL + #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_64K 0x4UL + #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_128K 0x5UL + #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_256K 0x6UL + #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_512K 0x7UL + #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_1M 0x8UL + #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_2M 0x9UL + #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_4M 0xaUL + #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_8M 0xbUL + #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_16M 0xcUL + #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_32M 0xdUL + #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_64M 0xeUL + #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_128M 0xfUL + #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_LAST \ + CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_128M __le64 qpc_page_dir; __le64 mrw_page_dir; __le64 srq_page_dir; diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c index 9a566ee3ceff..82adc0d1d30e 100644 --- a/drivers/infiniband/hw/mlx4/cq.c +++ b/drivers/infiniband/hw/mlx4/cq.c @@ -601,6 +601,7 @@ static void use_tunnel_data(struct mlx4_ib_qp *qp, struct mlx4_ib_cq *cq, struct wc->dlid_path_bits = 0; if (is_eth) { + wc->slid = 0; wc->vlan_id = be16_to_cpu(hdr->tun.sl_vid); memcpy(&(wc->smac[0]), (char *)&hdr->tun.mac_31_0, 4); memcpy(&(wc->smac[4]), (char *)&hdr->tun.slid_mac_47_32, 2); @@ -851,7 +852,6 @@ repoll: } } - wc->slid = be16_to_cpu(cqe->rlid); g_mlpath_rqpn = be32_to_cpu(cqe->g_mlpath_rqpn); wc->src_qp = g_mlpath_rqpn & 0xffffff; wc->dlid_path_bits = (g_mlpath_rqpn >> 24) & 0x7f; @@ -860,6 +860,7 @@ repoll: wc->wc_flags |= mlx4_ib_ipoib_csum_ok(cqe->status, cqe->checksum) ? 
IB_WC_IP_CSUM_OK : 0; if (is_eth) { + wc->slid = 0; wc->sl = be16_to_cpu(cqe->sl_vid) >> 13; if (be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_CVLAN_PRESENT_MASK) { @@ -871,6 +872,7 @@ repoll: memcpy(wc->smac, cqe->smac, ETH_ALEN); wc->wc_flags |= (IB_WC_WITH_VLAN | IB_WC_WITH_SMAC); } else { + wc->slid = be16_to_cpu(cqe->rlid); wc->sl = be16_to_cpu(cqe->sl_vid) >> 12; wc->vlan_id = 0xffff; } diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index 8d2ee9322f2e..5a0e4fc4785a 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@ -219,8 +219,6 @@ static int mlx4_ib_update_gids_v1_v2(struct gid_entry *gids, gid_tbl[i].version = 2; if (!ipv6_addr_v4mapped((struct in6_addr *)&gids[i].gid)) gid_tbl[i].type = 1; - else - memset(&gid_tbl[i].gid, 0, 12); } } @@ -366,8 +364,13 @@ static int mlx4_ib_del_gid(struct ib_device *device, if (!gids) { ret = -ENOMEM; } else { - for (i = 0; i < MLX4_MAX_PORT_GIDS; i++) - memcpy(&gids[i].gid, &port_gid_table->gids[i].gid, sizeof(union ib_gid)); + for (i = 0; i < MLX4_MAX_PORT_GIDS; i++) { + memcpy(&gids[i].gid, + &port_gid_table->gids[i].gid, + sizeof(union ib_gid)); + gids[i].gid_type = + port_gid_table->gids[i].gid_type; + } } } spin_unlock_bh(&iboe->lock); diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c index 5b974fb97611..15457c9569a7 100644 --- a/drivers/infiniband/hw/mlx5/cq.c +++ b/drivers/infiniband/hw/mlx5/cq.c @@ -226,7 +226,6 @@ static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe, wc->ex.invalidate_rkey = be32_to_cpu(cqe->imm_inval_pkey); break; } - wc->slid = be16_to_cpu(cqe->slid); wc->src_qp = be32_to_cpu(cqe->flags_rqpn) & 0xffffff; wc->dlid_path_bits = cqe->ml_path; g = (be32_to_cpu(cqe->flags_rqpn) >> 28) & 3; @@ -241,10 +240,12 @@ static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe, } if (ll != IB_LINK_LAYER_ETHERNET) { + wc->slid = be16_to_cpu(cqe->slid); wc->sl = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0xf; return; } + wc->slid = 0; vlan_present = cqe->l4_l3_hdr_type & 0x1; roce_packet_type = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0x3; if (vlan_present) { @@ -1177,7 +1178,12 @@ static int resize_user(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq, if (ucmd.reserved0 || ucmd.reserved1) return -EINVAL; - umem = ib_umem_get(context, ucmd.buf_addr, entries * ucmd.cqe_size, + /* check multiplication overflow */ + if (ucmd.cqe_size && SIZE_MAX / ucmd.cqe_size <= entries - 1) + return -EINVAL; + + umem = ib_umem_get(context, ucmd.buf_addr, + (size_t)ucmd.cqe_size * entries, IB_ACCESS_LOCAL_WRITE, 1); if (IS_ERR(umem)) { err = PTR_ERR(umem); diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 4236c8086820..033b6af90de9 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -245,12 +245,16 @@ struct mlx5_core_dev *mlx5_ib_get_native_port_mdev(struct mlx5_ib_dev *ibdev, struct mlx5_ib_multiport_info *mpi; struct mlx5_ib_port *port; + if (!mlx5_core_mp_enabled(ibdev->mdev) || + ll != IB_LINK_LAYER_ETHERNET) { + if (native_port_num) + *native_port_num = ib_port_num; + return ibdev->mdev; + } + if (native_port_num) *native_port_num = 1; - if (!mlx5_core_mp_enabled(ibdev->mdev) || ll != IB_LINK_LAYER_ETHERNET) - return ibdev->mdev; - port = &ibdev->port[ib_port_num - 1]; if (!port) return NULL; @@ -3263,7 +3267,7 @@ static void mlx5_ib_handle_event(struct work_struct *_work) struct mlx5_ib_dev *ibdev; struct ib_event ibev; bool fatal = false; - u8 port = 
0; + u8 port = (u8)work->param; if (mlx5_core_is_mp_slave(work->dev)) { ibdev = mlx5_ib_get_ibdev_from_mpi(work->context); @@ -3283,8 +3287,6 @@ static void mlx5_ib_handle_event(struct work_struct *_work) case MLX5_DEV_EVENT_PORT_UP: case MLX5_DEV_EVENT_PORT_DOWN: case MLX5_DEV_EVENT_PORT_INITIALIZED: - port = (u8)work->param; - /* In RoCE, port up/down events are handled in * mlx5_netdev_event(). */ @@ -3298,24 +3300,19 @@ static void mlx5_ib_handle_event(struct work_struct *_work) case MLX5_DEV_EVENT_LID_CHANGE: ibev.event = IB_EVENT_LID_CHANGE; - port = (u8)work->param; break; case MLX5_DEV_EVENT_PKEY_CHANGE: ibev.event = IB_EVENT_PKEY_CHANGE; - port = (u8)work->param; - schedule_work(&ibdev->devr.ports[port - 1].pkey_change_work); break; case MLX5_DEV_EVENT_GUID_CHANGE: ibev.event = IB_EVENT_GID_CHANGE; - port = (u8)work->param; break; case MLX5_DEV_EVENT_CLIENT_REREG: ibev.event = IB_EVENT_CLIENT_REREGISTER; - port = (u8)work->param; break; case MLX5_DEV_EVENT_DELAY_DROP_TIMEOUT: schedule_work(&ibdev->delay_drop.delay_drop_work); @@ -3327,7 +3324,7 @@ static void mlx5_ib_handle_event(struct work_struct *_work) ibev.device = &ibdev->ib_dev; ibev.element.port_num = port; - if (port < 1 || port > ibdev->num_ports) { + if (!rdma_is_port_valid(&ibdev->ib_dev, port)) { mlx5_ib_warn(ibdev, "warning: event on port %d\n", port); goto out; } diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c index 556e015678de..1961c6a45437 100644 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c @@ -1816,7 +1816,6 @@ mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr, mr->ibmr.iova = sg_dma_address(sg) + sg_offset; mr->ibmr.length = 0; - mr->ndescs = sg_nents; for_each_sg(sgl, sg, sg_nents, i) { if (unlikely(i >= mr->max_descs)) @@ -1828,6 +1827,7 @@ mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr, sg_offset = 0; } + mr->ndescs = i; if (sg_offset_p) *sg_offset_p = sg_offset; diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index 39d24bf694a8..36197fbac63a 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -1584,6 +1584,7 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd, u32 uidx = MLX5_IB_DEFAULT_UIDX; struct mlx5_ib_create_qp ucmd; struct mlx5_ib_qp_base *base; + int mlx5_st; void *qpc; u32 *in; int err; @@ -1592,6 +1593,10 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd, spin_lock_init(&qp->sq.lock); spin_lock_init(&qp->rq.lock); + mlx5_st = to_mlx5_st(init_attr->qp_type); + if (mlx5_st < 0) + return -EINVAL; + if (init_attr->rwq_ind_tbl) { if (!udata) return -ENOSYS; @@ -1753,7 +1758,7 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd, qpc = MLX5_ADDR_OF(create_qp_in, in, qpc); - MLX5_SET(qpc, qpc, st, to_mlx5_st(init_attr->qp_type)); + MLX5_SET(qpc, qpc, st, mlx5_st); MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED); if (init_attr->qp_type != MLX5_IB_QPT_REG_UMR) @@ -3095,8 +3100,10 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp, goto out; if (mlx5_cur >= MLX5_QP_NUM_STATE || mlx5_new >= MLX5_QP_NUM_STATE || - !optab[mlx5_cur][mlx5_new]) + !optab[mlx5_cur][mlx5_new]) { + err = -EINVAL; goto out; + } op = optab[mlx5_cur][mlx5_new]; optpar = ib_mask_to_mlx5_opt(attr_mask); diff --git a/drivers/infiniband/hw/qedr/qedr_iw_cm.c b/drivers/infiniband/hw/qedr/qedr_iw_cm.c index 478b7317b80a..26dc374787f7 100644 --- a/drivers/infiniband/hw/qedr/qedr_iw_cm.c +++ b/drivers/infiniband/hw/qedr/qedr_iw_cm.c @@ -458,8 +458,7 @@ 
qedr_addr6_resolve(struct qedr_dev *dev, } return -EINVAL; } - neigh = dst_neigh_lookup(dst, &dst_in); - + neigh = dst_neigh_lookup(dst, &fl6.daddr); if (neigh) { rcu_read_lock(); if (neigh->nud_state & NUD_VALID) { @@ -494,10 +493,14 @@ int qedr_iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) qp = idr_find(&dev->qpidr, conn_param->qpn); - laddr = (struct sockaddr_in *)&cm_id->local_addr; - raddr = (struct sockaddr_in *)&cm_id->remote_addr; - laddr6 = (struct sockaddr_in6 *)&cm_id->local_addr; - raddr6 = (struct sockaddr_in6 *)&cm_id->remote_addr; + laddr = (struct sockaddr_in *)&cm_id->m_local_addr; + raddr = (struct sockaddr_in *)&cm_id->m_remote_addr; + laddr6 = (struct sockaddr_in6 *)&cm_id->m_local_addr; + raddr6 = (struct sockaddr_in6 *)&cm_id->m_remote_addr; + + DP_DEBUG(dev, QEDR_MSG_IWARP, "MAPPED %d %d\n", + ntohs(((struct sockaddr_in *)&cm_id->remote_addr)->sin_port), + ntohs(raddr->sin_port)); DP_DEBUG(dev, QEDR_MSG_IWARP, "Connect source address: %pISpc, remote address: %pISpc\n", @@ -599,8 +602,8 @@ int qedr_iw_create_listen(struct iw_cm_id *cm_id, int backlog) int rc; int i; - laddr = (struct sockaddr_in *)&cm_id->local_addr; - laddr6 = (struct sockaddr_in6 *)&cm_id->local_addr; + laddr = (struct sockaddr_in *)&cm_id->m_local_addr; + laddr6 = (struct sockaddr_in6 *)&cm_id->m_local_addr; DP_DEBUG(dev, QEDR_MSG_IWARP, "Create Listener address: %pISpc\n", &cm_id->local_addr); diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c index 53f00dbf313f..875b17272d65 100644 --- a/drivers/infiniband/hw/qedr/verbs.c +++ b/drivers/infiniband/hw/qedr/verbs.c @@ -3034,6 +3034,11 @@ static int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, switch (wr->opcode) { case IB_WR_SEND_WITH_IMM: + if (unlikely(rdma_protocol_iwarp(&dev->ibdev, 1))) { + rc = -EINVAL; + *bad_wr = wr; + break; + } wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_IMM; swqe = (struct rdma_sq_send_wqe_1st *)wqe; swqe->wqe_size = 2; @@ -3075,6 +3080,11 @@ static int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, break; case IB_WR_RDMA_WRITE_WITH_IMM: + if (unlikely(rdma_protocol_iwarp(&dev->ibdev, 1))) { + rc = -EINVAL; + *bad_wr = wr; + break; + } wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR_WITH_IMM; rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe; @@ -3724,7 +3734,7 @@ int qedr_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc) { struct qedr_dev *dev = get_qedr_dev(ibcq->device); struct qedr_cq *cq = get_qedr_cq(ibcq); - union rdma_cqe *cqe = cq->latest_cqe; + union rdma_cqe *cqe; u32 old_cons, new_cons; unsigned long flags; int update = 0; @@ -3741,6 +3751,7 @@ int qedr_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc) return qedr_gsi_poll_cq(ibcq, num_entries, wc); spin_lock_irqsave(&cq->cq_lock, flags); + cqe = cq->latest_cqe; old_cons = qed_chain_get_cons_idx_u32(&cq->pbl); while (num_entries && is_valid_cqe(cq, cqe)) { struct qedr_qp *qp; diff --git a/drivers/input/keyboard/matrix_keypad.c b/drivers/input/keyboard/matrix_keypad.c index 1f316d66e6f7..41614c185918 100644 --- a/drivers/input/keyboard/matrix_keypad.c +++ b/drivers/input/keyboard/matrix_keypad.c @@ -218,8 +218,10 @@ static void matrix_keypad_stop(struct input_dev *dev) { struct matrix_keypad *keypad = input_get_drvdata(dev); + spin_lock_irq(&keypad->lock); keypad->stopped = true; - mb(); + spin_unlock_irq(&keypad->lock); + flush_work(&keypad->work.work); /* * matrix_keypad_scan() will leave IRQs enabled; diff --git a/drivers/input/mouse/synaptics.c 
b/drivers/input/mouse/synaptics.c index 3d2e23a0ae39..a246fc686bb7 100644 --- a/drivers/input/mouse/synaptics.c +++ b/drivers/input/mouse/synaptics.c @@ -173,7 +173,6 @@ static const char * const smbus_pnp_ids[] = { "LEN0046", /* X250 */ "LEN004a", /* W541 */ "LEN200f", /* T450s */ - "LEN2018", /* T460p */ NULL }; diff --git a/drivers/input/touchscreen/mms114.c b/drivers/input/touchscreen/mms114.c index db4f6bb502e3..a5ab774da4cc 100644 --- a/drivers/input/touchscreen/mms114.c +++ b/drivers/input/touchscreen/mms114.c @@ -1,11 +1,8 @@ -/* - * Copyright (C) 2012 Samsung Electronics Co.Ltd - * Author: Joonyoung Shim <jy0922.shim@samsung.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ +// SPDX-License-Identifier: GPL-2.0 +// Melfas MMS114/MMS152 touchscreen device driver +// +// Copyright (c) 2012 Samsung Electronics Co., Ltd. +// Author: Joonyoung Shim <jy0922.shim@samsung.com> #include <linux/module.h> #include <linux/delay.h> @@ -624,4 +621,4 @@ module_i2c_driver(mms114_driver); /* Module information */ MODULE_AUTHOR("Joonyoung Shim <jy0922.shim@samsung.com>"); MODULE_DESCRIPTION("MELFAS mms114 Touchscreen driver"); -MODULE_LICENSE("GPL"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c index 414c9af54ded..aa2032fa80d4 100644 --- a/drivers/md/dm-bufio.c +++ b/drivers/md/dm-bufio.c @@ -386,9 +386,6 @@ static void __cache_size_refresh(void) static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask, enum data_mode *data_mode) { - unsigned noio_flag; - void *ptr; - if (c->block_size <= DM_BUFIO_BLOCK_SIZE_SLAB_LIMIT) { *data_mode = DATA_MODE_SLAB; return kmem_cache_alloc(DM_BUFIO_CACHE(c), gfp_mask); @@ -412,16 +409,15 @@ static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask, * all allocations done by this process (including pagetables) are done * as if GFP_NOIO was specified. */ + if (gfp_mask & __GFP_NORETRY) { + unsigned noio_flag = memalloc_noio_save(); + void *ptr = __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL); - if (gfp_mask & __GFP_NORETRY) - noio_flag = memalloc_noio_save(); - - ptr = __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL); - - if (gfp_mask & __GFP_NORETRY) memalloc_noio_restore(noio_flag); + return ptr; + } - return ptr; + return __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL); } /* diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index 7d3e572072f5..3fde9e9faddd 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c @@ -22,6 +22,7 @@ #include <linux/time.h> #include <linux/workqueue.h> #include <linux/delay.h> +#include <scsi/scsi_device.h> #include <scsi/scsi_dh.h> #include <linux/atomic.h> #include <linux/blk-mq.h> @@ -211,25 +212,13 @@ static int alloc_multipath_stage2(struct dm_target *ti, struct multipath *m) else m->queue_mode = DM_TYPE_REQUEST_BASED; - } else if (m->queue_mode == DM_TYPE_BIO_BASED || - m->queue_mode == DM_TYPE_NVME_BIO_BASED) { + } else if (m->queue_mode == DM_TYPE_BIO_BASED) { INIT_WORK(&m->process_queued_bios, process_queued_bios); - - if (m->queue_mode == DM_TYPE_BIO_BASED) { - /* - * bio-based doesn't support any direct scsi_dh management; - * it just discovers if a scsi_dh is attached. 
- */ - set_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags); - } - } - - if (m->queue_mode != DM_TYPE_NVME_BIO_BASED) { - set_bit(MPATHF_QUEUE_IO, &m->flags); - atomic_set(&m->pg_init_in_progress, 0); - atomic_set(&m->pg_init_count, 0); - m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT; - init_waitqueue_head(&m->pg_init_wait); + /* + * bio-based doesn't support any direct scsi_dh management; + * it just discovers if a scsi_dh is attached. + */ + set_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags); } dm_table_set_type(ti->table, m->queue_mode); @@ -337,14 +326,12 @@ static void __switch_pg(struct multipath *m, struct priority_group *pg) { m->current_pg = pg; - if (m->queue_mode == DM_TYPE_NVME_BIO_BASED) - return; - /* Must we initialise the PG first, and queue I/O till it's ready? */ if (m->hw_handler_name) { set_bit(MPATHF_PG_INIT_REQUIRED, &m->flags); set_bit(MPATHF_QUEUE_IO, &m->flags); } else { + /* FIXME: not needed if no scsi_dh is attached */ clear_bit(MPATHF_PG_INIT_REQUIRED, &m->flags); clear_bit(MPATHF_QUEUE_IO, &m->flags); } @@ -385,8 +372,7 @@ static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes) unsigned bypassed = 1; if (!atomic_read(&m->nr_valid_paths)) { - if (m->queue_mode != DM_TYPE_NVME_BIO_BASED) - clear_bit(MPATHF_QUEUE_IO, &m->flags); + clear_bit(MPATHF_QUEUE_IO, &m->flags); goto failed; } @@ -599,7 +585,7 @@ static struct pgpath *__map_bio(struct multipath *m, struct bio *bio) return pgpath; } -static struct pgpath *__map_bio_nvme(struct multipath *m, struct bio *bio) +static struct pgpath *__map_bio_fast(struct multipath *m, struct bio *bio) { struct pgpath *pgpath; unsigned long flags; @@ -634,8 +620,8 @@ static int __multipath_map_bio(struct multipath *m, struct bio *bio, { struct pgpath *pgpath; - if (m->queue_mode == DM_TYPE_NVME_BIO_BASED) - pgpath = __map_bio_nvme(m, bio); + if (!m->hw_handler_name) + pgpath = __map_bio_fast(m, bio); else pgpath = __map_bio(m, bio); @@ -675,8 +661,7 @@ static void process_queued_io_list(struct multipath *m) { if (m->queue_mode == DM_TYPE_MQ_REQUEST_BASED) dm_mq_kick_requeue_list(dm_table_get_md(m->ti->table)); - else if (m->queue_mode == DM_TYPE_BIO_BASED || - m->queue_mode == DM_TYPE_NVME_BIO_BASED) + else if (m->queue_mode == DM_TYPE_BIO_BASED) queue_work(kmultipathd, &m->process_queued_bios); } @@ -838,6 +823,16 @@ retain: */ kfree(m->hw_handler_name); m->hw_handler_name = attached_handler_name; + + /* + * Init fields that are only used when a scsi_dh is attached + */ + if (!test_and_set_bit(MPATHF_QUEUE_IO, &m->flags)) { + atomic_set(&m->pg_init_in_progress, 0); + atomic_set(&m->pg_init_count, 0); + m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT; + init_waitqueue_head(&m->pg_init_wait); + } } } @@ -873,6 +868,7 @@ static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps int r; struct pgpath *p; struct multipath *m = ti->private; + struct scsi_device *sdev; /* we need at least a path arg */ if (as->argc < 1) { @@ -891,7 +887,9 @@ static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps goto bad; } - if (m->queue_mode != DM_TYPE_NVME_BIO_BASED) { + sdev = scsi_device_from_queue(bdev_get_queue(p->path.dev->bdev)); + if (sdev) { + put_device(&sdev->sdev_gendev); INIT_DELAYED_WORK(&p->activate_path, activate_path_work); r = setup_scsi_dh(p->path.dev->bdev, m, &ti->error); if (r) { @@ -1001,8 +999,7 @@ static int parse_hw_handler(struct dm_arg_set *as, struct multipath *m) if (!hw_argc) return 0; - if (m->queue_mode == DM_TYPE_BIO_BASED || - 
m->queue_mode == DM_TYPE_NVME_BIO_BASED) { + if (m->queue_mode == DM_TYPE_BIO_BASED) { dm_consume_args(as, hw_argc); DMERR("bio-based multipath doesn't allow hardware handler args"); return 0; @@ -1091,8 +1088,6 @@ static int parse_features(struct dm_arg_set *as, struct multipath *m) if (!strcasecmp(queue_mode_name, "bio")) m->queue_mode = DM_TYPE_BIO_BASED; - else if (!strcasecmp(queue_mode_name, "nvme")) - m->queue_mode = DM_TYPE_NVME_BIO_BASED; else if (!strcasecmp(queue_mode_name, "rq")) m->queue_mode = DM_TYPE_REQUEST_BASED; else if (!strcasecmp(queue_mode_name, "mq")) @@ -1193,7 +1188,7 @@ static int multipath_ctr(struct dm_target *ti, unsigned argc, char **argv) ti->num_discard_bios = 1; ti->num_write_same_bios = 1; ti->num_write_zeroes_bios = 1; - if (m->queue_mode == DM_TYPE_BIO_BASED || m->queue_mode == DM_TYPE_NVME_BIO_BASED) + if (m->queue_mode == DM_TYPE_BIO_BASED) ti->per_io_data_size = multipath_per_bio_data_size(); else ti->per_io_data_size = sizeof(struct dm_mpath_io); @@ -1730,9 +1725,6 @@ static void multipath_status(struct dm_target *ti, status_type_t type, case DM_TYPE_BIO_BASED: DMEMIT("queue_mode bio "); break; - case DM_TYPE_NVME_BIO_BASED: - DMEMIT("queue_mode nvme "); - break; case DM_TYPE_MQ_REQUEST_BASED: DMEMIT("queue_mode mq "); break; diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index 7ef469e902c6..c1d1034ff7b7 100644 --- a/drivers/md/dm-raid.c +++ b/drivers/md/dm-raid.c @@ -3408,9 +3408,10 @@ static sector_t rs_get_progress(struct raid_set *rs, unsigned long recovery, set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags); } else { - if (test_bit(MD_RECOVERY_NEEDED, &recovery) || - test_bit(MD_RECOVERY_RESHAPE, &recovery) || - test_bit(MD_RECOVERY_RUNNING, &recovery)) + if (!test_bit(MD_RECOVERY_INTR, &recovery) && + (test_bit(MD_RECOVERY_NEEDED, &recovery) || + test_bit(MD_RECOVERY_RESHAPE, &recovery) || + test_bit(MD_RECOVERY_RUNNING, &recovery))) r = mddev->curr_resync_completed; else r = mddev->recovery_cp; diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index 5fe7ec356c33..7eb3e2a3c07d 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -942,17 +942,12 @@ static int dm_table_determine_type(struct dm_table *t) if (t->type != DM_TYPE_NONE) { /* target already set the table's type */ - if (t->type == DM_TYPE_BIO_BASED) - return 0; - else if (t->type == DM_TYPE_NVME_BIO_BASED) { - if (!dm_table_does_not_support_partial_completion(t)) { - DMERR("nvme bio-based is only possible with devices" - " that don't support partial completion"); - return -EINVAL; - } - /* Fallthru, also verify all devices are blk-mq */ + if (t->type == DM_TYPE_BIO_BASED) { + /* possibly upgrade to a variant of bio-based */ + goto verify_bio_based; } BUG_ON(t->type == DM_TYPE_DAX_BIO_BASED); + BUG_ON(t->type == DM_TYPE_NVME_BIO_BASED); goto verify_rq_based; } @@ -985,6 +980,7 @@ static int dm_table_determine_type(struct dm_table *t) } if (bio_based) { +verify_bio_based: /* We must use this table as bio-based */ t->type = DM_TYPE_BIO_BASED; if (dm_table_supports_dax(t) || @@ -1755,7 +1751,7 @@ static int device_no_partial_completion(struct dm_target *ti, struct dm_dev *dev char b[BDEVNAME_SIZE]; /* For now, NVMe devices are the only devices of this class */ - return (strncmp(bdevname(dev->bdev, b), "nvme", 3) == 0); + return (strncmp(bdevname(dev->bdev, b), "nvme", 4) == 0); } static bool dm_table_does_not_support_partial_completion(struct dm_table *t) diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 68136806d365..45328d8b2859 100644 --- 
a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -458,9 +458,11 @@ static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo) return dm_get_geometry(md, geo); } -static int dm_grab_bdev_for_ioctl(struct mapped_device *md, - struct block_device **bdev, - fmode_t *mode) +static char *_dm_claim_ptr = "I belong to device-mapper"; + +static int dm_get_bdev_for_ioctl(struct mapped_device *md, + struct block_device **bdev, + fmode_t *mode) { struct dm_target *tgt; struct dm_table *map; @@ -490,6 +492,10 @@ retry: goto out; bdgrab(*bdev); + r = blkdev_get(*bdev, *mode, _dm_claim_ptr); + if (r < 0) + goto out; + dm_put_live_table(md, srcu_idx); return r; @@ -508,7 +514,7 @@ static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode, struct mapped_device *md = bdev->bd_disk->private_data; int r; - r = dm_grab_bdev_for_ioctl(md, &bdev, &mode); + r = dm_get_bdev_for_ioctl(md, &bdev, &mode); if (r < 0) return r; @@ -528,7 +534,7 @@ static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode, r = __blkdev_driver_ioctl(bdev, mode, cmd, arg); out: - bdput(bdev); + blkdev_put(bdev, mode); return r; } @@ -708,14 +714,13 @@ static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU) static int open_table_device(struct table_device *td, dev_t dev, struct mapped_device *md) { - static char *_claim_ptr = "I belong to device-mapper"; struct block_device *bdev; int r; BUG_ON(td->dm_dev.bdev); - bdev = blkdev_get_by_dev(dev, td->dm_dev.mode | FMODE_EXCL, _claim_ptr); + bdev = blkdev_get_by_dev(dev, td->dm_dev.mode | FMODE_EXCL, _dm_claim_ptr); if (IS_ERR(bdev)) return PTR_ERR(bdev); @@ -3011,7 +3016,7 @@ static int dm_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type, fmode_t mode; int r; - r = dm_grab_bdev_for_ioctl(md, &bdev, &mode); + r = dm_get_bdev_for_ioctl(md, &bdev, &mode); if (r < 0) return r; @@ -3021,7 +3026,7 @@ static int dm_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type, else r = -EOPNOTSUPP; - bdput(bdev); + blkdev_put(bdev, mode); return r; } @@ -3032,7 +3037,7 @@ static int dm_pr_release(struct block_device *bdev, u64 key, enum pr_type type) fmode_t mode; int r; - r = dm_grab_bdev_for_ioctl(md, &bdev, &mode); + r = dm_get_bdev_for_ioctl(md, &bdev, &mode); if (r < 0) return r; @@ -3042,7 +3047,7 @@ static int dm_pr_release(struct block_device *bdev, u64 key, enum pr_type type) else r = -EOPNOTSUPP; - bdput(bdev); + blkdev_put(bdev, mode); return r; } @@ -3054,7 +3059,7 @@ static int dm_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key, fmode_t mode; int r; - r = dm_grab_bdev_for_ioctl(md, &bdev, &mode); + r = dm_get_bdev_for_ioctl(md, &bdev, &mode); if (r < 0) return r; @@ -3064,7 +3069,7 @@ static int dm_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key, else r = -EOPNOTSUPP; - bdput(bdev); + blkdev_put(bdev, mode); return r; } @@ -3075,7 +3080,7 @@ static int dm_pr_clear(struct block_device *bdev, u64 key) fmode_t mode; int r; - r = dm_grab_bdev_for_ioctl(md, &bdev, &mode); + r = dm_get_bdev_for_ioctl(md, &bdev, &mode); if (r < 0) return r; @@ -3085,7 +3090,7 @@ static int dm_pr_clear(struct block_device *bdev, u64 key) else r = -EOPNOTSUPP; - bdput(bdev); + blkdev_put(bdev, mode); return r; } diff --git a/drivers/misc/ocxl/file.c b/drivers/misc/ocxl/file.c index 337462e1569f..038509e5d031 100644 --- a/drivers/misc/ocxl/file.c +++ b/drivers/misc/ocxl/file.c @@ -102,10 +102,32 @@ static long afu_ioctl_attach(struct ocxl_context *ctx, return rc; } +static long afu_ioctl_get_metadata(struct ocxl_context 
*ctx, + struct ocxl_ioctl_metadata __user *uarg) +{ + struct ocxl_ioctl_metadata arg; + + memset(&arg, 0, sizeof(arg)); + + arg.version = 0; + + arg.afu_version_major = ctx->afu->config.version_major; + arg.afu_version_minor = ctx->afu->config.version_minor; + arg.pasid = ctx->pasid; + arg.pp_mmio_size = ctx->afu->config.pp_mmio_stride; + arg.global_mmio_size = ctx->afu->config.global_mmio_size; + + if (copy_to_user(uarg, &arg, sizeof(arg))) + return -EFAULT; + + return 0; +} + #define CMD_STR(x) (x == OCXL_IOCTL_ATTACH ? "ATTACH" : \ x == OCXL_IOCTL_IRQ_ALLOC ? "IRQ_ALLOC" : \ x == OCXL_IOCTL_IRQ_FREE ? "IRQ_FREE" : \ x == OCXL_IOCTL_IRQ_SET_FD ? "IRQ_SET_FD" : \ + x == OCXL_IOCTL_GET_METADATA ? "GET_METADATA" : \ "UNKNOWN") static long afu_ioctl(struct file *file, unsigned int cmd, @@ -159,6 +181,11 @@ static long afu_ioctl(struct file *file, unsigned int cmd, irq_fd.eventfd); break; + case OCXL_IOCTL_GET_METADATA: + rc = afu_ioctl_get_metadata(ctx, + (struct ocxl_ioctl_metadata __user *) args); + break; + default: rc = -EINVAL; } diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index f5c87bd35fa1..f27f9bae1a4a 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -3063,9 +3063,6 @@ static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb) if (ndev->features & NETIF_F_RXCSUM) gfar_rx_checksum(skb, fcb); - /* Tell the skb what kind of packet this is */ - skb->protocol = eth_type_trans(skb, ndev); - /* There's need to check for NETIF_F_HW_VLAN_CTAG_RX here. * Even if vlan rx accel is disabled, on some chips * RXFCB_VLN is pseudo randomly set. @@ -3136,13 +3133,15 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit) continue; } + gfar_process_frame(ndev, skb); + /* Increment the number of packets */ total_pkts++; total_bytes += skb->len; skb_record_rx_queue(skb, rx_queue->qindex); - gfar_process_frame(ndev, skb); + skb->protocol = eth_type_trans(skb, ndev); /* Send the packet up the stack */ napi_gro_receive(&rx_queue->grp->napi_rx, skb); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 0da5aa2c8aba..9fc063af233c 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -1888,6 +1888,14 @@ static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring, ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE, IXGBE_RX_DMA_ATTR); + } else if (ring_uses_build_skb(rx_ring)) { + unsigned long offset = (unsigned long)(skb->data) & ~PAGE_MASK; + + dma_sync_single_range_for_cpu(rx_ring->dev, + IXGBE_CB(skb)->dma, + offset, + skb_headlen(skb), + DMA_FROM_DEVICE); } else { struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c index 21d29f7936f6..d39b0b7011b2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/health.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c @@ -124,7 +124,7 @@ void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force) trigger_cmd_completions(dev); } - mlx5_core_event(dev, MLX5_DEV_EVENT_SYS_ERROR, 0); + mlx5_core_event(dev, MLX5_DEV_EVENT_SYS_ERROR, 1); mlx5_core_err(dev, "end\n"); unlock: diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h index f6963b0b4a55..122506daa586 100644 --- 
a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h @@ -107,20 +107,20 @@ static const struct mlxsw_afk_element_info mlxsw_afk_element_infos[] = { MLXSW_AFK_ELEMENT_INFO_U32(VID, 0x10, 8, 12), MLXSW_AFK_ELEMENT_INFO_U32(PCP, 0x10, 20, 3), MLXSW_AFK_ELEMENT_INFO_U32(TCP_FLAGS, 0x10, 23, 9), - MLXSW_AFK_ELEMENT_INFO_U32(IP_TTL_, 0x14, 0, 8), - MLXSW_AFK_ELEMENT_INFO_U32(IP_ECN, 0x14, 9, 2), - MLXSW_AFK_ELEMENT_INFO_U32(IP_DSCP, 0x14, 11, 6), - MLXSW_AFK_ELEMENT_INFO_U32(SRC_IP4, 0x18, 0, 32), - MLXSW_AFK_ELEMENT_INFO_U32(DST_IP4, 0x1C, 0, 32), - MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP6_HI, 0x18, 8), - MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP6_LO, 0x20, 8), - MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP6_HI, 0x28, 8), - MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP6_LO, 0x30, 8), MLXSW_AFK_ELEMENT_INFO_U32(DST_L4_PORT, 0x14, 0, 16), MLXSW_AFK_ELEMENT_INFO_U32(SRC_L4_PORT, 0x14, 16, 16), + MLXSW_AFK_ELEMENT_INFO_U32(IP_TTL_, 0x18, 0, 8), + MLXSW_AFK_ELEMENT_INFO_U32(IP_ECN, 0x18, 9, 2), + MLXSW_AFK_ELEMENT_INFO_U32(IP_DSCP, 0x18, 11, 6), + MLXSW_AFK_ELEMENT_INFO_U32(SRC_IP4, 0x20, 0, 32), + MLXSW_AFK_ELEMENT_INFO_U32(DST_IP4, 0x24, 0, 32), + MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP6_HI, 0x20, 8), + MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP6_LO, 0x28, 8), + MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP6_HI, 0x30, 8), + MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP6_LO, 0x38, 8), }; -#define MLXSW_AFK_ELEMENT_STORAGE_SIZE 0x38 +#define MLXSW_AFK_ELEMENT_STORAGE_SIZE 0x40 struct mlxsw_afk_element_inst { /* element instance in actual block */ const struct mlxsw_afk_element_info *info; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 3dcc58d61506..c7e941aecc2a 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -1459,6 +1459,7 @@ mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid) } mlxsw_sp_port_vlan->mlxsw_sp_port = mlxsw_sp_port; + mlxsw_sp_port_vlan->ref_count = 1; mlxsw_sp_port_vlan->vid = vid; list_add(&mlxsw_sp_port_vlan->list, &mlxsw_sp_port->vlans_list); @@ -1486,8 +1487,10 @@ mlxsw_sp_port_vlan_get(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid) struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid); - if (mlxsw_sp_port_vlan) + if (mlxsw_sp_port_vlan) { + mlxsw_sp_port_vlan->ref_count++; return mlxsw_sp_port_vlan; + } return mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid); } @@ -1496,6 +1499,9 @@ void mlxsw_sp_port_vlan_put(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan) { struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid; + if (--mlxsw_sp_port_vlan->ref_count != 0) + return; + if (mlxsw_sp_port_vlan->bridge_port) mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan); else if (fid) @@ -4207,13 +4213,12 @@ static struct devlink_resource_ops mlxsw_sp_resource_kvd_hash_double_ops = { .size_validate = mlxsw_sp_resource_kvd_hash_double_size_validate, }; -static struct devlink_resource_size_params mlxsw_sp_kvd_size_params; -static struct devlink_resource_size_params mlxsw_sp_linear_size_params; -static struct devlink_resource_size_params mlxsw_sp_hash_single_size_params; -static struct devlink_resource_size_params mlxsw_sp_hash_double_size_params; - static void -mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core) +mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core, + struct devlink_resource_size_params *kvd_size_params, + struct devlink_resource_size_params 
*linear_size_params, + struct devlink_resource_size_params *hash_double_size_params, + struct devlink_resource_size_params *hash_single_size_params) { u32 single_size_min = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE); @@ -4222,37 +4227,35 @@ mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core) u32 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE); u32 linear_size_min = 0; - /* KVD top resource */ - mlxsw_sp_kvd_size_params.size_min = kvd_size; - mlxsw_sp_kvd_size_params.size_max = kvd_size; - mlxsw_sp_kvd_size_params.size_granularity = MLXSW_SP_KVD_GRANULARITY; - mlxsw_sp_kvd_size_params.unit = DEVLINK_RESOURCE_UNIT_ENTRY; - - /* Linear part init */ - mlxsw_sp_linear_size_params.size_min = linear_size_min; - mlxsw_sp_linear_size_params.size_max = kvd_size - single_size_min - - double_size_min; - mlxsw_sp_linear_size_params.size_granularity = MLXSW_SP_KVD_GRANULARITY; - mlxsw_sp_linear_size_params.unit = DEVLINK_RESOURCE_UNIT_ENTRY; - - /* Hash double part init */ - mlxsw_sp_hash_double_size_params.size_min = double_size_min; - mlxsw_sp_hash_double_size_params.size_max = kvd_size - single_size_min - - linear_size_min; - mlxsw_sp_hash_double_size_params.size_granularity = MLXSW_SP_KVD_GRANULARITY; - mlxsw_sp_hash_double_size_params.unit = DEVLINK_RESOURCE_UNIT_ENTRY; - - /* Hash single part init */ - mlxsw_sp_hash_single_size_params.size_min = single_size_min; - mlxsw_sp_hash_single_size_params.size_max = kvd_size - double_size_min - - linear_size_min; - mlxsw_sp_hash_single_size_params.size_granularity = MLXSW_SP_KVD_GRANULARITY; - mlxsw_sp_hash_single_size_params.unit = DEVLINK_RESOURCE_UNIT_ENTRY; + devlink_resource_size_params_init(kvd_size_params, kvd_size, kvd_size, + MLXSW_SP_KVD_GRANULARITY, + DEVLINK_RESOURCE_UNIT_ENTRY); + devlink_resource_size_params_init(linear_size_params, linear_size_min, + kvd_size - single_size_min - + double_size_min, + MLXSW_SP_KVD_GRANULARITY, + DEVLINK_RESOURCE_UNIT_ENTRY); + devlink_resource_size_params_init(hash_double_size_params, + double_size_min, + kvd_size - single_size_min - + linear_size_min, + MLXSW_SP_KVD_GRANULARITY, + DEVLINK_RESOURCE_UNIT_ENTRY); + devlink_resource_size_params_init(hash_single_size_params, + single_size_min, + kvd_size - double_size_min - + linear_size_min, + MLXSW_SP_KVD_GRANULARITY, + DEVLINK_RESOURCE_UNIT_ENTRY); } static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core) { struct devlink *devlink = priv_to_devlink(mlxsw_core); + struct devlink_resource_size_params hash_single_size_params; + struct devlink_resource_size_params hash_double_size_params; + struct devlink_resource_size_params linear_size_params; + struct devlink_resource_size_params kvd_size_params; u32 kvd_size, single_size, double_size, linear_size; const struct mlxsw_config_profile *profile; int err; @@ -4261,13 +4264,17 @@ static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core) if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE)) return -EIO; - mlxsw_sp_resource_size_params_prepare(mlxsw_core); + mlxsw_sp_resource_size_params_prepare(mlxsw_core, &kvd_size_params, + &linear_size_params, + &hash_double_size_params, + &hash_single_size_params); + kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE); err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD, true, kvd_size, MLXSW_SP_RESOURCE_KVD, DEVLINK_RESOURCE_ID_PARENT_TOP, - &mlxsw_sp_kvd_size_params, + &kvd_size_params, &mlxsw_sp_resource_kvd_ops); if (err) return err; @@ -4277,7 +4284,7 @@ static int mlxsw_sp_resources_register(struct mlxsw_core 
*mlxsw_core) false, linear_size, MLXSW_SP_RESOURCE_KVD_LINEAR, MLXSW_SP_RESOURCE_KVD, - &mlxsw_sp_linear_size_params, + &linear_size_params, &mlxsw_sp_resource_kvd_linear_ops); if (err) return err; @@ -4291,7 +4298,7 @@ static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core) false, double_size, MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE, MLXSW_SP_RESOURCE_KVD, - &mlxsw_sp_hash_double_size_params, + &hash_double_size_params, &mlxsw_sp_resource_kvd_hash_double_ops); if (err) return err; @@ -4301,7 +4308,7 @@ static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core) false, single_size, MLXSW_SP_RESOURCE_KVD_HASH_SINGLE, MLXSW_SP_RESOURCE_KVD, - &mlxsw_sp_hash_single_size_params, + &hash_single_size_params, &mlxsw_sp_resource_kvd_hash_single_ops); if (err) return err; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index bdd8f94a452c..4ec1ca3c96c8 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -211,6 +211,7 @@ struct mlxsw_sp_port_vlan { struct list_head list; struct mlxsw_sp_port *mlxsw_sp_port; struct mlxsw_sp_fid *fid; + unsigned int ref_count; u16 vid; struct mlxsw_sp_bridge_port *bridge_port; struct list_head bridge_vlan_node; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c index bbd238e50f05..54262af4e98f 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c @@ -112,11 +112,11 @@ static const int mlxsw_sp_sfgc_bc_packet_types[MLXSW_REG_SFGC_TYPE_MAX] = { [MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_NON_IP] = 1, [MLXSW_REG_SFGC_TYPE_IPV4_LINK_LOCAL] = 1, [MLXSW_REG_SFGC_TYPE_IPV6_ALL_HOST] = 1, + [MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV6] = 1, }; static const int mlxsw_sp_sfgc_mc_packet_types[MLXSW_REG_SFGC_TYPE_MAX] = { [MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV4] = 1, - [MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV6] = 1, }; static const int *mlxsw_sp_packet_type_sfgc_types[] = { diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index 593ad31be749..161bcdc012f0 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c @@ -1203,6 +1203,7 @@ static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port, bool dynamic) { char *sfd_pl; + u8 num_rec; int err; sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL); @@ -1212,9 +1213,16 @@ static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port, mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0); mlxsw_reg_sfd_uc_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic), mac, fid, action, local_port); + num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl); err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl); - kfree(sfd_pl); + if (err) + goto out; + + if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl)) + err = -EBUSY; +out: + kfree(sfd_pl); return err; } @@ -1239,6 +1247,7 @@ static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id, bool adding, bool dynamic) { char *sfd_pl; + u8 num_rec; int err; sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL); @@ -1249,9 +1258,16 @@ static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id, mlxsw_reg_sfd_uc_lag_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic), mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP, 
lag_vid, lag_id); + num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl); err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl); - kfree(sfd_pl); + if (err) + goto out; + + if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl)) + err = -EBUSY; +out: + kfree(sfd_pl); return err; } @@ -1296,6 +1312,7 @@ static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr, u16 fid, u16 mid_idx, bool adding) { char *sfd_pl; + u8 num_rec; int err; sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL); @@ -1305,7 +1322,15 @@ static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr, mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0); mlxsw_reg_sfd_mc_pack(sfd_pl, 0, addr, fid, MLXSW_REG_SFD_REC_ACTION_NOP, mid_idx); + num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl); err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl); + if (err) + goto out; + + if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl)) + err = -EBUSY; + +out: kfree(sfd_pl); return err; } diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 92dcf8717fc6..14c839bb09e7 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -439,6 +439,17 @@ static void sh_eth_modify(struct net_device *ndev, int enum_index, u32 clear, enum_index); } +static void sh_eth_tsu_write(struct sh_eth_private *mdp, u32 data, + int enum_index) +{ + iowrite32(data, mdp->tsu_addr + mdp->reg_offset[enum_index]); +} + +static u32 sh_eth_tsu_read(struct sh_eth_private *mdp, int enum_index) +{ + return ioread32(mdp->tsu_addr + mdp->reg_offset[enum_index]); +} + static bool sh_eth_is_gether(struct sh_eth_private *mdp) { return mdp->reg_offset == sh_eth_offset_gigabit; diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h index a6753ccba711..e5fe70134690 100644 --- a/drivers/net/ethernet/renesas/sh_eth.h +++ b/drivers/net/ethernet/renesas/sh_eth.h @@ -567,15 +567,4 @@ static inline void *sh_eth_tsu_get_offset(struct sh_eth_private *mdp, return mdp->tsu_addr + mdp->reg_offset[enum_index]; } -static inline void sh_eth_tsu_write(struct sh_eth_private *mdp, u32 data, - int enum_index) -{ - iowrite32(data, mdp->tsu_addr + mdp->reg_offset[enum_index]); -} - -static inline u32 sh_eth_tsu_read(struct sh_eth_private *mdp, int enum_index) -{ - return ioread32(mdp->tsu_addr + mdp->reg_offset[enum_index]); -} - #endif /* #ifndef __SH_ETH_H__ */ diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index 17e529af79dc..0265d703eb03 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -852,13 +852,6 @@ int netvsc_send(struct net_device *ndev, if (unlikely(!net_device || net_device->destroy)) return -ENODEV; - /* We may race with netvsc_connect_vsp()/netvsc_init_buf() and get - * here before the negotiation with the host is finished and - * send_section_map may not be allocated yet. - */ - if (unlikely(!net_device->send_section_map)) - return -EAGAIN; - nvchan = &net_device->chan_table[packet->q_idx]; packet->send_buf_index = NETVSC_INVALID_INDEX; packet->cp_partial = false; @@ -866,10 +859,8 @@ int netvsc_send(struct net_device *ndev, /* Send control message directly without accessing msd (Multi-Send * Data) field which may be changed during data packet processing. 
*/ - if (!skb) { - cur_send = packet; - goto send_now; - } + if (!skb) + return netvsc_send_pkt(device, packet, net_device, pb, skb); /* batch packets in send buffer if possible */ msdp = &nvchan->msd; @@ -953,7 +944,6 @@ int netvsc_send(struct net_device *ndev, } } -send_now: if (cur_send) ret = netvsc_send_pkt(device, cur_send, net_device, pb, skb); @@ -1217,9 +1207,10 @@ int netvsc_poll(struct napi_struct *napi, int budget) if (send_recv_completions(ndev, net_device, nvchan) == 0 && work_done < budget && napi_complete_done(napi, work_done) && - hv_end_read(&channel->inbound)) { + hv_end_read(&channel->inbound) && + napi_schedule_prep(napi)) { hv_begin_read(&channel->inbound); - napi_reschedule(napi); + __napi_schedule(napi); } /* Driver may overshoot since multiple packets per descriptor */ @@ -1242,7 +1233,7 @@ void netvsc_channel_cb(void *context) /* disable interrupts from host */ hv_begin_read(rbi); - __napi_schedule(&nvchan->napi); + __napi_schedule_irqoff(&nvchan->napi); } } @@ -1296,7 +1287,6 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device, netvsc_channel_cb, net_device->chan_table); if (ret != 0) { - netif_napi_del(&net_device->chan_table[0].napi); netdev_err(ndev, "unable to open channel: %d\n", ret); goto cleanup; } @@ -1306,11 +1296,6 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device, napi_enable(&net_device->chan_table[0].napi); - /* Writing nvdev pointer unlocks netvsc_send(), make sure chn_table is - * populated. - */ - rcu_assign_pointer(net_device_ctx->nvdev, net_device); - /* Connect with the NetVsp */ ret = netvsc_connect_vsp(device, net_device, device_info); if (ret != 0) { @@ -1319,6 +1304,11 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device, goto close; } + /* Writing nvdev pointer unlocks netvsc_send(), make sure chn_table is - * populated. + */ + rcu_assign_pointer(net_device_ctx->nvdev, net_device); + return net_device; close: @@ -1329,6 +1319,7 @@ close: vmbus_close(device->channel); cleanup: + netif_napi_del(&net_device->chan_table[0].napi); free_netvsc_device(&net_device->rcu); return ERR_PTR(ret); diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index c5584c2d440e..cdb78eefab67 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -66,10 +66,36 @@ static int debug = -1; module_param(debug, int, S_IRUGO); MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); -static void netvsc_set_multicast_list(struct net_device *net) +static void netvsc_change_rx_flags(struct net_device *net, int change) { - struct net_device_context *net_device_ctx = netdev_priv(net); - struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev); + struct net_device_context *ndev_ctx = netdev_priv(net); + struct net_device *vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev); + int inc; + + if (!vf_netdev) + return; + + if (change & IFF_PROMISC) { + inc = (net->flags & IFF_PROMISC) ? 1 : -1; + dev_set_promiscuity(vf_netdev, inc); + } + + if (change & IFF_ALLMULTI) { + inc = (net->flags & IFF_ALLMULTI) ?
1 : -1; + dev_set_allmulti(vf_netdev, inc); + } +} + +static void netvsc_set_rx_mode(struct net_device *net) +{ + struct net_device_context *ndev_ctx = netdev_priv(net); + struct net_device *vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev); + struct netvsc_device *nvdev = rtnl_dereference(ndev_ctx->nvdev); + + if (vf_netdev) { + dev_uc_sync(vf_netdev, net); + dev_mc_sync(vf_netdev, net); + } rndis_filter_update(nvdev); } @@ -91,12 +117,11 @@ static int netvsc_open(struct net_device *net) return ret; } - netif_tx_wake_all_queues(net); - rdev = nvdev->extension; - - if (!rdev->link_state) + if (!rdev->link_state) { netif_carrier_on(net); + netif_tx_wake_all_queues(net); + } if (vf_netdev) { /* Setting synthetic device up transparently sets @@ -299,8 +324,19 @@ static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb, rcu_read_lock(); vf_netdev = rcu_dereference(ndc->vf_netdev); if (vf_netdev) { - txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0; - qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb->queue_mapping; + const struct net_device_ops *vf_ops = vf_netdev->netdev_ops; + + if (vf_ops->ndo_select_queue) + txq = vf_ops->ndo_select_queue(vf_netdev, skb, + accel_priv, fallback); + else + txq = fallback(vf_netdev, skb); + + /* Record the queue selected by VF so that it can be + * used for the common case where VF has more queues than + * the synthetic device. + */ + qdisc_skb_cb(skb)->slave_dev_queue_mapping = txq; } else { txq = netvsc_pick_tx(ndev, skb); } @@ -1576,7 +1612,8 @@ static const struct net_device_ops device_ops = { .ndo_open = netvsc_open, .ndo_stop = netvsc_close, .ndo_start_xmit = netvsc_start_xmit, - .ndo_set_rx_mode = netvsc_set_multicast_list, + .ndo_change_rx_flags = netvsc_change_rx_flags, + .ndo_set_rx_mode = netvsc_set_rx_mode, .ndo_change_mtu = netvsc_change_mtu, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = netvsc_set_mac_addr, @@ -1807,6 +1844,11 @@ static void __netvsc_vf_setup(struct net_device *ndev, netdev_warn(vf_netdev, "unable to change mtu to %u\n", ndev->mtu); + /* set multicast etc. flags on VF */ + dev_change_flags(vf_netdev, ndev->flags | IFF_SLAVE); + dev_uc_sync(vf_netdev, ndev); + dev_mc_sync(vf_netdev, ndev); + if (netif_running(ndev)) { ret = dev_open(vf_netdev); if (ret) diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index c3ca191fea7f..8927c483c217 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c @@ -854,15 +854,19 @@ static void rndis_set_multicast(struct work_struct *w) { struct rndis_device *rdev = container_of(w, struct rndis_device, mcast_work); + u32 filter = NDIS_PACKET_TYPE_DIRECTED; + unsigned int flags = rdev->ndev->flags; - if (rdev->ndev->flags & IFF_PROMISC) - rndis_filter_set_packet_filter(rdev, - NDIS_PACKET_TYPE_PROMISCUOUS); - else - rndis_filter_set_packet_filter(rdev, - NDIS_PACKET_TYPE_BROADCAST | - NDIS_PACKET_TYPE_ALL_MULTICAST | - NDIS_PACKET_TYPE_DIRECTED); + if (flags & IFF_PROMISC) { + filter = NDIS_PACKET_TYPE_PROMISCUOUS; + } else { + if (flags & IFF_ALLMULTI) + filter |= NDIS_PACKET_TYPE_ALL_MULTICAST; + if (flags & IFF_BROADCAST) + filter |= NDIS_PACKET_TYPE_BROADCAST; + } + + rndis_filter_set_packet_filter(rdev, filter); } void rndis_filter_update(struct netvsc_device *nvdev) @@ -1340,6 +1344,9 @@ void rndis_filter_device_remove(struct hv_device *dev, { struct rndis_device *rndis_dev = net_dev->extension; + /* Don't try to set up sub channels if about to halt */ +
cancel_work_sync(&net_dev->subchan_work); + /* Halt and release the rndis device */ rndis_filter_halt_device(rndis_dev); diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index e3e29c2b028b..a6f924fee584 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -819,7 +819,7 @@ void phy_start(struct phy_device *phydev) break; case PHY_HALTED: /* if phy was suspended, bring the physical link up again */ - phy_resume(phydev); + __phy_resume(phydev); /* make sure interrupts are re-enabled for the PHY */ if (phy_interrupt_is_valid(phydev)) { diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index d39ae77707ef..478405e544cc 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -135,9 +135,7 @@ static int mdio_bus_phy_resume(struct device *dev) if (!mdio_bus_phy_may_suspend(phydev)) goto no_resume; - mutex_lock(&phydev->lock); ret = phy_resume(phydev); - mutex_unlock(&phydev->lock); if (ret < 0) return ret; @@ -1041,9 +1039,7 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev, if (err) goto error; - mutex_lock(&phydev->lock); phy_resume(phydev); - mutex_unlock(&phydev->lock); phy_led_triggers_register(phydev); return err; @@ -1172,7 +1168,7 @@ int phy_suspend(struct phy_device *phydev) } EXPORT_SYMBOL(phy_suspend); -int phy_resume(struct phy_device *phydev) +int __phy_resume(struct phy_device *phydev) { struct phy_driver *phydrv = to_phy_driver(phydev->mdio.dev.driver); int ret = 0; @@ -1189,6 +1185,18 @@ int phy_resume(struct phy_device *phydev) return ret; } +EXPORT_SYMBOL(__phy_resume); + +int phy_resume(struct phy_device *phydev) +{ + int ret; + + mutex_lock(&phydev->lock); + ret = __phy_resume(phydev); + mutex_unlock(&phydev->lock); + + return ret; +} EXPORT_SYMBOL(phy_resume); int phy_loopback(struct phy_device *phydev, bool enable) diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c index 255a5def56e9..fa2a9bdd1866 100644 --- a/drivers/net/ppp/ppp_generic.c +++ b/drivers/net/ppp/ppp_generic.c @@ -3161,6 +3161,15 @@ ppp_connect_channel(struct channel *pch, int unit) goto outl; ppp_lock(ppp); + spin_lock_bh(&pch->downl); + if (!pch->chan) { + /* Don't connect unregistered channels */ + spin_unlock_bh(&pch->downl); + ppp_unlock(ppp); + ret = -ENOTCONN; + goto outl; + } + spin_unlock_bh(&pch->downl); if (pch->file.hdrlen > ppp->file.hdrlen) ppp->file.hdrlen = pch->file.hdrlen; hdrlen = pch->file.hdrlen + 2; /* for protocol bytes */ diff --git a/drivers/net/tun.c b/drivers/net/tun.c index b52258c327d2..7433bb2e4451 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -181,7 +181,6 @@ struct tun_file { struct tun_struct *detached; struct ptr_ring tx_ring; struct xdp_rxq_info xdp_rxq; - int xdp_pending_pkts; }; struct tun_flow_entry { @@ -1643,6 +1642,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun, else *skb_xdp = 0; + preempt_disable(); rcu_read_lock(); xdp_prog = rcu_dereference(tun->xdp_prog); if (xdp_prog && !*skb_xdp) { @@ -1662,11 +1662,12 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun, case XDP_REDIRECT: get_page(alloc_frag->page); alloc_frag->offset += buflen; - ++tfile->xdp_pending_pkts; err = xdp_do_redirect(tun->dev, &xdp, xdp_prog); + xdp_do_flush_map(); if (err) goto err_redirect; rcu_read_unlock(); + preempt_enable(); return NULL; case XDP_TX: xdp_xmit = true; @@ -1688,6 +1689,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun, skb = build_skb(buf, buflen); if (!skb) { rcu_read_unlock(); + preempt_enable(); return 
ERR_PTR(-ENOMEM); } @@ -1700,10 +1702,12 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun, skb->dev = tun->dev; generic_xdp_tx(skb, xdp_prog); rcu_read_unlock(); + preempt_enable(); return NULL; } rcu_read_unlock(); + preempt_enable(); return skb; @@ -1711,6 +1715,7 @@ err_redirect: put_page(alloc_frag->page); err_xdp: rcu_read_unlock(); + preempt_enable(); this_cpu_inc(tun->pcpu_stats->rx_dropped); return NULL; } @@ -1984,11 +1989,6 @@ static ssize_t tun_chr_write_iter(struct kiocb *iocb, struct iov_iter *from) result = tun_get_user(tun, tfile, NULL, from, file->f_flags & O_NONBLOCK, false); - if (tfile->xdp_pending_pkts) { - tfile->xdp_pending_pkts = 0; - xdp_do_flush_map(); - } - tun_put(tun); return result; } @@ -2325,13 +2325,6 @@ static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len) ret = tun_get_user(tun, tfile, m->msg_control, &m->msg_iter, m->msg_flags & MSG_DONTWAIT, m->msg_flags & MSG_MORE); - - if (tfile->xdp_pending_pkts >= NAPI_POLL_WEIGHT || - !(m->msg_flags & MSG_MORE)) { - tfile->xdp_pending_pkts = 0; - xdp_do_flush_map(); - } - tun_put(tun); return ret; } @@ -3163,7 +3156,6 @@ static int tun_chr_open(struct inode *inode, struct file * file) sock_set_flag(&tfile->sk, SOCK_ZEROCOPY); memset(&tfile->tx_ring, 0, sizeof(tfile->tx_ring)); - tfile->xdp_pending_pkts = 0; return 0; } diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c index 05dca3e5c93d..fff4b13eece2 100644 --- a/drivers/net/usb/cdc_ether.c +++ b/drivers/net/usb/cdc_ether.c @@ -896,6 +896,12 @@ static const struct usb_device_id products[] = { USB_CDC_PROTO_NONE), .driver_info = (unsigned long)&wwan_info, }, { + /* Cinterion PLS8 modem by GEMALTO */ + USB_DEVICE_AND_INTERFACE_INFO(0x1e2d, 0x0061, USB_CLASS_COMM, + USB_CDC_SUBCLASS_ETHERNET, + USB_CDC_PROTO_NONE), + .driver_info = (unsigned long)&wwan_info, +}, { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), .driver_info = (unsigned long) &cdc_info, diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 958b2e8b90f6..86f7196f9d91 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -1794,7 +1794,7 @@ static int r8152_tx_agg_fill(struct r8152 *tp, struct tx_agg *agg) tx_data += len; agg->skb_len += len; - agg->skb_num++; + agg->skb_num += skb_shinfo(skb)->gso_segs ?: 1; dev_kfree_skb_any(skb); diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 9bb9e562b893..23374603e4d9 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -504,6 +504,7 @@ static struct page *xdp_linearize_page(struct receive_queue *rq, page_off += *len; while (--*num_buf) { + int tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); unsigned int buflen; void *buf; int off; @@ -518,7 +519,7 @@ static struct page *xdp_linearize_page(struct receive_queue *rq, /* guard against a misconfigured or uncooperative backend that * is sending a packet larger than the MTU.
*/ - if ((page_off + buflen) > PAGE_SIZE) { + if ((page_off + buflen + tailroom) > PAGE_SIZE) { put_page(p); goto err_buf; } @@ -690,6 +691,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, unsigned int truesize; unsigned int headroom = mergeable_ctx_to_headroom(ctx); bool sent; + int err; head_skb = NULL; @@ -701,7 +703,12 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, void *data; u32 act; - /* This happens when rx buffer size is underestimated */ + /* This happens when rx buffer size is underestimated + * or headroom is not enough because the buffer + * was refilled before XDP was set. This should only + * happen for the first several packets, so we don't + * care much about its performance. + */ if (unlikely(num_buf > 1 || headroom < virtnet_get_headroom(vi))) { /* linearize data for XDP */ @@ -736,9 +743,6 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, act = bpf_prog_run_xdp(xdp_prog, &xdp); - if (act != XDP_PASS) - ewma_pkt_len_add(&rq->mrg_avg_pkt_len, len); - switch (act) { case XDP_PASS: /* recalculate offset to account for any header @@ -770,6 +774,18 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, goto err_xdp; rcu_read_unlock(); goto xdp_xmit; + case XDP_REDIRECT: + err = xdp_do_redirect(dev, &xdp, xdp_prog); + if (err) { + if (unlikely(xdp_page != page)) + put_page(xdp_page); + goto err_xdp; + } + *xdp_xmit = true; + if (unlikely(xdp_page != page)) + goto err_xdp; + rcu_read_unlock(); + goto xdp_xmit; default: bpf_warn_invalid_xdp_action(act); case XDP_ABORTED: @@ -1013,13 +1029,18 @@ static int add_recvbuf_big(struct virtnet_info *vi, struct receive_queue *rq, } static unsigned int get_mergeable_buf_len(struct receive_queue *rq, - struct ewma_pkt_len *avg_pkt_len) + struct ewma_pkt_len *avg_pkt_len, + unsigned int room) { const size_t hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf); unsigned int len; - len = hdr_len + clamp_t(unsigned int, ewma_pkt_len_read(avg_pkt_len), + if (room) + return PAGE_SIZE - room; + + len = hdr_len + clamp_t(unsigned int, ewma_pkt_len_read(avg_pkt_len), rq->min_buf_len, PAGE_SIZE - hdr_len); + return ALIGN(len, L1_CACHE_BYTES); } @@ -1028,21 +1049,27 @@ static int add_recvbuf_mergeable(struct virtnet_info *vi, { struct page_frag *alloc_frag = &rq->alloc_frag; unsigned int headroom = virtnet_get_headroom(vi); + unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0; + unsigned int room = SKB_DATA_ALIGN(headroom + tailroom); char *buf; void *ctx; int err; unsigned int len, hole; - len = get_mergeable_buf_len(rq, &rq->mrg_avg_pkt_len); - if (unlikely(!skb_page_frag_refill(len + headroom, alloc_frag, gfp))) + /* Extra tailroom is needed to satisfy XDP's assumption. This + * means rx frags coalescing won't work, but since we've + * already disabled GSO for XDP, it won't be a big issue. + */ + len = get_mergeable_buf_len(rq, &rq->mrg_avg_pkt_len, room); + if (unlikely(!skb_page_frag_refill(len + room, alloc_frag, gfp))) return -ENOMEM; buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset; buf += headroom; /* advance address leaving hole at front of pkt */ get_page(alloc_frag->page); - alloc_frag->offset += len + headroom; + alloc_frag->offset += len + room; hole = alloc_frag->size - alloc_frag->offset; - if (hole < len + headroom) { + if (hole < len + room) { /* To avoid internal fragmentation, if there is very likely not * enough space for another buffer, add the remaining space to * the current buffer.
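The mergeable-buffer sizing introduced in the hunk above is compact, so here is a standalone C sketch of the same rule. The constants and names below are assumed example values (a fixed average packet length stands in for the driver's EWMA estimate, SHARED_INFO_SIZE stands in for sizeof(struct skb_shared_info), and the 256-byte headroom mirrors the XDP headroom); this is an illustration of the sizing logic, not the kernel's code. It shows how reserving "room" for headroom plus tailroom caps each buffer at PAGE_SIZE - room when XDP is active, while the non-XDP path still tracks the average packet size:

#include <stdio.h>

#define PAGE_SIZE        4096UL
#define L1_CACHE_BYTES   64UL
#define SHARED_INFO_SIZE 320UL  /* assumed stand-in for sizeof(struct skb_shared_info) */
#define SKB_DATA_ALIGN(x) (((x) + (L1_CACHE_BYTES - 1)) & ~(L1_CACHE_BYTES - 1))

static unsigned long clamp_ul(unsigned long v, unsigned long lo, unsigned long hi)
{
    return v < lo ? lo : (v > hi ? hi : v);
}

/* Mirrors the patched get_mergeable_buf_len(): with XDP (room != 0) the
 * buffer is capped so headroom + data + shared info fit in one page;
 * without XDP the length tracks the average packet size, cache aligned. */
static unsigned long mergeable_buf_len(unsigned long avg_pkt_len,
                                       unsigned long min_buf_len,
                                       unsigned long hdr_len,
                                       unsigned long room)
{
    unsigned long len;

    if (room)
        return PAGE_SIZE - room;

    len = hdr_len + clamp_ul(avg_pkt_len, min_buf_len, PAGE_SIZE - hdr_len);
    return SKB_DATA_ALIGN(len); /* same as ALIGN(len, L1_CACHE_BYTES) */
}

int main(void)
{
    unsigned long headroom = 256UL; /* assumed XDP headroom */
    unsigned long room = SKB_DATA_ALIGN(headroom + SHARED_INFO_SIZE);

    printf("XDP buf len:     %lu\n", mergeable_buf_len(1500, 128, 12, room));
    printf("non-XDP buf len: %lu\n", mergeable_buf_len(1500, 128, 12, 0));
    return 0;
}

With these assumptions the XDP case yields 4096 - 576 = 3520 bytes per buffer, which is why the patch also passes len + room (not len + headroom) to skb_page_frag_refill() and advances alloc_frag->offset by the same amount.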
@@ -2185,8 +2212,9 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog, } /* Make sure NAPI is not using any XDP TX queues for RX. */ - for (i = 0; i < vi->max_queue_pairs; i++) - napi_disable(&vi->rq[i].napi); + if (netif_running(dev)) + for (i = 0; i < vi->max_queue_pairs; i++) + napi_disable(&vi->rq[i].napi); netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp); err = _virtnet_set_queues(vi, curr_qp + xdp_qp); @@ -2205,7 +2233,8 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog, } if (old_prog) bpf_prog_put(old_prog); - virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); + if (netif_running(dev)) + virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); } return 0; @@ -2576,12 +2605,15 @@ static ssize_t mergeable_rx_buffer_size_show(struct netdev_rx_queue *queue, { struct virtnet_info *vi = netdev_priv(queue->dev); unsigned int queue_index = get_netdev_rx_queue_index(queue); + unsigned int headroom = virtnet_get_headroom(vi); + unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0; struct ewma_pkt_len *avg; BUG_ON(queue_index >= vi->max_queue_pairs); avg = &vi->rq[queue_index].mrg_avg_pkt_len; return sprintf(buf, "%u\n", - get_mergeable_buf_len(&vi->rq[queue_index], avg)); + get_mergeable_buf_len(&vi->rq[queue_index], avg, + SKB_DATA_ALIGN(headroom + tailroom))); } static struct rx_queue_attribute mergeable_rx_buffer_size_attribute = diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c index afeca6bcdade..ab8b3cbbb205 100644 --- a/drivers/net/wan/hdlc_ppp.c +++ b/drivers/net/wan/hdlc_ppp.c @@ -574,7 +574,10 @@ static void ppp_timer(struct timer_list *t) ppp_cp_event(proto->dev, proto->pid, TO_GOOD, 0, 0, 0, NULL); proto->restart_counter--; - } else + } else if (netif_carrier_ok(proto->dev)) + ppp_cp_event(proto->dev, proto->pid, TO_GOOD, 0, 0, + 0, NULL); + else ppp_cp_event(proto->dev, proto->pid, TO_BAD, 0, 0, 0, NULL); break; diff --git a/drivers/pci/dwc/pcie-designware-host.c b/drivers/pci/dwc/pcie-designware-host.c index 8de2d5c69b1d..dc9303abda42 100644 --- a/drivers/pci/dwc/pcie-designware-host.c +++ b/drivers/pci/dwc/pcie-designware-host.c @@ -613,7 +613,7 @@ void dw_pcie_setup_rc(struct pcie_port *pp) /* setup bus numbers */ val = dw_pcie_readl_dbi(pci, PCI_PRIMARY_BUS); val &= 0xff000000; - val |= 0x00010100; + val |= 0x00ff0100; dw_pcie_writel_dbi(pci, PCI_PRIMARY_BUS, val); /* setup command register */ diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c index 0c2ed11c0603..f63db346c219 100644 --- a/drivers/perf/arm_pmu.c +++ b/drivers/perf/arm_pmu.c @@ -638,7 +638,7 @@ static int arm_perf_teardown_cpu(unsigned int cpu, struct hlist_node *node) if (irq_is_percpu_devid(irq)) disable_percpu_irq(irq); else - disable_irq(irq); + disable_irq_nosync(irq); } per_cpu(cpu_armpmu, cpu) = NULL; diff --git a/drivers/platform/chrome/chromeos_laptop.c b/drivers/platform/chrome/chromeos_laptop.c index 6dec6ab13300..d8599736a41a 100644 --- a/drivers/platform/chrome/chromeos_laptop.c +++ b/drivers/platform/chrome/chromeos_laptop.c @@ -423,7 +423,7 @@ static int chromeos_laptop_probe(struct platform_device *pdev) return ret; } -static const struct chromeos_laptop samsung_series_5_550 = { +static struct chromeos_laptop samsung_series_5_550 = { .i2c_peripherals = { /* Touchpad. 
*/ { .add = setup_cyapa_tp, I2C_ADAPTER_SMBUS }, @@ -432,14 +432,14 @@ static const struct chromeos_laptop samsung_series_5_550 = { }, }; -static const struct chromeos_laptop samsung_series_5 = { +static struct chromeos_laptop samsung_series_5 = { .i2c_peripherals = { /* Light Sensor. */ { .add = setup_tsl2583_als, I2C_ADAPTER_SMBUS }, }, }; -static const struct chromeos_laptop chromebook_pixel = { +static struct chromeos_laptop chromebook_pixel = { .i2c_peripherals = { /* Touch Screen. */ { .add = setup_atmel_1664s_ts, I2C_ADAPTER_PANEL }, @@ -450,14 +450,14 @@ static const struct chromeos_laptop chromebook_pixel = { }, }; -static const struct chromeos_laptop hp_chromebook_14 = { +static struct chromeos_laptop hp_chromebook_14 = { .i2c_peripherals = { /* Touchpad. */ { .add = setup_cyapa_tp, I2C_ADAPTER_DESIGNWARE_0 }, }, }; -static const struct chromeos_laptop dell_chromebook_11 = { +static struct chromeos_laptop dell_chromebook_11 = { .i2c_peripherals = { /* Touchpad. */ { .add = setup_cyapa_tp, I2C_ADAPTER_DESIGNWARE_0 }, @@ -466,28 +466,28 @@ static const struct chromeos_laptop dell_chromebook_11 = { }, }; -static const struct chromeos_laptop toshiba_cb35 = { +static struct chromeos_laptop toshiba_cb35 = { .i2c_peripherals = { /* Touchpad. */ { .add = setup_cyapa_tp, I2C_ADAPTER_DESIGNWARE_0 }, }, }; -static const struct chromeos_laptop acer_c7_chromebook = { +static struct chromeos_laptop acer_c7_chromebook = { .i2c_peripherals = { /* Touchpad. */ { .add = setup_cyapa_tp, I2C_ADAPTER_SMBUS }, }, }; -static const struct chromeos_laptop acer_ac700 = { +static struct chromeos_laptop acer_ac700 = { .i2c_peripherals = { /* Light Sensor. */ { .add = setup_tsl2563_als, I2C_ADAPTER_SMBUS }, }, }; -static const struct chromeos_laptop acer_c720 = { +static struct chromeos_laptop acer_c720 = { .i2c_peripherals = { /* Touchscreen. */ { .add = setup_atmel_1664s_ts, I2C_ADAPTER_DESIGNWARE_1 }, @@ -500,14 +500,14 @@ static const struct chromeos_laptop acer_c720 = { }, }; -static const struct chromeos_laptop hp_pavilion_14_chromebook = { +static struct chromeos_laptop hp_pavilion_14_chromebook = { .i2c_peripherals = { /* Touchpad. */ { .add = setup_cyapa_tp, I2C_ADAPTER_SMBUS }, }, }; -static const struct chromeos_laptop cr48 = { +static struct chromeos_laptop cr48 = { .i2c_peripherals = { /* Light Sensor. */ { .add = setup_tsl2563_als, I2C_ADAPTER_SMBUS }, diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig index 9a8f96465cdc..d10ffe51da24 100644 --- a/drivers/platform/x86/Kconfig +++ b/drivers/platform/x86/Kconfig @@ -105,31 +105,44 @@ config ASUS_LAPTOP If you have an ACPI-compatible ASUS laptop, say Y or M here. +# +# If the DELL_SMBIOS_SMM feature is enabled, the DELL_SMBIOS driver +# becomes dependent on the DCDBAS driver. The "depends" line prevents a +# configuration where DELL_SMBIOS=y while DCDBAS=m. +# config DELL_SMBIOS - tristate + tristate "Dell SMBIOS driver" + depends on DCDBAS || DCDBAS=n + ---help--- + This provides support for the Dell SMBIOS calling interface. + If you have a Dell computer you should enable this option. + + Be sure to select at least one backend for it to work properly. config DELL_SMBIOS_WMI - tristate "Dell SMBIOS calling interface (WMI implementation)" + bool "Dell SMBIOS driver WMI backend" + default y depends on ACPI_WMI select DELL_WMI_DESCRIPTOR - select DELL_SMBIOS + depends on DELL_SMBIOS ---help--- This provides an implementation for the Dell SMBIOS calling interface communicated over ACPI-WMI. 
- If you have a Dell computer from >2007 you should say Y or M here. + If you have a Dell computer from >2007 you should say Y here. If you aren't sure and this module doesn't work for your computer it just won't load. config DELL_SMBIOS_SMM - tristate "Dell SMBIOS calling interface (SMM implementation)" + bool "Dell SMBIOS driver SMM backend" + default y depends on DCDBAS - select DELL_SMBIOS + depends on DELL_SMBIOS ---help--- This provides an implementation for the Dell SMBIOS calling interface communicated over SMI/SMM. - If you have a Dell computer from <=2017 you should say Y or M here. + If you have a Dell computer from <=2017 you should say Y here. If you aren't sure and this module doesn't work for your computer it just won't load. diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile index c388608ad2a3..2ba6cb795338 100644 --- a/drivers/platform/x86/Makefile +++ b/drivers/platform/x86/Makefile @@ -13,8 +13,9 @@ obj-$(CONFIG_MSI_LAPTOP) += msi-laptop.o obj-$(CONFIG_ACPI_CMPC) += classmate-laptop.o obj-$(CONFIG_COMPAL_LAPTOP) += compal-laptop.o obj-$(CONFIG_DELL_SMBIOS) += dell-smbios.o -obj-$(CONFIG_DELL_SMBIOS_WMI) += dell-smbios-wmi.o -obj-$(CONFIG_DELL_SMBIOS_SMM) += dell-smbios-smm.o +dell-smbios-objs := dell-smbios-base.o +dell-smbios-$(CONFIG_DELL_SMBIOS_WMI) += dell-smbios-wmi.o +dell-smbios-$(CONFIG_DELL_SMBIOS_SMM) += dell-smbios-smm.o obj-$(CONFIG_DELL_LAPTOP) += dell-laptop.o obj-$(CONFIG_DELL_WMI) += dell-wmi.o obj-$(CONFIG_DELL_WMI_DESCRIPTOR) += dell-wmi-descriptor.o diff --git a/drivers/platform/x86/dell-smbios.c b/drivers/platform/x86/dell-smbios-base.c index 8541cde4cb7d..5bcf8a18f785 100644 --- a/drivers/platform/x86/dell-smbios.c +++ b/drivers/platform/x86/dell-smbios-base.c @@ -36,7 +36,7 @@ static DEFINE_MUTEX(smbios_mutex); struct smbios_device { struct list_head list; struct device *device; - int (*call_fn)(struct calling_interface_buffer *); + int (*call_fn)(struct calling_interface_buffer *arg); }; struct smbios_call { @@ -352,8 +352,10 @@ static void __init parse_da_table(const struct dmi_header *dm) struct calling_interface_structure *table = container_of(dm, struct calling_interface_structure, header); - /* 4 bytes of table header, plus 7 bytes of Dell header, plus at least - 6 bytes of entry */ + /* + * 4 bytes of table header, plus 7 bytes of Dell header + * plus at least 6 bytes of entry + */ if (dm->length < 17) return; @@ -554,7 +556,7 @@ static void free_group(struct platform_device *pdev) static int __init dell_smbios_init(void) { const struct dmi_device *valid; - int ret; + int ret, wmi, smm; valid = dmi_find_device(DMI_DEV_TYPE_OEM_STRING, "Dell System", NULL); if (!valid) { @@ -589,8 +591,24 @@ static int __init dell_smbios_init(void) if (ret) goto fail_create_group; + /* register backends */ + wmi = init_dell_smbios_wmi(); + if (wmi) + pr_debug("Failed to initialize WMI backend: %d\n", wmi); + smm = init_dell_smbios_smm(); + if (smm) + pr_debug("Failed to initialize SMM backend: %d\n", smm); + if (wmi && smm) { + pr_err("No SMBIOS backends available (wmi: %d, smm: %d)\n", + wmi, smm); + goto fail_sysfs; + } + return 0; +fail_sysfs: + free_group(platform_device); + fail_create_group: platform_device_del(platform_device); @@ -607,6 +625,8 @@ fail_platform_driver: static void __exit dell_smbios_exit(void) { + exit_dell_smbios_wmi(); + exit_dell_smbios_smm(); mutex_lock(&smbios_mutex); if (platform_device) { free_group(platform_device); @@ -623,5 +643,6 @@ module_exit(dell_smbios_exit); MODULE_AUTHOR("Matthew Garrett 
<mjg@redhat.com>"); MODULE_AUTHOR("Gabriele Mazzotta <gabriele.mzt@gmail.com>"); MODULE_AUTHOR("Pali Rohár <pali.rohar@gmail.com>"); +MODULE_AUTHOR("Mario Limonciello <mario.limonciello@dell.com>"); MODULE_DESCRIPTION("Common functions for kernel modules using Dell SMBIOS"); MODULE_LICENSE("GPL"); diff --git a/drivers/platform/x86/dell-smbios-smm.c b/drivers/platform/x86/dell-smbios-smm.c index 89f65c4651a0..e9e9da556318 100644 --- a/drivers/platform/x86/dell-smbios-smm.c +++ b/drivers/platform/x86/dell-smbios-smm.c @@ -58,7 +58,7 @@ static const struct dmi_system_id dell_device_table[] __initconst = { }; MODULE_DEVICE_TABLE(dmi, dell_device_table); -static void __init parse_da_table(const struct dmi_header *dm) +static void parse_da_table(const struct dmi_header *dm) { struct calling_interface_structure *table = container_of(dm, struct calling_interface_structure, header); @@ -73,7 +73,7 @@ static void __init parse_da_table(const struct dmi_header *dm) da_command_code = table->cmdIOCode; } -static void __init find_cmd_address(const struct dmi_header *dm, void *dummy) +static void find_cmd_address(const struct dmi_header *dm, void *dummy) { switch (dm->type) { case 0xda: /* Calling interface */ @@ -128,7 +128,7 @@ static bool test_wsmt_enabled(void) return false; } -static int __init dell_smbios_smm_init(void) +int init_dell_smbios_smm(void) { int ret; /* @@ -176,7 +176,7 @@ fail_platform_device_alloc: return ret; } -static void __exit dell_smbios_smm_exit(void) +void exit_dell_smbios_smm(void) { if (platform_device) { dell_smbios_unregister_device(&platform_device->dev); @@ -184,13 +184,3 @@ static void __exit dell_smbios_smm_exit(void) free_page((unsigned long)buffer); } } - -subsys_initcall(dell_smbios_smm_init); -module_exit(dell_smbios_smm_exit); - -MODULE_AUTHOR("Matthew Garrett <mjg@redhat.com>"); -MODULE_AUTHOR("Gabriele Mazzotta <gabriele.mzt@gmail.com>"); -MODULE_AUTHOR("Pali Rohár <pali.rohar@gmail.com>"); -MODULE_AUTHOR("Mario Limonciello <mario.limonciello@dell.com>"); -MODULE_DESCRIPTION("Dell SMBIOS communications over SMI"); -MODULE_LICENSE("GPL"); diff --git a/drivers/platform/x86/dell-smbios-wmi.c b/drivers/platform/x86/dell-smbios-wmi.c index 609557aa5868..fbefedb1c172 100644 --- a/drivers/platform/x86/dell-smbios-wmi.c +++ b/drivers/platform/x86/dell-smbios-wmi.c @@ -228,7 +228,7 @@ static const struct wmi_device_id dell_smbios_wmi_id_table[] = { { }, }; -static void __init parse_b1_table(const struct dmi_header *dm) +static void parse_b1_table(const struct dmi_header *dm) { struct misc_bios_flags_structure *flags = container_of(dm, struct misc_bios_flags_structure, header); @@ -242,7 +242,7 @@ static void __init parse_b1_table(const struct dmi_header *dm) wmi_supported = 1; } -static void __init find_b1(const struct dmi_header *dm, void *dummy) +static void find_b1(const struct dmi_header *dm, void *dummy) { switch (dm->type) { case 0xb1: /* misc bios flags */ @@ -261,7 +261,7 @@ static struct wmi_driver dell_smbios_wmi_driver = { .filter_callback = dell_smbios_wmi_filter, }; -static int __init init_dell_smbios_wmi(void) +int init_dell_smbios_wmi(void) { dmi_walk(find_b1, NULL); @@ -271,15 +271,9 @@ static int __init init_dell_smbios_wmi(void) return wmi_driver_register(&dell_smbios_wmi_driver); } -static void __exit exit_dell_smbios_wmi(void) +void exit_dell_smbios_wmi(void) { wmi_driver_unregister(&dell_smbios_wmi_driver); } -module_init(init_dell_smbios_wmi); -module_exit(exit_dell_smbios_wmi); - MODULE_ALIAS("wmi:" DELL_WMI_SMBIOS_GUID); -MODULE_AUTHOR("Mario 
Limonciello <mario.limonciello@dell.com>"); -MODULE_DESCRIPTION("Dell SMBIOS communications over WMI"); -MODULE_LICENSE("GPL"); diff --git a/drivers/platform/x86/dell-smbios.h b/drivers/platform/x86/dell-smbios.h index 138d478d9adc..d8adaf959740 100644 --- a/drivers/platform/x86/dell-smbios.h +++ b/drivers/platform/x86/dell-smbios.h @@ -75,4 +75,29 @@ int dell_laptop_register_notifier(struct notifier_block *nb); int dell_laptop_unregister_notifier(struct notifier_block *nb); void dell_laptop_call_notifier(unsigned long action, void *data); -#endif +/* for the supported backends */ +#ifdef CONFIG_DELL_SMBIOS_WMI +int init_dell_smbios_wmi(void); +void exit_dell_smbios_wmi(void); +#else /* CONFIG_DELL_SMBIOS_WMI */ +static inline int init_dell_smbios_wmi(void) +{ + return -ENODEV; +} +static inline void exit_dell_smbios_wmi(void) +{} +#endif /* CONFIG_DELL_SMBIOS_WMI */ + +#ifdef CONFIG_DELL_SMBIOS_SMM +int init_dell_smbios_smm(void); +void exit_dell_smbios_smm(void); +#else /* CONFIG_DELL_SMBIOS_SMM */ +static inline int init_dell_smbios_smm(void) +{ + return -ENODEV; +} +static inline void exit_dell_smbios_smm(void) +{} +#endif /* CONFIG_DELL_SMBIOS_SMM */ + +#endif /* _DELL_SMBIOS_H_ */ diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index dd4708c58480..1fc0c0811da4 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c @@ -4310,7 +4310,7 @@ static int _regulator_resume_early(struct device *dev, void *data) rstate = regulator_get_suspend_state(rdev, *state); if (rstate == NULL) - return -EINVAL; + return 0; mutex_lock(&rdev->mutex); diff --git a/drivers/regulator/stm32-vrefbuf.c b/drivers/regulator/stm32-vrefbuf.c index 72c8b3e1022b..e0a9c445ed67 100644 --- a/drivers/regulator/stm32-vrefbuf.c +++ b/drivers/regulator/stm32-vrefbuf.c @@ -51,7 +51,7 @@ static int stm32_vrefbuf_enable(struct regulator_dev *rdev) * arbitrary timeout. 
*/ ret = readl_poll_timeout(priv->base + STM32_VREFBUF_CSR, val, - !(val & STM32_VRR), 650, 10000); + val & STM32_VRR, 650, 10000); if (ret) { dev_err(&rdev->dev, "stm32 vrefbuf timed out!\n"); val = readl_relaxed(priv->base + STM32_VREFBUF_CSR); diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index a7c15f0085e2..ecef8e73d40b 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c @@ -2581,8 +2581,6 @@ int dasd_cancel_req(struct dasd_ccw_req *cqr) case DASD_CQR_QUEUED: /* request was not started - just set to cleared */ cqr->status = DASD_CQR_CLEARED; - if (cqr->callback_data == DASD_SLEEPON_START_TAG) - cqr->callback_data = DASD_SLEEPON_END_TAG; break; case DASD_CQR_IN_IO: /* request in IO - terminate IO and release again */ @@ -3902,9 +3900,12 @@ static int dasd_generic_requeue_all_requests(struct dasd_device *device) wait_event(dasd_flush_wq, (cqr->status != DASD_CQR_CLEAR_PENDING)); - /* mark sleepon requests as ended */ - if (cqr->callback_data == DASD_SLEEPON_START_TAG) - cqr->callback_data = DASD_SLEEPON_END_TAG; + /* + * requeueing requests to the block layer only works + * for block device requests + */ + if (_dasd_requeue_request(cqr)) + continue; /* remove requests from device and block queue */ list_del_init(&cqr->devlist); @@ -3917,13 +3918,6 @@ static int dasd_generic_requeue_all_requests(struct dasd_device *device) cqr = refers; } - /* - * requeue requests to blocklayer will only work - * for block device requests - */ - if (_dasd_requeue_request(cqr)) - continue; - if (cqr->block) list_del_init(&cqr->blocklist); cqr->block->base->discipline->free_cp( @@ -3940,8 +3934,7 @@ static int dasd_generic_requeue_all_requests(struct dasd_device *device) list_splice_tail(&requeue_queue, &device->ccw_queue); spin_unlock_irq(get_ccwdev_lock(device->cdev)); } - /* wake up generic waitqueue for eventually ended sleepon requests */ - wake_up(&generic_waitq); + dasd_schedule_device_bh(device); return rc; } diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c index 1319122e9d12..9169af7dbb43 100644 --- a/drivers/s390/cio/device_fsm.c +++ b/drivers/s390/cio/device_fsm.c @@ -795,6 +795,7 @@ ccw_device_online_timeout(struct ccw_device *cdev, enum dev_event dev_event) ccw_device_set_timeout(cdev, 0); cdev->private->iretry = 255; + cdev->private->async_kill_io_rc = -ETIMEDOUT; ret = ccw_device_cancel_halt_clear(cdev); if (ret == -EBUSY) { ccw_device_set_timeout(cdev, 3*HZ); @@ -871,7 +872,7 @@ ccw_device_killing_irq(struct ccw_device *cdev, enum dev_event dev_event) /* OK, i/o is dead now. Call interrupt handler.
*/ if (cdev->handler) cdev->handler(cdev, cdev->private->intparm, - ERR_PTR(-EIO)); + ERR_PTR(cdev->private->async_kill_io_rc)); } static void @@ -888,14 +889,16 @@ ccw_device_killing_timeout(struct ccw_device *cdev, enum dev_event dev_event) ccw_device_online_verify(cdev, 0); if (cdev->handler) cdev->handler(cdev, cdev->private->intparm, - ERR_PTR(-EIO)); + ERR_PTR(cdev->private->async_kill_io_rc)); } void ccw_device_kill_io(struct ccw_device *cdev) { int ret; + ccw_device_set_timeout(cdev, 0); cdev->private->iretry = 255; + cdev->private->async_kill_io_rc = -EIO; ret = ccw_device_cancel_halt_clear(cdev); if (ret == -EBUSY) { ccw_device_set_timeout(cdev, 3*HZ); diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c index 1caf6a398760..75ce12a24dc2 100644 --- a/drivers/s390/cio/device_ops.c +++ b/drivers/s390/cio/device_ops.c @@ -159,7 +159,7 @@ int ccw_device_clear(struct ccw_device *cdev, unsigned long intparm) } /** - * ccw_device_start_key() - start a s390 channel program with key + * ccw_device_start_timeout_key() - start a s390 channel program with timeout and key * @cdev: target ccw device * @cpa: logical start address of channel program * @intparm: user specific interruption parameter; will be presented back to @@ -170,10 +170,15 @@ int ccw_device_clear(struct ccw_device *cdev, unsigned long intparm) * @key: storage key to be used for the I/O * @flags: additional flags; defines the action to be performed for I/O * processing. + * @expires: timeout value in jiffies * * Start a S/390 channel program. When the interrupt arrives, the * IRQ handler is called, either immediately, delayed (dev-end missing, * or sense required) or never (no IRQ handler registered). + * This function notifies the device driver if the channel program has not + * completed during the time specified by @expires. If a timeout occurs, the + * channel program is terminated via xsch, hsch or csch, and the device's + * interrupt handler will be called with an irb containing ERR_PTR(-%ETIMEDOUT). * Returns: * %0, if the operation was successful; * -%EBUSY, if the device is busy, or status pending; @@ -182,9 +187,9 @@ int ccw_device_clear(struct ccw_device *cdev, unsigned long intparm) * Context: * Interrupts disabled, ccw device lock held */ -int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa, - unsigned long intparm, __u8 lpm, __u8 key, - unsigned long flags) +int ccw_device_start_timeout_key(struct ccw_device *cdev, struct ccw1 *cpa, + unsigned long intparm, __u8 lpm, __u8 key, + unsigned long flags, int expires) { struct subchannel *sch; int ret; @@ -224,6 +229,8 @@ int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa, switch (ret) { case 0: cdev->private->intparm = intparm; + if (expires) + ccw_device_set_timeout(cdev, expires); break; case -EACCES: case -ENODEV: @@ -234,7 +241,7 @@ int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa, } /** - * ccw_device_start_timeout_key() - start a s390 channel program with timeout and key + * ccw_device_start_key() - start a s390 channel program with key * @cdev: target ccw device * @cpa: logical start address of channel program * @intparm: user specific interruption parameter; will be presented back to @@ -245,15 +252,10 @@ int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa, * @key: storage key to be used for the I/O * @flags: additional flags; defines the action to be performed for I/O * processing. - * @expires: timeout value in jiffies * * Start a S/390 channel program. 
When the interrupt arrives, the * IRQ handler is called, either immediately, delayed (dev-end missing, * or sense required) or never (no IRQ handler registered). - * This function notifies the device driver if the channel program has not - * completed during the time specified by @expires. If a timeout occurs, the - * channel program is terminated via xsch, hsch or csch, and the device's - * interrupt handler will be called with an irb containing ERR_PTR(-%ETIMEDOUT). * Returns: * %0, if the operation was successful; * -%EBUSY, if the device is busy, or status pending; @@ -262,19 +264,12 @@ int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa, * Context: * Interrupts disabled, ccw device lock held */ -int ccw_device_start_timeout_key(struct ccw_device *cdev, struct ccw1 *cpa, - unsigned long intparm, __u8 lpm, __u8 key, - unsigned long flags, int expires) +int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa, + unsigned long intparm, __u8 lpm, __u8 key, + unsigned long flags) { - int ret; - - if (!cdev) - return -ENODEV; - ccw_device_set_timeout(cdev, expires); - ret = ccw_device_start_key(cdev, cpa, intparm, lpm, key, flags); - if (ret != 0) - ccw_device_set_timeout(cdev, 0); - return ret; + return ccw_device_start_timeout_key(cdev, cpa, intparm, lpm, key, + flags, 0); } /** @@ -489,18 +484,20 @@ void ccw_device_get_id(struct ccw_device *cdev, struct ccw_dev_id *dev_id) EXPORT_SYMBOL(ccw_device_get_id); /** - * ccw_device_tm_start_key() - perform start function + * ccw_device_tm_start_timeout_key() - perform start function * @cdev: ccw device on which to perform the start function * @tcw: transport-command word to be started * @intparm: user defined parameter to be passed to the interrupt handler * @lpm: mask of paths to use * @key: storage key to use for storage access + * @expires: time span in jiffies after which to abort request * * Start the tcw on the given ccw device. Return zero on success, non-zero * otherwise. */ -int ccw_device_tm_start_key(struct ccw_device *cdev, struct tcw *tcw, - unsigned long intparm, u8 lpm, u8 key) +int ccw_device_tm_start_timeout_key(struct ccw_device *cdev, struct tcw *tcw, + unsigned long intparm, u8 lpm, u8 key, + int expires) { struct subchannel *sch; int rc; @@ -527,37 +524,32 @@ int ccw_device_tm_start_key(struct ccw_device *cdev, struct tcw *tcw, return -EACCES; } rc = cio_tm_start_key(sch, tcw, lpm, key); - if (rc == 0) + if (rc == 0) { cdev->private->intparm = intparm; + if (expires) + ccw_device_set_timeout(cdev, expires); + } return rc; } -EXPORT_SYMBOL(ccw_device_tm_start_key); +EXPORT_SYMBOL(ccw_device_tm_start_timeout_key); /** - * ccw_device_tm_start_timeout_key() - perform start function + * ccw_device_tm_start_key() - perform start function * @cdev: ccw device on which to perform the start function * @tcw: transport-command word to be started * @intparm: user defined parameter to be passed to the interrupt handler * @lpm: mask of paths to use * @key: storage key to use for storage access - * @expires: time span in jiffies after which to abort request * * Start the tcw on the given ccw device. Return zero on success, non-zero * otherwise. 
*/ -int ccw_device_tm_start_timeout_key(struct ccw_device *cdev, struct tcw *tcw, - unsigned long intparm, u8 lpm, u8 key, - int expires) +int ccw_device_tm_start_key(struct ccw_device *cdev, struct tcw *tcw, + unsigned long intparm, u8 lpm, u8 key) { - int ret; - - ccw_device_set_timeout(cdev, expires); - ret = ccw_device_tm_start_key(cdev, tcw, intparm, lpm, key); - if (ret != 0) - ccw_device_set_timeout(cdev, 0); - return ret; + return ccw_device_tm_start_timeout_key(cdev, tcw, intparm, lpm, key, 0); } -EXPORT_SYMBOL(ccw_device_tm_start_timeout_key); +EXPORT_SYMBOL(ccw_device_tm_start_key); /** * ccw_device_tm_start() - perform start function diff --git a/drivers/s390/cio/io_sch.h b/drivers/s390/cio/io_sch.h index af571d8d6925..90e4e3a7841b 100644 --- a/drivers/s390/cio/io_sch.h +++ b/drivers/s390/cio/io_sch.h @@ -157,6 +157,7 @@ struct ccw_device_private { unsigned long intparm; /* user interruption parameter */ struct qdio_irq *qdio_data; struct irb irb; /* device status */ + int async_kill_io_rc; struct senseid senseid; /* SenseID info */ struct pgid pgid[8]; /* path group IDs per chpid*/ struct ccw1 iccws[2]; /* ccws for SNID/SID/SPGID commands */ diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index ca72f3311004..c8b308cfabf1 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -2134,24 +2134,25 @@ int qeth_send_control_data(struct qeth_card *card, int len, } reply->callback = reply_cb; reply->param = reply_param; - if (card->state == CARD_STATE_DOWN) - reply->seqno = QETH_IDX_COMMAND_SEQNO; - else - reply->seqno = card->seqno.ipa++; + init_waitqueue_head(&reply->wait_q); - spin_lock_irqsave(&card->lock, flags); - list_add_tail(&reply->list, &card->cmd_waiter_list); - spin_unlock_irqrestore(&card->lock, flags); while (atomic_cmpxchg(&card->write.irq_pending, 0, 1)) ; - qeth_prepare_control_data(card, len, iob); if (IS_IPA(iob->data)) { cmd = __ipa_cmd(iob); + cmd->hdr.seqno = card->seqno.ipa++; + reply->seqno = cmd->hdr.seqno; event_timeout = QETH_IPA_TIMEOUT; } else { + reply->seqno = QETH_IDX_COMMAND_SEQNO; event_timeout = QETH_TIMEOUT; } + qeth_prepare_control_data(card, len, iob); + + spin_lock_irqsave(&card->lock, flags); + list_add_tail(&reply->list, &card->cmd_waiter_list); + spin_unlock_irqrestore(&card->lock, flags); timeout = jiffies + event_timeout; @@ -2933,7 +2934,7 @@ static void qeth_fill_ipacmd_header(struct qeth_card *card, memset(cmd, 0, sizeof(struct qeth_ipa_cmd)); cmd->hdr.command = command; cmd->hdr.initiator = IPA_CMD_INITIATOR_HOST; - cmd->hdr.seqno = card->seqno.ipa; + /* cmd->hdr.seqno is set by qeth_send_control_data() */ cmd->hdr.adapter_type = qeth_get_ipa_adp_type(card->info.link_type); cmd->hdr.rel_adapter_no = (__u8) card->info.portno; if (card->options.layer2) @@ -3898,10 +3899,12 @@ EXPORT_SYMBOL_GPL(qeth_get_elements_for_frags); int qeth_get_elements_no(struct qeth_card *card, struct sk_buff *skb, int extra_elems, int data_offset) { - int elements = qeth_get_elements_for_range( - (addr_t)skb->data + data_offset, - (addr_t)skb->data + skb_headlen(skb)) + - qeth_get_elements_for_frags(skb); + addr_t end = (addr_t)skb->data + skb_headlen(skb); + int elements = qeth_get_elements_for_frags(skb); + addr_t start = (addr_t)skb->data + data_offset; + + if (start != end) + elements += qeth_get_elements_for_range(start, end); if ((elements + extra_elems) > QETH_MAX_BUFFER_ELEMENTS(card)) { QETH_DBF_MESSAGE(2, "Invalid size of IP packet " diff --git a/drivers/s390/net/qeth_l3.h 
b/drivers/s390/net/qeth_l3.h index bdd45f4dcace..498fe9af2cdb 100644 --- a/drivers/s390/net/qeth_l3.h +++ b/drivers/s390/net/qeth_l3.h @@ -40,8 +40,40 @@ struct qeth_ipaddr { unsigned int pfxlen; } a6; } u; - }; + +static inline bool qeth_l3_addr_match_ip(struct qeth_ipaddr *a1, + struct qeth_ipaddr *a2) +{ + if (a1->proto != a2->proto) + return false; + if (a1->proto == QETH_PROT_IPV6) + return ipv6_addr_equal(&a1->u.a6.addr, &a2->u.a6.addr); + return a1->u.a4.addr == a2->u.a4.addr; +} + +static inline bool qeth_l3_addr_match_all(struct qeth_ipaddr *a1, + struct qeth_ipaddr *a2) +{ + /* Assumes that the pair was obtained via qeth_l3_find_addr_by_ip(), + * so 'proto' and 'addr' match for sure. + * + * For ucast: + * - 'mac' is always 0. + * - 'mask'/'pfxlen' for RXIP/VIPA is always 0. For NORMAL, matching + * values are required to avoid mixups in takeover eligibility. + * + * For mcast: + * - 'mac' is mapped from the IP, and thus always matches. + * - 'mask'/'pfxlen' is always 0. + */ + if (a1->type != a2->type) + return false; + if (a1->proto == QETH_PROT_IPV6) + return a1->u.a6.pfxlen == a2->u.a6.pfxlen; + return a1->u.a4.mask == a2->u.a4.mask; +} + static inline u64 qeth_l3_ipaddr_hash(struct qeth_ipaddr *addr) { u64 ret = 0; diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index b0c888e86cd4..962a04b68dd2 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -67,6 +67,24 @@ void qeth_l3_ipaddr_to_string(enum qeth_prot_versions proto, const __u8 *addr, qeth_l3_ipaddr6_to_string(addr, buf); } +static struct qeth_ipaddr *qeth_l3_find_addr_by_ip(struct qeth_card *card, + struct qeth_ipaddr *query) +{ + u64 key = qeth_l3_ipaddr_hash(query); + struct qeth_ipaddr *addr; + + if (query->is_multicast) { + hash_for_each_possible(card->ip_mc_htable, addr, hnode, key) + if (qeth_l3_addr_match_ip(addr, query)) + return addr; + } else { + hash_for_each_possible(card->ip_htable, addr, hnode, key) + if (qeth_l3_addr_match_ip(addr, query)) + return addr; + } + return NULL; +} + static void qeth_l3_convert_addr_to_bits(u8 *addr, u8 *bits, int len) { int i, j; @@ -120,34 +138,6 @@ static bool qeth_l3_is_addr_covered_by_ipato(struct qeth_card *card, return rc; } -inline int -qeth_l3_ipaddrs_is_equal(struct qeth_ipaddr *addr1, struct qeth_ipaddr *addr2) -{ - return addr1->proto == addr2->proto && - !memcmp(&addr1->u, &addr2->u, sizeof(addr1->u)) && - ether_addr_equal_64bits(addr1->mac, addr2->mac); -} - -static struct qeth_ipaddr * -qeth_l3_ip_from_hash(struct qeth_card *card, struct qeth_ipaddr *tmp_addr) -{ - struct qeth_ipaddr *addr; - - if (tmp_addr->is_multicast) { - hash_for_each_possible(card->ip_mc_htable, addr, - hnode, qeth_l3_ipaddr_hash(tmp_addr)) - if (qeth_l3_ipaddrs_is_equal(tmp_addr, addr)) - return addr; - } else { - hash_for_each_possible(card->ip_htable, addr, - hnode, qeth_l3_ipaddr_hash(tmp_addr)) - if (qeth_l3_ipaddrs_is_equal(tmp_addr, addr)) - return addr; - } - - return NULL; -} - int qeth_l3_delete_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr) { int rc = 0; @@ -162,23 +152,18 @@ int qeth_l3_delete_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr) QETH_CARD_HEX(card, 4, ((char *)&tmp_addr->u.a6.addr) + 8, 8); } - addr = qeth_l3_ip_from_hash(card, tmp_addr); - if (!addr) + addr = qeth_l3_find_addr_by_ip(card, tmp_addr); + if (!addr || !qeth_l3_addr_match_all(addr, tmp_addr)) return -ENOENT; addr->ref_counter--; - if (addr->ref_counter > 0 && (addr->type == QETH_IP_TYPE_NORMAL || - addr->type ==
QETH_IP_TYPE_RXIP)) + if (addr->type == QETH_IP_TYPE_NORMAL && addr->ref_counter > 0) return rc; if (addr->in_progress) return -EINPROGRESS; - if (!qeth_card_hw_is_reachable(card)) { - addr->disp_flag = QETH_DISP_ADDR_DELETE; - return 0; - } - - rc = qeth_l3_deregister_addr_entry(card, addr); + if (qeth_card_hw_is_reachable(card)) + rc = qeth_l3_deregister_addr_entry(card, addr); hash_del(&addr->hnode); kfree(addr); @@ -190,6 +175,7 @@ int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr) { int rc = 0; struct qeth_ipaddr *addr; + char buf[40]; QETH_CARD_TEXT(card, 4, "addip"); @@ -200,8 +186,20 @@ int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr) QETH_CARD_HEX(card, 4, ((char *)&tmp_addr->u.a6.addr) + 8, 8); } - addr = qeth_l3_ip_from_hash(card, tmp_addr); - if (!addr) { + addr = qeth_l3_find_addr_by_ip(card, tmp_addr); + if (addr) { + if (tmp_addr->type != QETH_IP_TYPE_NORMAL) + return -EADDRINUSE; + if (qeth_l3_addr_match_all(addr, tmp_addr)) { + addr->ref_counter++; + return 0; + } + qeth_l3_ipaddr_to_string(tmp_addr->proto, (u8 *)&tmp_addr->u, + buf); + dev_warn(&card->gdev->dev, + "Registering IP address %s failed\n", buf); + return -EADDRINUSE; + } else { addr = qeth_l3_get_addr_buffer(tmp_addr->proto); if (!addr) return -ENOMEM; @@ -241,19 +239,15 @@ int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr) (rc == IPA_RC_LAN_OFFLINE)) { addr->disp_flag = QETH_DISP_ADDR_DO_NOTHING; if (addr->ref_counter < 1) { - qeth_l3_delete_ip(card, addr); + qeth_l3_deregister_addr_entry(card, addr); + hash_del(&addr->hnode); kfree(addr); } } else { hash_del(&addr->hnode); kfree(addr); } - } else { - if (addr->type == QETH_IP_TYPE_NORMAL || - addr->type == QETH_IP_TYPE_RXIP) - addr->ref_counter++; } - return rc; } @@ -321,11 +315,7 @@ static void qeth_l3_recover_ip(struct qeth_card *card) spin_lock_bh(&card->ip_lock); hash_for_each_safe(card->ip_htable, i, tmp, addr, hnode) { - if (addr->disp_flag == QETH_DISP_ADDR_DELETE) { - qeth_l3_deregister_addr_entry(card, addr); - hash_del(&addr->hnode); - kfree(addr); - } else if (addr->disp_flag == QETH_DISP_ADDR_ADD) { + if (addr->disp_flag == QETH_DISP_ADDR_ADD) { if (addr->proto == QETH_PROT_IPV4) { addr->in_progress = 1; spin_unlock_bh(&card->ip_lock); @@ -643,12 +633,7 @@ int qeth_l3_add_vipa(struct qeth_card *card, enum qeth_prot_versions proto, return -ENOMEM; spin_lock_bh(&card->ip_lock); - - if (qeth_l3_ip_from_hash(card, ipaddr)) - rc = -EEXIST; - else - rc = qeth_l3_add_ip(card, ipaddr); - + rc = qeth_l3_add_ip(card, ipaddr); spin_unlock_bh(&card->ip_lock); kfree(ipaddr); @@ -713,12 +698,7 @@ int qeth_l3_add_rxip(struct qeth_card *card, enum qeth_prot_versions proto, return -ENOMEM; spin_lock_bh(&card->ip_lock); - - if (qeth_l3_ip_from_hash(card, ipaddr)) - rc = -EEXIST; - else - rc = qeth_l3_add_ip(card, ipaddr); - + rc = qeth_l3_add_ip(card, ipaddr); spin_unlock_bh(&card->ip_lock); kfree(ipaddr); @@ -1239,8 +1219,9 @@ qeth_l3_add_mc_to_hash(struct qeth_card *card, struct in_device *in4_dev) tmp->u.a4.addr = be32_to_cpu(im4->multiaddr); tmp->is_multicast = 1; - ipm = qeth_l3_ip_from_hash(card, tmp); + ipm = qeth_l3_find_addr_by_ip(card, tmp); if (ipm) { + /* for mcast, by-IP match means full match */ ipm->disp_flag = QETH_DISP_ADDR_DO_NOTHING; } else { ipm = qeth_l3_get_addr_buffer(QETH_PROT_IPV4); @@ -1319,8 +1300,9 @@ static void qeth_l3_add_mc6_to_hash(struct qeth_card *card, sizeof(struct in6_addr)); tmp->is_multicast = 1; - ipm = qeth_l3_ip_from_hash(card, tmp); + ipm = 
qeth_l3_find_addr_by_ip(card, tmp); if (ipm) { + /* for mcast, by-IP match means full match */ ipm->disp_flag = QETH_DISP_ADDR_DO_NOTHING; continue; } @@ -2450,11 +2432,12 @@ static void qeth_tso_fill_header(struct qeth_card *card, static int qeth_l3_get_elements_no_tso(struct qeth_card *card, struct sk_buff *skb, int extra_elems) { - addr_t tcpdptr = (addr_t)tcp_hdr(skb) + tcp_hdrlen(skb); - int elements = qeth_get_elements_for_range( - tcpdptr, - (addr_t)skb->data + skb_headlen(skb)) + - qeth_get_elements_for_frags(skb); + addr_t start = (addr_t)tcp_hdr(skb) + tcp_hdrlen(skb); + addr_t end = (addr_t)skb->data + skb_headlen(skb); + int elements = qeth_get_elements_for_frags(skb); + + if (start != end) + elements += qeth_get_elements_for_range(start, end); if ((elements + extra_elems) > QETH_MAX_BUFFER_ELEMENTS(card)) { QETH_DBF_MESSAGE(2, diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c index 57bf43e34863..dd9464920456 100644 --- a/drivers/scsi/hosts.c +++ b/drivers/scsi/hosts.c @@ -328,8 +328,6 @@ static void scsi_host_dev_release(struct device *dev) if (shost->work_q) destroy_workqueue(shost->work_q); - destroy_rcu_head(&shost->rcu); - if (shost->shost_state == SHOST_CREATED) { /* * Free the shost_dev device name here if scsi_host_alloc() @@ -404,7 +402,6 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize) INIT_LIST_HEAD(&shost->starved_list); init_waitqueue_head(&shost->host_wait); mutex_init(&shost->scan_mutex); - init_rcu_head(&shost->rcu); index = ida_simple_get(&host_index_ida, 0, 0, GFP_KERNEL); if (index < 0) diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index 073ced07e662..dc8e850fbfd2 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -216,36 +216,30 @@ inline void megasas_return_cmd_fusion(struct megasas_instance *instance, /** * megasas_fire_cmd_fusion - Sends command to the FW * @instance: Adapter soft state - * @req_desc: 32bit or 64bit Request descriptor + * @req_desc: 64bit Request descriptor * - * Perform PCI Write. Ventura supports 32 bit Descriptor. - * Prior to Ventura (12G) MR controller supports 64 bit Descriptor. + * Perform PCI Write. 
*/ static void megasas_fire_cmd_fusion(struct megasas_instance *instance, union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc) { - if (instance->adapter_type == VENTURA_SERIES) - writel(le32_to_cpu(req_desc->u.low), - &instance->reg_set->inbound_single_queue_port); - else { #if defined(writeq) && defined(CONFIG_64BIT) - u64 req_data = (((u64)le32_to_cpu(req_desc->u.high) << 32) | - le32_to_cpu(req_desc->u.low)); + u64 req_data = (((u64)le32_to_cpu(req_desc->u.high) << 32) | + le32_to_cpu(req_desc->u.low)); - writeq(req_data, &instance->reg_set->inbound_low_queue_port); + writeq(req_data, &instance->reg_set->inbound_low_queue_port); #else - unsigned long flags; - spin_lock_irqsave(&instance->hba_lock, flags); - writel(le32_to_cpu(req_desc->u.low), - &instance->reg_set->inbound_low_queue_port); - writel(le32_to_cpu(req_desc->u.high), - &instance->reg_set->inbound_high_queue_port); - mmiowb(); - spin_unlock_irqrestore(&instance->hba_lock, flags); + unsigned long flags; + spin_lock_irqsave(&instance->hba_lock, flags); + writel(le32_to_cpu(req_desc->u.low), + &instance->reg_set->inbound_low_queue_port); + writel(le32_to_cpu(req_desc->u.high), + &instance->reg_set->inbound_high_queue_port); + mmiowb(); + spin_unlock_irqrestore(&instance->hba_lock, flags); #endif - } } /** @@ -982,7 +976,6 @@ megasas_ioc_init_fusion(struct megasas_instance *instance) const char *sys_info; MFI_CAPABILITIES *drv_ops; u32 scratch_pad_2; - unsigned long flags; ktime_t time; bool cur_fw_64bit_dma_capable; @@ -1121,14 +1114,7 @@ megasas_ioc_init_fusion(struct megasas_instance *instance) break; } - /* For Ventura also IOC INIT required 64 bit Descriptor write. */ - spin_lock_irqsave(&instance->hba_lock, flags); - writel(le32_to_cpu(req_desc.u.low), - &instance->reg_set->inbound_low_queue_port); - writel(le32_to_cpu(req_desc.u.high), - &instance->reg_set->inbound_high_queue_port); - mmiowb(); - spin_unlock_irqrestore(&instance->hba_lock, flags); + megasas_fire_cmd_fusion(instance, &req_desc); wait_and_poll(instance, cmd, MFI_POLL_TIMEOUT_SECS); diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c index 59a87ca328d3..0aafbfd1b746 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.c +++ b/drivers/scsi/mpt3sas/mpt3sas_base.c @@ -6297,14 +6297,14 @@ _base_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase) } /** - * _wait_for_commands_to_complete - reset controller + * mpt3sas_wait_for_commands_to_complete - reset controller * @ioc: Pointer to MPT_ADAPTER structure * * This function is waiting 10s for all pending commands to complete * prior to putting controller in reset. 
*/ -static void -_wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc) +void +mpt3sas_wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc) { u32 ioc_state; @@ -6377,7 +6377,7 @@ mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc, is_fault = 1; } _base_reset_handler(ioc, MPT3_IOC_PRE_RESET); - _wait_for_commands_to_complete(ioc); + mpt3sas_wait_for_commands_to_complete(ioc); _base_mask_interrupts(ioc); r = _base_make_ioc_ready(ioc, type); if (r) diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h index 789bc421424b..99ccf83b8c51 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.h +++ b/drivers/scsi/mpt3sas/mpt3sas_base.h @@ -1433,6 +1433,9 @@ void mpt3sas_base_update_missing_delay(struct MPT3SAS_ADAPTER *ioc, int mpt3sas_port_enable(struct MPT3SAS_ADAPTER *ioc); +void +mpt3sas_wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc); + /* scsih shared API */ struct scsi_cmnd *mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c index 74fca184dba9..c2ea13c7e37e 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c @@ -2835,7 +2835,8 @@ scsih_abort(struct scsi_cmnd *scmd) _scsih_tm_display_info(ioc, scmd); sas_device_priv_data = scmd->device->hostdata; - if (!sas_device_priv_data || !sas_device_priv_data->sas_target) { + if (!sas_device_priv_data || !sas_device_priv_data->sas_target || + ioc->remove_host) { sdev_printk(KERN_INFO, scmd->device, "device been deleted! scmd(%p)\n", scmd); scmd->result = DID_NO_CONNECT << 16; @@ -2898,7 +2899,8 @@ scsih_dev_reset(struct scsi_cmnd *scmd) _scsih_tm_display_info(ioc, scmd); sas_device_priv_data = scmd->device->hostdata; - if (!sas_device_priv_data || !sas_device_priv_data->sas_target) { + if (!sas_device_priv_data || !sas_device_priv_data->sas_target || + ioc->remove_host) { sdev_printk(KERN_INFO, scmd->device, "device been deleted! scmd(%p)\n", scmd); scmd->result = DID_NO_CONNECT << 16; @@ -2961,7 +2963,8 @@ scsih_target_reset(struct scsi_cmnd *scmd) _scsih_tm_display_info(ioc, scmd); sas_device_priv_data = scmd->device->hostdata; - if (!sas_device_priv_data || !sas_device_priv_data->sas_target) { + if (!sas_device_priv_data || !sas_device_priv_data->sas_target || + ioc->remove_host) { starget_printk(KERN_INFO, starget, "target been deleted! 
scmd(%p)\n", scmd); scmd->result = DID_NO_CONNECT << 16; @@ -3019,7 +3022,7 @@ scsih_host_reset(struct scsi_cmnd *scmd) ioc->name, scmd); scsi_print_command(scmd); - if (ioc->is_driver_loading) { + if (ioc->is_driver_loading || ioc->remove_host) { pr_info(MPT3SAS_FMT "Blocking the host reset\n", ioc->name); r = FAILED; @@ -4453,7 +4456,7 @@ _scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc) st = scsi_cmd_priv(scmd); mpt3sas_base_clear_st(ioc, st); scsi_dma_unmap(scmd); - if (ioc->pci_error_recovery) + if (ioc->pci_error_recovery || ioc->remove_host) scmd->result = DID_NO_CONNECT << 16; else scmd->result = DID_RESET << 16; @@ -9739,6 +9742,10 @@ static void scsih_remove(struct pci_dev *pdev) unsigned long flags; ioc->remove_host = 1; + + mpt3sas_wait_for_commands_to_complete(ioc); + _scsih_flush_running_cmds(ioc); + _scsih_fw_event_cleanup_queue(ioc); spin_lock_irqsave(&ioc->fw_event_lock, flags); @@ -9815,6 +9822,10 @@ scsih_shutdown(struct pci_dev *pdev) unsigned long flags; ioc->remove_host = 1; + + mpt3sas_wait_for_commands_to_complete(ioc); + _scsih_flush_running_cmds(ioc); + _scsih_fw_event_cleanup_queue(ioc); spin_lock_irqsave(&ioc->fw_event_lock, flags); diff --git a/drivers/scsi/qedi/qedi_fw.c b/drivers/scsi/qedi/qedi_fw.c index 667d7697ba01..d09afe1b567d 100644 --- a/drivers/scsi/qedi/qedi_fw.c +++ b/drivers/scsi/qedi/qedi_fw.c @@ -762,6 +762,11 @@ static void qedi_process_cmd_cleanup_resp(struct qedi_ctx *qedi, iscsi_cid = cqe->conn_id; qedi_conn = qedi->cid_que.conn_cid_tbl[iscsi_cid]; + if (!qedi_conn) { + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, + "icid not found 0x%x\n", cqe->conn_id); + return; + } /* Based on this itt get the corresponding qedi_cmd */ spin_lock_bh(&qedi_conn->tmf_work_lock); diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index be7d6824581a..c9689f97c307 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -261,9 +261,9 @@ struct name_list_extended { struct get_name_list_extended *l; dma_addr_t ldma; - struct list_head fcports; /* protect by sess_list */ + struct list_head fcports; + spinlock_t fcports_lock; u32 size; - u8 sent; }; /* * Timeout timer counts in seconds @@ -2217,6 +2217,7 @@ typedef struct { /* FCP-4 types */ #define FC4_TYPE_FCP_SCSI 0x08 +#define FC4_TYPE_NVME 0x28 #define FC4_TYPE_OTHER 0x0 #define FC4_TYPE_UNKNOWN 0xff diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c index 5bf9a59432f6..403fa096f8c8 100644 --- a/drivers/scsi/qla2xxx/qla_gs.c +++ b/drivers/scsi/qla2xxx/qla_gs.c @@ -3179,6 +3179,7 @@ done_free_sp: sp->free(sp); fcport->flags &= ~FCF_ASYNC_SENT; done: + fcport->flags &= ~FCF_ASYNC_ACTIVE; return rval; } @@ -3370,6 +3371,7 @@ done_free_sp: sp->free(sp); fcport->flags &= ~FCF_ASYNC_SENT; done: + fcport->flags &= ~FCF_ASYNC_ACTIVE; return rval; } @@ -3971,6 +3973,9 @@ out: spin_lock_irqsave(&vha->work_lock, flags); vha->scan.scan_flags &= ~SF_SCANNING; spin_unlock_irqrestore(&vha->work_lock, flags); + + if ((fc4type == FC4_TYPE_FCP_SCSI) && vha->flags.nvme_enabled) + qla24xx_async_gpnft(vha, FC4_TYPE_NVME); } static void qla2x00_async_gpnft_gnnft_sp_done(void *s, int res) diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 2dea1129d396..00329dda6179 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -213,6 +213,7 @@ done_free_sp: sp->free(sp); fcport->flags &= ~FCF_ASYNC_SENT; done: + fcport->flags &= ~FCF_ASYNC_ACTIVE; return rval; } @@ -263,7 +264,7 @@ 
qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport) done_free_sp: sp->free(sp); done: - fcport->flags &= ~FCF_ASYNC_SENT; + fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE); return rval; } @@ -271,6 +272,7 @@ void qla2x00_async_prlo_done(struct scsi_qla_host *vha, fc_port_t *fcport, uint16_t *data) { + fcport->flags &= ~FCF_ASYNC_ACTIVE; /* Don't re-login in target mode */ if (!fcport->tgt_session) qla2x00_mark_device_lost(vha, fcport, 1, 0); @@ -284,6 +286,7 @@ qla2x00_async_prlo_sp_done(void *s, int res) struct srb_iocb *lio = &sp->u.iocb_cmd; struct scsi_qla_host *vha = sp->vha; + sp->fcport->flags &= ~FCF_ASYNC_ACTIVE; if (!test_bit(UNLOADING, &vha->dpc_flags)) qla2x00_post_async_prlo_done_work(sp->fcport->vha, sp->fcport, lio->u.logio.data); @@ -322,6 +325,7 @@ qla2x00_async_prlo(struct scsi_qla_host *vha, fc_port_t *fcport) done_free_sp: sp->free(sp); done: + fcport->flags &= ~FCF_ASYNC_ACTIVE; return rval; } @@ -375,6 +379,8 @@ qla2x00_async_adisc_sp_done(void *ptr, int res) "Async done-%s res %x %8phC\n", sp->name, res, sp->fcport->port_name); + sp->fcport->flags &= ~FCF_ASYNC_SENT; + memset(&ea, 0, sizeof(ea)); ea.event = FCME_ADISC_DONE; ea.rc = res; @@ -425,7 +431,7 @@ qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport, done_free_sp: sp->free(sp); done: - fcport->flags &= ~FCF_ASYNC_SENT; + fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE); qla2x00_post_async_adisc_work(vha, fcport, data); return rval; } @@ -643,8 +649,7 @@ qla24xx_async_gnl_sp_done(void *s, int res) (loop_id & 0x7fff)); } - spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); - vha->gnl.sent = 0; + spin_lock_irqsave(&vha->gnl.fcports_lock, flags); INIT_LIST_HEAD(&h); fcport = tf = NULL; @@ -653,12 +658,16 @@ qla24xx_async_gnl_sp_done(void *s, int res) list_for_each_entry_safe(fcport, tf, &h, gnl_entry) { list_del_init(&fcport->gnl_entry); + spin_lock(&vha->hw->tgt.sess_lock); fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE); + spin_unlock(&vha->hw->tgt.sess_lock); ea.fcport = fcport; qla2x00_fcport_event_handler(vha, &ea); } + spin_unlock_irqrestore(&vha->gnl.fcports_lock, flags); + spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); /* create new fcport if fw has knowledge of new sessions */ for (i = 0; i < n; i++) { port_id_t id; @@ -710,18 +719,21 @@ int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport) ql_dbg(ql_dbg_disc, vha, 0x20d9, "Async-gnlist WWPN %8phC \n", fcport->port_name); - spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); + spin_lock_irqsave(&vha->gnl.fcports_lock, flags); + if (!list_empty(&fcport->gnl_entry)) { + spin_unlock_irqrestore(&vha->gnl.fcports_lock, flags); + rval = QLA_SUCCESS; + goto done; + } + + spin_lock(&vha->hw->tgt.sess_lock); fcport->disc_state = DSC_GNL; fcport->last_rscn_gen = fcport->rscn_gen; fcport->last_login_gen = fcport->login_gen; + spin_unlock(&vha->hw->tgt.sess_lock); list_add_tail(&fcport->gnl_entry, &vha->gnl.fcports); - if (vha->gnl.sent) { - spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); - return QLA_SUCCESS; - } - vha->gnl.sent = 1; - spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); + spin_unlock_irqrestore(&vha->gnl.fcports_lock, flags); sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); if (!sp) @@ -1049,6 +1061,7 @@ void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea) fc_port_t *fcport = ea->fcport; struct port_database_24xx *pd; struct srb *sp = ea->sp; + uint8_t ls; pd = (struct port_database_24xx *)sp->u.iocb_cmd.u.mbx.in; @@ -1061,7 +1074,12 @@ void 
qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea) if (fcport->disc_state == DSC_DELETE_PEND) return; - switch (pd->current_login_state) { + if (fcport->fc4f_nvme) + ls = pd->current_login_state >> 4; + else + ls = pd->current_login_state & 0xf; + + switch (ls) { case PDS_PRLI_COMPLETE: __qla24xx_parse_gpdb(vha, fcport, pd); break; @@ -1151,8 +1169,9 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport) if (fcport->scan_state != QLA_FCPORT_FOUND) return 0; - if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) || - (fcport->fw_login_state == DSC_LS_PRLI_PEND)) + if ((fcport->loop_id != FC_NO_LOOP_ID) && + ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) || + (fcport->fw_login_state == DSC_LS_PRLI_PEND))) return 0; if (fcport->fw_login_state == DSC_LS_PLOGI_COMP) { @@ -1527,6 +1546,7 @@ qla24xx_abort_sp_done(void *ptr, int res) srb_t *sp = ptr; struct srb_iocb *abt = &sp->u.iocb_cmd; + del_timer(&sp->u.iocb_cmd.timer); complete(&abt->u.abt.comp); } @@ -1791,6 +1811,7 @@ qla2x00_async_logout_done(struct scsi_qla_host *vha, fc_port_t *fcport, qla2x00_mark_device_lost(vha, fcport, 1, 0); qlt_logo_completion_handler(fcport, data[0]); fcport->login_gen++; + fcport->flags &= ~FCF_ASYNC_ACTIVE; return; } @@ -1798,6 +1819,7 @@ void qla2x00_async_adisc_done(struct scsi_qla_host *vha, fc_port_t *fcport, uint16_t *data) { + fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE); if (data[0] == MBS_COMMAND_COMPLETE) { qla2x00_update_fcport(vha, fcport); @@ -1805,7 +1827,6 @@ qla2x00_async_adisc_done(struct scsi_qla_host *vha, fc_port_t *fcport, } /* Retry login. */ - fcport->flags &= ~FCF_ASYNC_SENT; if (data[1] & QLA_LOGIO_LOGIN_RETRIED) set_bit(RELOGIN_NEEDED, &vha->dpc_flags); else diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index afcb5567998a..285911e81728 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -4577,6 +4577,7 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht, spin_lock_init(&vha->work_lock); spin_lock_init(&vha->cmd_list_lock); + spin_lock_init(&vha->gnl.fcports_lock); init_waitqueue_head(&vha->fcport_waitQ); init_waitqueue_head(&vha->vref_waitq); @@ -4806,9 +4807,12 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e) fcport->d_id = e->u.new_sess.id; fcport->flags |= FCF_FABRIC_DEVICE; fcport->fw_login_state = DSC_LS_PLOGI_PEND; - if (e->u.new_sess.fc4_type == FC4_TYPE_FCP_SCSI) + if (e->u.new_sess.fc4_type == FC4_TYPE_FCP_SCSI) { fcport->fc4_type = FC4_TYPE_FCP_SCSI; - + } else if (e->u.new_sess.fc4_type == FC4_TYPE_NVME) { + fcport->fc4_type = FC4_TYPE_OTHER; + fcport->fc4f_nvme = FC4_TYPE_NVME; + } memcpy(fcport->port_name, e->u.new_sess.port_name, WWN_SIZE); } else { @@ -4877,6 +4881,8 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e) } qlt_plogi_ack_unref(vha, pla); } else { + fc_port_t *dfcp = NULL; + spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); tfcp = qla2x00_find_fcport_by_nportid(vha, &e->u.new_sess.id, 1); @@ -4899,11 +4905,13 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e) default: fcport->login_pause = 1; tfcp->conflict = fcport; - qlt_schedule_sess_for_deletion(tfcp); + dfcp = tfcp; break; } } spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); + if (dfcp) + qlt_schedule_sess_for_deletion(tfcp); wwn = wwn_to_u64(fcport->node_name); diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index 
896b2d8bd803..b49ac85f3de2 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -1224,10 +1224,10 @@ static void qla24xx_chk_fcp_state(struct fc_port *sess) } } -/* ha->tgt.sess_lock supposed to be held on entry */ void qlt_schedule_sess_for_deletion(struct fc_port *sess) { struct qla_tgt *tgt = sess->tgt; + struct qla_hw_data *ha = sess->vha->hw; unsigned long flags; if (sess->disc_state == DSC_DELETE_PEND) @@ -1244,16 +1244,16 @@ void qlt_schedule_sess_for_deletion(struct fc_port *sess) return; } + spin_lock_irqsave(&ha->tgt.sess_lock, flags); if (sess->deleted == QLA_SESS_DELETED) sess->logout_on_delete = 0; - spin_lock_irqsave(&sess->vha->work_lock, flags); if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) { - spin_unlock_irqrestore(&sess->vha->work_lock, flags); + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); return; } sess->deleted = QLA_SESS_DELETION_IN_PROGRESS; - spin_unlock_irqrestore(&sess->vha->work_lock, flags); + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); sess->disc_state = DSC_DELETE_PEND; @@ -1262,13 +1262,10 @@ void qlt_schedule_sess_for_deletion(struct fc_port *sess) ql_dbg(ql_dbg_tgt, sess->vha, 0xe001, "Scheduling sess %p for deletion\n", sess); - /* use cancel to push work element through before re-queue */ - cancel_work_sync(&sess->del_work); INIT_WORK(&sess->del_work, qla24xx_delete_sess_fn); - queue_work(sess->vha->hw->wq, &sess->del_work); + WARN_ON(!queue_work(sess->vha->hw->wq, &sess->del_work)); } -/* ha->tgt.sess_lock supposed to be held on entry */ static void qlt_clear_tgt_db(struct qla_tgt *tgt) { struct fc_port *sess; @@ -1451,8 +1448,8 @@ qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport, int max_gen) ql_dbg(ql_dbg_tgt_mgt, vha, 0xf008, "qla_tgt_fc_port_deleted %p", sess); sess->local = 1; - qlt_schedule_sess_for_deletion(sess); spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); + qlt_schedule_sess_for_deletion(sess); } static inline int test_tgt_sess_count(struct qla_tgt *tgt) @@ -1512,10 +1509,8 @@ int qlt_stop_phase1(struct qla_tgt *tgt) * Lock is needed, because we still can get an incoming packet. */ mutex_lock(&vha->vha_tgt.tgt_mutex); - spin_lock_irqsave(&ha->tgt.sess_lock, flags); tgt->tgt_stop = 1; qlt_clear_tgt_db(tgt); - spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); mutex_unlock(&vha->vha_tgt.tgt_mutex); mutex_unlock(&qla_tgt_mutex); diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c index d042915ce895..ca53a5f785ee 100644 --- a/drivers/scsi/scsi_error.c +++ b/drivers/scsi/scsi_error.c @@ -223,7 +223,8 @@ static void scsi_eh_reset(struct scsi_cmnd *scmd) static void scsi_eh_inc_host_failed(struct rcu_head *head) { - struct Scsi_Host *shost = container_of(head, typeof(*shost), rcu); + struct scsi_cmnd *scmd = container_of(head, typeof(*scmd), rcu); + struct Scsi_Host *shost = scmd->device->host; unsigned long flags; spin_lock_irqsave(shost->host_lock, flags); @@ -259,7 +260,7 @@ void scsi_eh_scmd_add(struct scsi_cmnd *scmd) * Ensure that all tasks observe the host state change before the * host_failed change. 
*/ - call_rcu(&shost->rcu, scsi_eh_inc_host_failed); + call_rcu(&scmd->rcu, scsi_eh_inc_host_failed); } /** diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index a86df9ca7d1c..c84f931388f2 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -671,6 +671,7 @@ static bool scsi_end_request(struct request *req, blk_status_t error, if (!blk_rq_is_scsi(req)) { WARN_ON_ONCE(!(cmd->flags & SCMD_INITIALIZED)); cmd->flags &= ~SCMD_INITIALIZED; + destroy_rcu_head(&cmd->rcu); } if (req->mq_ctx) { @@ -720,6 +721,8 @@ static blk_status_t __scsi_error_from_host_byte(struct scsi_cmnd *cmd, int result) { switch (host_byte(result)) { + case DID_OK: + return BLK_STS_OK; case DID_TRANSPORT_FAILFAST: return BLK_STS_TRANSPORT; case DID_TARGET_FAILURE: @@ -1151,6 +1154,7 @@ static void scsi_initialize_rq(struct request *rq) struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq); scsi_req_init(&cmd->req); + init_rcu_head(&cmd->rcu); cmd->jiffies_at_alloc = jiffies; cmd->retries = 0; } diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c index 6be5ab32c94f..8c51d628b52e 100644 --- a/drivers/scsi/storvsc_drv.c +++ b/drivers/scsi/storvsc_drv.c @@ -1311,7 +1311,8 @@ static int storvsc_do_io(struct hv_device *device, */ cpumask_and(&alloced_mask, &stor_device->alloced_cpus, cpumask_of_node(cpu_to_node(q_num))); - for_each_cpu(tgt_cpu, &alloced_mask) { + for_each_cpu_wrap(tgt_cpu, &alloced_mask, + outgoing_channel->target_cpu + 1) { if (tgt_cpu != outgoing_channel->target_cpu) { outgoing_channel = stor_device->stor_chns[tgt_cpu]; diff --git a/drivers/video/fbdev/sbuslib.c b/drivers/video/fbdev/sbuslib.c index af6fc97f4ba4..a436d44f1b7f 100644 --- a/drivers/video/fbdev/sbuslib.c +++ b/drivers/video/fbdev/sbuslib.c @@ -122,7 +122,7 @@ int sbusfb_ioctl_helper(unsigned long cmd, unsigned long arg, unsigned char __user *ured; unsigned char __user *ugreen; unsigned char __user *ublue; - int index, count, i; + unsigned int index, count, i; if (get_user(index, &c->index) || __get_user(count, &c->count) || @@ -161,7 +161,7 @@ int sbusfb_ioctl_helper(unsigned long cmd, unsigned long arg, unsigned char __user *ugreen; unsigned char __user *ublue; struct fb_cmap *cmap = &info->cmap; - int index, count, i; + unsigned int index, count, i; u8 red, green, blue; if (get_user(index, &c->index) || diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c index eb30f3e09a47..71458f493cf8 100644 --- a/drivers/virtio/virtio_ring.c +++ b/drivers/virtio/virtio_ring.c @@ -428,8 +428,6 @@ unmap_release: i = virtio16_to_cpu(_vq->vdev, vq->vring.desc[i].next); } - vq->vq.num_free += total_sg; - if (indirect) kfree(desc); diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c index 74888cacd0b0..ec9eb4fba59c 100644 --- a/drivers/xen/xenbus/xenbus_probe.c +++ b/drivers/xen/xenbus/xenbus_probe.c @@ -466,8 +466,11 @@ int xenbus_probe_node(struct xen_bus_type *bus, /* Register with generic device framework. */ err = device_register(&xendev->dev); - if (err) + if (err) { + put_device(&xendev->dev); + xendev = NULL; goto fail; + } return 0; fail: |
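The s390 CIO hunks above fold the plain start functions into their timeout-taking counterparts: ccw_device_start_key() and ccw_device_tm_start_key() become thin wrappers that pass an expires of 0, and the timer is now armed only after the start actually succeeded. A minimal userspace sketch of that consolidation pattern, with every name (struct chan_dev, chan_start_timeout(), and so on) invented for illustration rather than taken from the kernel API:

#include <stdio.h>

struct chan_dev {
	int busy;
	int timeout;	/* 0: no timer armed */
};

/* Single implementation; like ccw_device_tm_start_timeout_key() it
 * arms the timer only on the success path. */
static int chan_start_timeout(struct chan_dev *dev, int expires)
{
	if (dev->busy)
		return -1;		/* stands in for -EBUSY */
	dev->busy = 1;
	if (expires)
		dev->timeout = expires;	/* ccw_device_set_timeout() analogue */
	return 0;
}

/* The non-timeout entry point is reduced to a wrapper, as in the patch. */
static int chan_start(struct chan_dev *dev)
{
	return chan_start_timeout(dev, 0);
}

int main(void)
{
	struct chan_dev dev = { 0, 0 };

	printf("plain start: rc=%d timeout=%d\n", chan_start(&dev), dev.timeout);
	dev.busy = 0;
	printf("timed start: rc=%d timeout=%d\n",
	       chan_start_timeout(&dev, 100), dev.timeout);
	return 0;
}

Keeping a single implementation also removes the old failure dance in which the wrapper armed the timer up front and had to disarm it again whenever the start returned nonzero.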
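The qeth_l3 hunks split address comparison in two: qeth_l3_addr_match_ip() is the cheap lookup key, while qeth_l3_addr_match_all() is the full identity check that delete and refcount decisions require. A self-contained sketch of that two-stage discipline, assuming invented names and a flat array in place of the kernel hash table:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct ipaddr {
	unsigned int ip;	/* stand-in for u.a4.addr */
	unsigned int mask;
	int type;
	int refcnt;		/* 0: slot unused */
};

/* Stage 1: by-IP match only - good enough to locate a candidate. */
static bool match_ip(const struct ipaddr *a, const struct ipaddr *b)
{
	return a->ip == b->ip;
}

/* Stage 2: full identity - required before touching the refcount. */
static bool match_all(const struct ipaddr *a, const struct ipaddr *b)
{
	return a->type == b->type && a->mask == b->mask;
}

static struct ipaddr table[8];

static struct ipaddr *find_by_ip(const struct ipaddr *query)
{
	for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		if (table[i].refcnt && match_ip(&table[i], query))
			return &table[i];
	return NULL;
}

/* Mirrors the patched qeth_l3_delete_ip(): a by-IP hit that is not a
 * full match is rejected instead of deleting a different entry. */
static int delete_addr(const struct ipaddr *query)
{
	struct ipaddr *addr = find_by_ip(query);

	if (!addr || !match_all(addr, query))
		return -1;	/* -ENOENT analogue */
	addr->refcnt--;
	return 0;
}

int main(void)
{
	struct ipaddr vipa = { 0x0a000001, 0, 1, 1 };
	struct ipaddr normal = { 0x0a000001, 0xffffff00, 0, 1 };

	table[0] = vipa;
	printf("delete as NORMAL: %d\n", delete_addr(&normal));	/* -1 */
	printf("delete as VIPA:   %d\n", delete_addr(&vipa));	/* 0 */
	return 0;
}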
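Both element-counting hunks (qeth_get_elements_no() and the TSO variant) add the same start != end guard before calling qeth_get_elements_for_range(). Assuming that helper rounds like the kernel's PFN_UP/PFN_DOWN arithmetic, an empty but non-page-aligned range would still be counted as one buffer element, which is exactly the overestimate the guard avoids. A small sketch of that arithmetic, with the range helper modeled on, not copied from, the kernel code:

#include <stdio.h>

#define PAGE_SIZE 4096UL

/* Pages spanned by [start, end): PFN_UP(end) - PFN_DOWN(start).
 * For start == end on an unaligned address the result is 1, not 0. */
static unsigned long elements_for_range(unsigned long start, unsigned long end)
{
	return (end + PAGE_SIZE - 1) / PAGE_SIZE - start / PAGE_SIZE;
}

/* The patched callers skip the helper entirely for empty ranges. */
static unsigned long elements(unsigned long start, unsigned long end)
{
	return start != end ? elements_for_range(start, end) : 0;
}

int main(void)
{
	/* empty, unaligned: unguarded helper says 1, guarded caller says 0 */
	printf("%lu vs %lu\n",
	       elements_for_range(4100, 4100), elements(4100, 4100));
	/* ten bytes straddling a page boundary: two elements either way */
	printf("%lu\n", elements(4090, 4100));
	return 0;
}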