Diffstat (limited to 'drivers')
 drivers/acpi/nfit.c                      |  11
 drivers/acpi/utils.c                     |   6
 drivers/ata/libata-core.c                |   6
 drivers/clk/at91/clk-programmable.c      |   2
 drivers/clk/sunxi/clk-sun4i-display.c    |   5
 drivers/clk/sunxi/clk-sun4i-tcon-ch1.c   |   4
 drivers/crypto/qat/qat_common/Makefile   |   1
 drivers/gpio/Kconfig                     |   7
 drivers/gpu/drm/i915/i915_drv.c          |   4
 drivers/gpu/drm/i915/i915_drv.h          |  12
 drivers/gpu/drm/i915/i915_gem_stolen.c   |   6
 drivers/gpu/drm/i915/i915_irq.c          |   4
 drivers/gpu/drm/i915/i915_reg.h          |  21
 drivers/gpu/drm/i915/intel_csr.c         |  30
 drivers/gpu/drm/i915/intel_display.c     |   6
 drivers/gpu/drm/i915/intel_dp.c          |   2
 drivers/gpu/drm/i915/intel_lrc.c         |  59
 drivers/gpu/drm/i915/intel_panel.c       |   3
 drivers/gpu/drm/i915/intel_pm.c          |  64
 drivers/gpu/drm/i915/intel_ringbuffer.c  | 153
 drivers/input/rmi4/rmi_f12.c             |   9
 drivers/input/touchscreen/tsc2004.c      |   7
 drivers/input/touchscreen/tsc2005.c      |   7
 drivers/input/touchscreen/tsc200x-core.c |  15
 drivers/input/touchscreen/tsc200x-core.h |   2
 drivers/mmc/card/block.c                 |  16
 drivers/mmc/host/pxamci.c                |  16
 drivers/nvme/host/core.c                 |  74
 drivers/tty/vt/keyboard.c                |  30
 29 files changed, 424 insertions(+), 158 deletions(-)
diff --git a/drivers/acpi/nfit.c b/drivers/acpi/nfit.c
index ac6ddcc080d4..1f0e06065ae6 100644
--- a/drivers/acpi/nfit.c
+++ b/drivers/acpi/nfit.c
@@ -1131,11 +1131,11 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
/*
* Until standardization materializes we need to consider up to 3
- * different command sets. Note, that checking for zero functions
- * tells us if any commands might be reachable through this uuid.
+ * different command sets. Note, that checking for function0 (bit0)
+ * tells us if any commands are reachable through this uuid.
*/
for (i = NVDIMM_FAMILY_INTEL; i <= NVDIMM_FAMILY_HPE2; i++)
- if (acpi_check_dsm(adev_dimm->handle, to_nfit_uuid(i), 1, 0))
+ if (acpi_check_dsm(adev_dimm->handle, to_nfit_uuid(i), 1, 1))
break;
/* limit the supported commands to those that are publicly documented */
@@ -1151,9 +1151,10 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
if (disable_vendor_specific)
dsm_mask &= ~(1 << 8);
} else {
- dev_err(dev, "unknown dimm command family\n");
+ dev_dbg(dev, "unknown dimm command family\n");
nfit_mem->family = -1;
- return force_enable_dimms ? 0 : -ENODEV;
+ /* DSMs are optional, continue loading the driver... */
+ return 0;
}
uuid = to_nfit_uuid(nfit_mem->family);
diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
index b4de130f2d57..22c09952e177 100644
--- a/drivers/acpi/utils.c
+++ b/drivers/acpi/utils.c
@@ -680,6 +680,9 @@ bool acpi_check_dsm(acpi_handle handle, const u8 *uuid, u64 rev, u64 funcs)
u64 mask = 0;
union acpi_object *obj;
+ if (funcs == 0)
+ return false;
+
obj = acpi_evaluate_dsm(handle, uuid, rev, 0, NULL);
if (!obj)
return false;
@@ -692,9 +695,6 @@ bool acpi_check_dsm(acpi_handle handle, const u8 *uuid, u64 rev, u64 funcs)
mask |= (((u64)obj->buffer.pointer[i]) << (i * 8));
ACPI_FREE(obj);
- if (funcs == 0)
- return true;
-
/*
* Bit 0 indicates whether there's support for any functions other than
* function 0 for the specified UUID and revision.
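
A minimal user-space sketch (not part of the patch) of the check acpi_check_dsm() now performs: _DSM function 0 returns a bitmask of implemented functions, "funcs" is the bitmask of functions the caller requires, and a funcs value of 0 (which previously meant "return true as long as _DSM evaluates at all") is now rejected up front. The helper name and the final comparison are illustrative assumptions; the actual comparison sits below the hunk shown here.

#include <stdbool.h>
#include <stdint.h>

/* Illustrative sketch only: "supported" is the function bitmask reported
 * by _DSM function 0, "funcs" the set of function bits the caller needs. */
static bool dsm_mask_check(uint64_t supported, uint64_t funcs)
{
	if (funcs == 0)		/* nothing requested: report no support */
		return false;

	/* every requested function bit must be implemented */
	return (supported & funcs) == funcs;
}

With semantics like these, the nfit.c hunk above can pass funcs = 1 to ask specifically whether function 0 (bit 0) is advertised.
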
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 6be7770f68e9..31c183aed368 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4314,6 +4314,12 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
*/
{ "ST380013AS", "3.20", ATA_HORKAGE_MAX_SEC_1024 },
+ /*
+ * Device times out with higher max sects.
+ * https://bugzilla.kernel.org/show_bug.cgi?id=121671
+ */
+ { "LITEON CX1-JB256-HP", NULL, ATA_HORKAGE_MAX_SEC_1024 },
+
/* Devices we expect to fail diagnostics */
/* Devices where NCQ should be avoided */
diff --git a/drivers/clk/at91/clk-programmable.c b/drivers/clk/at91/clk-programmable.c
index 10f846cc8db1..25d5906640c3 100644
--- a/drivers/clk/at91/clk-programmable.c
+++ b/drivers/clk/at91/clk-programmable.c
@@ -99,7 +99,7 @@ static int clk_programmable_set_parent(struct clk_hw *hw, u8 index)
struct clk_programmable *prog = to_clk_programmable(hw);
const struct clk_programmable_layout *layout = prog->layout;
unsigned int mask = layout->css_mask;
- unsigned int pckr = 0;
+ unsigned int pckr = index;
if (layout->have_slck_mck)
mask |= AT91_PMC_CSSMCK_MCK;
diff --git a/drivers/clk/sunxi/clk-sun4i-display.c b/drivers/clk/sunxi/clk-sun4i-display.c
index 445a7498d6df..9780fac6d029 100644
--- a/drivers/clk/sunxi/clk-sun4i-display.c
+++ b/drivers/clk/sunxi/clk-sun4i-display.c
@@ -33,6 +33,8 @@ struct sun4i_a10_display_clk_data {
u8 width_div;
u8 width_mux;
+
+ u32 flags;
};
struct reset_data {
@@ -166,7 +168,7 @@ static void __init sun4i_a10_display_init(struct device_node *node,
data->has_div ? &div->hw : NULL,
data->has_div ? &clk_divider_ops : NULL,
&gate->hw, &clk_gate_ops,
- 0);
+ data->flags);
if (IS_ERR(clk)) {
pr_err("%s: Couldn't register the clock\n", clk_name);
goto free_div;
@@ -232,6 +234,7 @@ static const struct sun4i_a10_display_clk_data sun4i_a10_tcon_ch0_data __initcon
.offset_rst = 29,
.offset_mux = 24,
.width_mux = 2,
+ .flags = CLK_SET_RATE_PARENT,
};
static void __init sun4i_a10_tcon_ch0_setup(struct device_node *node)
diff --git a/drivers/clk/sunxi/clk-sun4i-tcon-ch1.c b/drivers/clk/sunxi/clk-sun4i-tcon-ch1.c
index 98a4582de56a..b6d29d1bedca 100644
--- a/drivers/clk/sunxi/clk-sun4i-tcon-ch1.c
+++ b/drivers/clk/sunxi/clk-sun4i-tcon-ch1.c
@@ -79,15 +79,11 @@ static int tcon_ch1_is_enabled(struct clk_hw *hw)
static u8 tcon_ch1_get_parent(struct clk_hw *hw)
{
struct tcon_ch1_clk *tclk = hw_to_tclk(hw);
- int num_parents = clk_hw_get_num_parents(hw);
u32 reg;
reg = readl(tclk->reg) >> TCON_CH1_SCLK2_MUX_SHIFT;
reg &= reg >> TCON_CH1_SCLK2_MUX_MASK;
- if (reg >= num_parents)
- return -EINVAL;
-
return reg;
}
diff --git a/drivers/crypto/qat/qat_common/Makefile b/drivers/crypto/qat/qat_common/Makefile
index 6d74b91f2152..5fc3dbb9ada0 100644
--- a/drivers/crypto/qat/qat_common/Makefile
+++ b/drivers/crypto/qat/qat_common/Makefile
@@ -2,6 +2,7 @@ $(obj)/qat_rsapubkey-asn1.o: $(obj)/qat_rsapubkey-asn1.c \
$(obj)/qat_rsapubkey-asn1.h
$(obj)/qat_rsaprivkey-asn1.o: $(obj)/qat_rsaprivkey-asn1.c \
$(obj)/qat_rsaprivkey-asn1.h
+$(obj)/qat_asym_algs.o: $(obj)/qat_rsapubkey-asn1.h $(obj)/qat_rsaprivkey-asn1.h
clean-files += qat_rsapubkey-asn1.c qat_rsapubkey-asn1.h
clean-files += qat_rsaprivkey-asn1.c qat_rsaprivkey-asn1.h
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 536112fd2466..d7860614f87f 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -402,9 +402,12 @@ config GPIO_TB10X
select OF_GPIO
config GPIO_TEGRA
- bool
- default y
+ bool "NVIDIA Tegra GPIO support"
+ default ARCH_TEGRA
depends on ARCH_TEGRA || COMPILE_TEST
+ depends on OF
+ help
+ Say yes here to support GPIO pins on NVIDIA Tegra SoCs.
config GPIO_TS4800
tristate "TS-4800 DIO blocks and compatibles"
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index f313b4d8344f..85c4debf47e0 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -512,6 +512,10 @@ void intel_detect_pch(struct drm_device *dev)
DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
WARN_ON(!IS_SKYLAKE(dev) &&
!IS_KABYLAKE(dev));
+ } else if (id == INTEL_PCH_KBP_DEVICE_ID_TYPE) {
+ dev_priv->pch_type = PCH_KBP;
+ DRM_DEBUG_KMS("Found KabyPoint PCH\n");
+ WARN_ON(!IS_KABYLAKE(dev));
} else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) ||
(id == INTEL_PCH_P3X_DEVICE_ID_TYPE) ||
((id == INTEL_PCH_QEMU_DEVICE_ID_TYPE) &&
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 7c334e902266..bc3f2e6842e7 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -990,6 +990,7 @@ enum intel_pch {
PCH_CPT, /* Cougarpoint PCH */
PCH_LPT, /* Lynxpoint PCH */
PCH_SPT, /* Sunrisepoint PCH */
+ PCH_KBP, /* Kabypoint PCH */
PCH_NOP,
};
@@ -2600,6 +2601,15 @@ struct drm_i915_cmd_table {
#define IS_BXT_REVID(p, since, until) (IS_BROXTON(p) && IS_REVID(p, since, until))
+#define KBL_REVID_A0 0x0
+#define KBL_REVID_B0 0x1
+#define KBL_REVID_C0 0x2
+#define KBL_REVID_D0 0x3
+#define KBL_REVID_E0 0x4
+
+#define IS_KBL_REVID(p, since, until) \
+ (IS_KABYLAKE(p) && IS_REVID(p, since, until))
+
/*
* The genX designation typically refers to the render engine, so render
* capability related checks should use IS_GEN, while display and other checks
@@ -2708,11 +2718,13 @@ struct drm_i915_cmd_table {
#define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE 0x9c00
#define INTEL_PCH_SPT_DEVICE_ID_TYPE 0xA100
#define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE 0x9D00
+#define INTEL_PCH_KBP_DEVICE_ID_TYPE 0xA200
#define INTEL_PCH_P2X_DEVICE_ID_TYPE 0x7100
#define INTEL_PCH_P3X_DEVICE_ID_TYPE 0x7000
#define INTEL_PCH_QEMU_DEVICE_ID_TYPE 0x2900 /* qemu q35 has 2918 */
#define INTEL_PCH_TYPE(dev) (__I915__(dev)->pch_type)
+#define HAS_PCH_KBP(dev) (INTEL_PCH_TYPE(dev) == PCH_KBP)
#define HAS_PCH_SPT(dev) (INTEL_PCH_TYPE(dev) == PCH_SPT)
#define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
#define HAS_PCH_LPT_LP(dev) (__I915__(dev)->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE)
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index b7ce963fb8f8..44004e3f09e4 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -55,8 +55,10 @@ int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
return -ENODEV;
/* See the comment at the drm_mm_init() call for more about this check.
- * WaSkipStolenMemoryFirstPage:bdw,chv (incomplete) */
- if (INTEL_INFO(dev_priv)->gen == 8 && start < 4096)
+ * WaSkipStolenMemoryFirstPage:bdw,chv,kbl (incomplete)
+ */
+ if (start < 4096 && (IS_GEN8(dev_priv) ||
+ IS_KBL_REVID(dev_priv, 0, KBL_REVID_A0)))
start = 4096;
mutex_lock(&dev_priv->mm.stolen_lock);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 2f6fd33c07ba..aab47f7bb61b 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -2471,7 +2471,7 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
I915_WRITE(SDEIIR, iir);
ret = IRQ_HANDLED;
- if (HAS_PCH_SPT(dev_priv))
+ if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv))
spt_irq_handler(dev, iir);
else
cpt_irq_handler(dev, iir);
@@ -4661,7 +4661,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
dev->driver->disable_vblank = gen8_disable_vblank;
if (IS_BROXTON(dev))
dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
- else if (HAS_PCH_SPT(dev))
+ else if (HAS_PCH_SPT(dev) || HAS_PCH_KBP(dev))
dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
else
dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index b407411e31ba..3fcf7dd5b6ca 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -220,6 +220,9 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define ECOCHK_PPGTT_WT_HSW (0x2<<3)
#define ECOCHK_PPGTT_WB_HSW (0x3<<3)
+#define GEN8_CONFIG0 _MMIO(0xD00)
+#define GEN9_DEFAULT_FIXES (1 << 3 | 1 << 2 | 1 << 1)
+
#define GAC_ECO_BITS _MMIO(0x14090)
#define ECOBITS_SNB_BIT (1<<13)
#define ECOBITS_PPGTT_CACHE64B (3<<8)
@@ -1669,6 +1672,9 @@ enum skl_disp_power_wells {
#define GEN7_TLB_RD_ADDR _MMIO(0x4700)
+#define GAMT_CHKN_BIT_REG _MMIO(0x4ab8)
+#define GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING (1<<28)
+
#if 0
#define PRB0_TAIL _MMIO(0x2030)
#define PRB0_HEAD _MMIO(0x2034)
@@ -1804,6 +1810,10 @@ enum skl_disp_power_wells {
#define GEN9_IZ_HASHING_MASK(slice) (0x3 << ((slice) * 2))
#define GEN9_IZ_HASHING(slice, val) ((val) << ((slice) * 2))
+/* chicken reg for WaConextSwitchWithConcurrentTLBInvalidate */
+#define GEN9_CSFE_CHICKEN1_RCS _MMIO(0x20D4)
+#define GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE (1 << 2)
+
/* WaClearTdlStateAckDirtyBits */
#define GEN8_STATE_ACK _MMIO(0x20F0)
#define GEN9_STATE_ACK_SLICE1 _MMIO(0x20F8)
@@ -2200,6 +2210,8 @@ enum skl_disp_power_wells {
#define ILK_DPFC_STATUS _MMIO(0x43210)
#define ILK_DPFC_FENCE_YOFF _MMIO(0x43218)
#define ILK_DPFC_CHICKEN _MMIO(0x43224)
+#define ILK_DPFC_DISABLE_DUMMY0 (1<<8)
+#define ILK_DPFC_NUKE_ON_ANY_MODIFICATION (1<<23)
#define ILK_FBC_RT_BASE _MMIO(0x2128)
#define ILK_FBC_RT_VALID (1<<0)
#define SNB_FBC_FRONT_BUFFER (1<<1)
@@ -6031,6 +6043,7 @@ enum skl_disp_power_wells {
#define CHICKEN_PAR1_1 _MMIO(0x42080)
#define DPA_MASK_VBLANK_SRD (1 << 15)
#define FORCE_ARB_IDLE_PLANES (1 << 14)
+#define SKL_EDP_PSR_FIX_RDWRAP (1 << 3)
#define _CHICKEN_PIPESL_1_A 0x420b0
#define _CHICKEN_PIPESL_1_B 0x420b4
@@ -6039,6 +6052,7 @@ enum skl_disp_power_wells {
#define CHICKEN_PIPESL_1(pipe) _MMIO_PIPE(pipe, _CHICKEN_PIPESL_1_A, _CHICKEN_PIPESL_1_B)
#define DISP_ARB_CTL _MMIO(0x45000)
+#define DISP_FBC_MEMORY_WAKE (1<<31)
#define DISP_TILE_SURFACE_SWIZZLING (1<<13)
#define DISP_FBC_WM_DIS (1<<15)
#define DISP_ARB_CTL2 _MMIO(0x45004)
@@ -6052,6 +6066,9 @@ enum skl_disp_power_wells {
#define HSW_NDE_RSTWRN_OPT _MMIO(0x46408)
#define RESET_PCH_HANDSHAKE_ENABLE (1<<4)
+#define GEN8_CHICKEN_DCPR_1 _MMIO(0x46430)
+#define MASK_WAKEMEM (1<<13)
+
#define SKL_DFSM _MMIO(0x51000)
#define SKL_DFSM_CDCLK_LIMIT_MASK (3 << 23)
#define SKL_DFSM_CDCLK_LIMIT_675 (0 << 23)
@@ -6069,6 +6086,7 @@ enum skl_disp_power_wells {
#define GEN9_TSG_BARRIER_ACK_DISABLE (1<<8)
#define GEN9_CS_DEBUG_MODE1 _MMIO(0x20ec)
+#define GEN9_CTX_PREEMPT_REG _MMIO(0x2248)
#define GEN8_CS_CHICKEN1 _MMIO(0x2580)
/* GEN7 chicken */
@@ -6076,6 +6094,7 @@ enum skl_disp_power_wells {
# define GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC ((1<<10) | (1<<26))
# define GEN9_RHWO_OPTIMIZATION_DISABLE (1<<14)
#define COMMON_SLICE_CHICKEN2 _MMIO(0x7014)
+# define GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION (1<<8)
# define GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE (1<<0)
#define HIZ_CHICKEN _MMIO(0x7018)
@@ -6921,6 +6940,7 @@ enum skl_disp_power_wells {
#define EDRAM_SETS_IDX(cap) (((cap) >> 8) & 0x3)
#define GEN6_UCGCTL1 _MMIO(0x9400)
+# define GEN6_GAMUNIT_CLOCK_GATE_DISABLE (1 << 22)
# define GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE (1 << 16)
# define GEN6_BLBUNIT_CLOCK_GATE_DISABLE (1 << 5)
# define GEN6_CSUNIT_CLOCK_GATE_DISABLE (1 << 7)
@@ -6937,6 +6957,7 @@ enum skl_disp_power_wells {
#define GEN7_UCGCTL4 _MMIO(0x940c)
#define GEN7_L3BANK2X_CLOCK_GATE_DISABLE (1<<25)
+#define GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE (1<<14)
#define GEN6_RCGCTL1 _MMIO(0x9410)
#define GEN6_RCGCTL2 _MMIO(0x9414)
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c
index a34c23eceba0..2b3b428d9cd2 100644
--- a/drivers/gpu/drm/i915/intel_csr.c
+++ b/drivers/gpu/drm/i915/intel_csr.c
@@ -41,16 +41,22 @@
* be moved to FW_FAILED.
*/
+#define I915_CSR_KBL "i915/kbl_dmc_ver1.bin"
+MODULE_FIRMWARE(I915_CSR_KBL);
+#define KBL_CSR_VERSION_REQUIRED CSR_VERSION(1, 1)
+
#define I915_CSR_SKL "i915/skl_dmc_ver1.bin"
+MODULE_FIRMWARE(I915_CSR_SKL);
+#define SKL_CSR_VERSION_REQUIRED CSR_VERSION(1, 23)
+
#define I915_CSR_BXT "i915/bxt_dmc_ver1.bin"
+MODULE_FIRMWARE(I915_CSR_BXT);
+#define BXT_CSR_VERSION_REQUIRED CSR_VERSION(1, 7)
#define FIRMWARE_URL "https://01.org/linuxgraphics/intel-linux-graphics-firmwares"
-MODULE_FIRMWARE(I915_CSR_SKL);
-MODULE_FIRMWARE(I915_CSR_BXT);
-#define SKL_CSR_VERSION_REQUIRED CSR_VERSION(1, 23)
-#define BXT_CSR_VERSION_REQUIRED CSR_VERSION(1, 7)
+
#define CSR_MAX_FW_SIZE 0x2FFF
#define CSR_DEFAULT_FW_OFFSET 0xFFFFFFFF
@@ -169,12 +175,10 @@ struct stepping_info {
char substepping;
};
-/*
- * Kabylake derivated from Skylake H0, so SKL H0
- * is the right firmware for KBL A0 (revid 0).
- */
static const struct stepping_info kbl_stepping_info[] = {
- {'H', '0'}, {'I', '0'}
+ {'A', '0'}, {'B', '0'}, {'C', '0'},
+ {'D', '0'}, {'E', '0'}, {'F', '0'},
+ {'G', '0'}, {'H', '0'}, {'I', '0'},
};
static const struct stepping_info skl_stepping_info[] = {
@@ -298,7 +302,9 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
csr->version = css_header->version;
- if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+ if (IS_KABYLAKE(dev_priv)) {
+ required_min_version = KBL_CSR_VERSION_REQUIRED;
+ } else if (IS_SKYLAKE(dev_priv)) {
required_min_version = SKL_CSR_VERSION_REQUIRED;
} else if (IS_BROXTON(dev_priv)) {
required_min_version = BXT_CSR_VERSION_REQUIRED;
@@ -446,7 +452,9 @@ void intel_csr_ucode_init(struct drm_i915_private *dev_priv)
if (!HAS_CSR(dev_priv))
return;
- if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
+ if (IS_KABYLAKE(dev_priv))
+ csr->fw_path = I915_CSR_KBL;
+ else if (IS_SKYLAKE(dev_priv))
csr->fw_path = I915_CSR_SKL;
else if (IS_BROXTON(dev_priv))
csr->fw_path = I915_CSR_BXT;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 04452cf3eae8..3074c56a643d 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -11997,6 +11997,12 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
ret = intel_color_check(crtc, crtc_state);
if (ret)
return ret;
+
+ /*
+ * Changing color management on Intel hardware is
+ * handled as part of planes update.
+ */
+ crtc_state->planes_changed = true;
}
ret = 0;
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 40745e38d438..891107f92d9f 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -4645,7 +4645,7 @@ intel_dp_detect(struct drm_connector *connector, bool force)
intel_dp->detect_done = false;
- if (intel_connector->detect_edid)
+ if (is_edp(intel_dp) || intel_connector->detect_edid)
return connector_status_connected;
else
return connector_status_disconnected;
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 42eac37de047..7f2d8415ed8b 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -1103,15 +1103,17 @@ static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine,
uint32_t *const batch,
uint32_t index)
{
+ struct drm_i915_private *dev_priv = engine->dev->dev_private;
uint32_t l3sqc4_flush = (0x40400000 | GEN8_LQSC_FLUSH_COHERENT_LINES);
/*
- * WaDisableLSQCROPERFforOCL:skl
+ * WaDisableLSQCROPERFforOCL:skl,kbl
* This WA is implemented in skl_init_clock_gating() but since
* this batch updates GEN8_L3SQCREG4 with default value we need to
* set this bit here to retain the WA during flush.
*/
- if (IS_SKL_REVID(engine->dev, 0, SKL_REVID_E0))
+ if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_E0) ||
+ IS_KBL_REVID(dev_priv, 0, KBL_REVID_E0))
l3sqc4_flush |= GEN8_LQSC_RO_PERF_DIS;
wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 |
@@ -1273,6 +1275,7 @@ static int gen9_init_indirectctx_bb(struct intel_engine_cs *engine,
{
int ret;
struct drm_device *dev = engine->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
/* WaDisableCtxRestoreArbitration:skl,bxt */
@@ -1286,6 +1289,22 @@ static int gen9_init_indirectctx_bb(struct intel_engine_cs *engine,
return ret;
index = ret;
+ /* WaClearSlmSpaceAtContextSwitch:kbl */
+ /* Actual scratch location is at 128 bytes offset */
+ if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_A0)) {
+ uint32_t scratch_addr
+ = engine->scratch.gtt_offset + 2*CACHELINE_BYTES;
+
+ wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6));
+ wa_ctx_emit(batch, index, (PIPE_CONTROL_FLUSH_L3 |
+ PIPE_CONTROL_GLOBAL_GTT_IVB |
+ PIPE_CONTROL_CS_STALL |
+ PIPE_CONTROL_QW_WRITE));
+ wa_ctx_emit(batch, index, scratch_addr);
+ wa_ctx_emit(batch, index, 0);
+ wa_ctx_emit(batch, index, 0);
+ wa_ctx_emit(batch, index, 0);
+ }
/* Pad to end of cacheline */
while (index % CACHELINE_DWORDS)
wa_ctx_emit(batch, index, MI_NOOP);
@@ -1687,9 +1706,10 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
struct intel_ringbuffer *ringbuf = request->ringbuf;
struct intel_engine_cs *engine = ringbuf->engine;
u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
- bool vf_flush_wa = false;
+ bool vf_flush_wa = false, dc_flush_wa = false;
u32 flags = 0;
int ret;
+ int len;
flags |= PIPE_CONTROL_CS_STALL;
@@ -1716,9 +1736,21 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
*/
if (IS_GEN9(engine->dev))
vf_flush_wa = true;
+
+ /* WaForGAMHang:kbl */
+ if (IS_KBL_REVID(request->i915, 0, KBL_REVID_B0))
+ dc_flush_wa = true;
}
- ret = intel_ring_begin(request, vf_flush_wa ? 12 : 6);
+ len = 6;
+
+ if (vf_flush_wa)
+ len += 6;
+
+ if (dc_flush_wa)
+ len += 12;
+
+ ret = intel_ring_begin(request, len);
if (ret)
return ret;
@@ -1731,12 +1763,31 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
intel_logical_ring_emit(ringbuf, 0);
}
+ if (dc_flush_wa) {
+ intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
+ intel_logical_ring_emit(ringbuf, PIPE_CONTROL_DC_FLUSH_ENABLE);
+ intel_logical_ring_emit(ringbuf, 0);
+ intel_logical_ring_emit(ringbuf, 0);
+ intel_logical_ring_emit(ringbuf, 0);
+ intel_logical_ring_emit(ringbuf, 0);
+ }
+
intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
intel_logical_ring_emit(ringbuf, flags);
intel_logical_ring_emit(ringbuf, scratch_addr);
intel_logical_ring_emit(ringbuf, 0);
intel_logical_ring_emit(ringbuf, 0);
intel_logical_ring_emit(ringbuf, 0);
+
+ if (dc_flush_wa) {
+ intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
+ intel_logical_ring_emit(ringbuf, PIPE_CONTROL_CS_STALL);
+ intel_logical_ring_emit(ringbuf, 0);
+ intel_logical_ring_emit(ringbuf, 0);
+ intel_logical_ring_emit(ringbuf, 0);
+ intel_logical_ring_emit(ringbuf, 0);
+ }
+
intel_logical_ring_advance(ringbuf);
return 0;
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 8357d571553a..aba94099886b 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -1731,7 +1731,8 @@ intel_panel_init_backlight_funcs(struct intel_panel *panel)
panel->backlight.set = bxt_set_backlight;
panel->backlight.get = bxt_get_backlight;
panel->backlight.hz_to_pwm = bxt_hz_to_pwm;
- } else if (HAS_PCH_LPT(dev_priv) || HAS_PCH_SPT(dev_priv)) {
+ } else if (HAS_PCH_LPT(dev_priv) || HAS_PCH_SPT(dev_priv) ||
+ HAS_PCH_KBP(dev_priv)) {
panel->backlight.setup = lpt_setup_backlight;
panel->backlight.enable = lpt_enable_backlight;
panel->backlight.disable = lpt_disable_backlight;
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index a7ef45da0a9e..2863b92c9da6 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -54,10 +54,38 @@
#define INTEL_RC6p_ENABLE (1<<1)
#define INTEL_RC6pp_ENABLE (1<<2)
+static void gen9_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ /* See Bspec note for PSR2_CTL bit 31, Wa#828:skl,bxt,kbl */
+ I915_WRITE(CHICKEN_PAR1_1,
+ I915_READ(CHICKEN_PAR1_1) | SKL_EDP_PSR_FIX_RDWRAP);
+
+ I915_WRITE(GEN8_CONFIG0,
+ I915_READ(GEN8_CONFIG0) | GEN9_DEFAULT_FIXES);
+
+ /* WaEnableChickenDCPR:skl,bxt,kbl */
+ I915_WRITE(GEN8_CHICKEN_DCPR_1,
+ I915_READ(GEN8_CHICKEN_DCPR_1) | MASK_WAKEMEM);
+
+ /* WaFbcTurnOffFbcWatermark:skl,bxt,kbl */
+ /* WaFbcWakeMemOn:skl,bxt,kbl */
+ I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
+ DISP_FBC_WM_DIS |
+ DISP_FBC_MEMORY_WAKE);
+
+ /* WaFbcHighMemBwCorruptionAvoidance:skl,bxt,kbl */
+ I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
+ ILK_DPFC_DISABLE_DUMMY0);
+}
+
static void bxt_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ gen9_init_clock_gating(dev);
+
/* WaDisableSDEUnitClockGating:bxt */
I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
@@ -6698,6 +6726,38 @@ static void lpt_suspend_hw(struct drm_device *dev)
}
}
+static void kabylake_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ gen9_init_clock_gating(dev);
+
+ /* WaDisableSDEUnitClockGating:kbl */
+ if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
+ I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
+ GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
+
+ /* WaDisableGamClockGating:kbl */
+ if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
+ I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
+ GEN6_GAMUNIT_CLOCK_GATE_DISABLE);
+
+ /* WaFbcNukeOnHostModify:kbl */
+ I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
+ ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
+}
+
+static void skylake_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ gen9_init_clock_gating(dev);
+
+ /* WaFbcNukeOnHostModify:skl */
+ I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
+ ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
+}
+
static void broadwell_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -7163,9 +7223,9 @@ static void nop_init_clock_gating(struct drm_device *dev)
void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
{
if (IS_SKYLAKE(dev_priv))
- dev_priv->display.init_clock_gating = nop_init_clock_gating;
+ dev_priv->display.init_clock_gating = skylake_init_clock_gating;
else if (IS_KABYLAKE(dev_priv))
- dev_priv->display.init_clock_gating = nop_init_clock_gating;
+ dev_priv->display.init_clock_gating = kabylake_init_clock_gating;
else if (IS_BROXTON(dev_priv))
dev_priv->display.init_clock_gating = bxt_init_clock_gating;
else if (IS_BROADWELL(dev_priv))
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 04402bb9d26b..68c5af079ef8 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -913,24 +913,26 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
{
struct drm_device *dev = engine->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t tmp;
int ret;
- /* WaEnableLbsSlaRetryTimerDecrement:skl */
+ /* WaConextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl */
+ I915_WRITE(GEN9_CSFE_CHICKEN1_RCS, _MASKED_BIT_ENABLE(GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE));
+
+ /* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl */
I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) |
GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);
- /* WaDisableKillLogic:bxt,skl */
+ /* WaDisableKillLogic:bxt,skl,kbl */
I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
ECOCHK_DIS_TLB);
- /* WaClearFlowControlGpgpuContextSave:skl,bxt */
- /* WaDisablePartialInstShootdown:skl,bxt */
+ /* WaClearFlowControlGpgpuContextSave:skl,bxt,kbl */
+ /* WaDisablePartialInstShootdown:skl,bxt,kbl */
WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
FLOW_CONTROL_ENABLE |
PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
- /* Syncing dependencies between camera and graphics:skl,bxt */
+ /* Syncing dependencies between camera and graphics:skl,bxt,kbl */
WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);
@@ -952,18 +954,18 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
*/
}
- /* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt */
- /* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt */
+ /* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt,kbl */
+ /* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt,kbl */
WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
GEN9_ENABLE_YV12_BUGFIX |
GEN9_ENABLE_GPGPU_PREEMPTION);
- /* Wa4x4STCOptimizationDisable:skl,bxt */
- /* WaDisablePartialResolveInVc:skl,bxt */
+ /* Wa4x4STCOptimizationDisable:skl,bxt,kbl */
+ /* WaDisablePartialResolveInVc:skl,bxt,kbl */
WA_SET_BIT_MASKED(CACHE_MODE_1, (GEN8_4x4_STC_OPTIMIZATION_DISABLE |
GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE));
- /* WaCcsTlbPrefetchDisable:skl,bxt */
+ /* WaCcsTlbPrefetchDisable:skl,bxt,kbl */
WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
GEN9_CCS_TLB_PREFETCH_ENABLE);
@@ -973,31 +975,57 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
WA_SET_BIT_MASKED(SLICE_ECO_CHICKEN0,
PIXEL_MASK_CAMMING_DISABLE);
- /* WaForceContextSaveRestoreNonCoherent:skl,bxt */
- tmp = HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT;
- if (IS_SKL_REVID(dev, SKL_REVID_F0, REVID_FOREVER) ||
- IS_BXT_REVID(dev, BXT_REVID_B0, REVID_FOREVER))
- tmp |= HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE;
- WA_SET_BIT_MASKED(HDC_CHICKEN0, tmp);
+ /* WaForceContextSaveRestoreNonCoherent:skl,bxt,kbl */
+ WA_SET_BIT_MASKED(HDC_CHICKEN0,
+ HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
+ HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE);
+
+ /* WaForceEnableNonCoherent and WaDisableHDCInvalidation are
+ * both tied to WaForceContextSaveRestoreNonCoherent
+ * in some hsds for skl. We keep the tie for all gen9. The
+ * documentation is a bit hazy and so we want to get common behaviour,
+ * even though there is no clear evidence we would need both on kbl/bxt.
+ * This area has been source of system hangs so we play it safe
+ * and mimic the skl regardless of what bspec says.
+ *
+ * Use Force Non-Coherent whenever executing a 3D context. This
+ * is a workaround for a possible hang in the unlikely event
+ * a TLB invalidation occurs during a PSD flush.
+ */
- /* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt */
- if (IS_SKYLAKE(dev) || IS_BXT_REVID(dev, 0, BXT_REVID_B0))
+ /* WaForceEnableNonCoherent:skl,bxt,kbl */
+ WA_SET_BIT_MASKED(HDC_CHICKEN0,
+ HDC_FORCE_NON_COHERENT);
+
+ /* WaDisableHDCInvalidation:skl,bxt,kbl */
+ I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
+ BDW_DISABLE_HDC_INVALIDATION);
+
+ /* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt,kbl */
+ if (IS_SKYLAKE(dev_priv) ||
+ IS_KABYLAKE(dev_priv) ||
+ IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0))
WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
GEN8_SAMPLER_POWER_BYPASS_DIS);
- /* WaDisableSTUnitPowerOptimization:skl,bxt */
+ /* WaDisableSTUnitPowerOptimization:skl,bxt,kbl */
WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE);
- /* WaOCLCoherentLineFlush:skl,bxt */
+ /* WaOCLCoherentLineFlush:skl,bxt,kbl */
I915_WRITE(GEN8_L3SQCREG4, (I915_READ(GEN8_L3SQCREG4) |
GEN8_LQSC_FLUSH_COHERENT_LINES));
- /* WaEnablePreemptionGranularityControlByUMD:skl,bxt */
+ /* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt */
+ ret = wa_ring_whitelist_reg(engine, GEN9_CTX_PREEMPT_REG);
+ if (ret)
+ return ret;
+
+ /* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl */
ret= wa_ring_whitelist_reg(engine, GEN8_CS_CHICKEN1);
if (ret)
return ret;
- /* WaAllowUMDToModifyHDCChicken1:skl,bxt */
+ /* WaAllowUMDToModifyHDCChicken1:skl,bxt,kbl */
ret = wa_ring_whitelist_reg(engine, GEN8_HDC_CHICKEN1);
if (ret)
return ret;
@@ -1092,22 +1120,6 @@ static int skl_init_workarounds(struct intel_engine_cs *engine)
WA_SET_BIT_MASKED(HIZ_CHICKEN,
BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE);
- /* This is tied to WaForceContextSaveRestoreNonCoherent */
- if (IS_SKL_REVID(dev, 0, REVID_FOREVER)) {
- /*
- *Use Force Non-Coherent whenever executing a 3D context. This
- * is a workaround for a possible hang in the unlikely event
- * a TLB invalidation occurs during a PSD flush.
- */
- /* WaForceEnableNonCoherent:skl */
- WA_SET_BIT_MASKED(HDC_CHICKEN0,
- HDC_FORCE_NON_COHERENT);
-
- /* WaDisableHDCInvalidation:skl */
- I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
- BDW_DISABLE_HDC_INVALIDATION);
- }
-
/* WaBarrierPerformanceFixDisable:skl */
if (IS_SKL_REVID(dev, SKL_REVID_C0, SKL_REVID_D0))
WA_SET_BIT_MASKED(HDC_CHICKEN0,
@@ -1120,6 +1132,9 @@ static int skl_init_workarounds(struct intel_engine_cs *engine)
GEN7_HALF_SLICE_CHICKEN1,
GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
+ /* WaDisableGafsUnitClkGating:skl */
+ WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
+
/* WaDisableLSQCROPERFforOCL:skl */
ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
if (ret)
@@ -1174,6 +1189,63 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine)
return ret;
}
+ /* WaInsertDummyPushConstPs:bxt */
+ if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0))
+ WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
+ GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
+
+ return 0;
+}
+
+static int kbl_init_workarounds(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *dev_priv = engine->dev->dev_private;
+ int ret;
+
+ ret = gen9_init_workarounds(engine);
+ if (ret)
+ return ret;
+
+ /* WaEnableGapsTsvCreditFix:kbl */
+ I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
+ GEN9_GAPS_TSV_CREDIT_DISABLE));
+
+ /* WaDisableDynamicCreditSharing:kbl */
+ if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
+ WA_SET_BIT(GAMT_CHKN_BIT_REG,
+ GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);
+
+ /* WaDisableFenceDestinationToSLM:kbl (pre-prod) */
+ if (IS_KBL_REVID(dev_priv, KBL_REVID_A0, KBL_REVID_A0))
+ WA_SET_BIT_MASKED(HDC_CHICKEN0,
+ HDC_FENCE_DEST_SLM_DISABLE);
+
+ /* GEN8_L3SQCREG4 has a dependency with WA batch so any new changes
+ * involving this register should also be added to WA batch as required.
+ */
+ if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_E0))
+ /* WaDisableLSQCROPERFforOCL:kbl */
+ I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
+ GEN8_LQSC_RO_PERF_DIS);
+
+ /* WaInsertDummyPushConstPs:kbl */
+ if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
+ WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
+ GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
+
+ /* WaDisableGafsUnitClkGating:kbl */
+ WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
+
+ /* WaDisableSbeCacheDispatchPortSharing:kbl */
+ WA_SET_BIT_MASKED(
+ GEN7_HALF_SLICE_CHICKEN1,
+ GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
+
+ /* WaDisableLSQCROPERFforOCL:kbl */
+ ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
+ if (ret)
+ return ret;
+
return 0;
}
@@ -1199,6 +1271,9 @@ int init_workarounds_ring(struct intel_engine_cs *engine)
if (IS_BROXTON(dev))
return bxt_init_workarounds(engine);
+ if (IS_KABYLAKE(dev_priv))
+ return kbl_init_workarounds(engine);
+
return 0;
}
diff --git a/drivers/input/rmi4/rmi_f12.c b/drivers/input/rmi4/rmi_f12.c
index 8dd3fb5e1f94..88e91559c84e 100644
--- a/drivers/input/rmi4/rmi_f12.c
+++ b/drivers/input/rmi4/rmi_f12.c
@@ -66,7 +66,7 @@ static int rmi_f12_read_sensor_tuning(struct f12_data *f12)
struct rmi_device *rmi_dev = fn->rmi_dev;
int ret;
int offset;
- u8 buf[14];
+ u8 buf[15];
int pitch_x = 0;
int pitch_y = 0;
int clip_x_low = 0;
@@ -86,9 +86,10 @@ static int rmi_f12_read_sensor_tuning(struct f12_data *f12)
offset = rmi_register_desc_calc_reg_offset(&f12->control_reg_desc, 8);
- if (item->reg_size > 14) {
- dev_err(&fn->dev, "F12 control8 should be 14 bytes, not: %ld\n",
- item->reg_size);
+ if (item->reg_size > sizeof(buf)) {
+ dev_err(&fn->dev,
+ "F12 control8 should be no bigger than %zd bytes, not: %ld\n",
+ sizeof(buf), item->reg_size);
return -ENODEV;
}
diff --git a/drivers/input/touchscreen/tsc2004.c b/drivers/input/touchscreen/tsc2004.c
index 7295c198aa08..6fe55d598fac 100644
--- a/drivers/input/touchscreen/tsc2004.c
+++ b/drivers/input/touchscreen/tsc2004.c
@@ -22,6 +22,11 @@
#include <linux/regmap.h>
#include "tsc200x-core.h"
+static const struct input_id tsc2004_input_id = {
+ .bustype = BUS_I2C,
+ .product = 2004,
+};
+
static int tsc2004_cmd(struct device *dev, u8 cmd)
{
u8 tx = TSC200X_CMD | TSC200X_CMD_12BIT | cmd;
@@ -42,7 +47,7 @@ static int tsc2004_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
- return tsc200x_probe(&i2c->dev, i2c->irq, BUS_I2C,
+ return tsc200x_probe(&i2c->dev, i2c->irq, &tsc2004_input_id,
devm_regmap_init_i2c(i2c, &tsc200x_regmap_config),
tsc2004_cmd);
}
diff --git a/drivers/input/touchscreen/tsc2005.c b/drivers/input/touchscreen/tsc2005.c
index b9f593dfd2ef..f2c5f0e47f77 100644
--- a/drivers/input/touchscreen/tsc2005.c
+++ b/drivers/input/touchscreen/tsc2005.c
@@ -24,6 +24,11 @@
#include <linux/regmap.h>
#include "tsc200x-core.h"
+static const struct input_id tsc2005_input_id = {
+ .bustype = BUS_SPI,
+ .product = 2005,
+};
+
static int tsc2005_cmd(struct device *dev, u8 cmd)
{
u8 tx = TSC200X_CMD | TSC200X_CMD_12BIT | cmd;
@@ -62,7 +67,7 @@ static int tsc2005_probe(struct spi_device *spi)
if (error)
return error;
- return tsc200x_probe(&spi->dev, spi->irq, BUS_SPI,
+ return tsc200x_probe(&spi->dev, spi->irq, &tsc2005_input_id,
devm_regmap_init_spi(spi, &tsc200x_regmap_config),
tsc2005_cmd);
}
diff --git a/drivers/input/touchscreen/tsc200x-core.c b/drivers/input/touchscreen/tsc200x-core.c
index 15240c1ee850..dfa7f1c4f545 100644
--- a/drivers/input/touchscreen/tsc200x-core.c
+++ b/drivers/input/touchscreen/tsc200x-core.c
@@ -450,7 +450,7 @@ static void tsc200x_close(struct input_dev *input)
mutex_unlock(&ts->mutex);
}
-int tsc200x_probe(struct device *dev, int irq, __u16 bustype,
+int tsc200x_probe(struct device *dev, int irq, const struct input_id *tsc_id,
struct regmap *regmap,
int (*tsc200x_cmd)(struct device *dev, u8 cmd))
{
@@ -547,9 +547,18 @@ int tsc200x_probe(struct device *dev, int irq, __u16 bustype,
snprintf(ts->phys, sizeof(ts->phys),
"%s/input-ts", dev_name(dev));
- input_dev->name = "TSC200X touchscreen";
+ if (tsc_id->product == 2004) {
+ input_dev->name = "TSC200X touchscreen";
+ } else {
+ input_dev->name = devm_kasprintf(dev, GFP_KERNEL,
+ "TSC%04d touchscreen",
+ tsc_id->product);
+ if (!input_dev->name)
+ return -ENOMEM;
+ }
+
input_dev->phys = ts->phys;
- input_dev->id.bustype = bustype;
+ input_dev->id = *tsc_id;
input_dev->dev.parent = dev;
input_dev->evbit[0] = BIT(EV_ABS) | BIT(EV_KEY);
input_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
diff --git a/drivers/input/touchscreen/tsc200x-core.h b/drivers/input/touchscreen/tsc200x-core.h
index 7a482d102614..49a63a3c6840 100644
--- a/drivers/input/touchscreen/tsc200x-core.h
+++ b/drivers/input/touchscreen/tsc200x-core.h
@@ -70,7 +70,7 @@
extern const struct regmap_config tsc200x_regmap_config;
extern const struct dev_pm_ops tsc200x_pm_ops;
-int tsc200x_probe(struct device *dev, int irq, __u16 bustype,
+int tsc200x_probe(struct device *dev, int irq, const struct input_id *tsc_id,
struct regmap *regmap,
int (*tsc200x_cmd)(struct device *dev, u8 cmd));
int tsc200x_remove(struct device *dev);
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index e62fde3ac431..c5472e3c9231 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -355,8 +355,10 @@ static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user(
goto idata_err;
}
- if (!idata->buf_bytes)
+ if (!idata->buf_bytes) {
+ idata->buf = NULL;
return idata;
+ }
idata->buf = kmalloc(idata->buf_bytes, GFP_KERNEL);
if (!idata->buf) {
@@ -1786,8 +1788,8 @@ static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq,
packed_cmd_hdr = packed->cmd_hdr;
memset(packed_cmd_hdr, 0, sizeof(packed->cmd_hdr));
- packed_cmd_hdr[0] = (packed->nr_entries << 16) |
- (PACKED_CMD_WR << 8) | PACKED_CMD_VER;
+ packed_cmd_hdr[0] = cpu_to_le32((packed->nr_entries << 16) |
+ (PACKED_CMD_WR << 8) | PACKED_CMD_VER);
hdr_blocks = mmc_large_sector(card) ? 8 : 1;
/*
@@ -1801,14 +1803,14 @@ static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq,
((brq->data.blocks * brq->data.blksz) >=
card->ext_csd.data_tag_unit_size);
/* Argument of CMD23 */
- packed_cmd_hdr[(i * 2)] =
+ packed_cmd_hdr[(i * 2)] = cpu_to_le32(
(do_rel_wr ? MMC_CMD23_ARG_REL_WR : 0) |
(do_data_tag ? MMC_CMD23_ARG_TAG_REQ : 0) |
- blk_rq_sectors(prq);
+ blk_rq_sectors(prq));
/* Argument of CMD18 or CMD25 */
- packed_cmd_hdr[((i * 2)) + 1] =
+ packed_cmd_hdr[((i * 2)) + 1] = cpu_to_le32(
mmc_card_blockaddr(card) ?
- blk_rq_pos(prq) : blk_rq_pos(prq) << 9;
+ blk_rq_pos(prq) : blk_rq_pos(prq) << 9);
packed->blocks += blk_rq_sectors(prq);
i++;
}
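
The block.c hunk above fixes the packed WRITE header for big-endian hosts: the header is sent to the card as ordinary data and the card expects its 32-bit words in little-endian order, so each word goes through cpu_to_le32() before it is queued. As a rough user-space illustration (hypothetical helper, not kernel code), storing a word in little-endian order regardless of host byte order looks like this:

#include <stdint.h>

/* Hypothetical stand-in for cpu_to_le32() plus a copy of the header word:
 * write the 32-bit value into the destination buffer least significant
 * byte first, so the layout is identical on little- and big-endian hosts. */
static void put_le32(uint8_t *dst, uint32_t v)
{
	dst[0] = (uint8_t)(v & 0xff);
	dst[1] = (uint8_t)((v >> 8) & 0xff);
	dst[2] = (uint8_t)((v >> 16) & 0xff);
	dst[3] = (uint8_t)((v >> 24) & 0xff);
}
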
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
index 86fac3e86833..c763b404510f 100644
--- a/drivers/mmc/host/pxamci.c
+++ b/drivers/mmc/host/pxamci.c
@@ -789,14 +789,16 @@ static int pxamci_probe(struct platform_device *pdev)
gpio_direction_output(gpio_power,
host->pdata->gpio_power_invert);
}
- if (gpio_is_valid(gpio_ro))
+ if (gpio_is_valid(gpio_ro)) {
ret = mmc_gpio_request_ro(mmc, gpio_ro);
- if (ret) {
- dev_err(&pdev->dev, "Failed requesting gpio_ro %d\n", gpio_ro);
- goto out;
- } else {
- mmc->caps2 |= host->pdata->gpio_card_ro_invert ?
- 0 : MMC_CAP2_RO_ACTIVE_HIGH;
+ if (ret) {
+ dev_err(&pdev->dev, "Failed requesting gpio_ro %d\n",
+ gpio_ro);
+ goto out;
+ } else {
+ mmc->caps2 |= host->pdata->gpio_card_ro_invert ?
+ 0 : MMC_CAP2_RO_ACTIVE_HIGH;
+ }
}
if (gpio_is_valid(gpio_cd))
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 1a51584a382b..d5fb55c0a9d9 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1394,19 +1394,22 @@ static int ns_cmp(void *priv, struct list_head *a, struct list_head *b)
return nsa->ns_id - nsb->ns_id;
}
-static struct nvme_ns *nvme_find_ns(struct nvme_ctrl *ctrl, unsigned nsid)
+static struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid)
{
- struct nvme_ns *ns;
-
- lockdep_assert_held(&ctrl->namespaces_mutex);
+ struct nvme_ns *ns, *ret = NULL;
+ mutex_lock(&ctrl->namespaces_mutex);
list_for_each_entry(ns, &ctrl->namespaces, list) {
- if (ns->ns_id == nsid)
- return ns;
+ if (ns->ns_id == nsid) {
+ kref_get(&ns->kref);
+ ret = ns;
+ break;
+ }
if (ns->ns_id > nsid)
break;
}
- return NULL;
+ mutex_unlock(&ctrl->namespaces_mutex);
+ return ret;
}
static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
@@ -1415,8 +1418,6 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
struct gendisk *disk;
int node = dev_to_node(ctrl->dev);
- lockdep_assert_held(&ctrl->namespaces_mutex);
-
ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node);
if (!ns)
return;
@@ -1457,7 +1458,10 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
if (nvme_revalidate_disk(ns->disk))
goto out_free_disk;
- list_add_tail_rcu(&ns->list, &ctrl->namespaces);
+ mutex_lock(&ctrl->namespaces_mutex);
+ list_add_tail(&ns->list, &ctrl->namespaces);
+ mutex_unlock(&ctrl->namespaces_mutex);
+
kref_get(&ctrl->kref);
if (ns->type == NVME_NS_LIGHTNVM)
return;
@@ -1480,8 +1484,6 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
static void nvme_ns_remove(struct nvme_ns *ns)
{
- lockdep_assert_held(&ns->ctrl->namespaces_mutex);
-
if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags))
return;
@@ -1494,8 +1496,11 @@ static void nvme_ns_remove(struct nvme_ns *ns)
blk_mq_abort_requeue_list(ns->queue);
blk_cleanup_queue(ns->queue);
}
+
+ mutex_lock(&ns->ctrl->namespaces_mutex);
list_del_init(&ns->list);
- synchronize_rcu();
+ mutex_unlock(&ns->ctrl->namespaces_mutex);
+
nvme_put_ns(ns);
}
@@ -1503,10 +1508,11 @@ static void nvme_validate_ns(struct nvme_ctrl *ctrl, unsigned nsid)
{
struct nvme_ns *ns;
- ns = nvme_find_ns(ctrl, nsid);
+ ns = nvme_find_get_ns(ctrl, nsid);
if (ns) {
if (revalidate_disk(ns->disk))
nvme_ns_remove(ns);
+ nvme_put_ns(ns);
} else
nvme_alloc_ns(ctrl, nsid);
}
@@ -1535,9 +1541,11 @@ static int nvme_scan_ns_list(struct nvme_ctrl *ctrl, unsigned nn)
nvme_validate_ns(ctrl, nsid);
while (++prev < nsid) {
- ns = nvme_find_ns(ctrl, prev);
- if (ns)
+ ns = nvme_find_get_ns(ctrl, prev);
+ if (ns) {
nvme_ns_remove(ns);
+ nvme_put_ns(ns);
+ }
}
}
nn -= j;
@@ -1552,8 +1560,6 @@ static void nvme_scan_ns_sequential(struct nvme_ctrl *ctrl, unsigned nn)
struct nvme_ns *ns, *next;
unsigned i;
- lockdep_assert_held(&ctrl->namespaces_mutex);
-
for (i = 1; i <= nn; i++)
nvme_validate_ns(ctrl, i);
@@ -1576,7 +1582,6 @@ static void nvme_scan_work(struct work_struct *work)
if (nvme_identify_ctrl(ctrl, &id))
return;
- mutex_lock(&ctrl->namespaces_mutex);
nn = le32_to_cpu(id->nn);
if (ctrl->vs >= NVME_VS(1, 1) &&
!(ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)) {
@@ -1585,6 +1590,7 @@ static void nvme_scan_work(struct work_struct *work)
}
nvme_scan_ns_sequential(ctrl, nn);
done:
+ mutex_lock(&ctrl->namespaces_mutex);
list_sort(NULL, &ctrl->namespaces, ns_cmp);
mutex_unlock(&ctrl->namespaces_mutex);
kfree(id);
@@ -1604,6 +1610,11 @@ void nvme_queue_scan(struct nvme_ctrl *ctrl)
}
EXPORT_SYMBOL_GPL(nvme_queue_scan);
+/*
+ * This function iterates the namespace list unlocked to allow recovery from
+ * controller failure. It is up to the caller to ensure the namespace list is
+ * not modified by scan work while this function is executing.
+ */
void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
{
struct nvme_ns *ns, *next;
@@ -1617,10 +1628,8 @@ void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
if (ctrl->state == NVME_CTRL_DEAD)
nvme_kill_queues(ctrl);
- mutex_lock(&ctrl->namespaces_mutex);
list_for_each_entry_safe(ns, next, &ctrl->namespaces, list)
nvme_ns_remove(ns);
- mutex_unlock(&ctrl->namespaces_mutex);
}
EXPORT_SYMBOL_GPL(nvme_remove_namespaces);
@@ -1791,11 +1800,8 @@ void nvme_kill_queues(struct nvme_ctrl *ctrl)
{
struct nvme_ns *ns;
- rcu_read_lock();
- list_for_each_entry_rcu(ns, &ctrl->namespaces, list) {
- if (!kref_get_unless_zero(&ns->kref))
- continue;
-
+ mutex_lock(&ctrl->namespaces_mutex);
+ list_for_each_entry(ns, &ctrl->namespaces, list) {
/*
* Revalidating a dead namespace sets capacity to 0. This will
* end buffered writers dirtying pages that can't be synced.
@@ -1806,10 +1812,8 @@ void nvme_kill_queues(struct nvme_ctrl *ctrl)
blk_set_queue_dying(ns->queue);
blk_mq_abort_requeue_list(ns->queue);
blk_mq_start_stopped_hw_queues(ns->queue, true);
-
- nvme_put_ns(ns);
}
- rcu_read_unlock();
+ mutex_unlock(&ctrl->namespaces_mutex);
}
EXPORT_SYMBOL_GPL(nvme_kill_queues);
@@ -1817,8 +1821,8 @@ void nvme_stop_queues(struct nvme_ctrl *ctrl)
{
struct nvme_ns *ns;
- rcu_read_lock();
- list_for_each_entry_rcu(ns, &ctrl->namespaces, list) {
+ mutex_lock(&ctrl->namespaces_mutex);
+ list_for_each_entry(ns, &ctrl->namespaces, list) {
spin_lock_irq(ns->queue->queue_lock);
queue_flag_set(QUEUE_FLAG_STOPPED, ns->queue);
spin_unlock_irq(ns->queue->queue_lock);
@@ -1826,7 +1830,7 @@ void nvme_stop_queues(struct nvme_ctrl *ctrl)
blk_mq_cancel_requeue_work(ns->queue);
blk_mq_stop_hw_queues(ns->queue);
}
- rcu_read_unlock();
+ mutex_unlock(&ctrl->namespaces_mutex);
}
EXPORT_SYMBOL_GPL(nvme_stop_queues);
@@ -1834,13 +1838,13 @@ void nvme_start_queues(struct nvme_ctrl *ctrl)
{
struct nvme_ns *ns;
- rcu_read_lock();
- list_for_each_entry_rcu(ns, &ctrl->namespaces, list) {
+ mutex_lock(&ctrl->namespaces_mutex);
+ list_for_each_entry(ns, &ctrl->namespaces, list) {
queue_flag_clear_unlocked(QUEUE_FLAG_STOPPED, ns->queue);
blk_mq_start_stopped_hw_queues(ns->queue, true);
blk_mq_kick_requeue_list(ns->queue);
}
- rcu_read_unlock();
+ mutex_unlock(&ctrl->namespaces_mutex);
}
EXPORT_SYMBOL_GPL(nvme_start_queues);
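
The nvme hunks above drop the RCU list walks in favour of namespaces_mutex, and nvme_find_get_ns() now returns the namespace with a reference taken while the lock is held, so the caller can keep using it after the lock is released and must pair the lookup with nvme_put_ns(). A generic sketch of that lookup-with-reference pattern, with hypothetical type and function names but the same list/kref/mutex primitives:

#include <linux/kref.h>
#include <linux/list.h>
#include <linux/mutex.h>

struct item {
	struct list_head list;
	struct kref kref;
	unsigned int id;
};

/* Hypothetical example: find an element by id and pin it with a reference
 * before dropping the lock, so the caller may use it lock-free and later
 * release it through the matching kref_put()-based helper. */
static struct item *find_get_item(struct list_head *head, struct mutex *lock,
				  unsigned int id)
{
	struct item *it, *ret = NULL;

	mutex_lock(lock);
	list_for_each_entry(it, head, list) {
		if (it->id == id) {
			kref_get(&it->kref);	/* taken under the lock */
			ret = it;
			break;
		}
	}
	mutex_unlock(lock);
	return ret;
}
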
diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
index f973bfce5d08..1e93a37e27f0 100644
--- a/drivers/tty/vt/keyboard.c
+++ b/drivers/tty/vt/keyboard.c
@@ -366,34 +366,22 @@ static void to_utf8(struct vc_data *vc, uint c)
static void do_compute_shiftstate(void)
{
- unsigned int i, j, k, sym, val;
+ unsigned int k, sym, val;
shift_state = 0;
memset(shift_down, 0, sizeof(shift_down));
- for (i = 0; i < ARRAY_SIZE(key_down); i++) {
-
- if (!key_down[i])
+ for_each_set_bit(k, key_down, min(NR_KEYS, KEY_CNT)) {
+ sym = U(key_maps[0][k]);
+ if (KTYP(sym) != KT_SHIFT && KTYP(sym) != KT_SLOCK)
continue;
- k = i * BITS_PER_LONG;
-
- for (j = 0; j < BITS_PER_LONG; j++, k++) {
-
- if (!test_bit(k, key_down))
- continue;
+ val = KVAL(sym);
+ if (val == KVAL(K_CAPSSHIFT))
+ val = KVAL(K_SHIFT);
- sym = U(key_maps[0][k]);
- if (KTYP(sym) != KT_SHIFT && KTYP(sym) != KT_SLOCK)
- continue;
-
- val = KVAL(sym);
- if (val == KVAL(K_CAPSSHIFT))
- val = KVAL(K_SHIFT);
-
- shift_down[val]++;
- shift_state |= (1 << val);
- }
+ shift_down[val]++;
+ shift_state |= BIT(val);
}
}