author    Dave Airlie  2016-11-30 14:21:35 +1000
committer Dave Airlie  2016-11-30 14:21:35 +1000
commit    35838b470ab592071087a46804d373c5571dc032 (patch)
tree      ddad21fc37545addfbf642bf388ba6221375939e
parent    63207455963053ca212e61c75f43b3502ea69f0e (diff)
parent    e9cbc4bd0140e1d4e0172e2fe8fe07ba278e5980 (diff)
Merge tag 'drm-intel-next-2016-11-21' of git://anongit.freedesktop.org/git/drm-intel into drm-next
Final 4.10 updates:

- fine-tune fb flushing and tracking (Chris Wilson)
- refactor state check dumper code for more conciseness (Tvrtko)
- roll out dev_priv all over the place (Tvrtko)
- finally remove __i915__ magic macro (Tvrtko)
- more gvt bugfixes (Zhenyu & team)
- better opregion CADL handling (Jani)
- refactor/clean up wm programming (Maarten)
- gpu scheduler + priority boosting for flips as first user (Chris Wilson)
- make fbc use more atomic (Paulo)
- initial kvm-gvt framework, but not yet complete (Zhenyu & team)

* tag 'drm-intel-next-2016-11-21' of git://anongit.freedesktop.org/git/drm-intel: (127 commits)
  drm/i915: Update DRIVER_DATE to 20161121
  drm/i915: Skip final clflush if LLC is coherent
  drm/i915: Always flush the dirty CPU cache when pinning the scanout
  drm/i915: Don't touch NULL sg on i915_gem_object_get_pages_gtt() error
  drm/i915: Check that each request phase is completed before retiring
  drm/i915: i915_pages_create_for_stolen should return err ptr
  drm/i915: Enable support for nonblocking modeset
  drm/i915: Be more careful to drop the GT wakeref
  drm/i915: Move frontbuffer CS write tracking from ggtt vma to object
  drm/i915: Only dump dp_m2_n2 configuration when drrs is used
  drm/i915: don't leak global_timeline
  drm/i915: add i915_address_space_fini
  drm/i915: Add a few more sanity checks for stolen handling
  drm/i915: Waterproof verification of gen9 forcewake table ranges
  drm/i915: Introduce enableddisabled helper
  drm/i915: Only dump possible panel fitter config for the platform
  drm/i915: Only dump scaler config where supported
  drm/i915: Compact a few pipe config debug lines
  drm/i915: Don't log pipe config kernel pointer and duplicated pipe name
  drm/i915: Dump FDI config only where applicable
  ...
-rw-r--r--  arch/x86/include/asm/kvm_page_track.h  14
-rw-r--r--  arch/x86/kvm/mmu.c  11
-rw-r--r--  arch/x86/kvm/page_track.c  31
-rw-r--r--  arch/x86/kvm/x86.c  2
-rw-r--r--  drivers/gpu/drm/drm_edid.c  26
-rw-r--r--  drivers/gpu/drm/i915/Kconfig  26
-rw-r--r--  drivers/gpu/drm/i915/Makefile  3
-rw-r--r--  drivers/gpu/drm/i915/gvt/Makefile  7
-rw-r--r--  drivers/gpu/drm/i915/gvt/cfg_space.c  12
-rw-r--r--  drivers/gpu/drm/i915/gvt/cmd_parser.c  7
-rw-r--r--  drivers/gpu/drm/i915/gvt/edid.c  3
-rw-r--r--  drivers/gpu/drm/i915/gvt/edid.h  2
-rw-r--r--  drivers/gpu/drm/i915/gvt/execlist.c  24
-rw-r--r--  drivers/gpu/drm/i915/gvt/execlist.h  2
-rw-r--r--  drivers/gpu/drm/i915/gvt/gtt.c  202
-rw-r--r--  drivers/gpu/drm/i915/gvt/gtt.h  40
-rw-r--r--  drivers/gpu/drm/i915/gvt/gvt.c  29
-rw-r--r--  drivers/gpu/drm/i915/gvt/gvt.h  63
-rw-r--r--  drivers/gpu/drm/i915/gvt/handlers.c  73
-rw-r--r--  drivers/gpu/drm/i915/gvt/hypercall.h  14
-rw-r--r--  drivers/gpu/drm/i915/gvt/kvmgt.c  597
-rw-r--r--  drivers/gpu/drm/i915/gvt/mmio.c  6
-rw-r--r--  drivers/gpu/drm/i915/gvt/mmio.h  9
-rw-r--r--  drivers/gpu/drm/i915/gvt/mpt.h  55
-rw-r--r--  drivers/gpu/drm/i915/gvt/opregion.c  34
-rw-r--r--  drivers/gpu/drm/i915/gvt/render.c  2
-rw-r--r--  drivers/gpu/drm/i915/gvt/sched_policy.c  2
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.c  51
-rw-r--r--  drivers/gpu/drm/i915/gvt/vgpu.c  180
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c  18
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c  32
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h  543
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c  608
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.h  2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_context.c  1
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c  13
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_fence_reg.c (renamed from drivers/gpu/drm/i915/i915_gem_fence.c)  12
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_fence_reg.h  51
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c  579
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.h  235
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_object.h  338
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_request.c  206
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_request.h  74
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_stolen.c  39
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c  12
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_timeline.c  33
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_timeline.h  1
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_userptr.c  3
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.c  39
-rw-r--r--  drivers/gpu/drm/i915/i915_guc_submission.c  15
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c  32
-rw-r--r--  drivers/gpu/drm/i915/i915_params.c  9
-rw-r--r--  drivers/gpu/drm/i915/i915_params.h  2
-rw-r--r--  drivers/gpu/drm/i915/i915_pci.c  8
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h  7
-rw-r--r--  drivers/gpu/drm/i915/i915_suspend.c  24
-rw-r--r--  drivers/gpu/drm/i915/i915_sw_fence.c  7
-rw-r--r--  drivers/gpu/drm/i915/i915_sw_fence.h  17
-rw-r--r--  drivers/gpu/drm/i915/i915_vma.c  638
-rw-r--r--  drivers/gpu/drm/i915/i915_vma.h  341
-rw-r--r--  drivers/gpu/drm/i915/intel_atomic_plane.c  9
-rw-r--r--  drivers/gpu/drm/i915/intel_audio.c  17
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c  30
-rw-r--r--  drivers/gpu/drm/i915/intel_breadcrumbs.c  31
-rw-r--r--  drivers/gpu/drm/i915/intel_color.c  36
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c  23
-rw-r--r--  drivers/gpu/drm/i915/intel_ddi.c  12
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c  598
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c  52
-rw-r--r--  drivers/gpu/drm/i915/intel_dp_mst.c  1
-rw-r--r--  drivers/gpu/drm/i915/intel_dpll_mgr.c  5
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h  28
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi_panel_vbt.c  8
-rw-r--r--  drivers/gpu/drm/i915/intel_engine_cs.c  7
-rw-r--r--  drivers/gpu/drm/i915/intel_fbc.c  83
-rw-r--r--  drivers/gpu/drm/i915/intel_fbdev.c  10
-rw-r--r--  drivers/gpu/drm/i915/intel_frontbuffer.h  5
-rw-r--r--  drivers/gpu/drm/i915/intel_guc_loader.c  10
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c  50
-rw-r--r--  drivers/gpu/drm/i915/intel_hotplug.c  2
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.c  187
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c  7
-rw-r--r--  drivers/gpu/drm/i915/intel_opregion.c  139
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c  12
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c  170
-rw-r--r--  drivers/gpu/drm/i915/intel_psr.c  6
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c  2
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h  16
-rw-r--r--  drivers/gpu/drm/i915/intel_runtime_pm.c  4
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c  11
-rw-r--r--  drivers/gpu/drm/i915/intel_sprite.c  59
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c  5
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.c  121
-rw-r--r--  drivers/gpu/drm/i915/intel_vbt_defs.h  3
-rw-r--r--  include/drm/drm_edid.h  1
-rw-r--r--  include/uapi/drm/i915_drm.h  5
96 files changed, 4442 insertions, 2789 deletions
diff --git a/arch/x86/include/asm/kvm_page_track.h b/arch/x86/include/asm/kvm_page_track.h
index c2b8d24a235c..d74747b031ec 100644
--- a/arch/x86/include/asm/kvm_page_track.h
+++ b/arch/x86/include/asm/kvm_page_track.h
@@ -29,9 +29,20 @@ struct kvm_page_track_notifier_node {
* @gpa: the physical address written by guest.
* @new: the data was written to the address.
* @bytes: the written length.
+ * @node: this node
*/
void (*track_write)(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new,
- int bytes);
+ int bytes, struct kvm_page_track_notifier_node *node);
+ /*
+ * Called when a memory slot is being moved or removed; users can
+ * drop write-protection for the pages in that memory slot.
+ *
+ * @kvm: the kvm where the memory slot is being moved or removed
+ * @slot: the memory slot being moved or removed
+ * @node: this node
+ */
+ void (*track_flush_slot)(struct kvm *kvm, struct kvm_memory_slot *slot,
+ struct kvm_page_track_notifier_node *node);
};
void kvm_page_track_init(struct kvm *kvm);
@@ -58,4 +69,5 @@ kvm_page_track_unregister_notifier(struct kvm *kvm,
struct kvm_page_track_notifier_node *n);
void kvm_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new,
int bytes);
+void kvm_page_track_flush_slot(struct kvm *kvm, struct kvm_memory_slot *slot);
#endif
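
The extended notifier interface above is consumed by registering a kvm_page_track_notifier_node with both callbacks wired up. A minimal sketch, assuming a hypothetical consumer module (the callback signatures and the register call come from the header above; the my_* names are illustrative only):

    static void my_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new,
                               int bytes, struct kvm_page_track_notifier_node *node)
    {
            /* emulate the intercepted write, e.g. forward it to a device model */
    }

    static void my_track_flush_slot(struct kvm *kvm, struct kvm_memory_slot *slot,
                                    struct kvm_page_track_notifier_node *node)
    {
            /* drop write-protection for any pages this node tracks in @slot */
    }

    static struct kvm_page_track_notifier_node my_node = {
            .track_write      = my_track_write,
            .track_flush_slot = my_track_flush_slot,
    };

    /* hypothetical init path */
    kvm_page_track_register_notifier(kvm, &my_node);

The kvmgt.c addition later in this merge follows exactly this pattern via its track_node member.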
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index d9c7e986b4e4..87c5880ba3b7 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -4405,7 +4405,8 @@ static u64 *get_written_sptes(struct kvm_mmu_page *sp, gpa_t gpa, int *nspte)
}
static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
- const u8 *new, int bytes)
+ const u8 *new, int bytes,
+ struct kvm_page_track_notifier_node *node)
{
gfn_t gfn = gpa >> PAGE_SHIFT;
struct kvm_mmu_page *sp;
@@ -4617,11 +4618,19 @@ void kvm_mmu_setup(struct kvm_vcpu *vcpu)
init_kvm_mmu(vcpu);
}
+static void kvm_mmu_invalidate_zap_pages_in_memslot(struct kvm *kvm,
+ struct kvm_memory_slot *slot,
+ struct kvm_page_track_notifier_node *node)
+{
+ kvm_mmu_invalidate_zap_all_pages(kvm);
+}
+
void kvm_mmu_init_vm(struct kvm *kvm)
{
struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker;
node->track_write = kvm_mmu_pte_write;
+ node->track_flush_slot = kvm_mmu_invalidate_zap_pages_in_memslot;
kvm_page_track_register_notifier(kvm, node);
}
diff --git a/arch/x86/kvm/page_track.c b/arch/x86/kvm/page_track.c
index b431539c3714..4a1c13eaa518 100644
--- a/arch/x86/kvm/page_track.c
+++ b/arch/x86/kvm/page_track.c
@@ -106,6 +106,7 @@ void kvm_slot_page_track_add_page(struct kvm *kvm,
if (kvm_mmu_slot_gfn_write_protect(kvm, slot, gfn))
kvm_flush_remote_tlbs(kvm);
}
+EXPORT_SYMBOL_GPL(kvm_slot_page_track_add_page);
/*
* remove the guest page from the tracking pool which stops the interception
@@ -135,6 +136,7 @@ void kvm_slot_page_track_remove_page(struct kvm *kvm,
*/
kvm_mmu_gfn_allow_lpage(slot, gfn);
}
+EXPORT_SYMBOL_GPL(kvm_slot_page_track_remove_page);
/*
* check if the corresponding access on the specified guest page is tracked.
@@ -181,6 +183,7 @@ kvm_page_track_register_notifier(struct kvm *kvm,
hlist_add_head_rcu(&n->node, &head->track_notifier_list);
spin_unlock(&kvm->mmu_lock);
}
+EXPORT_SYMBOL_GPL(kvm_page_track_register_notifier);
/*
* stop receiving the event interception. It is the opposed operation of
@@ -199,6 +202,7 @@ kvm_page_track_unregister_notifier(struct kvm *kvm,
spin_unlock(&kvm->mmu_lock);
synchronize_srcu(&head->track_srcu);
}
+EXPORT_SYMBOL_GPL(kvm_page_track_unregister_notifier);
/*
* Notify the node that write access is intercepted and write emulation is
@@ -222,6 +226,31 @@ void kvm_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new,
idx = srcu_read_lock(&head->track_srcu);
hlist_for_each_entry_rcu(n, &head->track_notifier_list, node)
if (n->track_write)
- n->track_write(vcpu, gpa, new, bytes);
+ n->track_write(vcpu, gpa, new, bytes, n);
+ srcu_read_unlock(&head->track_srcu, idx);
+}
+
+/*
+ * Notify the node that a memory slot is being removed or moved so that it
+ * can drop write-protection for the pages in the memory slot.
+ *
+ * The node should figure out by itself whether it has any write-protected
+ * pages in this slot.
+ */
+void kvm_page_track_flush_slot(struct kvm *kvm, struct kvm_memory_slot *slot)
+{
+ struct kvm_page_track_notifier_head *head;
+ struct kvm_page_track_notifier_node *n;
+ int idx;
+
+ head = &kvm->arch.track_notifier_head;
+
+ if (hlist_empty(&head->track_notifier_list))
+ return;
+
+ idx = srcu_read_lock(&head->track_srcu);
+ hlist_for_each_entry_rcu(n, &head->track_notifier_list, node)
+ if (n->track_flush_slot)
+ n->track_flush_slot(kvm, slot, n);
srcu_read_unlock(&head->track_srcu, idx);
}
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 3017de0431bd..7e30c720d0c5 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -8155,7 +8155,7 @@ void kvm_arch_flush_shadow_all(struct kvm *kvm)
void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
struct kvm_memory_slot *slot)
{
- kvm_mmu_invalidate_zap_all_pages(kvm);
+ kvm_page_track_flush_slot(kvm, slot);
}
static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 7eec18925b70..6798c3ad9d53 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -3611,32 +3611,6 @@ int drm_av_sync_delay(struct drm_connector *connector,
EXPORT_SYMBOL(drm_av_sync_delay);
/**
- * drm_select_eld - select one ELD from multiple HDMI/DP sinks
- * @encoder: the encoder just changed display mode
- *
- * It's possible for one encoder to be associated with multiple HDMI/DP sinks.
- * The policy is now hard coded to simply use the first HDMI/DP sink's ELD.
- *
- * Return: The connector associated with the first HDMI/DP sink that has ELD
- * attached to it.
- */
-struct drm_connector *drm_select_eld(struct drm_encoder *encoder)
-{
- struct drm_connector *connector;
- struct drm_device *dev = encoder->dev;
-
- WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
- WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
-
- drm_for_each_connector(connector, dev)
- if (connector->encoder == encoder && connector->eld[0])
- return connector;
-
- return NULL;
-}
-EXPORT_SYMBOL(drm_select_eld);
-
-/**
* drm_detect_hdmi_monitor - detect whether monitor is HDMI
* @edid: monitor EDID information
*
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index df96aed6975a..5ddde7349fbd 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -36,15 +36,20 @@ config DRM_I915
If "M" is selected, the module will be called i915.
-config DRM_I915_PRELIMINARY_HW_SUPPORT
- bool "Enable preliminary support for prerelease Intel hardware by default"
+config DRM_I915_ALPHA_SUPPORT
+ bool "Enable alpha quality support for new Intel hardware by default"
depends on DRM_I915
default n
help
- Choose this option if you have prerelease Intel hardware and want the
- i915 driver to support it by default. You can enable such support at
- runtime with the module option i915.preliminary_hw_support=1; this
- option changes the default for that module option.
+ Choose this option if you have new Intel hardware and want to enable
+ the alpha quality i915 driver support for the hardware in this kernel
+ version. You can also enable the support at runtime using the module
+ parameter i915.alpha_support=1; this option changes the default for
+ that module parameter.
+
+ It is recommended to upgrade to a kernel version with proper support
+ as soon as it is available. Generally fixes for platforms with alpha
+ support are not backported to older kernels.
If in doubt, say "N".
@@ -107,6 +112,15 @@ config DRM_I915_GVT
If in doubt, say "N".
+config DRM_I915_GVT_KVMGT
+ tristate "Enable KVM/VFIO support for Intel GVT-g"
+ depends on DRM_I915_GVT
+ depends on KVM
+ default n
+ help
+ Choose this option if you want to enable KVMGT support for
+ Intel GVT-g.
+
menu "drm/i915 Debugging"
depends on DRM_I915
depends on EXPERT
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 0857e5035f4d..3dea46af9fe6 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -33,7 +33,7 @@ i915-y += i915_cmd_parser.o \
i915_gem_dmabuf.o \
i915_gem_evict.o \
i915_gem_execbuffer.o \
- i915_gem_fence.o \
+ i915_gem_fence_reg.o \
i915_gem_gtt.o \
i915_gem_internal.o \
i915_gem.o \
@@ -45,6 +45,7 @@ i915-y += i915_cmd_parser.o \
i915_gem_timeline.o \
i915_gem_userptr.o \
i915_trace_points.o \
+ i915_vma.o \
intel_breadcrumbs.o \
intel_engine_cs.o \
intel_hangcheck.o \
diff --git a/drivers/gpu/drm/i915/gvt/Makefile b/drivers/gpu/drm/i915/gvt/Makefile
index 34ea4776af70..8a46a7f31d53 100644
--- a/drivers/gpu/drm/i915/gvt/Makefile
+++ b/drivers/gpu/drm/i915/gvt/Makefile
@@ -3,5 +3,8 @@ GVT_SOURCE := gvt.o aperture_gm.o handlers.o vgpu.o trace_points.o firmware.o \
interrupt.o gtt.o cfg_space.o opregion.o mmio.o display.o edid.o \
execlist.o scheduler.o sched_policy.o render.o cmd_parser.o
-ccflags-y += -I$(src) -I$(src)/$(GVT_DIR) -Wall
-i915-y += $(addprefix $(GVT_DIR)/, $(GVT_SOURCE))
+ccflags-y += -I$(src) -I$(src)/$(GVT_DIR) -Wall
+i915-y += $(addprefix $(GVT_DIR)/, $(GVT_SOURCE))
+
+CFLAGS_kvmgt.o := -Wno-unused-function
+obj-$(CONFIG_DRM_I915_GVT_KVMGT) += $(GVT_DIR)/kvmgt.o
diff --git a/drivers/gpu/drm/i915/gvt/cfg_space.c b/drivers/gpu/drm/i915/gvt/cfg_space.c
index 4c687740f5f1..db516382a4d4 100644
--- a/drivers/gpu/drm/i915/gvt/cfg_space.c
+++ b/drivers/gpu/drm/i915/gvt/cfg_space.c
@@ -47,11 +47,9 @@ enum {
* Returns:
* Zero on success, negative error code if failed.
*/
-int intel_vgpu_emulate_cfg_read(void *__vgpu, unsigned int offset,
+int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
- struct intel_vgpu *vgpu = __vgpu;
-
if (WARN_ON(bytes > 4))
return -EINVAL;
@@ -82,9 +80,8 @@ static int map_aperture(struct intel_vgpu *vgpu, bool map)
ret = intel_gvt_hypervisor_map_gfn_to_mfn(vgpu, first_gfn,
first_mfn,
- vgpu_aperture_sz(vgpu)
- >> PAGE_SHIFT, map,
- GVT_MAP_APERTURE);
+ vgpu_aperture_sz(vgpu) >>
+ PAGE_SHIFT, map);
if (ret)
return ret;
@@ -235,10 +232,9 @@ static int emulate_pci_bar_write(struct intel_vgpu *vgpu, unsigned int offset,
* Returns:
* Zero on success, negative error code if failed.
*/
-int intel_vgpu_emulate_cfg_write(void *__vgpu, unsigned int offset,
+int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
- struct intel_vgpu *vgpu = __vgpu;
int ret;
if (WARN_ON(bytes > 4))
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 0084ece8d8ff..d26a092c70e8 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -1418,8 +1418,8 @@ static int cmd_handler_mi_op_2e(struct parser_exec_state *s)
static int cmd_handler_mi_op_2f(struct parser_exec_state *s)
{
int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
- int op_size = ((1 << (cmd_val(s, 0) & GENMASK(20, 19) >> 19)) *
- sizeof(u32));
+ int op_size = (1 << ((cmd_val(s, 0) & GENMASK(20, 19)) >> 19)) *
+ sizeof(u32);
unsigned long gma, gma_high;
int ret = 0;
@@ -2537,7 +2537,8 @@ static int scan_workload(struct intel_vgpu_workload *workload)
s.rb_va = workload->shadow_ring_buffer_va;
s.workload = workload;
- if (bypass_scan_mask & (1 << workload->ring_id))
+ if ((bypass_scan_mask & (1 << workload->ring_id)) ||
+ gma_head == gma_tail)
return 0;
ret = ip_gma_set(&s, gma_head);
diff --git a/drivers/gpu/drm/i915/gvt/edid.c b/drivers/gpu/drm/i915/gvt/edid.c
index 7e1da1c563ca..bda85dff7b2a 100644
--- a/drivers/gpu/drm/i915/gvt/edid.c
+++ b/drivers/gpu/drm/i915/gvt/edid.c
@@ -502,8 +502,7 @@ void intel_gvt_i2c_handle_aux_ch_write(struct intel_vgpu *vgpu,
* ACK of I2C_WRITE
* returned byte if it is READ
*/
-
- aux_data_for_write |= (GVT_AUX_I2C_REPLY_ACK & 0xff) << 24;
+ aux_data_for_write |= GVT_AUX_I2C_REPLY_ACK << 24;
vgpu_vreg(vgpu, offset + 4) = aux_data_for_write;
}
diff --git a/drivers/gpu/drm/i915/gvt/edid.h b/drivers/gpu/drm/i915/gvt/edid.h
index de366b1d5196..f6dfc8b795ec 100644
--- a/drivers/gpu/drm/i915/gvt/edid.h
+++ b/drivers/gpu/drm/i915/gvt/edid.h
@@ -44,7 +44,7 @@
#define GVT_AUX_I2C_READ 0x1
#define GVT_AUX_I2C_STATUS 0x2
#define GVT_AUX_I2C_MOT 0x4
-#define GVT_AUX_I2C_REPLY_ACK (0x0 << 6)
+#define GVT_AUX_I2C_REPLY_ACK 0x0
struct intel_vgpu_edid_data {
bool data_valid;
diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c
index c1f6019d8895..f32bb6f6495c 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.c
+++ b/drivers/gpu/drm/i915/gvt/execlist.c
@@ -838,23 +838,21 @@ int intel_vgpu_init_execlist(struct intel_vgpu *vgpu)
}
void intel_vgpu_reset_execlist(struct intel_vgpu *vgpu,
- unsigned long ring_bitmap)
+ unsigned long engine_mask)
{
- int bit;
- struct list_head *pos, *n;
- struct intel_vgpu_workload *workload = NULL;
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct intel_engine_cs *engine;
+ struct intel_vgpu_workload *pos, *n;
+ unsigned int tmp;
- for_each_set_bit(bit, &ring_bitmap, sizeof(ring_bitmap) * 8) {
- if (bit >= I915_NUM_ENGINES)
- break;
+ for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
/* free the unsubmitted workloads in the queue */
- list_for_each_safe(pos, n, &vgpu->workload_q_head[bit]) {
- workload = container_of(pos,
- struct intel_vgpu_workload, list);
- list_del_init(&workload->list);
- free_workload(workload);
+ list_for_each_entry_safe(pos, n,
+ &vgpu->workload_q_head[engine->id], list) {
+ list_del_init(&pos->list);
+ free_workload(pos);
}
- init_vgpu_execlist(vgpu, bit);
+ init_vgpu_execlist(vgpu, engine->id);
}
}
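
With the switch from a raw ring bitmap to an engine_mask walked with for_each_engine_masked(), callers now pass standard i915 engine masks. A hedged usage sketch (ALL_ENGINES and ENGINE_MASK() are the usual i915 helpers of this era; the call sites are illustrative, not taken from this series):

    /* reset execlist state on every engine */
    intel_vgpu_reset_execlist(vgpu, ALL_ENGINES);

    /* or only the render engine */
    intel_vgpu_reset_execlist(vgpu, ENGINE_MASK(RCS));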
diff --git a/drivers/gpu/drm/i915/gvt/execlist.h b/drivers/gpu/drm/i915/gvt/execlist.h
index 635f31c6dcc1..7eced40a1e30 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.h
+++ b/drivers/gpu/drm/i915/gvt/execlist.h
@@ -183,6 +183,6 @@ int intel_vgpu_init_execlist(struct intel_vgpu *vgpu);
int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id);
void intel_vgpu_reset_execlist(struct intel_vgpu *vgpu,
- unsigned long ring_bitmap);
+ unsigned long engine_mask);
#endif /*_GVT_EXECLIST_H_*/
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 6554da9f9f5b..7eaaf1c9ed2b 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -138,36 +138,6 @@ int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index,
memcpy(&(e)->val64, &v, sizeof(v)); \
} while (0)
-enum {
- GTT_TYPE_INVALID = -1,
-
- GTT_TYPE_GGTT_PTE,
-
- GTT_TYPE_PPGTT_PTE_4K_ENTRY,
- GTT_TYPE_PPGTT_PTE_2M_ENTRY,
- GTT_TYPE_PPGTT_PTE_1G_ENTRY,
-
- GTT_TYPE_PPGTT_PTE_ENTRY,
-
- GTT_TYPE_PPGTT_PDE_ENTRY,
- GTT_TYPE_PPGTT_PDP_ENTRY,
- GTT_TYPE_PPGTT_PML4_ENTRY,
-
- GTT_TYPE_PPGTT_ROOT_ENTRY,
-
- GTT_TYPE_PPGTT_ROOT_L3_ENTRY,
- GTT_TYPE_PPGTT_ROOT_L4_ENTRY,
-
- GTT_TYPE_PPGTT_ENTRY,
-
- GTT_TYPE_PPGTT_PTE_PT,
- GTT_TYPE_PPGTT_PDE_PT,
- GTT_TYPE_PPGTT_PDP_PT,
- GTT_TYPE_PPGTT_PML4_PT,
-
- GTT_TYPE_MAX,
-};
-
/*
* Mappings between GTT_TYPE* enumerations.
* Following information can be found according to the given type:
@@ -842,13 +812,18 @@ static int ppgtt_invalidate_shadow_page_by_shadow_entry(struct intel_vgpu *vgpu,
{
struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
struct intel_vgpu_ppgtt_spt *s;
+ intel_gvt_gtt_type_t cur_pt_type;
if (WARN_ON(!gtt_type_is_pt(get_next_pt_type(e->type))))
return -EINVAL;
- if (ops->get_pfn(e) == vgpu->gtt.scratch_page_mfn)
- return 0;
-
+ if (e->type != GTT_TYPE_PPGTT_ROOT_L3_ENTRY
+ && e->type != GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
+ cur_pt_type = get_next_pt_type(e->type) + 1;
+ if (ops->get_pfn(e) ==
+ vgpu->gtt.scratch_pt[cur_pt_type].page_mfn)
+ return 0;
+ }
s = ppgtt_find_shadow_page(vgpu, ops->get_pfn(e));
if (!s) {
gvt_err("vgpu%d: fail to find shadow page: mfn: 0x%lx\n",
@@ -999,7 +974,7 @@ fail:
}
static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_guest_page *gpt,
- struct intel_gvt_gtt_entry *we, unsigned long index)
+ unsigned long index)
{
struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt);
struct intel_vgpu_shadow_page *sp = &spt->shadow_page;
@@ -1008,34 +983,35 @@ static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_guest_page *gpt,
struct intel_gvt_gtt_entry e;
int ret;
- trace_gpt_change(spt->vgpu->id, "remove", spt, sp->type,
- we->val64, index);
-
ppgtt_get_shadow_entry(spt, &e, index);
+
+ trace_gpt_change(spt->vgpu->id, "remove", spt, sp->type, e.val64,
+ index);
+
if (!ops->test_present(&e))
return 0;
- if (ops->get_pfn(&e) == vgpu->gtt.scratch_page_mfn)
+ if (ops->get_pfn(&e) == vgpu->gtt.scratch_pt[sp->type].page_mfn)
return 0;
- if (gtt_type_is_pt(get_next_pt_type(we->type))) {
- struct intel_vgpu_guest_page *g =
- intel_vgpu_find_guest_page(vgpu, ops->get_pfn(we));
- if (!g) {
+ if (gtt_type_is_pt(get_next_pt_type(e.type))) {
+ struct intel_vgpu_ppgtt_spt *s =
+ ppgtt_find_shadow_page(vgpu, ops->get_pfn(&e));
+ if (!s) {
gvt_err("fail to find guest page\n");
ret = -ENXIO;
goto fail;
}
- ret = ppgtt_invalidate_shadow_page(guest_page_to_ppgtt_spt(g));
+ ret = ppgtt_invalidate_shadow_page(s);
if (ret)
goto fail;
}
- ops->set_pfn(&e, vgpu->gtt.scratch_page_mfn);
+ ops->set_pfn(&e, vgpu->gtt.scratch_pt[sp->type].page_mfn);
ppgtt_set_shadow_entry(spt, &e, index);
return 0;
fail:
gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d\n",
- vgpu->id, spt, we->val64, we->type);
+ vgpu->id, spt, e.val64, e.type);
return ret;
}
@@ -1256,23 +1232,16 @@ static int ppgtt_handle_guest_write_page_table(
struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt);
struct intel_vgpu *vgpu = spt->vgpu;
struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
- struct intel_gvt_gtt_entry ge;
- int old_present, new_present;
int ret;
+ int new_present;
- ppgtt_get_guest_entry(spt, &ge, index);
-
- old_present = ops->test_present(&ge);
new_present = ops->test_present(we);
- ppgtt_set_guest_entry(spt, we, index);
+ ret = ppgtt_handle_guest_entry_removal(gpt, index);
+ if (ret)
+ goto fail;
- if (old_present) {
- ret = ppgtt_handle_guest_entry_removal(gpt, &ge, index);
- if (ret)
- goto fail;
- }
if (new_present) {
ret = ppgtt_handle_guest_entry_add(gpt, we, index);
if (ret)
@@ -1318,7 +1287,7 @@ int intel_vgpu_flush_post_shadow(struct intel_vgpu *vgpu)
{
struct list_head *pos, *n;
struct intel_vgpu_ppgtt_spt *spt;
- struct intel_gvt_gtt_entry ge, e;
+ struct intel_gvt_gtt_entry ge;
unsigned long index;
int ret;
@@ -1329,9 +1298,6 @@ int intel_vgpu_flush_post_shadow(struct intel_vgpu *vgpu)
for_each_set_bit(index, spt->post_shadow_bitmap,
GTT_ENTRY_NUM_IN_ONE_PAGE) {
ppgtt_get_guest_entry(spt, &ge, index);
- e = ge;
- e.val64 = 0;
- ppgtt_set_guest_entry(spt, &e, index);
ret = ppgtt_handle_guest_write_page_table(
&spt->guest_page, &ge, index);
@@ -1359,8 +1325,6 @@ static int ppgtt_handle_guest_write_page_table_bytes(void *gp,
index = (pa & (PAGE_SIZE - 1)) >> info->gtt_entry_size_shift;
ppgtt_get_guest_entry(spt, &we, index);
- memcpy((void *)&we.val64 + (pa & (info->gtt_entry_size - 1)),
- p_data, bytes);
ops->test_pse(&we);
@@ -1369,19 +1333,13 @@ static int ppgtt_handle_guest_write_page_table_bytes(void *gp,
if (ret)
return ret;
} else {
- struct intel_gvt_gtt_entry ge;
-
- ppgtt_get_guest_entry(spt, &ge, index);
-
if (!test_bit(index, spt->post_shadow_bitmap)) {
- ret = ppgtt_handle_guest_entry_removal(gpt,
- &ge, index);
+ ret = ppgtt_handle_guest_entry_removal(gpt, index);
if (ret)
return ret;
}
ppgtt_set_post_shadow(spt, index);
- ppgtt_set_guest_entry(spt, &we, index);
}
if (!enable_out_of_sync)
@@ -1921,47 +1879,101 @@ int intel_vgpu_emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
return ret;
}
-static int create_scratch_page(struct intel_vgpu *vgpu)
+static int alloc_scratch_pages(struct intel_vgpu *vgpu,
+ intel_gvt_gtt_type_t type)
{
struct intel_vgpu_gtt *gtt = &vgpu->gtt;
- void *p;
- void *vaddr;
+ struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
+ int page_entry_num = GTT_PAGE_SIZE >>
+ vgpu->gvt->device_info.gtt_entry_size_shift;
+ struct page *scratch_pt;
unsigned long mfn;
+ int i;
+ void *p;
+
+ if (WARN_ON(type < GTT_TYPE_PPGTT_PTE_PT || type >= GTT_TYPE_MAX))
+ return -EINVAL;
- gtt->scratch_page = alloc_page(GFP_KERNEL);
- if (!gtt->scratch_page) {
- gvt_err("Failed to allocate scratch page.\n");
+ scratch_pt = alloc_page(GFP_KERNEL | GFP_ATOMIC | __GFP_ZERO);
+ if (!scratch_pt) {
+ gvt_err("fail to allocate scratch page\n");
return -ENOMEM;
}
- /* set to zero */
- p = kmap_atomic(gtt->scratch_page);
- memset(p, 0, PAGE_SIZE);
+ p = kmap_atomic(scratch_pt);
+ mfn = intel_gvt_hypervisor_virt_to_mfn(p);
+ if (mfn == INTEL_GVT_INVALID_ADDR) {
+ gvt_err("fail to translate vaddr:0x%llx\n", (u64)p);
+ kunmap_atomic(p);
+ __free_page(scratch_pt);
+ return -EFAULT;
+ }
+ gtt->scratch_pt[type].page_mfn = mfn;
+ gtt->scratch_pt[type].page = scratch_pt;
+ gvt_dbg_mm("vgpu%d create scratch_pt: type %d mfn=0x%lx\n",
+ vgpu->id, type, mfn);
+
+ /* Build the tree by fully filling the scratch pt with entries that
+ * point to the next-level scratch pt or scratch page. scratch_pt[type]
+ * indicates the scratch pt/scratch page used by the 'type' pt.
+ * e.g. scratch_pt[GTT_TYPE_PPGTT_PDE_PT] is used by the
+ * GTT_TYPE_PPGTT_PDE_PT level pt; that means this scratch_pt itself
+ * is of type GTT_TYPE_PPGTT_PTE_PT and is fully filled with the
+ * scratch page mfn.
+ */
+ if (type > GTT_TYPE_PPGTT_PTE_PT && type < GTT_TYPE_MAX) {
+ struct intel_gvt_gtt_entry se;
+
+ memset(&se, 0, sizeof(struct intel_gvt_gtt_entry));
+ se.type = get_entry_type(type - 1);
+ ops->set_pfn(&se, gtt->scratch_pt[type - 1].page_mfn);
+
+ /* The entry parameters (present/writeable/cache type) are set
+ * to match i915's scratch page tree.
+ */
+ se.val64 |= _PAGE_PRESENT | _PAGE_RW;
+ if (type == GTT_TYPE_PPGTT_PDE_PT)
+ se.val64 |= PPAT_CACHED_INDEX;
+
+ for (i = 0; i < page_entry_num; i++)
+ ops->set_entry(p, &se, i, false, 0, vgpu);
+ }
+
kunmap_atomic(p);
- /* translate page to mfn */
- vaddr = page_address(gtt->scratch_page);
- mfn = intel_gvt_hypervisor_virt_to_mfn(vaddr);
+ return 0;
+}
+
+static int release_scratch_page_tree(struct intel_vgpu *vgpu)
+{
+ int i;
- if (mfn == INTEL_GVT_INVALID_ADDR) {
- gvt_err("fail to translate vaddr: 0x%p\n", vaddr);
- __free_page(gtt->scratch_page);
- gtt->scratch_page = NULL;
- return -ENXIO;
+ for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
+ if (vgpu->gtt.scratch_pt[i].page != NULL) {
+ __free_page(vgpu->gtt.scratch_pt[i].page);
+ vgpu->gtt.scratch_pt[i].page = NULL;
+ vgpu->gtt.scratch_pt[i].page_mfn = 0;
+ }
}
- gtt->scratch_page_mfn = mfn;
- gvt_dbg_core("vgpu%d create scratch page: mfn=0x%lx\n", vgpu->id, mfn);
return 0;
}
-static void release_scratch_page(struct intel_vgpu *vgpu)
+static int create_scratch_page_tree(struct intel_vgpu *vgpu)
{
- if (vgpu->gtt.scratch_page != NULL) {
- __free_page(vgpu->gtt.scratch_page);
- vgpu->gtt.scratch_page = NULL;
- vgpu->gtt.scratch_page_mfn = 0;
+ int i, ret;
+
+ for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
+ ret = alloc_scratch_pages(vgpu, i);
+ if (ret)
+ goto err;
}
+
+ return 0;
+
+err:
+ release_scratch_page_tree(vgpu);
+ return ret;
}
/**
@@ -1995,7 +2007,7 @@ int intel_vgpu_init_gtt(struct intel_vgpu *vgpu)
gtt->ggtt_mm = ggtt_mm;
- return create_scratch_page(vgpu);
+ return create_scratch_page_tree(vgpu);
}
/**
@@ -2014,7 +2026,7 @@ void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu)
struct intel_vgpu_mm *mm;
ppgtt_free_all_shadow_page(vgpu);
- release_scratch_page(vgpu);
+ release_scratch_page_tree(vgpu);
list_for_each_safe(pos, n, &vgpu->gtt.mm_list_head) {
mm = container_of(pos, struct intel_vgpu_mm, list);
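
The net effect of create_scratch_page_tree() is one scratch page table per level, each fully populated with entries referencing the scratch table one level below (and the PTE-level table referencing plain scratch pages), so a guest PPGTT walk that hits an unmapped entry terminates safely at any depth. A rough sketch of the resulting invariant (illustrative pseudo-check under that assumption, not driver code):

    /* illustrative only: every level's scratch pt must exist, and its
     * entries are built to reference scratch_pt[type - 1], so a table
     * walk can never escape the scratch tree */
    static void check_scratch_tree(struct intel_vgpu *vgpu)
    {
            int type;

            for (type = GTT_TYPE_PPGTT_PDE_PT; type < GTT_TYPE_MAX; type++)
                    WARN_ON(!vgpu->gtt.scratch_pt[type].page_mfn ||
                            !vgpu->gtt.scratch_pt[type - 1].page_mfn);
    }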
diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h
index e4dcde78f3f9..d250013bc37b 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.h
+++ b/drivers/gpu/drm/i915/gvt/gtt.h
@@ -88,6 +88,36 @@ enum {
INTEL_GVT_MM_PPGTT,
};
+typedef enum {
+ GTT_TYPE_INVALID = -1,
+
+ GTT_TYPE_GGTT_PTE,
+
+ GTT_TYPE_PPGTT_PTE_4K_ENTRY,
+ GTT_TYPE_PPGTT_PTE_2M_ENTRY,
+ GTT_TYPE_PPGTT_PTE_1G_ENTRY,
+
+ GTT_TYPE_PPGTT_PTE_ENTRY,
+
+ GTT_TYPE_PPGTT_PDE_ENTRY,
+ GTT_TYPE_PPGTT_PDP_ENTRY,
+ GTT_TYPE_PPGTT_PML4_ENTRY,
+
+ GTT_TYPE_PPGTT_ROOT_ENTRY,
+
+ GTT_TYPE_PPGTT_ROOT_L3_ENTRY,
+ GTT_TYPE_PPGTT_ROOT_L4_ENTRY,
+
+ GTT_TYPE_PPGTT_ENTRY,
+
+ GTT_TYPE_PPGTT_PTE_PT,
+ GTT_TYPE_PPGTT_PDE_PT,
+ GTT_TYPE_PPGTT_PDP_PT,
+ GTT_TYPE_PPGTT_PML4_PT,
+
+ GTT_TYPE_MAX,
+} intel_gvt_gtt_type_t;
+
struct intel_vgpu_mm {
int type;
bool initialized;
@@ -151,6 +181,12 @@ extern void intel_vgpu_destroy_mm(struct kref *mm_ref);
struct intel_vgpu_guest_page;
+struct intel_vgpu_scratch_pt {
+ struct page *page;
+ unsigned long page_mfn;
+};
+
+
struct intel_vgpu_gtt {
struct intel_vgpu_mm *ggtt_mm;
unsigned long active_ppgtt_mm_bitmap;
@@ -160,8 +196,8 @@ struct intel_vgpu_gtt {
atomic_t n_write_protected_guest_page;
struct list_head oos_page_list_head;
struct list_head post_shadow_list_head;
- struct page *scratch_page;
- unsigned long scratch_page_mfn;
+ struct intel_vgpu_scratch_pt scratch_pt[GTT_TYPE_MAX];
+
};
extern int intel_vgpu_init_gtt(struct intel_vgpu *vgpu);
diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
index 385969a89216..398877c3d2fd 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.c
+++ b/drivers/gpu/drm/i915/gvt/gvt.c
@@ -44,11 +44,14 @@ static const char * const supported_hypervisors[] = {
[INTEL_GVT_HYPERVISOR_KVM] = "KVM",
};
-struct intel_gvt_io_emulation_ops intel_gvt_io_emulation_ops = {
+static const struct intel_gvt_ops intel_gvt_ops = {
.emulate_cfg_read = intel_vgpu_emulate_cfg_read,
.emulate_cfg_write = intel_vgpu_emulate_cfg_write,
.emulate_mmio_read = intel_vgpu_emulate_mmio_read,
.emulate_mmio_write = intel_vgpu_emulate_mmio_write,
+ .vgpu_create = intel_gvt_create_vgpu,
+ .vgpu_destroy = intel_gvt_destroy_vgpu,
+ .vgpu_reset = intel_gvt_reset_vgpu,
};
/**
@@ -81,10 +84,12 @@ int intel_gvt_init_host(void)
symbol_get(xengt_mpt), "xengt");
intel_gvt_host.hypervisor_type = INTEL_GVT_HYPERVISOR_XEN;
} else {
+#if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT)
/* not in Xen. Try KVMGT */
intel_gvt_host.mpt = try_then_request_module(
- symbol_get(kvmgt_mpt), "kvm");
+ symbol_get(kvmgt_mpt), "kvmgt");
intel_gvt_host.hypervisor_type = INTEL_GVT_HYPERVISOR_KVM;
+#endif
}
/* Fail to load MPT modules - bail out */
@@ -193,6 +198,9 @@ void intel_gvt_clean_device(struct drm_i915_private *dev_priv)
intel_gvt_clean_mmio_info(gvt);
intel_gvt_free_firmware(gvt);
+ intel_gvt_hypervisor_host_exit(&dev_priv->drm.pdev->dev, gvt);
+ intel_gvt_clean_vgpu_types(gvt);
+
kfree(dev_priv->gvt);
dev_priv->gvt = NULL;
}
@@ -270,10 +278,25 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
if (ret)
goto out_clean_cmd_parser;
- gvt_dbg_core("gvt device creation is done\n");
+ ret = intel_gvt_init_vgpu_types(gvt);
+ if (ret)
+ goto out_clean_thread;
+
+ ret = intel_gvt_hypervisor_host_init(&dev_priv->drm.pdev->dev, gvt,
+ &intel_gvt_ops);
+ if (ret) {
+ gvt_err("failed to register gvt-g host device: %d\n", ret);
+ goto out_clean_types;
+ }
+
+ gvt_dbg_core("gvt device initialization is done\n");
dev_priv->gvt = gvt;
return 0;
+out_clean_types:
+ intel_gvt_clean_vgpu_types(gvt);
+out_clean_thread:
+ clean_service_thread(gvt);
out_clean_cmd_parser:
intel_gvt_clean_cmd_parser(gvt);
out_clean_sched_policy:
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 62fc9e3ac5c6..3d4223e8ebe3 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -161,6 +161,20 @@ struct intel_vgpu {
DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES);
struct i915_gem_context *shadow_ctx;
struct notifier_block shadow_ctx_notifier_block;
+
+#if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT)
+ struct {
+ struct device *mdev;
+ struct vfio_region *region;
+ int num_regions;
+ struct eventfd_ctx *intx_trigger;
+ struct eventfd_ctx *msi_trigger;
+ struct rb_root cache;
+ struct mutex cache_lock;
+ void *vfio_group;
+ struct notifier_block iommu_notifier;
+ } vdev;
+#endif
};
struct intel_gvt_gm {
@@ -190,6 +204,16 @@ struct intel_gvt_opregion {
u32 opregion_pa;
};
+#define NR_MAX_INTEL_VGPU_TYPES 20
+struct intel_vgpu_type {
+ char name[16];
+ unsigned int max_instance;
+ unsigned int avail_instance;
+ unsigned int low_gm_size;
+ unsigned int high_gm_size;
+ unsigned int fence;
+};
+
struct intel_gvt {
struct mutex lock;
struct drm_i915_private *dev_priv;
@@ -205,6 +229,8 @@ struct intel_gvt {
struct intel_gvt_opregion opregion;
struct intel_gvt_workload_scheduler scheduler;
DECLARE_HASHTABLE(cmd_table, GVT_CMD_HASH_BITS);
+ struct intel_vgpu_type *types;
+ unsigned int num_types;
struct task_struct *service_thread;
wait_queue_head_t service_thread_wq;
@@ -231,6 +257,14 @@ void intel_gvt_free_firmware(struct intel_gvt *gvt);
int intel_gvt_load_firmware(struct intel_gvt *gvt);
/* Aperture/GM space definitions for GVT device */
+#define MB_TO_BYTES(mb) ((mb) << 20ULL)
+#define BYTES_TO_MB(b) ((b) >> 20ULL)
+
+#define HOST_LOW_GM_SIZE MB_TO_BYTES(128)
+#define HOST_HIGH_GM_SIZE MB_TO_BYTES(384)
+#define HOST_FENCE 4
+
+/* Aperture/GM space definitions for GVT device */
#define gvt_aperture_sz(gvt) (gvt->dev_priv->ggtt.mappable_end)
#define gvt_aperture_pa_base(gvt) (gvt->dev_priv->ggtt.mappable_base)
@@ -330,11 +364,14 @@ static inline void intel_vgpu_write_pci_bar(struct intel_vgpu *vgpu,
}
}
-struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
- struct intel_vgpu_creation_params *
- param);
+int intel_gvt_init_vgpu_types(struct intel_gvt *gvt);
+void intel_gvt_clean_vgpu_types(struct intel_gvt *gvt);
+struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
+ struct intel_vgpu_type *type);
void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu);
+void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu);
+
/* validating GM functions */
#define vgpu_gmadr_is_aperture(vgpu, gmadr) \
@@ -369,10 +406,10 @@ int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index,
int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index,
unsigned long *g_index);
-int intel_vgpu_emulate_cfg_read(void *__vgpu, unsigned int offset,
+int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes);
-int intel_vgpu_emulate_cfg_write(void *__vgpu, unsigned int offset,
+int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes);
void intel_gvt_clean_opregion(struct intel_gvt *gvt);
@@ -385,6 +422,22 @@ int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci);
int setup_vgpu_mmio(struct intel_vgpu *vgpu);
void populate_pvinfo_page(struct intel_vgpu *vgpu);
+struct intel_gvt_ops {
+ int (*emulate_cfg_read)(struct intel_vgpu *, unsigned int, void *,
+ unsigned int);
+ int (*emulate_cfg_write)(struct intel_vgpu *, unsigned int, void *,
+ unsigned int);
+ int (*emulate_mmio_read)(struct intel_vgpu *, u64, void *,
+ unsigned int);
+ int (*emulate_mmio_write)(struct intel_vgpu *, u64, void *,
+ unsigned int);
+ struct intel_vgpu *(*vgpu_create)(struct intel_gvt *,
+ struct intel_vgpu_type *);
+ void (*vgpu_destroy)(struct intel_vgpu *);
+ void (*vgpu_reset)(struct intel_vgpu *);
+};
+
+
#include "mpt.h"
#endif
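
The new intel_gvt_ops table is what a hypervisor-specific module (kvmgt below) receives through host_init() and uses to call back into GVT-g without reaching into i915 internals directly. A minimal sketch of forwarding a trapped MMIO access through the table, assuming the module stashed the ops pointer at init time as kvmgt.c does:

    static const struct intel_gvt_ops *gvt_ops;   /* saved in host_init() */

    static int forward_mmio_write(struct intel_vgpu *vgpu, u64 gpa,
                                  void *data, unsigned int len)
    {
            /* dispatch through the ops table instead of calling i915 directly */
            return gvt_ops->emulate_mmio_write(vgpu, gpa, data, len);
    }

kvmgt_page_track_write() in the new kvmgt.c is a real instance of this dispatch.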
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 9ab1f95dddc5..522809710312 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -1158,7 +1158,10 @@ static int fpga_dbg_mmio_write(struct intel_vgpu *vgpu,
static int dma_ctrl_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
- u32 mode = *(u32 *)p_data;
+ u32 mode;
+
+ write_vreg(vgpu, offset, p_data, bytes);
+ mode = vgpu_vreg(vgpu, offset);
if (GFX_MODE_BIT_SET_IN_MASK(mode, START_DMA)) {
WARN_ONCE(1, "VM(%d): iGVT-g doesn't supporte GuC\n",
@@ -1275,19 +1278,18 @@ static int skl_misc_ctl_write(struct intel_vgpu *vgpu, unsigned int offset,
switch (offset) {
case 0x4ddc:
vgpu_vreg(vgpu, offset) = 0x8000003c;
+ /* WaCompressedResourceSamplerPbeMediaNewHashMode:skl */
+ I915_WRITE(reg, vgpu_vreg(vgpu, offset));
break;
case 0x42080:
vgpu_vreg(vgpu, offset) = 0x8000;
+ /* WaCompressedResourceDisplayNewHashMode:skl */
+ I915_WRITE(reg, vgpu_vreg(vgpu, offset));
break;
default:
return -EINVAL;
}
- /**
- * TODO: need detect stepping info after gvt contain such information
- * 0x4ddc enabled after C0, 0x42080 enabled after E0.
- */
- I915_WRITE(reg, vgpu_vreg(vgpu, offset));
return 0;
}
@@ -1367,6 +1369,9 @@ static int gvt_reg_tlb_control_handler(struct intel_vgpu *vgpu,
int rc = 0;
unsigned int id = 0;
+ write_vreg(vgpu, offset, p_data, bytes);
+ vgpu_vreg(vgpu, offset) = 0;
+
switch (offset) {
case 0x4260:
id = RCS;
@@ -1392,6 +1397,23 @@ static int gvt_reg_tlb_control_handler(struct intel_vgpu *vgpu,
return rc;
}
+static int ring_reset_ctl_write(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ u32 data;
+
+ write_vreg(vgpu, offset, p_data, bytes);
+ data = vgpu_vreg(vgpu, offset);
+
+ if (data & _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET))
+ data |= RESET_CTL_READY_TO_RESET;
+ else if (data & _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET))
+ data &= ~RESET_CTL_READY_TO_RESET;
+
+ vgpu_vreg(vgpu, offset) = data;
+ return 0;
+}
+
#define MMIO_F(reg, s, f, am, rm, d, r, w) do { \
ret = new_mmio_info(gvt, INTEL_GVT_MMIO_OFFSET(reg), \
f, s, am, rm, d, r, w); \
@@ -1485,7 +1507,7 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_DFH(GEN7_GT_MODE, D_ALL, F_MODE_MASK, NULL, NULL);
MMIO_DFH(CACHE_MODE_0_GEN7, D_ALL, F_MODE_MASK, NULL, NULL);
- MMIO_DFH(CACHE_MODE_1, D_ALL, F_MODE_MASK, NULL, NULL);
+ MMIO_DFH(CACHE_MODE_1, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(0x20dc, D_ALL, F_MODE_MASK, NULL, NULL);
MMIO_DFH(_3D_CHICKEN3, D_ALL, F_MODE_MASK, NULL, NULL);
@@ -1494,7 +1516,7 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_DFH(0x2470, D_ALL, F_MODE_MASK, NULL, NULL);
MMIO_D(GAM_ECOCHK, D_ALL);
MMIO_DFH(GEN7_COMMON_SLICE_CHICKEN1, D_ALL, F_MODE_MASK, NULL, NULL);
- MMIO_DFH(COMMON_SLICE_CHICKEN2, D_ALL, F_MODE_MASK, NULL, NULL);
+ MMIO_DFH(COMMON_SLICE_CHICKEN2, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_D(0x9030, D_ALL);
MMIO_D(0x20a0, D_ALL);
MMIO_D(0x2420, D_ALL);
@@ -1503,7 +1525,7 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_D(0x2438, D_ALL);
MMIO_D(0x243c, D_ALL);
MMIO_DFH(0x7018, D_ALL, F_MODE_MASK, NULL, NULL);
- MMIO_DFH(0xe184, D_ALL, F_MODE_MASK, NULL, NULL);
+ MMIO_DFH(HALF_SLICE_CHICKEN3, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(0xe100, D_ALL, F_MODE_MASK, NULL, NULL);
/* display */
@@ -2116,6 +2138,7 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_D(GEN6_MBCTL, D_ALL);
MMIO_D(0x911c, D_ALL);
MMIO_D(0x9120, D_ALL);
+ MMIO_DFH(GEN7_UCGCTL4, D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_D(GAB_CTL, D_ALL);
MMIO_D(0x48800, D_ALL);
@@ -2298,6 +2321,15 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
MMIO_RING_D(RING_ACTHD_UDW, D_BDW_PLUS);
+#define RING_REG(base) (base + 0xd0)
+ MMIO_RING_F(RING_REG, 4, F_RO, 0,
+ ~_MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET), D_BDW_PLUS, NULL,
+ ring_reset_ctl_write);
+ MMIO_F(RING_REG(GEN8_BSD2_RING_BASE), 4, F_RO, 0,
+ ~_MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET), D_BDW_PLUS, NULL,
+ ring_reset_ctl_write);
+#undef RING_REG
+
#define RING_REG(base) (base + 0x230)
MMIO_RING_DFH(RING_REG, D_BDW_PLUS, 0, NULL, elsp_mmio_write);
MMIO_DH(RING_REG(GEN8_BSD2_RING_BASE), D_BDW_PLUS, NULL, elsp_mmio_write);
@@ -2345,7 +2377,7 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
MMIO_RING_GM(RING_HWS_PGA, D_BDW_PLUS, NULL, NULL);
MMIO_GM(0x1c080, D_BDW_PLUS, NULL, NULL);
- MMIO_DFH(HDC_CHICKEN0, D_BDW_PLUS, F_MODE_MASK, NULL, NULL);
+ MMIO_DFH(HDC_CHICKEN0, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_D(CHICKEN_PIPESL_1(PIPE_A), D_BDW);
MMIO_D(CHICKEN_PIPESL_1(PIPE_B), D_BDW);
@@ -2364,7 +2396,7 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
MMIO_D(GEN8_EU_DISABLE2, D_BDW_PLUS);
MMIO_D(0xfdc, D_BDW);
- MMIO_D(GEN8_ROW_CHICKEN, D_BDW_PLUS);
+ MMIO_DFH(GEN8_ROW_CHICKEN, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
MMIO_D(GEN7_ROW_CHICKEN2, D_BDW_PLUS);
MMIO_D(GEN8_UCGCTL6, D_BDW_PLUS);
@@ -2375,10 +2407,10 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
MMIO_D(0xb10c, D_BDW);
MMIO_D(0xb110, D_BDW);
- MMIO_DH(0x24d0, D_BDW_PLUS, NULL, NULL);
- MMIO_DH(0x24d4, D_BDW_PLUS, NULL, NULL);
- MMIO_DH(0x24d8, D_BDW_PLUS, NULL, NULL);
- MMIO_DH(0x24dc, D_BDW_PLUS, NULL, NULL);
+ MMIO_DFH(0x24d0, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x24d4, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x24d8, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x24dc, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
MMIO_D(0x83a4, D_BDW);
MMIO_D(GEN8_L3_LRA_1_GPGPU, D_BDW_PLUS);
@@ -2392,9 +2424,9 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
MMIO_D(0x6e570, D_BDW_PLUS);
MMIO_D(0x65f10, D_BDW_PLUS);
- MMIO_DFH(0xe194, D_BDW_PLUS, F_MODE_MASK, NULL, NULL);
- MMIO_DFH(0xe188, D_BDW_PLUS, F_MODE_MASK, NULL, NULL);
- MMIO_DFH(0xe180, D_BDW_PLUS, F_MODE_MASK, NULL, NULL);
+ MMIO_DFH(0xe194, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0xe188, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(HALF_SLICE_CHICKEN2, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(0x2580, D_BDW_PLUS, F_MODE_MASK, NULL, NULL);
MMIO_D(0x2248, D_BDW);
@@ -2425,6 +2457,7 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_D(0xa210, D_SKL_PLUS);
MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
MMIO_D(GEN9_RENDER_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
+ MMIO_DFH(GEN9_GAMT_ECO_REG_RW_IA, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
MMIO_DH(0x4ddc, D_SKL, NULL, skl_misc_ctl_write);
MMIO_DH(0x42080, D_SKL, NULL, skl_misc_ctl_write);
MMIO_D(0x45504, D_SKL);
@@ -2574,8 +2607,8 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_D(0x51000, D_SKL);
MMIO_D(0x6c00c, D_SKL);
- MMIO_F(0xc800, 0x7f8, 0, 0, 0, D_SKL, NULL, NULL);
- MMIO_F(0xb020, 0x80, 0, 0, 0, D_SKL, NULL, NULL);
+ MMIO_F(0xc800, 0x7f8, F_CMD_ACCESS, 0, 0, D_SKL, NULL, NULL);
+ MMIO_F(0xb020, 0x80, F_CMD_ACCESS, 0, 0, D_SKL, NULL, NULL);
MMIO_D(0xd08, D_SKL);
MMIO_D(0x20e0, D_SKL);
diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h
index 027ef558d91c..30e543f5a703 100644
--- a/drivers/gpu/drm/i915/gvt/hypercall.h
+++ b/drivers/gpu/drm/i915/gvt/hypercall.h
@@ -33,21 +33,14 @@
#ifndef _GVT_HYPERCALL_H_
#define _GVT_HYPERCALL_H_
-struct intel_gvt_io_emulation_ops {
- int (*emulate_cfg_read)(void *, unsigned int, void *, unsigned int);
- int (*emulate_cfg_write)(void *, unsigned int, void *, unsigned int);
- int (*emulate_mmio_read)(void *, u64, void *, unsigned int);
- int (*emulate_mmio_write)(void *, u64, void *, unsigned int);
-};
-
-extern struct intel_gvt_io_emulation_ops intel_gvt_io_emulation_ops;
-
/*
* Specific GVT-g MPT modules function collections. Currently GVT-g supports
* both Xen and KVM by providing dedicated hypervisor-related MPT modules.
*/
struct intel_gvt_mpt {
int (*detect_host)(void);
+ int (*host_init)(struct device *dev, void *gvt, const void *ops);
+ void (*host_exit)(struct device *dev, void *gvt);
int (*attach_vgpu)(void *vgpu, unsigned long *handle);
void (*detach_vgpu)(unsigned long handle);
int (*inject_msi)(unsigned long handle, u32 addr, u16 data);
@@ -60,8 +53,7 @@ struct intel_gvt_mpt {
unsigned long len);
unsigned long (*gfn_to_mfn)(unsigned long handle, unsigned long gfn);
int (*map_gfn_to_mfn)(unsigned long handle, unsigned long gfn,
- unsigned long mfn, unsigned int nr, bool map,
- int type);
+ unsigned long mfn, unsigned int nr, bool map);
int (*set_trap_area)(unsigned long handle, u64 start, u64 end,
bool map);
};
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
new file mode 100644
index 000000000000..dc0365033157
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -0,0 +1,597 @@
+/*
+ * KVMGT - the implementation of Intel mediated pass-through framework for KVM
+ *
+ * Copyright(c) 2014-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Kevin Tian <kevin.tian@intel.com>
+ * Jike Song <jike.song@intel.com>
+ * Xiaoguang Chen <xiaoguang.chen@intel.com>
+ */
+
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/mm.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/rbtree.h>
+#include <linux/spinlock.h>
+#include <linux/eventfd.h>
+#include <linux/uuid.h>
+#include <linux/kvm_host.h>
+#include <linux/vfio.h>
+
+#include "i915_drv.h"
+#include "gvt.h"
+
+static inline long kvmgt_pin_pages(struct device *dev, unsigned long *user_pfn,
+ long npage, int prot, unsigned long *phys_pfn)
+{
+ return 0;
+}
+static inline long kvmgt_unpin_pages(struct device *dev, unsigned long *pfn,
+ long npage)
+{
+ return 0;
+}
+
+static const struct intel_gvt_ops *intel_gvt_ops;
+
+
+/* helper macros copied from vfio-pci */
+#define VFIO_PCI_OFFSET_SHIFT 40
+#define VFIO_PCI_OFFSET_TO_INDEX(off) (off >> VFIO_PCI_OFFSET_SHIFT)
+#define VFIO_PCI_INDEX_TO_OFFSET(index) ((u64)(index) << VFIO_PCI_OFFSET_SHIFT)
+#define VFIO_PCI_OFFSET_MASK (((u64)(1) << VFIO_PCI_OFFSET_SHIFT) - 1)
+
+struct vfio_region {
+ u32 type;
+ u32 subtype;
+ size_t size;
+ u32 flags;
+};
+
+struct kvmgt_pgfn {
+ gfn_t gfn;
+ struct hlist_node hnode;
+};
+
+struct kvmgt_guest_info {
+ struct kvm *kvm;
+ struct intel_vgpu *vgpu;
+ struct kvm_page_track_notifier_node track_node;
+#define NR_BKT (1 << 18)
+ struct hlist_head ptable[NR_BKT];
+#undef NR_BKT
+};
+
+struct gvt_dma {
+ struct rb_node node;
+ gfn_t gfn;
+ kvm_pfn_t pfn;
+};
+
+static struct gvt_dma *__gvt_cache_find(struct intel_vgpu *vgpu, gfn_t gfn)
+{
+ struct rb_node *node = vgpu->vdev.cache.rb_node;
+ struct gvt_dma *ret = NULL;
+
+ while (node) {
+ struct gvt_dma *itr = rb_entry(node, struct gvt_dma, node);
+
+ if (gfn < itr->gfn)
+ node = node->rb_left;
+ else if (gfn > itr->gfn)
+ node = node->rb_right;
+ else {
+ ret = itr;
+ goto out;
+ }
+ }
+
+out:
+ return ret;
+}
+
+static kvm_pfn_t gvt_cache_find(struct intel_vgpu *vgpu, gfn_t gfn)
+{
+ struct gvt_dma *entry;
+
+ mutex_lock(&vgpu->vdev.cache_lock);
+ entry = __gvt_cache_find(vgpu, gfn);
+ mutex_unlock(&vgpu->vdev.cache_lock);
+
+ return entry == NULL ? 0 : entry->pfn;
+}
+
+static void gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn, kvm_pfn_t pfn)
+{
+ struct gvt_dma *new, *itr;
+ struct rb_node **link = &vgpu->vdev.cache.rb_node, *parent = NULL;
+
+ new = kzalloc(sizeof(struct gvt_dma), GFP_KERNEL);
+ if (!new)
+ return;
+
+ new->gfn = gfn;
+ new->pfn = pfn;
+
+ mutex_lock(&vgpu->vdev.cache_lock);
+ while (*link) {
+ parent = *link;
+ itr = rb_entry(parent, struct gvt_dma, node);
+
+ if (gfn == itr->gfn)
+ goto out;
+ else if (gfn < itr->gfn)
+ link = &parent->rb_left;
+ else
+ link = &parent->rb_right;
+ }
+
+ rb_link_node(&new->node, parent, link);
+ rb_insert_color(&new->node, &vgpu->vdev.cache);
+ mutex_unlock(&vgpu->vdev.cache_lock);
+ return;
+
+out:
+ mutex_unlock(&vgpu->vdev.cache_lock);
+ kfree(new);
+}
+
+static void __gvt_cache_remove_entry(struct intel_vgpu *vgpu,
+ struct gvt_dma *entry)
+{
+ rb_erase(&entry->node, &vgpu->vdev.cache);
+ kfree(entry);
+}
+
+static void gvt_cache_remove(struct intel_vgpu *vgpu, gfn_t gfn)
+{
+ struct device *dev = vgpu->vdev.mdev;
+ struct gvt_dma *this;
+ unsigned long pfn;
+
+ mutex_lock(&vgpu->vdev.cache_lock);
+ this = __gvt_cache_find(vgpu, gfn);
+ if (!this) {
+ mutex_unlock(&vgpu->vdev.cache_lock);
+ return;
+ }
+
+ pfn = this->pfn;
+ WARN_ON((kvmgt_unpin_pages(dev, &pfn, 1) != 1));
+ __gvt_cache_remove_entry(vgpu, this);
+ mutex_unlock(&vgpu->vdev.cache_lock);
+}
+
+static void gvt_cache_init(struct intel_vgpu *vgpu)
+{
+ vgpu->vdev.cache = RB_ROOT;
+ mutex_init(&vgpu->vdev.cache_lock);
+}
+
+static void gvt_cache_destroy(struct intel_vgpu *vgpu)
+{
+ struct gvt_dma *dma;
+ struct rb_node *node = NULL;
+ struct device *dev = vgpu->vdev.mdev;
+ unsigned long pfn;
+
+ mutex_lock(&vgpu->vdev.cache_lock);
+ while ((node = rb_first(&vgpu->vdev.cache))) {
+ dma = rb_entry(node, struct gvt_dma, node);
+ pfn = dma->pfn;
+
+ kvmgt_unpin_pages(dev, &pfn, 1);
+ __gvt_cache_remove_entry(vgpu, dma);
+ }
+ mutex_unlock(&vgpu->vdev.cache_lock);
+}
+
+static struct intel_vgpu_type *intel_gvt_find_vgpu_type(struct intel_gvt *gvt,
+ const char *name)
+{
+ int i;
+ struct intel_vgpu_type *t;
+ const char *driver_name = dev_driver_string(
+ &gvt->dev_priv->drm.pdev->dev);
+
+ for (i = 0; i < gvt->num_types; i++) {
+ t = &gvt->types[i];
+ if (!strncmp(t->name, name + strlen(driver_name) + 1,
+ sizeof(t->name)))
+ return t;
+ }
+
+ return NULL;
+}
+
+static struct attribute *type_attrs[] = {
+ NULL,
+};
+
+static struct attribute_group *intel_vgpu_type_groups[] = {
+ [0 ... NR_MAX_INTEL_VGPU_TYPES - 1] = NULL,
+};
+
+static bool intel_gvt_init_vgpu_type_groups(struct intel_gvt *gvt)
+{
+ int i, j;
+ struct intel_vgpu_type *type;
+ struct attribute_group *group;
+
+ for (i = 0; i < gvt->num_types; i++) {
+ type = &gvt->types[i];
+
+ group = kzalloc(sizeof(struct attribute_group), GFP_KERNEL);
+ if (WARN_ON(!group))
+ goto unwind;
+
+ group->name = type->name;
+ group->attrs = type_attrs;
+ intel_vgpu_type_groups[i] = group;
+ }
+
+ return true;
+
+unwind:
+ for (j = 0; j < i; j++) {
+ group = intel_vgpu_type_groups[j];
+ kfree(group);
+ }
+
+ return false;
+}
+
+static void intel_gvt_cleanup_vgpu_type_groups(struct intel_gvt *gvt)
+{
+ int i;
+ struct attribute_group *group;
+
+ for (i = 0; i < gvt->num_types; i++) {
+ group = intel_vgpu_type_groups[i];
+ kfree(group);
+ }
+}
+
+static void kvmgt_protect_table_init(struct kvmgt_guest_info *info)
+{
+ hash_init(info->ptable);
+}
+
+static void kvmgt_protect_table_destroy(struct kvmgt_guest_info *info)
+{
+ struct kvmgt_pgfn *p;
+ struct hlist_node *tmp;
+ int i;
+
+ hash_for_each_safe(info->ptable, i, tmp, p, hnode) {
+ hash_del(&p->hnode);
+ kfree(p);
+ }
+}
+
+static struct kvmgt_pgfn *
+__kvmgt_protect_table_find(struct kvmgt_guest_info *info, gfn_t gfn)
+{
+ struct kvmgt_pgfn *p, *res = NULL;
+
+ hash_for_each_possible(info->ptable, p, hnode, gfn) {
+ if (gfn == p->gfn) {
+ res = p;
+ break;
+ }
+ }
+
+ return res;
+}
+
+static bool kvmgt_gfn_is_write_protected(struct kvmgt_guest_info *info,
+ gfn_t gfn)
+{
+ struct kvmgt_pgfn *p;
+
+ p = __kvmgt_protect_table_find(info, gfn);
+ return !!p;
+}
+
+static void kvmgt_protect_table_add(struct kvmgt_guest_info *info, gfn_t gfn)
+{
+ struct kvmgt_pgfn *p;
+
+ if (kvmgt_gfn_is_write_protected(info, gfn))
+ return;
+
+ p = kmalloc(sizeof(struct kvmgt_pgfn), GFP_ATOMIC);
+ if (WARN(!p, "gfn: 0x%llx\n", gfn))
+ return;
+
+ p->gfn = gfn;
+ hash_add(info->ptable, &p->hnode, gfn);
+}
+
+static void kvmgt_protect_table_del(struct kvmgt_guest_info *info,
+ gfn_t gfn)
+{
+ struct kvmgt_pgfn *p;
+
+ p = __kvmgt_protect_table_find(info, gfn);
+ if (p) {
+ hash_del(&p->hnode);
+ kfree(p);
+ }
+}
+
+static int kvmgt_host_init(struct device *dev, void *gvt, const void *ops)
+{
+ if (!intel_gvt_init_vgpu_type_groups(gvt))
+ return -EFAULT;
+
+ intel_gvt_ops = ops;
+
+ /* MDEV is not yet available */
+ return -ENODEV;
+}
+
+static void kvmgt_host_exit(struct device *dev, void *gvt)
+{
+ intel_gvt_cleanup_vgpu_type_groups(gvt);
+}
+
+static int kvmgt_write_protect_add(unsigned long handle, u64 gfn)
+{
+ struct kvmgt_guest_info *info = (struct kvmgt_guest_info *)handle;
+ struct kvm *kvm = info->kvm;
+ struct kvm_memory_slot *slot;
+ int idx;
+
+ idx = srcu_read_lock(&kvm->srcu);
+ slot = gfn_to_memslot(kvm, gfn);
+
+ spin_lock(&kvm->mmu_lock);
+
+ if (kvmgt_gfn_is_write_protected(info, gfn))
+ goto out;
+
+ kvm_slot_page_track_add_page(kvm, slot, gfn, KVM_PAGE_TRACK_WRITE);
+ kvmgt_protect_table_add(info, gfn);
+
+out:
+ spin_unlock(&kvm->mmu_lock);
+ srcu_read_unlock(&kvm->srcu, idx);
+ return 0;
+}
+
+static int kvmgt_write_protect_remove(unsigned long handle, u64 gfn)
+{
+ struct kvmgt_guest_info *info = (struct kvmgt_guest_info *)handle;
+ struct kvm *kvm = info->kvm;
+ struct kvm_memory_slot *slot;
+ int idx;
+
+ idx = srcu_read_lock(&kvm->srcu);
+ slot = gfn_to_memslot(kvm, gfn);
+
+ spin_lock(&kvm->mmu_lock);
+
+ if (!kvmgt_gfn_is_write_protected(info, gfn))
+ goto out;
+
+ kvm_slot_page_track_remove_page(kvm, slot, gfn, KVM_PAGE_TRACK_WRITE);
+ kvmgt_protect_table_del(info, gfn);
+
+out:
+ spin_unlock(&kvm->mmu_lock);
+ srcu_read_unlock(&kvm->srcu, idx);
+ return 0;
+}
+
+static void kvmgt_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa,
+ const u8 *val, int len,
+ struct kvm_page_track_notifier_node *node)
+{
+ struct kvmgt_guest_info *info = container_of(node,
+ struct kvmgt_guest_info, track_node);
+
+ if (kvmgt_gfn_is_write_protected(info, gpa_to_gfn(gpa)))
+ intel_gvt_ops->emulate_mmio_write(info->vgpu, gpa,
+ (void *)val, len);
+}
+
+static void kvmgt_page_track_flush_slot(struct kvm *kvm,
+ struct kvm_memory_slot *slot,
+ struct kvm_page_track_notifier_node *node)
+{
+ int i;
+ gfn_t gfn;
+ struct kvmgt_guest_info *info = container_of(node,
+ struct kvmgt_guest_info, track_node);
+
+ spin_lock(&kvm->mmu_lock);
+ for (i = 0; i < slot->npages; i++) {
+ gfn = slot->base_gfn + i;
+ if (kvmgt_gfn_is_write_protected(info, gfn)) {
+ kvm_slot_page_track_remove_page(kvm, slot, gfn,
+ KVM_PAGE_TRACK_WRITE);
+ kvmgt_protect_table_del(info, gfn);
+ }
+ }
+ spin_unlock(&kvm->mmu_lock);
+}
+
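+/*
+ * Detect whether we are running inside a KVM guest by querying the
+ * hypervisor CPUID signature leaf and matching the "KVMKVMKVM"
+ * signature returned in ebx/ecx/edx.
+ */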
+static bool kvmgt_check_guest(void)
+{
+ unsigned int eax, ebx, ecx, edx;
+ char s[12];
+ unsigned int *i;
+
+ eax = KVM_CPUID_SIGNATURE;
+ ebx = ecx = edx = 0;
+
+ asm volatile ("cpuid"
+ : "+a"(eax), "=b"(ebx), "=c"(ecx), "=d"(edx)
+ :
+ : "cc", "memory");
+ i = (unsigned int *)s;
+ i[0] = ebx;
+ i[1] = ecx;
+ i[2] = edx;
+
+ return !strncmp(s, "KVMKVMKVM", strlen("KVMKVMKVM"));
+}
+
+/*
+ * NOTE:
+ * It's actually impossible to check if we are running in a KVM host,
+ * since the "KVM host" is simply native. So we only detect the guest here.
+ */
+static int kvmgt_detect_host(void)
+{
+#ifdef CONFIG_INTEL_IOMMU
+ if (intel_iommu_gfx_mapped) {
+ gvt_err("Hardware IOMMU compatibility not yet supported, try to boot with intel_iommu=igfx_off\n");
+ return -ENODEV;
+ }
+#endif
+ return kvmgt_check_guest() ? -ENODEV : 0;
+}
+
+static int kvmgt_attach_vgpu(void *vgpu, unsigned long *handle)
+{
+ /* nothing to do here */
+ return 0;
+}
+
+static void kvmgt_detach_vgpu(unsigned long handle)
+{
+ /* nothing to do here */
+}
+
+static int kvmgt_inject_msi(unsigned long handle, u32 addr, u16 data)
+{
+ struct kvmgt_guest_info *info = (struct kvmgt_guest_info *)handle;
+ struct intel_vgpu *vgpu = info->vgpu;
+
+ if (vgpu->vdev.msi_trigger)
+ return eventfd_signal(vgpu->vdev.msi_trigger, 1) == 1;
+
+ return false;
+}
+
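+/*
+ * Translate a guest frame number to a host pfn: consult the per-vGPU
+ * cache first, otherwise pin the page through VFIO and cache the
+ * resulting pfn for later lookups.
+ */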
+static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn)
+{
+ unsigned long pfn;
+ struct kvmgt_guest_info *info = (struct kvmgt_guest_info *)handle;
+ int rc;
+
+ pfn = gvt_cache_find(info->vgpu, gfn);
+ if (pfn != 0)
+ return pfn;
+
+ rc = kvmgt_pin_pages(info->vgpu->vdev.mdev, &gfn, 1,
+ IOMMU_READ | IOMMU_WRITE, &pfn);
+ if (rc != 1) {
+ gvt_err("vfio_pin_pages failed for gfn: 0x%lx\n", gfn);
+ return 0;
+ }
+
+ gvt_cache_add(info->vgpu, gfn, pfn);
+ return pfn;
+}
+
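+/*
+ * GPA accessors: a guest physical address is translated to a host
+ * virtual address via the pinned pfn plus the in-page offset, and
+ * reads/writes then reduce to a plain memcpy on that mapping.
+ */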
+static void *kvmgt_gpa_to_hva(unsigned long handle, unsigned long gpa)
+{
+ unsigned long pfn;
+ gfn_t gfn = gpa_to_gfn(gpa);
+
+ pfn = kvmgt_gfn_to_pfn(handle, gfn);
+ if (!pfn)
+ return NULL;
+
+ return (char *)pfn_to_kaddr(pfn) + offset_in_page(gpa);
+}
+
+static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa,
+ void *buf, unsigned long len, bool write)
+{
+ void *hva = NULL;
+
+ hva = kvmgt_gpa_to_hva(handle, gpa);
+ if (!hva)
+ return -EFAULT;
+
+ if (write)
+ memcpy(hva, buf, len);
+ else
+ memcpy(buf, hva, len);
+
+ return 0;
+}
+
+static int kvmgt_read_gpa(unsigned long handle, unsigned long gpa,
+ void *buf, unsigned long len)
+{
+ return kvmgt_rw_gpa(handle, gpa, buf, len, false);
+}
+
+static int kvmgt_write_gpa(unsigned long handle, unsigned long gpa,
+ void *buf, unsigned long len)
+{
+ return kvmgt_rw_gpa(handle, gpa, buf, len, true);
+}
+
+static unsigned long kvmgt_virt_to_pfn(void *addr)
+{
+ return PFN_DOWN(__pa(addr));
+}
+
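+/*
+ * The MPT (mediated pass-through) operations exported by this module;
+ * the GVT-g core calls these through the intel_gvt_hypervisor_*()
+ * wrappers in mpt.h.
+ */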
+struct intel_gvt_mpt kvmgt_mpt = {
+ .detect_host = kvmgt_detect_host,
+ .host_init = kvmgt_host_init,
+ .host_exit = kvmgt_host_exit,
+ .attach_vgpu = kvmgt_attach_vgpu,
+ .detach_vgpu = kvmgt_detach_vgpu,
+ .inject_msi = kvmgt_inject_msi,
+ .from_virt_to_mfn = kvmgt_virt_to_pfn,
+ .set_wp_page = kvmgt_write_protect_add,
+ .unset_wp_page = kvmgt_write_protect_remove,
+ .read_gpa = kvmgt_read_gpa,
+ .write_gpa = kvmgt_write_gpa,
+ .gfn_to_mfn = kvmgt_gfn_to_pfn,
+};
+EXPORT_SYMBOL_GPL(kvmgt_mpt);
+
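+/*
+ * Module init/exit are placeholders for now; the module only exists to
+ * export kvmgt_mpt for the GVT-g core.
+ */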
+static int __init kvmgt_init(void)
+{
+ return 0;
+}
+
+static void __exit kvmgt_exit(void)
+{
+}
+
+module_init(kvmgt_init);
+module_exit(kvmgt_exit);
+
+MODULE_LICENSE("GPL and additional rights");
+MODULE_AUTHOR("Intel Corporation");
diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c
index 585b01f63254..09c9450a1946 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.c
+++ b/drivers/gpu/drm/i915/gvt/mmio.c
@@ -67,10 +67,9 @@ int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa)
* Returns:
* Zero on success, negative error code if failed
*/
-int intel_vgpu_emulate_mmio_read(void *__vgpu, uint64_t pa,
+int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
void *p_data, unsigned int bytes)
{
- struct intel_vgpu *vgpu = __vgpu;
struct intel_gvt *gvt = vgpu->gvt;
struct intel_gvt_mmio_info *mmio;
unsigned int offset = 0;
@@ -179,10 +178,9 @@ err:
* Returns:
* Zero on success, negative error code if failed
*/
-int intel_vgpu_emulate_mmio_write(void *__vgpu, uint64_t pa,
+int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
void *p_data, unsigned int bytes)
{
- struct intel_vgpu *vgpu = __vgpu;
struct intel_gvt *gvt = vgpu->gvt;
struct intel_gvt_mmio_info *mmio;
unsigned int offset = 0;
diff --git a/drivers/gpu/drm/i915/gvt/mmio.h b/drivers/gpu/drm/i915/gvt/mmio.h
index 9dc739a01892..87d5b5e366a3 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.h
+++ b/drivers/gpu/drm/i915/gvt/mmio.h
@@ -87,10 +87,11 @@ struct intel_gvt_mmio_info *intel_gvt_find_mmio_info(struct intel_gvt *gvt,
})
int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa);
-int intel_vgpu_emulate_mmio_read(void *__vgpu, u64 pa, void *p_data,
- unsigned int bytes);
-int intel_vgpu_emulate_mmio_write(void *__vgpu, u64 pa, void *p_data,
- unsigned int bytes);
+
+int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, u64 pa,
+ void *p_data, unsigned int bytes);
+int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, u64 pa,
+ void *p_data, unsigned int bytes);
bool intel_gvt_mmio_is_cmd_access(struct intel_gvt *gvt,
unsigned int offset);
bool intel_gvt_mmio_is_unalign(struct intel_gvt *gvt, unsigned int offset);
diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h
index 67858782d327..1af5830c0a56 100644
--- a/drivers/gpu/drm/i915/gvt/mpt.h
+++ b/drivers/gpu/drm/i915/gvt/mpt.h
@@ -56,6 +56,35 @@ static inline int intel_gvt_hypervisor_detect_host(void)
}
/**
+ * intel_gvt_hypervisor_host_init - init GVT-g host side
+ *
+ * Returns:
+ * Zero on success, negative error code if failed
+ */
+static inline int intel_gvt_hypervisor_host_init(struct device *dev,
+ void *gvt, const void *ops)
+{
+ /* optional to provide */
+ if (!intel_gvt_host.mpt->host_init)
+ return 0;
+
+ return intel_gvt_host.mpt->host_init(dev, gvt, ops);
+}
+
+/**
+ * intel_gvt_hypervisor_host_exit - exit GVT-g host side
+ */
+static inline void intel_gvt_hypervisor_host_exit(struct device *dev,
+ void *gvt)
+{
+ /* optional to provide */
+ if (!intel_gvt_host.mpt->host_exit)
+ return;
+
+ intel_gvt_host.mpt->host_exit(dev, gvt);
+}
+
+/**
* intel_gvt_hypervisor_attach_vgpu - call hypervisor to initialize vGPU
* related stuffs inside hypervisor.
*
@@ -64,6 +93,10 @@ static inline int intel_gvt_hypervisor_detect_host(void)
*/
static inline int intel_gvt_hypervisor_attach_vgpu(struct intel_vgpu *vgpu)
{
+ /* optional to provide */
+ if (!intel_gvt_host.mpt->attach_vgpu)
+ return 0;
+
return intel_gvt_host.mpt->attach_vgpu(vgpu, &vgpu->handle);
}
@@ -76,6 +109,10 @@ static inline int intel_gvt_hypervisor_attach_vgpu(struct intel_vgpu *vgpu)
*/
static inline void intel_gvt_hypervisor_detach_vgpu(struct intel_vgpu *vgpu)
{
+ /* optional to provide */
+ if (!intel_gvt_host.mpt->detach_vgpu)
+ return;
+
intel_gvt_host.mpt->detach_vgpu(vgpu->handle);
}
@@ -224,11 +261,6 @@ static inline unsigned long intel_gvt_hypervisor_gfn_to_mfn(
return intel_gvt_host.mpt->gfn_to_mfn(vgpu->handle, gfn);
}
-enum {
- GVT_MAP_APERTURE = 0,
- GVT_MAP_OPREGION,
-};
-
/**
* intel_gvt_hypervisor_map_gfn_to_mfn - map a GFN region to MFN
* @vgpu: a vGPU
@@ -236,7 +268,6 @@ enum {
* @mfn: host PFN
* @nr: amount of PFNs
* @map: map or unmap
- * @type: map type
*
* Returns:
* Zero on success, negative error code if failed.
@@ -244,10 +275,14 @@ enum {
static inline int intel_gvt_hypervisor_map_gfn_to_mfn(
struct intel_vgpu *vgpu, unsigned long gfn,
unsigned long mfn, unsigned int nr,
- bool map, int type)
+ bool map)
{
+ /* an MPT implementation could have MMIO mapped elsewhere */
+ if (!intel_gvt_host.mpt->map_gfn_to_mfn)
+ return 0;
+
return intel_gvt_host.mpt->map_gfn_to_mfn(vgpu->handle, gfn, mfn, nr,
- map, type);
+ map);
}
/**
@@ -263,6 +298,10 @@ static inline int intel_gvt_hypervisor_map_gfn_to_mfn(
static inline int intel_gvt_hypervisor_set_trap_area(
struct intel_vgpu *vgpu, u64 start, u64 end, bool map)
{
+ /* an MPT implementation could have MMIO trapped elsewhere */
+ if (!intel_gvt_host.mpt->set_trap_area)
+ return 0;
+
return intel_gvt_host.mpt->set_trap_area(vgpu->handle, start, end, map);
}
diff --git a/drivers/gpu/drm/i915/gvt/opregion.c b/drivers/gpu/drm/i915/gvt/opregion.c
index 95218913b0bc..d2a0fbc896c3 100644
--- a/drivers/gpu/drm/i915/gvt/opregion.c
+++ b/drivers/gpu/drm/i915/gvt/opregion.c
@@ -73,7 +73,7 @@ static int map_vgpu_opregion(struct intel_vgpu *vgpu, bool map)
}
ret = intel_gvt_hypervisor_map_gfn_to_mfn(vgpu,
vgpu_opregion(vgpu)->gfn[i],
- mfn, 1, map, GVT_MAP_OPREGION);
+ mfn, 1, map);
if (ret) {
gvt_err("fail to map GFN to MFN, errno: %d\n", ret);
return ret;
@@ -89,28 +89,18 @@ static int map_vgpu_opregion(struct intel_vgpu *vgpu, bool map)
*/
void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu)
{
- int i;
-
gvt_dbg_core("vgpu%d: clean vgpu opregion\n", vgpu->id);
if (!vgpu_opregion(vgpu)->va)
return;
- if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_KVM) {
- vunmap(vgpu_opregion(vgpu)->va);
- for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++) {
- if (vgpu_opregion(vgpu)->pages[i]) {
- put_page(vgpu_opregion(vgpu)->pages[i]);
- vgpu_opregion(vgpu)->pages[i] = NULL;
- }
- }
- } else {
+ if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_XEN) {
map_vgpu_opregion(vgpu, false);
free_pages((unsigned long)vgpu_opregion(vgpu)->va,
INTEL_GVT_OPREGION_PORDER);
- }
- vgpu_opregion(vgpu)->va = NULL;
+ vgpu_opregion(vgpu)->va = NULL;
+ }
}
/**
@@ -137,22 +127,8 @@ int intel_vgpu_init_opregion(struct intel_vgpu *vgpu, u32 gpa)
ret = map_vgpu_opregion(vgpu, true);
if (ret)
return ret;
- } else {
- gvt_dbg_core("emulate opregion from userspace\n");
-
- /*
- * If opregion pages are not allocated from host kenrel,
- * most of the params are meaningless
- */
- ret = intel_gvt_hypervisor_map_gfn_to_mfn(vgpu,
- 0, /* not used */
- 0, /* not used */
- 2, /* not used */
- 1,
- GVT_MAP_OPREGION);
- if (ret)
- return ret;
}
+
return 0;
}
diff --git a/drivers/gpu/drm/i915/gvt/render.c b/drivers/gpu/drm/i915/gvt/render.c
index 3af894b3d257..44136b1f3aab 100644
--- a/drivers/gpu/drm/i915/gvt/render.c
+++ b/drivers/gpu/drm/i915/gvt/render.c
@@ -152,6 +152,8 @@ static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
if (wait_for_atomic((I915_READ_FW(reg) == 0), 50))
gvt_err("timeout in invalidate ring (%d) tlb\n", ring_id);
+ else
+ vgpu_vreg(vgpu, regs[ring_id]) = 0;
intel_uncore_forcewake_put(dev_priv, fw);
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c
index 1df6a5460f3e..678b0be85376 100644
--- a/drivers/gpu/drm/i915/gvt/sched_policy.c
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
@@ -36,12 +36,10 @@
static bool vgpu_has_pending_workload(struct intel_vgpu *vgpu)
{
- struct intel_vgpu_execlist *execlist;
enum intel_engine_id i;
struct intel_engine_cs *engine;
for_each_engine(engine, vgpu->gvt->dev_priv, i) {
- execlist = &vgpu->execlist[i];
if (!list_empty(workload_q_head(vgpu, i)))
return true;
}
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 18acb45dd14d..f898df38dd9a 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -89,15 +89,15 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
}
page = i915_gem_object_get_page(ctx_obj, LRC_PPHWSP_PN + i);
- dst = kmap_atomic(page);
+ dst = kmap(page);
intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst,
GTT_PAGE_SIZE);
- kunmap_atomic(dst);
+ kunmap(page);
i++;
}
page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
- shadow_ring_context = kmap_atomic(page);
+ shadow_ring_context = kmap(page);
#define COPY_REG(name) \
intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \
@@ -123,7 +123,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
sizeof(*shadow_ring_context),
GTT_PAGE_SIZE - sizeof(*shadow_ring_context));
- kunmap_atomic(shadow_ring_context);
+ kunmap(page);
return 0;
}
@@ -160,8 +160,6 @@ static int shadow_context_status_change(struct notifier_block *nb,
static int dispatch_workload(struct intel_vgpu_workload *workload)
{
- struct intel_vgpu *vgpu = workload->vgpu;
- struct intel_gvt *gvt = vgpu->gvt;
int ring_id = workload->ring_id;
struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
@@ -174,6 +172,8 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
shadow_ctx->desc_template = workload->ctx_desc.addressing_mode <<
GEN8_CTX_ADDRESSING_MODE_SHIFT;
+ mutex_lock(&dev_priv->drm.struct_mutex);
+
rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
if (IS_ERR(rq)) {
gvt_err("fail to allocate gem request\n");
@@ -185,40 +185,35 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
workload->req = i915_gem_request_get(rq);
- mutex_lock(&gvt->lock);
-
ret = intel_gvt_scan_and_shadow_workload(workload);
if (ret)
- goto err;
+ goto out;
ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
if (ret)
- goto err;
+ goto out;
ret = populate_shadow_context(workload);
if (ret)
- goto err;
+ goto out;
if (workload->prepare) {
ret = workload->prepare(workload);
if (ret)
- goto err;
+ goto out;
}
- mutex_unlock(&gvt->lock);
-
gvt_dbg_sched("ring id %d submit workload to i915 %p\n",
ring_id, workload->req);
- i915_add_request_no_flush(rq);
+ ret = 0;
workload->dispatched = true;
- return 0;
-err:
- workload->status = ret;
-
- mutex_unlock(&gvt->lock);
+out:
+ if (ret)
+ workload->status = ret;
i915_add_request_no_flush(rq);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
return ret;
}
@@ -318,10 +313,10 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
}
page = i915_gem_object_get_page(ctx_obj, LRC_PPHWSP_PN + i);
- src = kmap_atomic(page);
+ src = kmap(page);
intel_gvt_hypervisor_write_gpa(vgpu, context_gpa, src,
GTT_PAGE_SIZE);
- kunmap_atomic(src);
+ kunmap(page);
i++;
}
@@ -329,7 +324,7 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
RING_CTX_OFF(ring_header.val), &workload->rb_tail, 4);
page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
- shadow_ring_context = kmap_atomic(page);
+ shadow_ring_context = kmap(page);
#define COPY_REG(name) \
intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa + \
@@ -347,7 +342,7 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
sizeof(*shadow_ring_context),
GTT_PAGE_SIZE - sizeof(*shadow_ring_context));
- kunmap_atomic(shadow_ring_context);
+ kunmap(page);
}
static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
@@ -438,9 +433,9 @@ static int workload_thread(void *priv)
intel_uncore_forcewake_get(gvt->dev_priv,
FORCEWAKE_ALL);
- mutex_lock(&gvt->dev_priv->drm.struct_mutex);
+ mutex_lock(&gvt->lock);
ret = dispatch_workload(workload);
- mutex_unlock(&gvt->dev_priv->drm.struct_mutex);
+ mutex_unlock(&gvt->lock);
if (ret) {
gvt_err("fail to dispatch workload, skip\n");
@@ -455,15 +450,15 @@ static int workload_thread(void *priv)
if (lret < 0) {
workload->status = lret;
gvt_err("fail to wait workload, skip\n");
+ } else {
+ workload->status = 0;
}
complete:
gvt_dbg_sched("will complete workload %p\n, status: %d\n",
workload, workload->status);
- mutex_lock(&gvt->dev_priv->drm.struct_mutex);
complete_current_workload(gvt, ring_id);
- mutex_unlock(&gvt->dev_priv->drm.struct_mutex);
i915_gem_request_put(fetch_and_zero(&workload->req));
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index 4f54005b976d..4f64845d8a4c 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -46,9 +46,13 @@ int setup_vgpu_mmio(struct intel_vgpu *vgpu)
struct intel_gvt *gvt = vgpu->gvt;
const struct intel_gvt_device_info *info = &gvt->device_info;
- vgpu->mmio.vreg = vzalloc(info->mmio_size * 2);
- if (!vgpu->mmio.vreg)
- return -ENOMEM;
+ if (vgpu->mmio.vreg)
+ memset(vgpu->mmio.vreg, 0, info->mmio_size * 2);
+ else {
+ vgpu->mmio.vreg = vzalloc(info->mmio_size * 2);
+ if (!vgpu->mmio.vreg)
+ return -ENOMEM;
+ }
vgpu->mmio.sreg = vgpu->mmio.vreg + info->mmio_size;
@@ -95,6 +99,7 @@ static void setup_vgpu_cfg_space(struct intel_vgpu *vgpu,
*/
memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_1, 0, 4);
memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_3, 0, 4);
+ memset(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_OPREGION, 0, 4);
for (i = 0; i < INTEL_GVT_MAX_BAR_NUM; i++) {
vgpu->cfg_space.bar[i].size = pci_resource_len(
@@ -133,6 +138,106 @@ void populate_pvinfo_page(struct intel_vgpu *vgpu)
}
/**
+ * intel_gvt_init_vgpu_types - initialize vGPU type list
+ * @gvt: GVT device
+ *
+ * Initialize the vGPU type list based on the available resources.
+ *
+ */
+int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
+{
+ unsigned int num_types;
+ unsigned int i, low_avail;
+ unsigned int min_low;
+
+ /* A vGPU type name has the form GVTg_Vx_y, where 'x' encodes
+ * the physical GPU generation and 'y' is the maximum number of
+ * vGPU instances a user can create of this type on one
+ * physical GPU.
+ *
+ * Depending on the physical SKU resources, one might see vGPU
+ * types like GVTg_V4_8, GVTg_V4_4, GVTg_V4_2, etc. Different
+ * types of vGPU can be created on the same physical GPU
+ * depending on the available resources. Each vGPU type has an
+ * "avail_instance" field to indicate how many vGPU instances
+ * can still be created of this type.
+ *
+ * Currently a static size is used here, as the types are
+ * initialized early.
+ */
+ low_avail = MB_TO_BYTES(256) - HOST_LOW_GM_SIZE;
+ num_types = 4;
+
+ gvt->types = kzalloc(num_types * sizeof(struct intel_vgpu_type),
+ GFP_KERNEL);
+ if (!gvt->types)
+ return -ENOMEM;
+
+ min_low = MB_TO_BYTES(32);
+ for (i = 0; i < num_types; ++i) {
+ if (low_avail / min_low == 0)
+ break;
+ gvt->types[i].low_gm_size = min_low;
+ gvt->types[i].high_gm_size = 3 * gvt->types[i].low_gm_size;
+ gvt->types[i].fence = 4;
+ gvt->types[i].max_instance = low_avail / min_low;
+ gvt->types[i].avail_instance = gvt->types[i].max_instance;
+
+ if (IS_GEN8(gvt->dev_priv))
+ sprintf(gvt->types[i].name, "GVTg_V4_%u",
+ gvt->types[i].max_instance);
+ else if (IS_GEN9(gvt->dev_priv))
+ sprintf(gvt->types[i].name, "GVTg_V5_%u",
+ gvt->types[i].max_instance);
+
+ min_low <<= 1;
+ gvt_dbg_core("type[%d]: %s max %u avail %u low %u high %u fence %u\n",
+ i, gvt->types[i].name, gvt->types[i].max_instance,
+ gvt->types[i].avail_instance,
+ gvt->types[i].low_gm_size,
+ gvt->types[i].high_gm_size, gvt->types[i].fence);
+ }
+
+ gvt->num_types = i;
+ return 0;
+}
+
+void intel_gvt_clean_vgpu_types(struct intel_gvt *gvt)
+{
+ kfree(gvt->types);
+}
+
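+/*
+ * Recompute "avail_instance" for every vGPU type from the low/high
+ * graphics memory and fence registers still unallocated, so that
+ * creating or destroying a vGPU immediately updates how many more
+ * instances of each type can be created.
+ */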
+static void intel_gvt_update_vgpu_types(struct intel_gvt *gvt)
+{
+ int i;
+ unsigned int low_gm_avail, high_gm_avail, fence_avail;
+ unsigned int low_gm_min, high_gm_min, fence_min, total_min;
+
+ /* This should depend on the maximum hw resource size, but keep
+ * the static config for now.
+ */
+ low_gm_avail = MB_TO_BYTES(256) - HOST_LOW_GM_SIZE -
+ gvt->gm.vgpu_allocated_low_gm_size;
+ high_gm_avail = MB_TO_BYTES(256) * 3 - HOST_HIGH_GM_SIZE -
+ gvt->gm.vgpu_allocated_high_gm_size;
+ fence_avail = gvt_fence_sz(gvt) - HOST_FENCE -
+ gvt->fence.vgpu_allocated_fence_num;
+
+ for (i = 0; i < gvt->num_types; i++) {
+ low_gm_min = low_gm_avail / gvt->types[i].low_gm_size;
+ high_gm_min = high_gm_avail / gvt->types[i].high_gm_size;
+ fence_min = fence_avail / gvt->types[i].fence;
+ total_min = min(min(low_gm_min, high_gm_min), fence_min);
+ gvt->types[i].avail_instance = min(gvt->types[i].max_instance,
+ total_min);
+
+ gvt_dbg_core("update type[%d]: %s max %u avail %u low %u high %u fence %u\n",
+ i, gvt->types[i].name, gvt->types[i].max_instance,
+ gvt->types[i].avail_instance, gvt->types[i].low_gm_size,
+ gvt->types[i].high_gm_size, gvt->types[i].fence);
+ }
+}
+
+/**
* intel_gvt_destroy_vgpu - destroy a virtual GPU
* @vgpu: virtual GPU
*
@@ -166,20 +271,11 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
clean_vgpu_mmio(vgpu);
vfree(vgpu);
+ intel_gvt_update_vgpu_types(gvt);
mutex_unlock(&gvt->lock);
}
-/**
- * intel_gvt_create_vgpu - create a virtual GPU
- * @gvt: GVT device
- * @param: vGPU creation parameters
- *
- * This function is called when user wants to create a virtual GPU.
- *
- * Returns:
- * pointer to intel_vgpu, error pointer if failed.
- */
-struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
+static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
struct intel_vgpu_creation_params *param)
{
struct intel_vgpu *vgpu;
@@ -224,15 +320,9 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
if (ret)
goto out_detach_hypervisor_vgpu;
- if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_KVM) {
- ret = intel_vgpu_init_opregion(vgpu, 0);
- if (ret)
- goto out_clean_gtt;
- }
-
ret = intel_vgpu_init_display(vgpu);
if (ret)
- goto out_clean_opregion;
+ goto out_clean_gtt;
ret = intel_vgpu_init_execlist(vgpu);
if (ret)
@@ -257,8 +347,6 @@ out_clean_execlist:
intel_vgpu_clean_execlist(vgpu);
out_clean_display:
intel_vgpu_clean_display(vgpu);
-out_clean_opregion:
- intel_vgpu_clean_opregion(vgpu);
out_clean_gtt:
intel_vgpu_clean_gtt(vgpu);
out_detach_hypervisor_vgpu:
@@ -272,3 +360,49 @@ out_free_vgpu:
mutex_unlock(&gvt->lock);
return ERR_PTR(ret);
}
+
+/**
+ * intel_gvt_create_vgpu - create a virtual GPU
+ * @gvt: GVT device
+ * @type: type of the vGPU to create
+ *
+ * This function is called when user wants to create a virtual GPU.
+ *
+ * Returns:
+ * pointer to intel_vgpu, error pointer if failed.
+ */
+struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
+ struct intel_vgpu_type *type)
+{
+ struct intel_vgpu_creation_params param;
+ struct intel_vgpu *vgpu;
+
+ param.handle = 0;
+ param.low_gm_sz = type->low_gm_size;
+ param.high_gm_sz = type->high_gm_size;
+ param.fence_sz = type->fence;
+
+ /* XXX creation params are currently expressed in MB */
+ param.low_gm_sz = BYTES_TO_MB(param.low_gm_sz);
+ param.high_gm_sz = BYTES_TO_MB(param.high_gm_sz);
+
+ vgpu = __intel_gvt_create_vgpu(gvt, &param);
+ if (IS_ERR(vgpu))
+ return vgpu;
+
+ /* recalculate the remaining available instances for each type */
+ intel_gvt_update_vgpu_types(gvt);
+
+ return vgpu;
+}
+
+/**
+ * intel_gvt_reset_vgpu - reset a virtual GPU
+ * @vgpu: virtual GPU
+ *
+ * This function is called when user wants to reset a virtual GPU.
+ *
+ */
+void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu)
+{
+}
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 7166a1a6265d..96407f684f7f 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -547,11 +547,11 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
pipe, plane);
}
if (work->flip_queued_req) {
- struct intel_engine_cs *engine = i915_gem_request_get_engine(work->flip_queued_req);
+ struct intel_engine_cs *engine = work->flip_queued_req->engine;
seq_printf(m, "Flip queued on %s at seqno %x, next seqno %x [current breadcrumb %x], completed? %d\n",
engine->name,
- i915_gem_request_get_seqno(work->flip_queued_req),
+ work->flip_queued_req->global_seqno,
atomic_read(&dev_priv->gt.global_timeline.next_seqno),
intel_engine_get_seqno(engine),
i915_gem_request_completed(work->flip_queued_req));
@@ -631,8 +631,9 @@ static void print_request(struct seq_file *m,
struct drm_i915_gem_request *rq,
const char *prefix)
{
- seq_printf(m, "%s%x [%x:%x] @ %d: %s\n", prefix,
+ seq_printf(m, "%s%x [%x:%x] prio=%d @ %dms: %s\n", prefix,
rq->global_seqno, rq->ctx->hw_id, rq->fence.seqno,
+ rq->priotree.priority,
jiffies_to_msecs(jiffies - rq->emitted_jiffies),
rq->timeline->common->name);
}
@@ -1761,8 +1762,7 @@ static int i915_sr_status(struct seq_file *m, void *unused)
intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
intel_runtime_pm_put(dev_priv);
- seq_printf(m, "self-refresh: %s\n",
- sr_enabled ? "enabled" : "disabled");
+ seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled));
return 0;
}
@@ -3216,6 +3216,7 @@ static int i915_engine_info(struct seq_file *m, void *unused)
if (i915.enable_execlists) {
u32 ptr, read, write;
+ struct rb_node *rb;
seq_printf(m, "\tExeclist status: 0x%08x %08x\n",
I915_READ(RING_EXECLIST_STATUS_LO(engine)),
@@ -3254,11 +3255,12 @@ static int i915_engine_info(struct seq_file *m, void *unused)
seq_printf(m, "\t\tELSP[1] idle\n");
rcu_read_unlock();
- spin_lock_irq(&engine->execlist_lock);
- list_for_each_entry(rq, &engine->execlist_queue, execlist_link) {
+ spin_lock_irq(&engine->timeline->lock);
+ for (rb = engine->execlist_first; rb; rb = rb_next(rb)) {
+ rq = rb_entry(rb, typeof(*rq), priotree.node);
print_request(m, rq, "\t\tQ ");
}
- spin_unlock_irq(&engine->execlist_lock);
+ spin_unlock_irq(&engine->timeline->lock);
} else if (INTEL_GEN(dev_priv) > 6) {
seq_printf(m, "\tPP_DIR_BASE: 0x%08x\n",
I915_READ(RING_PP_DIR_BASE(engine)));
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 0213a3090ab3..445fec9c2841 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -150,7 +150,7 @@ static void intel_detect_pch(struct drm_device *dev)
/* In all current cases, num_pipes is equivalent to the PCH_NOP setting
* (which really amounts to a PCH but no South Display).
*/
- if (INTEL_INFO(dev)->num_pipes == 0) {
+ if (INTEL_INFO(dev_priv)->num_pipes == 0) {
dev_priv->pch_type = PCH_NOP;
return;
}
@@ -323,6 +323,10 @@ static int i915_getparam(struct drm_device *dev, void *data,
*/
value = i915_gem_mmap_gtt_version();
break;
+ case I915_PARAM_HAS_SCHEDULER:
+ value = dev_priv->engine[RCS] &&
+ dev_priv->engine[RCS]->schedule;
+ break;
case I915_PARAM_MMAP_VERSION:
/* Remember to bump this if the version changes! */
case I915_PARAM_HAS_GEM:
@@ -374,12 +378,12 @@ static int
intel_alloc_mchbar_resource(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- int reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
+ int reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
u32 temp_lo, temp_hi = 0;
u64 mchbar_addr;
int ret;
- if (INTEL_INFO(dev)->gen >= 4)
+ if (INTEL_GEN(dev_priv) >= 4)
pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
mchbar_addr = ((u64)temp_hi << 32) | temp_lo;
@@ -406,7 +410,7 @@ intel_alloc_mchbar_resource(struct drm_device *dev)
return ret;
}
- if (INTEL_INFO(dev)->gen >= 4)
+ if (INTEL_GEN(dev_priv) >= 4)
pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
upper_32_bits(dev_priv->mch_res.start));
@@ -420,7 +424,7 @@ static void
intel_setup_mchbar(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
+ int mchbar_reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
u32 temp;
bool enabled;
@@ -460,7 +464,7 @@ static void
intel_teardown_mchbar(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
+ int mchbar_reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
if (dev_priv->mchbar_need_disable) {
if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
@@ -491,7 +495,7 @@ static unsigned int i915_vga_set_decode(void *cookie, bool state)
{
struct drm_device *dev = cookie;
- intel_modeset_vga_set_state(dev, state);
+ intel_modeset_vga_set_state(to_i915(dev), state);
if (state)
return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
@@ -607,7 +611,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
intel_modeset_gem_init(dev);
- if (INTEL_INFO(dev)->num_pipes == 0)
+ if (INTEL_INFO(dev_priv)->num_pipes == 0)
return 0;
ret = intel_fbdev_init(dev);
@@ -879,7 +883,7 @@ static int i915_mmio_setup(struct drm_device *dev)
* the register BAR remains the same size for all the earlier
* generations up to Ironlake.
*/
- if (INTEL_INFO(dev)->gen < 5)
+ if (INTEL_GEN(dev_priv) < 5)
mmio_size = 512 * 1024;
else
mmio_size = 2 * 1024 * 1024;
@@ -1168,8 +1172,8 @@ static void i915_driver_unregister(struct drm_i915_private *dev_priv)
/**
* i915_driver_load - setup chip and create an initial config
- * @dev: DRM device
- * @flags: startup flags
+ * @pdev: PCI device
+ * @ent: matching PCI ID entry
*
* The driver load routine has to do several things:
* - drive output discovery via intel_modeset_init()
@@ -1438,7 +1442,7 @@ static int i915_drm_suspend(struct drm_device *dev)
intel_suspend_hw(dev_priv);
- i915_gem_suspend_gtt_mappings(dev);
+ i915_gem_suspend_gtt_mappings(dev_priv);
i915_save_state(dev);
@@ -1512,7 +1516,7 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
* Fujitsu FSC S7110
* Acer Aspire 1830T
*/
- if (!(hibernation && INTEL_INFO(dev_priv)->gen < 6))
+ if (!(hibernation && INTEL_GEN(dev_priv) < 6))
pci_set_power_state(pdev, PCI_D3hot);
dev_priv->suspended_to_idle = suspend_to_idle(dev_priv);
@@ -2422,7 +2426,7 @@ static int intel_runtime_resume(struct device *kdev)
* No point of rolling back things in case of an error, as the best
* we can do is to hope that things will still work (and disable RPM).
*/
- i915_gem_init_swizzling(dev);
+ i915_gem_init_swizzling(dev_priv);
intel_runtime_pm_enable_interrupts(dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 6f4a6bcf6ed4..56002a52936d 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -60,11 +60,15 @@
#include "intel_ringbuffer.h"
#include "i915_gem.h"
+#include "i915_gem_fence_reg.h"
+#include "i915_gem_object.h"
#include "i915_gem_gtt.h"
#include "i915_gem_render_state.h"
#include "i915_gem_request.h"
#include "i915_gem_timeline.h"
+#include "i915_vma.h"
+
#include "intel_gvt.h"
/* General customization:
@@ -72,8 +76,8 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20161108"
-#define DRIVER_TIMESTAMP 1478587895
+#define DRIVER_DATE "20161121"
+#define DRIVER_TIMESTAMP 1479717903
#undef WARN_ON
/* Many gcc seem to no see through this and fall over :( */
@@ -125,6 +129,11 @@ static inline const char *onoff(bool v)
return v ? "on" : "off";
}
+static inline const char *enableddisabled(bool v)
+{
+ return v ? "enabled" : "disabled";
+}
+
enum pipe {
INVALID_PIPE = -1,
PIPE_A = 0,
@@ -459,23 +468,6 @@ struct intel_opregion {
struct intel_overlay;
struct intel_overlay_error_state;
-struct drm_i915_fence_reg {
- struct list_head link;
- struct drm_i915_private *i915;
- struct i915_vma *vma;
- int pin_count;
- int id;
- /**
- * Whether the tiling parameters for the currently
- * associated fence register have changed. Note that
- * for the purposes of tracking tiling changes we also
- * treat the unfenced register, the register slot that
- * the object occupies whilst it executes a fenced
- * command (such as BLT on gen2/3), as a "fence".
- */
- bool dirty;
-};
-
struct sdvo_device_mapping {
u8 initialized;
u8 dvo_port;
@@ -487,6 +479,7 @@ struct sdvo_device_mapping {
struct intel_connector;
struct intel_encoder;
+struct intel_atomic_state;
struct intel_crtc_state;
struct intel_initial_plane_config;
struct intel_crtc;
@@ -500,8 +493,12 @@ struct drm_i915_display_funcs {
int (*compute_intermediate_wm)(struct drm_device *dev,
struct intel_crtc *intel_crtc,
struct intel_crtc_state *newstate);
- void (*initial_watermarks)(struct intel_crtc_state *cstate);
- void (*optimize_watermarks)(struct intel_crtc_state *cstate);
+ void (*initial_watermarks)(struct intel_atomic_state *state,
+ struct intel_crtc_state *cstate);
+ void (*atomic_update_watermarks)(struct intel_atomic_state *state,
+ struct intel_crtc_state *cstate);
+ void (*optimize_watermarks)(struct intel_atomic_state *state,
+ struct intel_crtc_state *cstate);
int (*compute_global_watermarks)(struct drm_atomic_state *state);
void (*update_wm)(struct intel_crtc *crtc);
int (*modeset_calc_cdclk)(struct drm_atomic_state *state);
@@ -562,6 +559,18 @@ enum forcewake_domains {
#define FW_REG_READ (1)
#define FW_REG_WRITE (2)
+enum decoupled_power_domain {
+ GEN9_DECOUPLED_PD_BLITTER = 0,
+ GEN9_DECOUPLED_PD_RENDER,
+ GEN9_DECOUPLED_PD_MEDIA,
+ GEN9_DECOUPLED_PD_ALL
+};
+
+enum decoupled_ops {
+ GEN9_DECOUPLED_OP_WRITE = 0,
+ GEN9_DECOUPLED_OP_READ
+};
+
enum forcewake_domains
intel_uncore_forcewake_for_reg(struct drm_i915_private *dev_priv,
i915_reg_t reg, unsigned int op);
@@ -668,7 +677,7 @@ struct intel_csr {
func(is_skylake); \
func(is_broxton); \
func(is_kabylake); \
- func(is_preliminary); \
+ func(is_alpha_support); \
/* Keep has_* in alphabetical order */ \
func(has_64bit_reloc); \
func(has_csr); \
@@ -696,7 +705,8 @@ struct intel_csr {
func(cursor_needs_physical); \
func(hws_needs_physical); \
func(overlay_needs_physical); \
- func(supports_tv)
+ func(supports_tv); \
+ func(has_decoupled_mmio)
struct sseu_dev_info {
u8 slice_mask;
@@ -949,6 +959,7 @@ struct i915_gem_context {
/* Unique identifier for this context, used by the hw for tracking */
unsigned int hw_id;
u32 user_handle;
+ int priority; /* greater priorities are serviced first */
u32 ggtt_alignment;
@@ -1791,6 +1802,7 @@ struct drm_i915_private {
struct kmem_cache *objects;
struct kmem_cache *vmas;
struct kmem_cache *requests;
+ struct kmem_cache *dependencies;
const struct intel_device_info info;
@@ -2053,13 +2065,6 @@ struct drm_i915_private {
*/
uint16_t skl_latency[8];
- /*
- * The skl_wm_values structure is a bit too big for stack
- * allocation, so we keep the staging struct where we store
- * intermediate results here instead.
- */
- struct skl_wm_values skl_results;
-
/* current hardware state */
union {
struct ilk_wm_values hw;
@@ -2179,31 +2184,6 @@ enum hdmi_force_audio {
#define I915_GTT_OFFSET_NONE ((u32)-1)
-struct drm_i915_gem_object_ops {
- unsigned int flags;
-#define I915_GEM_OBJECT_HAS_STRUCT_PAGE 0x1
-#define I915_GEM_OBJECT_IS_SHRINKABLE 0x2
-
- /* Interface between the GEM object and its backing storage.
- * get_pages() is called once prior to the use of the associated set
- * of pages before to binding them into the GTT, and put_pages() is
- * called after we no longer need them. As we expect there to be
- * associated cost with migrating pages between the backing storage
- * and making them available for the GPU (e.g. clflush), we may hold
- * onto the pages after they are no longer referenced by the GPU
- * in case they may be used again shortly (for example migrating the
- * pages to a different memory domain within the GTT). put_pages()
- * will therefore most likely be called when the object itself is
- * being released or under memory pressure (where we attempt to
- * reap pages for the shrinker).
- */
- struct sg_table *(*get_pages)(struct drm_i915_gem_object *);
- void (*put_pages)(struct drm_i915_gem_object *, struct sg_table *);
-
- int (*dmabuf_export)(struct drm_i915_gem_object *);
- void (*release)(struct drm_i915_gem_object *);
-};
-
/*
* Frontbuffer tracking bits. Set in obj->frontbuffer_bits while a gem bo is
* considered to be the frontbuffer for the given plane interface-wise. This
@@ -2225,292 +2205,6 @@ struct drm_i915_gem_object_ops {
#define INTEL_FRONTBUFFER_ALL_MASK(pipe) \
(0xff << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))
-struct drm_i915_gem_object {
- struct drm_gem_object base;
-
- const struct drm_i915_gem_object_ops *ops;
-
- /** List of VMAs backed by this object */
- struct list_head vma_list;
- struct rb_root vma_tree;
-
- /** Stolen memory for this object, instead of being backed by shmem. */
- struct drm_mm_node *stolen;
- struct list_head global_link;
- union {
- struct rcu_head rcu;
- struct llist_node freed;
- };
-
- /**
- * Whether the object is currently in the GGTT mmap.
- */
- struct list_head userfault_link;
-
- /** Used in execbuf to temporarily hold a ref */
- struct list_head obj_exec_link;
-
- struct list_head batch_pool_link;
-
- unsigned long flags;
-
- /**
- * Have we taken a reference for the object for incomplete GPU
- * activity?
- */
-#define I915_BO_ACTIVE_REF 0
-
- /*
- * Is the object to be mapped as read-only to the GPU
- * Only honoured if hardware has relevant pte bit
- */
- unsigned long gt_ro:1;
- unsigned int cache_level:3;
- unsigned int cache_dirty:1;
-
- atomic_t frontbuffer_bits;
- unsigned int frontbuffer_ggtt_origin; /* write once */
-
- /** Current tiling stride for the object, if it's tiled. */
- unsigned int tiling_and_stride;
-#define FENCE_MINIMUM_STRIDE 128 /* See i915_tiling_ok() */
-#define TILING_MASK (FENCE_MINIMUM_STRIDE-1)
-#define STRIDE_MASK (~TILING_MASK)
-
- /** Count of VMA actually bound by this object */
- unsigned int bind_count;
- unsigned int active_count;
- unsigned int pin_display;
-
- struct {
- struct mutex lock; /* protects the pages and their use */
- atomic_t pages_pin_count;
-
- struct sg_table *pages;
- void *mapping;
-
- struct i915_gem_object_page_iter {
- struct scatterlist *sg_pos;
- unsigned int sg_idx; /* in pages, but 32bit eek! */
-
- struct radix_tree_root radix;
- struct mutex lock; /* protects this cache */
- } get_page;
-
- /**
- * Advice: are the backing pages purgeable?
- */
- unsigned int madv:2;
-
- /**
- * This is set if the object has been written to since the
- * pages were last acquired.
- */
- bool dirty:1;
-
- /**
- * This is set if the object has been pinned due to unknown
- * swizzling.
- */
- bool quirked:1;
- } mm;
-
- /** Breadcrumb of last rendering to the buffer.
- * There can only be one writer, but we allow for multiple readers.
- * If there is a writer that necessarily implies that all other
- * read requests are complete - but we may only be lazily clearing
- * the read requests. A read request is naturally the most recent
- * request on a ring, so we may have two different write and read
- * requests on one ring where the write request is older than the
- * read request. This allows for the CPU to read from an active
- * buffer by only waiting for the write to complete.
- */
- struct reservation_object *resv;
-
- /** References from framebuffers, locks out tiling changes. */
- unsigned long framebuffer_references;
-
- /** Record of address bit 17 of each page at last unbind. */
- unsigned long *bit_17;
-
- struct i915_gem_userptr {
- uintptr_t ptr;
- unsigned read_only :1;
-
- struct i915_mm_struct *mm;
- struct i915_mmu_object *mmu_object;
- struct work_struct *work;
- } userptr;
-
- /** for phys allocated objects */
- struct drm_dma_handle *phys_handle;
-
- struct reservation_object __builtin_resv;
-};
-
-static inline struct drm_i915_gem_object *
-to_intel_bo(struct drm_gem_object *gem)
-{
- /* Assert that to_intel_bo(NULL) == NULL */
- BUILD_BUG_ON(offsetof(struct drm_i915_gem_object, base));
-
- return container_of(gem, struct drm_i915_gem_object, base);
-}
-
-/**
- * i915_gem_object_lookup_rcu - look up a temporary GEM object from its handle
- * @filp: DRM file private date
- * @handle: userspace handle
- *
- * Returns:
- *
- * A pointer to the object named by the handle if such exists on @filp, NULL
- * otherwise. This object is only valid whilst under the RCU read lock, and
- * note carefully the object may be in the process of being destroyed.
- */
-static inline struct drm_i915_gem_object *
-i915_gem_object_lookup_rcu(struct drm_file *file, u32 handle)
-{
-#ifdef CONFIG_LOCKDEP
- WARN_ON(debug_locks && !lock_is_held(&rcu_lock_map));
-#endif
- return idr_find(&file->object_idr, handle);
-}
-
-static inline struct drm_i915_gem_object *
-i915_gem_object_lookup(struct drm_file *file, u32 handle)
-{
- struct drm_i915_gem_object *obj;
-
- rcu_read_lock();
- obj = i915_gem_object_lookup_rcu(file, handle);
- if (obj && !kref_get_unless_zero(&obj->base.refcount))
- obj = NULL;
- rcu_read_unlock();
-
- return obj;
-}
-
-__deprecated
-extern struct drm_gem_object *
-drm_gem_object_lookup(struct drm_file *file, u32 handle);
-
-__attribute__((nonnull))
-static inline struct drm_i915_gem_object *
-i915_gem_object_get(struct drm_i915_gem_object *obj)
-{
- drm_gem_object_reference(&obj->base);
- return obj;
-}
-
-__deprecated
-extern void drm_gem_object_reference(struct drm_gem_object *);
-
-__attribute__((nonnull))
-static inline void
-i915_gem_object_put(struct drm_i915_gem_object *obj)
-{
- __drm_gem_object_unreference(&obj->base);
-}
-
-__deprecated
-extern void drm_gem_object_unreference(struct drm_gem_object *);
-
-__deprecated
-extern void drm_gem_object_unreference_unlocked(struct drm_gem_object *);
-
-static inline bool
-i915_gem_object_is_dead(const struct drm_i915_gem_object *obj)
-{
- return atomic_read(&obj->base.refcount.refcount) == 0;
-}
-
-static inline bool
-i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
-{
- return obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE;
-}
-
-static inline bool
-i915_gem_object_is_shrinkable(const struct drm_i915_gem_object *obj)
-{
- return obj->ops->flags & I915_GEM_OBJECT_IS_SHRINKABLE;
-}
-
-static inline bool
-i915_gem_object_is_active(const struct drm_i915_gem_object *obj)
-{
- return obj->active_count;
-}
-
-static inline bool
-i915_gem_object_has_active_reference(const struct drm_i915_gem_object *obj)
-{
- return test_bit(I915_BO_ACTIVE_REF, &obj->flags);
-}
-
-static inline void
-i915_gem_object_set_active_reference(struct drm_i915_gem_object *obj)
-{
- lockdep_assert_held(&obj->base.dev->struct_mutex);
- __set_bit(I915_BO_ACTIVE_REF, &obj->flags);
-}
-
-static inline void
-i915_gem_object_clear_active_reference(struct drm_i915_gem_object *obj)
-{
- lockdep_assert_held(&obj->base.dev->struct_mutex);
- __clear_bit(I915_BO_ACTIVE_REF, &obj->flags);
-}
-
-void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj);
-
-static inline unsigned int
-i915_gem_object_get_tiling(struct drm_i915_gem_object *obj)
-{
- return obj->tiling_and_stride & TILING_MASK;
-}
-
-static inline bool
-i915_gem_object_is_tiled(struct drm_i915_gem_object *obj)
-{
- return i915_gem_object_get_tiling(obj) != I915_TILING_NONE;
-}
-
-static inline unsigned int
-i915_gem_object_get_stride(struct drm_i915_gem_object *obj)
-{
- return obj->tiling_and_stride & STRIDE_MASK;
-}
-
-static inline struct intel_engine_cs *
-i915_gem_object_last_write_engine(struct drm_i915_gem_object *obj)
-{
- struct intel_engine_cs *engine = NULL;
- struct dma_fence *fence;
-
- rcu_read_lock();
- fence = reservation_object_get_excl_rcu(obj->resv);
- rcu_read_unlock();
-
- if (fence && dma_fence_is_i915(fence) && !dma_fence_is_signaled(fence))
- engine = to_request(fence)->engine;
- dma_fence_put(fence);
-
- return engine;
-}
-
-static inline struct i915_vma *i915_vma_get(struct i915_vma *vma)
-{
- i915_gem_object_get(vma->obj);
- return vma;
-}
-
-static inline void i915_vma_put(struct i915_vma *vma)
-{
- i915_gem_object_put(vma->obj);
-}
-
/*
* Optimised SGL iterator for GEM objects
*/
@@ -2683,24 +2377,19 @@ struct drm_i915_cmd_table {
int count;
};
-/* Note that the (struct drm_i915_private *) cast is just to shut up gcc. */
-#define __I915__(p) ({ \
- struct drm_i915_private *__p; \
- if (__builtin_types_compatible_p(typeof(*p), struct drm_i915_private)) \
- __p = (struct drm_i915_private *)p; \
- else if (__builtin_types_compatible_p(typeof(*p), struct drm_device)) \
- __p = to_i915((struct drm_device *)p); \
- else \
- BUILD_BUG(); \
- __p; \
-})
-#define INTEL_INFO(p) (&__I915__(p)->info)
+static inline const struct intel_device_info *
+intel_info(const struct drm_i915_private *dev_priv)
+{
+ return &dev_priv->info;
+}
+
+#define INTEL_INFO(dev_priv) intel_info((dev_priv))
#define INTEL_GEN(dev_priv) ((dev_priv)->info.gen)
#define INTEL_DEVID(dev_priv) ((dev_priv)->info.device_id)
#define REVID_FOREVER 0xff
-#define INTEL_REVID(p) (__I915__(p)->drm.pdev->revision)
+#define INTEL_REVID(dev_priv) ((dev_priv)->drm.pdev->revision)
#define GEN_FOREVER (0)
/*
@@ -2797,7 +2486,7 @@ struct drm_i915_cmd_table {
#define IS_SKL_GT4(dev_priv) (IS_SKYLAKE(dev_priv) && \
(INTEL_DEVID(dev_priv) & 0x00F0) == 0x0030)
-#define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary)
+#define IS_ALPHA_SUPPORT(intel_info) ((intel_info)->is_alpha_support)
#define SKL_REVID_A0 0x0
#define SKL_REVID_B0 0x1
@@ -2851,28 +2540,31 @@ struct drm_i915_cmd_table {
#define ALL_ENGINES (~0)
#define HAS_ENGINE(dev_priv, id) \
- (!!(INTEL_INFO(dev_priv)->ring_mask & ENGINE_MASK(id)))
+ (!!((dev_priv)->info.ring_mask & ENGINE_MASK(id)))
#define HAS_BSD(dev_priv) HAS_ENGINE(dev_priv, VCS)
#define HAS_BSD2(dev_priv) HAS_ENGINE(dev_priv, VCS2)
#define HAS_BLT(dev_priv) HAS_ENGINE(dev_priv, BCS)
#define HAS_VEBOX(dev_priv) HAS_ENGINE(dev_priv, VECS)
-#define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc)
-#define HAS_SNOOP(dev) (INTEL_INFO(dev)->has_snoop)
-#define HAS_EDRAM(dev) (!!(__I915__(dev)->edram_cap & EDRAM_ENABLED))
+#define HAS_LLC(dev_priv) ((dev_priv)->info.has_llc)
+#define HAS_SNOOP(dev_priv) ((dev_priv)->info.has_snoop)
+#define HAS_EDRAM(dev_priv) (!!((dev_priv)->edram_cap & EDRAM_ENABLED))
#define HAS_WT(dev_priv) ((IS_HASWELL(dev_priv) || \
IS_BROADWELL(dev_priv)) && HAS_EDRAM(dev_priv))
-#define HWS_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->hws_needs_physical)
-#define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->has_hw_contexts)
-#define HAS_LOGICAL_RING_CONTEXTS(dev) (INTEL_INFO(dev)->has_logical_ring_contexts)
-#define USES_PPGTT(dev) (i915.enable_ppgtt)
-#define USES_FULL_PPGTT(dev) (i915.enable_ppgtt >= 2)
-#define USES_FULL_48BIT_PPGTT(dev) (i915.enable_ppgtt == 3)
+#define HWS_NEEDS_PHYSICAL(dev_priv) ((dev_priv)->info.hws_needs_physical)
-#define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay)
-#define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical)
+#define HAS_HW_CONTEXTS(dev_priv) ((dev_priv)->info.has_hw_contexts)
+#define HAS_LOGICAL_RING_CONTEXTS(dev_priv) \
+ ((dev_priv)->info.has_logical_ring_contexts)
+#define USES_PPGTT(dev_priv) (i915.enable_ppgtt)
+#define USES_FULL_PPGTT(dev_priv) (i915.enable_ppgtt >= 2)
+#define USES_FULL_48BIT_PPGTT(dev_priv) (i915.enable_ppgtt == 3)
+
+#define HAS_OVERLAY(dev_priv) ((dev_priv)->info.has_overlay)
+#define OVERLAY_NEEDS_PHYSICAL(dev_priv) \
+ ((dev_priv)->info.overlay_needs_physical)
/* Early gen2 have a totally busted CS tlb and require pinned batches. */
#define HAS_BROKEN_CS_TLB(dev_priv) (IS_I830(dev_priv) || IS_845G(dev_priv))
@@ -2889,8 +2581,8 @@ struct drm_i915_cmd_table {
* legacy irq no. is shared with another device. The kernel then disables that
* interrupt source and so prevents the other device from working properly.
*/
-#define HAS_AUX_IRQ(dev) (INTEL_INFO(dev)->gen >= 5)
-#define HAS_GMBUS_IRQ(dev) (INTEL_INFO(dev)->has_gmbus_irq)
+#define HAS_AUX_IRQ(dev_priv) ((dev_priv)->info.gen >= 5)
+#define HAS_GMBUS_IRQ(dev_priv) ((dev_priv)->info.has_gmbus_irq)
/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
* rows, which changed the alignment requirements and fence programming.
@@ -2898,24 +2590,24 @@ struct drm_i915_cmd_table {
#define HAS_128_BYTE_Y_TILING(dev_priv) (!IS_GEN2(dev_priv) && \
!(IS_I915G(dev_priv) || \
IS_I915GM(dev_priv)))
-#define SUPPORTS_TV(dev) (INTEL_INFO(dev)->supports_tv)
-#define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug)
+#define SUPPORTS_TV(dev_priv) ((dev_priv)->info.supports_tv)
+#define I915_HAS_HOTPLUG(dev_priv) ((dev_priv)->info.has_hotplug)
-#define HAS_FW_BLC(dev_priv) (INTEL_GEN(dev_priv) > 2)
-#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
-#define HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
+#define HAS_FW_BLC(dev_priv) (INTEL_GEN(dev_priv) > 2)
+#define HAS_PIPE_CXSR(dev_priv) ((dev_priv)->info.has_pipe_cxsr)
+#define HAS_FBC(dev_priv) ((dev_priv)->info.has_fbc)
#define HAS_IPS(dev_priv) (IS_HSW_ULT(dev_priv) || IS_BROADWELL(dev_priv))
-#define HAS_DP_MST(dev) (INTEL_INFO(dev)->has_dp_mst)
+#define HAS_DP_MST(dev_priv) ((dev_priv)->info.has_dp_mst)
-#define HAS_DDI(dev_priv) ((dev_priv)->info.has_ddi)
-#define HAS_FPGA_DBG_UNCLAIMED(dev) (INTEL_INFO(dev)->has_fpga_dbg)
-#define HAS_PSR(dev) (INTEL_INFO(dev)->has_psr)
-#define HAS_RC6(dev) (INTEL_INFO(dev)->has_rc6)
-#define HAS_RC6p(dev) (INTEL_INFO(dev)->has_rc6p)
+#define HAS_DDI(dev_priv) ((dev_priv)->info.has_ddi)
+#define HAS_FPGA_DBG_UNCLAIMED(dev_priv) ((dev_priv)->info.has_fpga_dbg)
+#define HAS_PSR(dev_priv) ((dev_priv)->info.has_psr)
+#define HAS_RC6(dev_priv) ((dev_priv)->info.has_rc6)
+#define HAS_RC6p(dev_priv) ((dev_priv)->info.has_rc6p)
-#define HAS_CSR(dev) (INTEL_INFO(dev)->has_csr)
+#define HAS_CSR(dev_priv) ((dev_priv)->info.has_csr)
#define HAS_RUNTIME_PM(dev_priv) ((dev_priv)->info.has_runtime_pm)
#define HAS_64BIT_RELOC(dev_priv) ((dev_priv)->info.has_64bit_reloc)
@@ -2925,13 +2617,13 @@ struct drm_i915_cmd_table {
* command submission once loaded. But these are logically independent
* properties, so we have separate macros to test them.
*/
-#define HAS_GUC(dev) (INTEL_INFO(dev)->has_guc)
-#define HAS_GUC_UCODE(dev) (HAS_GUC(dev))
-#define HAS_GUC_SCHED(dev) (HAS_GUC(dev))
+#define HAS_GUC(dev_priv) ((dev_priv)->info.has_guc)
+#define HAS_GUC_UCODE(dev_priv) (HAS_GUC(dev_priv))
+#define HAS_GUC_SCHED(dev_priv) (HAS_GUC(dev_priv))
-#define HAS_RESOURCE_STREAMER(dev) (INTEL_INFO(dev)->has_resource_streamer)
+#define HAS_RESOURCE_STREAMER(dev_priv) ((dev_priv)->info.has_resource_streamer)
-#define HAS_POOLED_EU(dev) (INTEL_INFO(dev)->has_pooled_eu)
+#define HAS_POOLED_EU(dev_priv) ((dev_priv)->info.has_pooled_eu)
#define INTEL_PCH_DEVICE_ID_MASK 0xff00
#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
@@ -2971,6 +2663,8 @@ struct drm_i915_cmd_table {
#define GT_FREQUENCY_MULTIPLIER 50
#define GEN9_FREQ_SCALER 3
+#define HAS_DECOUPLED_MMIO(dev_priv) (INTEL_INFO(dev_priv)->has_decoupled_mmio)
+
#include "i915_trace.h"
static inline bool intel_scanout_needs_vtd_wa(struct drm_i915_private *dev_priv)
@@ -3222,13 +2916,6 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
u64 alignment,
u64 flags);
-int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
- u32 flags);
-void __i915_vma_set_map_and_fenceable(struct i915_vma *vma);
-int __must_check i915_vma_unbind(struct i915_vma *vma);
-void i915_vma_close(struct i915_vma *vma);
-void i915_vma_destroy(struct i915_vma *vma);
-
int i915_gem_object_unbind(struct drm_i915_gem_object *obj);
void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
@@ -3405,10 +3092,10 @@ static inline u32 i915_reset_count(struct i915_gpu_error *error)
void i915_gem_reset(struct drm_i915_private *dev_priv);
void i915_gem_set_wedged(struct drm_i915_private *dev_priv);
-bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
+void i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
int __must_check i915_gem_init(struct drm_device *dev);
int __must_check i915_gem_init_hw(struct drm_device *dev);
-void i915_gem_init_swizzling(struct drm_device *dev);
+void i915_gem_init_swizzling(struct drm_i915_private *dev_priv);
void i915_gem_cleanup_engines(struct drm_device *dev);
int __must_check i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
unsigned int flags);
@@ -3419,6 +3106,11 @@ int i915_gem_object_wait(struct drm_i915_gem_object *obj,
unsigned int flags,
long timeout,
struct intel_rps_client *rps);
+int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
+ unsigned int flags,
+ int priority);
+#define I915_PRIORITY_DISPLAY I915_PRIORITY_MAX
+
int __must_check
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
bool write);
@@ -3478,57 +3170,13 @@ i915_gem_object_ggtt_offset(struct drm_i915_gem_object *o,
return i915_ggtt_offset(i915_gem_object_to_ggtt(o, view));
}
-/* i915_gem_fence.c */
+/* i915_gem_fence_reg.c */
int __must_check i915_vma_get_fence(struct i915_vma *vma);
int __must_check i915_vma_put_fence(struct i915_vma *vma);
-/**
- * i915_vma_pin_fence - pin fencing state
- * @vma: vma to pin fencing for
- *
- * This pins the fencing state (whether tiled or untiled) to make sure the
- * vma (and its object) is ready to be used as a scanout target. Fencing
- * status must be synchronize first by calling i915_vma_get_fence():
- *
- * The resulting fence pin reference must be released again with
- * i915_vma_unpin_fence().
- *
- * Returns:
- *
- * True if the vma has a fence, false otherwise.
- */
-static inline bool
-i915_vma_pin_fence(struct i915_vma *vma)
-{
- lockdep_assert_held(&vma->vm->dev->struct_mutex);
- if (vma->fence) {
- vma->fence->pin_count++;
- return true;
- } else
- return false;
-}
-
-/**
- * i915_vma_unpin_fence - unpin fencing state
- * @vma: vma to unpin fencing for
- *
- * This releases the fence pin reference acquired through
- * i915_vma_pin_fence. It will handle both objects with and without an
- * attached fence correctly, callers do not need to distinguish this.
- */
-static inline void
-i915_vma_unpin_fence(struct i915_vma *vma)
-{
- lockdep_assert_held(&vma->vm->dev->struct_mutex);
- if (vma->fence) {
- GEM_BUG_ON(vma->fence->pin_count <= 0);
- vma->fence->pin_count--;
- }
-}
-
-void i915_gem_restore_fences(struct drm_device *dev);
+void i915_gem_restore_fences(struct drm_i915_private *dev_priv);
-void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
+void i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv);
void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj,
struct sg_table *pages);
void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj,
@@ -3631,7 +3279,7 @@ int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
u64 end);
void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
struct drm_mm_node *node);
-int i915_gem_init_stolen(struct drm_device *dev);
+int i915_gem_init_stolen(struct drm_i915_private *dev_priv);
void i915_gem_cleanup_stolen(struct drm_device *dev);
struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_device *dev, u32 size);
@@ -3833,10 +3481,11 @@ extern void intel_modeset_gem_init(struct drm_device *dev);
extern void intel_modeset_cleanup(struct drm_device *dev);
extern int intel_connector_register(struct drm_connector *);
extern void intel_connector_unregister(struct drm_connector *);
-extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
+extern int intel_modeset_vga_set_state(struct drm_i915_private *dev_priv,
+ bool state);
extern void intel_display_resume(struct drm_device *dev);
-extern void i915_redisable_vga(struct drm_device *dev);
-extern void i915_redisable_vga_power_on(struct drm_device *dev);
+extern void i915_redisable_vga(struct drm_i915_private *dev_priv);
+extern void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv);
extern bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val);
extern void intel_init_pch_refclk(struct drm_device *dev);
extern void intel_set_rps(struct drm_i915_private *dev_priv, u8 val);
@@ -3855,7 +3504,7 @@ extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e,
extern struct intel_display_error_state *
intel_display_capture_error_state(struct drm_i915_private *dev_priv);
extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
- struct drm_device *dev,
+ struct drm_i915_private *dev_priv,
struct intel_display_error_state *error);
int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 41e697e5dbcd..902fa427c196 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -34,6 +34,7 @@
#include "intel_drv.h"
#include "intel_frontbuffer.h"
#include "intel_mocs.h"
+#include <linux/dma-fence-array.h>
#include <linux/reservation.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
@@ -48,7 +49,7 @@ static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *o
static bool cpu_cache_is_coherent(struct drm_device *dev,
enum i915_cache_level level)
{
- return HAS_LLC(dev) || level != I915_CACHE_NONE;
+ return HAS_LLC(to_i915(dev)) || level != I915_CACHE_NONE;
}
static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
@@ -220,15 +221,17 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
}
static void
-__i915_gem_object_release_shmem(struct drm_i915_gem_object *obj)
+__i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
+ struct sg_table *pages)
{
GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED);
if (obj->mm.madv == I915_MADV_DONTNEED)
obj->mm.dirty = false;
- if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
- i915_gem_clflush_object(obj, false);
+ if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0 &&
+ !cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
+ drm_clflush_sg(pages);
obj->base.read_domains = I915_GEM_DOMAIN_CPU;
obj->base.write_domain = I915_GEM_DOMAIN_CPU;
@@ -238,7 +241,7 @@ static void
i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
struct sg_table *pages)
{
- __i915_gem_object_release_shmem(obj);
+ __i915_gem_object_release_shmem(obj, pages);
if (obj->mm.dirty) {
struct address_space *mapping = obj->base.filp->f_mapping;
@@ -433,6 +436,70 @@ i915_gem_object_wait_reservation(struct reservation_object *resv,
return timeout;
}
+static void __fence_set_priority(struct dma_fence *fence, int prio)
+{
+ struct drm_i915_gem_request *rq;
+ struct intel_engine_cs *engine;
+
+ if (!dma_fence_is_i915(fence))
+ return;
+
+ rq = to_request(fence);
+ engine = rq->engine;
+ if (!engine->schedule)
+ return;
+
+ engine->schedule(rq, prio);
+}
+
+static void fence_set_priority(struct dma_fence *fence, int prio)
+{
+ /* Recurse once into a fence-array */
+ if (dma_fence_is_array(fence)) {
+ struct dma_fence_array *array = to_dma_fence_array(fence);
+ int i;
+
+ for (i = 0; i < array->num_fences; i++)
+ __fence_set_priority(array->fences[i], prio);
+ } else {
+ __fence_set_priority(fence, prio);
+ }
+}
+
+int
+i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
+ unsigned int flags,
+ int prio)
+{
+ struct dma_fence *excl;
+
+ if (flags & I915_WAIT_ALL) {
+ struct dma_fence **shared;
+ unsigned int count, i;
+ int ret;
+
+ ret = reservation_object_get_fences_rcu(obj->resv,
+ &excl, &count, &shared);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < count; i++) {
+ fence_set_priority(shared[i], prio);
+ dma_fence_put(shared[i]);
+ }
+
+ kfree(shared);
+ } else {
+ excl = reservation_object_get_excl_rcu(obj->resv);
+ }
+
+ if (excl) {
+ fence_set_priority(excl, prio);
+ dma_fence_put(excl);
+ }
+ return 0;
+}
+
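/* A hedged usage sketch: i915_gem_object_wait_priority() is the entry point
 * the flip path can use to bump whatever requests a framebuffer still
 * depends on, e.g. (priority name as introduced by this series, call site
 * assumed):
 *
 *	ret = i915_gem_object_wait_priority(obj, 0, I915_PRIORITY_DISPLAY);
 *
 * Fences that did not originate from i915, and engines without a
 * ->schedule() hook, are silently skipped by __fence_set_priority() above.
 */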
/**
* Waits for rendering to the object to be completed
* @obj: i915 gem object
@@ -1757,7 +1824,7 @@ int i915_gem_fault(struct vm_area_struct *area, struct vm_fault *vmf)
goto err_rpm;
/* Access to snoopable pages through the GTT is incoherent. */
- if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev)) {
+ if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev_priv)) {
ret = -EFAULT;
goto err_unlock;
}
@@ -2150,7 +2217,7 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj,
struct sgt_iter sgt_iter;
struct page *page;
- __i915_gem_object_release_shmem(obj);
+ __i915_gem_object_release_shmem(obj, pages);
i915_gem_gtt_finish_pages(obj, pages);
@@ -2232,6 +2299,30 @@ static unsigned int swiotlb_max_size(void)
#endif
}
+static void i915_sg_trim(struct sg_table *orig_st)
+{
+ struct sg_table new_st;
+ struct scatterlist *sg, *new_sg;
+ unsigned int i;
+
+ if (orig_st->nents == orig_st->orig_nents)
+ return;
+
+ if (sg_alloc_table(&new_st, orig_st->nents, GFP_KERNEL))
+ return;
+
+ new_sg = new_st.sgl;
+ for_each_sg(orig_st->sgl, sg, orig_st->nents, i) {
+ sg_set_page(new_sg, sg_page(sg), sg->length, 0);
+ /* called before being DMA mapped, no need to copy sg->dma_* */
+ new_sg = sg_next(new_sg);
+ }
+
+ sg_free_table(orig_st);
+
+ *orig_st = new_st;
+}
+
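/* Why trim: get_pages_gtt allocates the table for the worst case of one
 * entry per page and then coalesces adjacent pages into fewer entries, so
 * nents can end up far below orig_nents. Copying into a table sized for
 * exactly nents returns the slack to the slab. Skipping the dma_* fields
 * is safe only because the trim runs before i915_gem_gtt_prepare_pages()
 * DMA-maps the table, as the comment in the loop notes.
 */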
static struct sg_table *
i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
{
@@ -2296,7 +2387,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
page = shmem_read_mapping_page(mapping, i);
if (IS_ERR(page)) {
ret = PTR_ERR(page);
- goto err_pages;
+ goto err_sg;
}
}
if (!i ||
@@ -2317,6 +2408,9 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
if (sg) /* loop terminated early; short sg table */
sg_mark_end(sg);
+ /* Trim unused sg entries to avoid wasting memory. */
+ i915_sg_trim(st);
+
ret = i915_gem_gtt_prepare_pages(obj, st);
if (ret)
goto err_pages;
@@ -2326,8 +2420,9 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
return st;
-err_pages:
+err_sg:
sg_mark_end(sg);
+err_pages:
for_each_sgt_page(page, sgt_iter, st)
put_page(page);
sg_free_table(st);
@@ -2657,7 +2752,7 @@ void i915_gem_reset(struct drm_i915_private *dev_priv)
for_each_engine(engine, dev_priv, id)
i915_gem_reset_engine(engine);
- i915_gem_restore_fences(&dev_priv->drm);
+ i915_gem_restore_fences(dev_priv);
if (dev_priv->gt.awake) {
intel_sanitize_gt_powersave(dev_priv);
@@ -2689,12 +2784,17 @@ static void i915_gem_cleanup_engine(struct intel_engine_cs *engine)
*/
if (i915.enable_execlists) {
- spin_lock(&engine->execlist_lock);
- INIT_LIST_HEAD(&engine->execlist_queue);
+ unsigned long flags;
+
+ spin_lock_irqsave(&engine->timeline->lock, flags);
+
i915_gem_request_put(engine->execlist_port[0].request);
i915_gem_request_put(engine->execlist_port[1].request);
memset(engine->execlist_port, 0, sizeof(engine->execlist_port));
- spin_unlock(&engine->execlist_lock);
+ engine->execlist_queue = RB_ROOT;
+ engine->execlist_first = NULL;
+
+ spin_unlock_irqrestore(&engine->timeline->lock, flags);
}
}
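/* Two changes are folded together here: the pending queue becomes a
 * priority-sorted rbtree (hence RB_ROOT and the cached execlist_first
 * iterator), and its protection moves to the per-engine timeline lock,
 * which is also taken from the submission/interrupt path - hence the
 * switch from plain spin_lock() to spin_lock_irqsave().
 */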
@@ -2892,117 +2992,6 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
return ret;
}
-static void __i915_vma_iounmap(struct i915_vma *vma)
-{
- GEM_BUG_ON(i915_vma_is_pinned(vma));
-
- if (vma->iomap == NULL)
- return;
-
- io_mapping_unmap(vma->iomap);
- vma->iomap = NULL;
-}
-
-int i915_vma_unbind(struct i915_vma *vma)
-{
- struct drm_i915_gem_object *obj = vma->obj;
- unsigned long active;
- int ret;
-
- lockdep_assert_held(&obj->base.dev->struct_mutex);
-
- /* First wait upon any activity as retiring the request may
- * have side-effects such as unpinning or even unbinding this vma.
- */
- active = i915_vma_get_active(vma);
- if (active) {
- int idx;
-
- /* When a closed VMA is retired, it is unbound - eek.
- * In order to prevent it from being recursively closed,
- * take a pin on the vma so that the second unbind is
- * aborted.
- *
- * Even more scary is that the retire callback may free
- * the object (last active vma). To prevent the explosion
- * we defer the actual object free to a worker that can
- * only proceed once it acquires the struct_mutex (which
- * we currently hold, therefore it cannot free this object
- * before we are finished).
- */
- __i915_vma_pin(vma);
-
- for_each_active(active, idx) {
- ret = i915_gem_active_retire(&vma->last_read[idx],
- &vma->vm->dev->struct_mutex);
- if (ret)
- break;
- }
-
- __i915_vma_unpin(vma);
- if (ret)
- return ret;
-
- GEM_BUG_ON(i915_vma_is_active(vma));
- }
-
- if (i915_vma_is_pinned(vma))
- return -EBUSY;
-
- if (!drm_mm_node_allocated(&vma->node))
- goto destroy;
-
- GEM_BUG_ON(obj->bind_count == 0);
- GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
-
- if (i915_vma_is_map_and_fenceable(vma)) {
- /* release the fence reg _after_ flushing */
- ret = i915_vma_put_fence(vma);
- if (ret)
- return ret;
-
- /* Force a pagefault for domain tracking on next user access */
- i915_gem_release_mmap(obj);
-
- __i915_vma_iounmap(vma);
- vma->flags &= ~I915_VMA_CAN_FENCE;
- }
-
- if (likely(!vma->vm->closed)) {
- trace_i915_vma_unbind(vma);
- vma->vm->unbind_vma(vma);
- }
- vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
-
- drm_mm_remove_node(&vma->node);
- list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
-
- if (vma->pages != obj->mm.pages) {
- GEM_BUG_ON(!vma->pages);
- sg_free_table(vma->pages);
- kfree(vma->pages);
- }
- vma->pages = NULL;
-
- /* Since the unbound list is global, only move to that list if
- * no more VMAs exist. */
- if (--obj->bind_count == 0)
- list_move_tail(&obj->global_link,
- &to_i915(obj->base.dev)->mm.unbound_list);
-
- /* And finally now the object is completely decoupled from this vma,
- * we can drop its hold on the backing storage and allow it to be
- * reaped by the shrinker.
- */
- i915_gem_object_unpin_pages(obj);
-
-destroy:
- if (unlikely(i915_vma_is_closed(vma)))
- i915_vma_destroy(vma);
-
- return 0;
-}
-
static int wait_for_timeline(struct i915_gem_timeline *tl, unsigned int flags)
{
int ret, i;
@@ -3018,201 +3007,43 @@ static int wait_for_timeline(struct i915_gem_timeline *tl, unsigned int flags)
int i915_gem_wait_for_idle(struct drm_i915_private *i915, unsigned int flags)
{
- struct i915_gem_timeline *tl;
int ret;
- list_for_each_entry(tl, &i915->gt.timelines, link) {
- ret = wait_for_timeline(tl, flags);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-static bool i915_gem_valid_gtt_space(struct i915_vma *vma,
- unsigned long cache_level)
-{
- struct drm_mm_node *gtt_space = &vma->node;
- struct drm_mm_node *other;
-
- /*
- * On some machines we have to be careful when putting differing types
- * of snoopable memory together to avoid the prefetcher crossing memory
- * domains and dying. During vm initialisation, we decide whether or not
- * these constraints apply and set the drm_mm.color_adjust
- * appropriately.
- */
- if (vma->vm->mm.color_adjust == NULL)
- return true;
-
- if (!drm_mm_node_allocated(gtt_space))
- return true;
-
- if (list_empty(&gtt_space->node_list))
- return true;
-
- other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
- if (other->allocated && !other->hole_follows && other->color != cache_level)
- return false;
+ if (flags & I915_WAIT_LOCKED) {
+ struct i915_gem_timeline *tl;
- other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
- if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
- return false;
+ lockdep_assert_held(&i915->drm.struct_mutex);
- return true;
-}
-
-/**
- * i915_vma_insert - finds a slot for the vma in its address space
- * @vma: the vma
- * @size: requested size in bytes (can be larger than the VMA)
- * @alignment: required alignment
- * @flags: mask of PIN_* flags to use
- *
- * First we try to allocate some free space that meets the requirements for
- * the VMA. Failing that, if the flags permit, it will evict an old VMA,
- * preferably the oldest idle entry to make room for the new VMA.
- *
- * Returns:
- * 0 on success, negative error code otherwise.
- */
-static int
-i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
-{
- struct drm_i915_private *dev_priv = to_i915(vma->vm->dev);
- struct drm_i915_gem_object *obj = vma->obj;
- u64 start, end;
- int ret;
-
- GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
- GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
-
- size = max(size, vma->size);
- if (flags & PIN_MAPPABLE)
- size = i915_gem_get_ggtt_size(dev_priv, size,
- i915_gem_object_get_tiling(obj));
-
- alignment = max(max(alignment, vma->display_alignment),
- i915_gem_get_ggtt_alignment(dev_priv, size,
- i915_gem_object_get_tiling(obj),
- flags & PIN_MAPPABLE));
-
- start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
-
- end = vma->vm->total;
- if (flags & PIN_MAPPABLE)
- end = min_t(u64, end, dev_priv->ggtt.mappable_end);
- if (flags & PIN_ZONE_4G)
- end = min_t(u64, end, (1ULL << 32) - PAGE_SIZE);
-
- /* If binding the object/GGTT view requires more space than the entire
- * aperture has, reject it early before evicting everything in a vain
- * attempt to find space.
- */
- if (size > end) {
- DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu [object=%zd] > %s aperture=%llu\n",
- size, obj->base.size,
- flags & PIN_MAPPABLE ? "mappable" : "total",
- end);
- return -E2BIG;
- }
-
- ret = i915_gem_object_pin_pages(obj);
- if (ret)
- return ret;
-
- if (flags & PIN_OFFSET_FIXED) {
- u64 offset = flags & PIN_OFFSET_MASK;
- if (offset & (alignment - 1) || offset > end - size) {
- ret = -EINVAL;
- goto err_unpin;
- }
-
- vma->node.start = offset;
- vma->node.size = size;
- vma->node.color = obj->cache_level;
- ret = drm_mm_reserve_node(&vma->vm->mm, &vma->node);
- if (ret) {
- ret = i915_gem_evict_for_vma(vma);
- if (ret == 0)
- ret = drm_mm_reserve_node(&vma->vm->mm, &vma->node);
+ list_for_each_entry(tl, &i915->gt.timelines, link) {
+ ret = wait_for_timeline(tl, flags);
if (ret)
- goto err_unpin;
+ return ret;
}
} else {
- u32 search_flag, alloc_flag;
-
- if (flags & PIN_HIGH) {
- search_flag = DRM_MM_SEARCH_BELOW;
- alloc_flag = DRM_MM_CREATE_TOP;
- } else {
- search_flag = DRM_MM_SEARCH_DEFAULT;
- alloc_flag = DRM_MM_CREATE_DEFAULT;
- }
-
- /* We only allocate in PAGE_SIZE/GTT_PAGE_SIZE (4096) chunks,
- * so we know that we always have a minimum alignment of 4096.
- * The drm_mm range manager is optimised to return results
- * with zero alignment, so where possible use the optimal
- * path.
- */
- if (alignment <= 4096)
- alignment = 0;
-
-search_free:
- ret = drm_mm_insert_node_in_range_generic(&vma->vm->mm,
- &vma->node,
- size, alignment,
- obj->cache_level,
- start, end,
- search_flag,
- alloc_flag);
- if (ret) {
- ret = i915_gem_evict_something(vma->vm, size, alignment,
- obj->cache_level,
- start, end,
- flags);
- if (ret == 0)
- goto search_free;
-
- goto err_unpin;
- }
-
- GEM_BUG_ON(vma->node.start < start);
- GEM_BUG_ON(vma->node.start + vma->node.size > end);
+ ret = wait_for_timeline(&i915->gt.global_timeline, flags);
+ if (ret)
+ return ret;
}
- GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level));
-
- list_move_tail(&obj->global_link, &dev_priv->mm.bound_list);
- list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
- obj->bind_count++;
- GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
return 0;
-
-err_unpin:
- i915_gem_object_unpin_pages(obj);
- return ret;
}
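/* The restructure makes the locking mode explicit: I915_WAIT_LOCKED callers
 * hold struct_mutex and may walk every timeline on the gt.timelines list,
 * while unlocked callers are restricted to the global execution timeline,
 * which is safe to wait upon without the mutex.
 */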
-bool
-i915_gem_clflush_object(struct drm_i915_gem_object *obj,
- bool force)
+void i915_gem_clflush_object(struct drm_i915_gem_object *obj,
+ bool force)
{
/* If we don't have a page list set up, then we're not pinned
* to GPU, and we can ignore the cache flush because it'll happen
* again at bind time.
*/
if (!obj->mm.pages)
- return false;
+ return;
/*
* Stolen memory is always coherent with the GPU as it is explicitly
* marked as wc by the system, or the system is cache-coherent.
*/
if (obj->stolen || obj->phys_handle)
- return false;
+ return;
/* If the GPU is snooping the contents of the CPU cache,
* we do not need to manually clear the CPU cache lines. However,
@@ -3224,14 +3055,12 @@ i915_gem_clflush_object(struct drm_i915_gem_object *obj,
*/
if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level)) {
obj->cache_dirty = true;
- return false;
+ return;
}
trace_i915_gem_object_clflush(obj);
drm_clflush_sg(obj->mm.pages);
obj->cache_dirty = false;
-
- return true;
}
/** Flushes the GTT write domain for the object if it's dirty. */
@@ -3277,9 +3106,7 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
return;
- if (i915_gem_clflush_object(obj, obj->pin_display))
- i915_gem_chipset_flush(to_i915(obj->base.dev));
-
+ i915_gem_clflush_object(obj, obj->pin_display);
intel_fb_obj_flush(obj, false, ORIGIN_CPU);
obj->base.write_domain = 0;
@@ -3378,12 +3205,12 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
enum i915_cache_level cache_level)
{
struct i915_vma *vma;
- int ret = 0;
+ int ret;
lockdep_assert_held(&obj->base.dev->struct_mutex);
if (obj->cache_level == cache_level)
- goto out;
+ return 0;
/* Inspect the list of currently bound VMA and unbind any that would
* be invalid given the new cache-level. This is principally to
@@ -3435,7 +3262,8 @@ restart:
if (ret)
return ret;
- if (!HAS_LLC(obj->base.dev) && cache_level != I915_CACHE_NONE) {
+ if (!HAS_LLC(to_i915(obj->base.dev)) &&
+ cache_level != I915_CACHE_NONE) {
/* Access to snoopable pages through the GTT is
* incoherent and on some machines causes a hard
* lockup. Relinquish the CPU mmapping to force
@@ -3477,20 +3305,14 @@ restart:
}
}
+ if (obj->base.write_domain == I915_GEM_DOMAIN_CPU &&
+ cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
+ obj->cache_dirty = true;
+
list_for_each_entry(vma, &obj->vma_list, obj_link)
vma->node.color = cache_level;
obj->cache_level = cache_level;
-out:
- /* Flush the dirty CPU caches to the backing storage so that the
- * object is now coherent at its new cache level (with respect
- * to the access domain).
- */
- if (obj->cache_dirty && cpu_write_needs_clflush(obj)) {
- if (i915_gem_clflush_object(obj, true))
- i915_gem_chipset_flush(to_i915(obj->base.dev));
- }
-
return 0;
}
@@ -3646,7 +3468,11 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
vma->display_alignment = max_t(u64, vma->display_alignment, alignment);
- i915_gem_object_flush_cpu_write_domain(obj);
+ /* Treat this as an end-of-frame, like intel_user_framebuffer_dirty() */
+ if (obj->cache_dirty) {
+ i915_gem_clflush_object(obj, true);
+ intel_fb_obj_flush(obj, false, ORIGIN_DIRTYFB);
+ }
old_write_domain = obj->base.write_domain;
old_read_domains = obj->base.read_domains;
@@ -3798,100 +3624,6 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
return ret < 0 ? ret : 0;
}
-static bool
-i915_vma_misplaced(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
-{
- if (!drm_mm_node_allocated(&vma->node))
- return false;
-
- if (vma->node.size < size)
- return true;
-
- if (alignment && vma->node.start & (alignment - 1))
- return true;
-
- if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
- return true;
-
- if (flags & PIN_OFFSET_BIAS &&
- vma->node.start < (flags & PIN_OFFSET_MASK))
- return true;
-
- if (flags & PIN_OFFSET_FIXED &&
- vma->node.start != (flags & PIN_OFFSET_MASK))
- return true;
-
- return false;
-}
-
-void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
-{
- struct drm_i915_gem_object *obj = vma->obj;
- struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
- bool mappable, fenceable;
- u32 fence_size, fence_alignment;
-
- fence_size = i915_gem_get_ggtt_size(dev_priv,
- vma->size,
- i915_gem_object_get_tiling(obj));
- fence_alignment = i915_gem_get_ggtt_alignment(dev_priv,
- vma->size,
- i915_gem_object_get_tiling(obj),
- true);
-
- fenceable = (vma->node.size == fence_size &&
- (vma->node.start & (fence_alignment - 1)) == 0);
-
- mappable = (vma->node.start + fence_size <=
- dev_priv->ggtt.mappable_end);
-
- /*
- * Explicitly disable for rotated VMA since the display does not
- * need the fence and the VMA is not accessible to other users.
- */
- if (mappable && fenceable &&
- vma->ggtt_view.type != I915_GGTT_VIEW_ROTATED)
- vma->flags |= I915_VMA_CAN_FENCE;
- else
- vma->flags &= ~I915_VMA_CAN_FENCE;
-}
-
-int __i915_vma_do_pin(struct i915_vma *vma,
- u64 size, u64 alignment, u64 flags)
-{
- unsigned int bound = vma->flags;
- int ret;
-
- lockdep_assert_held(&vma->vm->dev->struct_mutex);
- GEM_BUG_ON((flags & (PIN_GLOBAL | PIN_USER)) == 0);
- GEM_BUG_ON((flags & PIN_GLOBAL) && !i915_vma_is_ggtt(vma));
-
- if (WARN_ON(bound & I915_VMA_PIN_OVERFLOW)) {
- ret = -EBUSY;
- goto err;
- }
-
- if ((bound & I915_VMA_BIND_MASK) == 0) {
- ret = i915_vma_insert(vma, size, alignment, flags);
- if (ret)
- goto err;
- }
-
- ret = i915_vma_bind(vma, vma->obj->cache_level, flags);
- if (ret)
- goto err;
-
- if ((bound ^ vma->flags) & I915_VMA_GLOBAL_BIND)
- __i915_vma_set_map_and_fenceable(vma);
-
- GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
- return 0;
-
-err:
- __i915_vma_unpin(vma);
- return ret;
-}
-
struct i915_vma *
i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
const struct i915_ggtt_view *view,
@@ -4156,6 +3888,16 @@ out:
return err;
}
+static void
+frontbuffer_retire(struct i915_gem_active *active,
+ struct drm_i915_gem_request *request)
+{
+ struct drm_i915_gem_object *obj =
+ container_of(active, typeof(*obj), frontbuffer_write);
+
+ intel_fb_obj_flush(obj, true, ORIGIN_CS);
+}
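/* A sketch of the new write-tracking flow, using the names above: the
 * execbuffer path marks a GPU write with
 *
 *	i915_gem_active_set(&obj->frontbuffer_write, req);
 *
 * (see the i915_gem_execbuffer.c hunk below), and when that request retires
 * this callback flushes the frontbuffer with ORIGIN_CS. Keeping the tracker
 * on the object rather than the ggtt vma means it no longer depends on
 * which vma happens to be bound.
 */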
+
void i915_gem_object_init(struct drm_i915_gem_object *obj,
const struct drm_i915_gem_object_ops *ops)
{
@@ -4173,6 +3915,7 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
obj->resv = &obj->__builtin_resv;
obj->frontbuffer_ggtt_origin = ORIGIN_GTT;
+ init_request_active(&obj->frontbuffer_write, frontbuffer_retire);
obj->mm.madv = I915_MADV_WILLNEED;
INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN);
@@ -4235,7 +3978,7 @@ i915_gem_object_create(struct drm_device *dev, u64 size)
obj->base.write_domain = I915_GEM_DOMAIN_CPU;
obj->base.read_domains = I915_GEM_DOMAIN_CPU;
- if (HAS_LLC(dev)) {
+ if (HAS_LLC(dev_priv)) {
/* On some devices, we can have the GPU use the LLC (the CPU
* cache) for about a 10% performance improvement
* compared to uncached. Graphics requests other than
@@ -4481,7 +4224,7 @@ int i915_gem_suspend(struct drm_device *dev)
* machines is a good idea, we don't - just in case it leaves the
* machine in an unusable condition.
*/
- if (HAS_HW_CONTEXTS(dev)) {
+ if (HAS_HW_CONTEXTS(dev_priv)) {
int reset = intel_gpu_reset(dev_priv, ALL_ENGINES);
WARN_ON(reset && reset != -ENODEV);
}
@@ -4500,7 +4243,7 @@ void i915_gem_resume(struct drm_device *dev)
WARN_ON(dev_priv->gt.awake);
mutex_lock(&dev->struct_mutex);
- i915_gem_restore_gtt_mappings(dev);
+ i915_gem_restore_gtt_mappings(dev_priv);
/* As we didn't flush the kernel context before suspend, we cannot
* guarantee that the context image is complete. So let's just reset
@@ -4511,11 +4254,9 @@ void i915_gem_resume(struct drm_device *dev)
mutex_unlock(&dev->struct_mutex);
}
-void i915_gem_init_swizzling(struct drm_device *dev)
+void i915_gem_init_swizzling(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
- if (INTEL_INFO(dev)->gen < 5 ||
+ if (INTEL_GEN(dev_priv) < 5 ||
dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
return;
@@ -4574,7 +4315,7 @@ i915_gem_init_hw(struct drm_device *dev)
/* Double layer security blanket, see i915_gem_init() */
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
- if (HAS_EDRAM(dev) && INTEL_GEN(dev_priv) < 9)
+ if (HAS_EDRAM(dev_priv) && INTEL_GEN(dev_priv) < 9)
I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
if (IS_HASWELL(dev_priv))
@@ -4586,14 +4327,14 @@ i915_gem_init_hw(struct drm_device *dev)
u32 temp = I915_READ(GEN7_MSG_CTL);
temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
I915_WRITE(GEN7_MSG_CTL, temp);
- } else if (INTEL_INFO(dev)->gen >= 7) {
+ } else if (INTEL_GEN(dev_priv) >= 7) {
u32 temp = I915_READ(HSW_NDE_RSTWRN_OPT);
temp &= ~RESET_PCH_HANDSHAKE_ENABLE;
I915_WRITE(HSW_NDE_RSTWRN_OPT, temp);
}
}
- i915_gem_init_swizzling(dev);
+ i915_gem_init_swizzling(dev_priv);
/*
* At least 830 can leave some of the unused rings
@@ -4605,7 +4346,7 @@ i915_gem_init_hw(struct drm_device *dev)
BUG_ON(!dev_priv->kernel_context);
- ret = i915_ppgtt_init_hw(dev);
+ ret = i915_ppgtt_init_hw(dev_priv);
if (ret) {
DRM_ERROR("PPGTT enable HW failed %d\n", ret);
goto out;
@@ -4720,7 +4461,6 @@ i915_gem_cleanup_engines(struct drm_device *dev)
void
i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
{
- struct drm_device *dev = &dev_priv->drm;
int i;
if (INTEL_INFO(dev_priv)->gen >= 7 && !IS_VALLEYVIEW(dev_priv) &&
@@ -4744,9 +4484,9 @@ i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
fence->id = i;
list_add_tail(&fence->link, &dev_priv->mm.fence_list);
}
- i915_gem_restore_fences(dev);
+ i915_gem_restore_fences(dev_priv);
- i915_gem_detect_bit_6_swizzle(dev);
+ i915_gem_detect_bit_6_swizzle(dev_priv);
}
int
@@ -4770,14 +4510,18 @@ i915_gem_load_init(struct drm_device *dev)
if (!dev_priv->requests)
goto err_vmas;
+ dev_priv->dependencies = KMEM_CACHE(i915_dependency,
+ SLAB_HWCACHE_ALIGN |
+ SLAB_RECLAIM_ACCOUNT);
+ if (!dev_priv->dependencies)
+ goto err_requests;
+
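/* The scheduler allocates one i915_dependency per edge in the request
 * dependency graph, hence the dedicated slab. KMEM_CACHE() is the kernel's
 * shorthand for kmem_cache_create() keyed on the type; the line above is
 * equivalent to roughly:
 *
 *	dev_priv->dependencies =
 *		kmem_cache_create("i915_dependency",
 *				  sizeof(struct i915_dependency),
 *				  __alignof__(struct i915_dependency),
 *				  SLAB_HWCACHE_ALIGN | SLAB_RECLAIM_ACCOUNT,
 *				  NULL);
 */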
mutex_lock(&dev_priv->drm.struct_mutex);
INIT_LIST_HEAD(&dev_priv->gt.timelines);
- err = i915_gem_timeline_init(dev_priv,
- &dev_priv->gt.global_timeline,
- "[execution]");
+ err = i915_gem_timeline_init__global(dev_priv);
mutex_unlock(&dev_priv->drm.struct_mutex);
if (err)
- goto err_requests;
+ goto err_dependencies;
INIT_LIST_HEAD(&dev_priv->context_list);
INIT_WORK(&dev_priv->mm.free_work, __i915_gem_free_work);
@@ -4805,6 +4549,8 @@ i915_gem_load_init(struct drm_device *dev)
return 0;
+err_dependencies:
+ kmem_cache_destroy(dev_priv->dependencies);
err_requests:
kmem_cache_destroy(dev_priv->requests);
err_vmas:
@@ -4821,6 +4567,12 @@ void i915_gem_load_cleanup(struct drm_device *dev)
WARN_ON(!llist_empty(&dev_priv->mm.free_list));
+ mutex_lock(&dev_priv->drm.struct_mutex);
+ i915_gem_timeline_fini(&dev_priv->gt.global_timeline);
+ WARN_ON(!list_empty(&dev_priv->gt.timelines));
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+
+ kmem_cache_destroy(dev_priv->dependencies);
kmem_cache_destroy(dev_priv->requests);
kmem_cache_destroy(dev_priv->vmas);
kmem_cache_destroy(dev_priv->objects);
@@ -4905,7 +4657,7 @@ int i915_gem_open(struct drm_device *dev, struct drm_file *file)
struct drm_i915_file_private *file_priv;
int ret;
- DRM_DEBUG_DRIVER("\n");
+ DRM_DEBUG("\n");
file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
if (!file_priv)
diff --git a/drivers/gpu/drm/i915/i915_gem.h b/drivers/gpu/drm/i915/i915_gem.h
index 735580d72eb1..51ec793f2e20 100644
--- a/drivers/gpu/drm/i915/i915_gem.h
+++ b/drivers/gpu/drm/i915/i915_gem.h
@@ -28,7 +28,7 @@
#ifdef CONFIG_DRM_I915_DEBUG_GEM
#define GEM_BUG_ON(expr) BUG_ON(expr)
#else
-#define GEM_BUG_ON(expr)
+#define GEM_BUG_ON(expr) do { } while (0)
#endif
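/* The usual motivation for this idiom: with the empty expansion,
 *
 *	if (cond)
 *		GEM_BUG_ON(x);
 *
 * compiled down to "if (cond) ;" in non-debug builds, tripping
 * -Wempty-body; do { } while (0) keeps the macro a proper statement in
 * both configurations.
 */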
#define I915_NUM_ENGINES 5
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 6dd475735f0a..1f94b8d6d83d 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -476,6 +476,7 @@ int i915_gem_context_init(struct drm_device *dev)
return PTR_ERR(ctx);
}
+ ctx->priority = I915_PRIORITY_MIN; /* lowest priority; idle task */
dev_priv->kernel_context = ctx;
DRM_DEBUG_DRIVER("%s context support initialized\n",
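/* With the scheduler in place, requests inherit their context's priority;
 * pinning the kernel context to I915_PRIORITY_MIN ensures housekeeping work
 * submitted through it never preempts user rendering.
 */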
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index fb5b44339f71..097d9d8c2315 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -287,7 +287,7 @@ static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
if (DBG_USE_CPU_RELOC)
return DBG_USE_CPU_RELOC > 0;
- return (HAS_LLC(obj->base.dev) ||
+ return (HAS_LLC(to_i915(obj->base.dev)) ||
obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
obj->cache_level != I915_CACHE_NONE);
}
@@ -833,7 +833,7 @@ need_reloc_mappable(struct i915_vma *vma)
return false;
/* See also use_cpu_reloc() */
- if (HAS_LLC(vma->obj->base.dev))
+ if (HAS_LLC(to_i915(vma->obj->base.dev)))
return false;
if (vma->obj->base.write_domain == I915_GEM_DOMAIN_CPU)
@@ -1276,9 +1276,8 @@ void i915_vma_move_to_active(struct i915_vma *vma,
list_move_tail(&vma->vm_link, &vma->vm->active_list);
if (flags & EXEC_OBJECT_WRITE) {
- i915_gem_active_set(&vma->last_write, req);
-
- intel_fb_obj_invalidate(obj, ORIGIN_CS);
+ if (intel_fb_obj_invalidate(obj, ORIGIN_CS))
+ i915_gem_active_set(&obj->frontbuffer_write, req);
/* update for the implicit flush after a batch */
obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
@@ -1624,7 +1623,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
}
if (args->flags & I915_EXEC_RESOURCE_STREAMER) {
- if (!HAS_RESOURCE_STREAMER(dev)) {
+ if (!HAS_RESOURCE_STREAMER(dev_priv)) {
DRM_DEBUG("RS is only allowed for Haswell, Gen8 and above\n");
return -EINVAL;
}
@@ -1878,7 +1877,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
exec2_list[i].alignment = exec_list[i].alignment;
exec2_list[i].offset = exec_list[i].offset;
- if (INTEL_INFO(dev)->gen < 4)
+ if (INTEL_GEN(to_i915(dev)) < 4)
exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
else
exec2_list[i].flags = 0;
diff --git a/drivers/gpu/drm/i915/i915_gem_fence.c b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
index cd59dbc6588c..0efa3571afc3 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence.c
+++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
@@ -368,15 +368,14 @@ i915_vma_get_fence(struct i915_vma *vma)
/**
* i915_gem_restore_fences - restore fence state
- * @dev: DRM device
+ * @dev_priv: i915 device private
*
* Restore the hw fence state to match the software tracking again, to be called
* after a gpu reset and on resume. Note that on runtime suspend we only cancel
* the fences, to be reacquired by the user later.
*/
-void i915_gem_restore_fences(struct drm_device *dev)
+void i915_gem_restore_fences(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
int i;
for (i = 0; i < dev_priv->num_fence_regs; i++) {
@@ -451,15 +450,14 @@ void i915_gem_restore_fences(struct drm_device *dev)
/**
* i915_gem_detect_bit_6_swizzle - detect bit 6 swizzling pattern
- * @dev: DRM device
+ * @dev_priv: i915 device private
*
* Detects bit 6 swizzling of address lookup between IGD access and CPU
* access through main memory.
*/
void
-i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
+i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
@@ -473,7 +471,7 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
*/
swizzle_x = I915_BIT_6_SWIZZLE_NONE;
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
- } else if (INTEL_INFO(dev)->gen >= 6) {
+ } else if (INTEL_GEN(dev_priv) >= 6) {
if (dev_priv->preserve_bios_swizzle) {
if (I915_READ(DISP_ARB_CTL) &
DISP_TILE_SURFACE_SWIZZLING) {
diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.h b/drivers/gpu/drm/i915/i915_gem_fence_reg.h
new file mode 100644
index 000000000000..22c4a2d01adf
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __I915_FENCE_REG_H__
+#define __I915_FENCE_REG_H__
+
+#include <linux/list.h>
+
+struct drm_i915_private;
+struct i915_vma;
+
+struct drm_i915_fence_reg {
+ struct list_head link;
+ struct drm_i915_private *i915;
+ struct i915_vma *vma;
+ int pin_count;
+ int id;
+ /**
+ * Whether the tiling parameters for the currently
+ * associated fence register have changed. Note that
+ * for the purposes of tracking tiling changes we also
+ * treat the unfenced register, the register slot that
+ * the object occupies whilst it executes a fenced
+ * command (such as BLT on gen2/3), as a "fence".
+ */
+ bool dirty;
+};
+
+#endif
+
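/* Splitting drm_i915_fence_reg into its own header lets users of the fence
 * bookkeeping include just this file with two forward declarations instead
 * of dragging in all of i915_drv.h; the .c rename to i915_gem_fence_reg.c
 * keeps the implementation and header names paired.
 */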
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index a5fafa3d4fc8..b4bde1452f2a 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -96,13 +96,6 @@
*
*/
-static inline struct i915_ggtt *
-i915_vm_to_ggtt(struct i915_address_space *vm)
-{
- GEM_BUG_ON(!i915_is_ggtt(vm));
- return container_of(vm, struct i915_ggtt, base);
-}
-
static int
i915_get_ggtt_vma_pages(struct i915_vma *vma);
@@ -327,10 +320,10 @@ static gen6_pte_t iris_pte_encode(dma_addr_t addr,
return pte;
}
-static int __setup_page_dma(struct drm_device *dev,
+static int __setup_page_dma(struct drm_i915_private *dev_priv,
struct i915_page_dma *p, gfp_t flags)
{
- struct device *kdev = &dev->pdev->dev;
+ struct device *kdev = &dev_priv->drm.pdev->dev;
p->page = alloc_page(flags);
if (!p->page)
@@ -347,14 +340,16 @@ static int __setup_page_dma(struct drm_device *dev,
return 0;
}
-static int setup_page_dma(struct drm_device *dev, struct i915_page_dma *p)
+static int setup_page_dma(struct drm_i915_private *dev_priv,
+ struct i915_page_dma *p)
{
- return __setup_page_dma(dev, p, I915_GFP_DMA);
+ return __setup_page_dma(dev_priv, p, I915_GFP_DMA);
}
-static void cleanup_page_dma(struct drm_device *dev, struct i915_page_dma *p)
+static void cleanup_page_dma(struct drm_i915_private *dev_priv,
+ struct i915_page_dma *p)
{
- struct pci_dev *pdev = dev->pdev;
+ struct pci_dev *pdev = dev_priv->drm.pdev;
if (WARN_ON(!p->page))
return;
@@ -387,8 +382,8 @@ static void kunmap_page_dma(struct drm_i915_private *dev_priv, void *vaddr)
#define kunmap_px(ppgtt, vaddr) \
kunmap_page_dma(to_i915((ppgtt)->base.dev), (vaddr))
-#define setup_px(dev, px) setup_page_dma((dev), px_base(px))
-#define cleanup_px(dev, px) cleanup_page_dma((dev), px_base(px))
+#define setup_px(dev_priv, px) setup_page_dma((dev_priv), px_base(px))
+#define cleanup_px(dev_priv, px) cleanup_page_dma((dev_priv), px_base(px))
#define fill_px(dev_priv, px, v) fill_page_dma((dev_priv), px_base(px), (v))
#define fill32_px(dev_priv, px, v) \
fill_page_dma_32((dev_priv), px_base(px), (v))
@@ -416,24 +411,23 @@ static void fill_page_dma_32(struct drm_i915_private *dev_priv,
}
static int
-setup_scratch_page(struct drm_device *dev,
+setup_scratch_page(struct drm_i915_private *dev_priv,
struct i915_page_dma *scratch,
gfp_t gfp)
{
- return __setup_page_dma(dev, scratch, gfp | __GFP_ZERO);
+ return __setup_page_dma(dev_priv, scratch, gfp | __GFP_ZERO);
}
-static void cleanup_scratch_page(struct drm_device *dev,
+static void cleanup_scratch_page(struct drm_i915_private *dev_priv,
struct i915_page_dma *scratch)
{
- cleanup_page_dma(dev, scratch);
+ cleanup_page_dma(dev_priv, scratch);
}
-static struct i915_page_table *alloc_pt(struct drm_device *dev)
+static struct i915_page_table *alloc_pt(struct drm_i915_private *dev_priv)
{
struct i915_page_table *pt;
- const size_t count = INTEL_INFO(dev)->gen >= 8 ?
- GEN8_PTES : GEN6_PTES;
+ const size_t count = INTEL_GEN(dev_priv) >= 8 ? GEN8_PTES : GEN6_PTES;
int ret = -ENOMEM;
pt = kzalloc(sizeof(*pt), GFP_KERNEL);
@@ -446,7 +440,7 @@ static struct i915_page_table *alloc_pt(struct drm_device *dev)
if (!pt->used_ptes)
goto fail_bitmap;
- ret = setup_px(dev, pt);
+ ret = setup_px(dev_priv, pt);
if (ret)
goto fail_page_m;
@@ -460,9 +454,10 @@ fail_bitmap:
return ERR_PTR(ret);
}
-static void free_pt(struct drm_device *dev, struct i915_page_table *pt)
+static void free_pt(struct drm_i915_private *dev_priv,
+ struct i915_page_table *pt)
{
- cleanup_px(dev, pt);
+ cleanup_px(dev_priv, pt);
kfree(pt->used_ptes);
kfree(pt);
}
@@ -491,7 +486,7 @@ static void gen6_initialize_pt(struct i915_address_space *vm,
fill32_px(to_i915(vm->dev), pt, scratch_pte);
}
-static struct i915_page_directory *alloc_pd(struct drm_device *dev)
+static struct i915_page_directory *alloc_pd(struct drm_i915_private *dev_priv)
{
struct i915_page_directory *pd;
int ret = -ENOMEM;
@@ -505,7 +500,7 @@ static struct i915_page_directory *alloc_pd(struct drm_device *dev)
if (!pd->used_pdes)
goto fail_bitmap;
- ret = setup_px(dev, pd);
+ ret = setup_px(dev_priv, pd);
if (ret)
goto fail_page_m;
@@ -519,10 +514,11 @@ fail_bitmap:
return ERR_PTR(ret);
}
-static void free_pd(struct drm_device *dev, struct i915_page_directory *pd)
+static void free_pd(struct drm_i915_private *dev_priv,
+ struct i915_page_directory *pd)
{
if (px_page(pd)) {
- cleanup_px(dev, pd);
+ cleanup_px(dev_priv, pd);
kfree(pd->used_pdes);
kfree(pd);
}
@@ -538,10 +534,10 @@ static void gen8_initialize_pd(struct i915_address_space *vm,
fill_px(to_i915(vm->dev), pd, scratch_pde);
}
-static int __pdp_init(struct drm_device *dev,
+static int __pdp_init(struct drm_i915_private *dev_priv,
struct i915_page_directory_pointer *pdp)
{
- size_t pdpes = I915_PDPES_PER_PDP(dev);
+ size_t pdpes = I915_PDPES_PER_PDP(dev_priv);
pdp->used_pdpes = kcalloc(BITS_TO_LONGS(pdpes),
sizeof(unsigned long),
@@ -570,22 +566,22 @@ static void __pdp_fini(struct i915_page_directory_pointer *pdp)
}
static struct
-i915_page_directory_pointer *alloc_pdp(struct drm_device *dev)
+i915_page_directory_pointer *alloc_pdp(struct drm_i915_private *dev_priv)
{
struct i915_page_directory_pointer *pdp;
int ret = -ENOMEM;
- WARN_ON(!USES_FULL_48BIT_PPGTT(dev));
+ WARN_ON(!USES_FULL_48BIT_PPGTT(dev_priv));
pdp = kzalloc(sizeof(*pdp), GFP_KERNEL);
if (!pdp)
return ERR_PTR(-ENOMEM);
- ret = __pdp_init(dev, pdp);
+ ret = __pdp_init(dev_priv, pdp);
if (ret)
goto fail_bitmap;
- ret = setup_px(dev, pdp);
+ ret = setup_px(dev_priv, pdp);
if (ret)
goto fail_page_m;
@@ -599,12 +595,12 @@ fail_bitmap:
return ERR_PTR(ret);
}
-static void free_pdp(struct drm_device *dev,
+static void free_pdp(struct drm_i915_private *dev_priv,
struct i915_page_directory_pointer *pdp)
{
__pdp_fini(pdp);
- if (USES_FULL_48BIT_PPGTT(dev)) {
- cleanup_px(dev, pdp);
+ if (USES_FULL_48BIT_PPGTT(dev_priv)) {
+ cleanup_px(dev_priv, pdp);
kfree(pdp);
}
}
@@ -638,7 +634,7 @@ gen8_setup_page_directory(struct i915_hw_ppgtt *ppgtt,
{
gen8_ppgtt_pdpe_t *page_directorypo;
- if (!USES_FULL_48BIT_PPGTT(ppgtt->base.dev))
+ if (!USES_FULL_48BIT_PPGTT(to_i915(ppgtt->base.dev)))
return;
page_directorypo = kmap_px(pdp);
@@ -654,7 +650,7 @@ gen8_setup_page_directory_pointer(struct i915_hw_ppgtt *ppgtt,
{
gen8_ppgtt_pml4e_t *pagemap = kmap_px(pml4);
- WARN_ON(!USES_FULL_48BIT_PPGTT(ppgtt->base.dev));
+ WARN_ON(!USES_FULL_48BIT_PPGTT(to_i915(ppgtt->base.dev)));
pagemap[index] = gen8_pml4e_encode(px_dma(pdp), I915_CACHE_LLC);
kunmap_px(ppgtt, pagemap);
}
@@ -714,7 +710,7 @@ static int gen8_48b_mm_switch(struct i915_hw_ppgtt *ppgtt,
*/
static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt)
{
- ppgtt->pd_dirty_rings = INTEL_INFO(ppgtt->base.dev)->ring_mask;
+ ppgtt->pd_dirty_rings = INTEL_INFO(to_i915(ppgtt->base.dev))->ring_mask;
}
/* Removes entries from a single page table, releasing it if it's empty.
@@ -741,7 +737,7 @@ static bool gen8_ppgtt_clear_pt(struct i915_address_space *vm,
bitmap_clear(pt->used_ptes, pte, num_entries);
if (bitmap_empty(pt->used_ptes, GEN8_PTES)) {
- free_pt(vm->dev, pt);
+ free_pt(to_i915(vm->dev), pt);
return true;
}
@@ -783,7 +779,7 @@ static bool gen8_ppgtt_clear_pd(struct i915_address_space *vm,
}
if (bitmap_empty(pd->used_pdes, I915_PDES)) {
- free_pd(vm->dev, pd);
+ free_pd(to_i915(vm->dev), pd);
return true;
}
@@ -799,6 +795,7 @@ static bool gen8_ppgtt_clear_pdp(struct i915_address_space *vm,
uint64_t length)
{
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
+ struct drm_i915_private *dev_priv = to_i915(vm->dev);
struct i915_page_directory *pd;
uint64_t pdpe;
gen8_ppgtt_pdpe_t *pdpe_vaddr;
@@ -811,7 +808,7 @@ static bool gen8_ppgtt_clear_pdp(struct i915_address_space *vm,
if (gen8_ppgtt_clear_pd(vm, pd, start, length)) {
__clear_bit(pdpe, pdp->used_pdpes);
- if (USES_FULL_48BIT_PPGTT(vm->dev)) {
+ if (USES_FULL_48BIT_PPGTT(dev_priv)) {
pdpe_vaddr = kmap_px(pdp);
pdpe_vaddr[pdpe] = scratch_pdpe;
kunmap_px(ppgtt, pdpe_vaddr);
@@ -821,9 +818,9 @@ static bool gen8_ppgtt_clear_pdp(struct i915_address_space *vm,
mark_tlbs_dirty(ppgtt);
- if (USES_FULL_48BIT_PPGTT(vm->dev) &&
- bitmap_empty(pdp->used_pdpes, I915_PDPES_PER_PDP(vm->dev))) {
- free_pdp(vm->dev, pdp);
+ if (USES_FULL_48BIT_PPGTT(dev_priv) &&
+ bitmap_empty(pdp->used_pdpes, I915_PDPES_PER_PDP(dev_priv))) {
+ free_pdp(dev_priv, pdp);
return true;
}
@@ -846,7 +843,7 @@ static void gen8_ppgtt_clear_pml4(struct i915_address_space *vm,
gen8_ppgtt_pml4e_t scratch_pml4e =
gen8_pml4e_encode(px_dma(vm->scratch_pdp), I915_CACHE_LLC);
- GEM_BUG_ON(!USES_FULL_48BIT_PPGTT(vm->dev));
+ GEM_BUG_ON(!USES_FULL_48BIT_PPGTT(to_i915(vm->dev)));
gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
if (WARN_ON(!pml4->pdps[pml4e]))
@@ -866,7 +863,7 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
{
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
- if (USES_FULL_48BIT_PPGTT(vm->dev))
+ if (USES_FULL_48BIT_PPGTT(to_i915(vm->dev)))
gen8_ppgtt_clear_pml4(vm, &ppgtt->pml4, start, length);
else
gen8_ppgtt_clear_pdp(vm, &ppgtt->pdp, start, length);
@@ -901,7 +898,7 @@ gen8_ppgtt_insert_pte_entries(struct i915_address_space *vm,
kunmap_px(ppgtt, pt_vaddr);
pt_vaddr = NULL;
if (++pde == I915_PDES) {
- if (++pdpe == I915_PDPES_PER_PDP(vm->dev))
+ if (++pdpe == I915_PDPES_PER_PDP(to_i915(vm->dev)))
break;
pde = 0;
}
@@ -924,7 +921,7 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
__sg_page_iter_start(&sg_iter, pages->sgl, sg_nents(pages->sgl), 0);
- if (!USES_FULL_48BIT_PPGTT(vm->dev)) {
+ if (!USES_FULL_48BIT_PPGTT(to_i915(vm->dev))) {
gen8_ppgtt_insert_pte_entries(vm, &ppgtt->pdp, &sg_iter, start,
cache_level);
} else {
@@ -939,7 +936,7 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
}
}
-static void gen8_free_page_tables(struct drm_device *dev,
+static void gen8_free_page_tables(struct drm_i915_private *dev_priv,
struct i915_page_directory *pd)
{
int i;
@@ -951,34 +948,34 @@ static void gen8_free_page_tables(struct drm_device *dev,
if (WARN_ON(!pd->page_table[i]))
continue;
- free_pt(dev, pd->page_table[i]);
+ free_pt(dev_priv, pd->page_table[i]);
pd->page_table[i] = NULL;
}
}
static int gen8_init_scratch(struct i915_address_space *vm)
{
- struct drm_device *dev = vm->dev;
+ struct drm_i915_private *dev_priv = to_i915(vm->dev);
int ret;
- ret = setup_scratch_page(dev, &vm->scratch_page, I915_GFP_DMA);
+ ret = setup_scratch_page(dev_priv, &vm->scratch_page, I915_GFP_DMA);
if (ret)
return ret;
- vm->scratch_pt = alloc_pt(dev);
+ vm->scratch_pt = alloc_pt(dev_priv);
if (IS_ERR(vm->scratch_pt)) {
ret = PTR_ERR(vm->scratch_pt);
goto free_scratch_page;
}
- vm->scratch_pd = alloc_pd(dev);
+ vm->scratch_pd = alloc_pd(dev_priv);
if (IS_ERR(vm->scratch_pd)) {
ret = PTR_ERR(vm->scratch_pd);
goto free_pt;
}
- if (USES_FULL_48BIT_PPGTT(dev)) {
- vm->scratch_pdp = alloc_pdp(dev);
+ if (USES_FULL_48BIT_PPGTT(dev_priv)) {
+ vm->scratch_pdp = alloc_pdp(dev_priv);
if (IS_ERR(vm->scratch_pdp)) {
ret = PTR_ERR(vm->scratch_pdp);
goto free_pd;
@@ -987,17 +984,17 @@ static int gen8_init_scratch(struct i915_address_space *vm)
gen8_initialize_pt(vm, vm->scratch_pt);
gen8_initialize_pd(vm, vm->scratch_pd);
- if (USES_FULL_48BIT_PPGTT(dev))
+ if (USES_FULL_48BIT_PPGTT(dev_priv))
gen8_initialize_pdp(vm, vm->scratch_pdp);
return 0;
free_pd:
- free_pd(dev, vm->scratch_pd);
+ free_pd(dev_priv, vm->scratch_pd);
free_pt:
- free_pt(dev, vm->scratch_pt);
+ free_pt(dev_priv, vm->scratch_pt);
free_scratch_page:
- cleanup_scratch_page(dev, &vm->scratch_page);
+ cleanup_scratch_page(dev_priv, &vm->scratch_page);
return ret;
}
@@ -1035,54 +1032,56 @@ static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create)
static void gen8_free_scratch(struct i915_address_space *vm)
{
- struct drm_device *dev = vm->dev;
+ struct drm_i915_private *dev_priv = to_i915(vm->dev);
- if (USES_FULL_48BIT_PPGTT(dev))
- free_pdp(dev, vm->scratch_pdp);
- free_pd(dev, vm->scratch_pd);
- free_pt(dev, vm->scratch_pt);
- cleanup_scratch_page(dev, &vm->scratch_page);
+ if (USES_FULL_48BIT_PPGTT(dev_priv))
+ free_pdp(dev_priv, vm->scratch_pdp);
+ free_pd(dev_priv, vm->scratch_pd);
+ free_pt(dev_priv, vm->scratch_pt);
+ cleanup_scratch_page(dev_priv, &vm->scratch_page);
}
-static void gen8_ppgtt_cleanup_3lvl(struct drm_device *dev,
+static void gen8_ppgtt_cleanup_3lvl(struct drm_i915_private *dev_priv,
struct i915_page_directory_pointer *pdp)
{
int i;
- for_each_set_bit(i, pdp->used_pdpes, I915_PDPES_PER_PDP(dev)) {
+ for_each_set_bit(i, pdp->used_pdpes, I915_PDPES_PER_PDP(dev_priv)) {
if (WARN_ON(!pdp->page_directory[i]))
continue;
- gen8_free_page_tables(dev, pdp->page_directory[i]);
- free_pd(dev, pdp->page_directory[i]);
+ gen8_free_page_tables(dev_priv, pdp->page_directory[i]);
+ free_pd(dev_priv, pdp->page_directory[i]);
}
- free_pdp(dev, pdp);
+ free_pdp(dev_priv, pdp);
}
static void gen8_ppgtt_cleanup_4lvl(struct i915_hw_ppgtt *ppgtt)
{
+ struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev);
int i;
for_each_set_bit(i, ppgtt->pml4.used_pml4es, GEN8_PML4ES_PER_PML4) {
if (WARN_ON(!ppgtt->pml4.pdps[i]))
continue;
- gen8_ppgtt_cleanup_3lvl(ppgtt->base.dev, ppgtt->pml4.pdps[i]);
+ gen8_ppgtt_cleanup_3lvl(dev_priv, ppgtt->pml4.pdps[i]);
}
- cleanup_px(ppgtt->base.dev, &ppgtt->pml4);
+ cleanup_px(dev_priv, &ppgtt->pml4);
}
static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
{
+ struct drm_i915_private *dev_priv = to_i915(vm->dev);
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
- if (intel_vgpu_active(to_i915(vm->dev)))
+ if (intel_vgpu_active(dev_priv))
gen8_ppgtt_notify_vgt(ppgtt, false);
- if (!USES_FULL_48BIT_PPGTT(ppgtt->base.dev))
- gen8_ppgtt_cleanup_3lvl(ppgtt->base.dev, &ppgtt->pdp);
+ if (!USES_FULL_48BIT_PPGTT(dev_priv))
+ gen8_ppgtt_cleanup_3lvl(dev_priv, &ppgtt->pdp);
else
gen8_ppgtt_cleanup_4lvl(ppgtt);
@@ -1113,7 +1112,7 @@ static int gen8_ppgtt_alloc_pagetabs(struct i915_address_space *vm,
uint64_t length,
unsigned long *new_pts)
{
- struct drm_device *dev = vm->dev;
+ struct drm_i915_private *dev_priv = to_i915(vm->dev);
struct i915_page_table *pt;
uint32_t pde;
@@ -1125,7 +1124,7 @@ static int gen8_ppgtt_alloc_pagetabs(struct i915_address_space *vm,
continue;
}
- pt = alloc_pt(dev);
+ pt = alloc_pt(dev_priv);
if (IS_ERR(pt))
goto unwind_out;
@@ -1139,7 +1138,7 @@ static int gen8_ppgtt_alloc_pagetabs(struct i915_address_space *vm,
unwind_out:
for_each_set_bit(pde, new_pts, I915_PDES)
- free_pt(dev, pd->page_table[pde]);
+ free_pt(dev_priv, pd->page_table[pde]);
return -ENOMEM;
}
@@ -1174,10 +1173,10 @@ gen8_ppgtt_alloc_page_directories(struct i915_address_space *vm,
uint64_t length,
unsigned long *new_pds)
{
- struct drm_device *dev = vm->dev;
+ struct drm_i915_private *dev_priv = to_i915(vm->dev);
struct i915_page_directory *pd;
uint32_t pdpe;
- uint32_t pdpes = I915_PDPES_PER_PDP(dev);
+ uint32_t pdpes = I915_PDPES_PER_PDP(dev_priv);
WARN_ON(!bitmap_empty(new_pds, pdpes));
@@ -1185,7 +1184,7 @@ gen8_ppgtt_alloc_page_directories(struct i915_address_space *vm,
if (test_bit(pdpe, pdp->used_pdpes))
continue;
- pd = alloc_pd(dev);
+ pd = alloc_pd(dev_priv);
if (IS_ERR(pd))
goto unwind_out;
@@ -1199,7 +1198,7 @@ gen8_ppgtt_alloc_page_directories(struct i915_address_space *vm,
unwind_out:
for_each_set_bit(pdpe, new_pds, pdpes)
- free_pd(dev, pdp->page_directory[pdpe]);
+ free_pd(dev_priv, pdp->page_directory[pdpe]);
return -ENOMEM;
}
@@ -1227,7 +1226,7 @@ gen8_ppgtt_alloc_page_dirpointers(struct i915_address_space *vm,
uint64_t length,
unsigned long *new_pdps)
{
- struct drm_device *dev = vm->dev;
+ struct drm_i915_private *dev_priv = to_i915(vm->dev);
struct i915_page_directory_pointer *pdp;
uint32_t pml4e;
@@ -1235,7 +1234,7 @@ gen8_ppgtt_alloc_page_dirpointers(struct i915_address_space *vm,
gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
if (!test_bit(pml4e, pml4->used_pml4es)) {
- pdp = alloc_pdp(dev);
+ pdp = alloc_pdp(dev_priv);
if (IS_ERR(pdp))
goto unwind_out;
@@ -1253,7 +1252,7 @@ gen8_ppgtt_alloc_page_dirpointers(struct i915_address_space *vm,
unwind_out:
for_each_set_bit(pml4e, new_pdps, GEN8_PML4ES_PER_PML4)
- free_pdp(dev, pml4->pdps[pml4e]);
+ free_pdp(dev_priv, pml4->pdps[pml4e]);
return -ENOMEM;
}
@@ -1302,12 +1301,12 @@ static int gen8_alloc_va_range_3lvl(struct i915_address_space *vm,
{
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
unsigned long *new_page_dirs, *new_page_tables;
- struct drm_device *dev = vm->dev;
+ struct drm_i915_private *dev_priv = to_i915(vm->dev);
struct i915_page_directory *pd;
const uint64_t orig_start = start;
const uint64_t orig_length = length;
uint32_t pdpe;
- uint32_t pdpes = I915_PDPES_PER_PDP(dev);
+ uint32_t pdpes = I915_PDPES_PER_PDP(dev_priv);
int ret;
/* Wrap is never okay since we can only represent 48b, and we don't
@@ -1395,11 +1394,12 @@ err_out:
for_each_set_bit(temp, new_page_tables + pdpe *
BITS_TO_LONGS(I915_PDES), I915_PDES)
- free_pt(dev, pdp->page_directory[pdpe]->page_table[temp]);
+ free_pt(dev_priv,
+ pdp->page_directory[pdpe]->page_table[temp]);
}
for_each_set_bit(pdpe, new_page_dirs, pdpes)
- free_pd(dev, pdp->page_directory[pdpe]);
+ free_pd(dev_priv, pdp->page_directory[pdpe]);
free_gen8_temp_bitmaps(new_page_dirs, new_page_tables);
mark_tlbs_dirty(ppgtt);
@@ -1450,7 +1450,7 @@ static int gen8_alloc_va_range_4lvl(struct i915_address_space *vm,
err_out:
for_each_set_bit(pml4e, new_pdps, GEN8_PML4ES_PER_PML4)
- gen8_ppgtt_cleanup_3lvl(vm->dev, pml4->pdps[pml4e]);
+ gen8_ppgtt_cleanup_3lvl(to_i915(vm->dev), pml4->pdps[pml4e]);
return ret;
}
@@ -1460,7 +1460,7 @@ static int gen8_alloc_va_range(struct i915_address_space *vm,
{
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
- if (USES_FULL_48BIT_PPGTT(vm->dev))
+ if (USES_FULL_48BIT_PPGTT(to_i915(vm->dev)))
return gen8_alloc_va_range_4lvl(vm, &ppgtt->pml4, start, length);
else
return gen8_alloc_va_range_3lvl(vm, &ppgtt->pdp, start, length);
@@ -1531,7 +1531,7 @@ static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
gen8_pte_t scratch_pte = gen8_pte_encode(vm->scratch_page.daddr,
I915_CACHE_LLC);
- if (!USES_FULL_48BIT_PPGTT(vm->dev)) {
+ if (!USES_FULL_48BIT_PPGTT(to_i915(vm->dev))) {
gen8_dump_pdp(&ppgtt->pdp, start, length, scratch_pte, m);
} else {
uint64_t pml4e;
@@ -1551,7 +1551,7 @@ static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
static int gen8_preallocate_top_level_pdps(struct i915_hw_ppgtt *ppgtt)
{
unsigned long *new_page_dirs, *new_page_tables;
- uint32_t pdpes = I915_PDPES_PER_PDP(dev);
+ uint32_t pdpes = I915_PDPES_PER_PDP(to_i915(ppgtt->base.dev));
int ret;
/* We allocate temp bitmap for page tables for no gain
@@ -1584,6 +1584,7 @@ static int gen8_preallocate_top_level_pdps(struct i915_hw_ppgtt *ppgtt)
*/
static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
{
+ struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev);
int ret;
ret = gen8_init_scratch(&ppgtt->base);
@@ -1599,8 +1600,8 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
ppgtt->base.bind_vma = ppgtt_bind_vma;
ppgtt->debug_dump = gen8_dump_ppgtt;
- if (USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) {
- ret = setup_px(ppgtt->base.dev, &ppgtt->pml4);
+ if (USES_FULL_48BIT_PPGTT(dev_priv)) {
+ ret = setup_px(dev_priv, &ppgtt->pml4);
if (ret)
goto free_scratch;
@@ -1609,7 +1610,7 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
ppgtt->base.total = 1ULL << 48;
ppgtt->switch_mm = gen8_48b_mm_switch;
} else {
- ret = __pdp_init(ppgtt->base.dev, &ppgtt->pdp);
+ ret = __pdp_init(dev_priv, &ppgtt->pdp);
if (ret)
goto free_scratch;
@@ -1619,14 +1620,14 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
0, 0,
GEN8_PML4E_SHIFT);
- if (intel_vgpu_active(to_i915(ppgtt->base.dev))) {
+ if (intel_vgpu_active(dev_priv)) {
ret = gen8_preallocate_top_level_pdps(ppgtt);
if (ret)
goto free_scratch;
}
}
- if (intel_vgpu_active(to_i915(ppgtt->base.dev)))
+ if (intel_vgpu_active(dev_priv))
gen8_ppgtt_notify_vgt(ppgtt, true);
return 0;
@@ -1801,22 +1802,21 @@ static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
return 0;
}
-static void gen8_ppgtt_enable(struct drm_device *dev)
+static void gen8_ppgtt_enable(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_engine_cs *engine;
enum intel_engine_id id;
for_each_engine(engine, dev_priv, id) {
- u32 four_level = USES_FULL_48BIT_PPGTT(dev) ? GEN8_GFX_PPGTT_48B : 0;
+ u32 four_level = USES_FULL_48BIT_PPGTT(dev_priv) ?
+ GEN8_GFX_PPGTT_48B : 0;
I915_WRITE(RING_MODE_GEN7(engine),
_MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE | four_level));
}
}
-static void gen7_ppgtt_enable(struct drm_device *dev)
+static void gen7_ppgtt_enable(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_engine_cs *engine;
uint32_t ecochk, ecobits;
enum intel_engine_id id;
@@ -1840,9 +1840,8 @@ static void gen7_ppgtt_enable(struct drm_device *dev)
}
}
-static void gen6_ppgtt_enable(struct drm_device *dev)
+static void gen6_ppgtt_enable(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t ecochk, gab_ctl, ecobits;
ecobits = I915_READ(GAC_ECO_BITS);
@@ -1928,8 +1927,7 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
uint64_t start_in, uint64_t length_in)
{
DECLARE_BITMAP(new_page_tables, I915_PDES);
- struct drm_device *dev = vm->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(vm->dev);
struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
struct i915_page_table *pt;
@@ -1959,7 +1957,7 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
/* We've already allocated a page table */
WARN_ON(!bitmap_empty(pt->used_ptes, GEN6_PTES));
- pt = alloc_pt(dev);
+ pt = alloc_pt(dev_priv);
if (IS_ERR(pt)) {
ret = PTR_ERR(pt);
goto unwind_out;
@@ -2007,7 +2005,7 @@ unwind_out:
struct i915_page_table *pt = ppgtt->pd.page_table[pde];
ppgtt->pd.page_table[pde] = vm->scratch_pt;
- free_pt(vm->dev, pt);
+ free_pt(dev_priv, pt);
}
mark_tlbs_dirty(ppgtt);
@@ -2016,16 +2014,16 @@ unwind_out:
static int gen6_init_scratch(struct i915_address_space *vm)
{
- struct drm_device *dev = vm->dev;
+ struct drm_i915_private *dev_priv = to_i915(vm->dev);
int ret;
- ret = setup_scratch_page(dev, &vm->scratch_page, I915_GFP_DMA);
+ ret = setup_scratch_page(dev_priv, &vm->scratch_page, I915_GFP_DMA);
if (ret)
return ret;
- vm->scratch_pt = alloc_pt(dev);
+ vm->scratch_pt = alloc_pt(dev_priv);
if (IS_ERR(vm->scratch_pt)) {
- cleanup_scratch_page(dev, &vm->scratch_page);
+ cleanup_scratch_page(dev_priv, &vm->scratch_page);
return PTR_ERR(vm->scratch_pt);
}
@@ -2036,17 +2034,17 @@ static int gen6_init_scratch(struct i915_address_space *vm)
static void gen6_free_scratch(struct i915_address_space *vm)
{
- struct drm_device *dev = vm->dev;
+ struct drm_i915_private *dev_priv = to_i915(vm->dev);
- free_pt(dev, vm->scratch_pt);
- cleanup_scratch_page(dev, &vm->scratch_page);
+ free_pt(dev_priv, vm->scratch_pt);
+ cleanup_scratch_page(dev_priv, &vm->scratch_page);
}
static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
{
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
struct i915_page_directory *pd = &ppgtt->pd;
- struct drm_device *dev = vm->dev;
+ struct drm_i915_private *dev_priv = to_i915(vm->dev);
struct i915_page_table *pt;
uint32_t pde;
@@ -2054,7 +2052,7 @@ static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
gen6_for_all_pdes(pt, pd, pde)
if (pt != vm->scratch_pt)
- free_pt(dev, pt);
+ free_pt(dev_priv, pt);
gen6_free_scratch(vm);
}
@@ -2062,8 +2060,7 @@ static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
{
struct i915_address_space *vm = &ppgtt->base;
- struct drm_device *dev = ppgtt->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev);
struct i915_ggtt *ggtt = &dev_priv->ggtt;
bool retried = false;
int ret;
@@ -2128,8 +2125,7 @@ static void gen6_scratch_va_range(struct i915_hw_ppgtt *ppgtt,
static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
{
- struct drm_device *dev = ppgtt->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev);
struct i915_ggtt *ggtt = &dev_priv->ggtt;
int ret;
@@ -2200,10 +2196,15 @@ static void i915_address_space_init(struct i915_address_space *vm,
list_add_tail(&vm->global_link, &dev_priv->vm_list);
}
-static void gtt_write_workarounds(struct drm_device *dev)
+static void i915_address_space_fini(struct i915_address_space *vm)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
+ i915_gem_timeline_fini(&vm->timeline);
+ drm_mm_takedown(&vm->mm);
+ list_del(&vm->global_link);
+}
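/* i915_address_space_fini() is the teardown pair of i915_address_space_init()
 * above: it finalises the vm's timeline, tears down the drm_mm range manager
 * and unlinks the vm from dev_priv->vm_list. Both i915_ppgtt_release() and
 * i915_ggtt_cleanup_hw() below are converted to share it rather than
 * open-coding the three steps.
 */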
+static void gtt_write_workarounds(struct drm_i915_private *dev_priv)
+{
/* This function is for gtt related workarounds. This function is
* called on driver load and after a GPU reset, so you can place
* workarounds here even if they get overwritten by GPU reset.
@@ -2236,11 +2237,9 @@ static int i915_ppgtt_init(struct i915_hw_ppgtt *ppgtt,
return ret;
}
-int i915_ppgtt_init_hw(struct drm_device *dev)
+int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
- gtt_write_workarounds(dev);
+ gtt_write_workarounds(dev_priv);
/* In the case of execlists, PPGTT is enabled by the context descriptor
* and the PDPs are contained within the context itself. We don't
@@ -2248,17 +2247,17 @@ int i915_ppgtt_init_hw(struct drm_device *dev)
if (i915.enable_execlists)
return 0;
- if (!USES_PPGTT(dev))
+ if (!USES_PPGTT(dev_priv))
return 0;
if (IS_GEN6(dev_priv))
- gen6_ppgtt_enable(dev);
+ gen6_ppgtt_enable(dev_priv);
else if (IS_GEN7(dev_priv))
- gen7_ppgtt_enable(dev);
- else if (INTEL_INFO(dev)->gen >= 8)
- gen8_ppgtt_enable(dev);
+ gen7_ppgtt_enable(dev_priv);
+ else if (INTEL_GEN(dev_priv) >= 8)
+ gen8_ppgtt_enable(dev_priv);
else
- MISSING_CASE(INTEL_INFO(dev)->gen);
+ MISSING_CASE(INTEL_GEN(dev_priv));
return 0;
}
@@ -2286,7 +2285,7 @@ i915_ppgtt_create(struct drm_i915_private *dev_priv,
return ppgtt;
}
void i915_ppgtt_release(struct kref *kref)
{
struct i915_hw_ppgtt *ppgtt =
container_of(kref, struct i915_hw_ppgtt, ref);
@@ -2298,9 +2297,7 @@ void i915_ppgtt_release(struct kref *kref)
WARN_ON(!list_empty(&ppgtt->base.inactive_list));
WARN_ON(!list_empty(&ppgtt->base.unbound_list));
- i915_gem_timeline_fini(&ppgtt->base.timeline);
- list_del(&ppgtt->base.global_link);
- drm_mm_takedown(&ppgtt->base.mm);
+ i915_address_space_fini(&ppgtt->base);
ppgtt->base.cleanup(&ppgtt->base);
kfree(ppgtt);
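
The release path above is driven by kref: whoever drops the last reference lands in i915_ppgtt_release(), which now delegates the list and drm_mm teardown to the new i915_address_space_fini() helper. For orientation, a minimal user-space model of that release pattern (all names here are hypothetical sketches, not the kernel API):

#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct ref { int count; void (*release)(struct ref *); };
struct ppgtt { struct ref ref; /* ...address space state... */ };

static void ref_put(struct ref *r)
{
	if (--r->count == 0)
		r->release(r);		/* last reference runs the destructor */
}

static void ppgtt_release(struct ref *r)
{
	struct ppgtt *p = container_of(r, struct ppgtt, ref);

	/* unlink from global lists first, then free - the fini/cleanup split */
	free(p);
	puts("released");
}

int main(void)
{
	struct ppgtt *p = malloc(sizeof(*p));

	p->ref = (struct ref){ .count = 2, .release = ppgtt_release };
	ref_put(&p->ref);	/* still referenced elsewhere: nothing happens */
	ref_put(&p->ref);	/* count hits zero: ppgtt_release() runs */
	return 0;
}
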
@@ -2362,15 +2359,14 @@ static void i915_ggtt_flush(struct drm_i915_private *dev_priv)
}
}
-void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
+void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct i915_ggtt *ggtt = &dev_priv->ggtt;
/* Don't bother messing with faults pre GEN6 as we have little
* documentation supporting that it's a good idea.
*/
- if (INTEL_INFO(dev)->gen < 6)
+ if (INTEL_GEN(dev_priv) < 6)
return;
i915_check_and_clear_faults(dev_priv);
@@ -2842,8 +2838,9 @@ void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
if (drm_mm_initialized(&ggtt->base.mm)) {
intel_vgt_deballoon(dev_priv);
- drm_mm_takedown(&ggtt->base.mm);
- list_del(&ggtt->base.global_link);
+ mutex_lock(&dev_priv->drm.struct_mutex);
+ i915_address_space_fini(&ggtt->base);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
}
ggtt->base.cleanup(&ggtt->base);
@@ -2932,6 +2929,7 @@ static size_t gen9_get_stolen_size(u16 gen9_gmch_ctl)
static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
{
+ struct drm_i915_private *dev_priv = to_i915(ggtt->base.dev);
struct pci_dev *pdev = ggtt->base.dev->pdev;
phys_addr_t phys_addr;
int ret;
@@ -2946,7 +2944,7 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
* resort to an uncached mapping. The WC issue is easily caught by the
* readback check when writing GTT PTE entries.
*/
- if (IS_BROXTON(to_i915(ggtt->base.dev)))
+ if (IS_BROXTON(dev_priv))
ggtt->gsm = ioremap_nocache(phys_addr, size);
else
ggtt->gsm = ioremap_wc(phys_addr, size);
@@ -2955,9 +2953,7 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
return -ENOMEM;
}
- ret = setup_scratch_page(ggtt->base.dev,
- &ggtt->base.scratch_page,
- GFP_DMA32);
+ ret = setup_scratch_page(dev_priv, &ggtt->base.scratch_page, GFP_DMA32);
if (ret) {
DRM_ERROR("Scratch setup failed\n");
/* iounmap will also get called at remove, but meh */
@@ -3046,7 +3042,7 @@ static void gen6_gmch_remove(struct i915_address_space *vm)
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
iounmap(ggtt->gsm);
- cleanup_scratch_page(vm->dev, &vm->scratch_page);
+ cleanup_scratch_page(to_i915(vm->dev), &vm->scratch_page);
}
static int gen8_gmch_probe(struct i915_ggtt *ggtt)
@@ -3262,7 +3258,7 @@ int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
* Initialise stolen early so that we may reserve preallocated
* objects for the BIOS to KMS transition.
*/
- ret = i915_gem_init_stolen(&dev_priv->drm);
+ ret = i915_gem_init_stolen(dev_priv);
if (ret)
goto out_gtt_cleanup;
@@ -3281,9 +3277,8 @@ int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv)
return 0;
}
-void i915_gem_restore_gtt_mappings(struct drm_device *dev)
+void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct drm_i915_gem_object *obj, *on;
@@ -3318,7 +3313,7 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
ggtt->base.closed = false;
- if (INTEL_INFO(dev)->gen >= 8) {
+ if (INTEL_GEN(dev_priv) >= 8) {
if (IS_CHERRYVIEW(dev_priv) || IS_BROXTON(dev_priv))
chv_setup_private_ppat(dev_priv);
else
@@ -3327,7 +3322,7 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
return;
}
- if (USES_PPGTT(dev)) {
+ if (USES_PPGTT(dev_priv)) {
struct i915_address_space *vm;
list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
@@ -3348,176 +3343,6 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
i915_ggtt_flush(dev_priv);
}
-static void
-i915_vma_retire(struct i915_gem_active *active,
- struct drm_i915_gem_request *rq)
-{
- const unsigned int idx = rq->engine->id;
- struct i915_vma *vma =
- container_of(active, struct i915_vma, last_read[idx]);
- struct drm_i915_gem_object *obj = vma->obj;
-
- GEM_BUG_ON(!i915_vma_has_active_engine(vma, idx));
-
- i915_vma_clear_active(vma, idx);
- if (i915_vma_is_active(vma))
- return;
-
- list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
- if (unlikely(i915_vma_is_closed(vma) && !i915_vma_is_pinned(vma)))
- WARN_ON(i915_vma_unbind(vma));
-
- GEM_BUG_ON(!i915_gem_object_is_active(obj));
- if (--obj->active_count)
- return;
-
- /* Bump our place on the bound list to keep it roughly in LRU order
- * so that we don't steal from recently used but inactive objects
- * (unless we are forced to ofc!)
- */
- if (obj->bind_count)
- list_move_tail(&obj->global_link, &rq->i915->mm.bound_list);
-
- obj->mm.dirty = true; /* be paranoid */
-
- if (i915_gem_object_has_active_reference(obj)) {
- i915_gem_object_clear_active_reference(obj);
- i915_gem_object_put(obj);
- }
-}
-
-static void
-i915_ggtt_retire__write(struct i915_gem_active *active,
- struct drm_i915_gem_request *request)
-{
- struct i915_vma *vma =
- container_of(active, struct i915_vma, last_write);
-
- intel_fb_obj_flush(vma->obj, true, ORIGIN_CS);
-}
-
-void i915_vma_destroy(struct i915_vma *vma)
-{
- GEM_BUG_ON(vma->node.allocated);
- GEM_BUG_ON(i915_vma_is_active(vma));
- GEM_BUG_ON(!i915_vma_is_closed(vma));
- GEM_BUG_ON(vma->fence);
-
- list_del(&vma->vm_link);
- if (!i915_vma_is_ggtt(vma))
- i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm));
-
- kmem_cache_free(to_i915(vma->obj->base.dev)->vmas, vma);
-}
-
-void i915_vma_close(struct i915_vma *vma)
-{
- GEM_BUG_ON(i915_vma_is_closed(vma));
- vma->flags |= I915_VMA_CLOSED;
-
- list_del(&vma->obj_link);
- rb_erase(&vma->obj_node, &vma->obj->vma_tree);
-
- if (!i915_vma_is_active(vma) && !i915_vma_is_pinned(vma))
- WARN_ON(i915_vma_unbind(vma));
-}
-
-static inline long vma_compare(struct i915_vma *vma,
- struct i915_address_space *vm,
- const struct i915_ggtt_view *view)
-{
- GEM_BUG_ON(view && !i915_is_ggtt(vm));
-
- if (vma->vm != vm)
- return vma->vm - vm;
-
- if (!view)
- return vma->ggtt_view.type;
-
- if (vma->ggtt_view.type != view->type)
- return vma->ggtt_view.type - view->type;
-
- return memcmp(&vma->ggtt_view.params,
- &view->params,
- sizeof(view->params));
-}
-
-static struct i915_vma *
-__i915_vma_create(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm,
- const struct i915_ggtt_view *view)
-{
- struct i915_vma *vma;
- struct rb_node *rb, **p;
- int i;
-
- GEM_BUG_ON(vm->closed);
-
- vma = kmem_cache_zalloc(to_i915(obj->base.dev)->vmas, GFP_KERNEL);
- if (vma == NULL)
- return ERR_PTR(-ENOMEM);
-
- INIT_LIST_HEAD(&vma->exec_list);
- for (i = 0; i < ARRAY_SIZE(vma->last_read); i++)
- init_request_active(&vma->last_read[i], i915_vma_retire);
- init_request_active(&vma->last_write,
- i915_is_ggtt(vm) ? i915_ggtt_retire__write : NULL);
- init_request_active(&vma->last_fence, NULL);
- list_add(&vma->vm_link, &vm->unbound_list);
- vma->vm = vm;
- vma->obj = obj;
- vma->size = obj->base.size;
-
- if (view) {
- vma->ggtt_view = *view;
- if (view->type == I915_GGTT_VIEW_PARTIAL) {
- vma->size = view->params.partial.size;
- vma->size <<= PAGE_SHIFT;
- } else if (view->type == I915_GGTT_VIEW_ROTATED) {
- vma->size =
- intel_rotation_info_size(&view->params.rotated);
- vma->size <<= PAGE_SHIFT;
- }
- }
-
- if (i915_is_ggtt(vm)) {
- vma->flags |= I915_VMA_GGTT;
- list_add(&vma->obj_link, &obj->vma_list);
- } else {
- i915_ppgtt_get(i915_vm_to_ppgtt(vm));
- list_add_tail(&vma->obj_link, &obj->vma_list);
- }
-
- rb = NULL;
- p = &obj->vma_tree.rb_node;
- while (*p) {
- struct i915_vma *pos;
-
- rb = *p;
- pos = rb_entry(rb, struct i915_vma, obj_node);
- if (vma_compare(pos, vm, view) < 0)
- p = &rb->rb_right;
- else
- p = &rb->rb_left;
- }
- rb_link_node(&vma->obj_node, rb, p);
- rb_insert_color(&vma->obj_node, &obj->vma_tree);
-
- return vma;
-}
-
-struct i915_vma *
-i915_vma_create(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm,
- const struct i915_ggtt_view *view)
-{
- lockdep_assert_held(&obj->base.dev->struct_mutex);
- GEM_BUG_ON(view && !i915_is_ggtt(vm));
- GEM_BUG_ON(i915_gem_obj_to_vma(obj, vm, view));
-
- return __i915_vma_create(obj, vm, view);
-}
-
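
The creation walk deleted here (the callers below switch to i915_vma_create() and i915_vma_compare(), suggesting the code moved to a dedicated VMA file) uses the standard kernel rb-tree idiom: descend to a leaf with a three-way comparator, then link the new node. A sketch of the same shape over a plain binary search tree (no rebalancing, illustrative only):

#include <stdio.h>

struct node { long key; struct node *left, *right; };

static void insert(struct node **root, struct node *n)
{
	struct node **p = root;

	while (*p) {
		struct node *pos = *p;

		if (pos->key - n->key < 0)	/* three-way compare, as in i915_vma_compare() */
			p = &pos->right;
		else
			p = &pos->left;
	}
	*p = n;	/* rb_link_node() + rb_insert_color() in the real code */
}

int main(void)
{
	struct node *root = NULL;
	struct node a = { .key = 2 }, b = { .key = 1 }, c = { .key = 3 };

	insert(&root, &a);
	insert(&root, &b);
	insert(&root, &c);
	printf("root=%ld left=%ld right=%ld\n",
	       root->key, root->left->key, root->right->key);
	return 0;
}
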
struct i915_vma *
i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
struct i915_address_space *vm,
@@ -3530,7 +3355,7 @@ i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
struct i915_vma *vma = rb_entry(rb, struct i915_vma, obj_node);
long cmp;
- cmp = vma_compare(vma, vm, view);
+ cmp = i915_vma_compare(vma, vm, view);
if (cmp == 0)
return vma;
@@ -3555,7 +3380,7 @@ i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
vma = i915_gem_obj_to_vma(obj, vm, view);
if (!vma) {
- vma = __i915_vma_create(obj, vm, view);
+ vma = i915_vma_create(obj, vm, view);
GEM_BUG_ON(vma != i915_gem_obj_to_vma(obj, vm, view));
}
@@ -3747,99 +3572,3 @@ i915_get_ggtt_vma_pages(struct i915_vma *vma)
return ret;
}
-/**
- * i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space.
- * @vma: VMA to map
- * @cache_level: mapping cache level
- * @flags: flags like global or local mapping
- *
- * DMA addresses are taken from the scatter-gather table of this object (or of
- * this VMA in case of non-default GGTT views) and PTE entries set up.
- * Note that DMA addresses are also the only part of the SG table we care about.
- */
-int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
- u32 flags)
-{
- u32 bind_flags;
- u32 vma_flags;
- int ret;
-
- if (WARN_ON(flags == 0))
- return -EINVAL;
-
- bind_flags = 0;
- if (flags & PIN_GLOBAL)
- bind_flags |= I915_VMA_GLOBAL_BIND;
- if (flags & PIN_USER)
- bind_flags |= I915_VMA_LOCAL_BIND;
-
- vma_flags = vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
- if (flags & PIN_UPDATE)
- bind_flags |= vma_flags;
- else
- bind_flags &= ~vma_flags;
- if (bind_flags == 0)
- return 0;
-
- if (vma_flags == 0 && vma->vm->allocate_va_range) {
- trace_i915_va_alloc(vma);
- ret = vma->vm->allocate_va_range(vma->vm,
- vma->node.start,
- vma->node.size);
- if (ret)
- return ret;
- }
-
- ret = vma->vm->bind_vma(vma, cache_level, bind_flags);
- if (ret)
- return ret;
-
- vma->flags |= bind_flags;
- return 0;
-}
-
-void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
-{
- void __iomem *ptr;
-
- /* Access through the GTT requires the device to be awake. */
- assert_rpm_wakelock_held(to_i915(vma->vm->dev));
-
- lockdep_assert_held(&vma->vm->dev->struct_mutex);
- if (WARN_ON(!i915_vma_is_map_and_fenceable(vma)))
- return IO_ERR_PTR(-ENODEV);
-
- GEM_BUG_ON(!i915_vma_is_ggtt(vma));
- GEM_BUG_ON((vma->flags & I915_VMA_GLOBAL_BIND) == 0);
-
- ptr = vma->iomap;
- if (ptr == NULL) {
- ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->mappable,
- vma->node.start,
- vma->node.size);
- if (ptr == NULL)
- return IO_ERR_PTR(-ENOMEM);
-
- vma->iomap = ptr;
- }
-
- __i915_vma_pin(vma);
- return ptr;
-}
-
-void i915_vma_unpin_and_release(struct i915_vma **p_vma)
-{
- struct i915_vma *vma;
- struct drm_i915_gem_object *obj;
-
- vma = fetch_and_zero(p_vma);
- if (!vma)
- return;
-
- obj = vma->obj;
-
- i915_vma_unpin(vma);
- i915_vma_close(vma);
-
- __i915_gem_object_release_unless_active(obj);
-}
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index c23ef9db1f53..4f35be4c26c7 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -35,7 +35,9 @@
#define __I915_GEM_GTT_H__
#include <linux/io-mapping.h>
+#include <linux/mm.h>
+#include "i915_gem_timeline.h"
#include "i915_gem_request.h"
#define I915_FENCE_REG_NONE -1
@@ -118,8 +120,8 @@ typedef uint64_t gen8_ppgtt_pml4e_t;
#define GEN8_LEGACY_PDPES 4
#define GEN8_PTES I915_PTES(sizeof(gen8_pte_t))
-#define I915_PDPES_PER_PDP(dev) (USES_FULL_48BIT_PPGTT(dev) ?\
- GEN8_PML4ES_PER_PML4 : GEN8_LEGACY_PDPES)
+#define I915_PDPES_PER_PDP(dev_priv) (USES_FULL_48BIT_PPGTT(dev_priv) ?\
+ GEN8_PML4ES_PER_PML4 : GEN8_LEGACY_PDPES)
#define PPAT_UNCACHED_INDEX (_PAGE_PWT | _PAGE_PCD)
#define PPAT_CACHED_PDE_INDEX 0 /* WB LLC */
@@ -138,6 +140,8 @@ typedef uint64_t gen8_ppgtt_pml4e_t;
#define GEN8_PPAT_ELLC_OVERRIDE (0<<2)
#define GEN8_PPAT(i, x) ((uint64_t) (x) << ((i) * 8))
+struct sg_table;
+
enum i915_ggtt_view_type {
I915_GGTT_VIEW_NORMAL = 0,
I915_GGTT_VIEW_ROTATED,
@@ -168,135 +172,7 @@ extern const struct i915_ggtt_view i915_ggtt_view_rotated;
enum i915_cache_level;
-/**
- * A VMA represents a GEM BO that is bound into an address space. Therefore, a
- * VMA's presence cannot be guaranteed before binding, or after unbinding the
- * object into/from the address space.
- *
- * To make things as simple as possible (ie. no refcounting), a VMA's lifetime
- * will always be <= an object's lifetime. So object refcounting should cover us.
- */
-struct i915_vma {
- struct drm_mm_node node;
- struct drm_i915_gem_object *obj;
- struct i915_address_space *vm;
- struct drm_i915_fence_reg *fence;
- struct sg_table *pages;
- void __iomem *iomap;
- u64 size;
- u64 display_alignment;
-
- unsigned int flags;
- /**
- * How many users have pinned this object in GTT space. The following
- * users can each hold at most one reference: pwrite/pread, execbuffer
- * (objects are not allowed multiple times for the same batchbuffer),
- * and the framebuffer code. When switching/pageflipping, the
- * framebuffer code has at most two buffers pinned per crtc.
- *
- * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
- * bits with absolutely no headroom. So use 4 bits.
- */
-#define I915_VMA_PIN_MASK 0xf
-#define I915_VMA_PIN_OVERFLOW BIT(5)
-
- /** Flags and address space this VMA is bound to */
-#define I915_VMA_GLOBAL_BIND BIT(6)
-#define I915_VMA_LOCAL_BIND BIT(7)
-#define I915_VMA_BIND_MASK (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND | I915_VMA_PIN_OVERFLOW)
-
-#define I915_VMA_GGTT BIT(8)
-#define I915_VMA_CAN_FENCE BIT(9)
-#define I915_VMA_CLOSED BIT(10)
-
- unsigned int active;
- struct i915_gem_active last_read[I915_NUM_ENGINES];
- struct i915_gem_active last_write;
- struct i915_gem_active last_fence;
-
- /**
- * Support different GGTT views into the same object.
- * This means there can be multiple VMA mappings per object and per VM.
- * i915_ggtt_view_type is used to distinguish between those entries.
- * The default of zero (I915_GGTT_VIEW_NORMAL) is also assumed in
- * GEM functions which take no ggtt view parameter.
- */
- struct i915_ggtt_view ggtt_view;
-
- /** This object's place on the active/inactive lists */
- struct list_head vm_link;
-
- struct list_head obj_link; /* Link in the object's VMA list */
- struct rb_node obj_node;
-
- /** This vma's place in the batchbuffer or on the eviction list */
- struct list_head exec_list;
-
- /**
- * Used for performing relocations during execbuffer insertion.
- */
- struct hlist_node exec_node;
- unsigned long exec_handle;
- struct drm_i915_gem_exec_object2 *exec_entry;
-};
-
-struct i915_vma *
-i915_vma_create(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm,
- const struct i915_ggtt_view *view);
-void i915_vma_unpin_and_release(struct i915_vma **p_vma);
-
-static inline bool i915_vma_is_ggtt(const struct i915_vma *vma)
-{
- return vma->flags & I915_VMA_GGTT;
-}
-
-static inline bool i915_vma_is_map_and_fenceable(const struct i915_vma *vma)
-{
- return vma->flags & I915_VMA_CAN_FENCE;
-}
-
-static inline bool i915_vma_is_closed(const struct i915_vma *vma)
-{
- return vma->flags & I915_VMA_CLOSED;
-}
-
-static inline unsigned int i915_vma_get_active(const struct i915_vma *vma)
-{
- return vma->active;
-}
-
-static inline bool i915_vma_is_active(const struct i915_vma *vma)
-{
- return i915_vma_get_active(vma);
-}
-
-static inline void i915_vma_set_active(struct i915_vma *vma,
- unsigned int engine)
-{
- vma->active |= BIT(engine);
-}
-
-static inline void i915_vma_clear_active(struct i915_vma *vma,
- unsigned int engine)
-{
- vma->active &= ~BIT(engine);
-}
-
-static inline bool i915_vma_has_active_engine(const struct i915_vma *vma,
- unsigned int engine)
-{
- return vma->active & BIT(engine);
-}
-
-static inline u32 i915_ggtt_offset(const struct i915_vma *vma)
-{
- GEM_BUG_ON(!i915_vma_is_ggtt(vma));
- GEM_BUG_ON(!vma->node.allocated);
- GEM_BUG_ON(upper_32_bits(vma->node.start));
- GEM_BUG_ON(upper_32_bits(vma->node.start + vma->node.size - 1));
- return lower_32_bits(vma->node.start);
-}
+struct i915_vma;
struct i915_page_dma {
struct page *page;
@@ -606,13 +482,20 @@ i915_page_dir_dma_addr(const struct i915_hw_ppgtt *ppgtt, const unsigned n)
px_dma(ppgtt->base.scratch_pd);
}
+static inline struct i915_ggtt *
+i915_vm_to_ggtt(struct i915_address_space *vm)
+{
+ GEM_BUG_ON(!i915_is_ggtt(vm));
+ return container_of(vm, struct i915_ggtt, base);
+}
+
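
The new i915_vm_to_ggtt() helper is a checked downcast: the GGTT embeds its i915_address_space as the 'base' member, so container_of() recovers the outer struct by pointer arithmetic, with the GEM_BUG_ON guarding against feeding it a ppgtt. An illustrative stand-alone model:

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct address_space { int is_ggtt; };
struct ggtt { long gsm; struct address_space base; };

static struct ggtt *vm_to_ggtt(struct address_space *vm)
{
	assert(vm->is_ggtt);	/* GEM_BUG_ON(!i915_is_ggtt(vm)) in the kernel */
	return container_of(vm, struct ggtt, base);
}

int main(void)
{
	struct ggtt g = { .gsm = 42, .base = { .is_ggtt = 1 } };

	printf("%ld\n", vm_to_ggtt(&g.base)->gsm);	/* prints 42 */
	return 0;
}
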
int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv);
int i915_ggtt_init_hw(struct drm_i915_private *dev_priv);
int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv);
int i915_gem_init_ggtt(struct drm_i915_private *dev_priv);
void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv);
-int i915_ppgtt_init_hw(struct drm_device *dev);
+int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv);
void i915_ppgtt_release(struct kref *kref);
struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_i915_private *dev_priv,
struct drm_i915_file_private *fpriv,
@@ -629,8 +512,8 @@ static inline void i915_ppgtt_put(struct i915_hw_ppgtt *ppgtt)
}
void i915_check_and_clear_faults(struct drm_i915_private *dev_priv);
-void i915_gem_suspend_gtt_mappings(struct drm_device *dev);
-void i915_gem_restore_gtt_mappings(struct drm_device *dev);
+void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv);
+void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv);
int __must_check i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
struct sg_table *pages);
@@ -653,88 +536,4 @@ void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
#define PIN_OFFSET_FIXED BIT(11)
#define PIN_OFFSET_MASK (~4095)
-int __i915_vma_do_pin(struct i915_vma *vma,
- u64 size, u64 alignment, u64 flags);
-static inline int __must_check
-i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
-{
- BUILD_BUG_ON(PIN_MBZ != I915_VMA_PIN_OVERFLOW);
- BUILD_BUG_ON(PIN_GLOBAL != I915_VMA_GLOBAL_BIND);
- BUILD_BUG_ON(PIN_USER != I915_VMA_LOCAL_BIND);
-
- /* Pin early to prevent the shrinker/eviction logic from destroying
- * our vma as we insert and bind.
- */
- if (likely(((++vma->flags ^ flags) & I915_VMA_BIND_MASK) == 0))
- return 0;
-
- return __i915_vma_do_pin(vma, size, alignment, flags);
-}
-
-static inline int i915_vma_pin_count(const struct i915_vma *vma)
-{
- return vma->flags & I915_VMA_PIN_MASK;
-}
-
-static inline bool i915_vma_is_pinned(const struct i915_vma *vma)
-{
- return i915_vma_pin_count(vma);
-}
-
-static inline void __i915_vma_pin(struct i915_vma *vma)
-{
- vma->flags++;
- GEM_BUG_ON(vma->flags & I915_VMA_PIN_OVERFLOW);
-}
-
-static inline void __i915_vma_unpin(struct i915_vma *vma)
-{
- GEM_BUG_ON(!i915_vma_is_pinned(vma));
- vma->flags--;
-}
-
-static inline void i915_vma_unpin(struct i915_vma *vma)
-{
- GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
- __i915_vma_unpin(vma);
-}
-
-/**
- * i915_vma_pin_iomap - calls ioremap_wc to map the GGTT VMA via the aperture
- * @vma: VMA to iomap
- *
- * The passed in VMA has to be pinned in the global GTT mappable region.
- * An extra pinning of the VMA is acquired for the return iomapping,
- * the caller must call i915_vma_unpin_iomap to relinquish the pinning
- * after the iomapping is no longer required.
- *
- * Callers must hold the struct_mutex.
- *
- * Returns a valid iomapped pointer or ERR_PTR.
- */
-void __iomem *i915_vma_pin_iomap(struct i915_vma *vma);
-#define IO_ERR_PTR(x) ((void __iomem *)ERR_PTR(x))
-
-/**
- * i915_vma_unpin_iomap - unpins the mapping returned from i915_vma_pin_iomap
- * @vma: VMA to unpin
- *
- * Unpins the previously iomapped VMA from i915_vma_pin_iomap().
- *
- * Callers must hold the struct_mutex. This function is only valid to be
- * called on a VMA previously iomapped by the caller with i915_vma_pin_iomap().
- */
-static inline void i915_vma_unpin_iomap(struct i915_vma *vma)
-{
- lockdep_assert_held(&vma->vm->dev->struct_mutex);
- GEM_BUG_ON(vma->iomap == NULL);
- i915_vma_unpin(vma);
-}
-
-static inline struct page *i915_vma_first_page(struct i915_vma *vma)
-{
- GEM_BUG_ON(!vma->pages);
- return sg_page(vma->pages->sgl);
-}
-
#endif
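
The pin helpers removed above encode the pin count in the low bits of vma->flags, which is why __i915_vma_pin() can be a bare vma->flags++. A simplified model of that encoding (the real header reserves BIT(5) for overflow; this sketch uses the first bit above the mask so the arithmetic is self-contained):

#include <assert.h>
#include <stdio.h>

#define VMA_PIN_MASK	 0xfu			/* low bits hold the pin count */
#define VMA_PIN_OVERFLOW (VMA_PIN_MASK + 1)	/* first bit above the count */
#define VMA_GGTT	 (1u << 8)		/* unrelated state lives higher up */

static void vma_pin(unsigned int *flags)
{
	(*flags)++;				/* count occupies the low bits */
	assert(!(*flags & VMA_PIN_OVERFLOW));	/* GEM_BUG_ON on overflow */
}

static void vma_unpin(unsigned int *flags)
{
	assert(*flags & VMA_PIN_MASK);		/* must currently be pinned */
	(*flags)--;
}

int main(void)
{
	unsigned int flags = VMA_GGTT;

	vma_pin(&flags);
	vma_pin(&flags);
	printf("pin_count=%u ggtt=%u\n",
	       flags & VMA_PIN_MASK, !!(flags & VMA_GGTT));	/* 2 1 */
	vma_unpin(&flags);
	vma_unpin(&flags);
	return 0;
}
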
diff --git a/drivers/gpu/drm/i915/i915_gem_object.h b/drivers/gpu/drm/i915/i915_gem_object.h
new file mode 100644
index 000000000000..6a368de9d81e
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_object.h
@@ -0,0 +1,338 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __I915_GEM_OBJECT_H__
+#define __I915_GEM_OBJECT_H__
+
+#include <linux/reservation.h>
+
+#include <drm/drm_vma_manager.h>
+#include <drm/drm_gem.h>
+#include <drm/drmP.h>
+
+#include <drm/i915_drm.h>
+
+struct drm_i915_gem_object_ops {
+ unsigned int flags;
+#define I915_GEM_OBJECT_HAS_STRUCT_PAGE 0x1
+#define I915_GEM_OBJECT_IS_SHRINKABLE 0x2
+
+ /* Interface between the GEM object and its backing storage.
+ * get_pages() is called once prior to the use of the associated set
+ * of pages, before binding them into the GTT, and put_pages() is
+ * called after we no longer need them. As we expect there to be
+ * associated cost with migrating pages between the backing storage
+ * and making them available for the GPU (e.g. clflush), we may hold
+ * onto the pages after they are no longer referenced by the GPU
+ * in case they may be used again shortly (for example migrating the
+ * pages to a different memory domain within the GTT). put_pages()
+ * will therefore most likely be called when the object itself is
+ * being released or under memory pressure (where we attempt to
+ * reap pages for the shrinker).
+ */
+ struct sg_table *(*get_pages)(struct drm_i915_gem_object *);
+ void (*put_pages)(struct drm_i915_gem_object *, struct sg_table *);
+
+ int (*dmabuf_export)(struct drm_i915_gem_object *);
+ void (*release)(struct drm_i915_gem_object *);
+};
+
+struct drm_i915_gem_object {
+ struct drm_gem_object base;
+
+ const struct drm_i915_gem_object_ops *ops;
+
+ /** List of VMAs backed by this object */
+ struct list_head vma_list;
+ struct rb_root vma_tree;
+
+ /** Stolen memory for this object, instead of being backed by shmem. */
+ struct drm_mm_node *stolen;
+ struct list_head global_link;
+ union {
+ struct rcu_head rcu;
+ struct llist_node freed;
+ };
+
+ /**
+ * Whether the object is currently in the GGTT mmap.
+ */
+ struct list_head userfault_link;
+
+ /** Used in execbuf to temporarily hold a ref */
+ struct list_head obj_exec_link;
+
+ struct list_head batch_pool_link;
+
+ unsigned long flags;
+
+ /**
+ * Have we taken a reference for the object for incomplete GPU
+ * activity?
+ */
+#define I915_BO_ACTIVE_REF 0
+
+ /*
+ * Is the object to be mapped as read-only to the GPU?
+ * Only honoured if the hardware has the relevant pte bit.
+ */
+ unsigned long gt_ro:1;
+ unsigned int cache_level:3;
+ unsigned int cache_dirty:1;
+
+ atomic_t frontbuffer_bits;
+ unsigned int frontbuffer_ggtt_origin; /* write once */
+ struct i915_gem_active frontbuffer_write;
+
+ /** Current tiling stride for the object, if it's tiled. */
+ unsigned int tiling_and_stride;
+#define FENCE_MINIMUM_STRIDE 128 /* See i915_tiling_ok() */
+#define TILING_MASK (FENCE_MINIMUM_STRIDE-1)
+#define STRIDE_MASK (~TILING_MASK)
+
+ /** Count of VMA actually bound by this object */
+ unsigned int bind_count;
+ unsigned int active_count;
+ unsigned int pin_display;
+
+ struct {
+ struct mutex lock; /* protects the pages and their use */
+ atomic_t pages_pin_count;
+
+ struct sg_table *pages;
+ void *mapping;
+
+ struct i915_gem_object_page_iter {
+ struct scatterlist *sg_pos;
+ unsigned int sg_idx; /* in pages, but 32bit eek! */
+
+ struct radix_tree_root radix;
+ struct mutex lock; /* protects this cache */
+ } get_page;
+
+ /**
+ * Advice: are the backing pages purgeable?
+ */
+ unsigned int madv:2;
+
+ /**
+ * This is set if the object has been written to since the
+ * pages were last acquired.
+ */
+ bool dirty:1;
+
+ /**
+ * This is set if the object has been pinned due to unknown
+ * swizzling.
+ */
+ bool quirked:1;
+ } mm;
+
+ /** Breadcrumb of last rendering to the buffer.
+ * There can only be one writer, but we allow for multiple readers.
+ * If there is a writer that necessarily implies that all other
+ * read requests are complete - but we may only be lazily clearing
+ * the read requests. A read request is naturally the most recent
+ * request on a ring, so we may have two different write and read
+ * requests on one ring where the write request is older than the
+ * read request. This allows for the CPU to read from an active
+ * buffer by only waiting for the write to complete.
+ */
+ struct reservation_object *resv;
+
+ /** References from framebuffers, locks out tiling changes. */
+ unsigned long framebuffer_references;
+
+ /** Record of address bit 17 of each page at last unbind. */
+ unsigned long *bit_17;
+
+ struct i915_gem_userptr {
+ uintptr_t ptr;
+ unsigned read_only :1;
+
+ struct i915_mm_struct *mm;
+ struct i915_mmu_object *mmu_object;
+ struct work_struct *work;
+ } userptr;
+
+ /** for phys allocated objects */
+ struct drm_dma_handle *phys_handle;
+
+ struct reservation_object __builtin_resv;
+};
+
+static inline struct drm_i915_gem_object *
+to_intel_bo(struct drm_gem_object *gem)
+{
+ /* Assert that to_intel_bo(NULL) == NULL */
+ BUILD_BUG_ON(offsetof(struct drm_i915_gem_object, base));
+
+ return container_of(gem, struct drm_i915_gem_object, base);
+}
+
+/**
+ * i915_gem_object_lookup_rcu - look up a temporary GEM object from its handle
+ * @file: DRM file private data
+ * @handle: userspace handle
+ *
+ * Returns:
+ *
+ * A pointer to the object named by the handle if such exists on @file, NULL
+ * otherwise. This object is only valid whilst under the RCU read lock, and
+ * note carefully the object may be in the process of being destroyed.
+ */
+static inline struct drm_i915_gem_object *
+i915_gem_object_lookup_rcu(struct drm_file *file, u32 handle)
+{
+#ifdef CONFIG_LOCKDEP
+ WARN_ON(debug_locks && !lock_is_held(&rcu_lock_map));
+#endif
+ return idr_find(&file->object_idr, handle);
+}
+
+static inline struct drm_i915_gem_object *
+i915_gem_object_lookup(struct drm_file *file, u32 handle)
+{
+ struct drm_i915_gem_object *obj;
+
+ rcu_read_lock();
+ obj = i915_gem_object_lookup_rcu(file, handle);
+ if (obj && !kref_get_unless_zero(&obj->base.refcount))
+ obj = NULL;
+ rcu_read_unlock();
+
+ return obj;
+}
+
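
i915_gem_object_lookup() above is the classic RCU lookup idiom: find the object under the read lock, then take a reference only if the refcount has not already reached zero, because the object may be mid-destruction when found. A single-threaded model of that rule (hypothetical types; the kernel uses idr_find() and kref_get_unless_zero()):

#include <stdio.h>

struct obj { int refcount; };

static int get_unless_zero(struct obj *o)
{
	if (o->refcount == 0)
		return 0;	/* object already dying: treat as not found */
	o->refcount++;
	return 1;
}

static struct obj *lookup(struct obj *table[], unsigned int handle)
{
	struct obj *o = table[handle];	/* idr_find() under rcu_read_lock() */

	if (o && !get_unless_zero(o))
		o = NULL;		/* kref_get_unless_zero() failed */
	return o;
}

int main(void)
{
	struct obj live = { .refcount = 1 }, dying = { .refcount = 0 };
	struct obj *table[] = { &live, &dying };

	/* second lookup returns NULL despite the table entry still existing */
	printf("%p %p\n", (void *)lookup(table, 0), (void *)lookup(table, 1));
	return 0;
}
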
+__deprecated
+extern struct drm_gem_object *
+drm_gem_object_lookup(struct drm_file *file, u32 handle);
+
+__attribute__((nonnull))
+static inline struct drm_i915_gem_object *
+i915_gem_object_get(struct drm_i915_gem_object *obj)
+{
+ drm_gem_object_reference(&obj->base);
+ return obj;
+}
+
+__deprecated
+extern void drm_gem_object_reference(struct drm_gem_object *);
+
+__attribute__((nonnull))
+static inline void
+i915_gem_object_put(struct drm_i915_gem_object *obj)
+{
+ __drm_gem_object_unreference(&obj->base);
+}
+
+__deprecated
+extern void drm_gem_object_unreference(struct drm_gem_object *);
+
+__deprecated
+extern void drm_gem_object_unreference_unlocked(struct drm_gem_object *);
+
+static inline bool
+i915_gem_object_is_dead(const struct drm_i915_gem_object *obj)
+{
+ return atomic_read(&obj->base.refcount.refcount) == 0;
+}
+
+static inline bool
+i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
+{
+ return obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE;
+}
+
+static inline bool
+i915_gem_object_is_shrinkable(const struct drm_i915_gem_object *obj)
+{
+ return obj->ops->flags & I915_GEM_OBJECT_IS_SHRINKABLE;
+}
+
+static inline bool
+i915_gem_object_is_active(const struct drm_i915_gem_object *obj)
+{
+ return obj->active_count;
+}
+
+static inline bool
+i915_gem_object_has_active_reference(const struct drm_i915_gem_object *obj)
+{
+ return test_bit(I915_BO_ACTIVE_REF, &obj->flags);
+}
+
+static inline void
+i915_gem_object_set_active_reference(struct drm_i915_gem_object *obj)
+{
+ lockdep_assert_held(&obj->base.dev->struct_mutex);
+ __set_bit(I915_BO_ACTIVE_REF, &obj->flags);
+}
+
+static inline void
+i915_gem_object_clear_active_reference(struct drm_i915_gem_object *obj)
+{
+ lockdep_assert_held(&obj->base.dev->struct_mutex);
+ __clear_bit(I915_BO_ACTIVE_REF, &obj->flags);
+}
+
+void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj);
+
+static inline unsigned int
+i915_gem_object_get_tiling(struct drm_i915_gem_object *obj)
+{
+ return obj->tiling_and_stride & TILING_MASK;
+}
+
+static inline bool
+i915_gem_object_is_tiled(struct drm_i915_gem_object *obj)
+{
+ return i915_gem_object_get_tiling(obj) != I915_TILING_NONE;
+}
+
+static inline unsigned int
+i915_gem_object_get_stride(struct drm_i915_gem_object *obj)
+{
+ return obj->tiling_and_stride & STRIDE_MASK;
+}
+
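
The accessors above work because tiling_and_stride packs two fields into one word: the minimum fence stride (128) is a power of two, so the tiling mode fits in the low bits while the stride, always a multiple of 128, occupies the rest. A small model of the packing:

#include <assert.h>
#include <stdio.h>

#define FENCE_MINIMUM_STRIDE 128
#define TILING_MASK (FENCE_MINIMUM_STRIDE - 1)
#define STRIDE_MASK (~TILING_MASK)

int main(void)
{
	unsigned int tiling = 1;	/* I915_TILING_X, say */
	unsigned int stride = 4096;	/* multiple of 128: low bits are free */
	unsigned int packed;

	assert((stride & TILING_MASK) == 0);
	packed = tiling | stride;

	printf("tiling=%u stride=%u\n",
	       packed & TILING_MASK, packed & STRIDE_MASK);	/* 1 4096 */
	return 0;
}
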
+static inline struct intel_engine_cs *
+i915_gem_object_last_write_engine(struct drm_i915_gem_object *obj)
+{
+ struct intel_engine_cs *engine = NULL;
+ struct dma_fence *fence;
+
+ rcu_read_lock();
+ fence = reservation_object_get_excl_rcu(obj->resv);
+ rcu_read_unlock();
+
+ if (fence && dma_fence_is_i915(fence) && !dma_fence_is_signaled(fence))
+ engine = to_request(fence)->engine;
+ dma_fence_put(fence);
+
+ return engine;
+}
+
+#endif
+
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index 0b3b051a5683..27e8f257fb39 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -113,6 +113,82 @@ i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
spin_unlock(&file_priv->mm.lock);
}
+static struct i915_dependency *
+i915_dependency_alloc(struct drm_i915_private *i915)
+{
+ return kmem_cache_alloc(i915->dependencies, GFP_KERNEL);
+}
+
+static void
+i915_dependency_free(struct drm_i915_private *i915,
+ struct i915_dependency *dep)
+{
+ kmem_cache_free(i915->dependencies, dep);
+}
+
+static void
+__i915_priotree_add_dependency(struct i915_priotree *pt,
+ struct i915_priotree *signal,
+ struct i915_dependency *dep,
+ unsigned long flags)
+{
+ INIT_LIST_HEAD(&dep->dfs_link);
+ list_add(&dep->wait_link, &signal->waiters_list);
+ list_add(&dep->signal_link, &pt->signalers_list);
+ dep->signaler = signal;
+ dep->flags = flags;
+}
+
+static int
+i915_priotree_add_dependency(struct drm_i915_private *i915,
+ struct i915_priotree *pt,
+ struct i915_priotree *signal)
+{
+ struct i915_dependency *dep;
+
+ dep = i915_dependency_alloc(i915);
+ if (!dep)
+ return -ENOMEM;
+
+ __i915_priotree_add_dependency(pt, signal, dep, I915_DEPENDENCY_ALLOC);
+ return 0;
+}
+
+static void
+i915_priotree_fini(struct drm_i915_private *i915, struct i915_priotree *pt)
+{
+ struct i915_dependency *dep, *next;
+
+ GEM_BUG_ON(!RB_EMPTY_NODE(&pt->node));
+
+ /* Everyone we depended upon (the fences we wait to be signaled)
+ * should retire before us and remove themselves from our list.
+ * However, retirement is run independently on each timeline and
+ * so we may be called out-of-order.
+ */
+ list_for_each_entry_safe(dep, next, &pt->signalers_list, signal_link) {
+ list_del(&dep->wait_link);
+ if (dep->flags & I915_DEPENDENCY_ALLOC)
+ i915_dependency_free(i915, dep);
+ }
+
+ /* Remove ourselves from everyone who depends upon us */
+ list_for_each_entry_safe(dep, next, &pt->waiters_list, wait_link) {
+ list_del(&dep->signal_link);
+ if (dep->flags & I915_DEPENDENCY_ALLOC)
+ i915_dependency_free(i915, dep);
+ }
+}
+
+static void
+i915_priotree_init(struct i915_priotree *pt)
+{
+ INIT_LIST_HEAD(&pt->signalers_list);
+ INIT_LIST_HEAD(&pt->waiters_list);
+ RB_CLEAR_NODE(&pt->node);
+ pt->priority = INT_MIN;
+}
+
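
Each i915_dependency above is linked onto two lists at once - the signaler's waiters_list and the waiter's signalers_list - so the scheduler can walk the dependency DAG in either direction. A small model of that bidirectional registration (fixed arrays standing in for the kernel's intrusive lists):

#include <stdio.h>

#define MAX_DEPS 8

struct dep;

struct pt {
	const char *name;
	struct dep *signalers[MAX_DEPS], *waiters[MAX_DEPS];
	int nsignalers, nwaiters;
};

struct dep { struct pt *signaler, *waiter; };

static void add_dependency(struct dep *d, struct pt *waiter, struct pt *signal)
{
	d->signaler = signal;
	d->waiter = waiter;
	signal->waiters[signal->nwaiters++] = d;	/* forward edge */
	waiter->signalers[waiter->nsignalers++] = d;	/* backward edge */
}

int main(void)
{
	struct pt render = { .name = "render" }, copy = { .name = "copy" };
	struct dep d;

	add_dependency(&d, &copy, &render);	/* copy waits on render */
	printf("%s waits on %s\n",
	       render.waiters[0]->waiter->name,
	       copy.signalers[0]->signaler->name);
	return 0;
}
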
void i915_gem_retire_noop(struct i915_gem_active *active,
struct drm_i915_gem_request *request)
{
@@ -124,7 +200,10 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
struct i915_gem_active *active, *next;
lockdep_assert_held(&request->i915->drm.struct_mutex);
+ GEM_BUG_ON(!i915_sw_fence_done(&request->submit));
+ GEM_BUG_ON(!i915_sw_fence_done(&request->execute));
GEM_BUG_ON(!i915_gem_request_completed(request));
+ GEM_BUG_ON(!request->i915->gt.active_requests);
trace_i915_gem_request_retire(request);
@@ -142,7 +221,12 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
*/
list_del(&request->ring_link);
request->ring->last_retired_head = request->postfix;
- request->i915->gt.active_requests--;
+ if (!--request->i915->gt.active_requests) {
+ GEM_BUG_ON(!request->i915->gt.awake);
+ mod_delayed_work(request->i915->wq,
+ &request->i915->gt.idle_work,
+ msecs_to_jiffies(100));
+ }
/* Walk through the active list, calling retire on each. This allows
* objects to track their GPU activity and mark themselves as idle
@@ -182,6 +266,8 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
i915_gem_context_put(request->ctx);
dma_fence_signal(&request->fence);
+
+ i915_priotree_fini(request->i915, &request->priotree);
i915_gem_request_put(request);
}
@@ -241,9 +327,8 @@ static int i915_gem_init_global_seqno(struct drm_i915_private *i915, u32 seqno)
/* If the seqno wraps around, we need to clear the breadcrumb rbtree */
if (!i915_seqno_passed(seqno, atomic_read(&timeline->next_seqno))) {
- while (intel_kick_waiters(i915) || intel_kick_signalers(i915))
- yield();
- yield();
+ while (intel_breadcrumbs_busy(i915))
+ cond_resched(); /* spin until threads are complete */
}
atomic_set(&timeline->next_seqno, seqno);
@@ -307,25 +392,16 @@ static u32 timeline_get_seqno(struct i915_gem_timeline *tl)
return atomic_inc_return(&tl->next_seqno);
}
-static int __i915_sw_fence_call
-submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
+void __i915_gem_request_submit(struct drm_i915_gem_request *request)
{
- struct drm_i915_gem_request *request =
- container_of(fence, typeof(*request), submit);
struct intel_engine_cs *engine = request->engine;
struct intel_timeline *timeline;
- unsigned long flags;
u32 seqno;
- if (state != FENCE_COMPLETE)
- return NOTIFY_DONE;
-
/* Transfer from per-context onto the global per-engine timeline */
timeline = engine->timeline;
GEM_BUG_ON(timeline == request->timeline);
-
- /* Will be called from irq-context when using foreign DMA fences */
- spin_lock_irqsave(&timeline->lock, flags);
+ assert_spin_locked(&timeline->lock);
seqno = timeline_get_seqno(timeline->common);
GEM_BUG_ON(!seqno);
@@ -345,14 +421,43 @@ submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
GEM_BUG_ON(!request->global_seqno);
engine->emit_breadcrumb(request,
request->ring->vaddr + request->postfix);
- engine->submit_request(request);
- spin_lock_nested(&request->timeline->lock, SINGLE_DEPTH_NESTING);
+ spin_lock(&request->timeline->lock);
list_move_tail(&request->link, &timeline->requests);
spin_unlock(&request->timeline->lock);
- spin_unlock_irqrestore(&timeline->lock, flags);
+ i915_sw_fence_commit(&request->execute);
+}
+
+void i915_gem_request_submit(struct drm_i915_gem_request *request)
+{
+ struct intel_engine_cs *engine = request->engine;
+ unsigned long flags;
+
+ /* Will be called from irq-context when using foreign fences. */
+ spin_lock_irqsave(&engine->timeline->lock, flags);
+
+ __i915_gem_request_submit(request);
+
+ spin_unlock_irqrestore(&engine->timeline->lock, flags);
+}
+
+static int __i915_sw_fence_call
+submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
+{
+ if (state == FENCE_COMPLETE) {
+ struct drm_i915_gem_request *request =
+ container_of(fence, typeof(*request), submit);
+
+ request->engine->submit_request(request);
+ }
+
+ return NOTIFY_DONE;
+}
+
+static int __i915_sw_fence_call
+execute_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
+{
return NOTIFY_DONE;
}
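
The split here gives each request two fences: submit (all dependencies met, handed to the backend) and execute (actually transferred to the hardware timeline), with execute chained behind submit so that its completion can never be observed first. A minimal model of that ordering guarantee (plain structs standing in for i915_sw_fence):

#include <stdio.h>

struct fence {
	int done;
	struct fence *chained;	/* may only complete after this one */
};

static void fence_signal(struct fence *f)
{
	f->done = 1;
	if (f->chained)
		fence_signal(f->chained);	/* execute rides behind submit */
}

int main(void)
{
	struct fence execute = { 0, NULL };
	struct fence submit = { 0, &execute };	/* i915_sw_fence_await_sw_fence() */

	fence_signal(&submit);	/* submit first, execute follows */
	printf("submit=%d execute=%d\n", submit.done, execute.done);
	return 0;
}
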
@@ -441,6 +546,14 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
__timeline_get_seqno(req->timeline->common));
i915_sw_fence_init(&req->submit, submit_notify);
+ i915_sw_fence_init(&req->execute, execute_notify);
+ /* Ensure that the execute fence completes after the submit fence -
+ * as we complete the execute fence from within the submit fence
+ * callback, its completion would otherwise be visible first.
+ */
+ i915_sw_fence_await_sw_fence(&req->execute, &req->submit, &req->execq);
+
+ i915_priotree_init(&req->priotree);
INIT_LIST_HEAD(&req->active_list);
req->i915 = dev_priv;
@@ -495,6 +608,14 @@ i915_gem_request_await_request(struct drm_i915_gem_request *to,
GEM_BUG_ON(to == from);
+ if (to->engine->schedule) {
+ ret = i915_priotree_add_dependency(to->i915,
+ &to->priotree,
+ &from->priotree);
+ if (ret < 0)
+ return ret;
+ }
+
if (to->timeline == from->timeline)
return 0;
@@ -650,6 +771,8 @@ static void i915_gem_mark_busy(const struct intel_engine_cs *engine)
if (dev_priv->gt.awake)
return;
+ GEM_BUG_ON(!dev_priv->gt.active_requests);
+
intel_runtime_pm_get_noresume(dev_priv);
dev_priv->gt.awake = true;
@@ -718,9 +841,15 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
prev = i915_gem_active_raw(&timeline->last_request,
&request->i915->drm.struct_mutex);
- if (prev)
+ if (prev) {
i915_sw_fence_await_sw_fence(&request->submit, &prev->submit,
&request->submitq);
+ if (engine->schedule)
+ __i915_priotree_add_dependency(&request->priotree,
+ &prev->priotree,
+ &request->dep,
+ 0);
+ }
spin_lock_irq(&timeline->lock);
list_add_tail(&request->link, &timeline->requests);
@@ -737,6 +866,19 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
i915_gem_mark_busy(engine);
+ /* Let the backend know a new request has arrived that may need
+ * to adjust the existing execution schedule due to a high priority
+ * request - i.e. we may want to preempt the current request in order
+ * to run a high priority dependency chain *before* we can execute this
+ * request.
+ *
+ * This is called before the request is ready to run so that we can
+ * decide whether to preempt the entire chain so that it is ready to
+ * run at the earliest possible convenience.
+ */
+ if (engine->schedule)
+ engine->schedule(request, request->ctx->priority);
+
local_bh_disable();
i915_sw_fence_commit(&request->submit);
local_bh_enable(); /* Kick the execlists tasklet if just scheduled */
@@ -817,9 +959,9 @@ bool __i915_spin_request(const struct drm_i915_gem_request *req,
}
static long
-__i915_request_wait_for_submit(struct drm_i915_gem_request *request,
- unsigned int flags,
- long timeout)
+__i915_request_wait_for_execute(struct drm_i915_gem_request *request,
+ unsigned int flags,
+ long timeout)
{
const int state = flags & I915_WAIT_INTERRUPTIBLE ?
TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
@@ -831,9 +973,9 @@ __i915_request_wait_for_submit(struct drm_i915_gem_request *request,
add_wait_queue(q, &reset);
do {
- prepare_to_wait(&request->submit.wait, &wait, state);
+ prepare_to_wait(&request->execute.wait, &wait, state);
- if (i915_sw_fence_done(&request->submit))
+ if (i915_sw_fence_done(&request->execute))
break;
if (flags & I915_WAIT_LOCKED &&
@@ -851,7 +993,7 @@ __i915_request_wait_for_submit(struct drm_i915_gem_request *request,
timeout = io_schedule_timeout(timeout);
} while (timeout);
- finish_wait(&request->submit.wait, &wait);
+ finish_wait(&request->execute.wait, &wait);
if (flags & I915_WAIT_LOCKED)
remove_wait_queue(q, &reset);
@@ -903,13 +1045,14 @@ long i915_wait_request(struct drm_i915_gem_request *req,
trace_i915_gem_request_wait_begin(req);
- if (!i915_sw_fence_done(&req->submit)) {
- timeout = __i915_request_wait_for_submit(req, flags, timeout);
+ if (!i915_sw_fence_done(&req->execute)) {
+ timeout = __i915_request_wait_for_execute(req, flags, timeout);
if (timeout < 0)
goto complete;
- GEM_BUG_ON(!i915_sw_fence_done(&req->submit));
+ GEM_BUG_ON(!i915_sw_fence_done(&req->execute));
}
+ GEM_BUG_ON(!i915_sw_fence_done(&req->submit));
GEM_BUG_ON(!req->global_seqno);
/* Optimistic short spin before touching IRQs */
@@ -1013,13 +1156,6 @@ void i915_gem_retire_requests(struct drm_i915_private *dev_priv)
if (!dev_priv->gt.active_requests)
return;
- GEM_BUG_ON(!dev_priv->gt.awake);
-
for_each_engine(engine, dev_priv, id)
engine_retire_requests(engine);
-
- if (!dev_priv->gt.active_requests)
- mod_delayed_work(dev_priv->wq,
- &dev_priv->gt.idle_work,
- msecs_to_jiffies(100));
}
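
With the two hunks above, the idle handoff moves into retirement itself: the thread that drops gt.active_requests to zero is the one that arms the delayed idle worker, instead of re-checking after the retire loop. A sketch of that handoff (names illustrative):

#include <stdio.h>

static unsigned int active_requests;

static void mod_idle_work(void)
{
	puts("idle work scheduled");	/* mod_delayed_work(..., 100ms) */
}

static void retire_request(void)
{
	if (!--active_requests)		/* last request just retired */
		mod_idle_work();
}

int main(void)
{
	active_requests = 2;
	retire_request();	/* still busy: nothing scheduled */
	retire_request();	/* drops to zero: arms the idle worker */
	return 0;
}
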
diff --git a/drivers/gpu/drm/i915/i915_gem_request.h b/drivers/gpu/drm/i915/i915_gem_request.h
index 75f8360b3421..e2b077df2da0 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.h
+++ b/drivers/gpu/drm/i915/i915_gem_request.h
@@ -30,6 +30,9 @@
#include "i915_gem.h"
#include "i915_sw_fence.h"
+struct drm_file;
+struct drm_i915_gem_object;
+
struct intel_wait {
struct rb_node node;
struct task_struct *tsk;
@@ -41,6 +44,33 @@ struct intel_signal_node {
struct intel_wait wait;
};
+struct i915_dependency {
+ struct i915_priotree *signaler;
+ struct list_head signal_link;
+ struct list_head wait_link;
+ struct list_head dfs_link;
+ unsigned long flags;
+#define I915_DEPENDENCY_ALLOC BIT(0)
+};
+
+/* Requests exist in a complex web of interdependencies. Each request
+ * has to wait for some other request to complete before it is ready to be run
+ * (e.g. we have to wait until the pixels have been rendered into a texture
+ * before we can copy from it). We track the readiness of a request in terms
+ * of fences, but we also need to keep the dependency tree for the lifetime
+ * of the request (beyond the life of an individual fence). We use the tree
+ * at various points to reorder the requests whilst keeping the requests
+ * in order with respect to their various dependencies.
+ */
+struct i915_priotree {
+ struct list_head signalers_list; /* those before us, we depend upon */
+ struct list_head waiters_list; /* those after us, they depend upon us */
+ struct rb_node node;
+ int priority;
+#define I915_PRIORITY_MAX 1024
+#define I915_PRIORITY_MIN (-I915_PRIORITY_MAX)
+};
+
/**
* Request queue structure.
*
@@ -84,8 +114,34 @@ struct drm_i915_gem_request {
struct intel_timeline *timeline;
struct intel_signal_node signaling;
+ /* Fences for the various phases in the request's lifetime.
+ *
+ * The submit fence is used to await upon all of the request's
+ * dependencies. When it is signaled, the request is ready to run.
+ * It is used by the driver to then queue the request for execution.
+ *
+ * The execute fence is used to signal when the request has been
+ * sent to hardware.
+ *
+ * It is illegal for the submit fence of one request to wait upon the
+ * execute fence of an earlier request. It should be sufficient to
+ * wait upon the submit fence of the earlier request.
+ */
struct i915_sw_fence submit;
+ struct i915_sw_fence execute;
wait_queue_t submitq;
+ wait_queue_t execq;
+
+ /* A list of everyone we wait upon, and everyone who waits upon us.
+ * Even though we will not be submitted to the hardware before the
+ * submit fence is signaled (it waits for all external events as well
+ * as our own requests), the scheduler still needs to know the
+ * dependency tree for the lifetime of the request (from execbuf
+ * to retirement), i.e. bidirectional dependency information for the
+ * request not tied to individual fences.
+ */
+ struct i915_priotree priotree;
+ struct i915_dependency dep;
u32 global_seqno;
@@ -143,9 +199,6 @@ struct drm_i915_gem_request {
struct drm_i915_file_private *file_priv;
/** file_priv list entry for this request */
struct list_head client_list;
-
- /** Link in the execlist submission queue, guarded by execlist_lock. */
- struct list_head execlist_link;
};
extern const struct dma_fence_ops i915_fence_ops;
@@ -162,18 +215,6 @@ int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
struct drm_file *file);
void i915_gem_request_retire_upto(struct drm_i915_gem_request *req);
-static inline u32
-i915_gem_request_get_seqno(struct drm_i915_gem_request *req)
-{
- return req ? req->global_seqno : 0;
-}
-
-static inline struct intel_engine_cs *
-i915_gem_request_get_engine(struct drm_i915_gem_request *req)
-{
- return req ? req->engine : NULL;
-}
-
static inline struct drm_i915_gem_request *
to_request(struct dma_fence *fence)
{
@@ -226,6 +267,9 @@ void __i915_add_request(struct drm_i915_gem_request *req, bool flush_caches);
#define i915_add_request_no_flush(req) \
__i915_add_request(req, false)
+void __i915_gem_request_submit(struct drm_i915_gem_request *request);
+void i915_gem_request_submit(struct drm_i915_gem_request *request);
+
struct intel_rps_client;
#define NO_WAITBOOST ERR_PTR(-1)
#define IS_RPS_CLIENT(p) (!IS_ERR(p))
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index b1d367dba347..ebaa941c83af 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -89,9 +89,8 @@ void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
mutex_unlock(&dev_priv->mm.stolen_lock);
}
-static unsigned long i915_stolen_to_physical(struct drm_device *dev)
+static unsigned long i915_stolen_to_physical(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct pci_dev *pdev = dev_priv->drm.pdev;
struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct resource *r;
@@ -253,7 +252,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
* kernel. So if the region is already marked as busy, something
* is seriously wrong.
*/
- r = devm_request_mem_region(dev->dev, base, ggtt->stolen_size,
+ r = devm_request_mem_region(dev_priv->drm.dev, base, ggtt->stolen_size,
"Graphics Stolen Memory");
if (r == NULL) {
/*
@@ -264,7 +263,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
* PCI bus, but have an off-by-one error. Hence retry the
* reservation starting from 1 instead of 0.
*/
- r = devm_request_mem_region(dev->dev, base + 1,
+ r = devm_request_mem_region(dev_priv->drm.dev, base + 1,
ggtt->stolen_size - 1,
"Graphics Stolen Memory");
/*
@@ -408,9 +407,8 @@ static void bdw_get_stolen_reserved(struct drm_i915_private *dev_priv,
*size = stolen_top - *base;
}
-int i915_gem_init_stolen(struct drm_device *dev)
+int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct i915_ggtt *ggtt = &dev_priv->ggtt;
unsigned long reserved_total, reserved_base = 0, reserved_size;
unsigned long stolen_top;
@@ -418,7 +416,7 @@ int i915_gem_init_stolen(struct drm_device *dev)
mutex_init(&dev_priv->mm.stolen_lock);
#ifdef CONFIG_INTEL_IOMMU
- if (intel_iommu_gfx_mapped && INTEL_INFO(dev)->gen < 8) {
+ if (intel_iommu_gfx_mapped && INTEL_GEN(dev_priv) < 8) {
DRM_INFO("DMAR active, disabling use of stolen memory\n");
return 0;
}
@@ -427,7 +425,7 @@ int i915_gem_init_stolen(struct drm_device *dev)
if (ggtt->stolen_size == 0)
return 0;
- dev_priv->mm.stolen_base = i915_stolen_to_physical(dev);
+ dev_priv->mm.stolen_base = i915_stolen_to_physical(dev_priv);
if (dev_priv->mm.stolen_base == 0)
return 0;
@@ -515,12 +513,10 @@ i915_pages_create_for_stolen(struct drm_device *dev,
u32 offset, u32 size)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct sg_table *st;
struct scatterlist *sg;
- DRM_DEBUG_DRIVER("offset=0x%x, size=%d\n", offset, size);
- BUG_ON(offset > ggtt->stolen_size - size);
+ GEM_BUG_ON(offset > dev_priv->ggtt.stolen_size - size);
/* We hide that we have no struct page backing our stolen object
* by wrapping the contiguous physical allocation with a fake
@@ -529,11 +525,11 @@ i915_pages_create_for_stolen(struct drm_device *dev,
st = kmalloc(sizeof(*st), GFP_KERNEL);
if (st == NULL)
- return NULL;
+ return ERR_PTR(-ENOMEM);
if (sg_alloc_table(st, 1, GFP_KERNEL)) {
kfree(st);
- return NULL;
+ return ERR_PTR(-ENOMEM);
}
sg = st->sgl;
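
Returning ERR_PTR(-ENOMEM) here instead of NULL lets callers learn why the allocation failed from the pointer itself, per the "i915_pages_create_for_stolen should return err ptr" change. A user-space model of the kernel's ERR_PTR/IS_ERR encoding, where error codes live in the top page of the address space:

#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO 4095

static void *ERR_PTR(long error) { return (void *)error; }
static long PTR_ERR(const void *ptr) { return (long)ptr; }
static int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void *create_table(int fail)
{
	static int table;

	if (fail)
		return ERR_PTR(-ENOMEM);
	return &table;
}

int main(void)
{
	void *st = create_table(1);

	if (IS_ERR(st))
		printf("error %ld\n", PTR_ERR(st));	/* -12 (ENOMEM) */
	return 0;
}
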
@@ -557,7 +553,7 @@ i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj,
struct sg_table *pages)
{
- /* Should only be called during free */
+ /* Should only be called from i915_gem_object_release_stolen() */
sg_free_table(pages);
kfree(pages);
}
@@ -566,15 +562,16 @@ static void
i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+ struct drm_mm_node *stolen = fetch_and_zero(&obj->stolen);
+
+ GEM_BUG_ON(!stolen);
__i915_gem_object_unpin_pages(obj);
- if (obj->stolen) {
- i915_gem_stolen_remove_node(dev_priv, obj->stolen);
- kfree(obj->stolen);
- obj->stolen = NULL;
- }
+ i915_gem_stolen_remove_node(dev_priv, stolen);
+ kfree(stolen);
}
+
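
The new release path leans on fetch_and_zero() to take ownership of obj->stolen and clear the pointer in a single expression, so a repeated call trips the GEM_BUG_ON rather than double-freeing. A model of the macro (a GCC statement expression, in the style of the kernel's helper):

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

#define fetch_and_zero(ptr) ({			\
	__typeof__(*(ptr)) T__ = *(ptr);	\
	*(ptr) = (__typeof__(*(ptr)))0;		\
	T__;					\
})

int main(void)
{
	char *stolen = malloc(16);
	char *taken = fetch_and_zero(&stolen);

	assert(stolen == NULL);	/* source cleared: no double free later */
	free(taken);		/* we now own the allocation */
	puts("ok");
	return 0;
}
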
static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
.get_pages = i915_gem_object_get_pages_stolen,
.put_pages = i915_gem_object_put_pages_stolen,
@@ -596,7 +593,8 @@ _i915_gem_object_create_stolen(struct drm_device *dev,
obj->stolen = stolen;
obj->base.read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
- obj->cache_level = HAS_LLC(dev) ? I915_CACHE_LLC : I915_CACHE_NONE;
+ obj->cache_level = HAS_LLC(to_i915(dev)) ?
+ I915_CACHE_LLC : I915_CACHE_NONE;
if (i915_gem_object_pin_pages(obj))
goto cleanup;
@@ -619,7 +617,6 @@ i915_gem_object_create_stolen(struct drm_device *dev, u32 size)
if (!drm_mm_initialized(&dev_priv->mm.stolen))
return NULL;
- DRM_DEBUG_KMS("creating stolen object: size=%x\n", size);
if (size == 0)
return NULL;
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 251d51b01174..c85e7b06bdba 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -60,9 +60,9 @@
/* Check pitch constraints for all chips & tiling formats */
static bool
-i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
+i915_tiling_ok(struct drm_i915_private *dev_priv,
+ int stride, int size, int tiling_mode)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
int tile_width;
/* Linear is always fine */
@@ -81,10 +81,10 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
/* check maximum stride & object size */
/* i965+ stores the end address of the gtt mapping in the fence
* reg, so don't bother to check the size */
- if (INTEL_INFO(dev)->gen >= 7) {
+ if (INTEL_GEN(dev_priv) >= 7) {
if (stride / 128 > GEN7_FENCE_MAX_PITCH_VAL)
return false;
- } else if (INTEL_INFO(dev)->gen >= 4) {
+ } else if (INTEL_GEN(dev_priv) >= 4) {
if (stride / 128 > I965_FENCE_MAX_PITCH_VAL)
return false;
} else {
@@ -104,7 +104,7 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
return false;
/* 965+ just needs multiples of tile width */
- if (INTEL_INFO(dev)->gen >= 4) {
+ if (INTEL_GEN(dev_priv) >= 4) {
if (stride & (tile_width - 1))
return false;
return true;
@@ -199,7 +199,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
if (!obj)
return -ENOENT;
- if (!i915_tiling_ok(dev,
+ if (!i915_tiling_ok(dev_priv,
args->stride, obj->base.size, args->tiling_mode)) {
i915_gem_object_put(obj);
return -EINVAL;
diff --git a/drivers/gpu/drm/i915/i915_gem_timeline.c b/drivers/gpu/drm/i915/i915_gem_timeline.c
index fc8f13a79f8f..bf8a471b61e6 100644
--- a/drivers/gpu/drm/i915/i915_gem_timeline.c
+++ b/drivers/gpu/drm/i915/i915_gem_timeline.c
@@ -24,9 +24,11 @@
#include "i915_drv.h"
-int i915_gem_timeline_init(struct drm_i915_private *i915,
- struct i915_gem_timeline *timeline,
- const char *name)
+static int __i915_gem_timeline_init(struct drm_i915_private *i915,
+ struct i915_gem_timeline *timeline,
+ const char *name,
+ struct lock_class_key *lockclass,
+ const char *lockname)
{
unsigned int i;
u64 fences;
@@ -47,8 +49,11 @@ int i915_gem_timeline_init(struct drm_i915_private *i915,
tl->fence_context = fences++;
tl->common = timeline;
-
+#ifdef CONFIG_DEBUG_SPINLOCK
+ __raw_spin_lock_init(&tl->lock.rlock, lockname, lockclass);
+#else
spin_lock_init(&tl->lock);
+#endif
init_request_active(&tl->last_request, NULL);
INIT_LIST_HEAD(&tl->requests);
}
@@ -56,6 +61,26 @@ int i915_gem_timeline_init(struct drm_i915_private *i915,
return 0;
}
+int i915_gem_timeline_init(struct drm_i915_private *i915,
+ struct i915_gem_timeline *timeline,
+ const char *name)
+{
+ static struct lock_class_key class;
+
+ return __i915_gem_timeline_init(i915, timeline, name,
+ &class, "&timeline->lock");
+}
+
+int i915_gem_timeline_init__global(struct drm_i915_private *i915)
+{
+ static struct lock_class_key class;
+
+ return __i915_gem_timeline_init(i915,
+ &i915->gt.global_timeline,
+ "[execution]",
+ &class, "&global_timeline->lock");
+}
+
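
The point of threading a lock_class_key through __i915_gem_timeline_init() is lockdep classification: every lock initialised from the same static key shares one class, so the per-context timelines form a single class while the global "[execution]" timeline gets its own, keeping cross-timeline nesting reports meaningful. A model of why a static key per call site yields one class per constructor rather than per lock instance:

#include <stdio.h>

struct lock_class_key { int dummy; };

static void lock_init(const char *name, struct lock_class_key *key)
{
	printf("lock '%s' -> class %p\n", name, (void *)key);
}

static void timeline_init(const char *name)
{
	static struct lock_class_key class;	/* one key per constructor */

	lock_init(name, &class);
}

static void global_timeline_init(void)
{
	static struct lock_class_key class;	/* distinct class for [execution] */

	lock_init("[execution]", &class);
}

int main(void)
{
	timeline_init("ctx-a");		/* same class as ctx-b */
	timeline_init("ctx-b");
	global_timeline_init();		/* different class */
	return 0;
}
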
void i915_gem_timeline_fini(struct i915_gem_timeline *tl)
{
lockdep_assert_held(&tl->i915->drm.struct_mutex);
diff --git a/drivers/gpu/drm/i915/i915_gem_timeline.h b/drivers/gpu/drm/i915/i915_gem_timeline.h
index f2bf7b1d49a1..98d99a62b4ae 100644
--- a/drivers/gpu/drm/i915/i915_gem_timeline.h
+++ b/drivers/gpu/drm/i915/i915_gem_timeline.h
@@ -67,6 +67,7 @@ struct i915_gem_timeline {
int i915_gem_timeline_init(struct drm_i915_private *i915,
struct i915_gem_timeline *tl,
const char *name);
+int i915_gem_timeline_init__global(struct drm_i915_private *i915);
void i915_gem_timeline_fini(struct i915_gem_timeline *tl);
#endif
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index 64261639f547..107ddf51065e 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -753,12 +753,13 @@ static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
int
i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_userptr *args = data;
struct drm_i915_gem_object *obj;
int ret;
u32 handle;
- if (!HAS_LLC(dev) && !HAS_SNOOP(dev)) {
+ if (!HAS_LLC(dev_priv) && !HAS_SNOOP(dev_priv)) {
/* We cannot support coherent userptr objects on hw without
* LLC and broken snooping.
*/
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 204093f3eaa5..ae84aa4b1467 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -528,8 +528,7 @@ static void err_print_capabilities(struct drm_i915_error_state_buf *m,
int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
const struct i915_error_state_file_priv *error_priv)
{
- struct drm_device *dev = error_priv->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(error_priv->dev);
struct pci_dev *pdev = dev_priv->drm.pdev;
struct drm_i915_error_state *error = error_priv->error;
struct drm_i915_error_object *obj;
@@ -573,7 +572,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
pdev->subsystem_device);
err_printf(m, "IOMMU enabled?: %d\n", error->iommu);
- if (HAS_CSR(dev)) {
+ if (HAS_CSR(dev_priv)) {
struct intel_csr *csr = &dev_priv->csr;
err_printf(m, "DMC loaded: %s\n",
@@ -585,7 +584,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
err_printf(m, "EIR: 0x%08x\n", error->eir);
err_printf(m, "IER: 0x%08x\n", error->ier);
- if (INTEL_INFO(dev)->gen >= 8) {
+ if (INTEL_GEN(dev_priv) >= 8) {
for (i = 0; i < 4; i++)
err_printf(m, "GTIER gt %d: 0x%08x\n", i,
error->gtier[i]);
@@ -600,10 +599,10 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
for (i = 0; i < dev_priv->num_fence_regs; i++)
err_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]);
- if (INTEL_INFO(dev)->gen >= 6) {
+ if (INTEL_GEN(dev_priv) >= 6) {
err_printf(m, "ERROR: 0x%08x\n", error->error);
- if (INTEL_INFO(dev)->gen >= 8)
+ if (INTEL_GEN(dev_priv) >= 8)
err_printf(m, "FAULT_TLB_DATA: 0x%08x 0x%08x\n",
error->fault_data1, error->fault_data0);
@@ -708,7 +707,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
intel_overlay_print_error_state(m, error->overlay);
if (error->display)
- intel_display_print_error_state(m, dev, error->display);
+ intel_display_print_error_state(m, dev_priv, error->display);
out:
if (m->bytes == 0 && m->err)
@@ -861,16 +860,19 @@ out:
static inline uint32_t
__active_get_seqno(struct i915_gem_active *active)
{
- return i915_gem_request_get_seqno(__i915_gem_active_peek(active));
+ struct drm_i915_gem_request *request;
+
+ request = __i915_gem_active_peek(active);
+ return request ? request->global_seqno : 0;
}
static inline int
__active_get_engine_id(struct i915_gem_active *active)
{
- struct intel_engine_cs *engine;
+ struct drm_i915_gem_request *request;
- engine = i915_gem_request_get_engine(__i915_gem_active_peek(active));
- return engine ? engine->id : -1;
+ request = __i915_gem_active_peek(active);
+ return request ? request->engine->id : -1;
}
static void capture_bo(struct drm_i915_error_buffer *err,
@@ -884,8 +886,8 @@ static void capture_bo(struct drm_i915_error_buffer *err,
for (i = 0; i < I915_NUM_ENGINES; i++)
err->rseqno[i] = __active_get_seqno(&vma->last_read[i]);
- err->wseqno = __active_get_seqno(&vma->last_write);
- err->engine = __active_get_engine_id(&vma->last_write);
+ err->wseqno = __active_get_seqno(&obj->frontbuffer_write);
+ err->engine = __active_get_engine_id(&obj->frontbuffer_write);
err->gtt_offset = vma->node.start;
err->read_domains = obj->base.read_domains;
@@ -1440,7 +1442,6 @@ static void i915_gem_capture_guc_log_buffer(struct drm_i915_private *dev_priv,
static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
struct drm_i915_error_state *error)
{
- struct drm_device *dev = &dev_priv->drm;
int i;
/* General organization
@@ -1461,7 +1462,7 @@ static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
if (IS_GEN7(dev_priv))
error->err_int = I915_READ(GEN7_ERR_INT);
- if (INTEL_INFO(dev)->gen >= 8) {
+ if (INTEL_GEN(dev_priv) >= 8) {
error->fault_data0 = I915_READ(GEN8_FAULT_TLB_DATA0);
error->fault_data1 = I915_READ(GEN8_FAULT_TLB_DATA1);
}
@@ -1473,10 +1474,10 @@ static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
}
/* 2: Registers which belong to multiple generations */
- if (INTEL_INFO(dev)->gen >= 7)
+ if (INTEL_GEN(dev_priv) >= 7)
error->forcewake = I915_READ_FW(FORCEWAKE_MT);
- if (INTEL_INFO(dev)->gen >= 6) {
+ if (INTEL_GEN(dev_priv) >= 6) {
error->derrmr = I915_READ(DERRMR);
error->error = I915_READ(ERROR_GEN6);
error->done_reg = I915_READ(DONE_REG);
@@ -1489,10 +1490,10 @@ static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
}
/* 4: Everything else */
- if (HAS_HW_CONTEXTS(dev))
+ if (HAS_HW_CONTEXTS(dev_priv))
error->ccid = I915_READ(CCID);
- if (INTEL_INFO(dev)->gen >= 8) {
+ if (INTEL_GEN(dev_priv) >= 8) {
error->ier = I915_READ(GEN8_DE_MISC_IER);
for (i = 0; i < 4; i++)
error->gtier[i] = I915_READ(GEN8_GT_IER(i));
diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
index 666dab7a675a..4462112725ef 100644
--- a/drivers/gpu/drm/i915/i915_guc_submission.c
+++ b/drivers/gpu/drm/i915/i915_guc_submission.c
@@ -629,11 +629,23 @@ static int guc_ring_doorbell(struct i915_guc_client *gc)
static void i915_guc_submit(struct drm_i915_gem_request *rq)
{
struct drm_i915_private *dev_priv = rq->i915;
- unsigned int engine_id = rq->engine->id;
+ struct intel_engine_cs *engine = rq->engine;
+ unsigned int engine_id = engine->id;
struct intel_guc *guc = &rq->i915->guc;
struct i915_guc_client *client = guc->execbuf_client;
int b_ret;
+ /* We keep the previous context alive until we retire the following
+ * request. This ensures that the context object is still pinned
+ * for any residual writes the HW makes into it on the context switch
+ * into the next object following the breadcrumb. Otherwise, we may
+ * retire the context too early.
+ */
+ rq->previous_context = engine->last_context;
+ engine->last_context = rq->ctx;
+
+ i915_gem_request_submit(rq);
+
spin_lock(&client->wq_lock);
guc_wq_item_append(client, rq);
@@ -1520,6 +1532,7 @@ int i915_guc_submission_enable(struct drm_i915_private *dev_priv)
/* Take over from manual control of ELSP (execlists) */
for_each_engine(engine, dev_priv, id) {
engine->submit_request = i915_guc_submit;
+ engine->schedule = NULL;
/* Replay the current set of previously submitted requests */
list_for_each_entry(request,
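
The comment added to i915_guc_submit() describes a hand-over scheme: each request pins the context that was live before it, so the hardware can finish any residual writes during the context switch before the old context is retired; the hunk also clears engine->schedule, since the GuC backend does not implement the new scheduler hook. A reduced sketch of that ownership pattern with hypothetical demo types (the driver's real bookkeeping is rq->previous_context and engine->last_context):

struct demo_ctx {
	unsigned int pin_count;
};

struct demo_request {
	struct demo_ctx *ctx;		/* context this request executes in */
	struct demo_ctx *previous;	/* pinned until this request retires */
};

struct demo_engine {
	struct demo_ctx *last_context;
};

/* Submit: the outgoing context becomes rq->previous and stays pinned. */
static void demo_submit(struct demo_engine *engine, struct demo_request *rq)
{
	rq->previous = engine->last_context;
	engine->last_context = rq->ctx;
}

/* Retire: the breadcrumb proves the switch away from the previous
 * context has completed, so only now may its pin be dropped.
 */
static void demo_retire(struct demo_request *rq)
{
	if (rq->previous)
		rq->previous->pin_count--;
}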
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 6d7505b5c5e7..07ca71cabb2b 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -2848,10 +2848,8 @@ static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe)
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
-static void ibx_irq_reset(struct drm_device *dev)
+static void ibx_irq_reset(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
if (HAS_PCH_NOP(dev_priv))
return;
@@ -2881,12 +2879,10 @@ static void ibx_irq_pre_postinstall(struct drm_device *dev)
POSTING_READ(SDEIER);
}
-static void gen5_gt_irq_reset(struct drm_device *dev)
+static void gen5_gt_irq_reset(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
GEN5_IRQ_RESET(GT);
- if (INTEL_INFO(dev)->gen >= 6)
+ if (INTEL_GEN(dev_priv) >= 6)
GEN5_IRQ_RESET(GEN6_PM);
}
@@ -2951,9 +2947,9 @@ static void ironlake_irq_reset(struct drm_device *dev)
if (IS_GEN7(dev_priv))
I915_WRITE(GEN7_ERR_INT, 0xffffffff);
- gen5_gt_irq_reset(dev);
+ gen5_gt_irq_reset(dev_priv);
- ibx_irq_reset(dev);
+ ibx_irq_reset(dev_priv);
}
static void valleyview_irq_preinstall(struct drm_device *dev)
@@ -2963,7 +2959,7 @@ static void valleyview_irq_preinstall(struct drm_device *dev)
I915_WRITE(VLV_MASTER_IER, 0);
POSTING_READ(VLV_MASTER_IER);
- gen5_gt_irq_reset(dev);
+ gen5_gt_irq_reset(dev_priv);
spin_lock_irq(&dev_priv->irq_lock);
if (dev_priv->display_irqs_enabled)
@@ -2999,7 +2995,7 @@ static void gen8_irq_reset(struct drm_device *dev)
GEN5_IRQ_RESET(GEN8_PCU_);
if (HAS_PCH_SPLIT(dev_priv))
- ibx_irq_reset(dev);
+ ibx_irq_reset(dev_priv);
}
void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
@@ -3222,7 +3218,7 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev)
GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
- if (INTEL_INFO(dev)->gen >= 6) {
+ if (INTEL_GEN(dev_priv) >= 6) {
/*
* RPS interrupts will get enabled/disabled on demand when RPS
* itself is enabled/disabled.
@@ -3242,7 +3238,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
struct drm_i915_private *dev_priv = to_i915(dev);
u32 display_mask, extra_mask;
- if (INTEL_INFO(dev)->gen >= 7) {
+ if (INTEL_GEN(dev_priv) >= 7) {
display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
DE_PLANEB_FLIP_DONE_IVB |
@@ -3466,7 +3462,7 @@ static void valleyview_irq_uninstall(struct drm_device *dev)
I915_WRITE(VLV_MASTER_IER, 0);
POSTING_READ(VLV_MASTER_IER);
- gen5_gt_irq_reset(dev);
+ gen5_gt_irq_reset(dev_priv);
I915_WRITE(HWSTAM, 0xffffffff);
@@ -3678,7 +3674,7 @@ static void i915_irq_preinstall(struct drm_device * dev)
struct drm_i915_private *dev_priv = to_i915(dev);
int pipe;
- if (I915_HAS_HOTPLUG(dev)) {
+ if (I915_HAS_HOTPLUG(dev_priv)) {
i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
}
@@ -3712,7 +3708,7 @@ static int i915_irq_postinstall(struct drm_device *dev)
I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
I915_USER_INTERRUPT;
- if (I915_HAS_HOTPLUG(dev)) {
+ if (I915_HAS_HOTPLUG(dev_priv)) {
i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
POSTING_READ(PORT_HOTPLUG_EN);
@@ -3880,7 +3876,7 @@ static void i915_irq_uninstall(struct drm_device * dev)
struct drm_i915_private *dev_priv = to_i915(dev);
int pipe;
- if (I915_HAS_HOTPLUG(dev)) {
+ if (I915_HAS_HOTPLUG(dev_priv)) {
i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
}
@@ -4145,7 +4141,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
- if (HAS_GUC_SCHED(dev))
+ if (HAS_GUC_SCHED(dev_priv))
dev_priv->pm_guc_events = GEN9_GUC_TO_HOST_INT_EVENT;
/* Let's track the enabled rps events */
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index 629e4334719c..d46ffe7086bc 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -39,7 +39,7 @@ struct i915_params i915 __read_mostly = {
.enable_hangcheck = true,
.enable_ppgtt = -1,
.enable_psr = -1,
- .preliminary_hw_support = IS_ENABLED(CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT),
+ .alpha_support = IS_ENABLED(CONFIG_DRM_I915_ALPHA_SUPPORT),
.disable_power_well = -1,
.enable_ips = 1,
.fastboot = 0,
@@ -145,9 +145,10 @@ MODULE_PARM_DESC(enable_psr, "Enable PSR "
"(0=disabled, 1=enabled - link mode chosen per-platform, 2=force link-standby mode, 3=force link-off mode) "
"Default: -1 (use per-chip default)");
-module_param_named_unsafe(preliminary_hw_support, i915.preliminary_hw_support, int, 0400);
-MODULE_PARM_DESC(preliminary_hw_support,
- "Enable preliminary hardware support.");
+module_param_named_unsafe(alpha_support, i915.alpha_support, int, 0400);
+MODULE_PARM_DESC(alpha_support,
+ "Enable alpha quality driver support for latest hardware. "
+ "See also CONFIG_DRM_I915_ALPHA_SUPPORT.");
module_param_named_unsafe(disable_power_well, i915.disable_power_well, int, 0400);
MODULE_PARM_DESC(disable_power_well,
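
The rename keeps the i915 module-parameter idiom seen above: a field in the params struct, a module_param_named_unsafe() binding, and a matching MODULE_PARM_DESC. A minimal sketch of that trio for a hypothetical knob; demo_knob and CONFIG_DEMO_KNOB_DEFAULT are illustrative names, not real options:

#include <linux/module.h>

struct demo_params {
	unsigned int demo_knob;
};

static struct demo_params demo __read_mostly = {
	.demo_knob = IS_ENABLED(CONFIG_DEMO_KNOB_DEFAULT),
};

/* 0400 keeps the knob read-only after load; the _unsafe variant
 * taints the kernel when a user sets it, marking the configuration
 * as unsupported, the same treatment alpha_support gets above.
 */
module_param_named_unsafe(demo_knob, demo.demo_knob, uint, 0400);
MODULE_PARM_DESC(demo_knob,
		 "Enable the demo knob. See also CONFIG_DEMO_KNOB_DEFAULT.");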
diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h
index 94efc899c1ef..817ad959941e 100644
--- a/drivers/gpu/drm/i915/i915_params.h
+++ b/drivers/gpu/drm/i915/i915_params.h
@@ -40,7 +40,7 @@ struct i915_params {
int enable_ppgtt;
int enable_execlists;
int enable_psr;
- unsigned int preliminary_hw_support;
+ unsigned int alpha_support;
int disable_power_well;
int enable_ips;
int invert_brightness;
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index 2a419500b81a..fce8e198bc76 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -363,6 +363,7 @@ static const struct intel_device_info intel_broxton_info = {
.has_hw_contexts = 1,
.has_logical_ring_contexts = 1,
.has_guc = 1,
+ .has_decoupled_mmio = 1,
.ddb_size = 512,
GEN_DEFAULT_PIPEOFFSETS,
IVB_CURSOR_OFFSETS,
@@ -439,9 +440,10 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct intel_device_info *intel_info =
(struct intel_device_info *) ent->driver_data;
- if (IS_PRELIMINARY_HW(intel_info) && !i915.preliminary_hw_support) {
- DRM_INFO("This hardware requires preliminary hardware support.\n"
- "See CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT, and/or modparam preliminary_hw_support\n");
+ if (IS_ALPHA_SUPPORT(intel_info) && !i915.alpha_support) {
+ DRM_INFO("The driver support for your hardware in this kernel version is alpha quality\n"
+ "See CONFIG_DRM_I915_ALPHA_SUPPORT or i915.alpha_support module parameter\n"
+ "to enable support in this kernel version, or check for kernel updates.\n");
return -ENODEV;
}
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 3361d7ffc63e..c70c07a7b586 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -7342,6 +7342,13 @@ enum {
#define SKL_FUSE_PG1_DIST_STATUS (1<<26)
#define SKL_FUSE_PG2_DIST_STATUS (1<<25)
+/* Decoupled MMIO register pair for kernel driver */
+#define GEN9_DECOUPLED_REG0_DW0 _MMIO(0xF00)
+#define GEN9_DECOUPLED_REG0_DW1 _MMIO(0xF04)
+#define GEN9_DECOUPLED_DW1_GO (1<<31)
+#define GEN9_DECOUPLED_PD_SHIFT 28
+#define GEN9_DECOUPLED_OP_SHIFT 24
+
/* Per-pipe DDI Function Control */
#define _TRANS_DDI_FUNC_CTL_A 0x60400
#define _TRANS_DDI_FUNC_CTL_B 0x61400
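
The new GEN9_DECOUPLED_* definitions suggest a doorbell-style protocol: a power-domain code and an operation code are packed into REG0_DW1 together with a GO bit that hardware clears on completion, presumably with the target register offset written to REG0_DW0. The access sequence itself is not part of this hunk, so the helper below is only an illustrative guess at how the command dword is composed from these shifts:

#include <linux/types.h>

/* Mirrors the definitions above; demo_decoupled_cmd() is a
 * hypothetical helper, not part of this patch.
 */
#define DEMO_DW1_GO		(1u << 31)
#define DEMO_PD_SHIFT		28
#define DEMO_OP_SHIFT		24

static inline u32 demo_decoupled_cmd(u32 power_domain, u32 operation)
{
	return (power_domain << DEMO_PD_SHIFT) |
	       (operation << DEMO_OP_SHIFT) |
	       DEMO_DW1_GO;
}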
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 344cbf39cfa9..b0e1e7ca75da 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -29,12 +29,10 @@
#include "intel_drv.h"
#include "i915_reg.h"
-static void i915_save_display(struct drm_device *dev)
+static void i915_save_display(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
/* Display arbitration control */
- if (INTEL_INFO(dev)->gen <= 4)
+ if (INTEL_GEN(dev_priv) <= 4)
dev_priv->regfile.saveDSPARB = I915_READ(DSPARB);
/* save FBC interval */
@@ -42,12 +40,10 @@ static void i915_save_display(struct drm_device *dev)
dev_priv->regfile.saveFBC_CONTROL = I915_READ(FBC_CONTROL);
}
-static void i915_restore_display(struct drm_device *dev)
+static void i915_restore_display(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
/* Display arbitration */
- if (INTEL_INFO(dev)->gen <= 4)
+ if (INTEL_GEN(dev_priv) <= 4)
I915_WRITE(DSPARB, dev_priv->regfile.saveDSPARB);
/* only restore FBC info on the platform that supports FBC*/
@@ -57,7 +53,7 @@ static void i915_restore_display(struct drm_device *dev)
if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) <= 4 && !IS_G4X(dev_priv))
I915_WRITE(FBC_CONTROL, dev_priv->regfile.saveFBC_CONTROL);
- i915_redisable_vga(dev);
+ i915_redisable_vga(dev_priv);
}
int i915_save_state(struct drm_device *dev)
@@ -68,14 +64,14 @@ int i915_save_state(struct drm_device *dev)
mutex_lock(&dev->struct_mutex);
- i915_save_display(dev);
+ i915_save_display(dev_priv);
if (IS_GEN4(dev_priv))
pci_read_config_word(pdev, GCDGMBUS,
&dev_priv->regfile.saveGCDGMBUS);
/* Cache mode state */
- if (INTEL_INFO(dev)->gen < 7)
+ if (INTEL_GEN(dev_priv) < 7)
dev_priv->regfile.saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
/* Memory Arbitration state */
@@ -114,15 +110,15 @@ int i915_restore_state(struct drm_device *dev)
mutex_lock(&dev->struct_mutex);
- i915_gem_restore_fences(dev);
+ i915_gem_restore_fences(dev_priv);
if (IS_GEN4(dev_priv))
pci_write_config_word(pdev, GCDGMBUS,
dev_priv->regfile.saveGCDGMBUS);
- i915_restore_display(dev);
+ i915_restore_display(dev_priv);
/* Cache mode state */
- if (INTEL_INFO(dev)->gen < 7)
+ if (INTEL_GEN(dev_priv) < 7)
I915_WRITE(CACHE_MODE_0, dev_priv->regfile.saveCACHE_MODE_0 |
0xffff0000);
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c
index 95f2f12e0917..147420ccf49c 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.c
+++ b/drivers/gpu/drm/i915/i915_sw_fence.c
@@ -116,11 +116,14 @@ static void i915_sw_fence_await(struct i915_sw_fence *fence)
WARN_ON(atomic_inc_return(&fence->pending) <= 1);
}
-void i915_sw_fence_init(struct i915_sw_fence *fence, i915_sw_fence_notify_t fn)
+void __i915_sw_fence_init(struct i915_sw_fence *fence,
+ i915_sw_fence_notify_t fn,
+ const char *name,
+ struct lock_class_key *key)
{
BUG_ON((unsigned long)fn & ~I915_SW_FENCE_MASK);
- init_waitqueue_head(&fence->wait);
+ __init_waitqueue_head(&fence->wait, name, key);
kref_init(&fence->kref);
atomic_set(&fence->pending, 1);
fence->flags = (unsigned long)fn;
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.h b/drivers/gpu/drm/i915/i915_sw_fence.h
index 707dfc4f0da5..7508d23f823b 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.h
+++ b/drivers/gpu/drm/i915/i915_sw_fence.h
@@ -40,7 +40,22 @@ typedef int (*i915_sw_fence_notify_t)(struct i915_sw_fence *,
enum i915_sw_fence_notify state);
#define __i915_sw_fence_call __aligned(4)
-void i915_sw_fence_init(struct i915_sw_fence *fence, i915_sw_fence_notify_t fn);
+void __i915_sw_fence_init(struct i915_sw_fence *fence,
+ i915_sw_fence_notify_t fn,
+ const char *name,
+ struct lock_class_key *key);
+#ifdef CONFIG_LOCKDEP
+#define i915_sw_fence_init(fence, fn) \
+do { \
+ static struct lock_class_key __key; \
+ \
+ __i915_sw_fence_init((fence), (fn), #fence, &__key); \
+} while (0)
+#else
+#define i915_sw_fence_init(fence, fn) \
+ __i915_sw_fence_init((fence), (fn), NULL, NULL)
+#endif
+
void i915_sw_fence_commit(struct i915_sw_fence *fence);
int i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
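
The i915_sw_fence_init() macro above applies the same lockdep trick as the timeline code earlier in this diff: with CONFIG_LOCKDEP, every expansion site declares its own static struct lock_class_key, and the stringified #fence argument names the wait queue. A hedged usage sketch, where demo_notify and demo_fence_holder are illustrative:

static int __i915_sw_fence_call
demo_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
	return NOTIFY_DONE;
}

struct demo_fence_holder {
	struct i915_sw_fence fence;
};

static void demo_init(struct demo_fence_holder *f)
{
	/* Under CONFIG_LOCKDEP this expands at the call site to roughly:
	 *
	 *	static struct lock_class_key __key;
	 *	__i915_sw_fence_init(&f->fence, demo_notify,
	 *			     "&f->fence", &__key);
	 *
	 * so each embedding structure gets its own lockdep class and a
	 * readable name for its wait queue.
	 */
	i915_sw_fence_init(&f->fence, demo_notify);
}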
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
new file mode 100644
index 000000000000..a792dcb902b5
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -0,0 +1,638 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "i915_vma.h"
+
+#include "i915_drv.h"
+#include "intel_ringbuffer.h"
+#include "intel_frontbuffer.h"
+
+#include <drm/drm_gem.h>
+
+static void
+i915_vma_retire(struct i915_gem_active *active,
+ struct drm_i915_gem_request *rq)
+{
+ const unsigned int idx = rq->engine->id;
+ struct i915_vma *vma =
+ container_of(active, struct i915_vma, last_read[idx]);
+ struct drm_i915_gem_object *obj = vma->obj;
+
+ GEM_BUG_ON(!i915_vma_has_active_engine(vma, idx));
+
+ i915_vma_clear_active(vma, idx);
+ if (i915_vma_is_active(vma))
+ return;
+
+ list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
+ if (unlikely(i915_vma_is_closed(vma) && !i915_vma_is_pinned(vma)))
+ WARN_ON(i915_vma_unbind(vma));
+
+ GEM_BUG_ON(!i915_gem_object_is_active(obj));
+ if (--obj->active_count)
+ return;
+
+ /* Bump our place on the bound list to keep it roughly in LRU order
+ * so that we don't steal from recently used but inactive objects
+ * (unless we are forced to, of course!)
+ */
+ if (obj->bind_count)
+ list_move_tail(&obj->global_link, &rq->i915->mm.bound_list);
+
+ obj->mm.dirty = true; /* be paranoid */
+
+ if (i915_gem_object_has_active_reference(obj)) {
+ i915_gem_object_clear_active_reference(obj);
+ i915_gem_object_put(obj);
+ }
+}
+
+static struct i915_vma *
+__i915_vma_create(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm,
+ const struct i915_ggtt_view *view)
+{
+ struct i915_vma *vma;
+ struct rb_node *rb, **p;
+ int i;
+
+ GEM_BUG_ON(vm->closed);
+
+ vma = kmem_cache_zalloc(to_i915(obj->base.dev)->vmas, GFP_KERNEL);
+ if (vma == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&vma->exec_list);
+ for (i = 0; i < ARRAY_SIZE(vma->last_read); i++)
+ init_request_active(&vma->last_read[i], i915_vma_retire);
+ init_request_active(&vma->last_fence, NULL);
+ list_add(&vma->vm_link, &vm->unbound_list);
+ vma->vm = vm;
+ vma->obj = obj;
+ vma->size = obj->base.size;
+
+ if (view) {
+ vma->ggtt_view = *view;
+ if (view->type == I915_GGTT_VIEW_PARTIAL) {
+ vma->size = view->params.partial.size;
+ vma->size <<= PAGE_SHIFT;
+ } else if (view->type == I915_GGTT_VIEW_ROTATED) {
+ vma->size =
+ intel_rotation_info_size(&view->params.rotated);
+ vma->size <<= PAGE_SHIFT;
+ }
+ }
+
+ if (i915_is_ggtt(vm)) {
+ vma->flags |= I915_VMA_GGTT;
+ list_add(&vma->obj_link, &obj->vma_list);
+ } else {
+ i915_ppgtt_get(i915_vm_to_ppgtt(vm));
+ list_add_tail(&vma->obj_link, &obj->vma_list);
+ }
+
+ rb = NULL;
+ p = &obj->vma_tree.rb_node;
+ while (*p) {
+ struct i915_vma *pos;
+
+ rb = *p;
+ pos = rb_entry(rb, struct i915_vma, obj_node);
+ if (i915_vma_compare(pos, vm, view) < 0)
+ p = &rb->rb_right;
+ else
+ p = &rb->rb_left;
+ }
+ rb_link_node(&vma->obj_node, rb, p);
+ rb_insert_color(&vma->obj_node, &obj->vma_tree);
+
+ return vma;
+}
+
+struct i915_vma *
+i915_vma_create(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm,
+ const struct i915_ggtt_view *view)
+{
+ lockdep_assert_held(&obj->base.dev->struct_mutex);
+ GEM_BUG_ON(view && !i915_is_ggtt(vm));
+ GEM_BUG_ON(i915_gem_obj_to_vma(obj, vm, view));
+
+ return __i915_vma_create(obj, vm, view);
+}
+
+/**
+ * i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space.
+ * @vma: VMA to map
+ * @cache_level: mapping cache level
+ * @flags: flags like global or local mapping
+ *
+ * DMA addresses are taken from the scatter-gather table of this object (or of
+ * this VMA in case of non-default GGTT views) and PTE entries set up.
+ * Note that DMA addresses are also the only part of the SG table we care about.
+ */
+int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
+ u32 flags)
+{
+ u32 bind_flags;
+ u32 vma_flags;
+ int ret;
+
+ if (WARN_ON(flags == 0))
+ return -EINVAL;
+
+ bind_flags = 0;
+ if (flags & PIN_GLOBAL)
+ bind_flags |= I915_VMA_GLOBAL_BIND;
+ if (flags & PIN_USER)
+ bind_flags |= I915_VMA_LOCAL_BIND;
+
+ vma_flags = vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
+ if (flags & PIN_UPDATE)
+ bind_flags |= vma_flags;
+ else
+ bind_flags &= ~vma_flags;
+ if (bind_flags == 0)
+ return 0;
+
+ if (vma_flags == 0 && vma->vm->allocate_va_range) {
+ trace_i915_va_alloc(vma);
+ ret = vma->vm->allocate_va_range(vma->vm,
+ vma->node.start,
+ vma->node.size);
+ if (ret)
+ return ret;
+ }
+
+ ret = vma->vm->bind_vma(vma, cache_level, bind_flags);
+ if (ret)
+ return ret;
+
+ vma->flags |= bind_flags;
+ return 0;
+}
+
+void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
+{
+ void __iomem *ptr;
+
+ /* Access through the GTT requires the device to be awake. */
+ assert_rpm_wakelock_held(to_i915(vma->vm->dev));
+
+ lockdep_assert_held(&vma->vm->dev->struct_mutex);
+ if (WARN_ON(!i915_vma_is_map_and_fenceable(vma)))
+ return IO_ERR_PTR(-ENODEV);
+
+ GEM_BUG_ON(!i915_vma_is_ggtt(vma));
+ GEM_BUG_ON((vma->flags & I915_VMA_GLOBAL_BIND) == 0);
+
+ ptr = vma->iomap;
+ if (ptr == NULL) {
+ ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->mappable,
+ vma->node.start,
+ vma->node.size);
+ if (ptr == NULL)
+ return IO_ERR_PTR(-ENOMEM);
+
+ vma->iomap = ptr;
+ }
+
+ __i915_vma_pin(vma);
+ return ptr;
+}
+
+void i915_vma_unpin_and_release(struct i915_vma **p_vma)
+{
+ struct i915_vma *vma;
+ struct drm_i915_gem_object *obj;
+
+ vma = fetch_and_zero(p_vma);
+ if (!vma)
+ return;
+
+ obj = vma->obj;
+
+ i915_vma_unpin(vma);
+ i915_vma_close(vma);
+
+ __i915_gem_object_release_unless_active(obj);
+}
+
+bool
+i915_vma_misplaced(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
+{
+ if (!drm_mm_node_allocated(&vma->node))
+ return false;
+
+ if (vma->node.size < size)
+ return true;
+
+ if (alignment && vma->node.start & (alignment - 1))
+ return true;
+
+ if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
+ return true;
+
+ if (flags & PIN_OFFSET_BIAS &&
+ vma->node.start < (flags & PIN_OFFSET_MASK))
+ return true;
+
+ if (flags & PIN_OFFSET_FIXED &&
+ vma->node.start != (flags & PIN_OFFSET_MASK))
+ return true;
+
+ return false;
+}
+
+void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
+{
+ struct drm_i915_gem_object *obj = vma->obj;
+ struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+ bool mappable, fenceable;
+ u32 fence_size, fence_alignment;
+
+ fence_size = i915_gem_get_ggtt_size(dev_priv,
+ vma->size,
+ i915_gem_object_get_tiling(obj));
+ fence_alignment = i915_gem_get_ggtt_alignment(dev_priv,
+ vma->size,
+ i915_gem_object_get_tiling(obj),
+ true);
+
+ fenceable = (vma->node.size == fence_size &&
+ (vma->node.start & (fence_alignment - 1)) == 0);
+
+ mappable = (vma->node.start + fence_size <=
+ dev_priv->ggtt.mappable_end);
+
+ /*
+ * Explicitly disable for rotated VMA since the display does not
+ * need the fence and the VMA is not accessible to other users.
+ */
+ if (mappable && fenceable &&
+ vma->ggtt_view.type != I915_GGTT_VIEW_ROTATED)
+ vma->flags |= I915_VMA_CAN_FENCE;
+ else
+ vma->flags &= ~I915_VMA_CAN_FENCE;
+}
+
+bool i915_gem_valid_gtt_space(struct i915_vma *vma,
+ unsigned long cache_level)
+{
+ struct drm_mm_node *gtt_space = &vma->node;
+ struct drm_mm_node *other;
+
+ /*
+ * On some machines we have to be careful when putting differing types
+ * of snoopable memory together to avoid the prefetcher crossing memory
+ * domains and dying. During vm initialisation, we decide whether or not
+ * these constraints apply and set the drm_mm.color_adjust
+ * appropriately.
+ */
+ if (vma->vm->mm.color_adjust == NULL)
+ return true;
+
+ if (!drm_mm_node_allocated(gtt_space))
+ return true;
+
+ if (list_empty(&gtt_space->node_list))
+ return true;
+
+ other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
+ if (other->allocated && !other->hole_follows && other->color != cache_level)
+ return false;
+
+ other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
+ if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
+ return false;
+
+ return true;
+}
+
+/**
+ * i915_vma_insert - finds a slot for the vma in its address space
+ * @vma: the vma
+ * @size: requested size in bytes (can be larger than the VMA)
+ * @alignment: required alignment
+ * @flags: mask of PIN_* flags to use
+ *
+ * First we try to allocate some free space that meets the requirements for
+ * the VMA. Failing that, if the flags permit, we evict an old VMA,
+ * preferably the oldest idle entry, to make room for the new VMA.
+ *
+ * Returns:
+ * 0 on success, negative error code otherwise.
+ */
+static int
+i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
+{
+ struct drm_i915_private *dev_priv = to_i915(vma->vm->dev);
+ struct drm_i915_gem_object *obj = vma->obj;
+ u64 start, end;
+ int ret;
+
+ GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
+ GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
+
+ size = max(size, vma->size);
+ if (flags & PIN_MAPPABLE)
+ size = i915_gem_get_ggtt_size(dev_priv, size,
+ i915_gem_object_get_tiling(obj));
+
+ alignment = max(max(alignment, vma->display_alignment),
+ i915_gem_get_ggtt_alignment(dev_priv, size,
+ i915_gem_object_get_tiling(obj),
+ flags & PIN_MAPPABLE));
+
+ start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
+
+ end = vma->vm->total;
+ if (flags & PIN_MAPPABLE)
+ end = min_t(u64, end, dev_priv->ggtt.mappable_end);
+ if (flags & PIN_ZONE_4G)
+ end = min_t(u64, end, (1ULL << 32) - PAGE_SIZE);
+
+ /* If binding the object/GGTT view requires more space than the entire
+ * aperture has, reject it early before evicting everything in a vain
+ * attempt to find space.
+ */
+ if (size > end) {
+ DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu [object=%zd] > %s aperture=%llu\n",
+ size, obj->base.size,
+ flags & PIN_MAPPABLE ? "mappable" : "total",
+ end);
+ return -E2BIG;
+ }
+
+ ret = i915_gem_object_pin_pages(obj);
+ if (ret)
+ return ret;
+
+ if (flags & PIN_OFFSET_FIXED) {
+ u64 offset = flags & PIN_OFFSET_MASK;
+ if (offset & (alignment - 1) || offset > end - size) {
+ ret = -EINVAL;
+ goto err_unpin;
+ }
+
+ vma->node.start = offset;
+ vma->node.size = size;
+ vma->node.color = obj->cache_level;
+ ret = drm_mm_reserve_node(&vma->vm->mm, &vma->node);
+ if (ret) {
+ ret = i915_gem_evict_for_vma(vma);
+ if (ret == 0)
+ ret = drm_mm_reserve_node(&vma->vm->mm, &vma->node);
+ if (ret)
+ goto err_unpin;
+ }
+ } else {
+ u32 search_flag, alloc_flag;
+
+ if (flags & PIN_HIGH) {
+ search_flag = DRM_MM_SEARCH_BELOW;
+ alloc_flag = DRM_MM_CREATE_TOP;
+ } else {
+ search_flag = DRM_MM_SEARCH_DEFAULT;
+ alloc_flag = DRM_MM_CREATE_DEFAULT;
+ }
+
+ /* We only allocate in PAGE_SIZE/GTT_PAGE_SIZE (4096) chunks,
+ * so we know that we always have a minimum alignment of 4096.
+ * The drm_mm range manager is optimised to return results
+ * with zero alignment, so where possible use the optimal
+ * path.
+ */
+ if (alignment <= 4096)
+ alignment = 0;
+
+search_free:
+ ret = drm_mm_insert_node_in_range_generic(&vma->vm->mm,
+ &vma->node,
+ size, alignment,
+ obj->cache_level,
+ start, end,
+ search_flag,
+ alloc_flag);
+ if (ret) {
+ ret = i915_gem_evict_something(vma->vm, size, alignment,
+ obj->cache_level,
+ start, end,
+ flags);
+ if (ret == 0)
+ goto search_free;
+
+ goto err_unpin;
+ }
+
+ GEM_BUG_ON(vma->node.start < start);
+ GEM_BUG_ON(vma->node.start + vma->node.size > end);
+ }
+ GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level));
+
+ list_move_tail(&obj->global_link, &dev_priv->mm.bound_list);
+ list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
+ obj->bind_count++;
+ GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
+
+ return 0;
+
+err_unpin:
+ i915_gem_object_unpin_pages(obj);
+ return ret;
+}
+
+int __i915_vma_do_pin(struct i915_vma *vma,
+ u64 size, u64 alignment, u64 flags)
+{
+ unsigned int bound = vma->flags;
+ int ret;
+
+ lockdep_assert_held(&vma->vm->dev->struct_mutex);
+ GEM_BUG_ON((flags & (PIN_GLOBAL | PIN_USER)) == 0);
+ GEM_BUG_ON((flags & PIN_GLOBAL) && !i915_vma_is_ggtt(vma));
+
+ if (WARN_ON(bound & I915_VMA_PIN_OVERFLOW)) {
+ ret = -EBUSY;
+ goto err;
+ }
+
+ if ((bound & I915_VMA_BIND_MASK) == 0) {
+ ret = i915_vma_insert(vma, size, alignment, flags);
+ if (ret)
+ goto err;
+ }
+
+ ret = i915_vma_bind(vma, vma->obj->cache_level, flags);
+ if (ret)
+ goto err;
+
+ if ((bound ^ vma->flags) & I915_VMA_GLOBAL_BIND)
+ __i915_vma_set_map_and_fenceable(vma);
+
+ GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
+ return 0;
+
+err:
+ __i915_vma_unpin(vma);
+ return ret;
+}
+
+void i915_vma_destroy(struct i915_vma *vma)
+{
+ GEM_BUG_ON(vma->node.allocated);
+ GEM_BUG_ON(i915_vma_is_active(vma));
+ GEM_BUG_ON(!i915_vma_is_closed(vma));
+ GEM_BUG_ON(vma->fence);
+
+ list_del(&vma->vm_link);
+ if (!i915_vma_is_ggtt(vma))
+ i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm));
+
+ kmem_cache_free(to_i915(vma->obj->base.dev)->vmas, vma);
+}
+
+void i915_vma_close(struct i915_vma *vma)
+{
+ GEM_BUG_ON(i915_vma_is_closed(vma));
+ vma->flags |= I915_VMA_CLOSED;
+
+ list_del(&vma->obj_link);
+ rb_erase(&vma->obj_node, &vma->obj->vma_tree);
+
+ if (!i915_vma_is_active(vma) && !i915_vma_is_pinned(vma))
+ WARN_ON(i915_vma_unbind(vma));
+}
+
+static void __i915_vma_iounmap(struct i915_vma *vma)
+{
+ GEM_BUG_ON(i915_vma_is_pinned(vma));
+
+ if (vma->iomap == NULL)
+ return;
+
+ io_mapping_unmap(vma->iomap);
+ vma->iomap = NULL;
+}
+
+int i915_vma_unbind(struct i915_vma *vma)
+{
+ struct drm_i915_gem_object *obj = vma->obj;
+ unsigned long active;
+ int ret;
+
+ lockdep_assert_held(&obj->base.dev->struct_mutex);
+
+ /* First wait upon any activity as retiring the request may
+ * have side-effects such as unpinning or even unbinding this vma.
+ */
+ active = i915_vma_get_active(vma);
+ if (active) {
+ int idx;
+
+ /* When a closed VMA is retired, it is unbound - eek.
+ * In order to prevent it from being recursively closed,
+ * take a pin on the vma so that the second unbind is
+ * aborted.
+ *
+ * Even more scary is that the retire callback may free
+ * the object (last active vma). To prevent the explosion
+ * we defer the actual object free to a worker that can
+ * only proceed once it acquires the struct_mutex (which
+ * we currently hold, therefore it cannot free this object
+ * before we are finished).
+ */
+ __i915_vma_pin(vma);
+
+ for_each_active(active, idx) {
+ ret = i915_gem_active_retire(&vma->last_read[idx],
+ &vma->vm->dev->struct_mutex);
+ if (ret)
+ break;
+ }
+
+ __i915_vma_unpin(vma);
+ if (ret)
+ return ret;
+
+ GEM_BUG_ON(i915_vma_is_active(vma));
+ }
+
+ if (i915_vma_is_pinned(vma))
+ return -EBUSY;
+
+ if (!drm_mm_node_allocated(&vma->node))
+ goto destroy;
+
+ GEM_BUG_ON(obj->bind_count == 0);
+ GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
+
+ if (i915_vma_is_map_and_fenceable(vma)) {
+ /* release the fence reg _after_ flushing */
+ ret = i915_vma_put_fence(vma);
+ if (ret)
+ return ret;
+
+ /* Force a pagefault for domain tracking on next user access */
+ i915_gem_release_mmap(obj);
+
+ __i915_vma_iounmap(vma);
+ vma->flags &= ~I915_VMA_CAN_FENCE;
+ }
+
+ if (likely(!vma->vm->closed)) {
+ trace_i915_vma_unbind(vma);
+ vma->vm->unbind_vma(vma);
+ }
+ vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
+
+ drm_mm_remove_node(&vma->node);
+ list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
+
+ if (vma->pages != obj->mm.pages) {
+ GEM_BUG_ON(!vma->pages);
+ sg_free_table(vma->pages);
+ kfree(vma->pages);
+ }
+ vma->pages = NULL;
+
+ /* Since the unbound list is global, only move to that list if
+ * no more VMAs exist. */
+ if (--obj->bind_count == 0)
+ list_move_tail(&obj->global_link,
+ &to_i915(obj->base.dev)->mm.unbound_list);
+
+ /* And finally now the object is completely decoupled from this vma,
+ * we can drop its hold on the backing storage and allow it to be
+ * reaped by the shrinker.
+ */
+ i915_gem_object_unpin_pages(obj);
+
+destroy:
+ if (unlikely(i915_vma_is_closed(vma)))
+ i915_vma_destroy(vma);
+
+ return 0;
+}
+
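
Taken together, the new file gives each VMA an explicit create/pin/use/unpin/unbind lifecycle, all under struct_mutex as the lockdep assertions insist. A condensed, hedged usage sketch of that flow (error paths trimmed; a NULL view selects the normal GGTT mapping):

/* Sketch only: maps an object into the GGTT, uses its offset, then
 * releases it again. The caller holds dev->struct_mutex throughout.
 */
static int demo_map_into_ggtt(struct drm_i915_gem_object *obj,
			      struct i915_address_space *ggtt_vm)
{
	struct i915_vma *vma;
	int ret;

	vma = i915_vma_create(obj, ggtt_vm, NULL);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	/* The first pin reserves GTT space (i915_vma_insert) and writes
	 * the PTEs (i915_vma_bind); later pins only bump the count
	 * packed into the low bits of vma->flags.
	 */
	ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
	if (ret)
		return ret;

	/* ... hardware may use i915_ggtt_offset(vma) while pinned ... */

	i915_vma_unpin(vma);
	return i915_vma_unbind(vma);
}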
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
new file mode 100644
index 000000000000..85446f0b0b3f
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -0,0 +1,341 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __I915_VMA_H__
+#define __I915_VMA_H__
+
+#include <linux/io-mapping.h>
+
+#include <drm/drm_mm.h>
+
+#include "i915_gem_gtt.h"
+#include "i915_gem_fence_reg.h"
+#include "i915_gem_object.h"
+#include "i915_gem_request.h"
+
+
+enum i915_cache_level;
+
+/**
+ * A VMA represents a GEM BO that is bound into an address space. Therefore, a
+ * VMA's presence cannot be guaranteed before the object is bound into, or
+ * after it is unbound from, the address space.
+ *
+ * To make things as simple as possible (i.e. no refcounting), a VMA's lifetime
+ * will always be <= an object's lifetime. So object refcounting should cover us.
+ */
+struct i915_vma {
+ struct drm_mm_node node;
+ struct drm_i915_gem_object *obj;
+ struct i915_address_space *vm;
+ struct drm_i915_fence_reg *fence;
+ struct sg_table *pages;
+ void __iomem *iomap;
+ u64 size;
+ u64 display_alignment;
+
+ unsigned int flags;
+ /**
+ * How many users have pinned this object in GTT space. The following
+ * users can each hold at most one reference: pwrite/pread, execbuffer
+ * (objects are not allowed multiple times for the same batchbuffer),
+ * and the framebuffer code. When switching/pageflipping, the
+ * framebuffer code has at most two buffers pinned per crtc.
+ *
+ * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
+ * bits with absolutely no headroom. So use 4 bits.
+ */
+#define I915_VMA_PIN_MASK 0xf
+#define I915_VMA_PIN_OVERFLOW BIT(5)
+
+ /** Flags and address space this VMA is bound to */
+#define I915_VMA_GLOBAL_BIND BIT(6)
+#define I915_VMA_LOCAL_BIND BIT(7)
+#define I915_VMA_BIND_MASK (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND | I915_VMA_PIN_OVERFLOW)
+
+#define I915_VMA_GGTT BIT(8)
+#define I915_VMA_CAN_FENCE BIT(9)
+#define I915_VMA_CLOSED BIT(10)
+
+ unsigned int active;
+ struct i915_gem_active last_read[I915_NUM_ENGINES];
+ struct i915_gem_active last_fence;
+
+ /**
+ * Support different GGTT views into the same object.
+ * This means there can be multiple VMA mappings per object and per VM.
+ * i915_ggtt_view_type is used to distinguish between those entries.
+ * The default of zero (I915_GGTT_VIEW_NORMAL) is also assumed in GEM
+ * functions which take no ggtt view parameter.
+ */
+ struct i915_ggtt_view ggtt_view;
+
+ /** This object's place on the active/inactive lists */
+ struct list_head vm_link;
+
+ struct list_head obj_link; /* Link in the object's VMA list */
+ struct rb_node obj_node;
+
+ /** This vma's place in the batchbuffer or on the eviction list */
+ struct list_head exec_list;
+
+ /**
+ * Used for performing relocations during execbuffer insertion.
+ */
+ struct hlist_node exec_node;
+ unsigned long exec_handle;
+ struct drm_i915_gem_exec_object2 *exec_entry;
+};
+
+struct i915_vma *
+i915_vma_create(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm,
+ const struct i915_ggtt_view *view);
+
+void i915_vma_unpin_and_release(struct i915_vma **p_vma);
+
+static inline bool i915_vma_is_ggtt(const struct i915_vma *vma)
+{
+ return vma->flags & I915_VMA_GGTT;
+}
+
+static inline bool i915_vma_is_map_and_fenceable(const struct i915_vma *vma)
+{
+ return vma->flags & I915_VMA_CAN_FENCE;
+}
+
+static inline bool i915_vma_is_closed(const struct i915_vma *vma)
+{
+ return vma->flags & I915_VMA_CLOSED;
+}
+
+static inline unsigned int i915_vma_get_active(const struct i915_vma *vma)
+{
+ return vma->active;
+}
+
+static inline bool i915_vma_is_active(const struct i915_vma *vma)
+{
+ return i915_vma_get_active(vma);
+}
+
+static inline void i915_vma_set_active(struct i915_vma *vma,
+ unsigned int engine)
+{
+ vma->active |= BIT(engine);
+}
+
+static inline void i915_vma_clear_active(struct i915_vma *vma,
+ unsigned int engine)
+{
+ vma->active &= ~BIT(engine);
+}
+
+static inline bool i915_vma_has_active_engine(const struct i915_vma *vma,
+ unsigned int engine)
+{
+ return vma->active & BIT(engine);
+}
+
+static inline u32 i915_ggtt_offset(const struct i915_vma *vma)
+{
+ GEM_BUG_ON(!i915_vma_is_ggtt(vma));
+ GEM_BUG_ON(!vma->node.allocated);
+ GEM_BUG_ON(upper_32_bits(vma->node.start));
+ GEM_BUG_ON(upper_32_bits(vma->node.start + vma->node.size - 1));
+ return lower_32_bits(vma->node.start);
+}
+
+static inline struct i915_vma *i915_vma_get(struct i915_vma *vma)
+{
+ i915_gem_object_get(vma->obj);
+ return vma;
+}
+
+static inline void i915_vma_put(struct i915_vma *vma)
+{
+ i915_gem_object_put(vma->obj);
+}
+
+static inline long
+i915_vma_compare(struct i915_vma *vma,
+ struct i915_address_space *vm,
+ const struct i915_ggtt_view *view)
+{
+ GEM_BUG_ON(view && !i915_is_ggtt(vm));
+
+ if (vma->vm != vm)
+ return vma->vm - vm;
+
+ if (!view)
+ return vma->ggtt_view.type;
+
+ if (vma->ggtt_view.type != view->type)
+ return vma->ggtt_view.type - view->type;
+
+ return memcmp(&vma->ggtt_view.params,
+ &view->params,
+ sizeof(view->params));
+}
+
+int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
+ u32 flags);
+bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level);
+bool
+i915_vma_misplaced(struct i915_vma *vma, u64 size, u64 alignment, u64 flags);
+void __i915_vma_set_map_and_fenceable(struct i915_vma *vma);
+int __must_check i915_vma_unbind(struct i915_vma *vma);
+void i915_vma_close(struct i915_vma *vma);
+void i915_vma_destroy(struct i915_vma *vma);
+
+int __i915_vma_do_pin(struct i915_vma *vma,
+ u64 size, u64 alignment, u64 flags);
+static inline int __must_check
+i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
+{
+ BUILD_BUG_ON(PIN_MBZ != I915_VMA_PIN_OVERFLOW);
+ BUILD_BUG_ON(PIN_GLOBAL != I915_VMA_GLOBAL_BIND);
+ BUILD_BUG_ON(PIN_USER != I915_VMA_LOCAL_BIND);
+
+ /* Pin early to prevent the shrinker/eviction logic from destroying
+ * our vma as we insert and bind.
+ */
+ if (likely(((++vma->flags ^ flags) & I915_VMA_BIND_MASK) == 0))
+ return 0;
+
+ return __i915_vma_do_pin(vma, size, alignment, flags);
+}
+
+static inline int i915_vma_pin_count(const struct i915_vma *vma)
+{
+ return vma->flags & I915_VMA_PIN_MASK;
+}
+
+static inline bool i915_vma_is_pinned(const struct i915_vma *vma)
+{
+ return i915_vma_pin_count(vma);
+}
+
+static inline void __i915_vma_pin(struct i915_vma *vma)
+{
+ vma->flags++;
+ GEM_BUG_ON(vma->flags & I915_VMA_PIN_OVERFLOW);
+}
+
+static inline void __i915_vma_unpin(struct i915_vma *vma)
+{
+ GEM_BUG_ON(!i915_vma_is_pinned(vma));
+ vma->flags--;
+}
+
+static inline void i915_vma_unpin(struct i915_vma *vma)
+{
+ GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
+ __i915_vma_unpin(vma);
+}
+
+/**
+ * i915_vma_pin_iomap - maps the GGTT VMA via the aperture with io_mapping_map_wc()
+ * @vma: VMA to iomap
+ *
+ * The passed in VMA has to be pinned in the global GTT mappable region.
+ * An extra pinning of the VMA is acquired for the returned iomapping;
+ * the caller must call i915_vma_unpin_iomap to relinquish the pinning
+ * after the iomapping is no longer required.
+ *
+ * Callers must hold the struct_mutex.
+ *
+ * Returns a valid iomapped pointer or ERR_PTR.
+ */
+void __iomem *i915_vma_pin_iomap(struct i915_vma *vma);
+#define IO_ERR_PTR(x) ((void __iomem *)ERR_PTR(x))
+
+/**
+ * i915_vma_unpin_iomap - unpins the mapping returned from i915_vma_pin_iomap()
+ * @vma: VMA to unpin
+ *
+ * Unpins the previously iomapped VMA from i915_vma_pin_iomap().
+ *
+ * Callers must hold the struct_mutex. This function is only valid to be
+ * called on a VMA previously iomapped by the caller with i915_vma_pin_iomap().
+ */
+static inline void i915_vma_unpin_iomap(struct i915_vma *vma)
+{
+ lockdep_assert_held(&vma->vm->dev->struct_mutex);
+ GEM_BUG_ON(vma->iomap == NULL);
+ i915_vma_unpin(vma);
+}
+
+static inline struct page *i915_vma_first_page(struct i915_vma *vma)
+{
+ GEM_BUG_ON(!vma->pages);
+ return sg_page(vma->pages->sgl);
+}
+
+/**
+ * i915_vma_pin_fence - pin fencing state
+ * @vma: vma to pin fencing for
+ *
+ * This pins the fencing state (whether tiled or untiled) to make sure the
+ * vma (and its object) is ready to be used as a scanout target. Fencing
+ * status must be synchronized first by calling i915_vma_get_fence().
+ *
+ * The resulting fence pin reference must be released again with
+ * i915_vma_unpin_fence().
+ *
+ * Returns:
+ *
+ * True if the vma has a fence, false otherwise.
+ */
+static inline bool
+i915_vma_pin_fence(struct i915_vma *vma)
+{
+ lockdep_assert_held(&vma->vm->dev->struct_mutex);
+ if (vma->fence) {
+ vma->fence->pin_count++;
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * i915_vma_unpin_fence - unpin fencing state
+ * @vma: vma to unpin fencing for
+ *
+ * This releases the fence pin reference acquired through
+ * i915_vma_pin_fence. It will handle both objects with and without an
+ * attached fence correctly; callers do not need to distinguish this.
+ */
+static inline void
+i915_vma_unpin_fence(struct i915_vma *vma)
+{
+ lockdep_assert_held(&vma->vm->dev->struct_mutex);
+ if (vma->fence) {
+ GEM_BUG_ON(vma->fence->pin_count <= 0);
+ vma->fence->pin_count--;
+ }
+}
+
+#endif
+
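
The fast path in i915_vma_pin() above leans on the bit layout of vma->flags: the pin count lives in the low bits, so the pre-increment bumps the count, and XOR-ing with the caller's PIN_GLOBAL/PIN_USER bits is zero across I915_VMA_BIND_MASK only when every requested binding is already present and the count has not spilled into the overflow bit. A standalone sketch of the trick with plain integers (userspace C, same constants as the header):

#include <stdio.h>

#define PIN_MASK	0xfu		/* low bits: pin count */
#define PIN_OVERFLOW	(1u << 5)
#define GLOBAL_BIND	(1u << 6)
#define LOCAL_BIND	(1u << 7)
#define BIND_MASK	(GLOBAL_BIND | LOCAL_BIND | PIN_OVERFLOW)

int main(void)
{
	unsigned int flags = GLOBAL_BIND;	/* already bound globally */
	unsigned int want = GLOBAL_BIND;	/* caller asks for GLOBAL */

	/* ++flags bumps the pin count in the low bits; the XOR is zero
	 * across BIND_MASK only if every requested binding is already
	 * present and the count has not run into the overflow bit.
	 */
	if (((++flags ^ want) & BIND_MASK) == 0)
		printf("fast path: pinned without rebinding (flags=%#x)\n",
		       flags);

	return 0;
}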
diff --git a/drivers/gpu/drm/i915/intel_atomic_plane.c b/drivers/gpu/drm/i915/intel_atomic_plane.c
index 984a6b75c118..ff821649486e 100644
--- a/drivers/gpu/drm/i915/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/intel_atomic_plane.c
@@ -106,6 +106,7 @@ intel_plane_destroy_state(struct drm_plane *plane,
static int intel_plane_atomic_check(struct drm_plane *plane,
struct drm_plane_state *state)
{
+ struct drm_i915_private *dev_priv = to_i915(plane->dev);
struct drm_crtc *crtc = state->crtc;
struct intel_crtc *intel_crtc;
struct intel_crtc_state *crtc_state;
@@ -167,6 +168,14 @@ static int intel_plane_atomic_check(struct drm_plane *plane,
}
}
+ /* CHV ignores the mirror bit when the rotate bit is set :( */
+ if (IS_CHERRYVIEW(dev_priv) &&
+ state->rotation & DRM_ROTATE_180 &&
+ state->rotation & DRM_REFLECT_X) {
+ DRM_DEBUG_KMS("Cannot rotate and reflect at the same time\n");
+ return -EINVAL;
+ }
+
intel_state->base.visible = false;
ret = intel_plane->check_plane(plane, crtc_state, intel_state);
if (ret)
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
index 813fd74d9c8d..1c509f7410f5 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -574,23 +574,26 @@ static void ilk_audio_codec_enable(struct drm_connector *connector,
/**
* intel_audio_codec_enable - Enable the audio codec for HD audio
* @intel_encoder: encoder on which to enable audio
+ * @crtc_state: pointer to the current crtc state.
+ * @conn_state: pointer to the current connector state.
*
* The enable sequences may only be performed after enabling the transcoder and
* port, and after link training has completed.
*/
-void intel_audio_codec_enable(struct intel_encoder *intel_encoder)
+void intel_audio_codec_enable(struct intel_encoder *intel_encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
{
struct drm_encoder *encoder = &intel_encoder->base;
- struct intel_crtc *crtc = to_intel_crtc(encoder->crtc);
- const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
+ const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
struct drm_connector *connector;
struct drm_i915_private *dev_priv = to_i915(encoder->dev);
struct i915_audio_component *acomp = dev_priv->audio_component;
enum port port = intel_encoder->port;
- enum pipe pipe = crtc->pipe;
+ enum pipe pipe = to_intel_crtc(crtc_state->base.crtc)->pipe;
- connector = drm_select_eld(encoder);
- if (!connector)
+ connector = conn_state->connector;
+ if (!connector || !connector->eld[0])
return;
DRM_DEBUG_DRIVER("ELD on [CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
@@ -601,7 +604,7 @@ void intel_audio_codec_enable(struct intel_encoder *intel_encoder)
/* ELD Conn_Type */
connector->eld[5] &= ~(3 << 2);
- if (intel_crtc_has_dp_encoder(crtc->config))
+ if (intel_crtc_has_dp_encoder(crtc_state))
connector->eld[5] |= (1 << 2);
connector->eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2;
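
The signature change is the atomic-conversion idiom running through this series: rather than chasing encoder->crtc and crtc->config, which can be stale in the middle of an atomic commit, the caller hands in the crtc_state and connector_state it is committing, and everything else is derived from them. A reduced sketch of that derivation (demo helpers, not the driver's):

/* Derive the pipe and the audio capability purely from atomic state. */
static enum pipe demo_pipe(const struct intel_crtc_state *crtc_state)
{
	/* The state names the CRTC it applies to, so no legacy
	 * encoder->crtc link is needed.
	 */
	return to_intel_crtc(crtc_state->base.crtc)->pipe;
}

static bool demo_has_audio_sink(const struct drm_connector_state *conn_state)
{
	struct drm_connector *connector = conn_state->connector;

	/* An empty ELD (first byte zero) means no audio-capable sink
	 * was detected, mirroring the !connector->eld[0] check above.
	 */
	return connector && connector->eld[0];
}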
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 5ab646ef8c9f..7ffab1abc518 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -1147,7 +1147,7 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
if (!child)
return;
- aux_channel = child->raw[25];
+ aux_channel = child->common.aux_channel;
ddc_pin = child->common.ddc_pin;
is_dvi = child->common.device_type & DEVICE_TYPE_TMDS_DVI_SIGNALING;
@@ -1677,7 +1677,8 @@ bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port)
return false;
}
-bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, enum port port)
+static bool child_dev_is_dp_dual_mode(const union child_device_config *p_child,
+ enum port port)
{
static const struct {
u16 dp, hdmi;
@@ -1691,22 +1692,35 @@ bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, enum por
[PORT_D] = { DVO_PORT_DPD, DVO_PORT_HDMID, },
[PORT_E] = { DVO_PORT_DPE, DVO_PORT_HDMIE, },
};
- int i;
if (port == PORT_A || port >= ARRAY_SIZE(port_mapping))
return false;
- if (!dev_priv->vbt.child_dev_num)
+ if ((p_child->common.device_type & DEVICE_TYPE_DP_DUAL_MODE_BITS) !=
+ (DEVICE_TYPE_DP_DUAL_MODE & DEVICE_TYPE_DP_DUAL_MODE_BITS))
return false;
+ if (p_child->common.dvo_port == port_mapping[port].dp)
+ return true;
+
+ /* Only accept an HDMI dvo_port as DP++ if it has an AUX channel */
+ if (p_child->common.dvo_port == port_mapping[port].hdmi &&
+ p_child->common.aux_channel != 0)
+ return true;
+
+ return false;
+}
+
+bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv,
+ enum port port)
+{
+ int i;
+
for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
const union child_device_config *p_child =
&dev_priv->vbt.child_dev[i];
- if ((p_child->common.dvo_port == port_mapping[port].dp ||
- p_child->common.dvo_port == port_mapping[port].hdmi) &&
- (p_child->common.device_type & DEVICE_TYPE_DP_DUAL_MODE_BITS) ==
- (DEVICE_TYPE_DP_DUAL_MODE & DEVICE_TYPE_DP_DUAL_MODE_BITS))
+ if (child_dev_is_dp_dual_mode(p_child, port))
return true;
}
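
Factoring the per-child test into child_dev_is_dp_dual_mode() also tightens it: an HDMI dvo_port is only treated as a DP++ adaptor when the VBT child device carries an AUX channel. A compressed sketch of the decision, with demo types standing in for the real VBT structures:

struct demo_child {
	unsigned short device_type;
	unsigned char dvo_port;
	unsigned char aux_channel;
};

static bool demo_is_dp_dual_mode(const struct demo_child *child,
				 unsigned char dp_port,
				 unsigned char hdmi_port,
				 unsigned short dual_mode_bits,
				 unsigned short dual_mode_type)
{
	if ((child->device_type & dual_mode_bits) !=
	    (dual_mode_type & dual_mode_bits))
		return false;	/* wrong device class */

	if (child->dvo_port == dp_port)
		return true;	/* the DP side always qualifies */

	/* The HDMI side only counts as DP++ with an AUX channel. */
	return child->dvo_port == hdmi_port && child->aux_channel != 0;
}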
diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c
index c410d3d6465f..c9c46a538edb 100644
--- a/drivers/gpu/drm/i915/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c
@@ -629,35 +629,28 @@ void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine)
cancel_fake_irq(engine);
}
-unsigned int intel_kick_waiters(struct drm_i915_private *i915)
+unsigned int intel_breadcrumbs_busy(struct drm_i915_private *i915)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
unsigned int mask = 0;
- /* To avoid the task_struct disappearing beneath us as we wake up
- * the process, we must first inspect the task_struct->state under the
- * RCU lock, i.e. as we call wake_up_process() we must be holding the
- * rcu_read_lock().
- */
- for_each_engine(engine, i915, id)
- if (unlikely(intel_engine_wakeup(engine)))
- mask |= intel_engine_flag(engine);
+ for_each_engine(engine, i915, id) {
+ struct intel_breadcrumbs *b = &engine->breadcrumbs;
- return mask;
-}
+ spin_lock_irq(&b->lock);
-unsigned int intel_kick_signalers(struct drm_i915_private *i915)
-{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- unsigned int mask = 0;
+ if (b->first_wait) {
+ wake_up_process(b->first_wait->tsk);
+ mask |= intel_engine_flag(engine);
+ }
- for_each_engine(engine, i915, id) {
- if (unlikely(READ_ONCE(engine->breadcrumbs.first_signal))) {
- wake_up_process(engine->breadcrumbs.signaler);
+ if (b->first_signal) {
+ wake_up_process(b->signaler);
mask |= intel_engine_flag(engine);
}
+
+ spin_unlock_irq(&b->lock);
}
return mask;
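
The rewritten helper replaces the RCU-based wakeups with b->lock: holding the spinlock across wake_up_process() guarantees the waiter's task_struct cannot disappear mid-wakeup, which is what the deleted comment relied on rcu_read_lock() for. The shape of that pattern, reduced to demo types (the real lock is per-engine):

#include <linux/sched.h>
#include <linux/spinlock.h>

struct demo_waiters {
	spinlock_t lock;
	struct task_struct *first_wait;	/* protected by lock */
};

static bool demo_kick(struct demo_waiters *w)
{
	bool kicked = false;

	/* The waiter clears first_wait under the same lock before it
	 * can exit, so the task pointer stays valid for the wakeup.
	 */
	spin_lock_irq(&w->lock);
	if (w->first_wait) {
		wake_up_process(w->first_wait);
		kicked = true;
	}
	spin_unlock_irq(&w->lock);

	return kicked;
}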
diff --git a/drivers/gpu/drm/i915/intel_color.c b/drivers/gpu/drm/i915/intel_color.c
index 445108855275..d81232b79f00 100644
--- a/drivers/gpu/drm/i915/intel_color.c
+++ b/drivers/gpu/drm/i915/intel_color.c
@@ -95,8 +95,7 @@ static void ctm_mult_by_limited(uint64_t *result, int64_t *input)
static void i9xx_load_csc_matrix(struct drm_crtc_state *crtc_state)
{
struct drm_crtc *crtc = crtc_state->crtc;
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int i, pipe = intel_crtc->pipe;
uint16_t coeffs[9] = { 0, };
@@ -180,7 +179,7 @@ static void i9xx_load_csc_matrix(struct drm_crtc_state *crtc_state)
I915_WRITE(PIPE_CSC_PREOFF_ME(pipe), 0);
I915_WRITE(PIPE_CSC_PREOFF_LO(pipe), 0);
- if (INTEL_INFO(dev)->gen > 6) {
+ if (INTEL_GEN(dev_priv) > 6) {
uint16_t postoff = 0;
if (intel_crtc_state->limited_color_range)
@@ -345,11 +344,10 @@ static void haswell_load_luts(struct drm_crtc_state *crtc_state)
static void broadwell_load_luts(struct drm_crtc_state *state)
{
struct drm_crtc *crtc = state->crtc;
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
struct intel_crtc_state *intel_state = to_intel_crtc_state(state);
enum pipe pipe = to_intel_crtc(crtc)->pipe;
- uint32_t i, lut_size = INTEL_INFO(dev)->color.degamma_lut_size;
+ uint32_t i, lut_size = INTEL_INFO(dev_priv)->color.degamma_lut_size;
if (crtc_state_is_legacy(state)) {
haswell_load_luts(state);
@@ -428,8 +426,7 @@ static void broadwell_load_luts(struct drm_crtc_state *state)
static void cherryview_load_luts(struct drm_crtc_state *state)
{
struct drm_crtc *crtc = state->crtc;
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
enum pipe pipe = to_intel_crtc(crtc)->pipe;
struct drm_color_lut *lut;
uint32_t i, lut_size;
@@ -446,7 +443,7 @@ static void cherryview_load_luts(struct drm_crtc_state *state)
if (state->degamma_lut) {
lut = (struct drm_color_lut *) state->degamma_lut->data;
- lut_size = INTEL_INFO(dev)->color.degamma_lut_size;
+ lut_size = INTEL_INFO(dev_priv)->color.degamma_lut_size;
for (i = 0; i < lut_size; i++) {
/* Write LUT in U0.14 format. */
word0 =
@@ -461,7 +458,7 @@ static void cherryview_load_luts(struct drm_crtc_state *state)
if (state->gamma_lut) {
lut = (struct drm_color_lut *) state->gamma_lut->data;
- lut_size = INTEL_INFO(dev)->color.gamma_lut_size;
+ lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size;
for (i = 0; i < lut_size; i++) {
/* Write LUT in U0.10 format. */
word0 =
@@ -497,12 +494,12 @@ void intel_color_load_luts(struct drm_crtc_state *crtc_state)
int intel_color_check(struct drm_crtc *crtc,
struct drm_crtc_state *crtc_state)
{
- struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
size_t gamma_length, degamma_length;
- degamma_length = INTEL_INFO(dev)->color.degamma_lut_size *
+ degamma_length = INTEL_INFO(dev_priv)->color.degamma_lut_size *
sizeof(struct drm_color_lut);
- gamma_length = INTEL_INFO(dev)->color.gamma_lut_size *
+ gamma_length = INTEL_INFO(dev_priv)->color.gamma_lut_size *
sizeof(struct drm_color_lut);
/*
@@ -529,8 +526,7 @@ int intel_color_check(struct drm_crtc *crtc,
void intel_color_init(struct drm_crtc *crtc)
{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
drm_mode_crtc_set_gamma_size(crtc, 256);
@@ -549,10 +545,10 @@ void intel_color_init(struct drm_crtc *crtc)
}
/* Enable color management support when we have degamma & gamma LUTs. */
- if (INTEL_INFO(dev)->color.degamma_lut_size != 0 &&
- INTEL_INFO(dev)->color.gamma_lut_size != 0)
+ if (INTEL_INFO(dev_priv)->color.degamma_lut_size != 0 &&
+ INTEL_INFO(dev_priv)->color.gamma_lut_size != 0)
drm_crtc_enable_color_mgmt(crtc,
- INTEL_INFO(dev)->color.degamma_lut_size,
- true,
- INTEL_INFO(dev)->color.gamma_lut_size);
+ INTEL_INFO(dev_priv)->color.degamma_lut_size,
+ true,
+ INTEL_INFO(dev_priv)->color.gamma_lut_size);
}
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 30eb95b54dcf..86ecec5601d4 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -147,14 +147,13 @@ static void intel_crt_set_dpms(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
int mode)
{
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crt *crt = intel_encoder_to_crt(encoder);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
u32 adpa;
- if (INTEL_INFO(dev)->gen >= 5)
+ if (INTEL_GEN(dev_priv) >= 5)
adpa = ADPA_HOTPLUG_BITS;
else
adpa = 0;
@@ -673,8 +672,7 @@ static const struct dmi_system_id intel_spurious_crt_detect[] = {
static enum drm_connector_status
intel_crt_detect(struct drm_connector *connector, bool force)
{
- struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(connector->dev);
struct intel_crt *crt = intel_attached_crt(connector);
struct intel_encoder *intel_encoder = &crt->base;
enum intel_display_power_domain power_domain;
@@ -693,7 +691,7 @@ intel_crt_detect(struct drm_connector *connector, bool force)
power_domain = intel_display_port_power_domain(intel_encoder);
intel_display_power_get(dev_priv, power_domain);
- if (I915_HAS_HOTPLUG(dev)) {
+ if (I915_HAS_HOTPLUG(dev_priv)) {
/* We cannot rely on the HPD pin always being correctly wired
* up, for example many KVMs do not pass it through, and so
* only trust an assertion that the monitor is connected.
@@ -715,7 +713,7 @@ intel_crt_detect(struct drm_connector *connector, bool force)
* broken monitor (without edid) to work behind a broken kvm (that fails
* to have the right resistors for HP detection) needs to fix this up.
* For now just bail out. */
- if (I915_HAS_HOTPLUG(dev) && !i915.load_detect_test) {
+ if (I915_HAS_HOTPLUG(dev_priv) && !i915.load_detect_test) {
status = connector_status_disconnected;
goto out;
}
@@ -731,7 +729,7 @@ intel_crt_detect(struct drm_connector *connector, bool force)
if (intel_get_load_detect_pipe(connector, NULL, &tmp, &ctx)) {
if (intel_crt_detect_ddc(connector))
status = connector_status_connected;
- else if (INTEL_INFO(dev)->gen < 4)
+ else if (INTEL_GEN(dev_priv) < 4)
status = intel_crt_load_detect(crt,
to_intel_crtc(connector->state->crtc)->pipe);
else if (i915.load_detect_test)
@@ -793,11 +791,10 @@ static int intel_crt_set_property(struct drm_connector *connector,
void intel_crt_reset(struct drm_encoder *encoder)
{
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->dev);
struct intel_crt *crt = intel_encoder_to_crt(to_intel_encoder(encoder));
- if (INTEL_INFO(dev)->gen >= 5) {
+ if (INTEL_GEN(dev_priv) >= 5) {
u32 adpa;
adpa = I915_READ(crt->adpa_reg);
@@ -915,7 +912,7 @@ void intel_crt_init(struct drm_device *dev)
crt->base.disable = intel_disable_crt;
}
crt->base.enable = intel_enable_crt;
- if (I915_HAS_HOTPLUG(dev) &&
+ if (I915_HAS_HOTPLUG(dev_priv) &&
!dmi_check_system(intel_spurious_crt_detect))
crt->base.hpd_pin = HPD_CRT;
if (HAS_DDI(dev_priv)) {
@@ -932,7 +929,7 @@ void intel_crt_init(struct drm_device *dev)
drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
- if (!I915_HAS_HOTPLUG(dev))
+ if (!I915_HAS_HOTPLUG(dev_priv))
intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT;
/*
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 938ac4dbcb45..10ec9d4b7d45 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -1753,8 +1753,7 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder,
struct drm_connector_state *old_conn_state)
{
struct drm_encoder *encoder = &intel_encoder->base;
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->dev);
enum port port = intel_ddi_get_encoder_port(intel_encoder);
int type = intel_encoder->type;
uint32_t val;
@@ -1787,7 +1786,7 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder,
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
I915_WRITE(DPLL_CTRL2, (I915_READ(DPLL_CTRL2) |
DPLL_CTRL2_DDI_CLK_OFF(port)));
- else if (INTEL_INFO(dev)->gen < 9)
+ else if (INTEL_GEN(dev_priv) < 9)
I915_WRITE(PORT_CLK_SEL(port), PORT_CLK_SEL_NONE);
if (type == INTEL_OUTPUT_HDMI) {
@@ -1837,8 +1836,7 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder,
struct drm_encoder *encoder = &intel_encoder->base;
struct drm_crtc *crtc = encoder->crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->dev);
enum port port = intel_ddi_get_encoder_port(intel_encoder);
int type = intel_encoder->type;
@@ -1856,7 +1854,7 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder,
} else if (type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- if (port == PORT_A && INTEL_INFO(dev)->gen < 9)
+ if (port == PORT_A && INTEL_GEN(dev_priv) < 9)
intel_dp_stop_link_train(intel_dp);
intel_edp_backlight_on(intel_dp);
@@ -1866,7 +1864,7 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder,
if (intel_crtc->config->has_audio) {
intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
- intel_audio_codec_enable(intel_encoder);
+ intel_audio_codec_enable(intel_encoder, pipe_config, conn_state);
}
}
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 6fd6283fa1f8..8d270f7650de 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1035,9 +1035,8 @@ enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
return crtc->config->cpu_transcoder;
}
-static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe)
+static bool pipe_dsl_stopped(struct drm_i915_private *dev_priv, enum pipe pipe)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
i915_reg_t reg = PIPEDSL(pipe);
u32 line1, line2;
u32 line_mask;
@@ -1072,12 +1071,11 @@ static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe)
*/
static void intel_wait_for_pipe_off(struct intel_crtc *crtc)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
enum pipe pipe = crtc->pipe;
- if (INTEL_INFO(dev)->gen >= 4) {
+ if (INTEL_GEN(dev_priv) >= 4) {
i915_reg_t reg = PIPECONF(cpu_transcoder);
/* Wait for the Pipe State to go off */
@@ -1087,7 +1085,7 @@ static void intel_wait_for_pipe_off(struct intel_crtc *crtc)
WARN(1, "pipe_off wait timed out\n");
} else {
/* Wait for the display line to settle */
- if (wait_for(pipe_dsl_stopped(dev, pipe), 100))
+ if (wait_for(pipe_dsl_stopped(dev_priv, pipe), 100))
WARN(1, "pipe_off wait timed out\n");
}
}
@@ -1293,11 +1291,10 @@ static void assert_plane(struct drm_i915_private *dev_priv,
static void assert_planes_disabled(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
- struct drm_device *dev = &dev_priv->drm;
int i;
/* Primary planes are fixed to pipes on gen4+ */
- if (INTEL_INFO(dev)->gen >= 4) {
+ if (INTEL_GEN(dev_priv) >= 4) {
u32 val = I915_READ(DSPCNTR(pipe));
I915_STATE_WARN(val & DISPLAY_PLANE_ENABLE,
"plane %c assertion failure, should be disabled but not\n",
@@ -1319,10 +1316,9 @@ static void assert_planes_disabled(struct drm_i915_private *dev_priv,
static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
- struct drm_device *dev = &dev_priv->drm;
int sprite;
- if (INTEL_INFO(dev)->gen >= 9) {
+ if (INTEL_GEN(dev_priv) >= 9) {
for_each_sprite(dev_priv, pipe, sprite) {
u32 val = I915_READ(PLANE_CTL(pipe, sprite));
I915_STATE_WARN(val & PLANE_CTL_ENABLE,
@@ -1336,12 +1332,12 @@ static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
"sprite %c assertion failure, should be off on pipe %c but is still active\n",
sprite_name(pipe, sprite), pipe_name(pipe));
}
- } else if (INTEL_INFO(dev)->gen >= 7) {
+ } else if (INTEL_GEN(dev_priv) >= 7) {
u32 val = I915_READ(SPRCTL(pipe));
I915_STATE_WARN(val & SPRITE_ENABLE,
"sprite %c assertion failure, should be off on pipe %c but is still active\n",
plane_name(pipe), pipe_name(pipe));
- } else if (INTEL_INFO(dev)->gen >= 5) {
+ } else if (INTEL_GEN(dev_priv) >= 5) {
u32 val = I915_READ(DVSCNTR(pipe));
I915_STATE_WARN(val & DVS_ENABLE,
"sprite %c assertion failure, should be off on pipe %c but is still active\n",
@@ -1595,12 +1591,12 @@ static void chv_enable_pll(struct intel_crtc *crtc,
}
}
-static int intel_num_dvo_pipes(struct drm_device *dev)
+static int intel_num_dvo_pipes(struct drm_i915_private *dev_priv)
{
struct intel_crtc *crtc;
int count = 0;
- for_each_intel_crtc(dev, crtc) {
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
count += crtc->base.state->active &&
intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DVO);
}
@@ -1610,8 +1606,7 @@ static int intel_num_dvo_pipes(struct drm_device *dev)
static void i9xx_enable_pll(struct intel_crtc *crtc)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
i915_reg_t reg = DPLL(crtc->pipe);
u32 dpll = crtc->config->dpll_hw_state.dpll;
@@ -1622,7 +1617,7 @@ static void i9xx_enable_pll(struct intel_crtc *crtc)
assert_panel_unlocked(dev_priv, crtc->pipe);
/* Enable DVO 2x clock on both PLLs if necessary */
- if (IS_I830(dev_priv) && intel_num_dvo_pipes(dev) > 0) {
+ if (IS_I830(dev_priv) && intel_num_dvo_pipes(dev_priv) > 0) {
/*
* It appears to be important that we don't enable this
* for the current pipe before otherwise configuring the
@@ -1647,7 +1642,7 @@ static void i9xx_enable_pll(struct intel_crtc *crtc)
POSTING_READ(reg);
udelay(150);
- if (INTEL_INFO(dev)->gen >= 4) {
+ if (INTEL_GEN(dev_priv) >= 4) {
I915_WRITE(DPLL_MD(crtc->pipe),
crtc->config->dpll_hw_state.dpll_md);
} else {
@@ -1682,14 +1677,13 @@ static void i9xx_enable_pll(struct intel_crtc *crtc)
*/
static void i9xx_disable_pll(struct intel_crtc *crtc)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
/* Disable DVO 2x clock on both PLLs if necessary */
if (IS_I830(dev_priv) &&
intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DVO) &&
- !intel_num_dvo_pipes(dev)) {
+ !intel_num_dvo_pipes(dev_priv)) {
I915_WRITE(DPLL(PIPE_B),
I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE);
I915_WRITE(DPLL(PIPE_A),
@@ -3004,11 +2998,9 @@ static void i9xx_update_primary_plane(struct drm_plane *primary,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
- struct drm_device *dev = primary->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(primary->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_framebuffer *fb = plane_state->base.fb;
- struct drm_i915_gem_object *obj = intel_fb_obj(fb);
int plane = intel_crtc->plane;
u32 linear_offset;
u32 dspcntr;
@@ -3021,7 +3013,7 @@ static void i9xx_update_primary_plane(struct drm_plane *primary,
dspcntr |= DISPLAY_PLANE_ENABLE;
- if (INTEL_INFO(dev)->gen < 4) {
+ if (INTEL_GEN(dev_priv) < 4) {
if (intel_crtc->pipe == PIPE_B)
dspcntr |= DISPPLANE_SEL_PIPE_B;
@@ -3070,25 +3062,31 @@ static void i9xx_update_primary_plane(struct drm_plane *primary,
fb->modifier[0] == I915_FORMAT_MOD_X_TILED)
dspcntr |= DISPPLANE_TILED;
+ if (rotation & DRM_ROTATE_180)
+ dspcntr |= DISPPLANE_ROTATE_180;
+
+ if (rotation & DRM_REFLECT_X)
+ dspcntr |= DISPPLANE_MIRROR;
+
if (IS_G4X(dev_priv))
dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
intel_add_fb_offsets(&x, &y, plane_state, 0);
- if (INTEL_INFO(dev)->gen >= 4)
+ if (INTEL_GEN(dev_priv) >= 4)
intel_crtc->dspaddr_offset =
intel_compute_tile_offset(&x, &y, plane_state, 0);
- if (rotation == DRM_ROTATE_180) {
- dspcntr |= DISPPLANE_ROTATE_180;
-
- x += (crtc_state->pipe_src_w - 1);
- y += (crtc_state->pipe_src_h - 1);
+ if (rotation & DRM_ROTATE_180) {
+ x += crtc_state->pipe_src_w - 1;
+ y += crtc_state->pipe_src_h - 1;
+ } else if (rotation & DRM_REFLECT_X) {
+ x += crtc_state->pipe_src_w - 1;
}
linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
- if (INTEL_INFO(dev)->gen < 4)
+ if (INTEL_GEN(dev_priv) < 4)
intel_crtc->dspaddr_offset = linear_offset;
intel_crtc->adjusted_x = x;
@@ -3097,14 +3095,17 @@ static void i9xx_update_primary_plane(struct drm_plane *primary,
I915_WRITE(reg, dspcntr);
I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
- if (INTEL_INFO(dev)->gen >= 4) {
+ if (INTEL_GEN(dev_priv) >= 4) {
I915_WRITE(DSPSURF(plane),
intel_fb_gtt_offset(fb, rotation) +
intel_crtc->dspaddr_offset);
I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
I915_WRITE(DSPLINOFF(plane), linear_offset);
- } else
- I915_WRITE(DSPADDR(plane), i915_gem_object_ggtt_offset(obj, NULL) + linear_offset);
+ } else {
+ I915_WRITE(DSPADDR(plane),
+ intel_fb_gtt_offset(fb, rotation) +
+ intel_crtc->dspaddr_offset);
+ }
POSTING_READ(reg);
}
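
The switch above from `rotation == DRM_ROTATE_180` to `rotation & DRM_ROTATE_180` matters because plane rotation is a bitmask: once DRM_REFLECT_X can be or'ed in (as the new DISPPLANE_MIRROR path does), an equality test silently stops matching. A small sketch of the failure mode -- the bit values mirror drm's BIT(n) encoding but are illustrative:

	#include <stdio.h>

	#define DRM_ROTATE_0	(1u << 0)
	#define DRM_ROTATE_180	(1u << 2)
	#define DRM_REFLECT_X	(1u << 4)

	int main(void)
	{
		unsigned int rotation = DRM_ROTATE_180 | DRM_REFLECT_X;

		printf("equality match: %d\n", rotation == DRM_ROTATE_180);    /* 0: misses */
		printf("bitmask match:  %d\n", !!(rotation & DRM_ROTATE_180)); /* 1: correct */
		return 0;
	}
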
@@ -3172,6 +3173,9 @@ static void ironlake_update_primary_plane(struct drm_plane *primary,
if (fb->modifier[0] == I915_FORMAT_MOD_X_TILED)
dspcntr |= DISPPLANE_TILED;
+ if (rotation & DRM_ROTATE_180)
+ dspcntr |= DISPPLANE_ROTATE_180;
+
if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv))
dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
@@ -3180,13 +3184,11 @@ static void ironlake_update_primary_plane(struct drm_plane *primary,
intel_crtc->dspaddr_offset =
intel_compute_tile_offset(&x, &y, plane_state, 0);
- if (rotation == DRM_ROTATE_180) {
- dspcntr |= DISPPLANE_ROTATE_180;
-
- if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)) {
- x += (crtc_state->pipe_src_w - 1);
- y += (crtc_state->pipe_src_h - 1);
- }
+ /* HSW+ does this automagically in hardware */
+ if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv) &&
+ rotation & DRM_ROTATE_180) {
+ x += crtc_state->pipe_src_w - 1;
+ y += crtc_state->pipe_src_h - 1;
}
linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
@@ -3376,9 +3378,6 @@ static void skylake_update_primary_plane(struct drm_plane *plane,
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_framebuffer *fb = plane_state->base.fb;
- const struct skl_wm_values *wm = &dev_priv->wm.skl_results;
- const struct skl_plane_wm *p_wm =
- &crtc_state->wm.skl.optimal.planes[0];
int pipe = intel_crtc->pipe;
u32 plane_ctl;
unsigned int rotation = plane_state->base.rotation;
@@ -3414,9 +3413,6 @@ static void skylake_update_primary_plane(struct drm_plane *plane,
intel_crtc->adjusted_x = src_x;
intel_crtc->adjusted_y = src_y;
- if (wm->dirty_pipes & drm_crtc_mask(&intel_crtc->base))
- skl_write_plane_wm(intel_crtc, p_wm, &wm->ddb, 0);
-
I915_WRITE(PLANE_CTL(pipe, 0), plane_ctl);
I915_WRITE(PLANE_OFFSET(pipe, 0), (src_y << 16) | src_x);
I915_WRITE(PLANE_STRIDE(pipe, 0), stride);
@@ -3449,18 +3445,8 @@ static void skylake_disable_primary_plane(struct drm_plane *primary,
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
- const struct skl_plane_wm *p_wm = &cstate->wm.skl.optimal.planes[0];
int pipe = intel_crtc->pipe;
- /*
- * We only populate skl_results on watermark updates, and if the
- * plane's visiblity isn't actually changing neither is its watermarks.
- */
- if (!crtc->primary->state->visible)
- skl_write_plane_wm(intel_crtc, p_wm,
- &dev_priv->wm.skl_results.ddb, 0);
-
I915_WRITE(PLANE_CTL(pipe, 0), 0);
I915_WRITE(PLANE_SURF(pipe, 0), 0);
POSTING_READ(PLANE_SURF(pipe, 0));
@@ -3510,7 +3496,7 @@ __intel_display_resume(struct drm_device *dev,
int i, ret;
intel_modeset_setup_hw_state(dev);
- i915_redisable_vga(dev);
+ i915_redisable_vga(to_i915(dev));
if (!state)
return 0;
@@ -3687,8 +3673,7 @@ static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
static void intel_update_pipe_config(struct intel_crtc *crtc,
struct intel_crtc_state *old_crtc_state)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_crtc_state *pipe_config =
to_intel_crtc_state(crtc->base.state);
@@ -3713,7 +3698,7 @@ static void intel_update_pipe_config(struct intel_crtc *crtc,
(pipe_config->pipe_src_h - 1));
/* on skylake this is done by detaching scalers */
- if (INTEL_INFO(dev)->gen >= 9) {
+ if (INTEL_GEN(dev_priv) >= 9) {
skl_detach_scalers(crtc);
if (pipe_config->pch_pfit.enabled)
@@ -4734,13 +4719,8 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
*/
int skl_update_scaler_crtc(struct intel_crtc_state *state)
{
- struct intel_crtc *intel_crtc = to_intel_crtc(state->base.crtc);
const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode;
- DRM_DEBUG_KMS("Updating scaler for [CRTC:%d:%s] scaler_user index %u.%u\n",
- intel_crtc->base.base.id, intel_crtc->base.name,
- intel_crtc->pipe, SKL_CRTC_INDEX);
-
return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
&state->scaler_state.scaler_id, DRM_ROTATE_0,
state->pipe_src_w, state->pipe_src_h,
@@ -4761,7 +4741,6 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state)
{
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
struct intel_plane *intel_plane =
to_intel_plane(plane_state->base.plane);
struct drm_framebuffer *fb = plane_state->base.fb;
@@ -4769,10 +4748,6 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
bool force_detach = !fb || !plane_state->base.visible;
- DRM_DEBUG_KMS("Updating scaler for [PLANE:%d:%s] scaler_user index %u.%u\n",
- intel_plane->base.base.id, intel_plane->base.name,
- intel_crtc->pipe, drm_plane_index(&intel_plane->base));
-
ret = skl_update_scaler(crtc_state, force_detach,
drm_plane_index(&intel_plane->base),
&plane_state->scaler_id,
@@ -5096,6 +5071,8 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state)
struct drm_plane_state *old_pri_state =
drm_atomic_get_existing_plane_state(old_state, primary);
bool modeset = needs_modeset(&pipe_config->base);
+ struct intel_atomic_state *old_intel_state =
+ to_intel_atomic_state(old_state);
if (old_pri_state) {
struct intel_plane_state *primary_state =
@@ -5163,7 +5140,8 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state)
* us to.
*/
if (dev_priv->display.initial_watermarks != NULL)
- dev_priv->display.initial_watermarks(pipe_config);
+ dev_priv->display.initial_watermarks(old_intel_state,
+ pipe_config);
else if (pipe_config->update_wm_pre)
intel_update_watermarks(crtc);
}
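
initial_watermarks here is an optional per-platform hook in the display vtable, and the change threads the whole atomic state through so an implementation can consult other CRTCs when programming safe pre-vblank values. A rough standalone sketch of that optional-hook shape, with placeholder structs rather than the driver's real ones:

	#include <stdio.h>

	struct intel_atomic_state { int dummy; };
	struct intel_crtc_state { int pipe; };

	struct display_funcs {
		/* optional: left NULL on platforms with single-step watermarks */
		void (*initial_watermarks)(struct intel_atomic_state *,
					   struct intel_crtc_state *);
	};

	static void ilk_initial_watermarks(struct intel_atomic_state *state,
					   struct intel_crtc_state *cstate)
	{
		(void)state;	/* a real hook would inspect other CRTCs here */
		printf("programming pre-vblank watermarks for pipe %d\n",
		       cstate->pipe);
	}

	int main(void)
	{
		struct display_funcs funcs = {
			.initial_watermarks = ilk_initial_watermarks,
		};
		struct intel_atomic_state state = { 0 };
		struct intel_crtc_state cstate = { .pipe = 0 };

		if (funcs.initial_watermarks)	/* hook is optional */
			funcs.initial_watermarks(&state, &cstate);
		return 0;
	}
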
@@ -5319,6 +5297,8 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
+ struct intel_atomic_state *old_intel_state =
+ to_intel_atomic_state(old_state);
if (WARN_ON(intel_crtc->active))
return;
@@ -5377,7 +5357,7 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
intel_color_load_luts(&pipe_config->base);
if (dev_priv->display.initial_watermarks != NULL)
- dev_priv->display.initial_watermarks(intel_crtc->config);
+ dev_priv->display.initial_watermarks(old_intel_state, intel_crtc->config);
intel_enable_pipe(intel_crtc);
if (intel_crtc->config->has_pch_encoder)
@@ -5408,11 +5388,12 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
struct drm_atomic_state *old_state)
{
struct drm_crtc *crtc = pipe_config->base.crtc;
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe, hsw_workaround_pipe;
enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
+ struct intel_atomic_state *old_intel_state =
+ to_intel_atomic_state(old_state);
if (WARN_ON(intel_crtc->active))
return;
@@ -5467,7 +5448,7 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
if (!transcoder_is_dsi(cpu_transcoder))
intel_ddi_enable_pipe_clock(intel_crtc);
- if (INTEL_INFO(dev)->gen >= 9)
+ if (INTEL_GEN(dev_priv) >= 9)
skylake_pfit_enable(intel_crtc);
else
ironlake_pfit_enable(intel_crtc);
@@ -5483,7 +5464,8 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
intel_ddi_enable_transcoder_func(crtc);
if (dev_priv->display.initial_watermarks != NULL)
- dev_priv->display.initial_watermarks(pipe_config);
+ dev_priv->display.initial_watermarks(old_intel_state,
+ pipe_config);
else
intel_update_watermarks(intel_crtc);
@@ -5494,7 +5476,7 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
if (intel_crtc->config->has_pch_encoder)
lpt_pch_enable(crtc);
- if (intel_crtc->config->dp_encoder_is_mst)
+ if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DP_MST))
intel_ddi_set_vc_payload_alloc(crtc, true);
assert_vblank_disabled(crtc);
@@ -5599,8 +5581,7 @@ static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
struct drm_atomic_state *old_state)
{
struct drm_crtc *crtc = old_crtc_state->base.crtc;
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
@@ -5617,13 +5598,13 @@ static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
if (!transcoder_is_dsi(cpu_transcoder))
intel_disable_pipe(intel_crtc);
- if (intel_crtc->config->dp_encoder_is_mst)
+ if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DP_MST))
intel_ddi_set_vc_payload_alloc(crtc, false);
if (!transcoder_is_dsi(cpu_transcoder))
intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
- if (INTEL_INFO(dev)->gen >= 9)
+ if (INTEL_GEN(dev_priv) >= 9)
skylake_scaler_disable(intel_crtc);
else
ironlake_pfit_disable(intel_crtc, false);
@@ -7051,7 +7032,7 @@ static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
}
}
- if (INTEL_INFO(dev)->num_pipes == 2)
+ if (INTEL_INFO(dev_priv)->num_pipes == 2)
return 0;
/* Ivybridge 3 pipe is really complicated */
@@ -7192,7 +7173,7 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
int clock_limit = dev_priv->max_dotclk_freq;
- if (INTEL_INFO(dev)->gen < 4) {
+ if (INTEL_GEN(dev_priv) < 4) {
clock_limit = dev_priv->max_cdclk_freq * 9 / 10;
/*
@@ -7786,12 +7767,11 @@ static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
struct intel_link_m_n *m_n,
struct intel_link_m_n *m2_n2)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
int pipe = crtc->pipe;
enum transcoder transcoder = crtc->config->cpu_transcoder;
- if (INTEL_INFO(dev)->gen >= 5) {
+ if (INTEL_GEN(dev_priv) >= 5) {
I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
@@ -8245,8 +8225,7 @@ static void i8xx_compute_dpll(struct intel_crtc *crtc,
static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
{
- struct drm_device *dev = intel_crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
enum pipe pipe = intel_crtc->pipe;
enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
@@ -8272,7 +8251,7 @@ static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
vsyncshift += adjusted_mode->crtc_htotal;
}
- if (INTEL_INFO(dev)->gen > 3)
+ if (INTEL_GEN(dev_priv) > 3)
I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);
I915_WRITE(HTOTAL(cpu_transcoder),
@@ -8395,8 +8374,7 @@ void intel_mode_from_pipe_config(struct drm_display_mode *mode,
static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
{
- struct drm_device *dev = intel_crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
uint32_t pipeconf;
pipeconf = 0;
@@ -8432,7 +8410,7 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
}
}
- if (HAS_PIPE_CXSR(dev)) {
+ if (HAS_PIPE_CXSR(dev_priv)) {
if (intel_crtc->lowfreq_avail) {
DRM_DEBUG_KMS("enabling CxSR downclocking\n");
pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
@@ -8442,7 +8420,7 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
}
if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
- if (INTEL_INFO(dev)->gen < 4 ||
+ if (INTEL_GEN(dev_priv) < 4 ||
intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO))
pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
else
@@ -8650,8 +8628,7 @@ static int vlv_crtc_compute_clock(struct intel_crtc *crtc,
static void i9xx_get_pfit_config(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
uint32_t tmp;
if (INTEL_GEN(dev_priv) <= 3 &&
@@ -8663,7 +8640,7 @@ static void i9xx_get_pfit_config(struct intel_crtc *crtc,
return;
/* Check whether the pfit is attached to our pipe. */
- if (INTEL_INFO(dev)->gen < 4) {
+ if (INTEL_GEN(dev_priv) < 4) {
if (crtc->pipe != PIPE_B)
return;
} else {
@@ -8727,7 +8704,7 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
fb = &intel_fb->base;
- if (INTEL_INFO(dev)->gen >= 4) {
+ if (INTEL_GEN(dev_priv) >= 4) {
if (val & DISPPLANE_TILED) {
plane_config->tiling = I915_TILING_X;
fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
@@ -8739,7 +8716,7 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
fb->pixel_format = fourcc;
fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;
- if (INTEL_INFO(dev)->gen >= 4) {
+ if (INTEL_GEN(dev_priv) >= 4) {
if (plane_config->tiling)
offset = I915_READ(DSPTILEOFF(plane));
else
@@ -8808,8 +8785,7 @@ static void chv_crtc_clock_get(struct intel_crtc *crtc,
static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum intel_display_power_domain power_domain;
uint32_t tmp;
bool ret;
@@ -8848,7 +8824,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
(tmp & PIPECONF_COLOR_RANGE_SELECT))
pipe_config->limited_color_range = true;
- if (INTEL_INFO(dev)->gen < 4)
+ if (INTEL_GEN(dev_priv) < 4)
pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;
intel_get_pipe_timings(crtc, pipe_config);
@@ -8856,7 +8832,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
i9xx_get_pfit_config(crtc, pipe_config);
- if (INTEL_INFO(dev)->gen >= 4) {
+ if (INTEL_GEN(dev_priv) >= 4) {
/* No way to read it out on pipes B and C */
if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
tmp = dev_priv->chv_dpll_md[crtc->pipe];
@@ -9653,11 +9629,10 @@ static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
struct intel_link_m_n *m_n,
struct intel_link_m_n *m2_n2)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- if (INTEL_INFO(dev)->gen >= 5) {
+ if (INTEL_GEN(dev_priv) >= 5) {
m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder));
m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder));
m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
@@ -9669,7 +9644,7 @@ static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
* gen < 8) and if DRRS is supported (to make sure the
* registers are not unnecessarily read).
*/
- if (m2_n2 && INTEL_INFO(dev)->gen < 8 &&
+ if (m2_n2 && INTEL_GEN(dev_priv) < 8 &&
crtc->config->has_drrs) {
m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder));
m2_n2->link_n = I915_READ(PIPE_LINK_N2(transcoder));
@@ -9871,7 +9846,7 @@ ironlake_get_initial_plane_config(struct intel_crtc *crtc,
fb = &intel_fb->base;
- if (INTEL_INFO(dev)->gen >= 4) {
+ if (INTEL_GEN(dev_priv) >= 4) {
if (val & DISPPLANE_TILED) {
plane_config->tiling = I915_TILING_X;
fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
@@ -10661,8 +10636,7 @@ static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_shared_dpll *pll;
enum port port;
uint32_t tmp;
@@ -10689,7 +10663,7 @@ static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
* DDI E. So just check whether this pipe is wired to DDI E and whether
* the PCH transcoder is on.
*/
- if (INTEL_INFO(dev)->gen < 9 &&
+ if (INTEL_GEN(dev_priv) < 9 &&
(port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
pipe_config->has_pch_encoder = true;
@@ -10704,8 +10678,7 @@ static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
static bool haswell_get_pipe_config(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum intel_display_power_domain power_domain;
unsigned long power_domain_mask;
bool active;
@@ -10738,7 +10711,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
pipe_config->gamma_mode =
I915_READ(GAMMA_MODE(crtc->pipe)) & GAMMA_MODE_MODE_MASK;
- if (INTEL_INFO(dev)->gen >= 9) {
+ if (INTEL_GEN(dev_priv) >= 9) {
skl_init_scalers(dev_priv, crtc, pipe_config);
pipe_config->scaler_state.scaler_id = -1;
@@ -10748,7 +10721,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
power_domain_mask |= BIT(power_domain);
- if (INTEL_INFO(dev)->gen >= 9)
+ if (INTEL_GEN(dev_priv) >= 9)
skylake_get_pfit_config(crtc, pipe_config);
else
ironlake_get_pfit_config(crtc, pipe_config);
@@ -10842,16 +10815,9 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base,
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
- const struct skl_wm_values *wm = &dev_priv->wm.skl_results;
- const struct skl_plane_wm *p_wm =
- &cstate->wm.skl.optimal.planes[PLANE_CURSOR];
int pipe = intel_crtc->pipe;
uint32_t cntl = 0;
- if (INTEL_GEN(dev_priv) >= 9 && wm->dirty_pipes & drm_crtc_mask(crtc))
- skl_write_cursor_wm(intel_crtc, p_wm, &wm->ddb);
-
if (plane_state && plane_state->base.visible) {
cntl = MCURSOR_GAMMA_ENABLE;
switch (plane_state->base.crtc_w) {
@@ -10873,7 +10839,7 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base,
if (HAS_DDI(dev_priv))
cntl |= CURSOR_PIPE_CSC_ENABLE;
- if (plane_state->base.rotation == DRM_ROTATE_180)
+ if (plane_state->base.rotation & DRM_ROTATE_180)
cntl |= CURSOR_ROTATE_180;
}
@@ -10919,7 +10885,7 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
/* ILK+ do this automagically */
if (HAS_GMCH_DISPLAY(dev_priv) &&
- plane_state->base.rotation == DRM_ROTATE_180) {
+ plane_state->base.rotation & DRM_ROTATE_180) {
base += (plane_state->base.crtc_h *
plane_state->base.crtc_w - 1) * 4;
}
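
For GMCH platforms the 180-degree fixup above moves the scanout base to the last pixel of the cursor buffer, i.e. forward by the buffer size minus one pixel. A quick arithmetic check, assuming a 64x64 cursor at 4 bytes per pixel (ARGB8888):

	#include <stdio.h>

	int main(void)
	{
		int crtc_w = 64, crtc_h = 64;	/* a common cursor size */
		unsigned int adjust = (crtc_h * crtc_w - 1) * 4;

		/* (64 * 64 - 1) * 4 = 16380 bytes, one pixel short of the end */
		printf("base offset adjustment: %u bytes\n", adjust);
		return 0;
	}
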
@@ -12062,6 +12028,7 @@ static void intel_mmio_flip_work_func(struct work_struct *w)
to_intel_framebuffer(crtc->base.primary->fb);
struct drm_i915_gem_object *obj = intel_fb->obj;
+ i915_gem_object_wait_priority(obj, 0, I915_PRIORITY_DISPLAY);
WARN_ON(i915_gem_object_wait(obj, 0, MAX_SCHEDULE_TIMEOUT, NULL) < 0);
intel_pipe_update_start(crtc);
@@ -12186,7 +12153,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
* TILEOFF/LINOFF registers can't be changed via MI display flips.
* Note that pitch changes could also affect these registers.
*/
- if (INTEL_INFO(dev)->gen > 3 &&
+ if (INTEL_GEN(dev_priv) > 3 &&
(fb->offsets[0] != crtc->primary->fb->offsets[0] ||
fb->pitches[0] != crtc->primary->fb->pitches[0]))
return -EINVAL;
@@ -12261,7 +12228,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
engine = NULL;
} else if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv)) {
engine = dev_priv->engine[BCS];
- } else if (INTEL_INFO(dev)->gen >= 7) {
+ } else if (INTEL_GEN(dev_priv) >= 7) {
engine = i915_gem_object_last_write_engine(obj);
if (engine == NULL || engine->id != RCS)
engine = dev_priv->engine[BCS];
@@ -12518,7 +12485,7 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
/* Pre-gen9 platforms need two-step watermark updates */
if ((pipe_config->update_wm_pre || pipe_config->update_wm_post) &&
- INTEL_INFO(dev)->gen < 9 && dev_priv->display.optimize_watermarks)
+ INTEL_GEN(dev_priv) < 9 && dev_priv->display.optimize_watermarks)
to_intel_crtc_state(crtc_state)->wm.need_postvbl_update = true;
if (visible || was_visible)
@@ -12623,7 +12590,7 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
* old state and the new state. We can program these
* immediately.
*/
- ret = dev_priv->display.compute_intermediate_wm(crtc->dev,
+ ret = dev_priv->display.compute_intermediate_wm(dev,
intel_crtc,
pipe_config);
if (ret) {
@@ -12635,7 +12602,7 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
pipe_config->wm.ilk.intermediate = pipe_config->wm.ilk.optimal;
}
- if (INTEL_INFO(dev)->gen >= 9) {
+ if (INTEL_GEN(dev_priv) >= 9) {
if (mode_changed)
ret = skl_update_scaler_crtc(pipe_config);
@@ -12748,6 +12715,16 @@ static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
mode->crtc_vsync_end, mode->crtc_vtotal, mode->type, mode->flags);
}
+static inline void
+intel_dump_m_n_config(struct intel_crtc_state *pipe_config, char *id,
+ unsigned int lane_count, struct intel_link_m_n *m_n)
+{
+ DRM_DEBUG_KMS("%s: lanes: %i; gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
+ id, lane_count,
+ m_n->gmch_m, m_n->gmch_n,
+ m_n->link_m, m_n->link_n, m_n->tu);
+}
+
static void intel_dump_pipe_config(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config,
const char *context)
@@ -12759,61 +12736,58 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
struct intel_plane_state *state;
struct drm_framebuffer *fb;
- DRM_DEBUG_KMS("[CRTC:%d:%s]%s config %p for pipe %c\n",
- crtc->base.base.id, crtc->base.name,
- context, pipe_config, pipe_name(crtc->pipe));
+ DRM_DEBUG_KMS("[CRTC:%d:%s]%s\n",
+ crtc->base.base.id, crtc->base.name, context);
- DRM_DEBUG_KMS("cpu_transcoder: %s\n", transcoder_name(pipe_config->cpu_transcoder));
- DRM_DEBUG_KMS("pipe bpp: %i, dithering: %i\n",
+ DRM_DEBUG_KMS("cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
+ transcoder_name(pipe_config->cpu_transcoder),
pipe_config->pipe_bpp, pipe_config->dither);
- DRM_DEBUG_KMS("fdi/pch: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
- pipe_config->has_pch_encoder,
- pipe_config->fdi_lanes,
- pipe_config->fdi_m_n.gmch_m, pipe_config->fdi_m_n.gmch_n,
- pipe_config->fdi_m_n.link_m, pipe_config->fdi_m_n.link_n,
- pipe_config->fdi_m_n.tu);
- DRM_DEBUG_KMS("dp: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
- intel_crtc_has_dp_encoder(pipe_config),
- pipe_config->lane_count,
- pipe_config->dp_m_n.gmch_m, pipe_config->dp_m_n.gmch_n,
- pipe_config->dp_m_n.link_m, pipe_config->dp_m_n.link_n,
- pipe_config->dp_m_n.tu);
-
- DRM_DEBUG_KMS("dp: %i, lanes: %i, gmch_m2: %u, gmch_n2: %u, link_m2: %u, link_n2: %u, tu2: %u\n",
- intel_crtc_has_dp_encoder(pipe_config),
- pipe_config->lane_count,
- pipe_config->dp_m2_n2.gmch_m,
- pipe_config->dp_m2_n2.gmch_n,
- pipe_config->dp_m2_n2.link_m,
- pipe_config->dp_m2_n2.link_n,
- pipe_config->dp_m2_n2.tu);
+
+ if (pipe_config->has_pch_encoder)
+ intel_dump_m_n_config(pipe_config, "fdi",
+ pipe_config->fdi_lanes,
+ &pipe_config->fdi_m_n);
+
+ if (intel_crtc_has_dp_encoder(pipe_config)) {
+ intel_dump_m_n_config(pipe_config, "dp m_n",
+ pipe_config->lane_count, &pipe_config->dp_m_n);
+ if (pipe_config->has_drrs)
+ intel_dump_m_n_config(pipe_config, "dp m2_n2",
+ pipe_config->lane_count,
+ &pipe_config->dp_m2_n2);
+ }
DRM_DEBUG_KMS("audio: %i, infoframes: %i\n",
- pipe_config->has_audio,
- pipe_config->has_infoframe);
+ pipe_config->has_audio, pipe_config->has_infoframe);
DRM_DEBUG_KMS("requested mode:\n");
drm_mode_debug_printmodeline(&pipe_config->base.mode);
DRM_DEBUG_KMS("adjusted mode:\n");
drm_mode_debug_printmodeline(&pipe_config->base.adjusted_mode);
intel_dump_crtc_timings(&pipe_config->base.adjusted_mode);
- DRM_DEBUG_KMS("port clock: %d\n", pipe_config->port_clock);
- DRM_DEBUG_KMS("pipe src size: %dx%d\n",
+ DRM_DEBUG_KMS("port clock: %d, pipe src size: %dx%d\n",
+ pipe_config->port_clock,
pipe_config->pipe_src_w, pipe_config->pipe_src_h);
- DRM_DEBUG_KMS("num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
- crtc->num_scalers,
- pipe_config->scaler_state.scaler_users,
- pipe_config->scaler_state.scaler_id);
- DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
- pipe_config->gmch_pfit.control,
- pipe_config->gmch_pfit.pgm_ratios,
- pipe_config->gmch_pfit.lvds_border_bits);
- DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s\n",
- pipe_config->pch_pfit.pos,
- pipe_config->pch_pfit.size,
- pipe_config->pch_pfit.enabled ? "enabled" : "disabled");
- DRM_DEBUG_KMS("ips: %i\n", pipe_config->ips_enabled);
- DRM_DEBUG_KMS("double wide: %i\n", pipe_config->double_wide);
+
+ if (INTEL_GEN(dev_priv) >= 9)
+ DRM_DEBUG_KMS("num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
+ crtc->num_scalers,
+ pipe_config->scaler_state.scaler_users,
+ pipe_config->scaler_state.scaler_id);
+
+ if (HAS_GMCH_DISPLAY(dev_priv))
+ DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
+ pipe_config->gmch_pfit.control,
+ pipe_config->gmch_pfit.pgm_ratios,
+ pipe_config->gmch_pfit.lvds_border_bits);
+ else
+ DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s\n",
+ pipe_config->pch_pfit.pos,
+ pipe_config->pch_pfit.size,
+ enableddisabled(pipe_config->pch_pfit.enabled));
+
+ DRM_DEBUG_KMS("ips: %i, double wide: %i\n",
+ pipe_config->ips_enabled, pipe_config->double_wide);
if (IS_BROXTON(dev_priv)) {
DRM_DEBUG_KMS("dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
@@ -12864,20 +12838,20 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
continue;
}
- DRM_DEBUG_KMS("[PLANE:%d:%s] enabled",
- plane->base.id, plane->name);
- DRM_DEBUG_KMS("\tFB:%d, fb = %ux%u format = %s",
+ DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d, fb = %ux%u format = %s\n",
+ plane->base.id, plane->name,
fb->base.id, fb->width, fb->height,
drm_get_format_name(fb->pixel_format, &format_name));
- DRM_DEBUG_KMS("\tscaler:%d src %dx%d+%d+%d dst %dx%d+%d+%d\n",
- state->scaler_id,
- state->base.src.x1 >> 16,
- state->base.src.y1 >> 16,
- drm_rect_width(&state->base.src) >> 16,
- drm_rect_height(&state->base.src) >> 16,
- state->base.dst.x1, state->base.dst.y1,
- drm_rect_width(&state->base.dst),
- drm_rect_height(&state->base.dst));
+ if (INTEL_GEN(dev_priv) >= 9)
+ DRM_DEBUG_KMS("\tscaler:%d src %dx%d+%d+%d dst %dx%d+%d+%d\n",
+ state->scaler_id,
+ state->base.src.x1 >> 16,
+ state->base.src.y1 >> 16,
+ drm_rect_width(&state->base.src) >> 16,
+ drm_rect_height(&state->base.src) >> 16,
+ state->base.dst.x1, state->base.dst.y1,
+ drm_rect_width(&state->base.dst),
+ drm_rect_height(&state->base.dst));
}
}
@@ -13192,12 +13166,11 @@ intel_compare_link_m_n(const struct intel_link_m_n *m_n,
}
static bool
-intel_pipe_config_compare(struct drm_device *dev,
+intel_pipe_config_compare(struct drm_i915_private *dev_priv,
struct intel_crtc_state *current_config,
struct intel_crtc_state *pipe_config,
bool adjust)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
bool ret = true;
#define INTEL_ERR_OR_DBG_KMS(fmt, ...) \
@@ -13317,7 +13290,7 @@ intel_pipe_config_compare(struct drm_device *dev,
PIPE_CONF_CHECK_I(lane_count);
PIPE_CONF_CHECK_X(lane_lat_optim_mask);
- if (INTEL_INFO(dev)->gen < 8) {
+ if (INTEL_GEN(dev_priv) < 8) {
PIPE_CONF_CHECK_M_N(dp_m_n);
if (current_config->has_drrs)
@@ -13366,7 +13339,7 @@ intel_pipe_config_compare(struct drm_device *dev,
PIPE_CONF_CHECK_X(gmch_pfit.control);
/* pfit ratios are autocomputed by the hw on gen4+ */
- if (INTEL_INFO(dev)->gen < 4)
+ if (INTEL_GEN(dev_priv) < 4)
PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);
@@ -13441,8 +13414,7 @@ static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
static void verify_wm_state(struct drm_crtc *crtc,
struct drm_crtc_state *new_state)
{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
struct skl_ddb_allocation hw_ddb, *sw_ddb;
struct skl_pipe_wm hw_wm, *sw_wm;
struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;
@@ -13451,7 +13423,7 @@ static void verify_wm_state(struct drm_crtc *crtc,
const enum pipe pipe = intel_crtc->pipe;
int plane, level, max_level = ilk_wm_max_level(dev_priv);
- if (INTEL_INFO(dev)->gen < 9 || !new_state->active)
+ if (INTEL_GEN(dev_priv) < 9 || !new_state->active)
return;
skl_pipe_wm_get_hw_state(crtc, &hw_wm);
@@ -13557,11 +13529,15 @@ static void verify_wm_state(struct drm_crtc *crtc,
}
static void
-verify_connector_state(struct drm_device *dev, struct drm_crtc *crtc)
+verify_connector_state(struct drm_device *dev,
+ struct drm_atomic_state *state,
+ struct drm_crtc *crtc)
{
struct drm_connector *connector;
+ struct drm_connector_state *old_conn_state;
+ int i;
- drm_for_each_connector(connector, dev) {
+ for_each_connector_in_state(state, connector, old_conn_state, i) {
struct drm_encoder *encoder = connector->encoder;
struct drm_connector_state *state = connector->state;
@@ -13676,7 +13652,7 @@ verify_crtc_state(struct drm_crtc *crtc,
intel_pipe_config_sanity_check(dev_priv, pipe_config);
sw_config = to_intel_crtc_state(crtc->state);
- if (!intel_pipe_config_compare(dev, sw_config,
+ if (!intel_pipe_config_compare(dev_priv, sw_config,
pipe_config, false)) {
I915_STATE_WARN(1, "pipe state doesn't match!\n");
intel_dump_pipe_config(intel_crtc, pipe_config,
@@ -13769,15 +13745,16 @@ verify_shared_dpll_state(struct drm_device *dev, struct drm_crtc *crtc,
static void
intel_modeset_verify_crtc(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state,
- struct drm_crtc_state *new_state)
+ struct drm_atomic_state *state,
+ struct drm_crtc_state *old_state,
+ struct drm_crtc_state *new_state)
{
if (!needs_modeset(new_state) &&
!to_intel_crtc_state(new_state)->update_pipe)
return;
verify_wm_state(crtc, new_state);
- verify_connector_state(crtc->dev, crtc);
+ verify_connector_state(crtc->dev, state, crtc);
verify_crtc_state(crtc, old_state, new_state);
verify_shared_dpll_state(crtc->dev, crtc, old_state, new_state);
}
@@ -13793,10 +13770,11 @@ verify_disabled_dpll_state(struct drm_device *dev)
}
static void
-intel_modeset_verify_disabled(struct drm_device *dev)
+intel_modeset_verify_disabled(struct drm_device *dev,
+ struct drm_atomic_state *state)
{
verify_encoder_state(dev);
- verify_connector_state(dev, NULL);
+ verify_connector_state(dev, state, NULL);
verify_disabled_dpll_state(dev);
}
@@ -14094,7 +14072,7 @@ static int intel_atomic_check(struct drm_device *dev,
}
if (i915.fastboot &&
- intel_pipe_config_compare(dev,
+ intel_pipe_config_compare(dev_priv,
to_intel_crtc_state(crtc->state),
pipe_config, true)) {
crtc_state->mode_changed = false;
@@ -14294,6 +14272,14 @@ static void skl_update_crtcs(struct drm_atomic_state *state,
unsigned int updated = 0;
bool progress;
enum pipe pipe;
+ int i;
+
+ const struct skl_ddb_entry *entries[I915_MAX_PIPES] = {};
+
+ for_each_crtc_in_state(state, crtc, old_crtc_state, i)
+ /* ignore allocations for CRTCs that have been turned off. */
+ if (crtc->state->active)
+ entries[i] = &to_intel_crtc_state(old_crtc_state)->wm.skl.ddb;
/*
* Whenever the number of active pipes changes, we need to make sure we
@@ -14302,7 +14288,6 @@ static void skl_update_crtcs(struct drm_atomic_state *state,
* cause pipe underruns and other bad stuff.
*/
do {
- int i;
progress = false;
for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
@@ -14313,12 +14298,14 @@ static void skl_update_crtcs(struct drm_atomic_state *state,
cstate = to_intel_crtc_state(crtc->state);
pipe = intel_crtc->pipe;
- if (updated & cmask || !crtc->state->active)
+ if (updated & cmask || !cstate->base.active)
continue;
- if (skl_ddb_allocation_overlaps(state, intel_crtc))
+
+ if (skl_ddb_allocation_overlaps(entries, &cstate->wm.skl.ddb, i))
continue;
updated |= cmask;
+ entries[i] = &cstate->wm.skl.ddb;
/*
* If this is an already active pipe, it's DDB changed,
@@ -14327,7 +14314,7 @@ static void skl_update_crtcs(struct drm_atomic_state *state,
* new ddb allocation to take effect.
*/
if (!skl_ddb_entry_equal(&cstate->wm.skl.ddb,
- &intel_crtc->hw_ddb) &&
+ &to_intel_crtc_state(old_crtc_state)->wm.skl.ddb) &&
!crtc->state->active_changed &&
intel_state->wm_results.dirty_pipes != updated)
vbl_wait = true;
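
The entries[] array introduced above snapshots each active pipe's old DDB slice so skl_update_crtcs() can flush a pipe only once its new allocation no longer overlaps anyone else's still-live old one. A simplified model of that overlap test -- placeholder structs and helper, not the real skl_ddb_entry/skl_ddb_allocation_overlaps():

	#include <stdbool.h>
	#include <stdio.h>

	#define I915_MAX_PIPES 3

	struct skl_ddb_entry { unsigned short start, end; };	/* [start, end) */

	static bool ddb_overlaps(const struct skl_ddb_entry *old[],
				 const struct skl_ddb_entry *new, int skip)
	{
		for (int i = 0; i < I915_MAX_PIPES; i++) {
			if (i == skip || !old[i])
				continue;
			/* classic half-open interval overlap test */
			if (new->start < old[i]->end && old[i]->start < new->end)
				return true;
		}
		return false;
	}

	int main(void)
	{
		struct skl_ddb_entry a = { 0, 256 }, b = { 256, 512 };
		struct skl_ddb_entry new_a = { 0, 400 };	/* grows into B's old slice */
		const struct skl_ddb_entry *old[I915_MAX_PIPES] = { &a, &b, NULL };

		/* pipe A must wait until pipe B's update lands: prints 1 */
		printf("pipe A must wait: %d\n", ddb_overlaps(old, &new_a, 0));
		return 0;
	}
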
@@ -14358,14 +14345,8 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
drm_atomic_helper_wait_for_dependencies(state);
- if (intel_state->modeset) {
- memcpy(dev_priv->min_pixclk, intel_state->min_pixclk,
- sizeof(intel_state->min_pixclk));
- dev_priv->active_crtcs = intel_state->active_crtcs;
- dev_priv->atomic_cdclk_freq = intel_state->cdclk;
-
+ if (intel_state->modeset)
intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
- }
for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -14398,8 +14379,17 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
intel_check_cpu_fifo_underruns(dev_priv);
intel_check_pch_fifo_underruns(dev_priv);
- if (!crtc->state->active)
- intel_update_watermarks(intel_crtc);
+ if (!crtc->state->active) {
+ /*
+ * Make sure we don't call initial_watermarks
+ * for ILK-style watermark updates.
+ */
+ if (dev_priv->display.atomic_update_watermarks)
+ dev_priv->display.initial_watermarks(intel_state,
+ to_intel_crtc_state(crtc->state));
+ else
+ intel_update_watermarks(intel_crtc);
+ }
}
}
@@ -14422,7 +14412,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
if (!intel_can_enable_sagv(state))
intel_disable_sagv(dev_priv);
- intel_modeset_verify_disabled(dev);
+ intel_modeset_verify_disabled(dev, state);
}
/* Complete the events for pipes that have now been disabled */
@@ -14465,7 +14455,8 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
intel_cstate = to_intel_crtc_state(crtc->state);
if (dev_priv->display.optimize_watermarks)
- dev_priv->display.optimize_watermarks(intel_cstate);
+ dev_priv->display.optimize_watermarks(intel_state,
+ intel_cstate);
}
for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
@@ -14474,7 +14465,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
if (put_domains[i])
modeset_put_power_domains(dev_priv, put_domains[i]);
- intel_modeset_verify_crtc(crtc, old_crtc_state, crtc->state);
+ intel_modeset_verify_crtc(crtc, state, old_crtc_state, crtc->state);
}
if (intel_state->modeset && intel_can_enable_sagv(state))
@@ -14557,10 +14548,6 @@ static void intel_atomic_track_fbs(struct drm_atomic_state *state)
* This function commits a top-level state object that has been validated
* with drm_atomic_helper_check().
*
- * FIXME: Atomic modeset support for i915 is not yet complete. At the moment
- * nonblocking commits are only safe for pure plane updates. Everything else
- * should work though.
- *
* RETURNS
* Zero for success or -errno.
*/
@@ -14572,11 +14559,6 @@ static int intel_atomic_commit(struct drm_device *dev,
struct drm_i915_private *dev_priv = to_i915(dev);
int ret = 0;
- if (intel_state->modeset && nonblock) {
- DRM_DEBUG_KMS("nonblocking commit for modeset not yet implemented.\n");
- return -EINVAL;
- }
-
ret = drm_atomic_helper_setup_commit(state, nonblock);
if (ret)
return ret;
@@ -14594,10 +14576,16 @@ static int intel_atomic_commit(struct drm_device *dev,
drm_atomic_helper_swap_state(state, true);
dev_priv->wm.distrust_bios_wm = false;
- dev_priv->wm.skl_results = intel_state->wm_results;
intel_shared_dpll_commit(state);
intel_atomic_track_fbs(state);
+ if (intel_state->modeset) {
+ memcpy(dev_priv->min_pixclk, intel_state->min_pixclk,
+ sizeof(intel_state->min_pixclk));
+ dev_priv->active_crtcs = intel_state->active_crtcs;
+ dev_priv->atomic_cdclk_freq = intel_state->cdclk;
+ }
+
drm_atomic_state_get(state);
INIT_WORK(&state->commit_work,
nonblock ? intel_atomic_commit_work : NULL);
@@ -14720,8 +14708,7 @@ intel_prepare_plane_fb(struct drm_plane *plane,
{
struct intel_atomic_state *intel_state =
to_intel_atomic_state(new_state->state);
- struct drm_device *dev = plane->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(plane->dev);
struct drm_framebuffer *fb = new_state->fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb);
@@ -14775,10 +14762,12 @@ intel_prepare_plane_fb(struct drm_plane *plane,
GFP_KERNEL);
if (ret < 0)
return ret;
+
+ i915_gem_object_wait_priority(obj, 0, I915_PRIORITY_DISPLAY);
}
if (plane->type == DRM_PLANE_TYPE_CURSOR &&
- INTEL_INFO(dev)->cursor_needs_physical) {
+ INTEL_INFO(dev_priv)->cursor_needs_physical) {
int align = IS_I830(dev_priv) ? 16 * 1024 : 256;
ret = i915_gem_object_attach_phys(obj, align);
if (ret) {
@@ -14811,7 +14800,7 @@ void
intel_cleanup_plane_fb(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
- struct drm_device *dev = plane->dev;
+ struct drm_i915_private *dev_priv = to_i915(plane->dev);
struct intel_plane_state *old_intel_state;
struct drm_i915_gem_object *old_obj = intel_fb_obj(old_state->fb);
struct drm_i915_gem_object *obj = intel_fb_obj(plane->state->fb);
@@ -14822,7 +14811,7 @@ intel_cleanup_plane_fb(struct drm_plane *plane,
return;
if (old_obj && (plane->type != DRM_PLANE_TYPE_CURSOR ||
- !INTEL_INFO(dev)->cursor_needs_physical))
+ !INTEL_INFO(dev_priv)->cursor_needs_physical))
intel_unpin_fb_obj(old_state->fb, old_state->rotation);
}
@@ -14900,30 +14889,32 @@ static void intel_begin_crtc_commit(struct drm_crtc *crtc,
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_crtc_state *intel_cstate =
to_intel_crtc_state(crtc->state);
- struct intel_crtc_state *old_intel_state =
+ struct intel_crtc_state *old_intel_cstate =
to_intel_crtc_state(old_crtc_state);
+ struct intel_atomic_state *old_intel_state =
+ to_intel_atomic_state(old_crtc_state->state);
bool modeset = needs_modeset(crtc->state);
- enum pipe pipe = intel_crtc->pipe;
/* Perform vblank evasion around commit operation */
intel_pipe_update_start(intel_crtc);
if (modeset)
- return;
+ goto out;
if (crtc->state->color_mgmt_changed || to_intel_crtc_state(crtc->state)->update_pipe) {
intel_color_set_csc(crtc->state);
intel_color_load_luts(crtc->state);
}
- if (intel_cstate->update_pipe) {
- intel_update_pipe_config(intel_crtc, old_intel_state);
- } else if (INTEL_GEN(dev_priv) >= 9) {
+ if (intel_cstate->update_pipe)
+ intel_update_pipe_config(intel_crtc, old_intel_cstate);
+ else if (INTEL_GEN(dev_priv) >= 9)
skl_detach_scalers(intel_crtc);
- I915_WRITE(PIPE_WM_LINETIME(pipe),
- intel_cstate->wm.skl.optimal.linetime);
- }
+out:
+ if (dev_priv->display.atomic_update_watermarks)
+ dev_priv->display.atomic_update_watermarks(old_intel_state,
+ intel_cstate);
}
static void intel_finish_crtc_commit(struct drm_crtc *crtc,
@@ -14989,11 +14980,16 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
state->scaler_id = -1;
}
primary->pipe = pipe;
- primary->plane = pipe;
+ /*
+ * On gen2/3 only plane A can do FBC, but the panel fitter and LVDS
+ * port are hooked to pipe B. Hence we want plane A feeding pipe B.
+ */
+ if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4)
+ primary->plane = (enum plane) !pipe;
+ else
+ primary->plane = (enum plane) pipe;
primary->frontbuffer_bit = INTEL_FRONTBUFFER_PRIMARY(pipe);
primary->check_plane = intel_check_primary_plane;
- if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4)
- primary->plane = !pipe;
if (INTEL_GEN(dev_priv) >= 9) {
intel_primary_formats = skl_primary_formats;
@@ -15046,6 +15042,10 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
supported_rotations =
DRM_ROTATE_0 | DRM_ROTATE_90 |
DRM_ROTATE_180 | DRM_ROTATE_270;
+ } else if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
+ supported_rotations =
+ DRM_ROTATE_0 | DRM_ROTATE_180 |
+ DRM_REFLECT_X;
} else if (INTEL_GEN(dev_priv) >= 4) {
supported_rotations =
DRM_ROTATE_0 | DRM_ROTATE_180;
@@ -15147,13 +15147,13 @@ intel_update_cursor_plane(struct drm_plane *plane,
{
struct drm_crtc *crtc = crtc_state->base.crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct drm_device *dev = plane->dev;
+ struct drm_i915_private *dev_priv = to_i915(plane->dev);
struct drm_i915_gem_object *obj = intel_fb_obj(state->base.fb);
uint32_t addr;
if (!obj)
addr = 0;
- else if (!INTEL_INFO(dev)->cursor_needs_physical)
+ else if (!INTEL_INFO(dev_priv)->cursor_needs_physical)
addr = i915_gem_object_ggtt_offset(obj, NULL);
else
addr = obj->phys_handle->busaddr;
@@ -15280,14 +15280,14 @@ static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
struct intel_plane *plane;
plane = intel_sprite_plane_create(dev_priv, pipe, sprite);
- if (!plane) {
+ if (IS_ERR(plane)) {
ret = PTR_ERR(plane);
goto fail;
}
}
cursor = intel_cursor_plane_create(dev_priv, pipe);
- if (!cursor) {
+ if (IS_ERR(cursor)) {
ret = PTR_ERR(cursor);
goto fail;
}
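
Both hunks above fix the same bug: intel_sprite_plane_create() and intel_cursor_plane_create() report failure as an ERR_PTR-encoded errno, so the old NULL checks could never fire and PTR_ERR() was being taken on a valid pointer. A userspace-sized sketch of the kernel's err.h convention, simplified from the real helpers:

	#include <stdio.h>

	#define MAX_ERRNO 4095

	static inline void *ERR_PTR(long error) { return (void *)error; }
	static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
	static inline int IS_ERR(const void *ptr)
	{
		/* errnos live in the top page of the address space */
		return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
	}

	int main(void)
	{
		void *plane = ERR_PTR(-12);	/* e.g. -ENOMEM from a failed create */

		printf("NULL check fires:   %d\n", plane == NULL);	/* 0 -- the bug */
		printf("IS_ERR check fires: %d\n", IS_ERR(plane));	/* 1 -- the fix */
		if (IS_ERR(plane))
			printf("propagated error:   %ld\n", PTR_ERR(plane));
		return 0;
	}
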
@@ -15299,16 +15299,8 @@ static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
if (ret)
goto fail;
- /*
- * On gen2/3 only plane A can do fbc, but the panel fitter and lvds port
- * is hooked to pipe B. Hence we want plane A feeding pipe B.
- */
intel_crtc->pipe = pipe;
- intel_crtc->plane = (enum plane) pipe;
- if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4) {
- DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
- intel_crtc->plane = !pipe;
- }
+ intel_crtc->plane = primary->plane;
intel_crtc->cursor_base = ~0;
intel_crtc->cursor_cntl = ~0;
@@ -15401,11 +15393,9 @@ static bool has_edp_a(struct drm_i915_private *dev_priv)
return true;
}
-static bool intel_crt_present(struct drm_device *dev)
+static bool intel_crt_present(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
- if (INTEL_INFO(dev)->gen >= 9)
+ if (INTEL_GEN(dev_priv) >= 9)
return false;
if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
@@ -15479,7 +15469,7 @@ static void intel_setup_outputs(struct drm_device *dev)
*/
intel_lvds_init(dev);
- if (intel_crt_present(dev))
+ if (intel_crt_present(dev_priv))
intel_crt_init(dev);
if (IS_BROXTON(dev_priv)) {
@@ -15527,7 +15517,7 @@ static void intel_setup_outputs(struct drm_device *dev)
} else if (HAS_PCH_SPLIT(dev_priv)) {
int found;
- dpd_is_edp = intel_dp_is_edp(dev, PORT_D);
+ dpd_is_edp = intel_dp_is_edp(dev_priv, PORT_D);
if (has_edp_a(dev_priv))
intel_dp_init(dev, DP_A, PORT_A);
@@ -15570,14 +15560,14 @@ static void intel_setup_outputs(struct drm_device *dev)
* trust the port type the VBT declares as we've seen at least
* HDMI ports that the VBT claim are DP or eDP.
*/
- has_edp = intel_dp_is_edp(dev, PORT_B);
+ has_edp = intel_dp_is_edp(dev_priv, PORT_B);
has_port = intel_bios_is_port_present(dev_priv, PORT_B);
if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port)
has_edp &= intel_dp_init(dev, VLV_DP_B, PORT_B);
if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
intel_hdmi_init(dev, VLV_HDMIB, PORT_B);
- has_edp = intel_dp_is_edp(dev, PORT_C);
+ has_edp = intel_dp_is_edp(dev_priv, PORT_C);
has_port = intel_bios_is_port_present(dev_priv, PORT_C);
if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port)
has_edp &= intel_dp_init(dev, VLV_DP_C, PORT_C);
@@ -15634,7 +15624,7 @@ static void intel_setup_outputs(struct drm_device *dev)
} else if (IS_GEN2(dev_priv))
intel_dvo_init(dev);
- if (SUPPORTS_TV(dev))
+ if (SUPPORTS_TV(dev_priv))
intel_tv_init(dev);
intel_psr_init(dev);
@@ -15689,6 +15679,8 @@ static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj = intel_fb->obj;
mutex_lock(&dev->struct_mutex);
+ if (obj->pin_display && obj->cache_dirty)
+ i915_gem_clflush_object(obj, true);
intel_fb_obj_flush(obj, false, ORIGIN_DIRTYFB);
mutex_unlock(&dev->struct_mutex);
@@ -15769,7 +15761,7 @@ static int intel_framebuffer_init(struct drm_device *dev,
switch (mode_cmd->modifier[0]) {
case I915_FORMAT_MOD_Y_TILED:
case I915_FORMAT_MOD_Yf_TILED:
- if (INTEL_INFO(dev)->gen < 9) {
+ if (INTEL_GEN(dev_priv) < 9) {
DRM_DEBUG("Unsupported tiling 0x%llx!\n",
mode_cmd->modifier[0]);
return -EINVAL;
@@ -15832,7 +15824,7 @@ static int intel_framebuffer_init(struct drm_device *dev,
case DRM_FORMAT_ARGB8888:
break;
case DRM_FORMAT_XRGB1555:
- if (INTEL_INFO(dev)->gen > 3) {
+ if (INTEL_GEN(dev_priv) > 3) {
DRM_DEBUG("unsupported pixel format: %s\n",
drm_get_format_name(mode_cmd->pixel_format, &format_name));
return -EINVAL;
@@ -15840,7 +15832,7 @@ static int intel_framebuffer_init(struct drm_device *dev,
break;
case DRM_FORMAT_ABGR8888:
if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
- INTEL_INFO(dev)->gen < 9) {
+ INTEL_GEN(dev_priv) < 9) {
DRM_DEBUG("unsupported pixel format: %s\n",
drm_get_format_name(mode_cmd->pixel_format, &format_name));
return -EINVAL;
@@ -15849,7 +15841,7 @@ static int intel_framebuffer_init(struct drm_device *dev,
case DRM_FORMAT_XBGR8888:
case DRM_FORMAT_XRGB2101010:
case DRM_FORMAT_XBGR2101010:
- if (INTEL_INFO(dev)->gen < 4) {
+ if (INTEL_GEN(dev_priv) < 4) {
DRM_DEBUG("unsupported pixel format: %s\n",
drm_get_format_name(mode_cmd->pixel_format, &format_name));
return -EINVAL;
@@ -15866,7 +15858,7 @@ static int intel_framebuffer_init(struct drm_device *dev,
case DRM_FORMAT_UYVY:
case DRM_FORMAT_YVYU:
case DRM_FORMAT_VYUY:
- if (INTEL_INFO(dev)->gen < 5) {
+ if (INTEL_GEN(dev_priv) < 5) {
DRM_DEBUG("unsupported pixel format: %s\n",
drm_get_format_name(mode_cmd->pixel_format, &format_name));
return -EINVAL;
@@ -16295,9 +16287,8 @@ static void intel_init_quirks(struct drm_device *dev)
}
/* Disable the VGA plane that we never use */
-static void i915_disable_vga(struct drm_device *dev)
+static void i915_disable_vga(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct pci_dev *pdev = dev_priv->drm.pdev;
u8 sr1;
i915_reg_t vga_reg = i915_vgacntrl_reg(dev_priv);
@@ -16339,6 +16330,7 @@ static void sanitize_watermarks(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_atomic_state *state;
+ struct intel_atomic_state *intel_state;
struct drm_crtc *crtc;
struct drm_crtc_state *cstate;
struct drm_modeset_acquire_ctx ctx;
@@ -16367,12 +16359,14 @@ retry:
if (WARN_ON(IS_ERR(state)))
goto fail;
+ intel_state = to_intel_atomic_state(state);
+
/*
* Hardware readout is the only time we don't want to calculate
* intermediate watermarks (since we don't trust the current
* watermarks).
*/
- to_intel_atomic_state(state)->skip_intermediate_wm = true;
+ intel_state->skip_intermediate_wm = true;
ret = intel_atomic_check(dev, state);
if (ret) {
@@ -16396,7 +16390,7 @@ retry:
struct intel_crtc_state *cs = to_intel_crtc_state(cstate);
cs->wm.need_postvbl_update = true;
- dev_priv->display.optimize_watermarks(cs);
+ dev_priv->display.optimize_watermarks(intel_state, cs);
}
put_state:
@@ -16429,7 +16423,7 @@ int intel_modeset_init(struct drm_device *dev)
intel_init_pm(dev_priv);
- if (INTEL_INFO(dev)->num_pipes == 0)
+ if (INTEL_INFO(dev_priv)->num_pipes == 0)
return 0;
/*
@@ -16475,8 +16469,8 @@ int intel_modeset_init(struct drm_device *dev)
dev->mode_config.fb_base = ggtt->mappable_base;
DRM_DEBUG_KMS("%d display pipe%s available.\n",
- INTEL_INFO(dev)->num_pipes,
- INTEL_INFO(dev)->num_pipes > 1 ? "s" : "");
+ INTEL_INFO(dev_priv)->num_pipes,
+ INTEL_INFO(dev_priv)->num_pipes > 1 ? "s" : "");
for_each_pipe(dev_priv, pipe) {
int ret;
@@ -16497,7 +16491,7 @@ int intel_modeset_init(struct drm_device *dev)
intel_update_max_cdclk(dev_priv);
/* Just disable it once at startup */
- i915_disable_vga(dev);
+ i915_disable_vga(dev_priv);
intel_setup_outputs(dev);
drm_modeset_lock_all(dev);
@@ -16564,11 +16558,10 @@ static void intel_enable_pipe_a(struct drm_device *dev)
static bool
intel_check_plane_mapping(struct intel_crtc *crtc)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 val;
- if (INTEL_INFO(dev)->num_pipes == 1)
+ if (INTEL_INFO(dev_priv)->num_pipes == 1)
return true;
val = I915_READ(DSPCNTR(!crtc->plane));
@@ -16642,7 +16635,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
/* We need to sanitize the plane -> pipe mapping first because this will
* disable the crtc (and hence change the state) if it is wrong. Note
* that gen4+ has a fixed plane -> pipe mapping. */
- if (INTEL_INFO(dev)->gen < 4 && !intel_check_plane_mapping(crtc)) {
+ if (INTEL_GEN(dev_priv) < 4 && !intel_check_plane_mapping(crtc)) {
bool plane;
DRM_DEBUG_KMS("[CRTC:%d:%s] wrong plane connection detected!\n",
@@ -16744,21 +16737,18 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
* the crtc fixup. */
}
-void i915_redisable_vga_power_on(struct drm_device *dev)
+void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
i915_reg_t vga_reg = i915_vgacntrl_reg(dev_priv);
if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
- i915_disable_vga(dev);
+ i915_disable_vga(dev_priv);
}
}
-void i915_redisable_vga(struct drm_device *dev)
+void i915_redisable_vga(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
/* This function can be called either from intel_modeset_setup_hw_state or
* at a very early point in our resume sequence, where the power well
* structures are not yet restored. Since this function is at a very
@@ -16769,7 +16759,7 @@ void i915_redisable_vga(struct drm_device *dev)
if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_VGA))
return;
- i915_redisable_vga_power_on(dev);
+ i915_redisable_vga_power_on(dev_priv);
intel_display_power_put(dev_priv, POWER_DOMAIN_VGA);
}
@@ -16841,7 +16831,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n",
crtc->base.base.id, crtc->base.name,
- crtc->active ? "enabled" : "disabled");
+ enableddisabled(crtc->active));
}
for (i = 0; i < dev_priv->num_shared_dpll; i++) {
@@ -16874,9 +16864,8 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
}
DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
- encoder->base.base.id,
- encoder->base.name,
- encoder->base.crtc ? "enabled" : "disabled",
+ encoder->base.base.id, encoder->base.name,
+ enableddisabled(encoder->base.crtc),
pipe_name(pipe));
}
@@ -16905,9 +16894,8 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
connector->base.encoder = NULL;
}
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
- connector->base.base.id,
- connector->base.name,
- connector->base.encoder ? "enabled" : "disabled");
+ connector->base.base.id, connector->base.name,
+ enableddisabled(connector->base.encoder));
}
for_each_intel_crtc(dev, crtc) {
@@ -17155,10 +17143,9 @@ void intel_connector_attach_encoder(struct intel_connector *connector,
/*
* set vga decode state - true == enable VGA decode
*/
-int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
+int intel_modeset_vga_set_state(struct drm_i915_private *dev_priv, bool state)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
- unsigned reg = INTEL_INFO(dev)->gen >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
+ unsigned reg = INTEL_GEN(dev_priv) >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
u16 gmch_ctrl;
if (pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl)) {
@@ -17312,16 +17299,15 @@ intel_display_capture_error_state(struct drm_i915_private *dev_priv)
void
intel_display_print_error_state(struct drm_i915_error_state_buf *m,
- struct drm_device *dev,
+ struct drm_i915_private *dev_priv,
struct intel_display_error_state *error)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
int i;
if (!error)
return;
- err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev)->num_pipes);
+ err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev_priv)->num_pipes);
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
err_printf(m, "PWR_WELL_CTL2: %08x\n",
error->power_well_driver);
@@ -17335,13 +17321,13 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m,
err_printf(m, "Plane [%d]:\n", i);
err_printf(m, " CNTR: %08x\n", error->plane[i].control);
err_printf(m, " STRIDE: %08x\n", error->plane[i].stride);
- if (INTEL_INFO(dev)->gen <= 3) {
+ if (INTEL_GEN(dev_priv) <= 3) {
err_printf(m, " SIZE: %08x\n", error->plane[i].size);
err_printf(m, " POS: %08x\n", error->plane[i].pos);
}
if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
err_printf(m, " ADDR: %08x\n", error->plane[i].addr);
- if (INTEL_INFO(dev)->gen >= 4) {
+ if (INTEL_GEN(dev_priv) >= 4) {
err_printf(m, " SURF: %08x\n", error->plane[i].surface);
err_printf(m, " TILEOFF: %08x\n", error->plane[i].tile_offset);
}
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 9df331b3305b..90283edcafba 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -942,14 +942,14 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
uint8_t *recv, int recv_size)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = intel_dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv =
+ to_i915(intel_dig_port->base.base.dev);
i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg;
uint32_t aux_clock_divider;
int i, ret, recv_bytes;
uint32_t status;
int try, clock = 0;
- bool has_aux_irq = HAS_AUX_IRQ(dev);
+ bool has_aux_irq = HAS_AUX_IRQ(dev_priv);
bool vdd;
pps_lock(intel_dp);
@@ -1542,8 +1542,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
enum port port = dp_to_dig_port(intel_dp)->port;
@@ -1578,7 +1577,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
adjusted_mode);
- if (INTEL_INFO(dev)->gen >= 9) {
+ if (INTEL_GEN(dev_priv) >= 9) {
int ret;
ret = skl_update_scaler_crtc(pipe_config);
if (ret)
@@ -1791,9 +1790,7 @@ static void intel_dp_prepare(struct intel_encoder *encoder,
trans_dp &= ~TRANS_DP_ENH_FRAMING;
I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
} else {
- if (!HAS_PCH_SPLIT(dev_priv) && !IS_VALLEYVIEW(dev_priv) &&
- !IS_CHERRYVIEW(dev_priv) &&
- pipe_config->limited_color_range)
+ if (IS_G4X(dev_priv) && pipe_config->limited_color_range)
intel_dp->DP |= DP_COLOR_RANGE_16_235;
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
@@ -2515,8 +2512,7 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
pipe_config->base.adjusted_mode.flags |= flags;
- if (!HAS_PCH_SPLIT(dev_priv) && !IS_VALLEYVIEW(dev_priv) &&
- !IS_CHERRYVIEW(dev_priv) && tmp & DP_COLOR_RANGE_16_235)
+ if (IS_G4X(dev_priv) && tmp & DP_COLOR_RANGE_16_235)
pipe_config->limited_color_range = true;
pipe_config->lane_count =
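The two IS_G4X() hunks above lean on a platform identity rather than a behaviour change: among the platforms that reach this non-PCH DP path, excluding PCH-split parts and VLV/CHV leaves only G4X, the one family whose DP port register carries the DP_COLOR_RANGE_16_235 bit. A hedged restatement as a predicate; the helper name is ours and the equivalence is our reading of the change, not a claim from the patch:

/* equivalent forms of the same test, assuming the i915 platform
 * macros visible elsewhere in this file */
static bool dp_port_has_color_range_bit(struct drm_i915_private *dev_priv)
{
	/* old spelling: !HAS_PCH_SPLIT(dev_priv) &&
	 *               !IS_VALLEYVIEW(dev_priv) &&
	 *               !IS_CHERRYVIEW(dev_priv)
	 * new spelling: */
	return IS_G4X(dev_priv);
}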
@@ -2735,7 +2731,8 @@ static void intel_dp_enable_port(struct intel_dp *intel_dp,
}
static void intel_enable_dp(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config)
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
struct drm_device *dev = encoder->base.dev;
@@ -2777,7 +2774,7 @@ static void intel_enable_dp(struct intel_encoder *encoder,
if (pipe_config->has_audio) {
DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
pipe_name(pipe));
- intel_audio_codec_enable(encoder);
+ intel_audio_codec_enable(encoder, pipe_config, conn_state);
}
}
@@ -2787,7 +2784,7 @@ static void g4x_enable_dp(struct intel_encoder *encoder,
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
- intel_enable_dp(encoder, pipe_config);
+ intel_enable_dp(encoder, pipe_config, conn_state);
intel_edp_backlight_on(intel_dp);
}
@@ -2924,7 +2921,7 @@ static void vlv_pre_enable_dp(struct intel_encoder *encoder,
{
vlv_phy_pre_encoder_enable(encoder);
- intel_enable_dp(encoder, pipe_config);
+ intel_enable_dp(encoder, pipe_config, conn_state);
}
static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder,
@@ -2942,7 +2939,7 @@ static void chv_pre_enable_dp(struct intel_encoder *encoder,
{
chv_phy_pre_encoder_enable(encoder);
- intel_enable_dp(encoder, pipe_config);
+ intel_enable_dp(encoder, pipe_config, conn_state);
/* Second common lane will stay alive on its own now */
chv_phy_release_cl2_override(encoder);
@@ -2979,13 +2976,12 @@ intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_
uint8_t
intel_dp_voltage_max(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
enum port port = dp_to_dig_port(intel_dp)->port;
if (IS_BROXTON(dev_priv))
return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
- else if (INTEL_INFO(dev)->gen >= 9) {
+ else if (INTEL_GEN(dev_priv) >= 9) {
if (dev_priv->vbt.edp.low_vswing && port == PORT_A)
return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
@@ -4873,15 +4869,13 @@ put_power:
}
/* check the VBT to see whether the eDP is on another port */
-bool intel_dp_is_edp(struct drm_device *dev, enum port port)
+bool intel_dp_is_edp(struct drm_i915_private *dev_priv, enum port port)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
/*
* eDP is not supported on g4x, so bail out early just
* for a bit of extra safety in case the VBT is bonkers.
*/
- if (INTEL_INFO(dev)->gen < 5)
+ if (INTEL_GEN(dev_priv) < 5)
return false;
if (port == PORT_A)
@@ -5483,7 +5477,7 @@ intel_dp_drrs_init(struct intel_connector *intel_connector,
INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
mutex_init(&dev_priv->drrs.mutex);
- if (INTEL_INFO(dev)->gen <= 6) {
+ if (INTEL_GEN(dev_priv) <= 6) {
DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
return NULL;
}
@@ -5657,7 +5651,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
intel_dp->pps_pipe = INVALID_PIPE;
/* intel_dp vfuncs */
- if (INTEL_INFO(dev)->gen >= 9)
+ if (INTEL_GEN(dev_priv) >= 9)
intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
@@ -5666,7 +5660,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
else
intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider;
- if (INTEL_INFO(dev)->gen >= 9)
+ if (INTEL_GEN(dev_priv) >= 9)
intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
else
intel_dp->get_aux_send_ctl = g4x_get_aux_send_ctl;
@@ -5678,7 +5672,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
intel_dp->DP = I915_READ(intel_dp->output_reg);
intel_dp->attached_connector = intel_connector;
- if (intel_dp_is_edp(dev, port))
+ if (intel_dp_is_edp(dev_priv, port))
type = DRM_MODE_CONNECTOR_eDP;
else
type = DRM_MODE_CONNECTOR_DisplayPort;
@@ -5742,7 +5736,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
}
/* init MST on ports that can support it */
- if (HAS_DP_MST(dev) && !is_edp(intel_dp) &&
+ if (HAS_DP_MST(dev_priv) && !is_edp(intel_dp) &&
(port == PORT_B || port == PORT_C || port == PORT_D))
intel_dp_mst_encoder_init(intel_dig_port,
intel_connector->base.base.id);
@@ -5816,7 +5810,7 @@ bool intel_dp_init(struct drm_device *dev,
} else {
intel_encoder->pre_enable = g4x_pre_enable_dp;
intel_encoder->enable = g4x_enable_dp;
- if (INTEL_INFO(dev)->gen >= 5)
+ if (INTEL_GEN(dev_priv) >= 5)
intel_encoder->post_disable = ilk_post_disable_dp;
}
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index 3ffbd69e4551..b029d1026a28 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -43,7 +43,6 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
int mst_pbn;
- pipe_config->dp_encoder_is_mst = true;
pipe_config->has_pch_encoder = false;
bpp = 24;
/*
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c
index 21853a17b6d9..58a756f2f224 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c
@@ -188,13 +188,12 @@ out:
void intel_disable_shared_dpll(struct intel_crtc *crtc)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_shared_dpll *pll = crtc->config->shared_dpll;
unsigned crtc_mask = 1 << drm_crtc_index(&crtc->base);
/* PCH only available on ILK+ */
- if (INTEL_INFO(dev)->gen < 5)
+ if (INTEL_GEN(dev_priv) < 5)
return;
if (pll == NULL)
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 398195bf6dd1..cd132c216a67 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -294,6 +294,9 @@ struct intel_connector {
*/
struct intel_encoder *encoder;
+ /* ACPI device id for ACPI and driver cooperation */
+ u32 acpi_device_id;
+
/* Reads out the current hw, returning true if the connector is enabled
* and active (i.e. dpms ON state). */
bool (*get_hw_state)(struct intel_connector *);
@@ -652,7 +655,6 @@ struct intel_crtc_state {
bool double_wide;
- bool dp_encoder_is_mst;
int pbn;
struct intel_crtc_scaler_state scaler_state;
@@ -728,9 +730,6 @@ struct intel_crtc {
bool cxsr_allowed;
} wm;
- /* gen9+: ddb allocation currently being used */
- struct skl_ddb_entry hw_ddb;
-
int scanline_offset;
struct {
@@ -1187,7 +1186,9 @@ u32 intel_fb_stride_alignment(const struct drm_i915_private *dev_priv,
/* intel_audio.c */
void intel_init_audio_hooks(struct drm_i915_private *dev_priv);
-void intel_audio_codec_enable(struct intel_encoder *encoder);
+void intel_audio_codec_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state);
void intel_audio_codec_disable(struct intel_encoder *encoder);
void i915_audio_component_init(struct drm_i915_private *dev_priv);
void i915_audio_component_cleanup(struct drm_i915_private *dev_priv);
@@ -1392,7 +1393,7 @@ int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc);
bool intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state);
-bool intel_dp_is_edp(struct drm_device *dev, enum port port);
+bool intel_dp_is_edp(struct drm_i915_private *dev_priv, enum port port);
enum irqreturn intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port,
bool long_hpd);
void intel_edp_backlight_on(struct intel_dp *intel_dp);
@@ -1738,18 +1739,9 @@ int intel_enable_sagv(struct drm_i915_private *dev_priv);
int intel_disable_sagv(struct drm_i915_private *dev_priv);
bool skl_wm_level_equals(const struct skl_wm_level *l1,
const struct skl_wm_level *l2);
-bool skl_ddb_allocation_equals(const struct skl_ddb_allocation *old,
- const struct skl_ddb_allocation *new,
- enum pipe pipe);
-bool skl_ddb_allocation_overlaps(struct drm_atomic_state *state,
- struct intel_crtc *intel_crtc);
-void skl_write_cursor_wm(struct intel_crtc *intel_crtc,
- const struct skl_plane_wm *wm,
- const struct skl_ddb_allocation *ddb);
-void skl_write_plane_wm(struct intel_crtc *intel_crtc,
- const struct skl_plane_wm *wm,
- const struct skl_ddb_allocation *ddb,
- int plane);
+bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry **entries,
+ const struct skl_ddb_entry *ddb,
+ int ignore);
uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config);
bool ilk_disable_lp_wm(struct drm_device *dev);
int sanitize_rc6_option(struct drm_i915_private *dev_priv, int enable_rc6);
diff --git a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
index 9f279a3d0f74..0d8ff0034b88 100644
--- a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
+++ b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
@@ -774,9 +774,8 @@ struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id)
8);
intel_dsi->clk_hs_to_lp_count += extra_byte_count;
- DRM_DEBUG_KMS("Eot %s\n", intel_dsi->eotp_pkt ? "enabled" : "disabled");
- DRM_DEBUG_KMS("Clockstop %s\n", intel_dsi->clock_stop ?
- "disabled" : "enabled");
+ DRM_DEBUG_KMS("Eot %s\n", enableddisabled(intel_dsi->eotp_pkt));
+ DRM_DEBUG_KMS("Clockstop %s\n", enableddisabled(!intel_dsi->clock_stop));
DRM_DEBUG_KMS("Mode %s\n", intel_dsi->operation_mode ? "command" : "video");
if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK)
DRM_DEBUG_KMS("Dual link: DSI_DUAL_LINK_FRONT_BACK\n");
@@ -795,8 +794,7 @@ struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id)
DRM_DEBUG_KMS("LP to HS Clock Count 0x%x\n", intel_dsi->clk_lp_to_hs_count);
DRM_DEBUG_KMS("HS to LP Clock Count 0x%x\n", intel_dsi->clk_hs_to_lp_count);
DRM_DEBUG_KMS("BTA %s\n",
- intel_dsi->video_frmt_cfg_bits & DISABLE_VIDEO_BTA ?
- "disabled" : "enabled");
+ enableddisabled(!(intel_dsi->video_frmt_cfg_bits & DISABLE_VIDEO_BTA)));
/* delays in VBT are in units of 100us, so they need to be converted
* to ms here
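The truncated comment above refers to the 100us-to-ms conversion applied to the VBT panel power sequencing delays. A hedged sketch of that arithmetic; the divide-by-10 is our reading of the surrounding code, and the helper name is illustrative:

/* VBT stores these delays in 100us units; integer division by 10
 * yields whole milliseconds, e.g. a stored 25 (2500us) becomes 2ms */
static inline unsigned int vbt_delay_100us_to_ms(unsigned int v)
{
	return v / 10;
}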
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 841f8d1e1410..3da4d466e332 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -102,6 +102,9 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
engine->mmio_base = info->mmio_base;
engine->irq_shift = info->irq_shift;
+ /* Nothing to do here, execute in order of dependencies */
+ engine->schedule = NULL;
+
dev_priv->engine[id] = engine;
return 0;
}
@@ -236,8 +239,8 @@ static void intel_engine_init_timeline(struct intel_engine_cs *engine)
*/
void intel_engine_setup_common(struct intel_engine_cs *engine)
{
- INIT_LIST_HEAD(&engine->execlist_queue);
- spin_lock_init(&engine->execlist_lock);
+ engine->execlist_queue = RB_ROOT;
+ engine->execlist_first = NULL;
intel_engine_init_timeline(engine);
intel_engine_init_hangcheck(engine);
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index e230d480c5e6..62f215b12eb5 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -48,17 +48,17 @@ static inline bool fbc_supported(struct drm_i915_private *dev_priv)
static inline bool fbc_on_pipe_a_only(struct drm_i915_private *dev_priv)
{
- return IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8;
+ return IS_HASWELL(dev_priv) || INTEL_GEN(dev_priv) >= 8;
}
static inline bool fbc_on_plane_a_only(struct drm_i915_private *dev_priv)
{
- return INTEL_INFO(dev_priv)->gen < 4;
+ return INTEL_GEN(dev_priv) < 4;
}
static inline bool no_fbc_on_multiple_pipes(struct drm_i915_private *dev_priv)
{
- return INTEL_INFO(dev_priv)->gen <= 3;
+ return INTEL_GEN(dev_priv) <= 3;
}
/*
@@ -351,7 +351,7 @@ static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
static bool intel_fbc_hw_is_active(struct drm_i915_private *dev_priv)
{
- if (INTEL_INFO(dev_priv)->gen >= 5)
+ if (INTEL_GEN(dev_priv) >= 5)
return ilk_fbc_is_active(dev_priv);
else if (IS_GM45(dev_priv))
return g4x_fbc_is_active(dev_priv);
@@ -365,9 +365,9 @@ static void intel_fbc_hw_activate(struct drm_i915_private *dev_priv)
fbc->active = true;
- if (INTEL_INFO(dev_priv)->gen >= 7)
+ if (INTEL_GEN(dev_priv) >= 7)
gen7_fbc_activate(dev_priv);
- else if (INTEL_INFO(dev_priv)->gen >= 5)
+ else if (INTEL_GEN(dev_priv) >= 5)
ilk_fbc_activate(dev_priv);
else if (IS_GM45(dev_priv))
g4x_fbc_activate(dev_priv);
@@ -381,7 +381,7 @@ static void intel_fbc_hw_deactivate(struct drm_i915_private *dev_priv)
fbc->active = false;
- if (INTEL_INFO(dev_priv)->gen >= 5)
+ if (INTEL_GEN(dev_priv) >= 5)
ilk_fbc_deactivate(dev_priv);
else if (IS_GM45(dev_priv))
g4x_fbc_deactivate(dev_priv);
@@ -561,7 +561,7 @@ again:
ret = i915_gem_stolen_insert_node_in_range(dev_priv, node, size >>= 1,
4096, 0, end);
- if (ret && INTEL_INFO(dev_priv)->gen <= 4) {
+ if (ret && INTEL_GEN(dev_priv) <= 4) {
return 0;
} else if (ret) {
compression_threshold <<= 1;
@@ -594,7 +594,7 @@ static int intel_fbc_alloc_cfb(struct intel_crtc *crtc)
fbc->threshold = ret;
- if (INTEL_INFO(dev_priv)->gen >= 5)
+ if (INTEL_GEN(dev_priv) >= 5)
I915_WRITE(ILK_DPFC_CB_BASE, fbc->compressed_fb.start);
else if (IS_GM45(dev_priv)) {
I915_WRITE(DPFC_CB_BASE, fbc->compressed_fb.start);
@@ -708,10 +708,10 @@ static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc)
struct intel_fbc *fbc = &dev_priv->fbc;
unsigned int effective_w, effective_h, max_w, max_h;
- if (INTEL_INFO(dev_priv)->gen >= 8 || IS_HASWELL(dev_priv)) {
+ if (INTEL_GEN(dev_priv) >= 8 || IS_HASWELL(dev_priv)) {
max_w = 4096;
max_h = 4096;
- } else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
+ } else if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
max_w = 4096;
max_h = 2048;
} else {
@@ -812,7 +812,7 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
fbc->no_fbc_reason = "framebuffer not tiled or fenced";
return false;
}
- if (INTEL_INFO(dev_priv)->gen <= 4 && !IS_G4X(dev_priv) &&
+ if (INTEL_GEN(dev_priv) <= 4 && !IS_G4X(dev_priv) &&
cache->plane.rotation != DRM_ROTATE_0) {
fbc->no_fbc_reason = "rotation unsupported";
return false;
@@ -854,9 +854,8 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
return true;
}
-static bool intel_fbc_can_choose(struct intel_crtc *crtc)
+static bool intel_fbc_can_enable(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_fbc *fbc = &dev_priv->fbc;
if (intel_vgpu_active(dev_priv)) {
@@ -874,16 +873,6 @@ static bool intel_fbc_can_choose(struct intel_crtc *crtc)
return false;
}
- if (fbc_on_pipe_a_only(dev_priv) && crtc->pipe != PIPE_A) {
- fbc->no_fbc_reason = "no enabled pipes can have FBC";
- return false;
- }
-
- if (fbc_on_plane_a_only(dev_priv) && crtc->plane != PLANE_A) {
- fbc->no_fbc_reason = "no enabled planes can have FBC";
- return false;
- }
-
return true;
}
@@ -1066,23 +1055,19 @@ void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv,
struct drm_atomic_state *state)
{
struct intel_fbc *fbc = &dev_priv->fbc;
- struct drm_crtc *crtc;
- struct drm_crtc_state *crtc_state;
struct drm_plane *plane;
struct drm_plane_state *plane_state;
- bool fbc_crtc_present = false;
- int i, j;
+ bool crtc_chosen = false;
+ int i;
mutex_lock(&fbc->lock);
- for_each_crtc_in_state(state, crtc, crtc_state, i) {
- if (fbc->crtc == to_intel_crtc(crtc)) {
- fbc_crtc_present = true;
- break;
- }
- }
- /* This atomic commit doesn't involve the CRTC currently tied to FBC. */
- if (!fbc_crtc_present && fbc->crtc != NULL)
+ /* Does this atomic commit involve the CRTC currently tied to FBC? */
+ if (fbc->crtc &&
+ !drm_atomic_get_existing_crtc_state(state, &fbc->crtc->base))
+ goto out;
+
+ if (!intel_fbc_can_enable(dev_priv))
goto out;
/* Simply choose the first CRTC that is compatible and has a visible
@@ -1092,25 +1077,29 @@ void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv,
for_each_plane_in_state(state, plane, plane_state, i) {
struct intel_plane_state *intel_plane_state =
to_intel_plane_state(plane_state);
+ struct intel_crtc_state *intel_crtc_state;
+ struct intel_crtc *crtc = to_intel_crtc(plane_state->crtc);
if (!intel_plane_state->base.visible)
continue;
- for_each_crtc_in_state(state, crtc, crtc_state, j) {
- struct intel_crtc_state *intel_crtc_state =
- to_intel_crtc_state(crtc_state);
+ if (fbc_on_pipe_a_only(dev_priv) && crtc->pipe != PIPE_A)
+ continue;
- if (plane_state->crtc != crtc)
- continue;
+ if (fbc_on_plane_a_only(dev_priv) && crtc->plane != PLANE_A)
+ continue;
- if (!intel_fbc_can_choose(to_intel_crtc(crtc)))
- break;
+ intel_crtc_state = to_intel_crtc_state(
+ drm_atomic_get_existing_crtc_state(state, &crtc->base));
- intel_crtc_state->enable_fbc = true;
- goto out;
- }
+ intel_crtc_state->enable_fbc = true;
+ crtc_chosen = true;
+ break;
}
+ if (!crtc_chosen)
+ fbc->no_fbc_reason = "no suitable CRTC for FBC";
+
out:
mutex_unlock(&fbc->lock);
}
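The rewritten intel_fbc_choose_crtc() above folds two nested loops into one pass over the planes, and both of its new CRTC lookups rely on drm_atomic_get_existing_crtc_state(), which only returns a CRTC state already part of the commit and never adds one, so it cannot fail or take new locks. A small sketch of the early-out that replaces the old fbc_crtc_present scan, under that assumption; the helper name is ours:

/* bail when the commit does not touch the CRTC currently tied to FBC;
 * returns false when fbc->crtc is set but absent from this commit */
static bool fbc_crtc_in_commit(struct intel_fbc *fbc,
			       struct drm_atomic_state *state)
{
	if (!fbc->crtc)
		return true;	/* nothing tied to FBC yet */
	return drm_atomic_get_existing_crtc_state(state,
						  &fbc->crtc->base) != NULL;
}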
@@ -1386,7 +1375,7 @@ void intel_fbc_init(struct drm_i915_private *dev_priv)
}
/* This value was pulled out of someone's hat */
- if (INTEL_INFO(dev_priv)->gen <= 4 && !IS_GM45(dev_priv))
+ if (INTEL_GEN(dev_priv) <= 4 && !IS_GM45(dev_priv))
I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT);
/* We still don't have any sort of hardware state readout for FBC, so
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index e87221bba475..fc958d5ed0dc 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -356,7 +356,7 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
struct drm_fb_offset *offsets,
bool *enabled, int width, int height)
{
- struct drm_device *dev = fb_helper->dev;
+ struct drm_i915_private *dev_priv = to_i915(fb_helper->dev);
unsigned long conn_configured, mask;
unsigned int count = min(fb_helper->connector_count, BITS_PER_LONG);
int i, j;
@@ -509,7 +509,7 @@ retry:
* fbdev helper library.
*/
if (num_connectors_enabled != num_connectors_detected &&
- num_connectors_enabled < INTEL_INFO(dev)->num_pipes) {
+ num_connectors_enabled < INTEL_INFO(dev_priv)->num_pipes) {
DRM_DEBUG_KMS("fallback: Not all outputs enabled\n");
DRM_DEBUG_KMS("Enabled: %i, detected: %i\n", num_connectors_enabled,
num_connectors_detected);
@@ -697,11 +697,11 @@ static void intel_fbdev_suspend_worker(struct work_struct *work)
int intel_fbdev_init(struct drm_device *dev)
{
- struct intel_fbdev *ifbdev;
struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_fbdev *ifbdev;
int ret;
- if (WARN_ON(INTEL_INFO(dev)->num_pipes == 0))
+ if (WARN_ON(INTEL_INFO(dev_priv)->num_pipes == 0))
return -ENODEV;
ifbdev = kzalloc(sizeof(struct intel_fbdev), GFP_KERNEL);
@@ -714,7 +714,7 @@ int intel_fbdev_init(struct drm_device *dev)
ifbdev->preferred_bpp = 32;
ret = drm_fb_helper_init(dev, &ifbdev->helper,
- INTEL_INFO(dev)->num_pipes, 4);
+ INTEL_INFO(dev_priv)->num_pipes, 4);
if (ret) {
kfree(ifbdev);
return ret;
diff --git a/drivers/gpu/drm/i915/intel_frontbuffer.h b/drivers/gpu/drm/i915/intel_frontbuffer.h
index 76ceb539f9f0..7bab41218cf7 100644
--- a/drivers/gpu/drm/i915/intel_frontbuffer.h
+++ b/drivers/gpu/drm/i915/intel_frontbuffer.h
@@ -53,16 +53,17 @@ void __intel_fb_obj_flush(struct drm_i915_gem_object *obj,
* until the rendering completes or a flip on this frontbuffer plane is
* scheduled.
*/
-static inline void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
+static inline bool intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
enum fb_op_origin origin)
{
unsigned int frontbuffer_bits;
frontbuffer_bits = atomic_read(&obj->frontbuffer_bits);
if (!frontbuffer_bits)
- return;
+ return false;
__intel_fb_obj_invalidate(obj, origin, frontbuffer_bits);
+ return true;
}
/**
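With intel_fb_obj_invalidate() now returning whether any frontbuffer bits were set, callers can learn cheaply that an object is not a scanout source. A hypothetical caller sketch, not from this patch; the write-domain use is our guess at the motivation:

static void note_cpu_write(struct drm_i915_gem_object *obj)
{
	/* true only if the object currently backs a frontbuffer plane */
	if (intel_fb_obj_invalidate(obj, ORIGIN_CPU)) {
		/* e.g. remember that a flush will be needed later */
	}
}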
diff --git a/drivers/gpu/drm/i915/intel_guc_loader.c b/drivers/gpu/drm/i915/intel_guc_loader.c
index 1aa85236b788..34d6ad2cf7c1 100644
--- a/drivers/gpu/drm/i915/intel_guc_loader.c
+++ b/drivers/gpu/drm/i915/intel_guc_loader.c
@@ -566,7 +566,7 @@ fail:
ret = 0;
}
- if (err == 0 && !HAS_GUC_UCODE(dev))
+ if (err == 0 && !HAS_GUC_UCODE(dev_priv))
; /* Don't mention the GuC! */
else if (err == 0)
DRM_INFO("GuC firmware load skipped\n");
@@ -725,18 +725,18 @@ void intel_guc_init(struct drm_device *dev)
struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
const char *fw_path;
- if (!HAS_GUC(dev)) {
+ if (!HAS_GUC(dev_priv)) {
i915.enable_guc_loading = 0;
i915.enable_guc_submission = 0;
} else {
/* A negative value means "use platform default" */
if (i915.enable_guc_loading < 0)
- i915.enable_guc_loading = HAS_GUC_UCODE(dev);
+ i915.enable_guc_loading = HAS_GUC_UCODE(dev_priv);
if (i915.enable_guc_submission < 0)
- i915.enable_guc_submission = HAS_GUC_SCHED(dev);
+ i915.enable_guc_submission = HAS_GUC_SCHED(dev_priv);
}
- if (!HAS_GUC_UCODE(dev)) {
+ if (!HAS_GUC_UCODE(dev_priv)) {
fw_path = NULL;
} else if (IS_SKYLAKE(dev_priv)) {
fw_path = I915_SKL_GUC_UCODE;
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 35ada4e1c6cf..fb88e32e25a3 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -975,14 +975,16 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
pipe_config->lane_count = 4;
}
-static void intel_enable_hdmi_audio(struct intel_encoder *encoder)
+static void intel_enable_hdmi_audio(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state)
{
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
WARN_ON(!crtc->config->has_hdmi_sink);
DRM_DEBUG_DRIVER("Enabling HDMI audio on pipe %c\n",
pipe_name(crtc->pipe));
- intel_audio_codec_enable(encoder);
+ intel_audio_codec_enable(encoder, pipe_config, conn_state);
}
static void g4x_enable_hdmi(struct intel_encoder *encoder,
@@ -991,21 +993,20 @@ static void g4x_enable_hdmi(struct intel_encoder *encoder,
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
u32 temp;
temp = I915_READ(intel_hdmi->hdmi_reg);
temp |= SDVO_ENABLE;
- if (crtc->config->has_audio)
+ if (pipe_config->has_audio)
temp |= SDVO_AUDIO_ENABLE;
I915_WRITE(intel_hdmi->hdmi_reg, temp);
POSTING_READ(intel_hdmi->hdmi_reg);
- if (crtc->config->has_audio)
- intel_enable_hdmi_audio(encoder);
+ if (pipe_config->has_audio)
+ intel_enable_hdmi_audio(encoder, pipe_config, conn_state);
}
static void ibx_enable_hdmi(struct intel_encoder *encoder,
@@ -1040,8 +1041,8 @@ static void ibx_enable_hdmi(struct intel_encoder *encoder,
* FIXME: BSpec says this should be done at the end
* of the modeset sequence, so not sure if this isn't too soon.
*/
- if (crtc->config->pipe_bpp > 24 &&
- crtc->config->pixel_multiplier > 1) {
+ if (pipe_config->pipe_bpp > 24 &&
+ pipe_config->pixel_multiplier > 1) {
I915_WRITE(intel_hdmi->hdmi_reg, temp & ~SDVO_ENABLE);
POSTING_READ(intel_hdmi->hdmi_reg);
@@ -1055,8 +1056,8 @@ static void ibx_enable_hdmi(struct intel_encoder *encoder,
POSTING_READ(intel_hdmi->hdmi_reg);
}
- if (crtc->config->has_audio)
- intel_enable_hdmi_audio(encoder);
+ if (pipe_config->has_audio)
+ intel_enable_hdmi_audio(encoder, pipe_config, conn_state);
}
static void cpt_enable_hdmi(struct intel_encoder *encoder,
@@ -1073,7 +1074,7 @@ static void cpt_enable_hdmi(struct intel_encoder *encoder,
temp = I915_READ(intel_hdmi->hdmi_reg);
temp |= SDVO_ENABLE;
- if (crtc->config->has_audio)
+ if (pipe_config->has_audio)
temp |= SDVO_AUDIO_ENABLE;
/*
@@ -1086,7 +1087,7 @@ static void cpt_enable_hdmi(struct intel_encoder *encoder,
* 4. enable HDMI clock gating
*/
- if (crtc->config->pipe_bpp > 24) {
+ if (pipe_config->pipe_bpp > 24) {
I915_WRITE(TRANS_CHICKEN1(pipe),
I915_READ(TRANS_CHICKEN1(pipe)) |
TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE);
@@ -1098,7 +1099,7 @@ static void cpt_enable_hdmi(struct intel_encoder *encoder,
I915_WRITE(intel_hdmi->hdmi_reg, temp);
POSTING_READ(intel_hdmi->hdmi_reg);
- if (crtc->config->pipe_bpp > 24) {
+ if (pipe_config->pipe_bpp > 24) {
temp &= ~SDVO_COLOR_FORMAT_MASK;
temp |= HDMI_COLOR_FORMAT_12bpc;
@@ -1110,8 +1111,8 @@ static void cpt_enable_hdmi(struct intel_encoder *encoder,
~TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE);
}
- if (crtc->config->has_audio)
- intel_enable_hdmi_audio(encoder);
+ if (pipe_config->has_audio)
+ intel_enable_hdmi_audio(encoder, pipe_config, conn_state);
}
static void vlv_enable_hdmi(struct intel_encoder *encoder,
@@ -1178,9 +1179,7 @@ static void g4x_disable_hdmi(struct intel_encoder *encoder,
struct intel_crtc_state *old_crtc_state,
struct drm_connector_state *old_conn_state)
{
- struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
-
- if (crtc->config->has_audio)
+ if (old_crtc_state->has_audio)
intel_audio_codec_disable(encoder);
intel_disable_hdmi(encoder, old_crtc_state, old_conn_state);
@@ -1190,9 +1189,7 @@ static void pch_disable_hdmi(struct intel_encoder *encoder,
struct intel_crtc_state *old_crtc_state,
struct drm_connector_state *old_conn_state)
{
- struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
-
- if (crtc->config->has_audio)
+ if (old_crtc_state->has_audio)
intel_audio_codec_disable(encoder);
}
@@ -1645,13 +1642,12 @@ static void intel_hdmi_pre_enable(struct intel_encoder *encoder,
struct drm_connector_state *conn_state)
{
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
- const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
+ const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
intel_hdmi_prepare(encoder);
intel_hdmi->set_infoframes(&encoder->base,
- intel_crtc->config->has_hdmi_sink,
+ pipe_config->has_hdmi_sink,
adjusted_mode);
}
@@ -1663,9 +1659,7 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder,
struct intel_hdmi *intel_hdmi = &dport->hdmi;
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *intel_crtc =
- to_intel_crtc(encoder->base.crtc);
- const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
+ const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
vlv_phy_pre_encoder_enable(encoder);
@@ -1674,7 +1668,7 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder,
0x2b247878);
intel_hdmi->set_infoframes(&encoder->base,
- intel_crtc->config->has_hdmi_sink,
+ pipe_config->has_hdmi_sink,
adjusted_mode);
g4x_enable_hdmi(encoder, pipe_config, conn_state);
diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c
index 334d47b5811a..3d546c019de0 100644
--- a/drivers/gpu/drm/i915/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/intel_hotplug.c
@@ -501,7 +501,7 @@ static void i915_hpd_poll_init_work(struct work_struct *work)
if (intel_connector->mst_port)
continue;
- if (!connector->polled && I915_HAS_HOTPLUG(dev) &&
+ if (!connector->polled && I915_HAS_HOTPLUG(dev_priv) &&
intel_connector->encoder->hpd_pin > HPD_NONE) {
connector->polled = enabled ?
DRM_CONNECTOR_POLL_CONNECT |
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index dde04b7643b1..0a09024d6ca3 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -432,8 +432,10 @@ static bool can_merge_ctx(const struct i915_gem_context *prev,
static void execlists_dequeue(struct intel_engine_cs *engine)
{
- struct drm_i915_gem_request *cursor, *last;
+ struct drm_i915_gem_request *last;
struct execlist_port *port = engine->execlist_port;
+ unsigned long flags;
+ struct rb_node *rb;
bool submit = false;
last = port->request;
@@ -469,8 +471,12 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
* and context switches) submission.
*/
- spin_lock(&engine->execlist_lock);
- list_for_each_entry(cursor, &engine->execlist_queue, execlist_link) {
+ spin_lock_irqsave(&engine->timeline->lock, flags);
+ rb = engine->execlist_first;
+ while (rb) {
+ struct drm_i915_gem_request *cursor =
+ rb_entry(rb, typeof(*cursor), priotree.node);
+
/* Can we combine this request with the current port? It has to
* be the same context/ringbuffer and not have any exceptions
* (e.g. GVT saying never to combine contexts).
@@ -493,7 +499,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
* context (even though a different request) to
* the second port.
*/
- if (ctx_single_port_submission(cursor->ctx))
+ if (ctx_single_port_submission(last->ctx) ||
+ ctx_single_port_submission(cursor->ctx))
break;
GEM_BUG_ON(last->ctx == cursor->ctx);
@@ -501,17 +508,30 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
i915_gem_request_assign(&port->request, last);
port++;
}
+
+ rb = rb_next(rb);
+ rb_erase(&cursor->priotree.node, &engine->execlist_queue);
+ RB_CLEAR_NODE(&cursor->priotree.node);
+ cursor->priotree.priority = INT_MAX;
+
+ /* We keep the previous context alive until we retire the
+ * following request. This ensures that the context object
+ * is still pinned for any residual writes the HW makes into it
+ * on the context switch into the next object following the
+ * breadcrumb. Otherwise, we may retire the context too early.
+ */
+ cursor->previous_context = engine->last_context;
+ engine->last_context = cursor->ctx;
+
+ __i915_gem_request_submit(cursor);
last = cursor;
submit = true;
}
if (submit) {
- /* Decouple all the requests submitted from the queue */
- engine->execlist_queue.next = &cursor->execlist_link;
- cursor->execlist_link.prev = &engine->execlist_queue;
-
i915_gem_request_assign(&port->request, last);
+ engine->execlist_first = rb;
}
- spin_unlock(&engine->execlist_lock);
+ spin_unlock_irqrestore(&engine->timeline->lock, flags);
if (submit)
execlists_submit_ports(engine);
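The dequeue walk above coalesces consecutive requests into at most two ELSP ports. Pulled out as a predicate for readability (the helper name is ours; the rule itself is lifted from the break conditions above): requests sharing a port must share a context, and a second port may only open for a different context when neither context demands single-port submission, the GVT case.

static bool can_occupy_second_port(const struct drm_i915_gem_request *last,
				   const struct drm_i915_gem_request *next)
{
	/* single-port contexts must never be co-scheduled */
	if (ctx_single_port_submission(last->ctx) ||
	    ctx_single_port_submission(next->ctx))
		return false;
	/* same context would have been merged into the first port */
	return last->ctx != next->ctx;
}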
@@ -614,27 +634,147 @@ static void intel_lrc_irq_handler(unsigned long data)
intel_uncore_forcewake_put(dev_priv, engine->fw_domains);
}
+static bool insert_request(struct i915_priotree *pt, struct rb_root *root)
+{
+ struct rb_node **p, *rb;
+ bool first = true;
+
+ /* most positive priority is scheduled first, equal priorities are FIFO */
+ rb = NULL;
+ p = &root->rb_node;
+ while (*p) {
+ struct i915_priotree *pos;
+
+ rb = *p;
+ pos = rb_entry(rb, typeof(*pos), node);
+ if (pt->priority > pos->priority) {
+ p = &rb->rb_left;
+ } else {
+ p = &rb->rb_right;
+ first = false;
+ }
+ }
+ rb_link_node(&pt->node, rb, p);
+ rb_insert_color(&pt->node, root);
+
+ return first;
+}
+
static void execlists_submit_request(struct drm_i915_gem_request *request)
{
struct intel_engine_cs *engine = request->engine;
unsigned long flags;
- spin_lock_irqsave(&engine->execlist_lock, flags);
+ /* Will be called from irq-context when using foreign fences. */
+ spin_lock_irqsave(&engine->timeline->lock, flags);
- /* We keep the previous context alive until we retire the following
- * request. This ensures that any the context object is still pinned
- * for any residual writes the HW makes into it on the context switch
- * into the next object following the breadcrumb. Otherwise, we may
- * retire the context too early.
- */
- request->previous_context = engine->last_context;
- engine->last_context = request->ctx;
-
- list_add_tail(&request->execlist_link, &engine->execlist_queue);
+ if (insert_request(&request->priotree, &engine->execlist_queue))
+ engine->execlist_first = &request->priotree.node;
if (execlists_elsp_idle(engine))
tasklet_hi_schedule(&engine->irq_tasklet);
- spin_unlock_irqrestore(&engine->execlist_lock, flags);
+ spin_unlock_irqrestore(&engine->timeline->lock, flags);
+}
+
+static struct intel_engine_cs *
+pt_lock_engine(struct i915_priotree *pt, struct intel_engine_cs *locked)
+{
+ struct intel_engine_cs *engine;
+
+ engine = container_of(pt,
+ struct drm_i915_gem_request,
+ priotree)->engine;
+ if (engine != locked) {
+ if (locked)
+ spin_unlock_irq(&locked->timeline->lock);
+ spin_lock_irq(&engine->timeline->lock);
+ }
+
+ return engine;
+}
+
+static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
+{
+ static DEFINE_MUTEX(lock);
+ struct intel_engine_cs *engine = NULL;
+ struct i915_dependency *dep, *p;
+ struct i915_dependency stack;
+ LIST_HEAD(dfs);
+
+ if (prio <= READ_ONCE(request->priotree.priority))
+ return;
+
+ /* Need global lock to use the temporary link inside i915_dependency */
+ mutex_lock(&lock);
+
+ stack.signaler = &request->priotree;
+ list_add(&stack.dfs_link, &dfs);
+
+ /* Recursively bump all dependent priorities to match the new request.
+ *
+ * A naive approach would be to use recursion:
+ * static void update_priorities(struct i915_priotree *pt, prio) {
+ * list_for_each_entry(dep, &pt->signalers_list, signal_link)
+ * update_priorities(dep->signal, prio)
+ * insert_request(pt);
+ * }
+ * but that may have unlimited recursion depth and so runs a very
+ * real risk of overrunning the kernel stack. Instead, we build
+ * a flat list of all dependencies starting with the current request.
+ * As we walk the list of dependencies, we append each entry's own
+ * dependencies to the end of the list (this may include an already
+ * visited request) and continue to walk onwards onto the new entries. The
+ * end result is a topological list of requests in reverse order, the
+ * last element in the list is the request we must execute first.
+ */
+ list_for_each_entry_safe(dep, p, &dfs, dfs_link) {
+ struct i915_priotree *pt = dep->signaler;
+
+ list_for_each_entry(p, &pt->signalers_list, signal_link)
+ if (prio > READ_ONCE(p->signaler->priority))
+ list_move_tail(&p->dfs_link, &dfs);
+
+ p = list_next_entry(dep, dfs_link);
+ if (!RB_EMPTY_NODE(&pt->node))
+ continue;
+
+ engine = pt_lock_engine(pt, engine);
+
+ /* If it is not already in the rbtree, we can update the
+ * priority in place and skip over it (and its dependencies)
+ * if it is referenced *again* as we descend the dfs.
+ */
+ if (prio > pt->priority && RB_EMPTY_NODE(&pt->node)) {
+ pt->priority = prio;
+ list_del_init(&dep->dfs_link);
+ }
+ }
+
+ /* FIFO and depth-first replacement ensure our deps execute before us */
+ list_for_each_entry_safe_reverse(dep, p, &dfs, dfs_link) {
+ struct i915_priotree *pt = dep->signaler;
+
+ INIT_LIST_HEAD(&dep->dfs_link);
+
+ engine = pt_lock_engine(pt, engine);
+
+ if (prio <= pt->priority)
+ continue;
+
+ GEM_BUG_ON(RB_EMPTY_NODE(&pt->node));
+
+ pt->priority = prio;
+ rb_erase(&pt->node, &engine->execlist_queue);
+ if (insert_request(pt, &engine->execlist_queue))
+ engine->execlist_first = &pt->node;
+ }
+
+ if (engine)
+ spin_unlock_irq(&engine->timeline->lock);
+
+ mutex_unlock(&lock);
+
+ /* XXX Do we need to preempt to make room for us and our deps? */
}
int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request)
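A self-contained analogue of insert_request() above, showing why equal priorities dequeue FIFO: ties always descend right, so an earlier insertion is never overtaken by a later one of the same priority, and the return value is true only if the walk never went right, i.e. the new node became the leftmost, next-to-run entry. This sketch uses a plain unbalanced binary tree in place of the kernel rbtree; rebalancing does not change in-order positions:

#include <stdbool.h>
#include <stddef.h>

struct toy_node { int prio; struct toy_node *left, *right; };

static bool toy_insert_fifo_by_prio(struct toy_node **root, struct toy_node *n)
{
	bool first = true;

	while (*root) {
		if (n->prio > (*root)->prio) {
			root = &(*root)->left;	/* strictly higher: go left */
		} else {
			root = &(*root)->right;	/* ties and lower: go right */
			first = false;
		}
	}
	n->left = n->right = NULL;
	*root = n;
	return first;	/* n is the new highest-priority, oldest entry */
}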
@@ -1673,8 +1813,10 @@ void intel_execlists_enable_submission(struct drm_i915_private *dev_priv)
struct intel_engine_cs *engine;
enum intel_engine_id id;
- for_each_engine(engine, dev_priv, id)
+ for_each_engine(engine, dev_priv, id) {
engine->submit_request = execlists_submit_request;
+ engine->schedule = execlists_schedule;
+ }
}
static void
@@ -1687,6 +1829,7 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine)
engine->emit_breadcrumb = gen8_emit_breadcrumb;
engine->emit_breadcrumb_sz = gen8_emit_breadcrumb_sz;
engine->submit_request = execlists_submit_request;
+ engine->schedule = execlists_schedule;
engine->irq_enable = gen8_logical_ring_enable_irq;
engine->irq_disable = gen8_logical_ring_disable_irq;
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index de7b3e6ed477..d12ef0047d49 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -122,8 +122,7 @@ out:
static void intel_lvds_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
u32 tmp, flags = 0;
@@ -139,12 +138,12 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
pipe_config->base.adjusted_mode.flags |= flags;
- if (INTEL_INFO(dev)->gen < 5)
+ if (INTEL_GEN(dev_priv) < 5)
pipe_config->gmch_pfit.lvds_border_bits =
tmp & LVDS_BORDER_ENABLE;
/* gen2/3 store dither state in pfit control, needs to match */
- if (INTEL_INFO(dev)->gen < 4) {
+ if (INTEL_GEN(dev_priv) < 4) {
tmp = I915_READ(PFIT_CONTROL);
pipe_config->gmch_pfit.control |= tmp & PANEL_8TO6_DITHER_ENABLE;
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 7acbbbf97833..f4429f67a4e3 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -642,24 +642,6 @@ static struct notifier_block intel_opregion_notifier = {
* (version 3)
*/
-static u32 get_did(struct intel_opregion *opregion, int i)
-{
- u32 did;
-
- if (i < ARRAY_SIZE(opregion->acpi->didl)) {
- did = opregion->acpi->didl[i];
- } else {
- i -= ARRAY_SIZE(opregion->acpi->didl);
-
- if (WARN_ON(i >= ARRAY_SIZE(opregion->acpi->did2)))
- return 0;
-
- did = opregion->acpi->did2[i];
- }
-
- return did;
-}
-
static void set_did(struct intel_opregion *opregion, int i, u32 val)
{
if (i < ARRAY_SIZE(opregion->acpi->didl)) {
@@ -674,11 +656,11 @@ static void set_did(struct intel_opregion *opregion, int i, u32 val)
}
}
-static u32 acpi_display_type(struct drm_connector *connector)
+static u32 acpi_display_type(struct intel_connector *connector)
{
u32 display_type;
- switch (connector->connector_type) {
+ switch (connector->base.connector_type) {
case DRM_MODE_CONNECTOR_VGA:
case DRM_MODE_CONNECTOR_DVIA:
display_type = ACPI_DISPLAY_TYPE_VGA;
@@ -707,7 +689,7 @@ static u32 acpi_display_type(struct drm_connector *connector)
display_type = ACPI_DISPLAY_TYPE_OTHER;
break;
default:
- MISSING_CASE(connector->connector_type);
+ MISSING_CASE(connector->base.connector_type);
display_type = ACPI_DISPLAY_TYPE_OTHER;
break;
}
@@ -718,34 +700,9 @@ static u32 acpi_display_type(struct drm_connector *connector)
static void intel_didl_outputs(struct drm_i915_private *dev_priv)
{
struct intel_opregion *opregion = &dev_priv->opregion;
- struct pci_dev *pdev = dev_priv->drm.pdev;
- struct drm_connector *connector;
- acpi_handle handle;
- struct acpi_device *acpi_dev, *acpi_cdev, *acpi_video_bus = NULL;
- unsigned long long device_id;
- acpi_status status;
- u32 temp, max_outputs;
- int i = 0;
-
- handle = ACPI_HANDLE(&pdev->dev);
- if (!handle || acpi_bus_get_device(handle, &acpi_dev))
- return;
-
- if (acpi_is_video_device(handle))
- acpi_video_bus = acpi_dev;
- else {
- list_for_each_entry(acpi_cdev, &acpi_dev->children, node) {
- if (acpi_is_video_device(acpi_cdev->handle)) {
- acpi_video_bus = acpi_cdev;
- break;
- }
- }
- }
-
- if (!acpi_video_bus) {
- DRM_DEBUG_KMS("No ACPI video bus found\n");
- return;
- }
+ struct intel_connector *connector;
+ int i = 0, max_outputs;
+ int display_index[16] = {};
/*
* In theory, did2, the extended didl, gets added at opregion version
@@ -757,64 +714,58 @@ static void intel_didl_outputs(struct drm_i915_private *dev_priv)
max_outputs = ARRAY_SIZE(opregion->acpi->didl) +
ARRAY_SIZE(opregion->acpi->did2);
- list_for_each_entry(acpi_cdev, &acpi_video_bus->children, node) {
- if (i >= max_outputs) {
- DRM_DEBUG_KMS("More than %u outputs detected via ACPI\n",
- max_outputs);
- return;
- }
- status = acpi_evaluate_integer(acpi_cdev->handle, "_ADR",
- NULL, &device_id);
- if (ACPI_SUCCESS(status)) {
- if (!device_id)
- goto blind_set;
- set_did(opregion, i++, (u32)(device_id & 0x0f0f));
- }
+ for_each_intel_connector(&dev_priv->drm, connector) {
+ u32 device_id, type;
+
+ device_id = acpi_display_type(connector);
+
+ /* Use display type specific display index. */
+ type = (device_id & ACPI_DISPLAY_TYPE_MASK)
+ >> ACPI_DISPLAY_TYPE_SHIFT;
+ device_id |= display_index[type]++ << ACPI_DISPLAY_INDEX_SHIFT;
+
+ connector->acpi_device_id = device_id;
+ if (i < max_outputs)
+ set_did(opregion, i, device_id);
+ i++;
}
-end:
DRM_DEBUG_KMS("%d outputs detected\n", i);
+ if (i > max_outputs)
+ DRM_ERROR("More than %d outputs in connector list\n",
+ max_outputs);
+
/* If fewer than max outputs, the list must be null terminated */
if (i < max_outputs)
set_did(opregion, i, 0);
- return;
-
-blind_set:
- i = 0;
- list_for_each_entry(connector,
- &dev_priv->drm.mode_config.connector_list, head) {
- int display_type = acpi_display_type(connector);
-
- if (i >= max_outputs) {
- DRM_DEBUG_KMS("More than %u outputs in connector list\n",
- max_outputs);
- return;
- }
-
- temp = get_did(opregion, i);
- set_did(opregion, i, temp | (1 << 31) | display_type | i);
- i++;
- }
- goto end;
}
static void intel_setup_cadls(struct drm_i915_private *dev_priv)
{
struct intel_opregion *opregion = &dev_priv->opregion;
+ struct intel_connector *connector;
int i = 0;
- u32 disp_id;
-
- /* Initialize the CADL field by duplicating the DIDL values.
- * Technically, this is not always correct as display outputs may exist,
- * but not active. This initialization is necessary for some Clevo
- * laptops that check this field before processing the brightness and
- * display switching hotkeys. Just like DIDL, CADL is NULL-terminated if
- * there are less than eight devices. */
- do {
- disp_id = get_did(opregion, i);
- opregion->acpi->cadl[i] = disp_id;
- } while (++i < 8 && disp_id != 0);
+
+ /*
+ * Initialize the CADL field from the connector device ids. This is
+ * essentially the same as copying from the DIDL. Technically, this is
+ * not always correct, as display outputs may exist but not be active. This
+ * initialization is necessary for some Clevo laptops that check this
+ * field before processing the brightness and display switching hotkeys.
+ *
+ * Note that internal panels should be at the front of the connector
+ * list already, ensuring they're not left out.
+ */
+ for_each_intel_connector(&dev_priv->drm, connector) {
+ if (i >= ARRAY_SIZE(opregion->acpi->cadl))
+ break;
+ opregion->acpi->cadl[i++] = connector->acpi_device_id;
+ }
+
+ /* If fewer than 8 active devices, the list must be null terminated */
+ if (i < ARRAY_SIZE(opregion->acpi->cadl))
+ opregion->acpi->cadl[i] = 0;
}
void intel_opregion_register(struct drm_i915_private *dev_priv)
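A worked illustration of the device-id assignment in intel_didl_outputs() above (connector names and ordering are illustrative; the real type codes and shifts are the driver's ACPI definitions): with a connector list of eDP, HDMI-A-1, HDMI-A-2, VGA, the per-type counters give the two HDMI outputs distinct indices, so same-type outputs stay distinguishable in both DIDL and CADL. A toy version of the indexing, under those assumptions:

#include <stdint.h>

#define TOY_TYPE_MASK	0x0f	/* illustrative, not the ACPI values */
#define TOY_INDEX_SHIFT	4

static uint32_t toy_acpi_device_id(uint32_t type, uint32_t per_type_index[])
{
	uint32_t id = type & TOY_TYPE_MASK;

	/* second output of the same type gets index 1, and so on */
	id |= per_type_index[type & TOY_TYPE_MASK]++ << TOY_INDEX_SHIFT;
	return id;
}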
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index be4b4d546fd9..08ab6d762ca4 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -304,7 +304,7 @@ void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc,
struct intel_crtc_state *pipe_config,
int fitting_mode)
{
- struct drm_device *dev = intel_crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0;
struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
@@ -325,7 +325,7 @@ void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc,
break;
case DRM_MODE_SCALE_ASPECT:
/* Scale but preserve the aspect ratio */
- if (INTEL_INFO(dev)->gen >= 4)
+ if (INTEL_GEN(dev_priv) >= 4)
i965_scale_aspect(pipe_config, &pfit_control);
else
i9xx_scale_aspect(pipe_config, &pfit_control,
@@ -339,7 +339,7 @@ void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc,
if (pipe_config->pipe_src_h != adjusted_mode->crtc_vdisplay ||
pipe_config->pipe_src_w != adjusted_mode->crtc_hdisplay) {
pfit_control |= PFIT_ENABLE;
- if (INTEL_INFO(dev)->gen >= 4)
+ if (INTEL_GEN(dev_priv) >= 4)
pfit_control |= PFIT_SCALING_AUTO;
else
pfit_control |= (VERT_AUTO_SCALE |
@@ -355,7 +355,7 @@ void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc,
/* 965+ wants fuzzy fitting */
/* FIXME: handle multiple panels by failing gracefully */
- if (INTEL_INFO(dev)->gen >= 4)
+ if (INTEL_GEN(dev_priv) >= 4)
pfit_control |= ((intel_crtc->pipe << PFIT_PIPE_SHIFT) |
PFIT_FILTER_FUZZY);
@@ -366,7 +366,7 @@ out:
}
/* Make sure pre-965 set dither correctly for 18bpp panels. */
- if (INTEL_INFO(dev)->gen < 4 && pipe_config->pipe_bpp == 18)
+ if (INTEL_GEN(dev_priv) < 4 && pipe_config->pipe_bpp == 18)
pfit_control |= PANEL_8TO6_DITHER_ENABLE;
pipe_config->gmch_pfit.control = pfit_control;
@@ -1722,7 +1722,7 @@ int intel_panel_setup_backlight(struct drm_connector *connector, enum pipe pipe)
DRM_DEBUG_KMS("Connector %s backlight initialized, %s, brightness %u/%u\n",
connector->name,
- panel->backlight.enabled ? "enabled" : "disabled",
+ enableddisabled(panel->backlight.enabled),
panel->backlight.level, panel->backlight.max);
return 0;
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index cc9e0c0f445f..e207dc69e8b3 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -347,8 +347,7 @@ void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
return;
}
- DRM_DEBUG_KMS("memory self-refresh is %s\n",
- enable ? "enabled" : "disabled");
+ DRM_DEBUG_KMS("memory self-refresh is %s\n", enableddisabled(enable));
}
@@ -1061,7 +1060,8 @@ static void vlv_invert_wms(struct intel_crtc *crtc)
for (level = 0; level < wm_state->num_levels; level++) {
struct drm_device *dev = crtc->base.dev;
- const int sr_fifo_size = INTEL_INFO(dev)->num_pipes * 512 - 1;
+ const int sr_fifo_size =
+ INTEL_INFO(to_i915(dev))->num_pipes * 512 - 1;
struct intel_plane *plane;
wm_state->sr[level].plane = sr_fifo_size - wm_state->sr[level].plane;
@@ -1091,15 +1091,16 @@ static void vlv_invert_wms(struct intel_crtc *crtc)
static void vlv_compute_wm(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct vlv_wm_state *wm_state = &crtc->wm_state;
struct intel_plane *plane;
- int sr_fifo_size = INTEL_INFO(dev)->num_pipes * 512 - 1;
+ int sr_fifo_size = INTEL_INFO(dev_priv)->num_pipes * 512 - 1;
int level;
memset(wm_state, 0, sizeof(*wm_state));
wm_state->cxsr = crtc->pipe != PIPE_C && crtc->wm.cxsr_allowed;
- wm_state->num_levels = to_i915(dev)->wm.max_level + 1;
+ wm_state->num_levels = dev_priv->wm.max_level + 1;
wm_state->num_active_planes = 0;
@@ -1179,7 +1180,7 @@ static void vlv_compute_wm(struct intel_crtc *crtc)
}
/* clear any (partially) filled invalid levels */
- for (level = wm_state->num_levels; level < to_i915(dev)->wm.max_level + 1; level++) {
+ for (level = wm_state->num_levels; level < dev_priv->wm.max_level + 1; level++) {
memset(&wm_state->wm[level], 0, sizeof(wm_state->wm[level]));
memset(&wm_state->sr[level], 0, sizeof(wm_state->sr[level]));
}
@@ -1861,23 +1862,25 @@ static uint32_t ilk_compute_fbc_wm(const struct intel_crtc_state *cstate,
return ilk_wm_fbc(pri_val, drm_rect_width(&pstate->base.dst), cpp);
}
-static unsigned int ilk_display_fifo_size(const struct drm_device *dev)
+static unsigned int
+ilk_display_fifo_size(const struct drm_i915_private *dev_priv)
{
- if (INTEL_INFO(dev)->gen >= 8)
+ if (INTEL_GEN(dev_priv) >= 8)
return 3072;
- else if (INTEL_INFO(dev)->gen >= 7)
+ else if (INTEL_GEN(dev_priv) >= 7)
return 768;
else
return 512;
}
-static unsigned int ilk_plane_wm_reg_max(const struct drm_device *dev,
- int level, bool is_sprite)
+static unsigned int
+ilk_plane_wm_reg_max(const struct drm_i915_private *dev_priv,
+ int level, bool is_sprite)
{
- if (INTEL_INFO(dev)->gen >= 8)
+ if (INTEL_GEN(dev_priv) >= 8)
/* BDW primary/sprite plane watermarks */
return level == 0 ? 255 : 2047;
- else if (INTEL_INFO(dev)->gen >= 7)
+ else if (INTEL_GEN(dev_priv) >= 7)
/* IVB/HSW primary/sprite plane watermarks */
return level == 0 ? 127 : 1023;
else if (!is_sprite)
@@ -1888,18 +1891,18 @@ static unsigned int ilk_plane_wm_reg_max(const struct drm_device *dev,
return level == 0 ? 63 : 255;
}
-static unsigned int ilk_cursor_wm_reg_max(const struct drm_device *dev,
- int level)
+static unsigned int
+ilk_cursor_wm_reg_max(const struct drm_i915_private *dev_priv, int level)
{
- if (INTEL_INFO(dev)->gen >= 7)
+ if (INTEL_GEN(dev_priv) >= 7)
return level == 0 ? 63 : 255;
else
return level == 0 ? 31 : 63;
}
-static unsigned int ilk_fbc_wm_reg_max(const struct drm_device *dev)
+static unsigned int ilk_fbc_wm_reg_max(const struct drm_i915_private *dev_priv)
{
- if (INTEL_INFO(dev)->gen >= 8)
+ if (INTEL_GEN(dev_priv) >= 8)
return 31;
else
return 15;
@@ -1912,7 +1915,8 @@ static unsigned int ilk_plane_wm_max(const struct drm_device *dev,
enum intel_ddb_partitioning ddb_partitioning,
bool is_sprite)
{
- unsigned int fifo_size = ilk_display_fifo_size(dev);
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ unsigned int fifo_size = ilk_display_fifo_size(dev_priv);
/* if sprites aren't enabled, sprites get nothing */
if (is_sprite && !config->sprites_enabled)
@@ -1920,14 +1924,14 @@ static unsigned int ilk_plane_wm_max(const struct drm_device *dev,
/* HSW allows LP1+ watermarks even with multiple pipes */
if (level == 0 || config->num_pipes_active > 1) {
- fifo_size /= INTEL_INFO(dev)->num_pipes;
+ fifo_size /= INTEL_INFO(dev_priv)->num_pipes;
/*
* For some reason the non self refresh
* FIFO size is only half of the self
* refresh FIFO size on ILK/SNB.
*/
- if (INTEL_INFO(dev)->gen <= 6)
+ if (INTEL_GEN(dev_priv) <= 6)
fifo_size /= 2;
}
@@ -1943,7 +1947,7 @@ static unsigned int ilk_plane_wm_max(const struct drm_device *dev,
}
/* clamp to max that the registers can hold */
- return min(fifo_size, ilk_plane_wm_reg_max(dev, level, is_sprite));
+ return min(fifo_size, ilk_plane_wm_reg_max(dev_priv, level, is_sprite));
}
/* Calculate the maximum cursor plane watermark */
@@ -1956,7 +1960,7 @@ static unsigned int ilk_cursor_wm_max(const struct drm_device *dev,
return 64;
/* otherwise just report max that registers can hold */
- return ilk_cursor_wm_reg_max(dev, level);
+ return ilk_cursor_wm_reg_max(to_i915(dev), level);
}
static void ilk_compute_wm_maximums(const struct drm_device *dev,
@@ -1968,17 +1972,17 @@ static void ilk_compute_wm_maximums(const struct drm_device *dev,
max->pri = ilk_plane_wm_max(dev, level, config, ddb_partitioning, false);
max->spr = ilk_plane_wm_max(dev, level, config, ddb_partitioning, true);
max->cur = ilk_cursor_wm_max(dev, level, config);
- max->fbc = ilk_fbc_wm_reg_max(dev);
+ max->fbc = ilk_fbc_wm_reg_max(to_i915(dev));
}
-static void ilk_compute_wm_reg_maximums(struct drm_device *dev,
+static void ilk_compute_wm_reg_maximums(const struct drm_i915_private *dev_priv,
int level,
struct ilk_wm_maximums *max)
{
- max->pri = ilk_plane_wm_reg_max(dev, level, false);
- max->spr = ilk_plane_wm_reg_max(dev, level, true);
- max->cur = ilk_cursor_wm_reg_max(dev, level);
- max->fbc = ilk_fbc_wm_reg_max(dev);
+ max->pri = ilk_plane_wm_reg_max(dev_priv, level, false);
+ max->spr = ilk_plane_wm_reg_max(dev_priv, level, true);
+ max->cur = ilk_cursor_wm_reg_max(dev_priv, level);
+ max->fbc = ilk_fbc_wm_reg_max(dev_priv);
}
static bool ilk_validate_wm_level(int level,
@@ -2382,7 +2386,7 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
usable_level = max_level;
/* ILK/SNB: LP2+ watermarks only w/o sprites */
- if (INTEL_INFO(dev)->gen <= 6 && pipe_wm->sprites_enabled)
+ if (INTEL_GEN(dev_priv) <= 6 && pipe_wm->sprites_enabled)
usable_level = 1;
/* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */
@@ -2401,7 +2405,7 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
if (!ilk_validate_pipe_wm(dev, pipe_wm))
return -EINVAL;
- ilk_compute_wm_reg_maximums(dev, 1, &max);
+ ilk_compute_wm_reg_maximums(dev_priv, 1, &max);
for (level = 1; level <= max_level; level++) {
struct intel_wm_level *wm = &pipe_wm->raw_wm[level];
@@ -2530,7 +2534,7 @@ static void ilk_wm_merge(struct drm_device *dev,
last_enabled_level = 0;
/* ILK: FBC WM must be disabled always */
- merged->fbc_wm_enabled = INTEL_INFO(dev)->gen >= 6;
+ merged->fbc_wm_enabled = INTEL_GEN(dev_priv) >= 6;
/* merge each WM1+ level */
for (level = 1; level <= max_level; level++) {
@@ -2593,6 +2597,7 @@ static void ilk_compute_wm_results(struct drm_device *dev,
enum intel_ddb_partitioning partitioning,
struct ilk_wm_values *results)
{
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc;
int level, wm_lp;
@@ -2619,7 +2624,7 @@ static void ilk_compute_wm_results(struct drm_device *dev,
if (r->enable)
results->wm_lp[wm_lp - 1] |= WM1_LP_SR_EN;
- if (INTEL_INFO(dev)->gen >= 8)
+ if (INTEL_GEN(dev_priv) >= 8)
results->wm_lp[wm_lp - 1] |=
r->fbc_val << WM1_LP_FBC_SHIFT_BDW;
else
@@ -2630,7 +2635,7 @@ static void ilk_compute_wm_results(struct drm_device *dev,
* Always set WM1S_LP_EN when spr_val != 0, even if the
* level is disabled. Doing otherwise could cause underruns.
*/
- if (INTEL_INFO(dev)->gen <= 6 && r->spr_val) {
+ if (INTEL_GEN(dev_priv) <= 6 && r->spr_val) {
WARN_ON(wm_lp != 1);
results->wm_lp_spr[wm_lp - 1] = WM1S_LP_EN | r->spr_val;
} else
@@ -2780,7 +2785,6 @@ static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
struct ilk_wm_values *results)
{
- struct drm_device *dev = &dev_priv->drm;
struct ilk_wm_values *previous = &dev_priv->wm.hw;
unsigned int dirty;
uint32_t val;
@@ -2836,7 +2840,7 @@ static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
previous->wm_lp_spr[0] != results->wm_lp_spr[0])
I915_WRITE(WM1S_LP_ILK, results->wm_lp_spr[0]);
- if (INTEL_INFO(dev)->gen >= 7) {
+ if (INTEL_GEN(dev_priv) >= 7) {
if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1])
I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]);
if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2])
@@ -3118,7 +3122,11 @@ skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
* we currently hold.
*/
if (!intel_state->active_pipe_changes) {
- *alloc = to_intel_crtc(for_crtc)->hw_ddb;
+ /*
+ * alloc may have been cleared by clear_intel_crtc_state();
+ * copy from the old state to be sure.
+ */
+ *alloc = to_intel_crtc_state(for_crtc->state)->wm.skl.ddb;
return;
}
@@ -3624,6 +3632,9 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
y_min_scanlines = 4;
}
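+ /* The memory bandwidth workaround doubles the minimum scanline count;
+ * y_tile_minimum below scales with it.
+ */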
+ if (apply_memory_bw_wa)
+ y_min_scanlines *= 2;
+
plane_bytes_per_line = width * cpp;
if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED) {
@@ -3644,8 +3655,6 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
plane_blocks_per_line);
y_tile_minimum = plane_blocks_per_line * y_min_scanlines;
- if (apply_memory_bw_wa)
- y_tile_minimum *= 2;
if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED) {
@@ -3906,25 +3915,16 @@ static inline bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a,
return a->start < b->end && b->start < a->end;
}
-bool skl_ddb_allocation_overlaps(struct drm_atomic_state *state,
- struct intel_crtc *intel_crtc)
+bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry **entries,
+ const struct skl_ddb_entry *ddb,
+ int ignore)
{
- struct drm_crtc *other_crtc;
- struct drm_crtc_state *other_cstate;
- struct intel_crtc *other_intel_crtc;
- const struct skl_ddb_entry *ddb =
- &to_intel_crtc_state(intel_crtc->base.state)->wm.skl.ddb;
int i;
- for_each_crtc_in_state(state, other_crtc, other_cstate, i) {
- other_intel_crtc = to_intel_crtc(other_crtc);
-
- if (other_intel_crtc == intel_crtc)
- continue;
-
- if (skl_ddb_entries_overlap(ddb, &other_intel_crtc->hw_ddb))
+ for (i = 0; i < I915_MAX_PIPES; i++)
+ if (i != ignore && entries[i] &&
+ skl_ddb_entries_overlap(ddb, entries[i]))
return true;
- }
return false;
}
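skl_ddb_allocation_overlaps() now checks a flat, pipe-indexed array of committed DDB entries instead of walking the atomic state, so pipes can be updated one at a time. A hedged sketch of a caller, with the array bookkeeping invented for illustration:

	const struct skl_ddb_entry *entries[I915_MAX_PIPES] = {};

	/* record each pipe's allocation as it is committed */
	entries[pipe] = &cstate->wm.skl.ddb;

	/* before reprogramming crtc->pipe, check no other pipe still owns its blocks */
	if (skl_ddb_allocation_overlaps(entries, &cstate->wm.skl.ddb, crtc->pipe))
		; /* defer until the conflicting pipe has released the DDB space */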
@@ -4193,14 +4193,35 @@ skl_compute_wm(struct drm_atomic_state *state)
return 0;
}
-static void skl_update_wm(struct intel_crtc *intel_crtc)
+static void skl_atomic_update_crtc_wm(struct intel_atomic_state *state,
+ struct intel_crtc_state *cstate)
+{
+ struct intel_crtc *crtc = to_intel_crtc(cstate->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct skl_pipe_wm *pipe_wm = &cstate->wm.skl.optimal;
+ const struct skl_ddb_allocation *ddb = &state->wm_results.ddb;
+ enum pipe pipe = crtc->pipe;
+ int plane;
+
+ if (!(state->wm_results.dirty_pipes & drm_crtc_mask(&crtc->base)))
+ return;
+
+ I915_WRITE(PIPE_WM_LINETIME(pipe), pipe_wm->linetime);
+
+ for_each_universal_plane(dev_priv, pipe, plane)
+ skl_write_plane_wm(crtc, &pipe_wm->planes[plane], ddb, plane);
+
+ skl_write_cursor_wm(crtc, &pipe_wm->planes[PLANE_CURSOR], ddb);
+}
+
+static void skl_initial_wm(struct intel_atomic_state *state,
+ struct intel_crtc_state *cstate)
{
+ struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
struct drm_device *dev = intel_crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct skl_wm_values *results = &dev_priv->wm.skl_results;
+ struct skl_wm_values *results = &state->wm_results;
struct skl_wm_values *hw_vals = &dev_priv->wm.skl_hw;
- struct intel_crtc_state *cstate = to_intel_crtc_state(intel_crtc->base.state);
- struct skl_pipe_wm *pipe_wm = &cstate->wm.skl.optimal;
enum pipe pipe = intel_crtc->pipe;
if ((results->dirty_pipes & drm_crtc_mask(&intel_crtc->base)) == 0)
@@ -4208,27 +4229,11 @@ static void skl_update_wm(struct intel_crtc *intel_crtc)
mutex_lock(&dev_priv->wm.wm_mutex);
- /*
- * If this pipe isn't active already, we're going to be enabling it
- * very soon. Since it's safe to update a pipe's ddb allocation while
- * the pipe's shut off, just do so here. Already active pipes will have
- * their watermarks updated once we update their planes.
- */
- if (intel_crtc->base.state->active_changed) {
- int plane;
-
- for_each_universal_plane(dev_priv, pipe, plane)
- skl_write_plane_wm(intel_crtc, &pipe_wm->planes[plane],
- &results->ddb, plane);
-
- skl_write_cursor_wm(intel_crtc, &pipe_wm->planes[PLANE_CURSOR],
- &results->ddb);
- }
+ if (cstate->base.active_changed)
+ skl_atomic_update_crtc_wm(state, cstate);
skl_copy_wm_for_pipe(hw_vals, results, pipe);
- intel_crtc->hw_ddb = cstate->wm.skl.ddb;
-
mutex_unlock(&dev_priv->wm.wm_mutex);
}
@@ -4265,7 +4270,7 @@ static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
ilk_wm_merge(dev, &config, &max, &lp_wm_1_2);
/* 5/6 split only in single pipe config on IVB+ */
- if (INTEL_INFO(dev)->gen >= 7 &&
+ if (INTEL_GEN(dev_priv) >= 7 &&
config.num_pipes_active == 1 && config.sprites_enabled) {
ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_5_6, &max);
ilk_wm_merge(dev, &config, &max, &lp_wm_5_6);
@@ -4283,7 +4288,8 @@ static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
ilk_write_wm_values(dev_priv, &results);
}
-static void ilk_initial_watermarks(struct intel_crtc_state *cstate)
+static void ilk_initial_watermarks(struct intel_atomic_state *state,
+ struct intel_crtc_state *cstate)
{
struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
@@ -4294,7 +4300,8 @@ static void ilk_initial_watermarks(struct intel_crtc_state *cstate)
mutex_unlock(&dev_priv->wm.wm_mutex);
}
-static void ilk_optimize_watermarks(struct intel_crtc_state *cstate)
+static void ilk_optimize_watermarks(struct intel_atomic_state *state,
+ struct intel_crtc_state *cstate)
{
struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
@@ -4605,7 +4612,7 @@ void ilk_wm_get_hw_state(struct drm_device *dev)
hw->wm_lp[2] = I915_READ(WM3_LP_ILK);
hw->wm_lp_spr[0] = I915_READ(WM1S_LP_ILK);
- if (INTEL_INFO(dev)->gen >= 7) {
+ if (INTEL_GEN(dev_priv) >= 7) {
hw->wm_lp_spr[1] = I915_READ(WM2S_LP_IVB);
hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
}
@@ -7690,7 +7697,8 @@ void intel_init_pm(struct drm_i915_private *dev_priv)
/* For FIFO watermark updates */
if (INTEL_GEN(dev_priv) >= 9) {
skl_setup_wm_latency(dev_priv);
- dev_priv->display.update_wm = skl_update_wm;
+ dev_priv->display.initial_watermarks = skl_initial_wm;
+ dev_priv->display.atomic_update_watermarks = skl_atomic_update_crtc_wm;
dev_priv->display.compute_global_watermarks = skl_compute_wm;
} else if (HAS_PCH_SPLIT(dev_priv)) {
ilk_setup_wm_latency(dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index 271a3e29ff23..7b488e2793d9 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -427,7 +427,7 @@ void intel_psr_enable(struct intel_dp *intel_dp)
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
- if (!HAS_PSR(dev)) {
+ if (!HAS_PSR(dev_priv)) {
DRM_DEBUG_KMS("PSR not supported on this platform\n");
return;
}
@@ -472,7 +472,7 @@ void intel_psr_enable(struct intel_dp *intel_dp)
/* Enable PSR on the panel */
hsw_psr_enable_sink(intel_dp);
- if (INTEL_INFO(dev)->gen >= 9)
+ if (INTEL_GEN(dev_priv) >= 9)
intel_psr_activate(intel_dp);
} else {
vlv_psr_setup_vsc(intel_dp);
@@ -498,7 +498,7 @@ void intel_psr_enable(struct intel_dp *intel_dp)
* - On HSW/BDW we get a recoverable frozen screen until next
* exit-activate sequence.
*/
- if (INTEL_INFO(dev)->gen < 9)
+ if (INTEL_GEN(dev_priv) < 9)
schedule_delayed_work(&dev_priv->psr.work,
msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5));
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 700e93d80616..aeb637dc1fdf 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1294,6 +1294,8 @@ static void i9xx_submit_request(struct drm_i915_gem_request *request)
{
struct drm_i915_private *dev_priv = request->i915;
+ i915_gem_request_submit(request);
+
I915_WRITE_TAIL(request->engine, request->tail);
}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 642b54692d0d..3466b4e77e7c 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -267,6 +267,15 @@ struct intel_engine_cs {
*/
void (*submit_request)(struct drm_i915_gem_request *req);
+ /* Called when the priority of a request has changed and it, together
+ * with its dependencies, may need rescheduling. Note that the request
+ * itself may not be ready to run yet!
+ *
+ * Called under the struct_mutex.
+ */
+ void (*schedule)(struct drm_i915_gem_request *request,
+ int priority);
+
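+ /* A sketch, not part of the patch: a priority bump from a page-flip
+ * path might look like
+ *
+ *	lockdep_assert_held(&request->i915->drm.struct_mutex);
+ *	if (engine->schedule)
+ *		engine->schedule(request, I915_PRIORITY_DISPLAY);
+ *
+ * where I915_PRIORITY_DISPLAY is assumed to be the flip-boost priority
+ * introduced in this series; engines without a scheduler leave the
+ * hook NULL.
+ */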
/* Some chipsets are not quite as coherent as advertised and need
* an expensive kick to force a true read of the up-to-date seqno.
* However, the up-to-date seqno is not always required and the last
@@ -335,12 +344,12 @@ struct intel_engine_cs {
/* Execlists */
struct tasklet_struct irq_tasklet;
- spinlock_t execlist_lock; /* used inside tasklet, use spin_lock_bh */
struct execlist_port {
struct drm_i915_gem_request *request;
unsigned int count;
} execlist_port[2];
- struct list_head execlist_queue;
+ struct rb_root execlist_queue;
+ struct rb_node *execlist_first;
unsigned int fw_domains;
bool disable_lite_restore_wa;
bool preempt_wa;
@@ -578,7 +587,6 @@ static inline bool intel_engine_wakeup(const struct intel_engine_cs *engine)
void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine);
void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);
-unsigned int intel_kick_waiters(struct drm_i915_private *i915);
-unsigned int intel_kick_signalers(struct drm_i915_private *i915);
+unsigned int intel_breadcrumbs_busy(struct drm_i915_private *i915);
#endif /* _INTEL_RINGBUFFER_H_ */
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 05994083e161..356c662ad453 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -1066,7 +1066,7 @@ static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
*
* CHV DPLL B/C have some issues if VGA mode is enabled.
*/
- for_each_pipe(&dev_priv->drm, pipe) {
+ for_each_pipe(dev_priv, pipe) {
u32 val = I915_READ(DPLL(pipe));
val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
@@ -1097,7 +1097,7 @@ static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
intel_crt_reset(&encoder->base);
}
- i915_redisable_vga_power_on(&dev_priv->drm);
+ i915_redisable_vga_power_on(dev_priv);
intel_pps_unlock_regs_wa(dev_priv);
}
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 3990c805a5b5..27808e91cb5a 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -1195,8 +1195,7 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
- struct drm_device *dev = intel_encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
struct drm_display_mode *mode = &crtc_state->base.mode;
@@ -1269,13 +1268,13 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
return;
/* Set the SDVO control regs. */
- if (INTEL_INFO(dev)->gen >= 4) {
+ if (INTEL_GEN(dev_priv) >= 4) {
/* The real mode polarity is set by the SDVO commands, using
* struct intel_sdvo_dtd. */
sdvox = SDVO_VSYNC_ACTIVE_HIGH | SDVO_HSYNC_ACTIVE_HIGH;
if (!HAS_PCH_SPLIT(dev_priv) && crtc_state->limited_color_range)
sdvox |= HDMI_COLOR_RANGE_16_235;
- if (INTEL_INFO(dev)->gen < 5)
+ if (INTEL_GEN(dev_priv) < 5)
sdvox |= SDVO_BORDER_ENABLE;
} else {
sdvox = I915_READ(intel_sdvo->sdvo_reg);
@@ -1294,7 +1293,7 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
if (intel_sdvo->has_hdmi_audio)
sdvox |= SDVO_AUDIO_ENABLE;
- if (INTEL_INFO(dev)->gen >= 4) {
+ if (INTEL_GEN(dev_priv) >= 4) {
/* done in crtc_mode_set as the dpll_md reg must be written early */
} else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
IS_G33(dev_priv)) {
@@ -1305,7 +1304,7 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
}
if (input_dtd.part2.sdvo_flags & SDVO_NEED_TO_STALL &&
- INTEL_INFO(dev)->gen < 5)
+ INTEL_GEN(dev_priv) < 5)
sdvox |= SDVO_STALL_SELECT;
intel_sdvo_write_sdvox(intel_sdvo, sdvox);
}
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 0e9aac2e3eae..c2b629c922e7 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -203,13 +203,8 @@ skl_update_plane(struct drm_plane *drm_plane,
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_plane *intel_plane = to_intel_plane(drm_plane);
struct drm_framebuffer *fb = plane_state->base.fb;
- const struct skl_wm_values *wm = &dev_priv->wm.skl_results;
- struct drm_crtc *crtc = crtc_state->base.crtc;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
const int pipe = intel_plane->pipe;
const int plane = intel_plane->plane + 1;
- const struct skl_plane_wm *p_wm =
- &crtc_state->wm.skl.optimal.planes[plane];
u32 plane_ctl;
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
u32 surf_addr = plane_state->main.offset;
@@ -233,9 +228,6 @@ skl_update_plane(struct drm_plane *drm_plane,
plane_ctl |= skl_plane_ctl_rotation(rotation);
- if (wm->dirty_pipes & drm_crtc_mask(crtc))
- skl_write_plane_wm(intel_crtc, p_wm, &wm->ddb, plane);
-
if (key->flags) {
I915_WRITE(PLANE_KEYVAL(pipe, plane), key->min_value);
I915_WRITE(PLANE_KEYMAX(pipe, plane), key->max_value);
@@ -291,19 +283,9 @@ skl_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
struct drm_device *dev = dplane->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_plane *intel_plane = to_intel_plane(dplane);
- struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
const int pipe = intel_plane->pipe;
const int plane = intel_plane->plane + 1;
- /*
- * We only populate skl_results on watermark updates, and if the
- * plane's visiblity isn't actually changing neither is its watermarks.
- */
- if (!dplane->state->visible)
- skl_write_plane_wm(to_intel_crtc(crtc),
- &cstate->wm.skl.optimal.planes[plane],
- &dev_priv->wm.skl_results.ddb, plane);
-
I915_WRITE(PLANE_CTL(pipe, plane), 0);
I915_WRITE(PLANE_SURF(pipe, plane), 0);
@@ -362,7 +344,7 @@ vlv_update_plane(struct drm_plane *dplane,
int plane = intel_plane->plane;
u32 sprctl;
u32 sprsurf_offset, linear_offset;
- unsigned int rotation = dplane->state->rotation;
+ unsigned int rotation = plane_state->base.rotation;
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
int crtc_x = plane_state->base.dst.x1;
int crtc_y = plane_state->base.dst.y1;
@@ -427,6 +409,12 @@ vlv_update_plane(struct drm_plane *dplane,
if (fb->modifier[0] == I915_FORMAT_MOD_X_TILED)
sprctl |= SP_TILED;
+ if (rotation & DRM_ROTATE_180)
+ sprctl |= SP_ROTATE_180;
+
+ if (rotation & DRM_REFLECT_X)
+ sprctl |= SP_MIRROR;
+
/* Sizes are 0 based */
src_w--;
src_h--;
@@ -436,11 +424,11 @@ vlv_update_plane(struct drm_plane *dplane,
intel_add_fb_offsets(&x, &y, plane_state, 0);
sprsurf_offset = intel_compute_tile_offset(&x, &y, plane_state, 0);
- if (rotation == DRM_ROTATE_180) {
- sprctl |= SP_ROTATE_180;
-
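+ /* The scan-out origin moves to the mirrored corner: ROTATE_180
+ * offsets both axes below, REFLECT_X only the x axis.
+ */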
+ if (rotation & DRM_ROTATE_180) {
x += src_w;
y += src_h;
+ } else if (rotation & DRM_REFLECT_X) {
+ x += src_w;
}
linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
@@ -546,6 +534,9 @@ ivb_update_plane(struct drm_plane *plane,
if (fb->modifier[0] == I915_FORMAT_MOD_X_TILED)
sprctl |= SPRITE_TILED;
+ if (rotation & DRM_ROTATE_180)
+ sprctl |= SPRITE_ROTATE_180;
+
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
sprctl &= ~SPRITE_TRICKLE_FEED_DISABLE;
else
@@ -566,14 +557,11 @@ ivb_update_plane(struct drm_plane *plane,
intel_add_fb_offsets(&x, &y, plane_state, 0);
sprsurf_offset = intel_compute_tile_offset(&x, &y, plane_state, 0);
- if (rotation == DRM_ROTATE_180) {
- sprctl |= SPRITE_ROTATE_180;
-
- /* HSW and BDW does this automagically in hardware */
- if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)) {
- x += src_w;
- y += src_h;
- }
+ /* HSW+ does this automagically in hardware */
+ if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv) &&
+ rotation & DRM_ROTATE_180) {
+ x += src_w;
+ y += src_h;
}
linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
@@ -684,6 +672,9 @@ ilk_update_plane(struct drm_plane *plane,
if (fb->modifier[0] == I915_FORMAT_MOD_X_TILED)
dvscntr |= DVS_TILED;
+ if (rotation & DRM_ROTATE_180)
+ dvscntr |= DVS_ROTATE_180;
+
if (IS_GEN6(dev_priv))
dvscntr |= DVS_TRICKLE_FEED_DISABLE; /* must disable */
@@ -700,9 +691,7 @@ ilk_update_plane(struct drm_plane *plane,
intel_add_fb_offsets(&x, &y, plane_state, 0);
dvssurf_offset = intel_compute_tile_offset(&x, &y, plane_state, 0);
- if (rotation == DRM_ROTATE_180) {
- dvscntr |= DVS_ROTATE_180;
-
+ if (rotation & DRM_ROTATE_180) {
x += src_w;
y += src_h;
}
@@ -1112,6 +1101,10 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
supported_rotations =
DRM_ROTATE_0 | DRM_ROTATE_90 |
DRM_ROTATE_180 | DRM_ROTATE_270;
+ } else if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
+ supported_rotations =
+ DRM_ROTATE_0 | DRM_ROTATE_180 |
+ DRM_REFLECT_X;
} else {
supported_rotations =
DRM_ROTATE_0 | DRM_ROTATE_180;
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 9212f00d5752..78cdfc6833d6 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1029,8 +1029,7 @@ static void intel_tv_pre_enable(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
struct intel_tv *intel_tv = enc_to_tv(encoder);
const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
@@ -1116,7 +1115,7 @@ static void intel_tv_pre_enable(struct intel_encoder *encoder,
set_color_conversion(dev_priv, color_conversion);
- if (INTEL_INFO(dev)->gen >= 4)
+ if (INTEL_GEN(dev_priv) >= 4)
I915_WRITE(TV_CLR_KNOBS, 0x00404000);
else
I915_WRITE(TV_CLR_KNOBS, 0x00606000);
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index e2b188dcf908..d7be0d94ba4d 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -402,6 +402,8 @@ check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
static void __intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
bool restore_forcewake)
{
+ struct intel_device_info *info = mkwrite_device_info(dev_priv);
+
/* clear out unclaimed reg detection bit */
if (check_for_unclaimed_mmio(dev_priv))
DRM_DEBUG("unclaimed mmio detected on uncore init, clearing\n");
@@ -419,6 +421,10 @@ static void __intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
GT_FIFO_CTL_RC6_POLICY_STALL);
}
+ /* Enable Decoupled MMIO only on BXT C stepping onwards */
+ if (!IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER))
+ info->has_decoupled_mmio = false;
+
intel_uncore_forcewake_reset(dev_priv, restore_forcewake);
}
@@ -641,6 +647,8 @@ intel_fw_table_check(struct drm_i915_private *dev_priv)
num_ranges = dev_priv->uncore.fw_domains_table_entries;
for (i = 0, prev = -1; i < num_ranges; i++, ranges++) {
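+ /* On gen9 the table must also be gap-free: each range has to start
+ * immediately after the previous one ends.
+ */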
+ WARN_ON_ONCE(IS_GEN9(dev_priv) &&
+ (prev + 1) != (s32)ranges->start);
WARN_ON_ONCE(prev >= (s32)ranges->start);
prev = ranges->start;
WARN_ON_ONCE(prev >= (s32)ranges->end);
@@ -783,7 +791,7 @@ static const struct intel_forcewake_range __gen9_fw_ranges[] = {
GEN_FW_RANGE(0x9400, 0x97ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_BLITTER),
GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
- GEN_FW_RANGE(0xb480, 0xbfff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0xb480, 0xcfff, FORCEWAKE_BLITTER),
GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
GEN_FW_RANGE(0xd800, 0xdfff, FORCEWAKE_BLITTER),
GEN_FW_RANGE(0xe000, 0xe8ff, FORCEWAKE_RENDER),
@@ -831,6 +839,66 @@ unclaimed_reg_debug(struct drm_i915_private *dev_priv,
__unclaimed_reg_debug(dev_priv, reg, read, before);
}
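+/* Indexed by (forcewake domain mask - 1): single domains map 1:1,
+ * any combination falls back to GEN9_DECOUPLED_PD_ALL.
+ */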
+static const enum decoupled_power_domain fw2dpd_domain[] = {
+ GEN9_DECOUPLED_PD_RENDER,
+ GEN9_DECOUPLED_PD_BLITTER,
+ GEN9_DECOUPLED_PD_ALL,
+ GEN9_DECOUPLED_PD_MEDIA,
+ GEN9_DECOUPLED_PD_ALL,
+ GEN9_DECOUPLED_PD_ALL,
+ GEN9_DECOUPLED_PD_ALL
+};
+
+/*
+ * Decoupled MMIO access: each transaction transfers a single DWORD.
+ */
+static void __gen9_decoupled_mmio_access(struct drm_i915_private *dev_priv,
+ u32 reg,
+ enum forcewake_domains fw_domain,
+ enum decoupled_ops operation)
+{
+ enum decoupled_power_domain dp_domain;
+ u32 ctrl_reg_data = 0;
+
+ dp_domain = fw2dpd_domain[fw_domain - 1];
+
+ ctrl_reg_data |= reg;
+ ctrl_reg_data |= (operation << GEN9_DECOUPLED_OP_SHIFT);
+ ctrl_reg_data |= (dp_domain << GEN9_DECOUPLED_PD_SHIFT);
+ ctrl_reg_data |= GEN9_DECOUPLED_DW1_GO;
+ __raw_i915_write32(dev_priv, GEN9_DECOUPLED_REG0_DW1, ctrl_reg_data);
+
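+ /* The hardware clears the GO bit once the decoupled access completes. */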
+ if (wait_for_atomic((__raw_i915_read32(dev_priv,
+ GEN9_DECOUPLED_REG0_DW1) &
+ GEN9_DECOUPLED_DW1_GO) == 0,
+ FORCEWAKE_ACK_TIMEOUT_MS))
+ DRM_ERROR("Decoupled MMIO wait timed out\n");
+}
+
+static inline u32
+__gen9_decoupled_mmio_read32(struct drm_i915_private *dev_priv,
+ u32 reg,
+ enum forcewake_domains fw_domain)
+{
+ __gen9_decoupled_mmio_access(dev_priv, reg, fw_domain,
+ GEN9_DECOUPLED_OP_READ);
+
+ return __raw_i915_read32(dev_priv, GEN9_DECOUPLED_REG0_DW0);
+}
+
+static inline void
+__gen9_decoupled_mmio_write(struct drm_i915_private *dev_priv,
+ u32 reg, u32 data,
+ enum forcewake_domains fw_domain)
+{
+ __raw_i915_write32(dev_priv, GEN9_DECOUPLED_REG0_DW0, data);
+
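+ /* The payload must reach DW0 before DW1 triggers the access. */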
+ __gen9_decoupled_mmio_access(dev_priv, reg, fw_domain,
+ GEN9_DECOUPLED_OP_WRITE);
+}
+
#define GEN2_READ_HEADER(x) \
u##x val = 0; \
assert_rpm_wakelock_held(dev_priv);
@@ -935,6 +1003,28 @@ fwtable_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) {
GEN6_READ_FOOTER; \
}
+#define __gen9_decoupled_read(x) \
+static u##x \
+gen9_decoupled_read##x(struct drm_i915_private *dev_priv, \
+ i915_reg_t reg, bool trace) { \
+ enum forcewake_domains fw_engine; \
+ GEN6_READ_HEADER(x); \
+ fw_engine = __fwtable_reg_read_fw_domains(offset); \
+ if (fw_engine & ~dev_priv->uncore.fw_domains_active) { \
+ unsigned i; \
+ u32 *ptr_data = (u32 *) &val; \
+ for (i = 0; i < x/32; i++, offset += sizeof(u32), ptr_data++) \
+ *ptr_data = __gen9_decoupled_mmio_read32(dev_priv, \
+ offset, \
+ fw_engine); \
+ } else { \
+ val = __raw_i915_read##x(dev_priv, reg); \
+ } \
+ GEN6_READ_FOOTER; \
+}
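+/* With x = 64, the x/32 loop above issues two decoupled DWORD transactions. */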
+
+__gen9_decoupled_read(32)
+__gen9_decoupled_read(64)
__fwtable_read(8)
__fwtable_read(16)
__fwtable_read(32)
@@ -1064,6 +1154,25 @@ fwtable_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bo
GEN6_WRITE_FOOTER; \
}
+#define __gen9_decoupled_write(x) \
+static void \
+gen9_decoupled_write##x(struct drm_i915_private *dev_priv, \
+ i915_reg_t reg, u##x val, \
+ bool trace) { \
+ enum forcewake_domains fw_engine; \
+ GEN6_WRITE_HEADER; \
+ fw_engine = __fwtable_reg_write_fw_domains(offset); \
+ if (fw_engine & ~dev_priv->uncore.fw_domains_active) \
+ __gen9_decoupled_mmio_write(dev_priv, \
+ offset, \
+ val, \
+ fw_engine); \
+ else \
+ __raw_i915_write##x(dev_priv, reg, val); \
+ GEN6_WRITE_FOOTER; \
+}
+
+__gen9_decoupled_write(32)
__fwtable_write(8)
__fwtable_write(16)
__fwtable_write(32)
@@ -1287,6 +1396,14 @@ void intel_uncore_init(struct drm_i915_private *dev_priv)
ASSIGN_FW_DOMAINS_TABLE(__gen9_fw_ranges);
ASSIGN_WRITE_MMIO_VFUNCS(fwtable);
ASSIGN_READ_MMIO_VFUNCS(fwtable);
+ if (HAS_DECOUPLED_MMIO(dev_priv)) {
+ dev_priv->uncore.funcs.mmio_readl =
+ gen9_decoupled_read32;
+ dev_priv->uncore.funcs.mmio_readq =
+ gen9_decoupled_read64;
+ dev_priv->uncore.funcs.mmio_writel =
+ gen9_decoupled_write32;
+ }
break;
case 8:
if (IS_CHERRYVIEW(dev_priv)) {
@@ -1368,7 +1485,7 @@ int i915_reg_read_ioctl(struct drm_device *dev,
for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
if (i915_mmio_reg_offset(entry->offset_ldw) == (reg->offset & -entry->size) &&
- (INTEL_INFO(dev)->gen_mask & entry->gen_bitmask))
+ (INTEL_INFO(dev_priv)->gen_mask & entry->gen_bitmask))
break;
}
diff --git a/drivers/gpu/drm/i915/intel_vbt_defs.h b/drivers/gpu/drm/i915/intel_vbt_defs.h
index 68db9621f1f0..8886cab19f98 100644
--- a/drivers/gpu/drm/i915/intel_vbt_defs.h
+++ b/drivers/gpu/drm/i915/intel_vbt_defs.h
@@ -280,7 +280,8 @@ struct common_child_dev_config {
u8 dp_support:1;
u8 tmds_support:1;
u8 support_reserved:5;
- u8 not_common3[12];
+ u8 aux_channel;
+ u8 not_common3[11];
u8 iboost_level;
} __packed;
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
index c3a7d440bc11..38eabf65f19d 100644
--- a/include/drm/drm_edid.h
+++ b/include/drm/drm_edid.h
@@ -330,7 +330,6 @@ int drm_edid_to_sad(struct edid *edid, struct cea_sad **sads);
int drm_edid_to_speaker_allocation(struct edid *edid, u8 **sadb);
int drm_av_sync_delay(struct drm_connector *connector,
const struct drm_display_mode *mode);
-struct drm_connector *drm_select_eld(struct drm_encoder *encoder);
#ifdef CONFIG_DRM_LOAD_EDID_FIRMWARE
int drm_load_edid_firmware(struct drm_connector *connector);
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index 03725fe89859..1c12a350eca3 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -389,6 +389,11 @@ typedef struct drm_i915_irq_wait {
#define I915_PARAM_MIN_EU_IN_POOL 39
#define I915_PARAM_MMAP_GTT_VERSION 40
+/* Query whether DRM_I915_GEM_EXECBUFFER2 supports user-defined execution
+ * priorities; if so, the driver will attempt to execute batches in
+ * priority order.
+ */
+#define I915_PARAM_HAS_SCHEDULER 41
+
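A hedged userspace sketch of probing the new parameter through the getparam ioctl declared below, assuming libdrm's drmIoctl() wrapper (a raw ioctl(2) works equally well):

	int has_scheduler = 0;
	struct drm_i915_getparam gp = {
		.param = I915_PARAM_HAS_SCHEDULER,
		.value = &has_scheduler,
	};

	if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0 && has_scheduler)
		; /* execbuffer2 will honour user-supplied priorities */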
typedef struct drm_i915_getparam {
__s32 param;
/*