From 0189cb57b96ff92f75e3680b3710a46dacd6509f Mon Sep 17 00:00:00 2001 From: Xiyu Yang Date: Mon, 19 Jul 2021 13:59:45 +0800 Subject: fbmem: Convert from atomic_t to refcount_t on fb_info->count refcount_t type and corresponding API can protect refcounters from accidental underflow and overflow and further use-after-free situations. Signed-off-by: Xiyu Yang Signed-off-by: Xin Tan Signed-off-by: Sam Ravnborg Link: https://patchwork.freedesktop.org/patch/msgid/1626674392-55857-1-git-send-email-xiyuyang19@fudan.edu.cn --- include/linux/fb.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/fb.h b/include/linux/fb.h index a8dccd23c249..9023739e9a42 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h @@ -2,6 +2,7 @@ #ifndef _LINUX_FB_H #define _LINUX_FB_H +#include #include #include @@ -435,7 +436,7 @@ struct fb_tile_ops { struct fb_info { - atomic_t count; + refcount_t count; int node; int flags; /* -- cgit v1.2.3 From c715def51591a874a9fcfdc9a05d543e8797e697 Mon Sep 17 00:00:00 2001 From: Hridya Valsaraju Date: Mon, 12 Jul 2021 21:07:38 -0700 Subject: dma-buf: Delete the DMA-BUF attachment sysfs statistics The DMA-BUF attachment statistics form a subset of the DMA-BUF sysfs statistics that recently merged to the drm-misc tree. They are not UABI yet since they have not merged to the upstream Linux kernel. Since there has been a reported a performance regression due to the overhead of sysfs directory creation/teardown during dma_buf_attach()/dma_buf_detach(), this patch deletes the DMA-BUF attachment statistics from sysfs. Fixes: bdb8d06dfefd ("dmabuf: Add the capability to expose DMA-BUF stats in sysfs") Signed-off-by: Hridya Valsaraju Reviewed-by: Christian König Reviewed-by: Greg Kroah-Hartman Link: https://patchwork.freedesktop.org/patch/msgid/20210713040742.2680135-1-hridya@google.com Signed-off-by: Christian König --- .../ABI/testing/sysfs-kernel-dmabuf-buffers | 28 ----- drivers/dma-buf/dma-buf-sysfs-stats.c | 140 +-------------------- drivers/dma-buf/dma-buf-sysfs-stats.h | 27 ---- drivers/dma-buf/dma-buf.c | 16 --- include/linux/dma-buf.h | 17 --- 5 files changed, 4 insertions(+), 224 deletions(-) (limited to 'include') diff --git a/Documentation/ABI/testing/sysfs-kernel-dmabuf-buffers b/Documentation/ABI/testing/sysfs-kernel-dmabuf-buffers index a243984ed420..5d3bc997dc64 100644 --- a/Documentation/ABI/testing/sysfs-kernel-dmabuf-buffers +++ b/Documentation/ABI/testing/sysfs-kernel-dmabuf-buffers @@ -22,31 +22,3 @@ KernelVersion: v5.13 Contact: Hridya Valsaraju Description: This file is read-only and specifies the size of the DMA-BUF in bytes. - -What: /sys/kernel/dmabuf/buffers//attachments -Date: May 2021 -KernelVersion: v5.13 -Contact: Hridya Valsaraju -Description: This directory will contain subdirectories representing every - attachment of the DMA-BUF. - -What: /sys/kernel/dmabuf/buffers//attachments/ -Date: May 2021 -KernelVersion: v5.13 -Contact: Hridya Valsaraju -Description: This directory will contain information on the attached device - and the number of current distinct device mappings. - -What: /sys/kernel/dmabuf/buffers//attachments//device -Date: May 2021 -KernelVersion: v5.13 -Contact: Hridya Valsaraju -Description: This file is read-only and is a symlink to the attached device's - sysfs entry. 
- -What: /sys/kernel/dmabuf/buffers//attachments//map_counter -Date: May 2021 -KernelVersion: v5.13 -Contact: Hridya Valsaraju -Description: This file is read-only and contains a map_counter indicating the - number of distinct device mappings of the attachment. diff --git a/drivers/dma-buf/dma-buf-sysfs-stats.c b/drivers/dma-buf/dma-buf-sysfs-stats.c index a2638e84199c..053baadcada9 100644 --- a/drivers/dma-buf/dma-buf-sysfs-stats.c +++ b/drivers/dma-buf/dma-buf-sysfs-stats.c @@ -40,14 +40,11 @@ * * * ``/sys/kernel/dmabuf/buffers//exporter_name`` * * ``/sys/kernel/dmabuf/buffers//size`` - * * ``/sys/kernel/dmabuf/buffers//attachments//device`` - * * ``/sys/kernel/dmabuf/buffers//attachments//map_counter`` * - * The information in the interface can also be used to derive per-exporter and - * per-device usage statistics. The data from the interface can be gathered - * on error conditions or other important events to provide a snapshot of - * DMA-BUF usage. It can also be collected periodically by telemetry to monitor - * various metrics. + * The information in the interface can also be used to derive per-exporter + * statistics. The data from the interface can be gathered on error conditions + * or other important events to provide a snapshot of DMA-BUF usage. + * It can also be collected periodically by telemetry to monitor various metrics. * * Detailed documentation about the interface is present in * Documentation/ABI/testing/sysfs-kernel-dmabuf-buffers. @@ -121,120 +118,6 @@ static struct kobj_type dma_buf_ktype = { .default_groups = dma_buf_stats_default_groups, }; -#define to_dma_buf_attach_entry_from_kobj(x) container_of(x, struct dma_buf_attach_sysfs_entry, kobj) - -struct dma_buf_attach_stats_attribute { - struct attribute attr; - ssize_t (*show)(struct dma_buf_attach_sysfs_entry *sysfs_entry, - struct dma_buf_attach_stats_attribute *attr, char *buf); -}; -#define to_dma_buf_attach_stats_attr(x) container_of(x, struct dma_buf_attach_stats_attribute, attr) - -static ssize_t dma_buf_attach_stats_attribute_show(struct kobject *kobj, - struct attribute *attr, - char *buf) -{ - struct dma_buf_attach_stats_attribute *attribute; - struct dma_buf_attach_sysfs_entry *sysfs_entry; - - attribute = to_dma_buf_attach_stats_attr(attr); - sysfs_entry = to_dma_buf_attach_entry_from_kobj(kobj); - - if (!attribute->show) - return -EIO; - - return attribute->show(sysfs_entry, attribute, buf); -} - -static const struct sysfs_ops dma_buf_attach_stats_sysfs_ops = { - .show = dma_buf_attach_stats_attribute_show, -}; - -static ssize_t map_counter_show(struct dma_buf_attach_sysfs_entry *sysfs_entry, - struct dma_buf_attach_stats_attribute *attr, - char *buf) -{ - return sysfs_emit(buf, "%u\n", sysfs_entry->map_counter); -} - -static struct dma_buf_attach_stats_attribute map_counter_attribute = - __ATTR_RO(map_counter); - -static struct attribute *dma_buf_attach_stats_default_attrs[] = { - &map_counter_attribute.attr, - NULL, -}; -ATTRIBUTE_GROUPS(dma_buf_attach_stats_default); - -static void dma_buf_attach_sysfs_release(struct kobject *kobj) -{ - struct dma_buf_attach_sysfs_entry *sysfs_entry; - - sysfs_entry = to_dma_buf_attach_entry_from_kobj(kobj); - kfree(sysfs_entry); -} - -static struct kobj_type dma_buf_attach_ktype = { - .sysfs_ops = &dma_buf_attach_stats_sysfs_ops, - .release = dma_buf_attach_sysfs_release, - .default_groups = dma_buf_attach_stats_default_groups, -}; - -void dma_buf_attach_stats_teardown(struct dma_buf_attachment *attach) -{ - struct dma_buf_attach_sysfs_entry *sysfs_entry; - - 
sysfs_entry = attach->sysfs_entry; - if (!sysfs_entry) - return; - - sysfs_delete_link(&sysfs_entry->kobj, &attach->dev->kobj, "device"); - - kobject_del(&sysfs_entry->kobj); - kobject_put(&sysfs_entry->kobj); -} - -int dma_buf_attach_stats_setup(struct dma_buf_attachment *attach, - unsigned int uid) -{ - struct dma_buf_attach_sysfs_entry *sysfs_entry; - int ret; - struct dma_buf *dmabuf; - - if (!attach) - return -EINVAL; - - dmabuf = attach->dmabuf; - - sysfs_entry = kzalloc(sizeof(struct dma_buf_attach_sysfs_entry), - GFP_KERNEL); - if (!sysfs_entry) - return -ENOMEM; - - sysfs_entry->kobj.kset = dmabuf->sysfs_entry->attach_stats_kset; - - attach->sysfs_entry = sysfs_entry; - - ret = kobject_init_and_add(&sysfs_entry->kobj, &dma_buf_attach_ktype, - NULL, "%u", uid); - if (ret) - goto kobj_err; - - ret = sysfs_create_link(&sysfs_entry->kobj, &attach->dev->kobj, - "device"); - if (ret) - goto link_err; - - return 0; - -link_err: - kobject_del(&sysfs_entry->kobj); -kobj_err: - kobject_put(&sysfs_entry->kobj); - attach->sysfs_entry = NULL; - - return ret; -} void dma_buf_stats_teardown(struct dma_buf *dmabuf) { struct dma_buf_sysfs_entry *sysfs_entry; @@ -243,7 +126,6 @@ void dma_buf_stats_teardown(struct dma_buf *dmabuf) if (!sysfs_entry) return; - kset_unregister(sysfs_entry->attach_stats_kset); kobject_del(&sysfs_entry->kobj); kobject_put(&sysfs_entry->kobj); } @@ -290,7 +172,6 @@ int dma_buf_stats_setup(struct dma_buf *dmabuf) { struct dma_buf_sysfs_entry *sysfs_entry; int ret; - struct kset *attach_stats_kset; if (!dmabuf || !dmabuf->file) return -EINVAL; @@ -315,21 +196,8 @@ int dma_buf_stats_setup(struct dma_buf *dmabuf) if (ret) goto err_sysfs_dmabuf; - /* create the directory for attachment stats */ - attach_stats_kset = kset_create_and_add("attachments", - &dmabuf_sysfs_no_uevent_ops, - &sysfs_entry->kobj); - if (!attach_stats_kset) { - ret = -ENOMEM; - goto err_sysfs_attach; - } - - sysfs_entry->attach_stats_kset = attach_stats_kset; - return 0; -err_sysfs_attach: - kobject_del(&sysfs_entry->kobj); err_sysfs_dmabuf: kobject_put(&sysfs_entry->kobj); dmabuf->sysfs_entry = NULL; diff --git a/drivers/dma-buf/dma-buf-sysfs-stats.h b/drivers/dma-buf/dma-buf-sysfs-stats.h index 5f4703249117..a49c6e2650cc 100644 --- a/drivers/dma-buf/dma-buf-sysfs-stats.h +++ b/drivers/dma-buf/dma-buf-sysfs-stats.h @@ -14,23 +14,8 @@ int dma_buf_init_sysfs_statistics(void); void dma_buf_uninit_sysfs_statistics(void); int dma_buf_stats_setup(struct dma_buf *dmabuf); -int dma_buf_attach_stats_setup(struct dma_buf_attachment *attach, - unsigned int uid); -static inline void dma_buf_update_attachment_map_count(struct dma_buf_attachment *attach, - int delta) -{ - struct dma_buf_attach_sysfs_entry *entry = attach->sysfs_entry; - entry->map_counter += delta; -} void dma_buf_stats_teardown(struct dma_buf *dmabuf); -void dma_buf_attach_stats_teardown(struct dma_buf_attachment *attach); -static inline unsigned int dma_buf_update_attach_uid(struct dma_buf *dmabuf) -{ - struct dma_buf_sysfs_entry *entry = dmabuf->sysfs_entry; - - return entry->attachment_uid++; -} #else static inline int dma_buf_init_sysfs_statistics(void) @@ -44,19 +29,7 @@ static inline int dma_buf_stats_setup(struct dma_buf *dmabuf) { return 0; } -static inline int dma_buf_attach_stats_setup(struct dma_buf_attachment *attach, - unsigned int uid) -{ - return 0; -} static inline void dma_buf_stats_teardown(struct dma_buf *dmabuf) {} -static inline void dma_buf_attach_stats_teardown(struct dma_buf_attachment *attach) {} -static inline void 
dma_buf_update_attachment_map_count(struct dma_buf_attachment *attach, - int delta) {} -static inline unsigned int dma_buf_update_attach_uid(struct dma_buf *dmabuf) -{ - return 0; -} #endif #endif // _DMA_BUF_SYSFS_STATS_H diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c index 510b42771974..b1a6db71c656 100644 --- a/drivers/dma-buf/dma-buf.c +++ b/drivers/dma-buf/dma-buf.c @@ -738,7 +738,6 @@ dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev, { struct dma_buf_attachment *attach; int ret; - unsigned int attach_uid; if (WARN_ON(!dmabuf || !dev)) return ERR_PTR(-EINVAL); @@ -764,13 +763,8 @@ dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev, } dma_resv_lock(dmabuf->resv, NULL); list_add(&attach->node, &dmabuf->attachments); - attach_uid = dma_buf_update_attach_uid(dmabuf); dma_resv_unlock(dmabuf->resv); - ret = dma_buf_attach_stats_setup(attach, attach_uid); - if (ret) - goto err_sysfs; - /* When either the importer or the exporter can't handle dynamic * mappings we cache the mapping here to avoid issues with the * reservation object lock. @@ -797,7 +791,6 @@ dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev, dma_resv_unlock(attach->dmabuf->resv); attach->sgt = sgt; attach->dir = DMA_BIDIRECTIONAL; - dma_buf_update_attachment_map_count(attach, 1 /* delta */); } return attach; @@ -814,7 +807,6 @@ err_unlock: if (dma_buf_is_dynamic(attach->dmabuf)) dma_resv_unlock(attach->dmabuf->resv); -err_sysfs: dma_buf_detach(dmabuf, attach); return ERR_PTR(ret); } @@ -864,7 +856,6 @@ void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach) dma_resv_lock(attach->dmabuf->resv, NULL); __unmap_dma_buf(attach, attach->sgt, attach->dir); - dma_buf_update_attachment_map_count(attach, -1 /* delta */); if (dma_buf_is_dynamic(attach->dmabuf)) { dmabuf->ops->unpin(attach); @@ -878,7 +869,6 @@ void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach) if (dmabuf->ops->detach) dmabuf->ops->detach(dmabuf, attach); - dma_buf_attach_stats_teardown(attach); kfree(attach); } EXPORT_SYMBOL_GPL(dma_buf_detach); @@ -1020,10 +1010,6 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach, } } #endif /* CONFIG_DMA_API_DEBUG */ - - if (!IS_ERR(sg_table)) - dma_buf_update_attachment_map_count(attach, 1 /* delta */); - return sg_table; } EXPORT_SYMBOL_GPL(dma_buf_map_attachment); @@ -1061,8 +1047,6 @@ void dma_buf_unmap_attachment(struct dma_buf_attachment *attach, if (dma_buf_is_dynamic(attach->dmabuf) && !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) dma_buf_unpin(attach); - - dma_buf_update_attachment_map_count(attach, -1 /* delta */); } EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment); diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h index 2b814fde0d11..678b2006be78 100644 --- a/include/linux/dma-buf.h +++ b/include/linux/dma-buf.h @@ -444,15 +444,6 @@ struct dma_buf { struct dma_buf_sysfs_entry { struct kobject kobj; struct dma_buf *dmabuf; - - /** - * @sysfs_entry.attachment_uid: - * - * This is protected by the dma_resv_lock() on @resv and is - * incremented on each attach. - */ - unsigned int attachment_uid; - struct kset *attach_stats_kset; } *sysfs_entry; #endif }; @@ -504,7 +495,6 @@ struct dma_buf_attach_ops { * @importer_ops: importer operations for this attachment, if provided * dma_buf_map/unmap_attachment() must be called with the dma_resv lock held. * @importer_priv: importer specific attachment data. - * @sysfs_entry: For exposing information about this attachment in sysfs. 
* * This structure holds the attachment information between the dma_buf buffer * and its user device(s). The list contains one attachment struct per device @@ -525,13 +515,6 @@ struct dma_buf_attachment { const struct dma_buf_attach_ops *importer_ops; void *importer_priv; void *priv; -#ifdef CONFIG_DMABUF_SYSFS_STATS - /* for sysfs stats */ - struct dma_buf_attach_sysfs_entry { - struct kobject kobj; - unsigned int map_counter; - } *sysfs_entry; -#endif }; /** -- cgit v1.2.3 From 51fdf0914f2689e7e2549da303bcb38843119b5c Mon Sep 17 00:00:00 2001 From: Jim Cromie Date: Wed, 14 Jul 2021 11:51:34 -0600 Subject: drm/print: fixup spelling in a comment s/prink/printk/ - no functional changes Signed-off-by: Jim Cromie Signed-off-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20210714175138.319514-2-jim.cromie@gmail.com --- include/drm/drm_print.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/drm/drm_print.h b/include/drm/drm_print.h index 9b66be54dd16..15a089a87c22 100644 --- a/include/drm/drm_print.h +++ b/include/drm/drm_print.h @@ -327,7 +327,7 @@ static inline bool drm_debug_enabled(enum drm_debug_category category) /* * struct device based logging * - * Prefer drm_device based logging over device or prink based logging. + * Prefer drm_device based logging over device or printk based logging. */ __printf(3, 4) -- cgit v1.2.3 From 0b0860a3cf5eccf183760b1177a1dcdb821b0b66 Mon Sep 17 00:00:00 2001 From: Desmond Cheong Zhi Xi Date: Mon, 12 Jul 2021 12:35:07 +0800 Subject: drm: serialize drm_file.master with a new spinlock Currently, drm_file.master pointers should be protected by drm_device.master_mutex when being dereferenced. This is because drm_file.master is not invariant for the lifetime of drm_file. If drm_file is not the creator of master, then drm_file.is_master is false, and a call to drm_setmaster_ioctl will invoke drm_new_set_master, which then allocates a new master for drm_file and puts the old master. Thus, without holding drm_device.master_mutex, the old value of drm_file.master could be freed while it is being used by another concurrent process. However, it is not always possible to lock drm_device.master_mutex to dereference drm_file.master. Through the fbdev emulation code, this might occur in a deep nest of other locks. But drm_device.master_mutex is also the outermost lock in the nesting hierarchy, so this leads to potential deadlocks. To address this, we introduce a new spin lock at the bottom of the lock hierarchy that only serializes drm_file.master. With this change, the value of drm_file.master changes only when both drm_device.master_mutex and drm_file.master_lookup_lock are held. Hence, any process holding either of those locks can ensure that the value of drm_file.master will not change concurrently. Since no lock depends on the new drm_file.master_lookup_lock, when drm_file.master is dereferenced, but drm_device.master_mutex cannot be held, we can safely protect the master pointer with drm_file.master_lookup_lock. 
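For illustration only (a minimal sketch, not part of this patch), a reader that cannot take drm_device.master_mutex could pin the master under the new spinlock like this; the next patch in this series adds a drm_file_get_master() helper for exactly this pattern:

	/* Sketch: sketch_get_master() is a made-up name, not a real DRM symbol. */
	static struct drm_master *sketch_get_master(struct drm_file *file_priv)
	{
		struct drm_master *master = NULL;

		/* Serialize against concurrent updates of file_priv->master. */
		spin_lock(&file_priv->master_lookup_lock);
		if (file_priv->master)
			master = drm_master_get(file_priv->master);
		spin_unlock(&file_priv->master_lookup_lock);

		/* Caller drops the reference with drm_master_put() when done. */
		return master;
	}
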
Reported-by: Daniel Vetter Signed-off-by: Desmond Cheong Zhi Xi Signed-off-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20210712043508.11584-5-desmondcheongzx@gmail.com --- drivers/gpu/drm/drm_auth.c | 17 +++++++++++------ drivers/gpu/drm/drm_file.c | 1 + include/drm/drm_file.h | 12 +++++++++--- 3 files changed, 21 insertions(+), 9 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/drm_auth.c b/drivers/gpu/drm/drm_auth.c index ab1863c5a5a0..30a239901b36 100644 --- a/drivers/gpu/drm/drm_auth.c +++ b/drivers/gpu/drm/drm_auth.c @@ -164,16 +164,18 @@ static void drm_set_master(struct drm_device *dev, struct drm_file *fpriv, static int drm_new_set_master(struct drm_device *dev, struct drm_file *fpriv) { struct drm_master *old_master; + struct drm_master *new_master; lockdep_assert_held_once(&dev->master_mutex); WARN_ON(fpriv->is_master); old_master = fpriv->master; - fpriv->master = drm_master_create(dev); - if (!fpriv->master) { - fpriv->master = old_master; + new_master = drm_master_create(dev); + if (!new_master) return -ENOMEM; - } + spin_lock(&fpriv->master_lookup_lock); + fpriv->master = new_master; + spin_unlock(&fpriv->master_lookup_lock); fpriv->is_master = 1; fpriv->authenticated = 1; @@ -332,10 +334,13 @@ int drm_master_open(struct drm_file *file_priv) * any master object for render clients */ mutex_lock(&dev->master_mutex); - if (!dev->master) + if (!dev->master) { ret = drm_new_set_master(dev, file_priv); - else + } else { + spin_lock(&file_priv->master_lookup_lock); file_priv->master = drm_master_get(dev->master); + spin_unlock(&file_priv->master_lookup_lock); + } mutex_unlock(&dev->master_mutex); return ret; diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c index d4f0bac6f8f8..ceb1a9723855 100644 --- a/drivers/gpu/drm/drm_file.c +++ b/drivers/gpu/drm/drm_file.c @@ -176,6 +176,7 @@ struct drm_file *drm_file_alloc(struct drm_minor *minor) init_waitqueue_head(&file->event_wait); file->event_space = 4096; /* set aside 4k for event buffer */ + spin_lock_init(&file->master_lookup_lock); mutex_init(&file->event_read_lock); if (drm_core_check_feature(dev, DRIVER_GEM)) diff --git a/include/drm/drm_file.h b/include/drm/drm_file.h index b81b3bfb08c8..9b82988e3427 100644 --- a/include/drm/drm_file.h +++ b/include/drm/drm_file.h @@ -226,15 +226,21 @@ struct drm_file { /** * @master: * - * Master this node is currently associated with. Only relevant if - * drm_is_primary_client() returns true. Note that this only - * matches &drm_device.master if the master is the currently active one. + * Master this node is currently associated with. Protected by struct + * &drm_device.master_mutex, and serialized by @master_lookup_lock. + * + * Only relevant if drm_is_primary_client() returns true. Note that + * this only matches &drm_device.master if the master is the currently + * active one. * * See also @authentication and @is_master and the :ref:`section on * primary nodes and authentication `. */ struct drm_master *master; + /** @master_lock: Serializes @master. */ + spinlock_t master_lookup_lock; + /** @pid: Process that opened this file. */ struct pid *pid; -- cgit v1.2.3 From 56f0729a510f92151682ff6c89f69724d5595d6e Mon Sep 17 00:00:00 2001 From: Desmond Cheong Zhi Xi Date: Mon, 12 Jul 2021 12:35:08 +0800 Subject: drm: protect drm_master pointers in drm_lease.c drm_file->master pointers should be protected by drm_device.master_mutex or drm_file.master_lookup_lock when being dereferenced. 
However, in drm_lease.c, there are multiple instances where drm_file->master is accessed and dereferenced while neither lock is held. This makes drm_lease.c vulnerable to use-after-free bugs. We address this issue in 2 ways: 1. Add a new drm_file_get_master() function that calls drm_master_get on drm_file->master while holding on to drm_file.master_lookup_lock. Since drm_master_get increments the reference count of master, this prevents master from being freed until we unreference it with drm_master_put. 2. In each case where drm_file->master is directly accessed and eventually dereferenced in drm_lease.c, we wrap the access in a call to the new drm_file_get_master function, then unreference the master pointer once we are done using it. Reported-by: Daniel Vetter Signed-off-by: Desmond Cheong Zhi Xi Reviewed-by: Emil Velikov Signed-off-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20210712043508.11584-6-desmondcheongzx@gmail.com --- drivers/gpu/drm/drm_auth.c | 25 ++++++++++++++ drivers/gpu/drm/drm_lease.c | 81 ++++++++++++++++++++++++++++++++++----------- include/drm/drm_auth.h | 1 + include/drm/drm_file.h | 6 ++++ 4 files changed, 93 insertions(+), 20 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/drm_auth.c b/drivers/gpu/drm/drm_auth.c index 30a239901b36..f00354bec3fb 100644 --- a/drivers/gpu/drm/drm_auth.c +++ b/drivers/gpu/drm/drm_auth.c @@ -389,6 +389,31 @@ struct drm_master *drm_master_get(struct drm_master *master) } EXPORT_SYMBOL(drm_master_get); +/** + * drm_file_get_master - reference &drm_file.master of @file_priv + * @file_priv: DRM file private + * + * Increments the reference count of @file_priv's &drm_file.master and returns + * the &drm_file.master. If @file_priv has no &drm_file.master, returns NULL. + * + * Master pointers returned from this function should be unreferenced using + * drm_master_put(). 
+ */ +struct drm_master *drm_file_get_master(struct drm_file *file_priv) +{ + struct drm_master *master = NULL; + + spin_lock(&file_priv->master_lookup_lock); + if (!file_priv->master) + goto unlock; + master = drm_master_get(file_priv->master); + +unlock: + spin_unlock(&file_priv->master_lookup_lock); + return master; +} +EXPORT_SYMBOL(drm_file_get_master); + static void drm_master_destroy(struct kref *kref) { struct drm_master *master = container_of(kref, struct drm_master, refcount); diff --git a/drivers/gpu/drm/drm_lease.c b/drivers/gpu/drm/drm_lease.c index 00fb433bcef1..92eac73d9001 100644 --- a/drivers/gpu/drm/drm_lease.c +++ b/drivers/gpu/drm/drm_lease.c @@ -106,10 +106,19 @@ static bool _drm_has_leased(struct drm_master *master, int id) */ bool _drm_lease_held(struct drm_file *file_priv, int id) { - if (!file_priv || !file_priv->master) + bool ret; + struct drm_master *master; + + if (!file_priv) return true; - return _drm_lease_held_master(file_priv->master, id); + master = drm_file_get_master(file_priv); + if (!master) + return true; + ret = _drm_lease_held_master(master, id); + drm_master_put(&master); + + return ret; } /** @@ -128,13 +137,22 @@ bool drm_lease_held(struct drm_file *file_priv, int id) struct drm_master *master; bool ret; - if (!file_priv || !file_priv->master || !file_priv->master->lessor) + if (!file_priv) return true; - master = file_priv->master; + master = drm_file_get_master(file_priv); + if (!master) + return true; + if (!master->lessor) { + ret = true; + goto out; + } mutex_lock(&master->dev->mode_config.idr_mutex); ret = _drm_lease_held_master(master, id); mutex_unlock(&master->dev->mode_config.idr_mutex); + +out: + drm_master_put(&master); return ret; } @@ -154,10 +172,16 @@ uint32_t drm_lease_filter_crtcs(struct drm_file *file_priv, uint32_t crtcs_in) int count_in, count_out; uint32_t crtcs_out = 0; - if (!file_priv || !file_priv->master || !file_priv->master->lessor) + if (!file_priv) return crtcs_in; - master = file_priv->master; + master = drm_file_get_master(file_priv); + if (!master) + return crtcs_in; + if (!master->lessor) { + crtcs_out = crtcs_in; + goto out; + } dev = master->dev; count_in = count_out = 0; @@ -176,6 +200,9 @@ uint32_t drm_lease_filter_crtcs(struct drm_file *file_priv, uint32_t crtcs_in) count_in++; } mutex_unlock(&master->dev->mode_config.idr_mutex); + +out: + drm_master_put(&master); return crtcs_out; } @@ -489,7 +516,7 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev, size_t object_count; int ret = 0; struct idr leases; - struct drm_master *lessor = lessor_priv->master; + struct drm_master *lessor; struct drm_master *lessee = NULL; struct file *lessee_file = NULL; struct file *lessor_file = lessor_priv->filp; @@ -501,12 +528,6 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev, if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -EOPNOTSUPP; - /* Do not allow sub-leases */ - if (lessor->lessor) { - DRM_DEBUG_LEASE("recursive leasing not allowed\n"); - return -EINVAL; - } - /* need some objects */ if (cl->object_count == 0) { DRM_DEBUG_LEASE("no objects in lease\n"); @@ -518,12 +539,22 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev, return -EINVAL; } + lessor = drm_file_get_master(lessor_priv); + /* Do not allow sub-leases */ + if (lessor->lessor) { + DRM_DEBUG_LEASE("recursive leasing not allowed\n"); + ret = -EINVAL; + goto out_lessor; + } + object_count = cl->object_count; object_ids = memdup_user(u64_to_user_ptr(cl->object_ids), array_size(object_count, sizeof(__u32))); - if 
(IS_ERR(object_ids)) - return PTR_ERR(object_ids); + if (IS_ERR(object_ids)) { + ret = PTR_ERR(object_ids); + goto out_lessor; + } idr_init(&leases); @@ -534,14 +565,15 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev, if (ret) { DRM_DEBUG_LEASE("lease object lookup failed: %i\n", ret); idr_destroy(&leases); - return ret; + goto out_lessor; } /* Allocate a file descriptor for the lease */ fd = get_unused_fd_flags(cl->flags & (O_CLOEXEC | O_NONBLOCK)); if (fd < 0) { idr_destroy(&leases); - return fd; + ret = fd; + goto out_lessor; } DRM_DEBUG_LEASE("Creating lease\n"); @@ -577,6 +609,7 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev, /* Hook up the fd */ fd_install(fd, lessee_file); + drm_master_put(&lessor); DRM_DEBUG_LEASE("drm_mode_create_lease_ioctl succeeded\n"); return 0; @@ -586,6 +619,8 @@ out_lessee: out_leases: put_unused_fd(fd); +out_lessor: + drm_master_put(&lessor); DRM_DEBUG_LEASE("drm_mode_create_lease_ioctl failed: %d\n", ret); return ret; } @@ -608,7 +643,7 @@ int drm_mode_list_lessees_ioctl(struct drm_device *dev, struct drm_mode_list_lessees *arg = data; __u32 __user *lessee_ids = (__u32 __user *) (uintptr_t) (arg->lessees_ptr); __u32 count_lessees = arg->count_lessees; - struct drm_master *lessor = lessor_priv->master, *lessee; + struct drm_master *lessor, *lessee; int count; int ret = 0; @@ -619,6 +654,7 @@ int drm_mode_list_lessees_ioctl(struct drm_device *dev, if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -EOPNOTSUPP; + lessor = drm_file_get_master(lessor_priv); DRM_DEBUG_LEASE("List lessees for %d\n", lessor->lessee_id); mutex_lock(&dev->mode_config.idr_mutex); @@ -642,6 +678,7 @@ int drm_mode_list_lessees_ioctl(struct drm_device *dev, arg->count_lessees = count; mutex_unlock(&dev->mode_config.idr_mutex); + drm_master_put(&lessor); return ret; } @@ -661,7 +698,7 @@ int drm_mode_get_lease_ioctl(struct drm_device *dev, struct drm_mode_get_lease *arg = data; __u32 __user *object_ids = (__u32 __user *) (uintptr_t) (arg->objects_ptr); __u32 count_objects = arg->count_objects; - struct drm_master *lessee = lessee_priv->master; + struct drm_master *lessee; struct idr *object_idr; int count; void *entry; @@ -675,6 +712,7 @@ int drm_mode_get_lease_ioctl(struct drm_device *dev, if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -EOPNOTSUPP; + lessee = drm_file_get_master(lessee_priv); DRM_DEBUG_LEASE("get lease for %d\n", lessee->lessee_id); mutex_lock(&dev->mode_config.idr_mutex); @@ -702,6 +740,7 @@ int drm_mode_get_lease_ioctl(struct drm_device *dev, arg->count_objects = count; mutex_unlock(&dev->mode_config.idr_mutex); + drm_master_put(&lessee); return ret; } @@ -720,7 +759,7 @@ int drm_mode_revoke_lease_ioctl(struct drm_device *dev, void *data, struct drm_file *lessor_priv) { struct drm_mode_revoke_lease *arg = data; - struct drm_master *lessor = lessor_priv->master; + struct drm_master *lessor; struct drm_master *lessee; int ret = 0; @@ -730,6 +769,7 @@ int drm_mode_revoke_lease_ioctl(struct drm_device *dev, if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -EOPNOTSUPP; + lessor = drm_file_get_master(lessor_priv); mutex_lock(&dev->mode_config.idr_mutex); lessee = _drm_find_lessee(lessor, arg->lessee_id); @@ -750,6 +790,7 @@ int drm_mode_revoke_lease_ioctl(struct drm_device *dev, fail: mutex_unlock(&dev->mode_config.idr_mutex); + drm_master_put(&lessor); return ret; } diff --git a/include/drm/drm_auth.h b/include/drm/drm_auth.h index 6bf8b2b78991..f99d3417f304 100644 --- a/include/drm/drm_auth.h +++ b/include/drm/drm_auth.h @@ 
-107,6 +107,7 @@ struct drm_master { }; struct drm_master *drm_master_get(struct drm_master *master); +struct drm_master *drm_file_get_master(struct drm_file *file_priv); void drm_master_put(struct drm_master **master); bool drm_is_current_master(struct drm_file *fpriv); diff --git a/include/drm/drm_file.h b/include/drm/drm_file.h index 9b82988e3427..726cfe0ff5f5 100644 --- a/include/drm/drm_file.h +++ b/include/drm/drm_file.h @@ -233,6 +233,12 @@ struct drm_file { * this only matches &drm_device.master if the master is the currently * active one. * + * When dereferencing this pointer, either hold struct + * &drm_device.master_mutex for the duration of the pointer's use, or + * use drm_file_get_master() if struct &drm_device.master_mutex is not + * currently held and there is no other need to hold it. This prevents + * @master from being freed during use. + * * See also @authentication and @is_master and the :ref:`section on * primary nodes and authentication `. */ -- cgit v1.2.3 From 26a4dc29b74a137f45665089f6d3d633fcc9b662 Mon Sep 17 00:00:00 2001 From: Juan A. Suarez Romero Date: Tue, 8 Jun 2021 13:15:41 +0200 Subject: drm/v3d: Expose performance counters to userspace The V3D engine has several hardware performance counters that can of interest for userspace performance analysis tools. This exposes new ioctls to create and destroy performance monitor objects, as well as to query the counter values. Each created performance monitor object has an ID that can be attached to CL/CSD submissions, so the driver enables the requested counters when the job is submitted, and updates the performance monitor values when the job is done. It is up to the user to ensure all the jobs have been finished before getting the performance monitor values. It is also up to the user to properly synchronize BCL jobs when submitting jobs with different performance monitors attached. Cc: Daniel Vetter Cc: David Airlie Cc: Emma Anholt To: dri-devel@lists.freedesktop.org Signed-off-by: Juan A. 
Suarez Romero Acked-by: Melissa Wen Signed-off-by: Melissa Wen Link: https://patchwork.freedesktop.org/patch/msgid/20210608111541.461991-1-jasuarez@igalia.com --- drivers/gpu/drm/v3d/Makefile | 1 + drivers/gpu/drm/v3d/v3d_drv.c | 8 ++ drivers/gpu/drm/v3d/v3d_drv.h | 63 +++++++++++ drivers/gpu/drm/v3d/v3d_gem.c | 31 ++++++ drivers/gpu/drm/v3d/v3d_perfmon.c | 213 ++++++++++++++++++++++++++++++++++++++ drivers/gpu/drm/v3d/v3d_regs.h | 2 + drivers/gpu/drm/v3d/v3d_sched.c | 16 +++ include/uapi/drm/v3d_drm.h | 136 ++++++++++++++++++++++++ 8 files changed, 470 insertions(+) create mode 100644 drivers/gpu/drm/v3d/v3d_perfmon.c (limited to 'include') diff --git a/drivers/gpu/drm/v3d/Makefile b/drivers/gpu/drm/v3d/Makefile index db4cfc155821..e8b314137020 100644 --- a/drivers/gpu/drm/v3d/Makefile +++ b/drivers/gpu/drm/v3d/Makefile @@ -9,6 +9,7 @@ v3d-y := \ v3d_gem.o \ v3d_irq.o \ v3d_mmu.o \ + v3d_perfmon.o \ v3d_trace_points.o \ v3d_sched.o diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c index 99e22beea90b..9403c3b36aca 100644 --- a/drivers/gpu/drm/v3d/v3d_drv.c +++ b/drivers/gpu/drm/v3d/v3d_drv.c @@ -94,6 +94,9 @@ static int v3d_get_param_ioctl(struct drm_device *dev, void *data, case DRM_V3D_PARAM_SUPPORTS_CACHE_FLUSH: args->value = 1; return 0; + case DRM_V3D_PARAM_SUPPORTS_PERFMON: + args->value = (v3d->ver >= 40); + return 0; default: DRM_DEBUG("Unknown parameter %d\n", args->param); return -EINVAL; @@ -121,6 +124,7 @@ v3d_open(struct drm_device *dev, struct drm_file *file) 1, NULL); } + v3d_perfmon_open_file(v3d_priv); file->driver_priv = v3d_priv; return 0; @@ -136,6 +140,7 @@ v3d_postclose(struct drm_device *dev, struct drm_file *file) drm_sched_entity_destroy(&v3d_priv->sched_entity[q]); } + v3d_perfmon_close_file(v3d_priv); kfree(v3d_priv); } @@ -156,6 +161,9 @@ static const struct drm_ioctl_desc v3d_drm_ioctls[] = { DRM_IOCTL_DEF_DRV(V3D_GET_BO_OFFSET, v3d_get_bo_offset_ioctl, DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(V3D_SUBMIT_TFU, v3d_submit_tfu_ioctl, DRM_RENDER_ALLOW | DRM_AUTH), DRM_IOCTL_DEF_DRV(V3D_SUBMIT_CSD, v3d_submit_csd_ioctl, DRM_RENDER_ALLOW | DRM_AUTH), + DRM_IOCTL_DEF_DRV(V3D_PERFMON_CREATE, v3d_perfmon_create_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(V3D_PERFMON_DESTROY, v3d_perfmon_destroy_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(V3D_PERFMON_GET_VALUES, v3d_perfmon_get_values_ioctl, DRM_RENDER_ALLOW), }; static const struct drm_driver v3d_drm_driver = { diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h index 8a390738d65b..270134779073 100644 --- a/drivers/gpu/drm/v3d/v3d_drv.h +++ b/drivers/gpu/drm/v3d/v3d_drv.h @@ -37,6 +37,40 @@ struct v3d_queue_state { u64 emit_seqno; }; +/* Performance monitor object. The perform lifetime is controlled by userspace + * using perfmon related ioctls. A perfmon can be attached to a submit_cl + * request, and when this is the case, HW perf counters will be activated just + * before the submit_cl is submitted to the GPU and disabled when the job is + * done. This way, only events related to a specific job will be counted. + */ +struct v3d_perfmon { + /* Tracks the number of users of the perfmon, when this counter reaches + * zero the perfmon is destroyed. + */ + refcount_t refcnt; + + /* Protects perfmon stop, as it can be invoked from multiple places. */ + struct mutex lock; + + /* Number of counters activated in this perfmon instance + * (should be less than DRM_V3D_MAX_PERF_COUNTERS). + */ + u8 ncounters; + + /* Events counted by the HW perf counters. 
*/ + u8 counters[DRM_V3D_MAX_PERF_COUNTERS]; + + /* Storage for counter values. Counters are incremented by the + * HW perf counter values every time the perfmon is attached + * to a GPU job. This way, perfmon users don't have to + * retrieve the results after each job if they want to track + * events covering several submissions. Note that counter + * values can't be reset, but you can fake a reset by + * destroying the perfmon and creating a new one. + */ + u64 values[]; +}; + struct v3d_dev { struct drm_device drm; @@ -89,6 +123,9 @@ struct v3d_dev { */ spinlock_t job_lock; + /* Used to track the active perfmon if any. */ + struct v3d_perfmon *active_perfmon; + /* Protects bo_stats */ struct mutex bo_lock; @@ -133,6 +170,11 @@ v3d_has_csd(struct v3d_dev *v3d) struct v3d_file_priv { struct v3d_dev *v3d; + struct { + struct idr idr; + struct mutex lock; + } perfmon; + struct drm_sched_entity sched_entity[V3D_MAX_QUEUES]; }; @@ -205,6 +247,11 @@ struct v3d_job { */ struct dma_fence *done_fence; + /* Pointer to a performance monitor object if the user requested it, + * NULL otherwise. + */ + struct v3d_perfmon *perfmon; + /* Callback for the freeing of the job on refcount going to 0. */ void (*free)(struct kref *ref); }; @@ -353,3 +400,19 @@ void v3d_mmu_remove_ptes(struct v3d_bo *bo); /* v3d_sched.c */ int v3d_sched_init(struct v3d_dev *v3d); void v3d_sched_fini(struct v3d_dev *v3d); + +/* v3d_perfmon.c */ +void v3d_perfmon_get(struct v3d_perfmon *perfmon); +void v3d_perfmon_put(struct v3d_perfmon *perfmon); +void v3d_perfmon_start(struct v3d_dev *v3d, struct v3d_perfmon *perfmon); +void v3d_perfmon_stop(struct v3d_dev *v3d, struct v3d_perfmon *perfmon, + bool capture); +struct v3d_perfmon *v3d_perfmon_find(struct v3d_file_priv *v3d_priv, int id); +void v3d_perfmon_open_file(struct v3d_file_priv *v3d_priv); +void v3d_perfmon_close_file(struct v3d_file_priv *v3d_priv); +int v3d_perfmon_create_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +int v3d_perfmon_destroy_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +int v3d_perfmon_get_values_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c index 4eb354226972..5689da118197 100644 --- a/drivers/gpu/drm/v3d/v3d_gem.c +++ b/drivers/gpu/drm/v3d/v3d_gem.c @@ -126,6 +126,8 @@ v3d_reset(struct v3d_dev *v3d) v3d_mmu_set_page_table(v3d); v3d_irq_reset(v3d); + v3d_perfmon_stop(v3d, v3d->active_perfmon, false); + trace_v3d_reset_end(dev); } @@ -375,6 +377,9 @@ v3d_job_free(struct kref *ref) pm_runtime_mark_last_busy(job->v3d->drm.dev); pm_runtime_put_autosuspend(job->v3d->drm.dev); + if (job->perfmon) + v3d_perfmon_put(job->perfmon); + kfree(job); } @@ -539,6 +544,9 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data, trace_v3d_submit_cl_ioctl(&v3d->drm, args->rcl_start, args->rcl_end); + if (args->pad != 0) + return -EINVAL; + if (args->flags != 0 && args->flags != DRM_V3D_SUBMIT_CL_FLUSH_CACHE) { DRM_INFO("invalid flags: %d\n", args->flags); @@ -611,8 +619,20 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data, if (ret) goto fail; + if (args->perfmon_id) { + render->base.perfmon = v3d_perfmon_find(v3d_priv, + args->perfmon_id); + + if (!render->base.perfmon) { + ret = -ENOENT; + goto fail; + } + } + mutex_lock(&v3d->sched_lock); if (bin) { + bin->base.perfmon = render->base.perfmon; + v3d_perfmon_get(bin->base.perfmon); ret = v3d_push_job(v3d_priv, &bin->base, V3D_BIN); if (ret) goto 
fail_unreserve; @@ -633,6 +653,8 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data, ret = drm_gem_fence_array_add(&clean_job->deps, render_fence); if (ret) goto fail_unreserve; + clean_job->perfmon = render->base.perfmon; + v3d_perfmon_get(clean_job->perfmon); ret = v3d_push_job(v3d_priv, clean_job, V3D_CACHE_CLEAN); if (ret) goto fail_unreserve; @@ -827,6 +849,15 @@ v3d_submit_csd_ioctl(struct drm_device *dev, void *data, if (ret) goto fail; + if (args->perfmon_id) { + job->base.perfmon = v3d_perfmon_find(v3d_priv, + args->perfmon_id); + if (!job->base.perfmon) { + ret = -ENOENT; + goto fail; + } + } + mutex_lock(&v3d->sched_lock); ret = v3d_push_job(v3d_priv, &job->base, V3D_CSD); if (ret) diff --git a/drivers/gpu/drm/v3d/v3d_perfmon.c b/drivers/gpu/drm/v3d/v3d_perfmon.c new file mode 100644 index 000000000000..0288ef063513 --- /dev/null +++ b/drivers/gpu/drm/v3d/v3d_perfmon.c @@ -0,0 +1,213 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 Raspberry Pi + */ + +#include "v3d_drv.h" +#include "v3d_regs.h" + +#define V3D_PERFMONID_MIN 1 +#define V3D_PERFMONID_MAX U32_MAX + +void v3d_perfmon_get(struct v3d_perfmon *perfmon) +{ + if (perfmon) + refcount_inc(&perfmon->refcnt); +} + +void v3d_perfmon_put(struct v3d_perfmon *perfmon) +{ + if (perfmon && refcount_dec_and_test(&perfmon->refcnt)) + kfree(perfmon); +} + +void v3d_perfmon_start(struct v3d_dev *v3d, struct v3d_perfmon *perfmon) +{ + unsigned int i; + u32 mask; + u8 ncounters = perfmon->ncounters; + + if (WARN_ON_ONCE(!perfmon || v3d->active_perfmon)) + return; + + mask = GENMASK(ncounters - 1, 0); + + for (i = 0; i < ncounters; i++) { + u32 source = i / 4; + u32 channel = V3D_SET_FIELD(perfmon->counters[i], V3D_PCTR_S0); + + i++; + channel |= V3D_SET_FIELD(i < ncounters ? perfmon->counters[i] : 0, + V3D_PCTR_S1); + i++; + channel |= V3D_SET_FIELD(i < ncounters ? perfmon->counters[i] : 0, + V3D_PCTR_S2); + i++; + channel |= V3D_SET_FIELD(i < ncounters ? 
perfmon->counters[i] : 0, + V3D_PCTR_S3); + V3D_CORE_WRITE(0, V3D_V4_PCTR_0_SRC_X(source), channel); + } + + V3D_CORE_WRITE(0, V3D_V4_PCTR_0_CLR, mask); + V3D_CORE_WRITE(0, V3D_PCTR_0_OVERFLOW, mask); + V3D_CORE_WRITE(0, V3D_V4_PCTR_0_EN, mask); + + v3d->active_perfmon = perfmon; +} + +void v3d_perfmon_stop(struct v3d_dev *v3d, struct v3d_perfmon *perfmon, + bool capture) +{ + unsigned int i; + + if (!perfmon || !v3d->active_perfmon) + return; + + mutex_lock(&perfmon->lock); + if (perfmon != v3d->active_perfmon) { + mutex_unlock(&perfmon->lock); + return; + } + + if (capture) + for (i = 0; i < perfmon->ncounters; i++) + perfmon->values[i] += V3D_CORE_READ(0, V3D_PCTR_0_PCTRX(i)); + + V3D_CORE_WRITE(0, V3D_V4_PCTR_0_EN, 0); + + v3d->active_perfmon = NULL; + mutex_unlock(&perfmon->lock); +} + +struct v3d_perfmon *v3d_perfmon_find(struct v3d_file_priv *v3d_priv, int id) +{ + struct v3d_perfmon *perfmon; + + mutex_lock(&v3d_priv->perfmon.lock); + perfmon = idr_find(&v3d_priv->perfmon.idr, id); + v3d_perfmon_get(perfmon); + mutex_unlock(&v3d_priv->perfmon.lock); + + return perfmon; +} + +void v3d_perfmon_open_file(struct v3d_file_priv *v3d_priv) +{ + mutex_init(&v3d_priv->perfmon.lock); + idr_init(&v3d_priv->perfmon.idr); +} + +static int v3d_perfmon_idr_del(int id, void *elem, void *data) +{ + struct v3d_perfmon *perfmon = elem; + + v3d_perfmon_put(perfmon); + + return 0; +} + +void v3d_perfmon_close_file(struct v3d_file_priv *v3d_priv) +{ + mutex_lock(&v3d_priv->perfmon.lock); + idr_for_each(&v3d_priv->perfmon.idr, v3d_perfmon_idr_del, NULL); + idr_destroy(&v3d_priv->perfmon.idr); + mutex_unlock(&v3d_priv->perfmon.lock); +} + +int v3d_perfmon_create_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct v3d_file_priv *v3d_priv = file_priv->driver_priv; + struct drm_v3d_perfmon_create *req = data; + struct v3d_perfmon *perfmon; + unsigned int i; + int ret; + + /* Number of monitored counters cannot exceed HW limits. */ + if (req->ncounters > DRM_V3D_MAX_PERF_COUNTERS || + !req->ncounters) + return -EINVAL; + + /* Make sure all counters are valid. 
*/ + for (i = 0; i < req->ncounters; i++) { + if (req->counters[i] >= V3D_PERFCNT_NUM) + return -EINVAL; + } + + perfmon = kzalloc(struct_size(perfmon, values, req->ncounters), + GFP_KERNEL); + if (!perfmon) + return -ENOMEM; + + for (i = 0; i < req->ncounters; i++) + perfmon->counters[i] = req->counters[i]; + + perfmon->ncounters = req->ncounters; + + refcount_set(&perfmon->refcnt, 1); + mutex_init(&perfmon->lock); + + mutex_lock(&v3d_priv->perfmon.lock); + ret = idr_alloc(&v3d_priv->perfmon.idr, perfmon, V3D_PERFMONID_MIN, + V3D_PERFMONID_MAX, GFP_KERNEL); + mutex_unlock(&v3d_priv->perfmon.lock); + + if (ret < 0) { + kfree(perfmon); + return ret; + } + + req->id = ret; + + return 0; +} + +int v3d_perfmon_destroy_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct v3d_file_priv *v3d_priv = file_priv->driver_priv; + struct drm_v3d_perfmon_destroy *req = data; + struct v3d_perfmon *perfmon; + + mutex_lock(&v3d_priv->perfmon.lock); + perfmon = idr_remove(&v3d_priv->perfmon.idr, req->id); + mutex_unlock(&v3d_priv->perfmon.lock); + + if (!perfmon) + return -EINVAL; + + v3d_perfmon_put(perfmon); + + return 0; +} + +int v3d_perfmon_get_values_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct v3d_dev *v3d = to_v3d_dev(dev); + struct v3d_file_priv *v3d_priv = file_priv->driver_priv; + struct drm_v3d_perfmon_get_values *req = data; + struct v3d_perfmon *perfmon; + int ret = 0; + + if (req->pad != 0) + return -EINVAL; + + mutex_lock(&v3d_priv->perfmon.lock); + perfmon = idr_find(&v3d_priv->perfmon.idr, req->id); + v3d_perfmon_get(perfmon); + mutex_unlock(&v3d_priv->perfmon.lock); + + if (!perfmon) + return -EINVAL; + + v3d_perfmon_stop(v3d, perfmon, true); + + if (copy_to_user(u64_to_user_ptr(req->values_ptr), perfmon->values, + perfmon->ncounters * sizeof(u64))) + ret = -EFAULT; + + v3d_perfmon_put(perfmon); + + return ret; +} diff --git a/drivers/gpu/drm/v3d/v3d_regs.h b/drivers/gpu/drm/v3d/v3d_regs.h index 9bcb57781d31..3663e0d6bf76 100644 --- a/drivers/gpu/drm/v3d/v3d_regs.h +++ b/drivers/gpu/drm/v3d/v3d_regs.h @@ -347,6 +347,8 @@ /* Each src reg muxes four counters each. */ #define V3D_V4_PCTR_0_SRC_0_3 0x00660 #define V3D_V4_PCTR_0_SRC_28_31 0x0067c +#define V3D_V4_PCTR_0_SRC_X(x) (V3D_V4_PCTR_0_SRC_0_3 + \ + 4 * (x)) # define V3D_PCTR_S0_MASK V3D_MASK(6, 0) # define V3D_PCTR_S0_SHIFT 0 # define V3D_PCTR_S1_MASK V3D_MASK(14, 8) diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c index a39bdd5cfc4f..dd7fcc36d726 100644 --- a/drivers/gpu/drm/v3d/v3d_sched.c +++ b/drivers/gpu/drm/v3d/v3d_sched.c @@ -63,6 +63,16 @@ v3d_job_free(struct drm_sched_job *sched_job) v3d_job_put(job); } +static void +v3d_switch_perfmon(struct v3d_dev *v3d, struct v3d_job *job) +{ + if (job->perfmon != v3d->active_perfmon) + v3d_perfmon_stop(v3d, v3d->active_perfmon, true); + + if (job->perfmon && v3d->active_perfmon != job->perfmon) + v3d_perfmon_start(v3d, job->perfmon); +} + /* * Returns the fences that the job depends on, one by one. * @@ -120,6 +130,8 @@ static struct dma_fence *v3d_bin_job_run(struct drm_sched_job *sched_job) trace_v3d_submit_cl(dev, false, to_v3d_fence(fence)->seqno, job->start, job->end); + v3d_switch_perfmon(v3d, &job->base); + /* Set the current and end address of the control list. * Writing the end register is what starts the job. 
*/ @@ -169,6 +181,8 @@ static struct dma_fence *v3d_render_job_run(struct drm_sched_job *sched_job) trace_v3d_submit_cl(dev, true, to_v3d_fence(fence)->seqno, job->start, job->end); + v3d_switch_perfmon(v3d, &job->base); + /* XXX: Set the QCFG */ /* Set the current and end address of the control list. @@ -240,6 +254,8 @@ v3d_csd_job_run(struct drm_sched_job *sched_job) trace_v3d_submit_csd(dev, to_v3d_fence(fence)->seqno); + v3d_switch_perfmon(v3d, &job->base); + for (i = 1; i <= 6; i++) V3D_CORE_WRITE(0, V3D_CSD_QUEUED_CFG0 + 4 * i, job->args.cfg[i]); /* CFG0 write kicks off the job. */ diff --git a/include/uapi/drm/v3d_drm.h b/include/uapi/drm/v3d_drm.h index 1ce746e228d9..4104f22fb3d3 100644 --- a/include/uapi/drm/v3d_drm.h +++ b/include/uapi/drm/v3d_drm.h @@ -38,6 +38,9 @@ extern "C" { #define DRM_V3D_GET_BO_OFFSET 0x05 #define DRM_V3D_SUBMIT_TFU 0x06 #define DRM_V3D_SUBMIT_CSD 0x07 +#define DRM_V3D_PERFMON_CREATE 0x08 +#define DRM_V3D_PERFMON_DESTROY 0x09 +#define DRM_V3D_PERFMON_GET_VALUES 0x0a #define DRM_IOCTL_V3D_SUBMIT_CL DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_SUBMIT_CL, struct drm_v3d_submit_cl) #define DRM_IOCTL_V3D_WAIT_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_WAIT_BO, struct drm_v3d_wait_bo) @@ -47,6 +50,12 @@ extern "C" { #define DRM_IOCTL_V3D_GET_BO_OFFSET DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_GET_BO_OFFSET, struct drm_v3d_get_bo_offset) #define DRM_IOCTL_V3D_SUBMIT_TFU DRM_IOW(DRM_COMMAND_BASE + DRM_V3D_SUBMIT_TFU, struct drm_v3d_submit_tfu) #define DRM_IOCTL_V3D_SUBMIT_CSD DRM_IOW(DRM_COMMAND_BASE + DRM_V3D_SUBMIT_CSD, struct drm_v3d_submit_csd) +#define DRM_IOCTL_V3D_PERFMON_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_PERFMON_CREATE, \ + struct drm_v3d_perfmon_create) +#define DRM_IOCTL_V3D_PERFMON_DESTROY DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_PERFMON_DESTROY, \ + struct drm_v3d_perfmon_destroy) +#define DRM_IOCTL_V3D_PERFMON_GET_VALUES DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_PERFMON_GET_VALUES, \ + struct drm_v3d_perfmon_get_values) #define DRM_V3D_SUBMIT_CL_FLUSH_CACHE 0x01 @@ -127,6 +136,11 @@ struct drm_v3d_submit_cl { __u32 bo_handle_count; __u32 flags; + + /* ID of the perfmon to attach to this job. 0 means no perfmon. */ + __u32 perfmon_id; + + __u32 pad; }; /** @@ -195,6 +209,7 @@ enum drm_v3d_param { DRM_V3D_PARAM_SUPPORTS_TFU, DRM_V3D_PARAM_SUPPORTS_CSD, DRM_V3D_PARAM_SUPPORTS_CACHE_FLUSH, + DRM_V3D_PARAM_SUPPORTS_PERFMON, }; struct drm_v3d_get_param { @@ -258,6 +273,127 @@ struct drm_v3d_submit_csd { __u32 in_sync; /* Sync object to signal when the CSD job is done. */ __u32 out_sync; + + /* ID of the perfmon to attach to this job. 0 means no perfmon. 
*/ + __u32 perfmon_id; +}; + +enum { + V3D_PERFCNT_FEP_VALID_PRIMTS_NO_PIXELS, + V3D_PERFCNT_FEP_VALID_PRIMS, + V3D_PERFCNT_FEP_EZ_NFCLIP_QUADS, + V3D_PERFCNT_FEP_VALID_QUADS, + V3D_PERFCNT_TLB_QUADS_STENCIL_FAIL, + V3D_PERFCNT_TLB_QUADS_STENCILZ_FAIL, + V3D_PERFCNT_TLB_QUADS_STENCILZ_PASS, + V3D_PERFCNT_TLB_QUADS_ZERO_COV, + V3D_PERFCNT_TLB_QUADS_NONZERO_COV, + V3D_PERFCNT_TLB_QUADS_WRITTEN, + V3D_PERFCNT_PTB_PRIM_VIEWPOINT_DISCARD, + V3D_PERFCNT_PTB_PRIM_CLIP, + V3D_PERFCNT_PTB_PRIM_REV, + V3D_PERFCNT_QPU_IDLE_CYCLES, + V3D_PERFCNT_QPU_ACTIVE_CYCLES_VERTEX_COORD_USER, + V3D_PERFCNT_QPU_ACTIVE_CYCLES_FRAG, + V3D_PERFCNT_QPU_CYCLES_VALID_INSTR, + V3D_PERFCNT_QPU_CYCLES_TMU_STALL, + V3D_PERFCNT_QPU_CYCLES_SCOREBOARD_STALL, + V3D_PERFCNT_QPU_CYCLES_VARYINGS_STALL, + V3D_PERFCNT_QPU_IC_HIT, + V3D_PERFCNT_QPU_IC_MISS, + V3D_PERFCNT_QPU_UC_HIT, + V3D_PERFCNT_QPU_UC_MISS, + V3D_PERFCNT_TMU_TCACHE_ACCESS, + V3D_PERFCNT_TMU_TCACHE_MISS, + V3D_PERFCNT_VPM_VDW_STALL, + V3D_PERFCNT_VPM_VCD_STALL, + V3D_PERFCNT_BIN_ACTIVE, + V3D_PERFCNT_RDR_ACTIVE, + V3D_PERFCNT_L2T_HITS, + V3D_PERFCNT_L2T_MISSES, + V3D_PERFCNT_CYCLE_COUNT, + V3D_PERFCNT_QPU_CYCLES_STALLED_VERTEX_COORD_USER, + V3D_PERFCNT_QPU_CYCLES_STALLED_FRAGMENT, + V3D_PERFCNT_PTB_PRIMS_BINNED, + V3D_PERFCNT_AXI_WRITES_WATCH_0, + V3D_PERFCNT_AXI_READS_WATCH_0, + V3D_PERFCNT_AXI_WRITE_STALLS_WATCH_0, + V3D_PERFCNT_AXI_READ_STALLS_WATCH_0, + V3D_PERFCNT_AXI_WRITE_BYTES_WATCH_0, + V3D_PERFCNT_AXI_READ_BYTES_WATCH_0, + V3D_PERFCNT_AXI_WRITES_WATCH_1, + V3D_PERFCNT_AXI_READS_WATCH_1, + V3D_PERFCNT_AXI_WRITE_STALLS_WATCH_1, + V3D_PERFCNT_AXI_READ_STALLS_WATCH_1, + V3D_PERFCNT_AXI_WRITE_BYTES_WATCH_1, + V3D_PERFCNT_AXI_READ_BYTES_WATCH_1, + V3D_PERFCNT_TLB_PARTIAL_QUADS, + V3D_PERFCNT_TMU_CONFIG_ACCESSES, + V3D_PERFCNT_L2T_NO_ID_STALL, + V3D_PERFCNT_L2T_COM_QUE_STALL, + V3D_PERFCNT_L2T_TMU_WRITES, + V3D_PERFCNT_TMU_ACTIVE_CYCLES, + V3D_PERFCNT_TMU_STALLED_CYCLES, + V3D_PERFCNT_CLE_ACTIVE, + V3D_PERFCNT_L2T_TMU_READS, + V3D_PERFCNT_L2T_CLE_READS, + V3D_PERFCNT_L2T_VCD_READS, + V3D_PERFCNT_L2T_TMUCFG_READS, + V3D_PERFCNT_L2T_SLC0_READS, + V3D_PERFCNT_L2T_SLC1_READS, + V3D_PERFCNT_L2T_SLC2_READS, + V3D_PERFCNT_L2T_TMU_W_MISSES, + V3D_PERFCNT_L2T_TMU_R_MISSES, + V3D_PERFCNT_L2T_CLE_MISSES, + V3D_PERFCNT_L2T_VCD_MISSES, + V3D_PERFCNT_L2T_TMUCFG_MISSES, + V3D_PERFCNT_L2T_SLC0_MISSES, + V3D_PERFCNT_L2T_SLC1_MISSES, + V3D_PERFCNT_L2T_SLC2_MISSES, + V3D_PERFCNT_CORE_MEM_WRITES, + V3D_PERFCNT_L2T_MEM_WRITES, + V3D_PERFCNT_PTB_MEM_WRITES, + V3D_PERFCNT_TLB_MEM_WRITES, + V3D_PERFCNT_CORE_MEM_READS, + V3D_PERFCNT_L2T_MEM_READS, + V3D_PERFCNT_PTB_MEM_READS, + V3D_PERFCNT_PSE_MEM_READS, + V3D_PERFCNT_TLB_MEM_READS, + V3D_PERFCNT_GMP_MEM_READS, + V3D_PERFCNT_PTB_W_MEM_WORDS, + V3D_PERFCNT_TLB_W_MEM_WORDS, + V3D_PERFCNT_PSE_R_MEM_WORDS, + V3D_PERFCNT_TLB_R_MEM_WORDS, + V3D_PERFCNT_TMU_MRU_HITS, + V3D_PERFCNT_COMPUTE_ACTIVE, + V3D_PERFCNT_NUM, +}; + +#define DRM_V3D_MAX_PERF_COUNTERS 32 + +struct drm_v3d_perfmon_create { + __u32 id; + __u32 ncounters; + __u8 counters[DRM_V3D_MAX_PERF_COUNTERS]; +}; + +struct drm_v3d_perfmon_destroy { + __u32 id; +}; + +/* + * Returns the values of the performance counters tracked by this + * perfmon (as an array of ncounters u64 values). + * + * No implicit synchronization is performed, so the user has to + * guarantee that any jobs using this perfmon have already been + * completed (probably by blocking on the seqno returned by the + * last exec that used the perfmon). 
+ */ +struct drm_v3d_perfmon_get_values { + __u32 id; + __u32 pad; + __u64 values_ptr; }; #if defined(__cplusplus) -- cgit v1.2.3 From 77e21b50acab326173716830ef15a2f237f2d198 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 16 Jul 2021 08:16:28 +0200 Subject: vgaarb: remove VGA_DEFAULT_DEVICE The define is entirely unused. Signed-off-by: Christoph Hellwig Link: https://patchwork.freedesktop.org/patch/msgid/20210716061634.2446357-2-hch@lst.de Acked-by: Christian König Signed-off-by: Christian König --- include/linux/vgaarb.h | 6 ------ 1 file changed, 6 deletions(-) (limited to 'include') diff --git a/include/linux/vgaarb.h b/include/linux/vgaarb.h index dc6ddce92066..26ec8a057d2a 100644 --- a/include/linux/vgaarb.h +++ b/include/linux/vgaarb.h @@ -42,12 +42,6 @@ #define VGA_RSRC_NORMAL_IO 0x04 #define VGA_RSRC_NORMAL_MEM 0x08 -/* Passing that instead of a pci_dev to use the system "default" - * device, that is the one used by vgacon. Archs will probably - * have to provide their own vga_default_device(); - */ -#define VGA_DEFAULT_DEVICE (NULL) - struct pci_dev; /* For use by clients */ -- cgit v1.2.3 From b0b514abc4cf2841ee1e0833252b2e8a78401276 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 16 Jul 2021 08:16:29 +0200 Subject: vgaarb: remove vga_conflicts vga_conflicts only has a single caller and none of the arch overrides mentioned in the comment. Just remove it and the thus dead check in the caller. Signed-off-by: Christoph Hellwig Link: https://patchwork.freedesktop.org/patch/msgid/20210716061634.2446357-3-hch@lst.de Acked-by: Christian König Signed-off-by: Christian König --- drivers/gpu/vga/vgaarb.c | 6 ------ include/linux/vgaarb.h | 12 ------------ 2 files changed, 18 deletions(-) (limited to 'include') diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c index 949fde433ea2..fccc7ef5153a 100644 --- a/drivers/gpu/vga/vgaarb.c +++ b/drivers/gpu/vga/vgaarb.c @@ -284,12 +284,6 @@ static struct vga_device *__vga_tryget(struct vga_device *vgadev, if (vgadev == conflict) continue; - /* Check if the architecture allows a conflict between those - * 2 devices or if they are on separate domains - */ - if (!vga_conflicts(vgadev->pdev, conflict->pdev)) - continue; - /* We have a possible conflict. before we go further, we must * check if we sit on the same bus as the conflicting device. * if we don't, then we must tie both IO and MEM resources diff --git a/include/linux/vgaarb.h b/include/linux/vgaarb.h index 26ec8a057d2a..ca5160218538 100644 --- a/include/linux/vgaarb.h +++ b/include/linux/vgaarb.h @@ -122,18 +122,6 @@ static inline void vga_set_default_device(struct pci_dev *pdev) { } static inline int vga_remove_vgacon(struct pci_dev *pdev) { return 0; } #endif -/* - * Architectures should define this if they have several - * independent PCI domains that can afford concurrent VGA - * decoding - */ -#ifndef __ARCH_HAS_VGA_CONFLICT -static inline int vga_conflicts(struct pci_dev *p1, struct pci_dev *p2) -{ - return 1; -} -#endif - #if defined(CONFIG_VGA_ARB) int vga_client_register(struct pci_dev *pdev, void *cookie, void (*irq_set_state)(void *cookie, bool state), -- cgit v1.2.3 From 45549c00d3ff05735e7ceb89b20e302301cd6b14 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 16 Jul 2021 08:16:30 +0200 Subject: vgaarb: move the kerneldoc for vga_set_legacy_decoding to vgaarb.c Kerneldoc comments should be at the implementation side, not in the header just declaring the prototype. 
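For illustration only (a generic sketch using a made-up foo_enable() helper, not code taken from this patch), the preferred layout keeps the header declaration bare and documents the definition:

	/* foo.h: declaration only, no kerneldoc */
	struct foo_dev;
	int foo_enable(struct foo_dev *fdev);

	/* foo.c: kerneldoc sits next to the implementation */
	/**
	 * foo_enable - bring the device out of reset
	 * @fdev: device to enable
	 *
	 * Return: 0 on success or a negative error code.
	 */
	int foo_enable(struct foo_dev *fdev)
	{
		/* hardware programming elided in this sketch */
		return 0;
	}
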
Signed-off-by: Christoph Hellwig Link: https://patchwork.freedesktop.org/patch/msgid/20210716061634.2446357-4-hch@lst.de Acked-by: Christian König Signed-off-by: Christian König --- drivers/gpu/vga/vgaarb.c | 11 +++++++++++ include/linux/vgaarb.h | 13 ------------- 2 files changed, 11 insertions(+), 13 deletions(-) (limited to 'include') diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c index fccc7ef5153a..3ed3734f66d9 100644 --- a/drivers/gpu/vga/vgaarb.c +++ b/drivers/gpu/vga/vgaarb.c @@ -834,6 +834,17 @@ bail: spin_unlock_irqrestore(&vga_lock, flags); } +/** + * vga_set_legacy_decoding + * @pdev: pci device of the VGA card + * @decodes: bit mask of what legacy regions the card decodes + * + * Indicates to the arbiter if the card decodes legacy VGA IOs, legacy VGA + * Memory, both, or none. All cards default to both, the card driver (fbdev for + * example) should tell the arbiter if it has disabled legacy decoding, so the + * card can be left out of the arbitration process (and can be safe to take + * interrupts at any time. + */ void vga_set_legacy_decoding(struct pci_dev *pdev, unsigned int decodes) { __vga_set_legacy_decoding(pdev, decodes, false); diff --git a/include/linux/vgaarb.h b/include/linux/vgaarb.h index ca5160218538..fdce9007d57e 100644 --- a/include/linux/vgaarb.h +++ b/include/linux/vgaarb.h @@ -46,19 +46,6 @@ struct pci_dev; /* For use by clients */ -/** - * vga_set_legacy_decoding - * - * @pdev: pci device of the VGA card - * @decodes: bit mask of what legacy regions the card decodes - * - * Indicates to the arbiter if the card decodes legacy VGA IOs, - * legacy VGA Memory, both, or none. All cards default to both, - * the card driver (fbdev for example) should tell the arbiter - * if it has disabled legacy decoding, so the card can be left - * out of the arbitration process (and can be safe to take - * interrupts at any time. - */ #if defined(CONFIG_VGA_ARB) extern void vga_set_legacy_decoding(struct pci_dev *pdev, unsigned int decodes); -- cgit v1.2.3 From 6609176f56ad895ba25d4c120c707fb15f45aa4e Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 16 Jul 2021 08:16:31 +0200 Subject: vgaarb: cleanup vgaarb.h Merge the different CONFIG_VGA_ARB ifdef blocks, remove superflous externs, and regularize the stubs for !CONFIG_VGA_ARB. Signed-off-by: Christoph Hellwig Link: https://patchwork.freedesktop.org/patch/msgid/20210716061634.2446357-5-hch@lst.de Acked-by: Christian König Signed-off-by: Christian König --- include/linux/vgaarb.h | 90 +++++++++++++++++++++++--------------------------- 1 file changed, 42 insertions(+), 48 deletions(-) (limited to 'include') diff --git a/include/linux/vgaarb.h b/include/linux/vgaarb.h index fdce9007d57e..05171fc7e26a 100644 --- a/include/linux/vgaarb.h +++ b/include/linux/vgaarb.h @@ -33,6 +33,8 @@ #include