author     Dave Airlie    2019-04-24 11:54:26 +1000
committer  Dave Airlie    2019-04-24 11:56:32 +1000
commit     70b5f09e4389c789263c6d79c539b8378e3af3bc (patch)
tree       0c1efa5d6b1c0a0f5984c08a724f83a931d08482 /drivers/gpu/drm/msm/msm_gem.c
parent     42f1a013300dca601d779b02ed6d41f7b2cea362 (diff)
parent     b02872df58aca66d0e7af3ec5065dbc6f0630dd1 (diff)
Merge tag 'drm-msm-next-2019-04-21' of https://gitlab.freedesktop.org/drm/msm into drm-next
This time around it is a bunch of cleanups and fixes, expanding gpu
"zap" shader support (so we can take the GPU out of secure mode on
boot) to a6xx, and a small UABI extension to support robustness (see
mesa MR 673).
Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Rob Clark <robdclark@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/CAF6AEGsHwsEfi4y2LYKSqeqDEYvffwVgKhiP8jHcHpxp13J5LQ@mail.gmail.com
Diffstat (limited to 'drivers/gpu/drm/msm/msm_gem.c')
-rw-r--r-- | drivers/gpu/drm/msm/msm_gem.c | 42 |
1 file changed, 41 insertions(+), 1 deletion(-)
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index a72c648ba6e7..31d5a744d84f 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -851,8 +851,18 @@ void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
 /* don't call directly!  Use drm_gem_object_put() and friends */
 void msm_gem_free_object(struct drm_gem_object *obj)
 {
-	struct drm_device *dev = obj->dev;
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+	struct drm_device *dev = obj->dev;
+	struct msm_drm_private *priv = dev->dev_private;
+
+	if (llist_add(&msm_obj->freed, &priv->free_list))
+		queue_work(priv->wq, &priv->free_work);
+}
+
+static void free_object(struct msm_gem_object *msm_obj)
+{
+	struct drm_gem_object *obj = &msm_obj->base;
+	struct drm_device *dev = obj->dev;
 
 	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 
@@ -887,6 +897,29 @@ void msm_gem_free_object(struct drm_gem_object *obj)
 	kfree(msm_obj);
 }
 
+void msm_gem_free_work(struct work_struct *work)
+{
+	struct msm_drm_private *priv =
+		container_of(work, struct msm_drm_private, free_work);
+	struct drm_device *dev = priv->dev;
+	struct llist_node *freed;
+	struct msm_gem_object *msm_obj, *next;
+
+	while ((freed = llist_del_all(&priv->free_list))) {
+
+		mutex_lock(&dev->struct_mutex);
+
+		llist_for_each_entry_safe(msm_obj, next,
+					  freed, freed)
+			free_object(msm_obj);
+
+		mutex_unlock(&dev->struct_mutex);
+
+		if (need_resched())
+			break;
+	}
+}
+
 /* convenience method to construct a GEM buffer object, and userspace handle */
 int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
 		uint32_t size, uint32_t flags, uint32_t *handle,
@@ -1017,6 +1050,13 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
 		ret = drm_gem_object_init(dev, obj, size);
 		if (ret)
 			goto fail;
+		/*
+		 * Our buffers are kept pinned, so allocating them from the
+		 * MOVABLE zone is a really bad idea, and conflicts with CMA.
+		 * See comments above new_inode() why this is required _and_
+		 * expected if you're going to pin these pages.
+		 */
+		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
 	}
 
 	return obj;
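The interesting part of the first two hunks is the deferred-free scheme: msm_gem_free_object() no longer tears the object down inline (that work needs dev->struct_mutex and moves into the new free_object() helper); it just pushes the object onto the lock-free priv->free_list and queues priv->free_work. llist_add() returns true only when the list was previously empty, so the work is queued once per batch, and msm_gem_free_work() then drains the whole list under struct_mutex, breaking out early when need_resched() is set. Below is a minimal user-space sketch of the same "push to a lock-free list, schedule the drain only on the first insertion" pattern, assuming C11 atomics in place of the kernel's llist and a plain function call in place of queue_work(); the names (free_node, defer_free, drain_free_list) are made up for illustration and are not part of the msm driver.

```c
/*
 * User-space sketch of the deferred-free pattern from the patch above.
 * A Treiber-stack push stands in for llist_add(), and draining the list
 * in one shot stands in for the free_work worker.  All names here are
 * hypothetical; none of this is msm driver code.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct free_node {
	struct free_node *next;
	int id;				/* stand-in for the GEM object */
};

static _Atomic(struct free_node *) free_list;

/*
 * Push onto the lock-free list.  Returns true if the list was empty
 * before the push, i.e. this caller is the one that should schedule
 * the drain -- the same contract as llist_add() in the kernel.
 */
static bool defer_free(struct free_node *n)
{
	struct free_node *head = atomic_load(&free_list);

	do {
		n->next = head;
	} while (!atomic_compare_exchange_weak(&free_list, &head, n));

	return head == NULL;
}

/* The "worker": detach the whole list at once, then free each entry. */
static void drain_free_list(void)
{
	struct free_node *n = atomic_exchange(&free_list, NULL);

	while (n) {
		struct free_node *next = n->next;

		printf("freeing object %d\n", n->id);
		free(n);
		n = next;
	}
}

int main(void)
{
	bool need_drain = false;

	for (int i = 0; i < 3; i++) {
		struct free_node *n = malloc(sizeof(*n));

		n->id = i;
		/* only the push that found the list empty "queues work" */
		if (defer_free(n))
			need_drain = true;
	}

	if (need_drain)
		drain_free_list();	/* the queue_work()/free_work analogue */
	return 0;
}
```

The practical benefit, presumably, is that the final drm_gem_object_put() can be issued from contexts where taking struct_mutex directly would be awkward or deadlock-prone, while the worker still performs the real teardown with the lock held and yields between batches rather than hogging the CPU.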