author     Dave Airlie  2015-10-20 09:01:49 +1000
committer  Dave Airlie  2015-10-20 09:01:49 +1000
commit     affa0e033b04996700434312c76df3c78f683870
tree       4d67cd508404360dccd4dbd0f7bf71059eb7624c  /drivers/gpu/drm
parent     2dd3a88ac8c0ef7737335babfbacf79be68cfbea
parent     a0fb6ad7ae28a4dce34c010028dc070eeacae1d9
Merge tag 'topic/drm-misc-2015-10-19' of git://anongit.freedesktop.org/drm-intel into drm-next
More drm-misc for 4.4.
- fb refcount fix in atomic fbdev
- various locking reworks to reduce drm_global_mutex and dev->struct_mutex (see the short sketch after this list)
- rename docbook to gpu.tmpl and include vga_switcheroo stuff, plus more
vga_switcheroo (Lukas Wunner)
- viewport check fixes for atomic drivers from Ville
- DRM_DEBUG_VBL from Ville
- non-contentious header fixes from Mikko Rapeli
- small things all over
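
The locking rework above is mostly mechanical: for KMS (DRIVER_MODESET) drivers, drm_ioctl() no longer takes drm_global_mutex for driver ioctls, so the DRM_UNLOCKED flag becomes a no-op for them and is dropped from the ioctl tables in the diff below. A simplified sketch of the new dispatch decision, condensed from the drm_ioctl.c hunk (error handling omitted):

    bool is_driver_ioctl = nr >= DRM_COMMAND_BASE && nr < DRM_COMMAND_END;

    /* KMS driver ioctls are expected to do their own locking; only legacy
     * paths still funnel through the global BKL-style mutex. */
    if ((drm_core_check_feature(dev, DRIVER_MODESET) && is_driver_ioctl) ||
        (ioctl->flags & DRM_UNLOCKED)) {
            retcode = func(dev, kdata, file_priv);
    } else {
            mutex_lock(&drm_global_mutex);
            retcode = func(dev, kdata, file_priv);
            mutex_unlock(&drm_global_mutex);
    }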
* tag 'topic/drm-misc-2015-10-19' of git://anongit.freedesktop.org/drm-intel: (31 commits)
drm/fb-helper: Fix fb refcounting in pan_display_atomic
drm/fb-helper: Set plane rotation directly
drm: fix mutex leak in drm_dp_get_mst_branch_device
drm: Check plane src coordinates correctly during page flip for atomic drivers
drm: Check crtc viewport correctly with rotated primary plane on atomic drivers
drm: Refactor plane src coordinate checks
drm: Swap w/h when converting the mode to src coordidates for a rotated primary plane
drm: Don't leak fb when plane crtc coodinates are bad
ALSA: hda - Spell vga_switcheroo consistently
drm/gem: Use kref_get_unless_zero for the weak mmap references
drm/vgem: Drop vgem_drm_gem_mmap
drm: Fix return value of drm_framebuffer_init()
drm/gem: Use container_of in drm_gem_object_free
drm/gem: Check locking in drm_gem_object_unreference
drm/gem: Drop struct_mutex requirement from drm_gem_mmap_obj
drm/i810_drm.h: include drm/drm.h
r128_drm.h: include drm/drm.h
savage_drm.h: include <drm/drm.h>
gpu/doc: Convert to markdown harder
gpu/doc: Add vga_switcheroo documentation
...
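
One of the smaller items above, DRM_DEBUG_VBL (used in the drm_irq.c hunk below), splits the very chatty per-vblank messages out of the generic DRM_DEBUG category so they can be enabled on their own; drm_debug is a plain bitmask, as the updated comment in drm_drv.c spells out. A hedged sketch of how such a category is wired up (the exact bit value and printk helper are assumptions here; the authoritative definitions live in include/drm/drmP.h):

    #define DRM_UT_VBL      0x20            /* assumed bit in the drm_debug bitmask */

    #define DRM_DEBUG_VBL(fmt, args...)                                     \
            do {                                                            \
                    if (unlikely(drm_debug & DRM_UT_VBL))                   \
                            drm_ut_debug_printk(__func__, fmt, ##args);     \
            } while (0)

    /* Enable at runtime with e.g. drm.debug=0x20 on the kernel command line
     * or echo 0x20 > /sys/module/drm/parameters/debug. */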
Diffstat (limited to 'drivers/gpu/drm')
27 files changed, 296 insertions, 335 deletions
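
Several hunks below (__drm_atomic_helper_set_config() in drm_atomic_helper.c and drm_crtc_check_viewport() in drm_crtc.c) fix the same bug: with a 90/270 degree primary plane rotation the framebuffer is scanned out sideways, so the plane's source rectangle has to be derived from the mode with width and height swapped. The core of the fix, condensed from those hunks (source coordinates are 16.16 fixed point, hence the << 16):

    if (primary_state->rotation & (BIT(DRM_ROTATE_90) |
                                   BIT(DRM_ROTATE_270))) {
            /* rotated 90/270: sample the fb with the axes exchanged */
            primary_state->src_h = set->mode->hdisplay << 16;
            primary_state->src_w = set->mode->vdisplay << 16;
    } else {
            primary_state->src_h = set->mode->vdisplay << 16;
            primary_state->src_w = set->mode->hdisplay << 16;
    }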
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index dd85a0ae05c3..1618e2294a16 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -689,18 +689,18 @@ int amdgpu_get_vblank_timestamp_kms(struct drm_device *dev, unsigned int pipe, } const struct drm_ioctl_desc amdgpu_ioctls_kms[] = { - DRM_IOCTL_DEF_DRV(AMDGPU_GEM_CREATE, amdgpu_gem_create_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(AMDGPU_CTX, amdgpu_ctx_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(AMDGPU_BO_LIST, amdgpu_bo_list_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(AMDGPU_GEM_CREATE, amdgpu_gem_create_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(AMDGPU_CTX, amdgpu_ctx_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(AMDGPU_BO_LIST, amdgpu_bo_list_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), /* KMS */ - DRM_IOCTL_DEF_DRV(AMDGPU_GEM_MMAP, amdgpu_gem_mmap_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(AMDGPU_GEM_WAIT_IDLE, amdgpu_gem_wait_idle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(AMDGPU_CS, amdgpu_cs_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(AMDGPU_INFO, amdgpu_info_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_CS, amdgpu_cs_wait_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(AMDGPU_GEM_METADATA, amdgpu_gem_metadata_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(AMDGPU_GEM_VA, amdgpu_gem_va_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(AMDGPU_GEM_OP, amdgpu_gem_op_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(AMDGPU_GEM_USERPTR, amdgpu_gem_userptr_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(AMDGPU_GEM_MMAP, amdgpu_gem_mmap_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(AMDGPU_GEM_WAIT_IDLE, amdgpu_gem_wait_idle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(AMDGPU_CS, amdgpu_cs_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(AMDGPU_INFO, amdgpu_info_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_CS, amdgpu_cs_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(AMDGPU_GEM_METADATA, amdgpu_gem_metadata_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(AMDGPU_GEM_VA, amdgpu_gem_va_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(AMDGPU_GEM_OP, amdgpu_gem_op_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(AMDGPU_GEM_USERPTR, amdgpu_gem_userptr_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), }; int amdgpu_max_kms_ioctl = ARRAY_SIZE(amdgpu_ioctls_kms); diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c index 63d909e5f63b..1cbb080f0c4a 100644 --- a/drivers/gpu/drm/armada/armada_drv.c +++ b/drivers/gpu/drm/armada/armada_drv.c @@ -163,12 +163,9 @@ static void armada_drm_disable_vblank(struct drm_device *dev, unsigned int pipe) } static struct drm_ioctl_desc armada_ioctls[] = { - DRM_IOCTL_DEF_DRV(ARMADA_GEM_CREATE, armada_gem_create_ioctl, - DRM_UNLOCKED), - DRM_IOCTL_DEF_DRV(ARMADA_GEM_MMAP, armada_gem_mmap_ioctl, - DRM_UNLOCKED), - DRM_IOCTL_DEF_DRV(ARMADA_GEM_PWRITE, armada_gem_pwrite_ioctl, - DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(ARMADA_GEM_CREATE, armada_gem_create_ioctl,0), + DRM_IOCTL_DEF_DRV(ARMADA_GEM_MMAP, armada_gem_mmap_ioctl, 0), + DRM_IOCTL_DEF_DRV(ARMADA_GEM_PWRITE, armada_gem_pwrite_ioctl, 0), }; static void armada_drm_lastclose(struct drm_device *dev) 
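
The next hunk factors the source-rectangle validation into a check_src_coords() helper. Plane source coordinates are in 16.16 fixed point (integer part in the high 16 bits, fraction in units of 1/65536), which is why the debug message converts the fractional part with ((x & 0xffff) * 15625) >> 10: that is frac * 1000000 / 65536 rewritten with small constants. A self-contained illustration of the format (plain userspace C, values made up):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            /* 100.5 expressed in the 16.16 fixed-point format of plane src coords */
            uint32_t src_x = (100u << 16) | 0x8000;

            unsigned int whole = src_x >> 16;                       /* integer part */
            unsigned int micro = ((src_x & 0xffff) * 15625) >> 10;  /* millionths */

            printf("%u.%06u\n", whole, micro);      /* prints 100.500000 */
            return 0;
    }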
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index 87a2a446d2b7..0c6f62168776 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -1790,8 +1790,13 @@ int __drm_atomic_helper_set_config(struct drm_mode_set *set, primary_state->crtc_w = set->mode->hdisplay; primary_state->src_x = set->x << 16; primary_state->src_y = set->y << 16; - primary_state->src_h = set->mode->vdisplay << 16; - primary_state->src_w = set->mode->hdisplay << 16; + if (primary_state->rotation & (BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270))) { + primary_state->src_h = set->mode->hdisplay << 16; + primary_state->src_w = set->mode->vdisplay << 16; + } else { + primary_state->src_h = set->mode->vdisplay << 16; + primary_state->src_w = set->mode->hdisplay << 16; + } commit: ret = update_output_state(state, set); diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index e7c842289568..e54660a858e1 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -306,8 +306,7 @@ static int drm_mode_object_get_reg(struct drm_device *dev, * reference counted modeset objects like framebuffers. * * Returns: - * New unique (relative to other objects in @dev) integer identifier for the - * object. + * Zero on success, error code on failure. */ int drm_mode_object_get(struct drm_device *dev, struct drm_mode_object *obj, uint32_t obj_type) @@ -423,7 +422,7 @@ int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb, out: mutex_unlock(&dev->mode_config.fb_lock); - return 0; + return ret; } EXPORT_SYMBOL(drm_framebuffer_init); @@ -677,7 +676,6 @@ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc, crtc->dev = dev; crtc->funcs = funcs; - crtc->invert_dimensions = false; drm_modeset_lock_init(&crtc->mutex); ret = drm_mode_object_get(dev, &crtc->base, DRM_MODE_OBJECT_CRTC); @@ -2286,6 +2284,32 @@ int drm_plane_check_pixel_format(const struct drm_plane *plane, u32 format) return -EINVAL; } +static int check_src_coords(uint32_t src_x, uint32_t src_y, + uint32_t src_w, uint32_t src_h, + const struct drm_framebuffer *fb) +{ + unsigned int fb_width, fb_height; + + fb_width = fb->width << 16; + fb_height = fb->height << 16; + + /* Make sure source coordinates are inside the fb. */ + if (src_w > fb_width || + src_x > fb_width - src_w || + src_h > fb_height || + src_y > fb_height - src_h) { + DRM_DEBUG_KMS("Invalid source coordinates " + "%u.%06ux%u.%06u+%u.%06u+%u.%06u\n", + src_w >> 16, ((src_w & 0xffff) * 15625) >> 10, + src_h >> 16, ((src_h & 0xffff) * 15625) >> 10, + src_x >> 16, ((src_x & 0xffff) * 15625) >> 10, + src_y >> 16, ((src_y & 0xffff) * 15625) >> 10); + return -ENOSPC; + } + + return 0; +} + /* * setplane_internal - setplane handler for internal callers * @@ -2305,7 +2329,6 @@ static int __setplane_internal(struct drm_plane *plane, uint32_t src_w, uint32_t src_h) { int ret = 0; - unsigned int fb_width, fb_height; /* No fb means shut it down */ if (!fb) { @@ -2342,27 +2365,13 @@ static int __setplane_internal(struct drm_plane *plane, crtc_y > INT_MAX - (int32_t) crtc_h) { DRM_DEBUG_KMS("Invalid CRTC coordinates %ux%u+%d+%d\n", crtc_w, crtc_h, crtc_x, crtc_y); - return -ERANGE; + ret = -ERANGE; + goto out; } - - fb_width = fb->width << 16; - fb_height = fb->height << 16; - - /* Make sure source coordinates are inside the fb. 
*/ - if (src_w > fb_width || - src_x > fb_width - src_w || - src_h > fb_height || - src_y > fb_height - src_h) { - DRM_DEBUG_KMS("Invalid source coordinates " - "%u.%06ux%u.%06u+%u.%06u+%u.%06u\n", - src_w >> 16, ((src_w & 0xffff) * 15625) >> 10, - src_h >> 16, ((src_h & 0xffff) * 15625) >> 10, - src_x >> 16, ((src_x & 0xffff) * 15625) >> 10, - src_y >> 16, ((src_y & 0xffff) * 15625) >> 10); - ret = -ENOSPC; + ret = check_src_coords(src_x, src_y, src_w, src_h, fb); + if (ret) goto out; - } plane->old_fb = plane->fb; ret = plane->funcs->update_plane(plane, crtc, fb, @@ -2553,20 +2562,13 @@ int drm_crtc_check_viewport(const struct drm_crtc *crtc, drm_crtc_get_hv_timing(mode, &hdisplay, &vdisplay); - if (crtc->invert_dimensions) + if (crtc->state && + crtc->primary->state->rotation & (BIT(DRM_ROTATE_90) | + BIT(DRM_ROTATE_270))) swap(hdisplay, vdisplay); - if (hdisplay > fb->width || - vdisplay > fb->height || - x > fb->width - hdisplay || - y > fb->height - vdisplay) { - DRM_DEBUG_KMS("Invalid fb size %ux%u for CRTC viewport %ux%u+%d+%d%s.\n", - fb->width, fb->height, hdisplay, vdisplay, x, y, - crtc->invert_dimensions ? " (inverted)" : ""); - return -ENOSPC; - } - - return 0; + return check_src_coords(x << 16, y << 16, + hdisplay << 16, vdisplay << 16, fb); } EXPORT_SYMBOL(drm_crtc_check_viewport); @@ -5181,7 +5183,14 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev, goto out; } - ret = drm_crtc_check_viewport(crtc, crtc->x, crtc->y, &crtc->mode, fb); + if (crtc->state) { + const struct drm_plane_state *state = crtc->primary->state; + + ret = check_src_coords(state->src_x, state->src_y, + state->src_w, state->src_h, fb); + } else { + ret = drm_crtc_check_viewport(crtc, crtc->x, crtc->y, &crtc->mode, fb); + } if (ret) goto out; diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index bf27a07dbce3..2d46fc6230c2 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c @@ -1194,17 +1194,18 @@ static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_ list_for_each_entry(port, &mstb->ports, next) { if (port->port_num == port_num) { - if (!port->mstb) { + mstb = port->mstb; + if (!mstb) { DRM_ERROR("failed to lookup MSTB with lct %d, rad %02x\n", lct, rad[0]); - return NULL; + goto out; } - mstb = port->mstb; break; } } } kref_get(&mstb->kref); +out: mutex_unlock(&mgr->lock); return mstb; } diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index d01f8d6c5fdb..9362609df38a 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -37,11 +37,9 @@ #include "drm_legacy.h" #include "drm_internal.h" -unsigned int drm_debug = 0; /* 1 to enable debug output */ +unsigned int drm_debug = 0; /* bitmask of DRM_UT_x */ EXPORT_SYMBOL(drm_debug); -bool drm_atomic = 0; - MODULE_AUTHOR(CORE_AUTHOR); MODULE_DESCRIPTION(CORE_DESC); MODULE_LICENSE("GPL and additional rights"); diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index bd6d4ab27512..e673c13c7391 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -360,11 +360,7 @@ retry: goto fail; } - ret = drm_atomic_plane_set_property(plane, plane_state, - dev->mode_config.rotation_property, - BIT(DRM_ROTATE_0)); - if (ret != 0) - goto fail; + plane_state->rotation = BIT(DRM_ROTATE_0); /* disable non-primary: */ if (plane->type == DRM_PLANE_TYPE_PRIMARY) @@ -1235,7 +1231,7 @@ int drm_fb_helper_set_par(struct fb_info *info) EXPORT_SYMBOL(drm_fb_helper_set_par); static 
int pan_display_atomic(struct fb_var_screeninfo *var, - struct fb_info *info) + struct fb_info *info) { struct drm_fb_helper *fb_helper = info->par; struct drm_device *dev = fb_helper->dev; @@ -1253,6 +1249,8 @@ retry: mode_set = &fb_helper->crtc_info[i].mode_set; + mode_set->crtc->primary->old_fb = mode_set->crtc->primary->fb; + mode_set->x = var->xoffset; mode_set->y = var->yoffset; @@ -1268,13 +1266,34 @@ retry: info->var.xoffset = var->xoffset; info->var.yoffset = var->yoffset; - return 0; fail: + for(i = 0; i < fb_helper->crtc_count; i++) { + struct drm_mode_set *mode_set; + struct drm_plane *plane; + + mode_set = &fb_helper->crtc_info[i].mode_set; + plane = mode_set->crtc->primary; + + if (ret == 0) { + struct drm_framebuffer *new_fb = plane->state->fb; + + if (new_fb) + drm_framebuffer_reference(new_fb); + plane->fb = new_fb; + plane->crtc = plane->state->crtc; + + if (plane->old_fb) + drm_framebuffer_unreference(plane->old_fb); + } + plane->old_fb = NULL; + } + if (ret == -EDEADLK) goto backoff; - drm_atomic_state_free(state); + if (ret != 0) + drm_atomic_state_free(state); return ret; diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index 3c2d4abd71c5..64353d40db53 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -763,7 +763,8 @@ EXPORT_SYMBOL(drm_gem_object_release); void drm_gem_object_free(struct kref *kref) { - struct drm_gem_object *obj = (struct drm_gem_object *) kref; + struct drm_gem_object *obj = + container_of(kref, struct drm_gem_object, refcount); struct drm_device *dev = obj->dev; WARN_ON(!mutex_is_locked(&dev->struct_mutex)); @@ -810,8 +811,6 @@ EXPORT_SYMBOL(drm_gem_vm_close); * drm_gem_mmap() prevents unprivileged users from mapping random objects. So * callers must verify access restrictions before calling this helper. * - * NOTE: This function has to be protected with dev->struct_mutex - * * Return 0 or success or -EINVAL if the object size is smaller than the VMA * size, or if no gem_vm_ops are provided. */ @@ -820,8 +819,6 @@ int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size, { struct drm_device *dev = obj->dev; - lockdep_assert_held(&dev->struct_mutex); - /* Check for valid size. */ if (obj_size < vma->vm_end - vma->vm_start) return -EINVAL; @@ -865,30 +862,46 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma) { struct drm_file *priv = filp->private_data; struct drm_device *dev = priv->minor->dev; - struct drm_gem_object *obj; + struct drm_gem_object *obj = NULL; struct drm_vma_offset_node *node; int ret; if (drm_device_is_unplugged(dev)) return -ENODEV; - mutex_lock(&dev->struct_mutex); + drm_vma_offset_lock_lookup(dev->vma_offset_manager); + node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager, + vma->vm_pgoff, + vma_pages(vma)); + if (likely(node)) { + obj = container_of(node, struct drm_gem_object, vma_node); + /* + * When the object is being freed, after it hits 0-refcnt it + * proceeds to tear down the object. In the process it will + * attempt to remove the VMA offset and so acquire this + * mgr->vm_lock. Therefore if we find an object with a 0-refcnt + * that matches our range, we know it is in the process of being + * destroyed and will be freed as soon as we release the lock - + * so we have to check for the 0-refcnted object and treat it as + * invalid. 
+ */ + if (!kref_get_unless_zero(&obj->refcount)) + obj = NULL; + } + drm_vma_offset_unlock_lookup(dev->vma_offset_manager); - node = drm_vma_offset_exact_lookup(dev->vma_offset_manager, - vma->vm_pgoff, - vma_pages(vma)); - if (!node) { - mutex_unlock(&dev->struct_mutex); + if (!obj) return -EINVAL; - } else if (!drm_vma_node_is_allowed(node, filp)) { - mutex_unlock(&dev->struct_mutex); + + if (!drm_vma_node_is_allowed(node, filp)) { + drm_gem_object_unreference_unlocked(obj); return -EACCES; } - obj = container_of(node, struct drm_gem_object, vma_node); - ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT, vma); + ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT, + vma); - mutex_unlock(&dev->struct_mutex); + drm_gem_object_unreference_unlocked(obj); return ret; } diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c index 86cc793cdf79..4fb4c45d039f 100644 --- a/drivers/gpu/drm/drm_gem_cma_helper.c +++ b/drivers/gpu/drm/drm_gem_cma_helper.c @@ -484,9 +484,7 @@ int drm_gem_cma_prime_mmap(struct drm_gem_object *obj, struct drm_device *dev = obj->dev; int ret; - mutex_lock(&dev->struct_mutex); ret = drm_gem_mmap_obj(obj, obj->size, vma); - mutex_unlock(&dev->struct_mutex); if (ret < 0) return ret; diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c index 530c501422fd..8ce2a0c59116 100644 --- a/drivers/gpu/drm/drm_ioctl.c +++ b/drivers/gpu/drm/drm_ioctl.c @@ -691,13 +691,16 @@ long drm_ioctl(struct file *filp, char stack_kdata[128]; char *kdata = NULL; unsigned int usize, asize, drv_size; + bool is_driver_ioctl; dev = file_priv->minor->dev; if (drm_device_is_unplugged(dev)) return -ENODEV; - if (nr >= DRM_COMMAND_BASE && nr < DRM_COMMAND_END) { + is_driver_ioctl = nr >= DRM_COMMAND_BASE && nr < DRM_COMMAND_END; + + if (is_driver_ioctl) { /* driver ioctl */ if (nr - DRM_COMMAND_BASE >= dev->driver->num_ioctls) goto err_i1; @@ -756,7 +759,10 @@ long drm_ioctl(struct file *filp, memset(kdata, 0, usize); } - if (ioctl->flags & DRM_UNLOCKED) + /* Enforce sane locking for kms driver ioctls. Core ioctls are + * too messy still. */ + if ((drm_core_check_feature(dev, DRIVER_MODESET) && is_driver_ioctl) || + (ioctl->flags & DRM_UNLOCKED)) retcode = func(dev, kdata, file_priv); else { mutex_lock(&drm_global_mutex); diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c index 7bdf247b9681..eba6337f5860 100644 --- a/drivers/gpu/drm/drm_irq.c +++ b/drivers/gpu/drm/drm_irq.c @@ -213,17 +213,17 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe, diff = DIV_ROUND_CLOSEST_ULL(diff_ns, framedur_ns); if (diff == 0 && flags & DRM_CALLED_FROM_VBLIRQ) - DRM_DEBUG("crtc %u: Redundant vblirq ignored." - " diff_ns = %lld, framedur_ns = %d)\n", - pipe, (long long) diff_ns, framedur_ns); + DRM_DEBUG_VBL("crtc %u: Redundant vblirq ignored." 
+ " diff_ns = %lld, framedur_ns = %d)\n", + pipe, (long long) diff_ns, framedur_ns); } else { /* some kind of default for drivers w/o accurate vbl timestamping */ diff = (flags & DRM_CALLED_FROM_VBLIRQ) != 0; } - DRM_DEBUG("updating vblank count on crtc %u:" - " current=%u, diff=%u, hw=%u hw_last=%u\n", - pipe, vblank->count, diff, cur_vblank, vblank->last); + DRM_DEBUG_VBL("updating vblank count on crtc %u:" + " current=%u, diff=%u, hw=%u hw_last=%u\n", + pipe, vblank->count, diff, cur_vblank, vblank->last); if (diff == 0) { WARN_ON_ONCE(cur_vblank != vblank->last); @@ -800,11 +800,11 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, etime = ktime_sub_ns(etime, delta_ns); *vblank_time = ktime_to_timeval(etime); - DRM_DEBUG("crtc %u : v 0x%x p(%d,%d)@ %ld.%ld -> %ld.%ld [e %d us, %d rep]\n", - pipe, vbl_status, hpos, vpos, - (long)tv_etime.tv_sec, (long)tv_etime.tv_usec, - (long)vblank_time->tv_sec, (long)vblank_time->tv_usec, - duration_ns/1000, i); + DRM_DEBUG_VBL("crtc %u : v 0x%x p(%d,%d)@ %ld.%ld -> %ld.%ld [e %d us, %d rep]\n", + pipe, vbl_status, hpos, vpos, + (long)tv_etime.tv_sec, (long)tv_etime.tv_usec, + (long)vblank_time->tv_sec, (long)vblank_time->tv_usec, + duration_ns/1000, i); return ret; } @@ -1272,8 +1272,8 @@ void drm_vblank_off(struct drm_device *dev, unsigned int pipe) list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) { if (e->pipe != pipe) continue; - DRM_DEBUG("Sending premature vblank event on disable: \ - wanted %d, current %d\n", + DRM_DEBUG("Sending premature vblank event on disable: " + "wanted %d, current %d\n", e->event.sequence, seq); list_del(&e->base.link); drm_vblank_put(dev, pipe); diff --git a/drivers/gpu/drm/drm_vma_manager.c b/drivers/gpu/drm/drm_vma_manager.c index 68c1f32fb086..2f2ecde8285b 100644 --- a/drivers/gpu/drm/drm_vma_manager.c +++ b/drivers/gpu/drm/drm_vma_manager.c @@ -112,7 +112,7 @@ void drm_vma_offset_manager_destroy(struct drm_vma_offset_manager *mgr) EXPORT_SYMBOL(drm_vma_offset_manager_destroy); /** - * drm_vma_offset_lookup() - Find node in offset space + * drm_vma_offset_lookup_locked() - Find node in offset space * @mgr: Manager object * @start: Start address for object (page-based) * @pages: Size of object (page-based) @@ -122,37 +122,21 @@ EXPORT_SYMBOL(drm_vma_offset_manager_destroy); * region and the given node will be returned, as long as the node spans the * whole requested area (given the size in number of pages as @pages). * - * RETURNS: - * Returns NULL if no suitable node can be found. Otherwise, the best match - * is returned. It's the caller's responsibility to make sure the node doesn't - * get destroyed before the caller can access it. - */ -struct drm_vma_offset_node *drm_vma_offset_lookup(struct drm_vma_offset_manager *mgr, - unsigned long start, - unsigned long pages) -{ - struct drm_vma_offset_node *node; - - read_lock(&mgr->vm_lock); - node = drm_vma_offset_lookup_locked(mgr, start, pages); - read_unlock(&mgr->vm_lock); - - return node; -} -EXPORT_SYMBOL(drm_vma_offset_lookup); - -/** - * drm_vma_offset_lookup_locked() - Find node in offset space - * @mgr: Manager object - * @start: Start address for object (page-based) - * @pages: Size of object (page-based) + * Note that before lookup the vma offset manager lookup lock must be acquired + * with drm_vma_offset_lock_lookup(). See there for an example. This can then be + * used to implement weakly referenced lookups using kref_get_unless_zero(). 
* - * Same as drm_vma_offset_lookup() but requires the caller to lock offset lookup - * manually. See drm_vma_offset_lock_lookup() for an example. + * Example: + * drm_vma_offset_lock_lookup(mgr); + * node = drm_vma_offset_lookup_locked(mgr); + * if (node) + * kref_get_unless_zero(container_of(node, sth, entr)); + * drm_vma_offset_unlock_lookup(mgr); * * RETURNS: * Returns NULL if no suitable node can be found. Otherwise, the best match - * is returned. + * is returned. It's the caller's responsibility to make sure the node doesn't + * get destroyed before the caller can access it. */ struct drm_vma_offset_node *drm_vma_offset_lookup_locked(struct drm_vma_offset_manager *mgr, unsigned long start, diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c index d8568af2b414..09c4c6af8cd1 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.c +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c @@ -405,25 +405,25 @@ static const struct vm_operations_struct exynos_drm_gem_vm_ops = { static const struct drm_ioctl_desc exynos_ioctls[] = { DRM_IOCTL_DEF_DRV(EXYNOS_GEM_CREATE, exynos_drm_gem_create_ioctl, - DRM_UNLOCKED | DRM_AUTH | DRM_RENDER_ALLOW), + DRM_AUTH | DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(EXYNOS_GEM_GET, exynos_drm_gem_get_ioctl, - DRM_UNLOCKED | DRM_RENDER_ALLOW), + DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(EXYNOS_VIDI_CONNECTION, vidi_connection_ioctl, - DRM_UNLOCKED | DRM_AUTH), + DRM_AUTH), DRM_IOCTL_DEF_DRV(EXYNOS_G2D_GET_VER, exynos_g2d_get_ver_ioctl, - DRM_UNLOCKED | DRM_AUTH | DRM_RENDER_ALLOW), + DRM_AUTH | DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(EXYNOS_G2D_SET_CMDLIST, exynos_g2d_set_cmdlist_ioctl, - DRM_UNLOCKED | DRM_AUTH | DRM_RENDER_ALLOW), + DRM_AUTH | DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(EXYNOS_G2D_EXEC, exynos_g2d_exec_ioctl, - DRM_UNLOCKED | DRM_AUTH | DRM_RENDER_ALLOW), + DRM_AUTH | DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(EXYNOS_IPP_GET_PROPERTY, exynos_drm_ipp_get_property, - DRM_UNLOCKED | DRM_AUTH | DRM_RENDER_ALLOW), + DRM_AUTH | DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(EXYNOS_IPP_SET_PROPERTY, exynos_drm_ipp_set_property, - DRM_UNLOCKED | DRM_AUTH | DRM_RENDER_ALLOW), + DRM_AUTH | DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(EXYNOS_IPP_QUEUE_BUF, exynos_drm_ipp_queue_buf, - DRM_UNLOCKED | DRM_AUTH | DRM_RENDER_ALLOW), + DRM_AUTH | DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(EXYNOS_IPP_CMD_CTRL, exynos_drm_ipp_cmd_ctrl, - DRM_UNLOCKED | DRM_AUTH | DRM_RENDER_ALLOW), + DRM_AUTH | DRM_RENDER_ALLOW), }; static const struct file_operations exynos_drm_driver_fops = { diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 1e3d65743bd2..2336af92d94b 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -1299,41 +1299,41 @@ const struct drm_ioctl_desc i915_ioctls[] = { DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, drm_noop, DRM_AUTH), DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, drm_noop, DRM_AUTH), DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), - DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED), - DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED), - DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED), - DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, 
DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), - DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), - DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED), - DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), - DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), - DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), - DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), - DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_get_reset_stats_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH), + DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY), + DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY), + DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_RENDER_ALLOW), + 
DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, 0), + DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW), + DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW), + DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_get_reset_stats_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW), }; int i915_max_ioctl = ARRAY_SIZE(i915_ioctls); diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index a06ec71e109d..d170131b0978 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -932,13 +932,13 @@ static int msm_ioctl_wait_fence(struct drm_device *dev, void *data, } static const struct drm_ioctl_desc msm_ioctls[] = { - DRM_IOCTL_DEF_DRV(MSM_GET_PARAM, msm_ioctl_get_param, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(MSM_GEM_NEW, msm_ioctl_gem_new, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(MSM_GEM_INFO, msm_ioctl_gem_info, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_PREP, msm_ioctl_gem_cpu_prep, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_FINI, msm_ioctl_gem_cpu_fini, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(MSM_GEM_SUBMIT, msm_ioctl_gem_submit, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(MSM_WAIT_FENCE, msm_ioctl_wait_fence, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(MSM_GET_PARAM, msm_ioctl_get_param, 
DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(MSM_GEM_NEW, msm_ioctl_gem_new, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(MSM_GEM_INFO, msm_ioctl_gem_info, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_PREP, msm_ioctl_gem_cpu_prep, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_FINI, msm_ioctl_gem_cpu_fini, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(MSM_GEM_SUBMIT, msm_ioctl_gem_submit, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(MSM_WAIT_FENCE, msm_ioctl_wait_fence, DRM_AUTH|DRM_RENDER_ALLOW), }; static const struct vm_operations_struct vm_ops = { diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c index f97a1964ef39..3f6ec077b51d 100644 --- a/drivers/gpu/drm/msm/msm_fbdev.c +++ b/drivers/gpu/drm/msm/msm_fbdev.c @@ -68,12 +68,7 @@ static int msm_fbdev_mmap(struct fb_info *info, struct vm_area_struct *vma) if (drm_device_is_unplugged(dev)) return -ENODEV; - mutex_lock(&dev->struct_mutex); - ret = drm_gem_mmap_obj(drm_obj, drm_obj->size, vma); - - mutex_unlock(&dev->struct_mutex); - if (ret) { pr_err("%s:drm_gem_mmap_obj fail\n", __func__); return ret; diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c index 831461bc98a5..121975b07cd4 100644 --- a/drivers/gpu/drm/msm/msm_gem_prime.c +++ b/drivers/gpu/drm/msm/msm_gem_prime.c @@ -45,9 +45,7 @@ int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) { int ret; - mutex_lock(&obj->dev->struct_mutex); ret = drm_gem_mmap_obj(obj, obj->size, vma); - mutex_unlock(&obj->dev->struct_mutex); if (ret < 0) return ret; diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index 2416c7dddd5b..45ba67819199 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -862,18 +862,18 @@ nouveau_drm_postclose(struct drm_device *dev, struct drm_file *fpriv) static const struct drm_ioctl_desc nouveau_ioctls[] = { - DRM_IOCTL_DEF_DRV(NOUVEAU_GETPARAM, nouveau_abi16_ioctl_getparam, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(NOUVEAU_SETPARAM, nouveau_abi16_ioctl_setparam, DRM_UNLOCKED|DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_ALLOC, nouveau_abi16_ioctl_channel_alloc, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_FREE, nouveau_abi16_ioctl_channel_free, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(NOUVEAU_GROBJ_ALLOC, nouveau_abi16_ioctl_grobj_alloc, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_abi16_ioctl_notifierobj_alloc, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(NOUVEAU_GPUOBJ_FREE, nouveau_abi16_ioctl_gpuobj_free, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(NOUVEAU_GETPARAM, nouveau_abi16_ioctl_getparam, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(NOUVEAU_SETPARAM, nouveau_abi16_ioctl_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + 
DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_ALLOC, nouveau_abi16_ioctl_channel_alloc, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_FREE, nouveau_abi16_ioctl_channel_free, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(NOUVEAU_GROBJ_ALLOC, nouveau_abi16_ioctl_grobj_alloc, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_abi16_ioctl_notifierobj_alloc, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(NOUVEAU_GPUOBJ_FREE, nouveau_abi16_ioctl_gpuobj_free, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_AUTH|DRM_RENDER_ALLOW), }; long diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c index 9a4ba4f03567..ad09590e8a46 100644 --- a/drivers/gpu/drm/omapdrm/omap_crtc.c +++ b/drivers/gpu/drm/omapdrm/omap_crtc.c @@ -412,9 +412,6 @@ static void omap_crtc_atomic_flush(struct drm_crtc *crtc, dispc_mgr_go(omap_crtc->channel); omap_irq_register(crtc->dev, &omap_crtc->vblank_irq); } - - crtc->invert_dimensions = !!(crtc->primary->state->rotation & - (BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270))); } static int omap_crtc_atomic_set_property(struct drm_crtc *crtc, diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c index 4d5893473f78..5c6609cbb6a2 100644 --- a/drivers/gpu/drm/omapdrm/omap_drv.c +++ b/drivers/gpu/drm/omapdrm/omap_drv.c @@ -626,12 +626,12 @@ static int ioctl_gem_info(struct drm_device *dev, void *data, } static const struct drm_ioctl_desc ioctls[DRM_COMMAND_END - DRM_COMMAND_BASE] = { - DRM_IOCTL_DEF_DRV(OMAP_GET_PARAM, ioctl_get_param, DRM_UNLOCKED|DRM_AUTH), - DRM_IOCTL_DEF_DRV(OMAP_SET_PARAM, ioctl_set_param, DRM_UNLOCKED|DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF_DRV(OMAP_GEM_NEW, ioctl_gem_new, DRM_UNLOCKED|DRM_AUTH), - DRM_IOCTL_DEF_DRV(OMAP_GEM_CPU_PREP, ioctl_gem_cpu_prep, DRM_UNLOCKED|DRM_AUTH), - DRM_IOCTL_DEF_DRV(OMAP_GEM_CPU_FINI, ioctl_gem_cpu_fini, DRM_UNLOCKED|DRM_AUTH), - DRM_IOCTL_DEF_DRV(OMAP_GEM_INFO, ioctl_gem_info, DRM_UNLOCKED|DRM_AUTH), + DRM_IOCTL_DEF_DRV(OMAP_GET_PARAM, ioctl_get_param, DRM_AUTH), + DRM_IOCTL_DEF_DRV(OMAP_SET_PARAM, ioctl_set_param, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF_DRV(OMAP_GEM_NEW, ioctl_gem_new, DRM_AUTH), + DRM_IOCTL_DEF_DRV(OMAP_GEM_CPU_PREP, ioctl_gem_cpu_prep, DRM_AUTH), + DRM_IOCTL_DEF_DRV(OMAP_GEM_CPU_FINI, ioctl_gem_cpu_fini, DRM_AUTH), + DRM_IOCTL_DEF_DRV(OMAP_GEM_INFO, ioctl_gem_info, DRM_AUTH), }; /* diff --git a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c index 0cc71c9d08d5..27c297672076 100644 --- a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c +++ b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c @@ -140,15 +140,12 @@ static int omap_gem_dmabuf_mmap(struct dma_buf *buffer, struct vm_area_struct *vma) { struct drm_gem_object *obj = buffer->priv; - struct drm_device *dev = obj->dev; int ret = 0; if (WARN_ON(!obj->filp)) return -EINVAL; - mutex_lock(&dev->struct_mutex); ret = drm_gem_mmap_obj(obj, omap_gem_mmap_size(obj), vma); - mutex_unlock(&dev->struct_mutex); if (ret < 0) return ret; diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c 
b/drivers/gpu/drm/qxl/qxl_ioctl.c index bda5c5f80c24..2ae8577497ca 100644 --- a/drivers/gpu/drm/qxl/qxl_ioctl.c +++ b/drivers/gpu/drm/qxl/qxl_ioctl.c @@ -422,21 +422,21 @@ static int qxl_alloc_surf_ioctl(struct drm_device *dev, void *data, } const struct drm_ioctl_desc qxl_ioctls[] = { - DRM_IOCTL_DEF_DRV(QXL_ALLOC, qxl_alloc_ioctl, DRM_AUTH|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(QXL_ALLOC, qxl_alloc_ioctl, DRM_AUTH), - DRM_IOCTL_DEF_DRV(QXL_MAP, qxl_map_ioctl, DRM_AUTH|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(QXL_MAP, qxl_map_ioctl, DRM_AUTH), DRM_IOCTL_DEF_DRV(QXL_EXECBUFFER, qxl_execbuffer_ioctl, - DRM_AUTH|DRM_UNLOCKED), + DRM_AUTH), DRM_IOCTL_DEF_DRV(QXL_UPDATE_AREA, qxl_update_area_ioctl, - DRM_AUTH|DRM_UNLOCKED), + DRM_AUTH), DRM_IOCTL_DEF_DRV(QXL_GETPARAM, qxl_getparam_ioctl, - DRM_AUTH|DRM_UNLOCKED), + DRM_AUTH), DRM_IOCTL_DEF_DRV(QXL_CLIENTCAP, qxl_clientcap_ioctl, - DRM_AUTH|DRM_UNLOCKED), + DRM_AUTH), DRM_IOCTL_DEF_DRV(QXL_ALLOC_SURF, qxl_alloc_surf_ioctl, - DRM_AUTH|DRM_UNLOCKED), + DRM_AUTH), }; int qxl_max_ioctls = ARRAY_SIZE(qxl_ioctls); diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index 6f50a37b18cb..5f687c95a1e1 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c @@ -876,20 +876,20 @@ const struct drm_ioctl_desc radeon_ioctls_kms[] = { DRM_IOCTL_DEF_DRV(RADEON_SURF_ALLOC, drm_invalid_op, DRM_AUTH), DRM_IOCTL_DEF_DRV(RADEON_SURF_FREE, drm_invalid_op, DRM_AUTH), /* KMS */ - DRM_IOCTL_DEF_DRV(RADEON_GEM_INFO, radeon_gem_info_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(RADEON_GEM_CREATE, radeon_gem_create_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(RADEON_GEM_MMAP, radeon_gem_mmap_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_DOMAIN, radeon_gem_set_domain_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(RADEON_GEM_PREAD, radeon_gem_pread_ioctl, DRM_AUTH|DRM_UNLOCKED), - DRM_IOCTL_DEF_DRV(RADEON_GEM_PWRITE, radeon_gem_pwrite_ioctl, DRM_AUTH|DRM_UNLOCKED), - DRM_IOCTL_DEF_DRV(RADEON_GEM_WAIT_IDLE, radeon_gem_wait_idle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(RADEON_CS, radeon_cs_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(RADEON_INFO, radeon_info_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_TILING, radeon_gem_set_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(RADEON_GEM_VA, radeon_gem_va_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(RADEON_GEM_OP, radeon_gem_op_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(RADEON_GEM_USERPTR, radeon_gem_userptr_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(RADEON_GEM_INFO, radeon_gem_info_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(RADEON_GEM_CREATE, radeon_gem_create_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(RADEON_GEM_MMAP, radeon_gem_mmap_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_DOMAIN, radeon_gem_set_domain_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(RADEON_GEM_PREAD, radeon_gem_pread_ioctl, DRM_AUTH), + DRM_IOCTL_DEF_DRV(RADEON_GEM_PWRITE, radeon_gem_pwrite_ioctl, DRM_AUTH), + DRM_IOCTL_DEF_DRV(RADEON_GEM_WAIT_IDLE, 
radeon_gem_wait_idle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(RADEON_CS, radeon_cs_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(RADEON_INFO, radeon_info_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_TILING, radeon_gem_set_tiling_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(RADEON_GEM_VA, radeon_gem_va_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(RADEON_GEM_OP, radeon_gem_op_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(RADEON_GEM_USERPTR, radeon_gem_userptr_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), }; int radeon_max_kms_ioctl = ARRAY_SIZE(radeon_ioctls_kms); diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c index a6d9104f7f15..8caea0a33dd8 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c @@ -79,12 +79,9 @@ static int rockchip_drm_gem_object_mmap(struct drm_gem_object *obj, int rockchip_gem_mmap_buf(struct drm_gem_object *obj, struct vm_area_struct *vma) { - struct drm_device *drm = obj->dev; int ret; - mutex_lock(&drm->struct_mutex); ret = drm_gem_mmap_obj(obj, obj->size, vma); - mutex_unlock(&drm->struct_mutex); if (ret) return ret; diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c index 759e6af91e59..159ef515cab1 100644 --- a/drivers/gpu/drm/tegra/drm.c +++ b/drivers/gpu/drm/tegra/drm.c @@ -778,20 +778,20 @@ static int tegra_gem_get_flags(struct drm_device *drm, void *data, static const struct drm_ioctl_desc tegra_drm_ioctls[] = { #ifdef CONFIG_DRM_TEGRA_STAGING - DRM_IOCTL_DEF_DRV(TEGRA_GEM_CREATE, tegra_gem_create, DRM_UNLOCKED), - DRM_IOCTL_DEF_DRV(TEGRA_GEM_MMAP, tegra_gem_mmap, DRM_UNLOCKED), - DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_READ, tegra_syncpt_read, DRM_UNLOCKED), - DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_INCR, tegra_syncpt_incr, DRM_UNLOCKED), - DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_WAIT, tegra_syncpt_wait, DRM_UNLOCKED), - DRM_IOCTL_DEF_DRV(TEGRA_OPEN_CHANNEL, tegra_open_channel, DRM_UNLOCKED), - DRM_IOCTL_DEF_DRV(TEGRA_CLOSE_CHANNEL, tegra_close_channel, DRM_UNLOCKED), - DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT, tegra_get_syncpt, DRM_UNLOCKED), - DRM_IOCTL_DEF_DRV(TEGRA_SUBMIT, tegra_submit, DRM_UNLOCKED), - DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT_BASE, tegra_get_syncpt_base, DRM_UNLOCKED), - DRM_IOCTL_DEF_DRV(TEGRA_GEM_SET_TILING, tegra_gem_set_tiling, DRM_UNLOCKED), - DRM_IOCTL_DEF_DRV(TEGRA_GEM_GET_TILING, tegra_gem_get_tiling, DRM_UNLOCKED), - DRM_IOCTL_DEF_DRV(TEGRA_GEM_SET_FLAGS, tegra_gem_set_flags, DRM_UNLOCKED), - DRM_IOCTL_DEF_DRV(TEGRA_GEM_GET_FLAGS, tegra_gem_get_flags, DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(TEGRA_GEM_CREATE, tegra_gem_create, 0), + DRM_IOCTL_DEF_DRV(TEGRA_GEM_MMAP, tegra_gem_mmap, 0), + DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_READ, tegra_syncpt_read, 0), + DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_INCR, tegra_syncpt_incr, 0), + DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_WAIT, tegra_syncpt_wait, 0), + DRM_IOCTL_DEF_DRV(TEGRA_OPEN_CHANNEL, tegra_open_channel, 0), + DRM_IOCTL_DEF_DRV(TEGRA_CLOSE_CHANNEL, tegra_close_channel, 0), + DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT, tegra_get_syncpt, 0), + DRM_IOCTL_DEF_DRV(TEGRA_SUBMIT, tegra_submit, 0), + DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT_BASE, tegra_get_syncpt_base, 0), + DRM_IOCTL_DEF_DRV(TEGRA_GEM_SET_TILING, tegra_gem_set_tiling, 0), + DRM_IOCTL_DEF_DRV(TEGRA_GEM_GET_TILING, 
tegra_gem_get_tiling, 0), + DRM_IOCTL_DEF_DRV(TEGRA_GEM_SET_FLAGS, tegra_gem_set_flags, 0), + DRM_IOCTL_DEF_DRV(TEGRA_GEM_GET_FLAGS, tegra_gem_get_flags, 0), #endif }; diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c index 860062ef8814..c503a840fd88 100644 --- a/drivers/gpu/drm/vgem/vgem_drv.c +++ b/drivers/gpu/drm/vgem/vgem_drv.c @@ -235,66 +235,13 @@ unlock: return ret; } -int vgem_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma) -{ - struct drm_file *priv = filp->private_data; - struct drm_device *dev = priv->minor->dev; - struct drm_vma_offset_node *node; - struct drm_gem_object *obj; - struct drm_vgem_gem_object *vgem_obj; - int ret = 0; - - mutex_lock(&dev->struct_mutex); - - node = drm_vma_offset_exact_lookup(dev->vma_offset_manager, - vma->vm_pgoff, - vma_pages(vma)); - if (!node) { - ret = -EINVAL; - goto out_unlock; - } else if (!drm_vma_node_is_allowed(node, filp)) { - ret = -EACCES; - goto out_unlock; - } - - obj = container_of(node, struct drm_gem_object, vma_node); - - vgem_obj = to_vgem_bo(obj); - - if (obj->dma_buf && vgem_obj->use_dma_buf) { - ret = dma_buf_mmap(obj->dma_buf, vma, 0); - goto out_unlock; - } - - if (!obj->dev->driver->gem_vm_ops) { - ret = -EINVAL; - goto out_unlock; - } - - vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP; - vma->vm_ops = obj->dev->driver->gem_vm_ops; - vma->vm_private_data = vgem_obj; - vma->vm_page_prot = - pgprot_writecombine(vm_get_page_prot(vma->vm_flags)); - - mutex_unlock(&dev->struct_mutex); - drm_gem_vm_open(vma); - return ret; - -out_unlock: - mutex_unlock(&dev->struct_mutex); - - return ret; -} - - static struct drm_ioctl_desc vgem_ioctls[] = { }; static const struct file_operations vgem_driver_fops = { .owner = THIS_MODULE, .open = drm_open, - .mmap = vgem_drm_gem_mmap, + .mmap = drm_gem_mmap, .poll = drm_poll, .read = drm_read, .unlocked_ioctl = drm_ioctl, diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 8e7493d50f1a..b7525f78ac51 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -146,73 +146,73 @@ static const struct drm_ioctl_desc vmw_ioctls[] = { VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl, - DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW), + DRM_AUTH | DRM_RENDER_ALLOW), VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl, - DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW), + DRM_AUTH | DRM_RENDER_ALLOW), VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl, - DRM_UNLOCKED | DRM_RENDER_ALLOW), + DRM_RENDER_ALLOW), VMW_IOCTL_DEF(VMW_CURSOR_BYPASS, vmw_kms_cursor_bypass_ioctl, - DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED), + DRM_MASTER | DRM_CONTROL_ALLOW), VMW_IOCTL_DEF(VMW_CONTROL_STREAM, vmw_overlay_ioctl, - DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED), + DRM_MASTER | DRM_CONTROL_ALLOW), VMW_IOCTL_DEF(VMW_CLAIM_STREAM, vmw_stream_claim_ioctl, - DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED), + DRM_MASTER | DRM_CONTROL_ALLOW), VMW_IOCTL_DEF(VMW_UNREF_STREAM, vmw_stream_unref_ioctl, - DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED), + DRM_MASTER | DRM_CONTROL_ALLOW), VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl, - DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW), + DRM_AUTH | DRM_RENDER_ALLOW), VMW_IOCTL_DEF(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl, - DRM_UNLOCKED | DRM_RENDER_ALLOW), + DRM_RENDER_ALLOW), VMW_IOCTL_DEF(VMW_CREATE_SURFACE, vmw_surface_define_ioctl, - DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW), + DRM_AUTH | DRM_RENDER_ALLOW), 
VMW_IOCTL_DEF(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl, - DRM_UNLOCKED | DRM_RENDER_ALLOW), + DRM_RENDER_ALLOW), VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl, - DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW), - VMW_IOCTL_DEF(VMW_EXECBUF, NULL, DRM_AUTH | DRM_UNLOCKED | + DRM_AUTH | DRM_RENDER_ALLOW), + VMW_IOCTL_DEF(VMW_EXECBUF, NULL, DRM_AUTH | DRM_RENDER_ALLOW), VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl, - DRM_UNLOCKED | DRM_RENDER_ALLOW), + DRM_RENDER_ALLOW), VMW_IOCTL_DEF(VMW_FENCE_SIGNALED, vmw_fence_obj_signaled_ioctl, - DRM_UNLOCKED | DRM_RENDER_ALLOW), + DRM_RENDER_ALLOW), VMW_IOCTL_DEF(VMW_FENCE_UNREF, vmw_fence_obj_unref_ioctl, - DRM_UNLOCKED | DRM_RENDER_ALLOW), + DRM_RENDER_ALLOW), VMW_IOCTL_DEF(VMW_FENCE_EVENT, vmw_fence_event_ioctl, - DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW), + DRM_AUTH | DRM_RENDER_ALLOW), VMW_IOCTL_DEF(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl, - DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW), + DRM_AUTH | DRM_RENDER_ALLOW), /* these allow direct access to the framebuffers mark as master only */ VMW_IOCTL_DEF(VMW_PRESENT, vmw_present_ioctl, - DRM_MASTER | DRM_AUTH | DRM_UNLOCKED), + DRM_MASTER | DRM_AUTH), VMW_IOCTL_DEF(VMW_PRESENT_READBACK, vmw_present_readback_ioctl, - DRM_MASTER | DRM_AUTH | DRM_UNLOCKED), + DRM_MASTER | DRM_AUTH), VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT, vmw_kms_update_layout_ioctl, - DRM_MASTER | DRM_UNLOCKED), + DRM_MASTER), VMW_IOCTL_DEF(VMW_CREATE_SHADER, vmw_shader_define_ioctl, - DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW), + DRM_AUTH | DRM_RENDER_ALLOW), VMW_IOCTL_DEF(VMW_UNREF_SHADER, vmw_shader_destroy_ioctl, - DRM_UNLOCKED | DRM_RENDER_ALLOW), + DRM_RENDER_ALLOW), VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE, vmw_gb_surface_define_ioctl, - DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW), + DRM_AUTH | DRM_RENDER_ALLOW), VMW_IOCTL_DEF(VMW_GB_SURFACE_REF, vmw_gb_surface_reference_ioctl, - DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW), + DRM_AUTH | DRM_RENDER_ALLOW), VMW_IOCTL_DEF(VMW_SYNCCPU, vmw_user_dmabuf_synccpu_ioctl, - DRM_UNLOCKED | DRM_RENDER_ALLOW), + DRM_RENDER_ALLOW), VMW_IOCTL_DEF(VMW_CREATE_EXTENDED_CONTEXT, vmw_extended_context_define_ioctl, - DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW), + DRM_AUTH | DRM_RENDER_ALLOW), }; static struct pci_device_id vmw_pci_id_list[] = { |
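
The drm_gem.c and drm_vma_manager.c hunks above replace the struct_mutex-protected mmap offset lookup with a weak-reference scheme: the node is looked up under the vma offset manager's own lock, and the object is only kept if kref_get_unless_zero() succeeds, so an object that is concurrently being torn down simply looks like a failed lookup. Condensed from the new drm_gem_mmap() above (error handling trimmed):

    struct drm_gem_object *obj = NULL;
    struct drm_vma_offset_node *node;

    drm_vma_offset_lock_lookup(dev->vma_offset_manager);
    node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
                                              vma->vm_pgoff, vma_pages(vma));
    if (node) {
            obj = container_of(node, struct drm_gem_object, vma_node);
            /* 0-refcount means the object is mid-teardown: treat as not found */
            if (!kref_get_unless_zero(&obj->refcount))
                    obj = NULL;
    }
    drm_vma_offset_unlock_lookup(dev->vma_offset_manager);

    if (!obj)
            return -EINVAL;

    /* ... map, then drop the temporary reference ... */
    drm_gem_object_unreference_unlocked(obj);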