author    | Rob Clark   | 2013-08-07 13:41:24 -0400
committer | Dave Airlie | 2013-08-19 10:36:04 +1000
commit    | bcc5c9d50e93bb7d949f6f38063b62dd35ca84d1 (patch)
tree      | b1390993cb1b234f2a67a6e6dcb603472936dbbf
parent    | 367bbd49202dd256dce1217c2f7cd0d5d1916f7b (diff)
drm/gem: add shmem get/put page helpers
Basically just extracting some code duplicated in gma500, omapdrm, udl,
and the upcoming msm driver.
Signed-off-by: Rob Clark <robdclark@gmail.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
-rw-r--r-- | drivers/gpu/drm/drm_gem.c | 103
-rw-r--r-- | include/drm/drmP.h        |   4
2 files changed, 107 insertions, 0 deletions
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index a8ba7da83d45..a4c8e8fba599 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -358,6 +358,109 @@ int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
 }
 EXPORT_SYMBOL(drm_gem_create_mmap_offset);
 
+/**
+ * drm_gem_get_pages - helper to allocate backing pages for a GEM object
+ * from shmem
+ * @obj: obj in question
+ * @gfpmask: gfp mask of requested pages
+ */
+struct page **drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask)
+{
+	struct inode *inode;
+	struct address_space *mapping;
+	struct page *p, **pages;
+	int i, npages;
+
+	/* This is the shared memory object that backs the GEM resource */
+	inode = file_inode(obj->filp);
+	mapping = inode->i_mapping;
+
+	/* We already BUG_ON() for non-page-aligned sizes in
+	 * drm_gem_object_init(), so we should never hit this unless
+	 * driver author is doing something really wrong:
+	 */
+	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);
+
+	npages = obj->size >> PAGE_SHIFT;
+
+	pages = drm_malloc_ab(npages, sizeof(struct page *));
+	if (pages == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	gfpmask |= mapping_gfp_mask(mapping);
+
+	for (i = 0; i < npages; i++) {
+		p = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
+		if (IS_ERR(p))
+			goto fail;
+		pages[i] = p;
+
+		/* There is a hypothetical issue w/ drivers that require
+		 * buffer memory in the low 4GB.. if the pages are un-
+		 * pinned, and swapped out, they can end up swapped back
+		 * in above 4GB. If pages are already in memory, then
+		 * shmem_read_mapping_page_gfp will ignore the gfpmask,
+		 * even if the already in-memory page disobeys the mask.
+		 *
+		 * It is only a theoretical issue today, because none of
+		 * the devices with this limitation can be populated with
+		 * enough memory to trigger the issue. But this BUG_ON()
+		 * is here as a reminder in case the problem with
+		 * shmem_read_mapping_page_gfp() isn't solved by the time
+		 * it does become a real issue.
+		 *
+		 * See this thread: http://lkml.org/lkml/2011/7/11/238
+		 */
+		BUG_ON((gfpmask & __GFP_DMA32) &&
+				(page_to_pfn(p) >= 0x00100000UL));
+	}
+
+	return pages;
+
+fail:
+	while (i--)
+		page_cache_release(pages[i]);
+
+	drm_free_large(pages);
+	return ERR_CAST(p);
+}
+EXPORT_SYMBOL(drm_gem_get_pages);
+
+/**
+ * drm_gem_put_pages - helper to free backing pages for a GEM object
+ * @obj: obj in question
+ * @pages: pages to free
+ * @dirty: if true, pages will be marked as dirty
+ * @accessed: if true, the pages will be marked as accessed
+ */
+void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
+		bool dirty, bool accessed)
+{
+	int i, npages;
+
+	/* We already BUG_ON() for non-page-aligned sizes in
+	 * drm_gem_object_init(), so we should never hit this unless
+	 * driver author is doing something really wrong:
+	 */
+	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);
+
+	npages = obj->size >> PAGE_SHIFT;
+
+	for (i = 0; i < npages; i++) {
+		if (dirty)
+			set_page_dirty(pages[i]);
+
+		if (accessed)
+			mark_page_accessed(pages[i]);
+
+		/* Undo the reference we took when populating the table */
+		page_cache_release(pages[i]);
+	}
+
+	drm_free_large(pages);
+}
+EXPORT_SYMBOL(drm_gem_put_pages);
+
 /** Returns a reference to the object named by the handle. */
 struct drm_gem_object *
 drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index d6d9a28fc6b4..91f343c8b160 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -1616,6 +1616,10 @@ void drm_gem_free_mmap_offset(struct drm_gem_object *obj);
 int drm_gem_create_mmap_offset(struct drm_gem_object *obj);
 int drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size);
 
+struct page **drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
+void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
+		bool dirty, bool accessed);
+
 struct drm_gem_object *drm_gem_object_lookup(struct drm_device *dev,
					      struct drm_file *filp,
					      u32 handle);
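
For context, a driver that previously open-coded this logic (gma500, omapdrm, udl, or the upcoming msm driver) would now pin and release a buffer's shmem backing roughly as sketched below. This is an illustrative sketch, not code from the patch: the my_bo structure and the my_pin_pages()/my_unpin_pages() wrappers are hypothetical, and GFP_KERNEL is just a typical mask choice; only drm_gem_get_pages()/drm_gem_put_pages() and their signatures come from the diff above.

#include <drm/drmP.h>

/* Hypothetical driver-side buffer object wrapping a GEM object. */
struct my_bo {
	struct drm_gem_object base;
	struct page **pages;	/* valid while the buffer is pinned */
};

/* Pin the shmem-backed pages so the driver can map or DMA to them. */
static int my_pin_pages(struct my_bo *bo)
{
	struct page **pages;

	/* A device restricted to the low 4GB would pass __GFP_DMA32 here. */
	pages = drm_gem_get_pages(&bo->base, GFP_KERNEL);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	bo->pages = pages;
	return 0;
}

/* Drop the page references, telling the mm the pages were written and used. */
static void my_unpin_pages(struct my_bo *bo)
{
	drm_gem_put_pages(&bo->base, bo->pages, true, true);
	bo->pages = NULL;
}

The dirty/accessed flags map directly onto set_page_dirty()/mark_page_accessed() in the helper, so a driver that only ever read from the buffer could pass false for dirty.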