author | Mauro Carvalho Chehab | 2013-04-01 09:54:14 -0300
committer | Mauro Carvalho Chehab | 2013-04-01 09:54:14 -0300
commit | f9f11dfe4831adb1531e1face9dcd9fc57665d2e (patch)
tree | 3fb605d9c6c7ed70d42237de21203fa131e6eee8 /drivers/gpu/drm
parent | 6bf7861fa2bb4be3cc70a6e9aed664ce65270027 (diff)
parent | 07961ac7c0ee8b546658717034fe692fd12eefa9 (diff)
Merge tag 'v3.9-rc5' into patchwork
Linux 3.9-rc5
* tag 'v3.9-rc5': (1080 commits)
Linux 3.9-rc5
Revert "lockdep: check that no locks held at freeze time"
dw_dmac: adjust slave_id accordingly to request line base
dmaengine: dw_dma: fix endianess for DT xlate function
PNP: List Rafael Wysocki as a maintainer
rbd: don't zero-fill non-image object requests
ia64 idle: delete stale (*idle)() function pointer
Btrfs: don't drop path when printing out tree errors in scrub
target: Fix RESERVATION_CONFLICT status regression for iscsi-target special case
tcm_vhost: Avoid VIRTIO_RING_F_EVENT_IDX feature bit
Revert "mm: introduce VM_POPULATE flag to better deal with racy userspace programs"
usb: ftdi_sio: Add support for Mitsubishi FX-USB-AW/-BD
mg_disk: fix error return code in mg_probe()
Btrfs: fix wrong return value of btrfs_lookup_csum()
Btrfs: fix wrong reservation of csums
Btrfs: fix double free in the btrfs_qgroup_account_ref()
Btrfs: limit the global reserve to 512mb
Btrfs: hold the ordered operations mutex when waiting on ordered extents
Btrfs: fix space accounting for unlink and rename
Btrfs: fix space leak when we fail to reserve metadata space
...
Diffstat (limited to 'drivers/gpu/drm')
49 files changed, 859 insertions, 266 deletions
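The same diff and the list of merged commits can be regenerated locally with standard git commands. This is only a sketch and assumes a clone of the kernel tree in which the commit and parent hashes shown above are reachable:

# assumption: a local kernel clone with these objects present
git diff 6bf7861fa2bb4be3cc70a6e9aed664ce65270027 \
        f9f11dfe4831adb1531e1face9dcd9fc57665d2e -- drivers/gpu/drm
# roughly the 1080 commits pulled in from the v3.9-rc5 tag
git log --oneline 6bf7861fa2bb4be3cc70a6e9aed664ce65270027..07961ac7c0ee8b546658717034fe692fd12eefa9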
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index c194f4e680ad..e2acfdbf7d3c 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -1634,7 +1634,7 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
 	unsigned vblank = (pt->vactive_vblank_hi & 0xf) << 8 | pt->vblank_lo;
 	unsigned hsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc0) << 2 | pt->hsync_offset_lo;
 	unsigned hsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0x30) << 4 | pt->hsync_pulse_width_lo;
-	unsigned vsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc) >> 2 | pt->vsync_offset_pulse_width_lo >> 4;
+	unsigned vsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc) << 2 | pt->vsync_offset_pulse_width_lo >> 4;
 	unsigned vsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0x3) << 4 | (pt->vsync_offset_pulse_width_lo & 0xf);
 
 	/* ignore tiny modes */
@@ -1715,6 +1715,7 @@ set_size:
 	}
 
 	mode->type = DRM_MODE_TYPE_DRIVER;
+	mode->vrefresh = drm_mode_vrefresh(mode);
 	drm_mode_set_name(mode);
 
 	return mode;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 36493ce71f9a..98cc14725ba9 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -38,11 +38,12 @@
 /* position control register for hardware window 0, 2 ~ 4.*/
 #define VIDOSD_A(win) (VIDOSD_BASE + 0x00 + (win) * 16)
 #define VIDOSD_B(win) (VIDOSD_BASE + 0x04 + (win) * 16)
-/* size control register for hardware window 0. */
-#define VIDOSD_C_SIZE_W0 (VIDOSD_BASE + 0x08)
-/* alpha control register for hardware window 1 ~ 4. */
-#define VIDOSD_C(win) (VIDOSD_BASE + 0x18 + (win) * 16)
-/* size control register for hardware window 1 ~ 4. */
+/*
+ * size control register for hardware windows 0 and alpha control register
+ * for hardware windows 1 ~ 4
+ */
+#define VIDOSD_C(win) (VIDOSD_BASE + 0x08 + (win) * 16)
+/* size control register for hardware windows 1 ~ 2. */
 #define VIDOSD_D(win) (VIDOSD_BASE + 0x0C + (win) * 16)
 
 #define VIDWx_BUF_START(win, buf) (VIDW_BUF_START(buf) + (win) * 8)
@@ -50,9 +51,9 @@
 #define VIDWx_BUF_SIZE(win, buf) (VIDW_BUF_SIZE(buf) + (win) * 4)
 
 /* color key control register for hardware window 1 ~ 4. */
-#define WKEYCON0_BASE(x) ((WKEYCON0 + 0x140) + (x * 8))
+#define WKEYCON0_BASE(x) ((WKEYCON0 + 0x140) + ((x - 1) * 8))
 /* color key value register for hardware window 1 ~ 4. */
-#define WKEYCON1_BASE(x) ((WKEYCON1 + 0x140) + (x * 8))
+#define WKEYCON1_BASE(x) ((WKEYCON1 + 0x140) + ((x - 1) * 8))
 
 /* FIMD has totally five hardware windows.
*/ #define WINDOWS_NR 5 @@ -109,9 +110,9 @@ struct fimd_context { #ifdef CONFIG_OF static const struct of_device_id fimd_driver_dt_match[] = { - { .compatible = "samsung,exynos4-fimd", + { .compatible = "samsung,exynos4210-fimd", .data = &exynos4_fimd_driver_data }, - { .compatible = "samsung,exynos5-fimd", + { .compatible = "samsung,exynos5250-fimd", .data = &exynos5_fimd_driver_data }, {}, }; @@ -581,7 +582,7 @@ static void fimd_win_commit(struct device *dev, int zpos) if (win != 3 && win != 4) { u32 offset = VIDOSD_D(win); if (win == 0) - offset = VIDOSD_C_SIZE_W0; + offset = VIDOSD_C(win); val = win_data->ovl_width * win_data->ovl_height; writel(val, ctx->regs + offset); diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c index 3b0da0378acf..47a493c8a71f 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c +++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c @@ -48,8 +48,14 @@ /* registers for base address */ #define G2D_SRC_BASE_ADDR 0x0304 +#define G2D_SRC_COLOR_MODE 0x030C +#define G2D_SRC_LEFT_TOP 0x0310 +#define G2D_SRC_RIGHT_BOTTOM 0x0314 #define G2D_SRC_PLANE2_BASE_ADDR 0x0318 #define G2D_DST_BASE_ADDR 0x0404 +#define G2D_DST_COLOR_MODE 0x040C +#define G2D_DST_LEFT_TOP 0x0410 +#define G2D_DST_RIGHT_BOTTOM 0x0414 #define G2D_DST_PLANE2_BASE_ADDR 0x0418 #define G2D_PAT_BASE_ADDR 0x0500 #define G2D_MSK_BASE_ADDR 0x0520 @@ -82,7 +88,7 @@ #define G2D_DMA_LIST_DONE_COUNT_OFFSET 17 /* G2D_DMA_HOLD_CMD */ -#define G2D_USET_HOLD (1 << 2) +#define G2D_USER_HOLD (1 << 2) #define G2D_LIST_HOLD (1 << 1) #define G2D_BITBLT_HOLD (1 << 0) @@ -91,13 +97,27 @@ #define G2D_START_NHOLT (1 << 1) #define G2D_START_BITBLT (1 << 0) +/* buffer color format */ +#define G2D_FMT_XRGB8888 0 +#define G2D_FMT_ARGB8888 1 +#define G2D_FMT_RGB565 2 +#define G2D_FMT_XRGB1555 3 +#define G2D_FMT_ARGB1555 4 +#define G2D_FMT_XRGB4444 5 +#define G2D_FMT_ARGB4444 6 +#define G2D_FMT_PACKED_RGB888 7 +#define G2D_FMT_A8 11 +#define G2D_FMT_L8 12 + +/* buffer valid length */ +#define G2D_LEN_MIN 1 +#define G2D_LEN_MAX 8000 + #define G2D_CMDLIST_SIZE (PAGE_SIZE / 4) #define G2D_CMDLIST_NUM 64 #define G2D_CMDLIST_POOL_SIZE (G2D_CMDLIST_SIZE * G2D_CMDLIST_NUM) #define G2D_CMDLIST_DATA_NUM (G2D_CMDLIST_SIZE / sizeof(u32) - 2) -#define MAX_BUF_ADDR_NR 6 - /* maximum buffer pool size of userptr is 64MB as default */ #define MAX_POOL (64 * 1024 * 1024) @@ -106,6 +126,17 @@ enum { BUF_TYPE_USERPTR, }; +enum g2d_reg_type { + REG_TYPE_NONE = -1, + REG_TYPE_SRC, + REG_TYPE_SRC_PLANE2, + REG_TYPE_DST, + REG_TYPE_DST_PLANE2, + REG_TYPE_PAT, + REG_TYPE_MSK, + MAX_REG_TYPE_NR +}; + /* cmdlist data structure */ struct g2d_cmdlist { u32 head; @@ -113,6 +144,42 @@ struct g2d_cmdlist { u32 last; /* last data offset */ }; +/* + * A structure of buffer description + * + * @format: color format + * @left_x: the x coordinates of left top corner + * @top_y: the y coordinates of left top corner + * @right_x: the x coordinates of right bottom corner + * @bottom_y: the y coordinates of right bottom corner + * + */ +struct g2d_buf_desc { + unsigned int format; + unsigned int left_x; + unsigned int top_y; + unsigned int right_x; + unsigned int bottom_y; +}; + +/* + * A structure of buffer information + * + * @map_nr: manages the number of mapped buffers + * @reg_types: stores regitster type in the order of requested command + * @handles: stores buffer handle in its reg_type position + * @types: stores buffer type in its reg_type position + * @descs: stores buffer description in its reg_type position + * + */ +struct 
g2d_buf_info { + unsigned int map_nr; + enum g2d_reg_type reg_types[MAX_REG_TYPE_NR]; + unsigned long handles[MAX_REG_TYPE_NR]; + unsigned int types[MAX_REG_TYPE_NR]; + struct g2d_buf_desc descs[MAX_REG_TYPE_NR]; +}; + struct drm_exynos_pending_g2d_event { struct drm_pending_event base; struct drm_exynos_g2d_event event; @@ -131,14 +198,11 @@ struct g2d_cmdlist_userptr { bool in_pool; bool out_of_list; }; - struct g2d_cmdlist_node { struct list_head list; struct g2d_cmdlist *cmdlist; - unsigned int map_nr; - unsigned long handles[MAX_BUF_ADDR_NR]; - unsigned int obj_type[MAX_BUF_ADDR_NR]; dma_addr_t dma_addr; + struct g2d_buf_info buf_info; struct drm_exynos_pending_g2d_event *event; }; @@ -188,6 +252,7 @@ static int g2d_init_cmdlist(struct g2d_data *g2d) struct exynos_drm_subdrv *subdrv = &g2d->subdrv; int nr; int ret; + struct g2d_buf_info *buf_info; init_dma_attrs(&g2d->cmdlist_dma_attrs); dma_set_attr(DMA_ATTR_WRITE_COMBINE, &g2d->cmdlist_dma_attrs); @@ -209,11 +274,17 @@ static int g2d_init_cmdlist(struct g2d_data *g2d) } for (nr = 0; nr < G2D_CMDLIST_NUM; nr++) { + unsigned int i; + node[nr].cmdlist = g2d->cmdlist_pool_virt + nr * G2D_CMDLIST_SIZE; node[nr].dma_addr = g2d->cmdlist_pool + nr * G2D_CMDLIST_SIZE; + buf_info = &node[nr].buf_info; + for (i = 0; i < MAX_REG_TYPE_NR; i++) + buf_info->reg_types[i] = REG_TYPE_NONE; + list_add_tail(&node[nr].list, &g2d->free_cmdlist); } @@ -450,7 +521,7 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev, DMA_BIDIRECTIONAL); if (ret < 0) { DRM_ERROR("failed to map sgt with dma region.\n"); - goto err_free_sgt; + goto err_sg_free_table; } g2d_userptr->dma_addr = sgt->sgl[0].dma_address; @@ -467,8 +538,10 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev, return &g2d_userptr->dma_addr; -err_free_sgt: +err_sg_free_table: sg_free_table(sgt); + +err_free_sgt: kfree(sgt); sgt = NULL; @@ -506,36 +579,172 @@ static void g2d_userptr_free_all(struct drm_device *drm_dev, g2d->current_pool = 0; } +static enum g2d_reg_type g2d_get_reg_type(int reg_offset) +{ + enum g2d_reg_type reg_type; + + switch (reg_offset) { + case G2D_SRC_BASE_ADDR: + case G2D_SRC_COLOR_MODE: + case G2D_SRC_LEFT_TOP: + case G2D_SRC_RIGHT_BOTTOM: + reg_type = REG_TYPE_SRC; + break; + case G2D_SRC_PLANE2_BASE_ADDR: + reg_type = REG_TYPE_SRC_PLANE2; + break; + case G2D_DST_BASE_ADDR: + case G2D_DST_COLOR_MODE: + case G2D_DST_LEFT_TOP: + case G2D_DST_RIGHT_BOTTOM: + reg_type = REG_TYPE_DST; + break; + case G2D_DST_PLANE2_BASE_ADDR: + reg_type = REG_TYPE_DST_PLANE2; + break; + case G2D_PAT_BASE_ADDR: + reg_type = REG_TYPE_PAT; + break; + case G2D_MSK_BASE_ADDR: + reg_type = REG_TYPE_MSK; + break; + default: + reg_type = REG_TYPE_NONE; + DRM_ERROR("Unknown register offset![%d]\n", reg_offset); + break; + }; + + return reg_type; +} + +static unsigned long g2d_get_buf_bpp(unsigned int format) +{ + unsigned long bpp; + + switch (format) { + case G2D_FMT_XRGB8888: + case G2D_FMT_ARGB8888: + bpp = 4; + break; + case G2D_FMT_RGB565: + case G2D_FMT_XRGB1555: + case G2D_FMT_ARGB1555: + case G2D_FMT_XRGB4444: + case G2D_FMT_ARGB4444: + bpp = 2; + break; + case G2D_FMT_PACKED_RGB888: + bpp = 3; + break; + default: + bpp = 1; + break; + } + + return bpp; +} + +static bool g2d_check_buf_desc_is_valid(struct g2d_buf_desc *buf_desc, + enum g2d_reg_type reg_type, + unsigned long size) +{ + unsigned int width, height; + unsigned long area; + + /* + * check source and destination buffers only. + * so the others are always valid. 
+ */ + if (reg_type != REG_TYPE_SRC && reg_type != REG_TYPE_DST) + return true; + + width = buf_desc->right_x - buf_desc->left_x; + if (width < G2D_LEN_MIN || width > G2D_LEN_MAX) { + DRM_ERROR("width[%u] is out of range!\n", width); + return false; + } + + height = buf_desc->bottom_y - buf_desc->top_y; + if (height < G2D_LEN_MIN || height > G2D_LEN_MAX) { + DRM_ERROR("height[%u] is out of range!\n", height); + return false; + } + + area = (unsigned long)width * (unsigned long)height * + g2d_get_buf_bpp(buf_desc->format); + if (area > size) { + DRM_ERROR("area[%lu] is out of range[%lu]!\n", area, size); + return false; + } + + return true; +} + static int g2d_map_cmdlist_gem(struct g2d_data *g2d, struct g2d_cmdlist_node *node, struct drm_device *drm_dev, struct drm_file *file) { struct g2d_cmdlist *cmdlist = node->cmdlist; + struct g2d_buf_info *buf_info = &node->buf_info; int offset; + int ret; int i; - for (i = 0; i < node->map_nr; i++) { + for (i = 0; i < buf_info->map_nr; i++) { + struct g2d_buf_desc *buf_desc; + enum g2d_reg_type reg_type; + int reg_pos; unsigned long handle; dma_addr_t *addr; - offset = cmdlist->last - (i * 2 + 1); - handle = cmdlist->data[offset]; + reg_pos = cmdlist->last - 2 * (i + 1); + + offset = cmdlist->data[reg_pos]; + handle = cmdlist->data[reg_pos + 1]; + + reg_type = g2d_get_reg_type(offset); + if (reg_type == REG_TYPE_NONE) { + ret = -EFAULT; + goto err; + } + + buf_desc = &buf_info->descs[reg_type]; + + if (buf_info->types[reg_type] == BUF_TYPE_GEM) { + unsigned long size; + + size = exynos_drm_gem_get_size(drm_dev, handle, file); + if (!size) { + ret = -EFAULT; + goto err; + } + + if (!g2d_check_buf_desc_is_valid(buf_desc, reg_type, + size)) { + ret = -EFAULT; + goto err; + } - if (node->obj_type[i] == BUF_TYPE_GEM) { addr = exynos_drm_gem_get_dma_addr(drm_dev, handle, file); if (IS_ERR(addr)) { - node->map_nr = i; - return -EFAULT; + ret = -EFAULT; + goto err; } } else { struct drm_exynos_g2d_userptr g2d_userptr; if (copy_from_user(&g2d_userptr, (void __user *)handle, sizeof(struct drm_exynos_g2d_userptr))) { - node->map_nr = i; - return -EFAULT; + ret = -EFAULT; + goto err; + } + + if (!g2d_check_buf_desc_is_valid(buf_desc, reg_type, + g2d_userptr.size)) { + ret = -EFAULT; + goto err; } addr = g2d_userptr_get_dma_addr(drm_dev, @@ -544,16 +753,21 @@ static int g2d_map_cmdlist_gem(struct g2d_data *g2d, file, &handle); if (IS_ERR(addr)) { - node->map_nr = i; - return -EFAULT; + ret = -EFAULT; + goto err; } } - cmdlist->data[offset] = *addr; - node->handles[i] = handle; + cmdlist->data[reg_pos + 1] = *addr; + buf_info->reg_types[i] = reg_type; + buf_info->handles[reg_type] = handle; } return 0; + +err: + buf_info->map_nr = i; + return ret; } static void g2d_unmap_cmdlist_gem(struct g2d_data *g2d, @@ -561,22 +775,33 @@ static void g2d_unmap_cmdlist_gem(struct g2d_data *g2d, struct drm_file *filp) { struct exynos_drm_subdrv *subdrv = &g2d->subdrv; + struct g2d_buf_info *buf_info = &node->buf_info; int i; - for (i = 0; i < node->map_nr; i++) { - unsigned long handle = node->handles[i]; + for (i = 0; i < buf_info->map_nr; i++) { + struct g2d_buf_desc *buf_desc; + enum g2d_reg_type reg_type; + unsigned long handle; + + reg_type = buf_info->reg_types[i]; + + buf_desc = &buf_info->descs[reg_type]; + handle = buf_info->handles[reg_type]; - if (node->obj_type[i] == BUF_TYPE_GEM) + if (buf_info->types[reg_type] == BUF_TYPE_GEM) exynos_drm_gem_put_dma_addr(subdrv->drm_dev, handle, filp); else g2d_userptr_put_dma_addr(subdrv->drm_dev, handle, false); - 
node->handles[i] = 0; + buf_info->reg_types[i] = REG_TYPE_NONE; + buf_info->handles[reg_type] = 0; + buf_info->types[reg_type] = 0; + memset(buf_desc, 0x00, sizeof(*buf_desc)); } - node->map_nr = 0; + buf_info->map_nr = 0; } static void g2d_dma_start(struct g2d_data *g2d, @@ -589,10 +814,6 @@ static void g2d_dma_start(struct g2d_data *g2d, pm_runtime_get_sync(g2d->dev); clk_enable(g2d->gate_clk); - /* interrupt enable */ - writel_relaxed(G2D_INTEN_ACF | G2D_INTEN_UCF | G2D_INTEN_GCF, - g2d->regs + G2D_INTEN); - writel_relaxed(node->dma_addr, g2d->regs + G2D_DMA_SFR_BASE_ADDR); writel_relaxed(G2D_DMA_START, g2d->regs + G2D_DMA_COMMAND); } @@ -643,7 +864,6 @@ static void g2d_runqueue_worker(struct work_struct *work) struct g2d_data *g2d = container_of(work, struct g2d_data, runqueue_work); - mutex_lock(&g2d->runqueue_mutex); clk_disable(g2d->gate_clk); pm_runtime_put_sync(g2d->dev); @@ -724,20 +944,14 @@ static int g2d_check_reg_offset(struct device *dev, int i; for (i = 0; i < nr; i++) { - index = cmdlist->last - 2 * (i + 1); + struct g2d_buf_info *buf_info = &node->buf_info; + struct g2d_buf_desc *buf_desc; + enum g2d_reg_type reg_type; + unsigned long value; - if (for_addr) { - /* check userptr buffer type. */ - reg_offset = (cmdlist->data[index] & - ~0x7fffffff) >> 31; - if (reg_offset) { - node->obj_type[i] = BUF_TYPE_USERPTR; - cmdlist->data[index] &= ~G2D_BUF_USERPTR; - } - } + index = cmdlist->last - 2 * (i + 1); reg_offset = cmdlist->data[index] & ~0xfffff000; - if (reg_offset < G2D_VALID_START || reg_offset > G2D_VALID_END) goto err; if (reg_offset % 4) @@ -753,8 +967,60 @@ static int g2d_check_reg_offset(struct device *dev, if (!for_addr) goto err; - if (node->obj_type[i] != BUF_TYPE_USERPTR) - node->obj_type[i] = BUF_TYPE_GEM; + reg_type = g2d_get_reg_type(reg_offset); + if (reg_type == REG_TYPE_NONE) + goto err; + + /* check userptr buffer type. */ + if ((cmdlist->data[index] & ~0x7fffffff) >> 31) { + buf_info->types[reg_type] = BUF_TYPE_USERPTR; + cmdlist->data[index] &= ~G2D_BUF_USERPTR; + } else + buf_info->types[reg_type] = BUF_TYPE_GEM; + break; + case G2D_SRC_COLOR_MODE: + case G2D_DST_COLOR_MODE: + if (for_addr) + goto err; + + reg_type = g2d_get_reg_type(reg_offset); + if (reg_type == REG_TYPE_NONE) + goto err; + + buf_desc = &buf_info->descs[reg_type]; + value = cmdlist->data[index + 1]; + + buf_desc->format = value & 0xf; + break; + case G2D_SRC_LEFT_TOP: + case G2D_DST_LEFT_TOP: + if (for_addr) + goto err; + + reg_type = g2d_get_reg_type(reg_offset); + if (reg_type == REG_TYPE_NONE) + goto err; + + buf_desc = &buf_info->descs[reg_type]; + value = cmdlist->data[index + 1]; + + buf_desc->left_x = value & 0x1fff; + buf_desc->top_y = (value & 0x1fff0000) >> 16; + break; + case G2D_SRC_RIGHT_BOTTOM: + case G2D_DST_RIGHT_BOTTOM: + if (for_addr) + goto err; + + reg_type = g2d_get_reg_type(reg_offset); + if (reg_type == REG_TYPE_NONE) + goto err; + + buf_desc = &buf_info->descs[reg_type]; + value = cmdlist->data[index + 1]; + + buf_desc->right_x = value & 0x1fff; + buf_desc->bottom_y = (value & 0x1fff0000) >> 16; break; default: if (for_addr) @@ -860,9 +1126,23 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data, cmdlist->data[cmdlist->last++] = G2D_SRC_BASE_ADDR; cmdlist->data[cmdlist->last++] = 0; + /* + * 'LIST_HOLD' command should be set to the DMA_HOLD_CMD_REG + * and GCF bit should be set to INTEN register if user wants + * G2D interrupt event once current command list execution is + * finished. 
+ * Otherwise only ACF bit should be set to INTEN register so + * that one interrupt is occured after all command lists + * have been completed. + */ if (node->event) { + cmdlist->data[cmdlist->last++] = G2D_INTEN; + cmdlist->data[cmdlist->last++] = G2D_INTEN_ACF | G2D_INTEN_GCF; cmdlist->data[cmdlist->last++] = G2D_DMA_HOLD_CMD; cmdlist->data[cmdlist->last++] = G2D_LIST_HOLD; + } else { + cmdlist->data[cmdlist->last++] = G2D_INTEN; + cmdlist->data[cmdlist->last++] = G2D_INTEN_ACF; } /* Check size of cmdlist: last 2 is about G2D_BITBLT_START */ @@ -887,7 +1167,7 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data, if (ret < 0) goto err_free_event; - node->map_nr = req->cmd_buf_nr; + node->buf_info.map_nr = req->cmd_buf_nr; if (req->cmd_buf_nr) { struct drm_exynos_g2d_cmd *cmd_buf; diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c index 67e17ce112b6..0e6fe000578c 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gem.c +++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c @@ -164,6 +164,27 @@ out: exynos_gem_obj = NULL; } +unsigned long exynos_drm_gem_get_size(struct drm_device *dev, + unsigned int gem_handle, + struct drm_file *file_priv) +{ + struct exynos_drm_gem_obj *exynos_gem_obj; + struct drm_gem_object *obj; + + obj = drm_gem_object_lookup(dev, file_priv, gem_handle); + if (!obj) { + DRM_ERROR("failed to lookup gem object.\n"); + return 0; + } + + exynos_gem_obj = to_exynos_gem_obj(obj); + + drm_gem_object_unreference_unlocked(obj); + + return exynos_gem_obj->buffer->size; +} + + struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev, unsigned long size) { diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h index 35ebac47dc2b..468766bee450 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gem.h +++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h @@ -130,6 +130,11 @@ int exynos_drm_gem_userptr_ioctl(struct drm_device *dev, void *data, int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); +/* get buffer size to gem handle. */ +unsigned long exynos_drm_gem_get_size(struct drm_device *dev, + unsigned int gem_handle, + struct drm_file *file_priv); + /* initialize gem object. 
*/ int exynos_drm_gem_init_object(struct drm_gem_object *obj); diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c index 13ccbd4bcfaa..9504b0cd825a 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c @@ -117,13 +117,12 @@ static struct edid *vidi_get_edid(struct device *dev, } edid_len = (1 + ctx->raw_edid->extensions) * EDID_LENGTH; - edid = kzalloc(edid_len, GFP_KERNEL); + edid = kmemdup(ctx->raw_edid, edid_len, GFP_KERNEL); if (!edid) { DRM_DEBUG_KMS("failed to allocate edid\n"); return ERR_PTR(-ENOMEM); } - memcpy(edid, ctx->raw_edid, edid_len); return edid; } @@ -563,12 +562,11 @@ int vidi_connection_ioctl(struct drm_device *drm_dev, void *data, return -EINVAL; } edid_len = (1 + raw_edid->extensions) * EDID_LENGTH; - ctx->raw_edid = kzalloc(edid_len, GFP_KERNEL); + ctx->raw_edid = kmemdup(raw_edid, edid_len, GFP_KERNEL); if (!ctx->raw_edid) { DRM_DEBUG_KMS("failed to allocate raw_edid.\n"); return -ENOMEM; } - memcpy(ctx->raw_edid, raw_edid, edid_len); } else { /* * with connection = 0, free raw_edid diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c index e919aba29b3d..2f4f72f07047 100644 --- a/drivers/gpu/drm/exynos/exynos_mixer.c +++ b/drivers/gpu/drm/exynos/exynos_mixer.c @@ -818,7 +818,7 @@ static void mixer_win_disable(void *ctx, int win) mixer_ctx->win_data[win].enabled = false; } -int mixer_check_timing(void *ctx, struct fb_videomode *timing) +static int mixer_check_timing(void *ctx, struct fb_videomode *timing) { struct mixer_context *mixer_ctx = ctx; u32 w, h; diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index aae31489c893..7299ea45dd03 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -103,7 +103,7 @@ static const char *cache_level_str(int type) static void describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) { - seq_printf(m, "%p: %s%s %8zdKiB %02x %02x %d %d %d%s%s%s", + seq_printf(m, "%pK: %s%s %8zdKiB %02x %02x %d %d %d%s%s%s", &obj->base, get_pin_flag(obj), get_tiling_flag(obj), diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index c5b8c81b9440..e9b57893db2b 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -125,6 +125,11 @@ MODULE_PARM_DESC(preliminary_hw_support, "Enable Haswell and ValleyView Support. 
" "(default: false)"); +int i915_disable_power_well __read_mostly = 0; +module_param_named(disable_power_well, i915_disable_power_well, int, 0600); +MODULE_PARM_DESC(disable_power_well, + "Disable the power well when possible (default: false)"); + static struct drm_driver driver; extern int intel_agp_enabled; @@ -379,15 +384,15 @@ static const struct pci_device_id pciidlist[] = { /* aka */ INTEL_VGA_DEVICE(0x0A06, &intel_haswell_m_info), /* ULT GT1 mobile */ INTEL_VGA_DEVICE(0x0A16, &intel_haswell_m_info), /* ULT GT2 mobile */ INTEL_VGA_DEVICE(0x0A26, &intel_haswell_m_info), /* ULT GT2 mobile */ - INTEL_VGA_DEVICE(0x0D12, &intel_haswell_d_info), /* CRW GT1 desktop */ + INTEL_VGA_DEVICE(0x0D02, &intel_haswell_d_info), /* CRW GT1 desktop */ + INTEL_VGA_DEVICE(0x0D12, &intel_haswell_d_info), /* CRW GT2 desktop */ INTEL_VGA_DEVICE(0x0D22, &intel_haswell_d_info), /* CRW GT2 desktop */ - INTEL_VGA_DEVICE(0x0D32, &intel_haswell_d_info), /* CRW GT2 desktop */ - INTEL_VGA_DEVICE(0x0D1A, &intel_haswell_d_info), /* CRW GT1 server */ + INTEL_VGA_DEVICE(0x0D0A, &intel_haswell_d_info), /* CRW GT1 server */ + INTEL_VGA_DEVICE(0x0D1A, &intel_haswell_d_info), /* CRW GT2 server */ INTEL_VGA_DEVICE(0x0D2A, &intel_haswell_d_info), /* CRW GT2 server */ - INTEL_VGA_DEVICE(0x0D3A, &intel_haswell_d_info), /* CRW GT2 server */ - INTEL_VGA_DEVICE(0x0D16, &intel_haswell_m_info), /* CRW GT1 mobile */ + INTEL_VGA_DEVICE(0x0D06, &intel_haswell_m_info), /* CRW GT1 mobile */ + INTEL_VGA_DEVICE(0x0D16, &intel_haswell_m_info), /* CRW GT2 mobile */ INTEL_VGA_DEVICE(0x0D26, &intel_haswell_m_info), /* CRW GT2 mobile */ - INTEL_VGA_DEVICE(0x0D36, &intel_haswell_m_info), /* CRW GT2 mobile */ INTEL_VGA_DEVICE(0x0f30, &intel_valleyview_m_info), INTEL_VGA_DEVICE(0x0157, &intel_valleyview_m_info), INTEL_VGA_DEVICE(0x0155, &intel_valleyview_d_info), @@ -495,6 +500,7 @@ static int i915_drm_freeze(struct drm_device *dev) intel_modeset_disable(dev); drm_irq_uninstall(dev); + dev_priv->enable_hotplug_processing = false; } i915_save_state(dev); @@ -568,10 +574,20 @@ static int __i915_drm_thaw(struct drm_device *dev) error = i915_gem_init_hw(dev); mutex_unlock(&dev->struct_mutex); + /* We need working interrupts for modeset enabling ... */ + drm_irq_install(dev); + intel_modeset_init_hw(dev); intel_modeset_setup_hw_state(dev, false); - drm_irq_install(dev); + + /* + * ... but also need to make sure that hotplug processing + * doesn't cause havoc. Like in the driver load code we don't + * bother with the tiny race here where we might loose hotplug + * notifications. 
+ * */ intel_hpd_init(dev); + dev_priv->enable_hotplug_processing = true; } intel_opregion_init(dev); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index e95337c97459..01769e2a9953 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1398,6 +1398,7 @@ extern int i915_enable_fbc __read_mostly; extern bool i915_enable_hangcheck __read_mostly; extern int i915_enable_ppgtt __read_mostly; extern unsigned int i915_preliminary_hw_support __read_mostly; +extern int i915_disable_power_well __read_mostly; extern int i915_suspend(struct drm_device *dev, pm_message_t state); extern int i915_resume(struct drm_device *dev); diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 2f2daebd0eef..3b11ab0fbc96 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -732,6 +732,8 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec, int count) { int i; + int relocs_total = 0; + int relocs_max = INT_MAX / sizeof(struct drm_i915_gem_relocation_entry); for (i = 0; i < count; i++) { char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr; @@ -740,10 +742,13 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec, if (exec[i].flags & __EXEC_OBJECT_UNKNOWN_FLAGS) return -EINVAL; - /* First check for malicious input causing overflow */ - if (exec[i].relocation_count > - INT_MAX / sizeof(struct drm_i915_gem_relocation_entry)) + /* First check for malicious input causing overflow in + * the worst case where we need to allocate the entire + * relocation tree as a single array. + */ + if (exec[i].relocation_count > relocs_max - relocs_total) return -EINVAL; + relocs_total += exec[i].relocation_count; length = exec[i].relocation_count * sizeof(struct drm_i915_gem_relocation_entry); diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 2cd97d1cc920..3c7bb0410b51 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -701,7 +701,7 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg) { struct drm_device *dev = (struct drm_device *) arg; drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; - u32 de_iir, gt_iir, de_ier, pm_iir; + u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier; irqreturn_t ret = IRQ_NONE; int i; @@ -711,6 +711,15 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg) de_ier = I915_READ(DEIER); I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); + /* Disable south interrupts. We'll only write to SDEIIR once, so further + * interrupts will will be stored on its back queue, and then we'll be + * able to process them after we restore SDEIER (as soon as we restore + * it, we'll get an interrupt if SDEIIR still has something to process + * due to its back queue). 
*/ + sde_ier = I915_READ(SDEIER); + I915_WRITE(SDEIER, 0); + POSTING_READ(SDEIER); + gt_iir = I915_READ(GTIIR); if (gt_iir) { snb_gt_irq_handler(dev, dev_priv, gt_iir); @@ -759,6 +768,8 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg) I915_WRITE(DEIER, de_ier); POSTING_READ(DEIER); + I915_WRITE(SDEIER, sde_ier); + POSTING_READ(SDEIER); return ret; } @@ -778,7 +789,7 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg) struct drm_device *dev = (struct drm_device *) arg; drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; int ret = IRQ_NONE; - u32 de_iir, gt_iir, de_ier, pm_iir; + u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier; atomic_inc(&dev_priv->irq_received); @@ -787,6 +798,15 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg) I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); POSTING_READ(DEIER); + /* Disable south interrupts. We'll only write to SDEIIR once, so further + * interrupts will will be stored on its back queue, and then we'll be + * able to process them after we restore SDEIER (as soon as we restore + * it, we'll get an interrupt if SDEIIR still has something to process + * due to its back queue). */ + sde_ier = I915_READ(SDEIER); + I915_WRITE(SDEIER, 0); + POSTING_READ(SDEIER); + de_iir = I915_READ(DEIIR); gt_iir = I915_READ(GTIIR); pm_iir = I915_READ(GEN6_PMIIR); @@ -849,6 +869,8 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg) done: I915_WRITE(DEIER, de_ier); POSTING_READ(DEIER); + I915_WRITE(SDEIER, sde_ier); + POSTING_READ(SDEIER); return ret; } diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 527b664d3434..848992f67d56 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -1613,9 +1613,9 @@ #define ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1<<16) #define ADPA_USE_VGA_HVPOLARITY (1<<15) #define ADPA_SETS_HVPOLARITY 0 -#define ADPA_VSYNC_CNTL_DISABLE (1<<11) +#define ADPA_VSYNC_CNTL_DISABLE (1<<10) #define ADPA_VSYNC_CNTL_ENABLE 0 -#define ADPA_HSYNC_CNTL_DISABLE (1<<10) +#define ADPA_HSYNC_CNTL_DISABLE (1<<11) #define ADPA_HSYNC_CNTL_ENABLE 0 #define ADPA_VSYNC_ACTIVE_HIGH (1<<4) #define ADPA_VSYNC_ACTIVE_LOW 0 diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 969d08c72d10..32a3693905ec 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c @@ -88,7 +88,7 @@ static void intel_disable_crt(struct intel_encoder *encoder) u32 temp; temp = I915_READ(crt->adpa_reg); - temp &= ~(ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE); + temp |= ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE; temp &= ~ADPA_DAC_ENABLE; I915_WRITE(crt->adpa_reg, temp); } diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index d64af5aa4a1c..8d0bac3c35d7 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c @@ -1391,8 +1391,8 @@ void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder) struct intel_dp *intel_dp = &intel_dig_port->dp; struct drm_i915_private *dev_priv = encoder->dev->dev_private; enum port port = intel_dig_port->port; - bool wait; uint32_t val; + bool wait = false; if (I915_READ(DP_TP_CTL(port)) & DP_TP_CTL_ENABLE) { val = I915_READ(DDI_BUF_CTL(port)); diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index a05ac2c91ba2..b20d50192fcc 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -3604,6 +3604,30 @@ static void 
intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable) */ } +/** + * i9xx_fixup_plane - ugly workaround for G45 to fire up the hardware + * cursor plane briefly if not already running after enabling the display + * plane. + * This workaround avoids occasional blank screens when self refresh is + * enabled. + */ +static void +g4x_fixup_plane(struct drm_i915_private *dev_priv, enum pipe pipe) +{ + u32 cntl = I915_READ(CURCNTR(pipe)); + + if ((cntl & CURSOR_MODE) == 0) { + u32 fw_bcl_self = I915_READ(FW_BLC_SELF); + + I915_WRITE(FW_BLC_SELF, fw_bcl_self & ~FW_BLC_SELF_EN); + I915_WRITE(CURCNTR(pipe), CURSOR_MODE_64_ARGB_AX); + intel_wait_for_vblank(dev_priv->dev, pipe); + I915_WRITE(CURCNTR(pipe), cntl); + I915_WRITE(CURBASE(pipe), I915_READ(CURBASE(pipe))); + I915_WRITE(FW_BLC_SELF, fw_bcl_self); + } +} + static void i9xx_crtc_enable(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; @@ -3629,6 +3653,8 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc) intel_enable_pipe(dev_priv, pipe, false); intel_enable_plane(dev_priv, plane, pipe); + if (IS_G4X(dev)) + g4x_fixup_plane(dev_priv, pipe); intel_crtc_load_lut(crtc); intel_update_fbc(dev); @@ -5745,6 +5771,11 @@ static int haswell_crtc_mode_set(struct drm_crtc *crtc, num_connectors++; } + if (is_cpu_edp) + intel_crtc->cpu_transcoder = TRANSCODER_EDP; + else + intel_crtc->cpu_transcoder = pipe; + /* We are not sure yet this won't happen. */ WARN(!HAS_PCH_LPT(dev), "Unexpected PCH type %d\n", INTEL_PCH_TYPE(dev)); @@ -5811,11 +5842,6 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, int pipe = intel_crtc->pipe; int ret; - if (IS_HASWELL(dev) && intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) - intel_crtc->cpu_transcoder = TRANSCODER_EDP; - else - intel_crtc->cpu_transcoder = pipe; - drm_vblank_pre_modeset(dev, pipe); ret = dev_priv->display.crtc_mode_set(crtc, mode, adjusted_mode, @@ -7256,8 +7282,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_framebuffer *intel_fb; - struct drm_i915_gem_object *obj; + struct drm_framebuffer *old_fb = crtc->fb; + struct drm_i915_gem_object *obj = to_intel_framebuffer(fb)->obj; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_unpin_work *work; unsigned long flags; @@ -7282,8 +7308,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, work->event = event; work->crtc = crtc; - intel_fb = to_intel_framebuffer(crtc->fb); - work->old_fb_obj = intel_fb->obj; + work->old_fb_obj = to_intel_framebuffer(old_fb)->obj; INIT_WORK(&work->work, intel_unpin_work_fn); ret = drm_vblank_get(dev, intel_crtc->pipe); @@ -7303,9 +7328,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, intel_crtc->unpin_work = work; spin_unlock_irqrestore(&dev->event_lock, flags); - intel_fb = to_intel_framebuffer(fb); - obj = intel_fb->obj; - if (atomic_read(&intel_crtc->unpin_work_count) >= 2) flush_workqueue(dev_priv->wq); @@ -7340,6 +7362,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, cleanup_pending: atomic_dec(&intel_crtc->unpin_work_count); + crtc->fb = old_fb; drm_gem_object_unreference(&work->old_fb_obj->base); drm_gem_object_unreference(&obj->base); mutex_unlock(&dev->struct_mutex); diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index f61cb7998c72..d7d4afe01341 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -353,7 +353,8 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool 
has_aux_irq) #define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0) if (has_aux_irq) - done = wait_event_timeout(dev_priv->gmbus_wait_queue, C, 10); + done = wait_event_timeout(dev_priv->gmbus_wait_queue, C, + msecs_to_jiffies(10)); else done = wait_for_atomic(C, 10) == 0; if (!done) @@ -819,6 +820,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, struct intel_link_m_n m_n; int pipe = intel_crtc->pipe; enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder; + int target_clock; /* * Find the lane count in the intel_encoder private @@ -834,13 +836,22 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, } } + target_clock = mode->clock; + for_each_encoder_on_crtc(dev, crtc, intel_encoder) { + if (intel_encoder->type == INTEL_OUTPUT_EDP) { + target_clock = intel_edp_target_clock(intel_encoder, + mode); + break; + } + } + /* * Compute the GMCH and Link ratios. The '3' here is * the number of bytes_per_pixel post-LUT, which we always * set up for 8-bits of R/G/B, or 3 bytes total. */ intel_link_compute_m_n(intel_crtc->bpp, lane_count, - mode->clock, adjusted_mode->clock, &m_n); + target_clock, adjusted_mode->clock, &m_n); if (IS_HASWELL(dev)) { I915_WRITE(PIPE_DATA_M1(cpu_transcoder), @@ -1929,7 +1940,7 @@ intel_dp_start_link_train(struct intel_dp *intel_dp) for (i = 0; i < intel_dp->lane_count; i++) if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0) break; - if (i == intel_dp->lane_count && voltage_tries == 5) { + if (i == intel_dp->lane_count) { ++loop_tries; if (loop_tries == 5) { DRM_DEBUG_KMS("too many full retries, give up\n"); diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c index acf8aec9ada7..ef4744e1bf0b 100644 --- a/drivers/gpu/drm/i915/intel_i2c.c +++ b/drivers/gpu/drm/i915/intel_i2c.c @@ -203,7 +203,13 @@ intel_gpio_setup(struct intel_gmbus *bus, u32 pin) algo->data = bus; } -#define HAS_GMBUS_IRQ(dev) (INTEL_INFO(dev)->gen >= 4) +/* + * gmbus on gen4 seems to be able to generate legacy interrupts even when in MSI + * mode. This results in spurious interrupt warnings if the legacy irq no. is + * shared with another device. The kernel then disables that interrupt source + * and so prevents the other device from working properly. + */ +#define HAS_GMBUS_IRQ(dev) (INTEL_INFO(dev)->gen >= 5) static int gmbus_wait_hw_status(struct drm_i915_private *dev_priv, u32 gmbus2_status, @@ -214,6 +220,9 @@ gmbus_wait_hw_status(struct drm_i915_private *dev_priv, u32 gmbus2 = 0; DEFINE_WAIT(wait); + if (!HAS_GMBUS_IRQ(dev_priv->dev)) + gmbus4_irq_en = 0; + /* Important: The hw handles only the first bit, so set only one! Since * we also need to check for NAKs besides the hw ready/idle signal, we * need to wake up periodically and check that ourselves. */ diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index a3730e0289e5..bee8cb6108a7 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c @@ -321,9 +321,6 @@ void intel_panel_enable_backlight(struct drm_device *dev, if (dev_priv->backlight_level == 0) dev_priv->backlight_level = intel_panel_get_max_backlight(dev); - dev_priv->backlight_enabled = true; - intel_panel_actually_set_backlight(dev, dev_priv->backlight_level); - if (INTEL_INFO(dev)->gen >= 4) { uint32_t reg, tmp; @@ -359,12 +356,12 @@ void intel_panel_enable_backlight(struct drm_device *dev, } set_level: - /* Check the current backlight level and try to set again if it's zero. 
- * On some machines, BLC_PWM_CPU_CTL is cleared to zero automatically - * when BLC_PWM_CPU_CTL2 and BLC_PWM_PCH_CTL1 are written. + /* Call below after setting BLC_PWM_CPU_CTL2 and BLC_PWM_PCH_CTL1. + * BLC_PWM_CPU_CTL may be cleared to zero automatically when these + * registers are set. */ - if (!intel_panel_get_backlight(dev)) - intel_panel_actually_set_backlight(dev, dev_priv->backlight_level); + dev_priv->backlight_enabled = true; + intel_panel_actually_set_backlight(dev, dev_priv->backlight_level); } static void intel_panel_init_backlight(struct drm_device *dev) diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 61fee7fcdc2c..adca00783e61 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -2574,7 +2574,7 @@ static void gen6_enable_rps(struct drm_device *dev) I915_WRITE(GEN6_RC_SLEEP, 0); I915_WRITE(GEN6_RC1e_THRESHOLD, 1000); I915_WRITE(GEN6_RC6_THRESHOLD, 50000); - I915_WRITE(GEN6_RC6p_THRESHOLD, 100000); + I915_WRITE(GEN6_RC6p_THRESHOLD, 150000); I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */ /* Check if we are enabling RC6 */ @@ -4079,6 +4079,9 @@ void intel_set_power_well(struct drm_device *dev, bool enable) if (!IS_HASWELL(dev)) return; + if (!i915_disable_power_well && !enable) + return; + tmp = I915_READ(HSW_PWR_WELL_DRIVER); is_enabled = tmp & HSW_PWR_WELL_STATE; enable_requested = tmp & HSW_PWR_WELL_ENABLE; diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h index 5ea5033eae0a..4d932c46725d 100644 --- a/drivers/gpu/drm/mgag200/mgag200_drv.h +++ b/drivers/gpu/drm/mgag200/mgag200_drv.h @@ -112,7 +112,6 @@ struct mga_framebuffer { struct mga_fbdev { struct drm_fb_helper helper; struct mga_framebuffer mfb; - struct list_head fbdev_list; void *sysram; int size; struct ttm_bo_kmap_obj mapping; diff --git a/drivers/gpu/drm/mgag200/mgag200_i2c.c b/drivers/gpu/drm/mgag200/mgag200_i2c.c index 5a88ec51b513..d3dcf54e6233 100644 --- a/drivers/gpu/drm/mgag200/mgag200_i2c.c +++ b/drivers/gpu/drm/mgag200/mgag200_i2c.c @@ -92,6 +92,7 @@ struct mga_i2c_chan *mgag200_i2c_create(struct drm_device *dev) int ret; int data, clock; + WREG_DAC(MGA1064_GEN_IO_CTL2, 1); WREG_DAC(MGA1064_GEN_IO_DATA, 0xff); WREG_DAC(MGA1064_GEN_IO_CTL, 0); diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c index d3d99a28ddef..fe22bb780e1d 100644 --- a/drivers/gpu/drm/mgag200/mgag200_mode.c +++ b/drivers/gpu/drm/mgag200/mgag200_mode.c @@ -382,19 +382,19 @@ static int mga_g200eh_set_plls(struct mga_device *mdev, long clock) m = n = p = 0; vcomax = 800000; vcomin = 400000; - pllreffreq = 3333; + pllreffreq = 33333; delta = 0xffffffff; permitteddelta = clock * 5 / 1000; - for (testp = 16; testp > 0; testp--) { + for (testp = 16; testp > 0; testp >>= 1) { if (clock * testp > vcomax) continue; if (clock * testp < vcomin) continue; for (testm = 1; testm < 33; testm++) { - for (testn = 1; testn < 257; testn++) { + for (testn = 17; testn < 257; testn++) { computed = (pllreffreq * testn) / (testm * testp); if (computed > clock) @@ -404,11 +404,11 @@ static int mga_g200eh_set_plls(struct mga_device *mdev, long clock) if (tmpdelta < delta) { delta = tmpdelta; n = testn - 1; - m = (testm - 1) | ((n >> 1) & 0x80); + m = (testm - 1); p = testp - 1; } if ((clock * testp) >= 600000) - p |= 80; + p |= 0x80; } } } @@ -1406,6 +1406,14 @@ static int mga_vga_get_modes(struct drm_connector *connector) static int mga_vga_mode_valid(struct drm_connector *connector, struct drm_display_mode 
*mode) { + struct drm_device *dev = connector->dev; + struct mga_device *mdev = (struct mga_device*)dev->dev_private; + struct mga_fbdev *mfbdev = mdev->mfbdev; + struct drm_fb_helper *fb_helper = &mfbdev->helper; + struct drm_fb_helper_connector *fb_helper_conn = NULL; + int bpp = 32; + int i = 0; + /* FIXME: Add bandwidth and g200se limitations */ if (mode->crtc_hdisplay > 2048 || mode->crtc_hsync_start > 4096 || @@ -1415,6 +1423,25 @@ static int mga_vga_mode_valid(struct drm_connector *connector, return MODE_BAD; } + /* Validate the mode input by the user */ + for (i = 0; i < fb_helper->connector_count; i++) { + if (fb_helper->connector_info[i]->connector == connector) { + /* Found the helper for this connector */ + fb_helper_conn = fb_helper->connector_info[i]; + if (fb_helper_conn->cmdline_mode.specified) { + if (fb_helper_conn->cmdline_mode.bpp_specified) { + bpp = fb_helper_conn->cmdline_mode.bpp; + } + } + } + } + + if ((mode->hdisplay * mode->vdisplay * (bpp/8)) > mdev->mc.vram_size) { + if (fb_helper_conn) + fb_helper_conn->cmdline_mode.specified = false; + return MODE_BAD; + } + return MODE_OK; } diff --git a/drivers/gpu/drm/nouveau/core/core/object.c b/drivers/gpu/drm/nouveau/core/core/object.c index 0daab62ea14c..3b2e7b6304d3 100644 --- a/drivers/gpu/drm/nouveau/core/core/object.c +++ b/drivers/gpu/drm/nouveau/core/core/object.c @@ -278,7 +278,6 @@ nouveau_object_del(struct nouveau_object *client, u32 _parent, u32 _handle) struct nouveau_object *parent = NULL; struct nouveau_object *namedb = NULL; struct nouveau_handle *handle = NULL; - int ret = -EINVAL; parent = nouveau_handle_ref(client, _parent); if (!parent) @@ -295,7 +294,7 @@ nouveau_object_del(struct nouveau_object *client, u32 _parent, u32 _handle) } nouveau_object_ref(NULL, &parent); - return ret; + return handle ? 
0 : -EINVAL; } int diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c index 5fa13267bd9f..02e369f80449 100644 --- a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c +++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c @@ -544,13 +544,13 @@ nv50_disp_curs_ofuncs = { static void nv50_disp_base_vblank_enable(struct nouveau_event *event, int head) { - nv_mask(event->priv, 0x61002c, (1 << head), (1 << head)); + nv_mask(event->priv, 0x61002c, (4 << head), (4 << head)); } static void nv50_disp_base_vblank_disable(struct nouveau_event *event, int head) { - nv_mask(event->priv, 0x61002c, (1 << head), (0 << head)); + nv_mask(event->priv, 0x61002c, (4 << head), 0); } static int diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nve0.c b/drivers/gpu/drm/nouveau/core/engine/graph/nve0.c index 61cec0f6ff1c..4857f913efdd 100644 --- a/drivers/gpu/drm/nouveau/core/engine/graph/nve0.c +++ b/drivers/gpu/drm/nouveau/core/engine/graph/nve0.c @@ -350,7 +350,7 @@ nve0_graph_init_gpc_0(struct nvc0_graph_priv *priv) nv_wr32(priv, GPC_UNIT(gpc, 0x0918), magicgpc918); } - nv_wr32(priv, GPC_BCAST(0x1bd4), magicgpc918); + nv_wr32(priv, GPC_BCAST(0x3fd4), magicgpc918); nv_wr32(priv, GPC_BCAST(0x08ac), nv_rd32(priv, 0x100800)); } diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/therm.h b/drivers/gpu/drm/nouveau/core/include/subdev/therm.h index 6b17b614629f..0b20fc0d19c1 100644 --- a/drivers/gpu/drm/nouveau/core/include/subdev/therm.h +++ b/drivers/gpu/drm/nouveau/core/include/subdev/therm.h @@ -4,7 +4,7 @@ #include <core/device.h> #include <core/subdev.h> -enum nouveau_therm_mode { +enum nouveau_therm_fan_mode { NOUVEAU_THERM_CTRL_NONE = 0, NOUVEAU_THERM_CTRL_MANUAL = 1, NOUVEAU_THERM_CTRL_AUTO = 2, diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c index 2cc1e6a5eb6a..9c41b58d57e2 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c +++ b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c @@ -869,7 +869,7 @@ init_idx_addr_latched(struct nvbios_init *init) init->offset += 2; init_wr32(init, dreg, idata); - init_mask(init, creg, ~mask, data | idata); + init_mask(init, creg, ~mask, data | iaddr); } } diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c index a114a0ed7e98..2e98e8a3f1aa 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c +++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c @@ -142,6 +142,7 @@ nouveau_i2c_port_create_(struct nouveau_object *parent, /* drop port's i2c subdev refcount, i2c handles this itself */ if (ret == 0) { list_add_tail(&port->head, &i2c->ports); + atomic_dec(&parent->refcount); atomic_dec(&engine->refcount); } diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/base.c b/drivers/gpu/drm/nouveau/core/subdev/therm/base.c index f794dc89a3b2..a00a5a76e2d6 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/therm/base.c +++ b/drivers/gpu/drm/nouveau/core/subdev/therm/base.c @@ -134,7 +134,7 @@ nouveau_therm_alarm(struct nouveau_alarm *alarm) } int -nouveau_therm_mode(struct nouveau_therm *therm, int mode) +nouveau_therm_fan_mode(struct nouveau_therm *therm, int mode) { struct nouveau_therm_priv *priv = (void *)therm; struct nouveau_device *device = nv_device(therm); @@ -149,10 +149,15 @@ nouveau_therm_mode(struct nouveau_therm *therm, int mode) (mode != NOUVEAU_THERM_CTRL_NONE && device->card_type >= NV_C0)) return -EINVAL; + /* do not allow automatic fan management if the thermal sensor is + * 
not available */ + if (priv->mode == 2 && therm->temp_get(therm) < 0) + return -EINVAL; + if (priv->mode == mode) return 0; - nv_info(therm, "Thermal management: %s\n", name[mode]); + nv_info(therm, "fan management: %s\n", name[mode]); nouveau_therm_update(therm, mode); return 0; } @@ -213,7 +218,7 @@ nouveau_therm_attr_set(struct nouveau_therm *therm, priv->fan->bios.max_duty = value; return 0; case NOUVEAU_THERM_ATTR_FAN_MODE: - return nouveau_therm_mode(therm, value); + return nouveau_therm_fan_mode(therm, value); case NOUVEAU_THERM_ATTR_THRS_FAN_BOOST: priv->bios_sensor.thrs_fan_boost.temp = value; priv->sensor.program_alarms(therm); @@ -263,7 +268,7 @@ _nouveau_therm_init(struct nouveau_object *object) return ret; if (priv->suspend >= 0) - nouveau_therm_mode(therm, priv->mode); + nouveau_therm_fan_mode(therm, priv->mode); priv->sensor.program_alarms(therm); return 0; } @@ -313,11 +318,12 @@ nouveau_therm_create_(struct nouveau_object *parent, int nouveau_therm_preinit(struct nouveau_therm *therm) { - nouveau_therm_ic_ctor(therm); nouveau_therm_sensor_ctor(therm); + nouveau_therm_ic_ctor(therm); nouveau_therm_fan_ctor(therm); - nouveau_therm_mode(therm, NOUVEAU_THERM_CTRL_NONE); + nouveau_therm_fan_mode(therm, NOUVEAU_THERM_CTRL_NONE); + nouveau_therm_sensor_preinit(therm); return 0; } diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c b/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c index e24090bac195..8b3adec5fbb1 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c +++ b/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c @@ -32,6 +32,7 @@ probe_monitoring_device(struct nouveau_i2c_port *i2c, struct i2c_board_info *info) { struct nouveau_therm_priv *priv = (void *)nouveau_therm(i2c); + struct nvbios_therm_sensor *sensor = &priv->bios_sensor; struct i2c_client *client; request_module("%s%s", I2C_MODULE_PREFIX, info->type); @@ -46,8 +47,9 @@ probe_monitoring_device(struct nouveau_i2c_port *i2c, } nv_info(priv, - "Found an %s at address 0x%x (controlled by lm_sensors)\n", - info->type, info->addr); + "Found an %s at address 0x%x (controlled by lm_sensors, " + "temp offset %+i C)\n", + info->type, info->addr, sensor->offset_constant); priv->ic = client; return true; diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/nv40.c b/drivers/gpu/drm/nouveau/core/subdev/therm/nv40.c index 0f5363edb964..a70d1b7e397b 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/therm/nv40.c +++ b/drivers/gpu/drm/nouveau/core/subdev/therm/nv40.c @@ -29,54 +29,83 @@ struct nv40_therm_priv { struct nouveau_therm_priv base; }; +enum nv40_sensor_style { INVALID_STYLE = -1, OLD_STYLE = 0, NEW_STYLE = 1 }; + +static enum nv40_sensor_style +nv40_sensor_style(struct nouveau_therm *therm) +{ + struct nouveau_device *device = nv_device(therm); + + switch (device->chipset) { + case 0x43: + case 0x44: + case 0x4a: + case 0x47: + return OLD_STYLE; + + case 0x46: + case 0x49: + case 0x4b: + case 0x4e: + case 0x4c: + case 0x67: + case 0x68: + case 0x63: + return NEW_STYLE; + default: + return INVALID_STYLE; + } +} + static int nv40_sensor_setup(struct nouveau_therm *therm) { - struct nouveau_device *device = nv_device(therm); + enum nv40_sensor_style style = nv40_sensor_style(therm); /* enable ADC readout and disable the ALARM threshold */ - if (device->chipset >= 0x46) { + if (style == NEW_STYLE) { nv_mask(therm, 0x15b8, 0x80000000, 0); nv_wr32(therm, 0x15b0, 0x80003fff); - mdelay(10); /* wait for the temperature to stabilize */ + mdelay(20); /* wait for the temperature to stabilize */ return nv_rd32(therm, 
0x15b4) & 0x3fff; - } else { + } else if (style == OLD_STYLE) { nv_wr32(therm, 0x15b0, 0xff); + mdelay(20); /* wait for the temperature to stabilize */ return nv_rd32(therm, 0x15b4) & 0xff; - } + } else + return -ENODEV; } static int nv40_temp_get(struct nouveau_therm *therm) { struct nouveau_therm_priv *priv = (void *)therm; - struct nouveau_device *device = nv_device(therm); struct nvbios_therm_sensor *sensor = &priv->bios_sensor; + enum nv40_sensor_style style = nv40_sensor_style(therm); int core_temp; - if (device->chipset >= 0x46) { + if (style == NEW_STYLE) { nv_wr32(therm, 0x15b0, 0x80003fff); core_temp = nv_rd32(therm, 0x15b4) & 0x3fff; - } else { + } else if (style == OLD_STYLE) { nv_wr32(therm, 0x15b0, 0xff); core_temp = nv_rd32(therm, 0x15b4) & 0xff; - } - - /* Setup the sensor if the temperature is 0 */ - if (core_temp == 0) - core_temp = nv40_sensor_setup(therm); + } else + return -ENODEV; - if (sensor->slope_div == 0) - sensor->slope_div = 1; - if (sensor->offset_den == 0) - sensor->offset_den = 1; - if (sensor->slope_mult < 1) - sensor->slope_mult = 1; + /* if the slope or the offset is unset, do no use the sensor */ + if (!sensor->slope_div || !sensor->slope_mult || + !sensor->offset_num || !sensor->offset_den) + return -ENODEV; core_temp = core_temp * sensor->slope_mult / sensor->slope_div; core_temp = core_temp + sensor->offset_num / sensor->offset_den; core_temp = core_temp + sensor->offset_constant - 8; + /* reserve negative temperatures for errors */ + if (core_temp < 0) + core_temp = 0; + return core_temp; } diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h b/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h index 06b98706b3fc..438d9824b774 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h +++ b/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h @@ -102,7 +102,7 @@ struct nouveau_therm_priv { struct i2c_client *ic; }; -int nouveau_therm_mode(struct nouveau_therm *therm, int mode); +int nouveau_therm_fan_mode(struct nouveau_therm *therm, int mode); int nouveau_therm_attr_get(struct nouveau_therm *therm, enum nouveau_therm_attr_type type); int nouveau_therm_attr_set(struct nouveau_therm *therm, @@ -122,6 +122,7 @@ int nouveau_therm_fan_sense(struct nouveau_therm *therm); int nouveau_therm_preinit(struct nouveau_therm *); +void nouveau_therm_sensor_preinit(struct nouveau_therm *); void nouveau_therm_sensor_set_threshold_state(struct nouveau_therm *therm, enum nouveau_therm_thrs thrs, enum nouveau_therm_thrs_state st); diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c b/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c index b37624af8297..470f6a47b656 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c +++ b/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c @@ -34,10 +34,6 @@ nouveau_therm_temp_set_defaults(struct nouveau_therm *therm) { struct nouveau_therm_priv *priv = (void *)therm; - priv->bios_sensor.slope_mult = 1; - priv->bios_sensor.slope_div = 1; - priv->bios_sensor.offset_num = 0; - priv->bios_sensor.offset_den = 1; priv->bios_sensor.offset_constant = 0; priv->bios_sensor.thrs_fan_boost.temp = 90; @@ -60,11 +56,6 @@ nouveau_therm_temp_safety_checks(struct nouveau_therm *therm) struct nouveau_therm_priv *priv = (void *)therm; struct nvbios_therm_sensor *s = &priv->bios_sensor; - if (!priv->bios_sensor.slope_div) - priv->bios_sensor.slope_div = 1; - if (!priv->bios_sensor.offset_den) - priv->bios_sensor.offset_den = 1; - /* enforce a minimum hysteresis on thresholds */ s->thrs_fan_boost.hysteresis = max_t(u8, 
s->thrs_fan_boost.hysteresis, 2); s->thrs_down_clock.hysteresis = max_t(u8, s->thrs_down_clock.hysteresis, 2); @@ -106,16 +97,16 @@ void nouveau_therm_sensor_event(struct nouveau_therm *therm, const char *thresolds[] = { "fanboost", "downclock", "critical", "shutdown" }; - uint8_t temperature = therm->temp_get(therm); + int temperature = therm->temp_get(therm); if (thrs < 0 || thrs > 3) return; if (dir == NOUVEAU_THERM_THRS_FALLING) - nv_info(therm, "temperature (%u C) went below the '%s' threshold\n", + nv_info(therm, "temperature (%i C) went below the '%s' threshold\n", temperature, thresolds[thrs]); else - nv_info(therm, "temperature (%u C) hit the '%s' threshold\n", + nv_info(therm, "temperature (%i C) hit the '%s' threshold\n", temperature, thresolds[thrs]); active = (dir == NOUVEAU_THERM_THRS_RISING); @@ -123,7 +114,7 @@ void nouveau_therm_sensor_event(struct nouveau_therm *therm, case NOUVEAU_THERM_THRS_FANBOOST: if (active) { nouveau_therm_fan_set(therm, true, 100); - nouveau_therm_mode(therm, NOUVEAU_THERM_CTRL_AUTO); + nouveau_therm_fan_mode(therm, NOUVEAU_THERM_CTRL_AUTO); } break; case NOUVEAU_THERM_THRS_DOWNCLOCK: @@ -202,7 +193,7 @@ alarm_timer_callback(struct nouveau_alarm *alarm) NOUVEAU_THERM_THRS_SHUTDOWN); /* schedule the next poll in one second */ - if (list_empty(&alarm->head)) + if (therm->temp_get(therm) >= 0 && list_empty(&alarm->head)) ptimer->alarm(ptimer, 1000 * 1000 * 1000, alarm); spin_unlock_irqrestore(&priv->sensor.alarm_program_lock, flags); @@ -225,6 +216,17 @@ nouveau_therm_program_alarms_polling(struct nouveau_therm *therm) alarm_timer_callback(&priv->sensor.therm_poll_alarm); } +void +nouveau_therm_sensor_preinit(struct nouveau_therm *therm) +{ + const char *sensor_avail = "yes"; + + if (therm->temp_get(therm) < 0) + sensor_avail = "no"; + + nv_info(therm, "internal sensor: %s\n", sensor_avail); +} + int nouveau_therm_sensor_ctor(struct nouveau_therm *therm) { diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c index 41241922263f..3b6dc883e150 100644 --- a/drivers/gpu/drm/nouveau/nouveau_abi16.c +++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c @@ -116,6 +116,11 @@ nouveau_abi16_chan_fini(struct nouveau_abi16 *abi16, { struct nouveau_abi16_ntfy *ntfy, *temp; + /* wait for all activity to stop before releasing notify object, which + * may be still in use */ + if (chan->chan && chan->ntfy) + nouveau_channel_idle(chan->chan); + /* cleanup notifier state */ list_for_each_entry_safe(ntfy, temp, &chan->notifiers, head) { nouveau_abi16_ntfy_fini(chan, ntfy); diff --git a/drivers/gpu/drm/nouveau/nouveau_agp.c b/drivers/gpu/drm/nouveau/nouveau_agp.c index d28430cd2ba6..6e7a55f93a85 100644 --- a/drivers/gpu/drm/nouveau/nouveau_agp.c +++ b/drivers/gpu/drm/nouveau/nouveau_agp.c @@ -47,6 +47,18 @@ nouveau_agp_enabled(struct nouveau_drm *drm) if (drm->agp.stat == UNKNOWN) { if (!nouveau_agpmode) return false; +#ifdef __powerpc__ + /* Disable AGP by default on all PowerPC machines for + * now -- At least some UniNorth-2 AGP bridges are + * known to be broken: DMA from the host to the card + * works just fine, but writeback from the card to the + * host goes straight to memory untranslated bypassing + * the GATT somehow, making them quite painful to deal + * with... 
+ */ + if (nouveau_agpmode == -1) + return false; +#endif return true; } diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 11ca82148edc..7ff10711a4d0 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -801,7 +801,7 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo, stride = 16 * 4; height = amount / stride; - if (new_mem->mem_type == TTM_PL_VRAM && + if (old_mem->mem_type == TTM_PL_VRAM && nouveau_bo_tile_layout(nvbo)) { ret = RING_SPACE(chan, 8); if (ret) @@ -823,7 +823,7 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo, BEGIN_NV04(chan, NvSubCopy, 0x0200, 1); OUT_RING (chan, 1); } - if (old_mem->mem_type == TTM_PL_VRAM && + if (new_mem->mem_type == TTM_PL_VRAM && nouveau_bo_tile_layout(nvbo)) { ret = RING_SPACE(chan, 8); if (ret) diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.c b/drivers/gpu/drm/nouveau/nouveau_pm.c index bb54098c6d97..936b442a6ab7 100644 --- a/drivers/gpu/drm/nouveau/nouveau_pm.c +++ b/drivers/gpu/drm/nouveau/nouveau_pm.c @@ -402,8 +402,12 @@ nouveau_hwmon_show_temp(struct device *d, struct device_attribute *a, char *buf) struct drm_device *dev = dev_get_drvdata(d); struct nouveau_drm *drm = nouveau_drm(dev); struct nouveau_therm *therm = nouveau_therm(drm->device); + int temp = therm->temp_get(therm); - return snprintf(buf, PAGE_SIZE, "%d\n", therm->temp_get(therm) * 1000); + if (temp < 0) + return temp; + + return snprintf(buf, PAGE_SIZE, "%d\n", temp * 1000); } static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, nouveau_hwmon_show_temp, NULL, 0); @@ -871,7 +875,12 @@ static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO | S_IWUSR, nouveau_hwmon_get_pwm1_max, nouveau_hwmon_set_pwm1_max, 0); -static struct attribute *hwmon_attributes[] = { +static struct attribute *hwmon_default_attributes[] = { + &sensor_dev_attr_name.dev_attr.attr, + &sensor_dev_attr_update_rate.dev_attr.attr, + NULL +}; +static struct attribute *hwmon_temp_attributes[] = { &sensor_dev_attr_temp1_input.dev_attr.attr, &sensor_dev_attr_temp1_auto_point1_pwm.dev_attr.attr, &sensor_dev_attr_temp1_auto_point1_temp.dev_attr.attr, @@ -882,8 +891,6 @@ static struct attribute *hwmon_attributes[] = { &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr, &sensor_dev_attr_temp1_emergency.dev_attr.attr, &sensor_dev_attr_temp1_emergency_hyst.dev_attr.attr, - &sensor_dev_attr_name.dev_attr.attr, - &sensor_dev_attr_update_rate.dev_attr.attr, NULL }; static struct attribute *hwmon_fan_rpm_attributes[] = { @@ -898,8 +905,11 @@ static struct attribute *hwmon_pwm_fan_attributes[] = { NULL }; -static const struct attribute_group hwmon_attrgroup = { - .attrs = hwmon_attributes, +static const struct attribute_group hwmon_default_attrgroup = { + .attrs = hwmon_default_attributes, +}; +static const struct attribute_group hwmon_temp_attrgroup = { + .attrs = hwmon_temp_attributes, }; static const struct attribute_group hwmon_fan_rpm_attrgroup = { .attrs = hwmon_fan_rpm_attributes, @@ -931,13 +941,22 @@ nouveau_hwmon_init(struct drm_device *dev) } dev_set_drvdata(hwmon_dev, dev); - /* default sysfs entries */ - ret = sysfs_create_group(&hwmon_dev->kobj, &hwmon_attrgroup); + /* set the default attributes */ + ret = sysfs_create_group(&hwmon_dev->kobj, &hwmon_default_attrgroup); if (ret) { if (ret) goto error; } + /* if the card has a working thermal sensor */ + if (therm->temp_get(therm) >= 0) { + ret = sysfs_create_group(&hwmon_dev->kobj, &hwmon_temp_attrgroup); + if (ret) { + if (ret) + goto 
error; + } + } + /* if the card has a pwm fan */ /*XXX: incorrect, need better detection for this, some boards have * the gpio entries for pwm fan control even when there's no @@ -979,11 +998,10 @@ nouveau_hwmon_fini(struct drm_device *dev) struct nouveau_pm *pm = nouveau_pm(dev); if (pm->hwmon) { - sysfs_remove_group(&pm->hwmon->kobj, &hwmon_attrgroup); - sysfs_remove_group(&pm->hwmon->kobj, - &hwmon_pwm_fan_attrgroup); - sysfs_remove_group(&pm->hwmon->kobj, - &hwmon_fan_rpm_attrgroup); + sysfs_remove_group(&pm->hwmon->kobj, &hwmon_default_attrgroup); + sysfs_remove_group(&pm->hwmon->kobj, &hwmon_temp_attrgroup); + sysfs_remove_group(&pm->hwmon->kobj, &hwmon_pwm_fan_attrgroup); + sysfs_remove_group(&pm->hwmon->kobj, &hwmon_fan_rpm_attrgroup); hwmon_device_unregister(pm->hwmon); } diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c index a6237c9cbbc3..7f0e6c3f37d1 100644 --- a/drivers/gpu/drm/nouveau/nv50_display.c +++ b/drivers/gpu/drm/nouveau/nv50_display.c @@ -55,9 +55,9 @@ /* offsets in shared sync bo of various structures */ #define EVO_SYNC(c, o) ((c) * 0x0100 + (o)) -#define EVO_MAST_NTFY EVO_SYNC( 0, 0x00) -#define EVO_FLIP_SEM0(c) EVO_SYNC((c), 0x00) -#define EVO_FLIP_SEM1(c) EVO_SYNC((c), 0x10) +#define EVO_MAST_NTFY EVO_SYNC( 0, 0x00) +#define EVO_FLIP_SEM0(c) EVO_SYNC((c) + 1, 0x00) +#define EVO_FLIP_SEM1(c) EVO_SYNC((c) + 1, 0x10) #define EVO_CORE_HANDLE (0xd1500000) #define EVO_CHAN_HANDLE(t,i) (0xd15c0000 | (((t) & 0x00ff) << 8) | (i)) @@ -341,10 +341,8 @@ struct nv50_curs { struct nv50_sync { struct nv50_dmac base; - struct { - u32 offset; - u16 value; - } sem; + u32 addr; + u32 data; }; struct nv50_ovly { @@ -471,13 +469,33 @@ nv50_display_crtc_sema(struct drm_device *dev, int crtc) return nv50_disp(dev)->sync; } +struct nv50_display_flip { + struct nv50_disp *disp; + struct nv50_sync *chan; +}; + +static bool +nv50_display_flip_wait(void *data) +{ + struct nv50_display_flip *flip = data; + if (nouveau_bo_rd32(flip->disp->sync, flip->chan->addr / 4) == + flip->chan->data); + return true; + usleep_range(1, 2); + return false; +} + void nv50_display_flip_stop(struct drm_crtc *crtc) { - struct nv50_sync *sync = nv50_sync(crtc); + struct nouveau_device *device = nouveau_dev(crtc->dev); + struct nv50_display_flip flip = { + .disp = nv50_disp(crtc->dev), + .chan = nv50_sync(crtc), + }; u32 *push; - push = evo_wait(sync, 8); + push = evo_wait(flip.chan, 8); if (push) { evo_mthd(push, 0x0084, 1); evo_data(push, 0x00000000); @@ -487,8 +505,10 @@ nv50_display_flip_stop(struct drm_crtc *crtc) evo_data(push, 0x00000000); evo_mthd(push, 0x0080, 1); evo_data(push, 0x00000000); - evo_kick(push, sync); + evo_kick(push, flip.chan); } + + nv_wait_cb(device, nv50_display_flip_wait, &flip); } int @@ -496,73 +516,78 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb, struct nouveau_channel *chan, u32 swap_interval) { struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb); - struct nv50_disp *disp = nv50_disp(crtc->dev); struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); struct nv50_sync *sync = nv50_sync(crtc); + int head = nv_crtc->index, ret; u32 *push; - int ret; swap_interval <<= 4; if (swap_interval == 0) swap_interval |= 0x100; + if (chan == NULL) + evo_sync(crtc->dev); push = evo_wait(sync, 128); if (unlikely(push == NULL)) return -EBUSY; - /* synchronise with the rendering channel, if necessary */ - if (likely(chan)) { + if (chan && nv_mclass(chan->object) < NV84_CHANNEL_IND_CLASS) { + ret = RING_SPACE(chan, 8); + 
if (ret) + return ret; + + BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 2); + OUT_RING (chan, NvEvoSema0 + head); + OUT_RING (chan, sync->addr ^ 0x10); + BEGIN_NV04(chan, 0, NV11_SUBCHAN_SEMAPHORE_RELEASE, 1); + OUT_RING (chan, sync->data + 1); + BEGIN_NV04(chan, 0, NV11_SUBCHAN_SEMAPHORE_OFFSET, 2); + OUT_RING (chan, sync->addr); + OUT_RING (chan, sync->data); + } else + if (chan && nv_mclass(chan->object) < NVC0_CHANNEL_IND_CLASS) { + u64 addr = nv84_fence_crtc(chan, head) + sync->addr; + ret = RING_SPACE(chan, 12); + if (ret) + return ret; + + BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1); + OUT_RING (chan, chan->vram); + BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4); + OUT_RING (chan, upper_32_bits(addr ^ 0x10)); + OUT_RING (chan, lower_32_bits(addr ^ 0x10)); + OUT_RING (chan, sync->data + 1); + OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_WRITE_LONG); + BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4); + OUT_RING (chan, upper_32_bits(addr)); + OUT_RING (chan, lower_32_bits(addr)); + OUT_RING (chan, sync->data); + OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_EQUAL); + } else + if (chan) { + u64 addr = nv84_fence_crtc(chan, head) + sync->addr; ret = RING_SPACE(chan, 10); if (ret) return ret; - if (nv_mclass(chan->object) < NV84_CHANNEL_IND_CLASS) { - BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 2); - OUT_RING (chan, NvEvoSema0 + nv_crtc->index); - OUT_RING (chan, sync->sem.offset); - BEGIN_NV04(chan, 0, NV11_SUBCHAN_SEMAPHORE_RELEASE, 1); - OUT_RING (chan, 0xf00d0000 | sync->sem.value); - BEGIN_NV04(chan, 0, NV11_SUBCHAN_SEMAPHORE_OFFSET, 2); - OUT_RING (chan, sync->sem.offset ^ 0x10); - OUT_RING (chan, 0x74b1e000); - BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1); - OUT_RING (chan, NvSema); - } else - if (nv_mclass(chan->object) < NVC0_CHANNEL_IND_CLASS) { - u64 offset = nv84_fence_crtc(chan, nv_crtc->index); - offset += sync->sem.offset; - - BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4); - OUT_RING (chan, upper_32_bits(offset)); - OUT_RING (chan, lower_32_bits(offset)); - OUT_RING (chan, 0xf00d0000 | sync->sem.value); - OUT_RING (chan, 0x00000002); - BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4); - OUT_RING (chan, upper_32_bits(offset)); - OUT_RING (chan, lower_32_bits(offset ^ 0x10)); - OUT_RING (chan, 0x74b1e000); - OUT_RING (chan, 0x00000001); - } else { - u64 offset = nv84_fence_crtc(chan, nv_crtc->index); - offset += sync->sem.offset; - - BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4); - OUT_RING (chan, upper_32_bits(offset)); - OUT_RING (chan, lower_32_bits(offset)); - OUT_RING (chan, 0xf00d0000 | sync->sem.value); - OUT_RING (chan, 0x00001002); - BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4); - OUT_RING (chan, upper_32_bits(offset)); - OUT_RING (chan, lower_32_bits(offset ^ 0x10)); - OUT_RING (chan, 0x74b1e000); - OUT_RING (chan, 0x00001001); - } + BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4); + OUT_RING (chan, upper_32_bits(addr ^ 0x10)); + OUT_RING (chan, lower_32_bits(addr ^ 0x10)); + OUT_RING (chan, sync->data + 1); + OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_WRITE_LONG | + NVC0_SUBCHAN_SEMAPHORE_TRIGGER_YIELD); + BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4); + OUT_RING (chan, upper_32_bits(addr)); + OUT_RING (chan, lower_32_bits(addr)); + OUT_RING (chan, sync->data); + OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_EQUAL | + NVC0_SUBCHAN_SEMAPHORE_TRIGGER_YIELD); + } + if (chan) { + sync->addr ^= 0x10; + sync->data++; 
FIRE_RING (chan); - } else { - nouveau_bo_wr32(disp->sync, sync->sem.offset / 4, - 0xf00d0000 | sync->sem.value); - evo_sync(crtc->dev); } /* queue the flip */ @@ -575,9 +600,9 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb, evo_data(push, 0x40000000); } evo_mthd(push, 0x0088, 4); - evo_data(push, sync->sem.offset); - evo_data(push, 0xf00d0000 | sync->sem.value); - evo_data(push, 0x74b1e000); + evo_data(push, sync->addr); + evo_data(push, sync->data++); + evo_data(push, sync->data); evo_data(push, NvEvoSync); evo_mthd(push, 0x00a0, 2); evo_data(push, 0x00000000); @@ -605,9 +630,6 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb, evo_mthd(push, 0x0080, 1); evo_data(push, 0x00000000); evo_kick(push, sync); - - sync->sem.offset ^= 0x10; - sync->sem.value++; return 0; } @@ -1379,7 +1401,8 @@ nv50_crtc_create(struct drm_device *dev, struct nouveau_object *core, int index) if (ret) goto out; - head->sync.sem.offset = EVO_SYNC(1 + index, 0x00); + head->sync.addr = EVO_FLIP_SEM0(index); + head->sync.data = 0x00000000; /* allocate overlay resources */ ret = nv50_pioc_create(disp->core, NV50_DISP_OIMM_CLASS, index, @@ -2112,15 +2135,23 @@ nv50_display_fini(struct drm_device *dev) int nv50_display_init(struct drm_device *dev) { - u32 *push = evo_wait(nv50_mast(dev), 32); - if (push) { - evo_mthd(push, 0x0088, 1); - evo_data(push, NvEvoSync); - evo_kick(push, nv50_mast(dev)); - return 0; + struct nv50_disp *disp = nv50_disp(dev); + struct drm_crtc *crtc; + u32 *push; + + push = evo_wait(nv50_mast(dev), 32); + if (!push) + return -EBUSY; + + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + struct nv50_sync *sync = nv50_sync(crtc); + nouveau_bo_wr32(disp->sync, sync->addr / 4, sync->data); } - return -EBUSY; + evo_mthd(push, 0x0088, 1); + evo_data(push, NvEvoSync); + evo_kick(push, nv50_mast(dev)); + return 0; } void @@ -2245,6 +2276,7 @@ nv50_display_create(struct drm_device *dev) NV_WARN(drm, "failed to create encoder %d/%d/%d: %d\n", dcbe->location, dcbe->type, ffs(dcbe->or) - 1, ret); + ret = 0; } } diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 3c38ea46531c..305a657bf215 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -2438,6 +2438,12 @@ static u32 evergreen_gpu_check_soft_reset(struct radeon_device *rdev) if (tmp & L2_BUSY) reset_mask |= RADEON_RESET_VMC; + /* Skip MC reset as it's mostly likely not hung, just busy */ + if (reset_mask & RADEON_RESET_MC) { + DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask); + reset_mask &= ~RADEON_RESET_MC; + } + return reset_mask; } diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c index 99fb13286fd0..eb8ac315f92f 100644 --- a/drivers/gpu/drm/radeon/evergreen_cs.c +++ b/drivers/gpu/drm/radeon/evergreen_cs.c @@ -834,7 +834,7 @@ static int evergreen_cs_track_validate_texture(struct radeon_cs_parser *p, __func__, __LINE__, toffset, surf.base_align); return -EINVAL; } - if (moffset & (surf.base_align - 1)) { + if (surf.nsamples <= 1 && moffset & (surf.base_align - 1)) { dev_warn(p->dev, "%s:%d mipmap bo base %ld not aligned with %ld\n", __func__, __LINE__, moffset, surf.base_align); return -EINVAL; diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c index 7cead763be9e..27769e724b6d 100644 --- a/drivers/gpu/drm/radeon/ni.c +++ b/drivers/gpu/drm/radeon/ni.c @@ -468,13 +468,19 @@ static void cayman_gpu_init(struct radeon_device *rdev) 
(rdev->pdev->device == 0x9907) || (rdev->pdev->device == 0x9908) || (rdev->pdev->device == 0x9909) || + (rdev->pdev->device == 0x990B) || + (rdev->pdev->device == 0x990C) || + (rdev->pdev->device == 0x990F) || (rdev->pdev->device == 0x9910) || - (rdev->pdev->device == 0x9917)) { + (rdev->pdev->device == 0x9917) || + (rdev->pdev->device == 0x9999)) { rdev->config.cayman.max_simds_per_se = 6; rdev->config.cayman.max_backends_per_se = 2; } else if ((rdev->pdev->device == 0x9903) || (rdev->pdev->device == 0x9904) || (rdev->pdev->device == 0x990A) || + (rdev->pdev->device == 0x990D) || + (rdev->pdev->device == 0x990E) || (rdev->pdev->device == 0x9913) || (rdev->pdev->device == 0x9918)) { rdev->config.cayman.max_simds_per_se = 4; @@ -483,6 +489,9 @@ static void cayman_gpu_init(struct radeon_device *rdev) (rdev->pdev->device == 0x9990) || (rdev->pdev->device == 0x9991) || (rdev->pdev->device == 0x9994) || + (rdev->pdev->device == 0x9995) || + (rdev->pdev->device == 0x9996) || + (rdev->pdev->device == 0x999A) || (rdev->pdev->device == 0x99A0)) { rdev->config.cayman.max_simds_per_se = 3; rdev->config.cayman.max_backends_per_se = 1; @@ -616,11 +625,22 @@ static void cayman_gpu_init(struct radeon_device *rdev) WREG32(DMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config); WREG32(DMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config); - tmp = gb_addr_config & NUM_PIPES_MASK; - tmp = r6xx_remap_render_backend(rdev, tmp, - rdev->config.cayman.max_backends_per_se * - rdev->config.cayman.max_shader_engines, - CAYMAN_MAX_BACKENDS, disabled_rb_mask); + if ((rdev->config.cayman.max_backends_per_se == 1) && + (rdev->flags & RADEON_IS_IGP)) { + if ((disabled_rb_mask & 3) == 1) { + /* RB0 disabled, RB1 enabled */ + tmp = 0x11111111; + } else { + /* RB1 disabled, RB0 enabled */ + tmp = 0x00000000; + } + } else { + tmp = gb_addr_config & NUM_PIPES_MASK; + tmp = r6xx_remap_render_backend(rdev, tmp, + rdev->config.cayman.max_backends_per_se * + rdev->config.cayman.max_shader_engines, + CAYMAN_MAX_BACKENDS, disabled_rb_mask); + } WREG32(GB_BACKEND_MAP, tmp); cgts_tcc_disable = 0xffff0000; @@ -1381,6 +1401,12 @@ static u32 cayman_gpu_check_soft_reset(struct radeon_device *rdev) if (tmp & L2_BUSY) reset_mask |= RADEON_RESET_VMC; + /* Skip MC reset as it's mostly likely not hung, just busy */ + if (reset_mask & RADEON_RESET_MC) { + DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask); + reset_mask &= ~RADEON_RESET_MC; + } + return reset_mask; } @@ -1765,6 +1791,7 @@ int cayman_resume(struct radeon_device *rdev) int cayman_suspend(struct radeon_device *rdev) { r600_audio_fini(rdev); + radeon_vm_manager_fini(rdev); cayman_cp_enable(rdev, false); cayman_dma_stop(rdev); evergreen_irq_suspend(rdev); diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 6d4b5611daf4..0740db3fcd22 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -1394,6 +1394,12 @@ static u32 r600_gpu_check_soft_reset(struct radeon_device *rdev) if (r600_is_display_hung(rdev)) reset_mask |= RADEON_RESET_DISPLAY; + /* Skip MC reset as it's mostly likely not hung, just busy */ + if (reset_mask & RADEON_RESET_MC) { + DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask); + reset_mask &= ~RADEON_RESET_MC; + } + return reset_mask; } diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c index bedda9caadd9..6e05a2e75a46 100644 --- a/drivers/gpu/drm/radeon/radeon_benchmark.c +++ b/drivers/gpu/drm/radeon/radeon_benchmark.c @@ -122,10 +122,7 @@ static void 
radeon_benchmark_move(struct radeon_device *rdev, unsigned size, goto out_cleanup; } - /* r100 doesn't have dma engine so skip the test */ - /* also, VRAM-to-VRAM test doesn't make much sense for DMA */ - /* skip it as well if domains are the same */ - if ((rdev->asic->copy.dma) && (sdomain != ddomain)) { + if (rdev->asic->copy.dma) { time = radeon_benchmark_do_move(rdev, size, saddr, daddr, RADEON_BENCHMARK_COPY_DMA, n); if (time < 0) @@ -135,13 +132,15 @@ static void radeon_benchmark_move(struct radeon_device *rdev, unsigned size, sdomain, ddomain, "dma"); } - time = radeon_benchmark_do_move(rdev, size, saddr, daddr, - RADEON_BENCHMARK_COPY_BLIT, n); - if (time < 0) - goto out_cleanup; - if (time > 0) - radeon_benchmark_log_results(n, size, time, - sdomain, ddomain, "blit"); + if (rdev->asic->copy.blit) { + time = radeon_benchmark_do_move(rdev, size, saddr, daddr, + RADEON_BENCHMARK_COPY_BLIT, n); + if (time < 0) + goto out_cleanup; + if (time > 0) + radeon_benchmark_log_results(n, size, time, + sdomain, ddomain, "blit"); + } out_cleanup: if (sobj) { diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c index 3e403bdda58f..78edadc9e86b 100644 --- a/drivers/gpu/drm/radeon/radeon_combios.c +++ b/drivers/gpu/drm/radeon/radeon_combios.c @@ -970,6 +970,15 @@ struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct found = 1; } + /* quirks */ + /* Radeon 9100 (R200) */ + if ((dev->pdev->device == 0x514D) && + (dev->pdev->subsystem_vendor == 0x174B) && + (dev->pdev->subsystem_device == 0x7149)) { + /* vbios value is bad, use the default */ + found = 0; + } + if (!found) /* fallback to defaults */ radeon_legacy_get_primary_dac_info_from_table(rdev, p_dac); diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index 167758488ed6..66a7f0fd9620 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -70,9 +70,10 @@ * 2.27.0 - r600-SI: Add CS ioctl support for async DMA * 2.28.0 - r600-eg: Add MEM_WRITE packet support * 2.29.0 - R500 FP16 color clear registers + * 2.30.0 - fix for FMASK texturing */ #define KMS_DRIVER_MAJOR 2 -#define KMS_DRIVER_MINOR 29 +#define KMS_DRIVER_MINOR 30 #define KMS_DRIVER_PATCHLEVEL 0 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); int radeon_driver_unload_kms(struct drm_device *dev); diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c index 90374dd77960..48f80cd42d8f 100644 --- a/drivers/gpu/drm/radeon/radeon_irq_kms.c +++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c @@ -400,6 +400,9 @@ void radeon_irq_kms_enable_afmt(struct radeon_device *rdev, int block) { unsigned long irqflags; + if (!rdev->ddev->irq_enabled) + return; + spin_lock_irqsave(&rdev->irq.lock, irqflags); rdev->irq.afmt[block] = true; radeon_irq_set(rdev); @@ -419,6 +422,9 @@ void radeon_irq_kms_disable_afmt(struct radeon_device *rdev, int block) { unsigned long irqflags; + if (!rdev->ddev->irq_enabled) + return; + spin_lock_irqsave(&rdev->irq.lock, irqflags); rdev->irq.afmt[block] = false; radeon_irq_set(rdev); @@ -438,6 +444,9 @@ void radeon_irq_kms_enable_hpd(struct radeon_device *rdev, unsigned hpd_mask) unsigned long irqflags; int i; + if (!rdev->ddev->irq_enabled) + return; + spin_lock_irqsave(&rdev->irq.lock, irqflags); for (i = 0; i < RADEON_MAX_HPD_PINS; ++i) rdev->irq.hpd[i] |= !!(hpd_mask & (1 << i)); @@ -458,6 +467,9 @@ void radeon_irq_kms_disable_hpd(struct radeon_device *rdev, unsigned hpd_mask) 
 	unsigned long irqflags;
 	int i;
 
+	if (!rdev->ddev->irq_enabled)
+		return;
+
 	spin_lock_irqsave(&rdev->irq.lock, irqflags);
 	for (i = 0; i < RADEON_MAX_HPD_PINS; ++i)
 		rdev->irq.hpd[i] &= !(hpd_mask & (1 << i));
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 80979ed951eb..bafbe3216952 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -2284,6 +2284,12 @@ static u32 si_gpu_check_soft_reset(struct radeon_device *rdev)
 	if (tmp & L2_BUSY)
 		reset_mask |= RADEON_RESET_VMC;
 
+	/* Skip MC reset as it's mostly likely not hung, just busy */
+	if (reset_mask & RADEON_RESET_MC) {
+		DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
+		reset_mask &= ~RADEON_RESET_MC;
+	}
+
 	return reset_mask;
 }
 
@@ -4463,6 +4469,7 @@ int si_resume(struct radeon_device *rdev)
 
 int si_suspend(struct radeon_device *rdev)
 {
+	radeon_vm_manager_fini(rdev);
 	si_cp_enable(rdev, false);
 	cayman_dma_stop(rdev);
 	si_irq_suspend(rdev);
diff --git a/drivers/gpu/drm/tegra/Kconfig b/drivers/gpu/drm/tegra/Kconfig
index c92955df0658..be1daf7344d3 100644
--- a/drivers/gpu/drm/tegra/Kconfig
+++ b/drivers/gpu/drm/tegra/Kconfig
@@ -4,7 +4,6 @@ config DRM_TEGRA
 	select DRM_KMS_HELPER
 	select DRM_GEM_CMA_HELPER
 	select DRM_KMS_CMA_HELPER
-	select DRM_HDMI
 	select FB_CFB_FILLRECT
 	select FB_CFB_COPYAREA
 	select FB_CFB_IMAGEBLIT
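
A note on the nv40 thermal changes above: instead of patching up missing VBIOS calibration with default slope and offset values, the sensor code now bails out with -ENODEV when any of slope_div, slope_mult, offset_num or offset_den is zero, and it clamps the corrected reading at 0 so that negative values stay reserved for error codes. The following standalone C model (not the driver code; the calibration numbers are invented for illustration) shows that flow:

```c
/* Standalone model of the nv40 temperature correction shown in the diff.
 * A negative return value stands in for -ENODEV; real calibration data
 * comes from the VBIOS thermal tables.
 */
#include <stdio.h>

struct sensor_calib {
	int slope_mult, slope_div;
	int offset_num, offset_den;
	int offset_constant;
};

/* Return the corrected temperature in degrees C, or a negative value when
 * the calibration is unusable, mirroring the new early-return path. */
static int correct_temp(const struct sensor_calib *s, int raw)
{
	if (!s->slope_div || !s->slope_mult || !s->offset_num || !s->offset_den)
		return -1;

	int t = raw * s->slope_mult / s->slope_div;
	t += s->offset_num / s->offset_den;
	t += s->offset_constant - 8;

	/* negative results are reserved for errors, so clamp to 0 */
	return t < 0 ? 0 : t;
}

int main(void)
{
	struct sensor_calib bad  = { 1, 1, 0, 1, 0 };   /* offset_num == 0: unusable */
	struct sensor_calib good = { 3, 2, 8, 4, 10 };  /* invented but complete */

	printf("uncalibrated: %d\n", correct_temp(&bad, 60));  /* -1 */
	printf("calibrated:   %d\n", correct_temp(&good, 40)); /* 40*3/2 + 8/4 + 10 - 8 = 64 */
	return 0;
}
```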
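
The hwmon rework follows the usual sysfs conventions: a show callback passes a negative errno straight back to user space instead of formatting a bogus value, and temp1_input is reported in millidegrees Celsius. A minimal user-space sketch of that contract, with temp_get() standing in for therm->temp_get(therm):

```c
/* Sketch of the hwmon "show" convention used above: return a negative
 * errno on failure, otherwise the number of bytes written, with the
 * temperature expressed in millidegrees Celsius.
 */
#include <errno.h>
#include <stdio.h>

static int temp_get(void)
{
	return -ENODEV;           /* pretend the sensor is missing */
}

static int show_temp(char *buf, size_t len)
{
	int temp = temp_get();

	if (temp < 0)             /* propagate the error unchanged */
		return temp;

	return snprintf(buf, len, "%d\n", temp * 1000);
}

int main(void)
{
	char buf[32];
	int ret = show_temp(buf, sizeof(buf));

	if (ret < 0)
		printf("no sensor: error %d\n", ret);
	else
		printf("temp1_input = %s", buf);
	return 0;
}
```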
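
On the AGP side, the new PowerPC branch keeps AGP off by default because of the broken UniNorth-2 writeback path described in the comment, while an explicit agpmode still wins. A small model of that decision, assuming the usual nouveau_agpmode convention (0 disables AGP, -1 means "driver default", positive values force a mode); this is an illustration, not the driver function:

```c
/* Model of the AGP enable policy sketched in the diff. */
#include <stdbool.h>
#include <stdio.h>

static bool agp_enabled(int agpmode, bool is_powerpc)
{
	if (!agpmode)
		return false;          /* explicitly disabled */
	if (is_powerpc && agpmode == -1)
		return false;          /* auto: keep AGP off on PowerPC */
	return true;                   /* enabled, or forced by the user */
}

int main(void)
{
	printf("x86 auto:     %d\n", agp_enabled(-1, false));  /* 1 */
	printf("ppc auto:     %d\n", agp_enabled(-1, true));   /* 0 */
	printf("ppc forced 4: %d\n", agp_enabled(4, true));    /* 1 */
	return 0;
}
```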
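
The nv50 page-flip rework gives every CRTC its own 16-byte semaphore slot in the shared sync buffer (hence the (c) + 1 in EVO_FLIP_SEM0/1), toggles between the two halves of the slot on each flip while bumping the expected value, and lets nv50_display_flip_stop() poll until the display engine has written that value back. A standalone model of the addr/data bookkeeping; sync_bo[], queue_flip() and flip_done() are inventions for the example, whereas the driver reads the buffer with nouveau_bo_rd32() and polls via nv_wait_cb():

```c
/* Toy model of the per-CRTC flip semaphore tracking. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t sync_bo[0x1000 / 4];   /* pretend shared sync buffer */

struct flip_sem {
	uint32_t addr;   /* byte offset of the semaphore slot for this CRTC */
	uint32_t data;   /* value expected once the flip has completed */
};

/* queue a flip: switch to the other 16-byte half and bump the value */
static void queue_flip(struct flip_sem *sem)
{
	sem->addr ^= 0x10;
	sem->data++;
}

static bool flip_done(const struct flip_sem *sem)
{
	return sync_bo[sem->addr / 4] == sem->data;
}

int main(void)
{
	/* slot for CRTC 0; the +1 keeps it clear of the notifier at offset 0 */
	struct flip_sem sem = { .addr = (0 + 1) * 0x100, .data = 0 };

	queue_flip(&sem);
	printf("done before release: %d\n", flip_done(&sem)); /* 0 */

	sync_bo[sem.addr / 4] = sem.data;   /* display "releases" the semaphore */
	printf("done after release:  %d\n", flip_done(&sem)); /* 1 */
	return 0;
}
```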
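
The identical hunks added to r600, evergreen, cayman and SI all filter RADEON_RESET_MC out of the soft-reset mask, on the reasoning that a busy memory controller is almost never actually hung. A toy version of that mask filtering; the flag values are invented and are not the kernel's RADEON_RESET_* definitions:

```c
/* Minimal model of the reset-mask filtering added to the soft-reset paths. */
#include <stdio.h>

#define RESET_DMA     (1u << 0)
#define RESET_VMC     (1u << 1)
#define RESET_MC      (1u << 2)
#define RESET_DISPLAY (1u << 3)

static unsigned filter_reset_mask(unsigned reset_mask)
{
	/* an MC that reports busy is most likely not hung, so never
	 * escalate to a memory-controller reset from this path */
	if (reset_mask & RESET_MC) {
		printf("MC busy: 0x%08X, clearing.\n", reset_mask);
		reset_mask &= ~RESET_MC;
	}
	return reset_mask;
}

int main(void)
{
	unsigned mask = RESET_DMA | RESET_MC;
	unsigned out = filter_reset_mask(mask);

	printf("before: 0x%08X after: 0x%08X\n", mask, out);
	return 0;
}
```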
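
Finally, the radeon benchmark change makes both copy tests conditional on the ASIC actually providing the corresponding hook: the DMA test runs whenever copy.dma exists (same-domain copies included), and the blit test only when copy.blit exists. Sketched below with an invented function-pointer table and fake timings, purely to show the capability-guarded dispatch:

```c
/* Standalone sketch of capability-guarded benchmark dispatch. */
#include <stdio.h>

struct copy_funcs {
	int (*dma)(unsigned size);    /* NULL when the engine is absent */
	int (*blit)(unsigned size);
};

static int fake_blit(unsigned size) { return (int)(size / 1024); }

static void benchmark_move(const struct copy_funcs *copy, unsigned size)
{
	if (copy->dma)
		printf("dma:  %d us\n", copy->dma(size));
	if (copy->blit)
		printf("blit: %d us\n", copy->blit(size));
}

int main(void)
{
	struct copy_funcs asic = { .dma = NULL, .blit = fake_blit };

	benchmark_move(&asic, 1024 * 1024);   /* only the blit test runs */
	return 0;
}
```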