Diffstat (limited to 'drivers/gpu')
98 files changed, 1496 insertions, 965 deletions
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h index 796dbb212a41..8492b68e873c 100644 --- a/drivers/gpu/drm/ast/ast_drv.h +++ b/drivers/gpu/drm/ast/ast_drv.h @@ -177,7 +177,7 @@ uint8_t ast_get_index_reg_mask(struct ast_private *ast, static inline void ast_open_key(struct ast_private *ast) { - ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xA1, 0xFF, 0x04); + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x80, 0xA8); } #define AST_VIDMEM_SIZE_8M 0x00800000 diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig index 772c62a6e2ac..4752f223e5b2 100644 --- a/drivers/gpu/drm/exynos/Kconfig +++ b/drivers/gpu/drm/exynos/Kconfig @@ -1,11 +1,12 @@ config DRM_EXYNOS tristate "DRM Support for Samsung SoC EXYNOS Series" - depends on DRM && (PLAT_SAMSUNG || ARCH_MULTIPLATFORM) + depends on OF && DRM && (PLAT_SAMSUNG || ARCH_MULTIPLATFORM) select DRM_KMS_HELPER select FB_CFB_FILLRECT select FB_CFB_COPYAREA select FB_CFB_IMAGEBLIT select VT_HW_CONSOLE_BINDING if FRAMEBUFFER_CONSOLE + select VIDEOMODE_HELPERS help Choose this option if you have a Samsung SoC EXYNOS chipset. If M is selected the module will be called exynosdrm. @@ -24,9 +25,8 @@ config DRM_EXYNOS_DMABUF config DRM_EXYNOS_FIMD bool "Exynos DRM FIMD" - depends on OF && DRM_EXYNOS && !FB_S3C && !ARCH_MULTIPLATFORM + depends on DRM_EXYNOS && !FB_S3C && !ARCH_MULTIPLATFORM select FB_MODE_HELPERS - select VIDEOMODE_HELPERS help Choose this option if you want to use Exynos FIMD for DRM. diff --git a/drivers/gpu/drm/exynos/exynos_ddc.c b/drivers/gpu/drm/exynos/exynos_ddc.c index 30ef41bcd7b8..6a8c84e7c839 100644 --- a/drivers/gpu/drm/exynos/exynos_ddc.c +++ b/drivers/gpu/drm/exynos/exynos_ddc.c @@ -15,7 +15,7 @@ #include <linux/kernel.h> #include <linux/i2c.h> - +#include <linux/of.h> #include "exynos_drm_drv.h" #include "exynos_hdmi.h" @@ -41,13 +41,6 @@ static int s5p_ddc_remove(struct i2c_client *client) return 0; } -static struct i2c_device_id ddc_idtable[] = { - {"s5p_ddc", 0}, - {"exynos5-hdmiddc", 0}, - { }, -}; - -#ifdef CONFIG_OF static struct of_device_id hdmiddc_match_types[] = { { .compatible = "samsung,exynos5-hdmiddc", @@ -57,15 +50,13 @@ static struct of_device_id hdmiddc_match_types[] = { /* end node */ } }; -#endif struct i2c_driver ddc_driver = { .driver = { .name = "exynos-hdmiddc", .owner = THIS_MODULE, - .of_match_table = of_match_ptr(hdmiddc_match_types), + .of_match_table = hdmiddc_match_types, }, - .id_table = ddc_idtable, .probe = s5p_ddc_probe, .remove = s5p_ddc_remove, .command = NULL, diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.c b/drivers/gpu/drm/exynos/exynos_drm_buf.c index b8ac06d92fbf..3445a0f3a6b2 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_buf.c +++ b/drivers/gpu/drm/exynos/exynos_drm_buf.c @@ -149,10 +149,8 @@ struct exynos_drm_gem_buf *exynos_drm_init_buf(struct drm_device *dev, DRM_DEBUG_KMS("desired size = 0x%x\n", size); buffer = kzalloc(sizeof(*buffer), GFP_KERNEL); - if (!buffer) { - DRM_ERROR("failed to allocate exynos_drm_gem_buf.\n"); + if (!buffer) return NULL; - } buffer->size = size; return buffer; @@ -161,11 +159,6 @@ struct exynos_drm_gem_buf *exynos_drm_init_buf(struct drm_device *dev, void exynos_drm_fini_buf(struct drm_device *dev, struct exynos_drm_gem_buf *buffer) { - if (!buffer) { - DRM_DEBUG_KMS("buffer is null.\n"); - return; - } - kfree(buffer); buffer = NULL; } diff --git a/drivers/gpu/drm/exynos/exynos_drm_connector.c b/drivers/gpu/drm/exynos/exynos_drm_connector.c index 02a8bc5226ca..e082efb2fece 100644 --- 
a/drivers/gpu/drm/exynos/exynos_drm_connector.c +++ b/drivers/gpu/drm/exynos/exynos_drm_connector.c @@ -17,6 +17,7 @@ #include <drm/exynos_drm.h> #include "exynos_drm_drv.h" #include "exynos_drm_encoder.h" +#include "exynos_drm_connector.h" #define to_exynos_connector(x) container_of(x, struct exynos_drm_connector,\ drm_connector) @@ -28,35 +29,6 @@ struct exynos_drm_connector { uint32_t dpms; }; -/* convert exynos_video_timings to drm_display_mode */ -static inline void -convert_to_display_mode(struct drm_display_mode *mode, - struct exynos_drm_panel_info *panel) -{ - struct fb_videomode *timing = &panel->timing; - - mode->clock = timing->pixclock / 1000; - mode->vrefresh = timing->refresh; - - mode->hdisplay = timing->xres; - mode->hsync_start = mode->hdisplay + timing->right_margin; - mode->hsync_end = mode->hsync_start + timing->hsync_len; - mode->htotal = mode->hsync_end + timing->left_margin; - - mode->vdisplay = timing->yres; - mode->vsync_start = mode->vdisplay + timing->lower_margin; - mode->vsync_end = mode->vsync_start + timing->vsync_len; - mode->vtotal = mode->vsync_end + timing->upper_margin; - mode->width_mm = panel->width_mm; - mode->height_mm = panel->height_mm; - - if (timing->vmode & FB_VMODE_INTERLACED) - mode->flags |= DRM_MODE_FLAG_INTERLACE; - - if (timing->vmode & FB_VMODE_DOUBLE) - mode->flags |= DRM_MODE_FLAG_DBLSCAN; -} - static int exynos_drm_connector_get_modes(struct drm_connector *connector) { struct exynos_drm_connector *exynos_connector = @@ -111,7 +83,9 @@ static int exynos_drm_connector_get_modes(struct drm_connector *connector) return 0; } - convert_to_display_mode(mode, panel); + drm_display_mode_from_videomode(&panel->vm, mode); + mode->width_mm = panel->width_mm; + mode->height_mm = panel->height_mm; connector->display_info.width_mm = mode->width_mm; connector->display_info.height_mm = mode->height_mm; @@ -278,10 +252,8 @@ struct drm_connector *exynos_drm_connector_create(struct drm_device *dev, int err; exynos_connector = kzalloc(sizeof(*exynos_connector), GFP_KERNEL); - if (!exynos_connector) { - DRM_ERROR("failed to allocate connector\n"); + if (!exynos_connector) return NULL; - } connector = &exynos_connector->drm_connector; diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c index 14f5c1d34028..ebc01503d50e 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c @@ -15,6 +15,7 @@ #include <drm/drmP.h> #include <drm/drm_crtc_helper.h> +#include "exynos_drm_crtc.h" #include "exynos_drm_drv.h" #include "exynos_drm_encoder.h" #include "exynos_drm_plane.h" @@ -324,10 +325,8 @@ int exynos_drm_crtc_create(struct drm_device *dev, unsigned int nr) struct drm_crtc *crtc; exynos_crtc = kzalloc(sizeof(*exynos_crtc), GFP_KERNEL); - if (!exynos_crtc) { - DRM_ERROR("failed to allocate exynos crtc\n"); + if (!exynos_crtc) return -ENOMEM; - } exynos_crtc->pipe = nr; exynos_crtc->dpms = DRM_MODE_DPMS_OFF; diff --git a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c index fd76449cf452..59827cc5e770 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c +++ b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c @@ -11,6 +11,7 @@ #include <drm/drmP.h> #include <drm/exynos_drm.h> +#include "exynos_drm_dmabuf.h" #include "exynos_drm_drv.h" #include "exynos_drm_gem.h" @@ -230,7 +231,6 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev, buffer = kzalloc(sizeof(*buffer), GFP_KERNEL); if (!buffer) { - DRM_ERROR("failed to 
allocate exynos_drm_gem_buf.\n"); ret = -ENOMEM; goto err_unmap_attach; } diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c index df81d3c959b4..bb82ef78ca85 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.c +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c @@ -47,10 +47,8 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags) int nr; private = kzalloc(sizeof(struct exynos_drm_private), GFP_KERNEL); - if (!private) { - DRM_ERROR("failed to allocate private\n"); + if (!private) return -ENOMEM; - } INIT_LIST_HEAD(&private->pageflip_event_list); dev->dev_private = (void *)private; diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.c b/drivers/gpu/drm/exynos/exynos_drm_encoder.c index a99a033793bc..06f1b2a09da7 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_encoder.c +++ b/drivers/gpu/drm/exynos/exynos_drm_encoder.c @@ -324,10 +324,8 @@ exynos_drm_encoder_create(struct drm_device *dev, return NULL; exynos_encoder = kzalloc(sizeof(*exynos_encoder), GFP_KERNEL); - if (!exynos_encoder) { - DRM_ERROR("failed to allocate encoder\n"); + if (!exynos_encoder) return NULL; - } exynos_encoder->dpms = DRM_MODE_DPMS_OFF; exynos_encoder->manager = manager; diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c index c2d149f0408a..ea39e0ef2ae4 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fb.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c @@ -156,10 +156,8 @@ exynos_drm_framebuffer_init(struct drm_device *dev, } exynos_fb = kzalloc(sizeof(*exynos_fb), GFP_KERNEL); - if (!exynos_fb) { - DRM_ERROR("failed to allocate exynos drm framebuffer\n"); + if (!exynos_fb) return ERR_PTR(-ENOMEM); - } drm_helper_mode_fill_fb_struct(&exynos_fb->fb, mode_cmd); exynos_fb->exynos_gem_obj[0] = exynos_gem_obj; @@ -220,10 +218,8 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv, int i, ret; exynos_fb = kzalloc(sizeof(*exynos_fb), GFP_KERNEL); - if (!exynos_fb) { - DRM_ERROR("failed to allocate exynos drm framebuffer\n"); + if (!exynos_fb) return ERR_PTR(-ENOMEM); - } obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]); if (!obj) { diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c index 8e60bd61137f..78e868bcf1ec 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c @@ -16,9 +16,11 @@ #include <drm/drm_crtc.h> #include <drm/drm_fb_helper.h> #include <drm/drm_crtc_helper.h> +#include <drm/exynos_drm.h> #include "exynos_drm_drv.h" #include "exynos_drm_fb.h" +#include "exynos_drm_fbdev.h" #include "exynos_drm_gem.h" #include "exynos_drm_iommu.h" @@ -165,8 +167,18 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper, size = mode_cmd.pitches[0] * mode_cmd.height; - /* 0 means to allocate physically continuous memory */ - exynos_gem_obj = exynos_drm_gem_create(dev, 0, size); + exynos_gem_obj = exynos_drm_gem_create(dev, EXYNOS_BO_CONTIG, size); + /* + * If physically contiguous memory allocation fails and if IOMMU is + * supported then try to get buffer from non physically contiguous + * memory area. 
+ */ + if (IS_ERR(exynos_gem_obj) && is_drm_iommu_supported(dev)) { + dev_warn(&pdev->dev, "contiguous FB allocation failed, falling back to non-contiguous\n"); + exynos_gem_obj = exynos_drm_gem_create(dev, EXYNOS_BO_NONCONTIG, + size); + } + if (IS_ERR(exynos_gem_obj)) { ret = PTR_ERR(exynos_gem_obj); goto err_release_framebuffer; @@ -236,10 +248,8 @@ int exynos_drm_fbdev_init(struct drm_device *dev) return 0; fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL); - if (!fbdev) { - DRM_ERROR("failed to allocate drm fbdev.\n"); + if (!fbdev) return -ENOMEM; - } private->fb_helper = helper = &fbdev->drm_fb_helper; helper->funcs = &exynos_drm_fb_helper_funcs; diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c index 6e047bd53e2f..8adfc8f1e08f 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c @@ -17,10 +17,12 @@ #include <linux/regmap.h> #include <linux/clk.h> #include <linux/pm_runtime.h> +#include <linux/of.h> #include <drm/drmP.h> #include <drm/exynos_drm.h> #include "regs-fimc.h" +#include "exynos_drm_drv.h" #include "exynos_drm_ipp.h" #include "exynos_drm_fimc.h" @@ -1343,10 +1345,8 @@ static int fimc_init_prop_list(struct exynos_drm_ippdrv *ippdrv) struct drm_exynos_ipp_prop_list *prop_list; prop_list = devm_kzalloc(ippdrv->dev, sizeof(*prop_list), GFP_KERNEL); - if (!prop_list) { - DRM_ERROR("failed to alloc property list.\n"); + if (!prop_list) return -ENOMEM; - } prop_list->version = 1; prop_list->writeback = 1; diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c index 1c263dac3c1c..868a14d52995 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c @@ -16,10 +16,12 @@ #include <linux/kernel.h> #include <linux/platform_device.h> #include <linux/clk.h> +#include <linux/of.h> #include <linux/of_device.h> #include <linux/pm_runtime.h> #include <video/of_display_timing.h> +#include <video/of_videomode.h> #include <video/samsung_fimd.h> #include <drm/exynos_drm.h> @@ -35,6 +37,8 @@ * CPU Interface. 
*/ +#define FIMD_DEFAULT_FRAMERATE 60 + /* position control register for hardware window 0, 2 ~ 4.*/ #define VIDOSD_A(win) (VIDOSD_BASE + 0x00 + (win) * 16) #define VIDOSD_B(win) (VIDOSD_BASE + 0x04 + (win) * 16) @@ -65,11 +69,13 @@ struct fimd_driver_data { unsigned int has_shadowcon:1; unsigned int has_clksel:1; + unsigned int has_limited_fmt:1; }; static struct fimd_driver_data s3c64xx_fimd_driver_data = { .timing_base = 0x0, .has_clksel = 1, + .has_limited_fmt = 1, }; static struct fimd_driver_data exynos4_fimd_driver_data = { @@ -90,6 +96,7 @@ struct fimd_win_data { unsigned int fb_width; unsigned int fb_height; unsigned int bpp; + unsigned int pixel_format; dma_addr_t dma_addr; unsigned int buf_offsize; unsigned int line_size; /* bytes */ @@ -115,11 +122,10 @@ struct fimd_context { wait_queue_head_t wait_vsync_queue; atomic_t wait_vsync_event; - struct exynos_drm_panel_info *panel; + struct exynos_drm_panel_info panel; struct fimd_driver_data *driver_data; }; -#ifdef CONFIG_OF static const struct of_device_id fimd_driver_dt_match[] = { { .compatible = "samsung,s3c6400-fimd", .data = &s3c64xx_fimd_driver_data }, @@ -129,21 +135,14 @@ static const struct of_device_id fimd_driver_dt_match[] = { .data = &exynos5_fimd_driver_data }, {}, }; -#endif static inline struct fimd_driver_data *drm_fimd_get_driver_data( struct platform_device *pdev) { -#ifdef CONFIG_OF const struct of_device_id *of_id = of_match_device(fimd_driver_dt_match, &pdev->dev); - if (of_id) - return (struct fimd_driver_data *)of_id->data; -#endif - - return (struct fimd_driver_data *) - platform_get_device_id(pdev)->driver_data; + return (struct fimd_driver_data *)of_id->data; } static bool fimd_display_is_connected(struct device *dev) @@ -157,7 +156,7 @@ static void *fimd_get_panel(struct device *dev) { struct fimd_context *ctx = get_fimd_context(dev); - return ctx->panel; + return &ctx->panel; } static int fimd_check_mode(struct device *dev, struct drm_display_mode *mode) @@ -237,8 +236,8 @@ static void fimd_apply(struct device *subdrv_dev) static void fimd_commit(struct device *dev) { struct fimd_context *ctx = get_fimd_context(dev); - struct exynos_drm_panel_info *panel = ctx->panel; - struct fb_videomode *timing = &panel->timing; + struct exynos_drm_panel_info *panel = &ctx->panel; + struct videomode *vm = &panel->vm; struct fimd_driver_data *driver_data; u32 val; @@ -250,22 +249,22 @@ static void fimd_commit(struct device *dev) writel(ctx->vidcon1, ctx->regs + driver_data->timing_base + VIDCON1); /* setup vertical timing values. */ - val = VIDTCON0_VBPD(timing->upper_margin - 1) | - VIDTCON0_VFPD(timing->lower_margin - 1) | - VIDTCON0_VSPW(timing->vsync_len - 1); + val = VIDTCON0_VBPD(vm->vback_porch - 1) | + VIDTCON0_VFPD(vm->vfront_porch - 1) | + VIDTCON0_VSPW(vm->vsync_len - 1); writel(val, ctx->regs + driver_data->timing_base + VIDTCON0); /* setup horizontal timing values. */ - val = VIDTCON1_HBPD(timing->left_margin - 1) | - VIDTCON1_HFPD(timing->right_margin - 1) | - VIDTCON1_HSPW(timing->hsync_len - 1); + val = VIDTCON1_HBPD(vm->hback_porch - 1) | + VIDTCON1_HFPD(vm->hfront_porch - 1) | + VIDTCON1_HSPW(vm->hsync_len - 1); writel(val, ctx->regs + driver_data->timing_base + VIDTCON1); /* setup horizontal and vertical display size. 
*/ - val = VIDTCON2_LINEVAL(timing->yres - 1) | - VIDTCON2_HOZVAL(timing->xres - 1) | - VIDTCON2_LINEVAL_E(timing->yres - 1) | - VIDTCON2_HOZVAL_E(timing->xres - 1); + val = VIDTCON2_LINEVAL(vm->vactive - 1) | + VIDTCON2_HOZVAL(vm->hactive - 1) | + VIDTCON2_LINEVAL_E(vm->vactive - 1) | + VIDTCON2_HOZVAL_E(vm->hactive - 1); writel(val, ctx->regs + driver_data->timing_base + VIDTCON2); /* setup clock source, clock divider, enable dma. */ @@ -396,6 +395,7 @@ static void fimd_win_mode_set(struct device *dev, win_data->fb_height = overlay->fb_height; win_data->dma_addr = overlay->dma_addr[0] + offset; win_data->bpp = overlay->bpp; + win_data->pixel_format = overlay->pixel_format; win_data->buf_offsize = (overlay->fb_width - overlay->crtc_width) * (overlay->bpp >> 3); win_data->line_size = overlay->crtc_width * (overlay->bpp >> 3); @@ -417,39 +417,38 @@ static void fimd_win_set_pixfmt(struct device *dev, unsigned int win) val = WINCONx_ENWIN; - switch (win_data->bpp) { - case 1: - val |= WINCON0_BPPMODE_1BPP; - val |= WINCONx_BITSWP; - val |= WINCONx_BURSTLEN_4WORD; - break; - case 2: - val |= WINCON0_BPPMODE_2BPP; - val |= WINCONx_BITSWP; - val |= WINCONx_BURSTLEN_8WORD; - break; - case 4: - val |= WINCON0_BPPMODE_4BPP; - val |= WINCONx_BITSWP; - val |= WINCONx_BURSTLEN_8WORD; - break; - case 8: + /* + * In case of s3c64xx, window 0 doesn't support alpha channel. + * So the request format is ARGB8888 then change it to XRGB8888. + */ + if (ctx->driver_data->has_limited_fmt && !win) { + if (win_data->pixel_format == DRM_FORMAT_ARGB8888) + win_data->pixel_format = DRM_FORMAT_XRGB8888; + } + + switch (win_data->pixel_format) { + case DRM_FORMAT_C8: val |= WINCON0_BPPMODE_8BPP_PALETTE; val |= WINCONx_BURSTLEN_8WORD; val |= WINCONx_BYTSWP; break; - case 16: + case DRM_FORMAT_XRGB1555: + val |= WINCON0_BPPMODE_16BPP_1555; + val |= WINCONx_HAWSWP; + val |= WINCONx_BURSTLEN_16WORD; + break; + case DRM_FORMAT_RGB565: val |= WINCON0_BPPMODE_16BPP_565; val |= WINCONx_HAWSWP; val |= WINCONx_BURSTLEN_16WORD; break; - case 24: + case DRM_FORMAT_XRGB8888: val |= WINCON0_BPPMODE_24BPP_888; val |= WINCONx_WSWP; val |= WINCONx_BURSTLEN_16WORD; break; - case 32: - val |= WINCON1_BPPMODE_28BPP_A4888 + case DRM_FORMAT_ARGB8888: + val |= WINCON1_BPPMODE_25BPP_A1888 | WINCON1_BLD_PIX | WINCON1_ALPHA_SEL; val |= WINCONx_WSWP; val |= WINCONx_BURSTLEN_16WORD; @@ -746,45 +745,54 @@ static void fimd_subdrv_remove(struct drm_device *drm_dev, struct device *dev) drm_iommu_detach_device(drm_dev, dev); } -static int fimd_calc_clkdiv(struct fimd_context *ctx, - struct fb_videomode *timing) +static int fimd_configure_clocks(struct fimd_context *ctx, struct device *dev) { - unsigned long clk = clk_get_rate(ctx->lcd_clk); - u32 retrace; - u32 clkdiv; - u32 best_framerate = 0; - u32 framerate; - - retrace = timing->left_margin + timing->hsync_len + - timing->right_margin + timing->xres; - retrace *= timing->upper_margin + timing->vsync_len + - timing->lower_margin + timing->yres; - - /* default framerate is 60Hz */ - if (!timing->refresh) - timing->refresh = 60; - - clk /= retrace; - - for (clkdiv = 1; clkdiv < 0x100; clkdiv++) { - int tmp; - - /* get best framerate */ - framerate = clk / clkdiv; - tmp = timing->refresh - framerate; - if (tmp < 0) { - best_framerate = framerate; - continue; - } else { - if (!best_framerate) - best_framerate = framerate; - else if (tmp < (best_framerate - framerate)) - best_framerate = framerate; - break; + struct videomode *vm = &ctx->panel.vm; + unsigned long clk; + + ctx->bus_clk = 
devm_clk_get(dev, "fimd"); + if (IS_ERR(ctx->bus_clk)) { + dev_err(dev, "failed to get bus clock\n"); + return PTR_ERR(ctx->bus_clk); + } + + ctx->lcd_clk = devm_clk_get(dev, "sclk_fimd"); + if (IS_ERR(ctx->lcd_clk)) { + dev_err(dev, "failed to get lcd clock\n"); + return PTR_ERR(ctx->lcd_clk); + } + + clk = clk_get_rate(ctx->lcd_clk); + if (clk == 0) { + dev_err(dev, "error getting sclk_fimd clock rate\n"); + return -EINVAL; + } + + if (vm->pixelclock == 0) { + unsigned long c; + c = vm->hactive + vm->hback_porch + vm->hfront_porch + + vm->hsync_len; + c *= vm->vactive + vm->vback_porch + vm->vfront_porch + + vm->vsync_len; + vm->pixelclock = c * FIMD_DEFAULT_FRAMERATE; + if (vm->pixelclock == 0) { + dev_err(dev, "incorrect display timings\n"); + return -EINVAL; } + dev_warn(dev, "pixel clock recalculated to %luHz (%dHz frame rate)\n", + vm->pixelclock, FIMD_DEFAULT_FRAMERATE); } + ctx->clkdiv = DIV_ROUND_UP(clk, vm->pixelclock); + if (ctx->clkdiv > 256) { + dev_warn(dev, "calculated pixel clock divider too high (%u), lowered to 256\n", + ctx->clkdiv); + ctx->clkdiv = 256; + } + vm->pixelclock = clk / ctx->clkdiv; + DRM_DEBUG_KMS("pixel clock = %lu, clkdiv = %d\n", vm->pixelclock, + ctx->clkdiv); - return clkdiv; + return 0; } static void fimd_clear_win(struct fimd_context *ctx, int win) @@ -876,59 +884,53 @@ static int fimd_activate(struct fimd_context *ctx, bool enable) return 0; } +static int fimd_get_platform_data(struct fimd_context *ctx, struct device *dev) +{ + struct videomode *vm; + int ret; + + vm = &ctx->panel.vm; + ret = of_get_videomode(dev->of_node, vm, OF_USE_NATIVE_MODE); + if (ret) { + DRM_ERROR("failed: of_get_videomode() : %d\n", ret); + return ret; + } + + if (vm->flags & DISPLAY_FLAGS_VSYNC_LOW) + ctx->vidcon1 |= VIDCON1_INV_VSYNC; + if (vm->flags & DISPLAY_FLAGS_HSYNC_LOW) + ctx->vidcon1 |= VIDCON1_INV_HSYNC; + if (vm->flags & DISPLAY_FLAGS_DE_LOW) + ctx->vidcon1 |= VIDCON1_INV_VDEN; + if (vm->flags & DISPLAY_FLAGS_PIXDATA_NEGEDGE) + ctx->vidcon1 |= VIDCON1_INV_VCLK; + + return 0; +} + static int fimd_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct fimd_context *ctx; struct exynos_drm_subdrv *subdrv; - struct exynos_drm_fimd_pdata *pdata; - struct exynos_drm_panel_info *panel; struct resource *res; int win; int ret = -EINVAL; - if (dev->of_node) { - pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); - if (!pdata) { - DRM_ERROR("memory allocation for pdata failed\n"); - return -ENOMEM; - } - - ret = of_get_fb_videomode(dev->of_node, &pdata->panel.timing, - OF_USE_NATIVE_MODE); - if (ret) { - DRM_ERROR("failed: of_get_fb_videomode() : %d\n", ret); - return ret; - } - } else { - pdata = dev->platform_data; - if (!pdata) { - DRM_ERROR("no platform data specified\n"); - return -EINVAL; - } - } - - panel = &pdata->panel; - if (!panel) { - dev_err(dev, "panel is null.\n"); - return -EINVAL; - } + if (!dev->of_node) + return -ENODEV; ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); if (!ctx) return -ENOMEM; - ctx->bus_clk = devm_clk_get(dev, "fimd"); - if (IS_ERR(ctx->bus_clk)) { - dev_err(dev, "failed to get bus clock\n"); - return PTR_ERR(ctx->bus_clk); - } + ret = fimd_get_platform_data(ctx, dev); + if (ret) + return ret; - ctx->lcd_clk = devm_clk_get(dev, "sclk_fimd"); - if (IS_ERR(ctx->lcd_clk)) { - dev_err(dev, "failed to get lcd clock\n"); - return PTR_ERR(ctx->lcd_clk); - } + ret = fimd_configure_clocks(ctx, dev); + if (ret) + return ret; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); @@ -952,10 +954,6 @@ static int 
fimd_probe(struct platform_device *pdev) } ctx->driver_data = drm_fimd_get_driver_data(pdev); - ctx->vidcon0 = pdata->vidcon0; - ctx->vidcon1 = pdata->vidcon1; - ctx->default_win = pdata->default_win; - ctx->panel = panel; DRM_INIT_WAITQUEUE(&ctx->wait_vsync_queue); atomic_set(&ctx->wait_vsync_event, 0); @@ -973,12 +971,6 @@ static int fimd_probe(struct platform_device *pdev) pm_runtime_enable(dev); pm_runtime_get_sync(dev); - ctx->clkdiv = fimd_calc_clkdiv(ctx, &panel->timing); - panel->timing.pixclock = clk_get_rate(ctx->lcd_clk) / ctx->clkdiv; - - DRM_DEBUG_KMS("pixel clock = %d, clkdiv = %d\n", - panel->timing.pixclock, ctx->clkdiv); - for (win = 0; win < WINDOWS_NR; win++) fimd_clear_win(ctx, win); @@ -1067,20 +1059,6 @@ static int fimd_runtime_resume(struct device *dev) } #endif -static struct platform_device_id fimd_driver_ids[] = { - { - .name = "s3c64xx-fb", - .driver_data = (unsigned long)&s3c64xx_fimd_driver_data, - }, { - .name = "exynos4-fb", - .driver_data = (unsigned long)&exynos4_fimd_driver_data, - }, { - .name = "exynos5-fb", - .driver_data = (unsigned long)&exynos5_fimd_driver_data, - }, - {}, -}; - static const struct dev_pm_ops fimd_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(fimd_suspend, fimd_resume) SET_RUNTIME_PM_OPS(fimd_runtime_suspend, fimd_runtime_resume, NULL) @@ -1089,11 +1067,10 @@ static const struct dev_pm_ops fimd_pm_ops = { struct platform_driver fimd_driver = { .probe = fimd_probe, .remove = fimd_remove, - .id_table = fimd_driver_ids, .driver = { .name = "exynos4-fb", .owner = THIS_MODULE, .pm = &fimd_pm_ops, - .of_match_table = of_match_ptr(fimd_driver_dt_match), + .of_match_table = fimd_driver_dt_match, }, }; diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c index eddea4941483..3271fd4b1724 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c +++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c @@ -23,6 +23,7 @@ #include <drm/drmP.h> #include <drm/exynos_drm.h> #include "exynos_drm_drv.h" +#include "exynos_drm_g2d.h" #include "exynos_drm_gem.h" #include "exynos_drm_iommu.h" @@ -446,10 +447,8 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev, } g2d_userptr = kzalloc(sizeof(*g2d_userptr), GFP_KERNEL); - if (!g2d_userptr) { - DRM_ERROR("failed to allocate g2d_userptr.\n"); + if (!g2d_userptr) return ERR_PTR(-ENOMEM); - } atomic_set(&g2d_userptr->refcount, 1); @@ -499,7 +498,6 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev, sgt = kzalloc(sizeof(*sgt), GFP_KERNEL); if (!sgt) { - DRM_ERROR("failed to allocate sg table.\n"); ret = -ENOMEM; goto err_free_userptr; } @@ -808,17 +806,8 @@ static void g2d_dma_start(struct g2d_data *g2d, int ret; ret = pm_runtime_get_sync(g2d->dev); - if (ret < 0) { - dev_warn(g2d->dev, "failed pm power on.\n"); - return; - } - - ret = clk_prepare_enable(g2d->gate_clk); - if (ret < 0) { - dev_warn(g2d->dev, "failed to enable clock.\n"); - pm_runtime_put_sync(g2d->dev); + if (ret < 0) return; - } writel_relaxed(node->dma_addr, g2d->regs + G2D_DMA_SFR_BASE_ADDR); writel_relaxed(G2D_DMA_START, g2d->regs + G2D_DMA_COMMAND); @@ -871,7 +860,6 @@ static void g2d_runqueue_worker(struct work_struct *work) runqueue_work); mutex_lock(&g2d->runqueue_mutex); - clk_disable_unprepare(g2d->gate_clk); pm_runtime_put_sync(g2d->dev); complete(&g2d->runqueue_node->complete); @@ -1096,8 +1084,6 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data, e = kzalloc(sizeof(*node->event), GFP_KERNEL); if (!e) { - dev_err(dev, "failed to allocate event\n"); - 
spin_lock_irqsave(&drm_dev->event_lock, flags); file->event_space += sizeof(e->event); spin_unlock_irqrestore(&drm_dev->event_lock, flags); @@ -1327,10 +1313,8 @@ static int g2d_open(struct drm_device *drm_dev, struct device *dev, struct exynos_drm_g2d_private *g2d_priv; g2d_priv = kzalloc(sizeof(*g2d_priv), GFP_KERNEL); - if (!g2d_priv) { - dev_err(dev, "failed to allocate g2d private data\n"); + if (!g2d_priv) return -ENOMEM; - } g2d_priv->dev = dev; file_priv->g2d_priv = g2d_priv; @@ -1386,10 +1370,8 @@ static int g2d_probe(struct platform_device *pdev) int ret; g2d = devm_kzalloc(dev, sizeof(*g2d), GFP_KERNEL); - if (!g2d) { - dev_err(dev, "failed to allocate driver data\n"); + if (!g2d) return -ENOMEM; - } g2d->runqueue_slab = kmem_cache_create("g2d_runqueue_slab", sizeof(struct g2d_runqueue_node), 0, 0, NULL); @@ -1524,14 +1506,38 @@ static int g2d_resume(struct device *dev) } #endif -static SIMPLE_DEV_PM_OPS(g2d_pm_ops, g2d_suspend, g2d_resume); +#ifdef CONFIG_PM_RUNTIME +static int g2d_runtime_suspend(struct device *dev) +{ + struct g2d_data *g2d = dev_get_drvdata(dev); + + clk_disable_unprepare(g2d->gate_clk); + + return 0; +} + +static int g2d_runtime_resume(struct device *dev) +{ + struct g2d_data *g2d = dev_get_drvdata(dev); + int ret; + + ret = clk_prepare_enable(g2d->gate_clk); + if (ret < 0) + dev_warn(dev, "failed to enable clock.\n"); + + return ret; +} +#endif + +static const struct dev_pm_ops g2d_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(g2d_suspend, g2d_resume) + SET_RUNTIME_PM_OPS(g2d_runtime_suspend, g2d_runtime_resume, NULL) +}; -#ifdef CONFIG_OF static const struct of_device_id exynos_g2d_match[] = { { .compatible = "samsung,exynos5250-g2d" }, {}, }; -#endif struct platform_driver g2d_driver = { .probe = g2d_probe, @@ -1540,6 +1546,6 @@ struct platform_driver g2d_driver = { .name = "s5p-g2d", .owner = THIS_MODULE, .pm = &g2d_pm_ops, - .of_match_table = of_match_ptr(exynos_g2d_match), + .of_match_table = exynos_g2d_match, }, }; diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c index f3c6f40666e1..49f9cd232757 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gem.c +++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c @@ -18,6 +18,7 @@ #include "exynos_drm_drv.h" #include "exynos_drm_gem.h" #include "exynos_drm_buf.h" +#include "exynos_drm_iommu.h" static unsigned int convert_to_vm_err_msg(int msg) { @@ -191,10 +192,8 @@ struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev, int ret; exynos_gem_obj = kzalloc(sizeof(*exynos_gem_obj), GFP_KERNEL); - if (!exynos_gem_obj) { - DRM_ERROR("failed to allocate exynos gem object\n"); + if (!exynos_gem_obj) return NULL; - } exynos_gem_obj->size = size; obj = &exynos_gem_obj->base; @@ -668,6 +667,18 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv, exynos_gem_obj = exynos_drm_gem_create(dev, EXYNOS_BO_CONTIG | EXYNOS_BO_WC, args->size); + /* + * If physically contiguous memory allocation fails and if IOMMU is + * supported then try to get buffer from non physically contiguous + * memory area. 
+ */ + if (IS_ERR(exynos_gem_obj) && is_drm_iommu_supported(dev)) { + dev_warn(dev->dev, "contiguous FB allocation failed, falling back to non-contiguous\n"); + exynos_gem_obj = exynos_drm_gem_create(dev, + EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC, + args->size); + } + if (IS_ERR(exynos_gem_obj)) return PTR_ERR(exynos_gem_obj); diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c index 90b8a1a5344c..cd6aebd53bd0 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c @@ -20,6 +20,7 @@ #include <drm/drmP.h> #include <drm/exynos_drm.h> #include "regs-gsc.h" +#include "exynos_drm_drv.h" #include "exynos_drm_ipp.h" #include "exynos_drm_gsc.h" @@ -1337,10 +1338,8 @@ static int gsc_init_prop_list(struct exynos_drm_ippdrv *ippdrv) struct drm_exynos_ipp_prop_list *prop_list; prop_list = devm_kzalloc(ippdrv->dev, sizeof(*prop_list), GFP_KERNEL); - if (!prop_list) { - DRM_ERROR("failed to alloc property list.\n"); + if (!prop_list) return -ENOMEM; - } prop_list->version = 1; prop_list->writeback = 1; diff --git a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c index 8d3bc01d6834..8548b974bd59 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c @@ -403,10 +403,8 @@ static int exynos_drm_hdmi_probe(struct platform_device *pdev) struct drm_hdmi_context *ctx; ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); - if (!ctx) { - DRM_LOG_KMS("failed to alloc common hdmi context.\n"); + if (!ctx) return -ENOMEM; - } subdrv = &ctx->subdrv; diff --git a/drivers/gpu/drm/exynos/exynos_drm_iommu.c b/drivers/gpu/drm/exynos/exynos_drm_iommu.c index 3799d5c2b5df..fb8db0378274 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_iommu.c +++ b/drivers/gpu/drm/exynos/exynos_drm_iommu.c @@ -47,10 +47,16 @@ int drm_create_iommu_mapping(struct drm_device *drm_dev) dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms), GFP_KERNEL); + if (!dev->dma_parms) + goto error; + dma_set_max_seg_size(dev, 0xffffffffu); dev->archdata.mapping = mapping; return 0; +error: + arm_iommu_release_mapping(mapping); + return -ENOMEM; } /* @@ -91,6 +97,9 @@ int drm_iommu_attach_device(struct drm_device *drm_dev, subdrv_dev->dma_parms = devm_kzalloc(subdrv_dev, sizeof(*subdrv_dev->dma_parms), GFP_KERNEL); + if (!subdrv_dev->dma_parms) + return -ENOMEM; + dma_set_max_seg_size(subdrv_dev, 0xffffffffu); ret = arm_iommu_attach_device(subdrv_dev, dev->archdata.mapping); diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c index d2b6ab4def93..824e0705c8d3 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_ipp.c +++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c @@ -408,10 +408,8 @@ static struct drm_exynos_ipp_cmd_work *ipp_create_cmd_work(void) struct drm_exynos_ipp_cmd_work *cmd_work; cmd_work = kzalloc(sizeof(*cmd_work), GFP_KERNEL); - if (!cmd_work) { - DRM_ERROR("failed to alloc cmd_work.\n"); + if (!cmd_work) return ERR_PTR(-ENOMEM); - } INIT_WORK((struct work_struct *)cmd_work, ipp_sched_cmd); @@ -423,10 +421,8 @@ static struct drm_exynos_ipp_event_work *ipp_create_event_work(void) struct drm_exynos_ipp_event_work *event_work; event_work = kzalloc(sizeof(*event_work), GFP_KERNEL); - if (!event_work) { - DRM_ERROR("failed to alloc event_work.\n"); + if (!event_work) return ERR_PTR(-ENOMEM); - } INIT_WORK((struct work_struct *)event_work, ipp_sched_event); @@ -482,10 +478,8 @@ int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data, /* 
allocate command node */ c_node = kzalloc(sizeof(*c_node), GFP_KERNEL); - if (!c_node) { - DRM_ERROR("failed to allocate map node.\n"); + if (!c_node) return -ENOMEM; - } /* create property id */ ret = ipp_create_id(&ctx->prop_idr, &ctx->prop_lock, c_node, @@ -694,10 +688,8 @@ static struct drm_exynos_ipp_mem_node mutex_lock(&c_node->mem_lock); m_node = kzalloc(sizeof(*m_node), GFP_KERNEL); - if (!m_node) { - DRM_ERROR("failed to allocate queue node.\n"); + if (!m_node) goto err_unlock; - } /* clear base address for error handling */ memset(&buf_info, 0x0, sizeof(buf_info)); @@ -798,9 +790,7 @@ static int ipp_get_event(struct drm_device *drm_dev, DRM_DEBUG_KMS("ops_id[%d]buf_id[%d]\n", qbuf->ops_id, qbuf->buf_id); e = kzalloc(sizeof(*e), GFP_KERNEL); - if (!e) { - DRM_ERROR("failed to allocate event.\n"); spin_lock_irqsave(&drm_dev->event_lock, flags); file->event_space += sizeof(e->event); spin_unlock_irqrestore(&drm_dev->event_lock, flags); @@ -1780,10 +1770,8 @@ static int ipp_subdrv_open(struct drm_device *drm_dev, struct device *dev, struct exynos_drm_ipp_private *priv; priv = kzalloc(sizeof(*priv), GFP_KERNEL); - if (!priv) { - DRM_ERROR("failed to allocate priv.\n"); + if (!priv) return -ENOMEM; - } priv->dev = dev; file_priv->ipp_priv = priv; diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c index 6ee55e68e0a2..fcb0652e77d0 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_plane.c +++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c @@ -16,6 +16,7 @@ #include "exynos_drm_encoder.h" #include "exynos_drm_fb.h" #include "exynos_drm_gem.h" +#include "exynos_drm_plane.h" #define to_exynos_plane(x) container_of(x, struct exynos_plane, base) @@ -264,10 +265,8 @@ struct drm_plane *exynos_plane_init(struct drm_device *dev, int err; exynos_plane = kzalloc(sizeof(struct exynos_plane), GFP_KERNEL); - if (!exynos_plane) { - DRM_ERROR("failed to allocate plane\n"); + if (!exynos_plane) return NULL; - } err = drm_plane_init(dev, &exynos_plane->base, possible_crtcs, &exynos_plane_funcs, formats, ARRAY_SIZE(formats), diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c index 49669aa24c45..7b901688defa 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c +++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c @@ -21,6 +21,7 @@ #include <drm/exynos_drm.h> #include "regs-rotator.h" #include "exynos_drm.h" +#include "exynos_drm_drv.h" #include "exynos_drm_ipp.h" /* @@ -471,10 +472,8 @@ static int rotator_init_prop_list(struct exynos_drm_ippdrv *ippdrv) struct drm_exynos_ipp_prop_list *prop_list; prop_list = devm_kzalloc(ippdrv->dev, sizeof(*prop_list), GFP_KERNEL); - if (!prop_list) { - DRM_ERROR("failed to alloc property list.\n"); + if (!prop_list) return -ENOMEM; - } prop_list->version = 1; prop_list->flip = (1 << EXYNOS_DRM_FLIP_VERTICAL) | @@ -631,21 +630,96 @@ static int rotator_ippdrv_start(struct device *dev, enum drm_exynos_ipp_cmd cmd) return 0; } +static struct rot_limit_table rot_limit_tbl_4210 = { + .ycbcr420_2p = { + .min_w = 32, + .min_h = 32, + .max_w = SZ_64K, + .max_h = SZ_64K, + .align = 3, + }, + .rgb888 = { + .min_w = 8, + .min_h = 8, + .max_w = SZ_16K, + .max_h = SZ_16K, + .align = 2, + }, +}; + +static struct rot_limit_table rot_limit_tbl_4x12 = { + .ycbcr420_2p = { + .min_w = 32, + .min_h = 32, + .max_w = SZ_32K, + .max_h = SZ_32K, + .align = 3, + }, + .rgb888 = { + .min_w = 8, + .min_h = 8, + .max_w = SZ_8K, + .max_h = SZ_8K, + .align = 2, + }, +}; + +static struct rot_limit_table 
rot_limit_tbl_5250 = { + .ycbcr420_2p = { + .min_w = 32, + .min_h = 32, + .max_w = SZ_32K, + .max_h = SZ_32K, + .align = 3, + }, + .rgb888 = { + .min_w = 8, + .min_h = 8, + .max_w = SZ_8K, + .max_h = SZ_8K, + .align = 1, + }, +}; + +static const struct of_device_id exynos_rotator_match[] = { + { + .compatible = "samsung,exynos4210-rotator", + .data = &rot_limit_tbl_4210, + }, + { + .compatible = "samsung,exynos4212-rotator", + .data = &rot_limit_tbl_4x12, + }, + { + .compatible = "samsung,exynos5250-rotator", + .data = &rot_limit_tbl_5250, + }, + {}, +}; + static int rotator_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct rot_context *rot; struct exynos_drm_ippdrv *ippdrv; + const struct of_device_id *match; int ret; + if (!dev->of_node) { + dev_err(dev, "cannot find of_node.\n"); + return -ENODEV; + } + rot = devm_kzalloc(dev, sizeof(*rot), GFP_KERNEL); - if (!rot) { - dev_err(dev, "failed to allocate rot\n"); + if (!rot) return -ENOMEM; - } - rot->limit_tbl = (struct rot_limit_table *) - platform_get_device_id(pdev)->driver_data; + match = of_match_node(exynos_rotator_match, dev->of_node); + if (!match) { + dev_err(dev, "failed to match node\n"); + return -ENODEV; + } + rot->limit_tbl = (struct rot_limit_table *)match->data; rot->regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); rot->regs = devm_ioremap_resource(dev, rot->regs_res); @@ -717,31 +791,6 @@ static int rotator_remove(struct platform_device *pdev) return 0; } -static struct rot_limit_table rot_limit_tbl = { - .ycbcr420_2p = { - .min_w = 32, - .min_h = 32, - .max_w = SZ_32K, - .max_h = SZ_32K, - .align = 3, - }, - .rgb888 = { - .min_w = 8, - .min_h = 8, - .max_w = SZ_8K, - .max_h = SZ_8K, - .align = 2, - }, -}; - -static struct platform_device_id rotator_driver_ids[] = { - { - .name = "exynos-rot", - .driver_data = (unsigned long)&rot_limit_tbl, - }, - {}, -}; - static int rotator_clk_crtl(struct rot_context *rot, bool enable) { if (enable) { @@ -803,10 +852,10 @@ static const struct dev_pm_ops rotator_pm_ops = { struct platform_driver rotator_driver = { .probe = rotator_probe, .remove = rotator_remove, - .id_table = rotator_driver_ids, .driver = { .name = "exynos-rot", .owner = THIS_MODULE, .pm = &rotator_pm_ops, + .of_match_table = exynos_rotator_match, }, }; diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c index c57c56519add..4400330e4449 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c @@ -23,6 +23,7 @@ #include "exynos_drm_drv.h" #include "exynos_drm_crtc.h" #include "exynos_drm_encoder.h" +#include "exynos_drm_vidi.h" /* vidi has totally three virtual windows. 
*/ #define WINDOWS_NR 3 diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c index 2f5c6942c968..a0e10aeb0e67 100644 --- a/drivers/gpu/drm/exynos/exynos_hdmi.c +++ b/drivers/gpu/drm/exynos/exynos_hdmi.c @@ -32,6 +32,7 @@ #include <linux/clk.h> #include <linux/regulator/consumer.h> #include <linux/io.h> +#include <linux/of.h> #include <linux/of_gpio.h> #include <drm/exynos_drm.h> @@ -1824,10 +1825,8 @@ static int hdmi_resources_init(struct hdmi_context *hdata) res->regul_bulk = devm_kzalloc(dev, ARRAY_SIZE(supply) * sizeof(res->regul_bulk[0]), GFP_KERNEL); - if (!res->regul_bulk) { - DRM_ERROR("failed to get memory for regulators\n"); + if (!res->regul_bulk) goto fail; - } for (i = 0; i < ARRAY_SIZE(supply); ++i) { res->regul_bulk[i].supply = supply[i]; res->regul_bulk[i].consumer = NULL; @@ -1859,7 +1858,6 @@ void hdmi_attach_hdmiphy_client(struct i2c_client *hdmiphy) hdmi_hdmiphy = hdmiphy; } -#ifdef CONFIG_OF static struct s5p_hdmi_platform_data *drm_hdmi_dt_parse_pdata (struct device *dev) { @@ -1868,10 +1866,8 @@ static struct s5p_hdmi_platform_data *drm_hdmi_dt_parse_pdata u32 value; pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL); - if (!pd) { - DRM_ERROR("memory allocation for pdata failed\n"); + if (!pd) goto err_data; - } if (!of_find_property(np, "hpd-gpio", &value)) { DRM_ERROR("no hpd gpio property found\n"); @@ -1885,33 +1881,7 @@ static struct s5p_hdmi_platform_data *drm_hdmi_dt_parse_pdata err_data: return NULL; } -#else -static struct s5p_hdmi_platform_data *drm_hdmi_dt_parse_pdata - (struct device *dev) -{ - return NULL; -} -#endif - -static struct platform_device_id hdmi_driver_types[] = { - { - .name = "s5pv210-hdmi", - .driver_data = HDMI_TYPE13, - }, { - .name = "exynos4-hdmi", - .driver_data = HDMI_TYPE13, - }, { - .name = "exynos4-hdmi14", - .driver_data = HDMI_TYPE14, - }, { - .name = "exynos5-hdmi", - .driver_data = HDMI_TYPE14, - }, { - /* end node */ - } -}; -#ifdef CONFIG_OF static struct of_device_id hdmi_match_types[] = { { .compatible = "samsung,exynos5-hdmi", @@ -1923,7 +1893,6 @@ static struct of_device_id hdmi_match_types[] = { /* end node */ } }; -#endif static int hdmi_probe(struct platform_device *pdev) { @@ -1932,36 +1901,23 @@ static int hdmi_probe(struct platform_device *pdev) struct hdmi_context *hdata; struct s5p_hdmi_platform_data *pdata; struct resource *res; + const struct of_device_id *match; int ret; - if (dev->of_node) { - pdata = drm_hdmi_dt_parse_pdata(dev); - if (IS_ERR(pdata)) { - DRM_ERROR("failed to parse dt\n"); - return PTR_ERR(pdata); - } - } else { - pdata = dev->platform_data; - } + if (!dev->of_node) + return -ENODEV; - if (!pdata) { - DRM_ERROR("no platform data specified\n"); + pdata = drm_hdmi_dt_parse_pdata(dev); + if (!pdata) return -EINVAL; - } - drm_hdmi_ctx = devm_kzalloc(dev, sizeof(*drm_hdmi_ctx), - GFP_KERNEL); - if (!drm_hdmi_ctx) { - DRM_ERROR("failed to allocate common hdmi context.\n"); + drm_hdmi_ctx = devm_kzalloc(dev, sizeof(*drm_hdmi_ctx), GFP_KERNEL); + if (!drm_hdmi_ctx) return -ENOMEM; - } - hdata = devm_kzalloc(dev, sizeof(struct hdmi_context), - GFP_KERNEL); - if (!hdata) { - DRM_ERROR("out of memory\n"); + hdata = devm_kzalloc(dev, sizeof(struct hdmi_context), GFP_KERNEL); + if (!hdata) return -ENOMEM; - } mutex_init(&hdata->hdmi_mutex); @@ -1970,23 +1926,15 @@ static int hdmi_probe(struct platform_device *pdev) platform_set_drvdata(pdev, drm_hdmi_ctx); - if (dev->of_node) { - const struct of_device_id *match; - match = of_match_node(of_match_ptr(hdmi_match_types), - 
dev->of_node); - if (match == NULL) - return -ENODEV; - hdata->type = (enum hdmi_type)match->data; - } else { - hdata->type = (enum hdmi_type)platform_get_device_id - (pdev)->driver_data; - } + match = of_match_node(hdmi_match_types, dev->of_node); + if (!match) + return -ENODEV; + hdata->type = (enum hdmi_type)match->data; hdata->hpd_gpio = pdata->hpd_gpio; hdata->dev = dev; ret = hdmi_resources_init(hdata); - if (ret) { DRM_ERROR("hdmi_resources_init failed\n"); return -EINVAL; @@ -2141,11 +2089,10 @@ static const struct dev_pm_ops hdmi_pm_ops = { struct platform_driver hdmi_driver = { .probe = hdmi_probe, .remove = hdmi_remove, - .id_table = hdmi_driver_types, .driver = { .name = "exynos-hdmi", .owner = THIS_MODULE, .pm = &hdmi_pm_ops, - .of_match_table = of_match_ptr(hdmi_match_types), + .of_match_table = hdmi_match_types, }, }; diff --git a/drivers/gpu/drm/exynos/exynos_hdmiphy.c b/drivers/gpu/drm/exynos/exynos_hdmiphy.c index 6e320ae9afed..59abb1494ceb 100644 --- a/drivers/gpu/drm/exynos/exynos_hdmiphy.c +++ b/drivers/gpu/drm/exynos/exynos_hdmiphy.c @@ -15,6 +15,7 @@ #include <linux/kernel.h> #include <linux/i2c.h> +#include <linux/of.h> #include "exynos_drm_drv.h" #include "exynos_hdmi.h" @@ -39,13 +40,6 @@ static int hdmiphy_remove(struct i2c_client *client) return 0; } -static const struct i2c_device_id hdmiphy_id[] = { - { "s5p_hdmiphy", 0 }, - { "exynos5-hdmiphy", 0 }, - { }, -}; - -#ifdef CONFIG_OF static struct of_device_id hdmiphy_match_types[] = { { .compatible = "samsung,exynos5-hdmiphy", @@ -57,15 +51,13 @@ static struct of_device_id hdmiphy_match_types[] = { /* end node */ } }; -#endif struct i2c_driver hdmiphy_driver = { .driver = { .name = "exynos-hdmiphy", .owner = THIS_MODULE, - .of_match_table = of_match_ptr(hdmiphy_match_types), + .of_match_table = hdmiphy_match_types, }, - .id_table = hdmiphy_id, .probe = hdmiphy_probe, .remove = hdmiphy_remove, .command = NULL, diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c index c9a137caea41..63bc5f92fbb3 100644 --- a/drivers/gpu/drm/exynos/exynos_mixer.c +++ b/drivers/gpu/drm/exynos/exynos_mixer.c @@ -30,6 +30,7 @@ #include <linux/pm_runtime.h> #include <linux/clk.h> #include <linux/regulator/consumer.h> +#include <linux/of.h> #include <drm/exynos_drm.h> @@ -1185,16 +1186,12 @@ static int mixer_probe(struct platform_device *pdev) drm_hdmi_ctx = devm_kzalloc(dev, sizeof(*drm_hdmi_ctx), GFP_KERNEL); - if (!drm_hdmi_ctx) { - DRM_ERROR("failed to allocate common hdmi context.\n"); + if (!drm_hdmi_ctx) return -ENOMEM; - } ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); - if (!ctx) { - DRM_ERROR("failed to alloc mixer context.\n"); + if (!ctx) return -ENOMEM; - } mutex_init(&ctx->mixer_mutex); diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c index c2bd711e86e9..b1f8fc69023f 100644 --- a/drivers/gpu/drm/i2c/tda998x_drv.c +++ b/drivers/gpu/drm/i2c/tda998x_drv.c @@ -596,6 +596,10 @@ tda998x_configure_audio(struct drm_encoder *encoder, cts_n = CTS_N_M(3) | CTS_N_K(3); ca_i2s = CA_I2S_CA_I2S(0); break; + + default: + BUG(); + return; } reg_write(encoder, REG_AIP_CLKSEL, clksel_aip); diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 55ab9246e1b9..a6f4cb5af185 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -857,7 +857,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused) u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS); u32 rp_state_limits = 
I915_READ(GEN6_RP_STATE_LIMITS); u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); - u32 rpstat, cagf; + u32 rpstat, cagf, reqf; u32 rpupei, rpcurup, rpprevup; u32 rpdownei, rpcurdown, rpprevdown; int max_freq; @@ -869,6 +869,14 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused) gen6_gt_force_wake_get(dev_priv); + reqf = I915_READ(GEN6_RPNSWREQ); + reqf &= ~GEN6_TURBO_DISABLE; + if (IS_HASWELL(dev)) + reqf >>= 24; + else + reqf >>= 25; + reqf *= GT_FREQUENCY_MULTIPLIER; + rpstat = I915_READ(GEN6_RPSTAT1); rpupei = I915_READ(GEN6_RP_CUR_UP_EI); rpcurup = I915_READ(GEN6_RP_CUR_UP); @@ -893,6 +901,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused) gt_perf_status & 0xff); seq_printf(m, "Render p-state limit: %d\n", rp_state_limits & 0xff); + seq_printf(m, "RPNSWREQ: %dMHz\n", reqf); seq_printf(m, "CAGF: %dMHz\n", cagf); seq_printf(m, "RP CUR UP EI: %dus\n", rpupei & GEN6_CURICONT_MASK); diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index fdaa0915ce56..9b265a4c6a3d 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -1290,9 +1290,12 @@ static int i915_load_modeset_init(struct drm_device *dev) * then we do not take part in VGA arbitration and the * vga_client_register() fails with -ENODEV. */ - ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode); - if (ret && ret != -ENODEV) - goto out; + if (!HAS_PCH_SPLIT(dev)) { + ret = vga_client_register(dev->pdev, dev, NULL, + i915_vga_set_decode); + if (ret && ret != -ENODEV) + goto out; + } intel_register_dsm_handler(); @@ -1348,6 +1351,12 @@ static int i915_load_modeset_init(struct drm_device *dev) */ intel_fbdev_initial_config(dev); + /* + * Must do this after fbcon init so that + * vgacon_save_screen() works during the handover. + */ + i915_disable_vga_mem(dev); + /* Only enable hotplug handling once the fbdev is fully set up. */ dev_priv->enable_hotplug_processing = true; diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index ccb28ead3501..69d8ed5416c3 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -157,25 +157,6 @@ MODULE_PARM_DESC(prefault_disable, static struct drm_driver driver; extern int intel_agp_enabled; -#define INTEL_VGA_DEVICE(id, info) { \ - .class = PCI_BASE_CLASS_DISPLAY << 16, \ - .class_mask = 0xff0000, \ - .vendor = 0x8086, \ - .device = id, \ - .subvendor = PCI_ANY_ID, \ - .subdevice = PCI_ANY_ID, \ - .driver_data = (unsigned long) info } - -#define INTEL_QUANTA_VGA_DEVICE(info) { \ - .class = PCI_BASE_CLASS_DISPLAY << 16, \ - .class_mask = 0xff0000, \ - .vendor = 0x8086, \ - .device = 0x16a, \ - .subvendor = 0x152d, \ - .subdevice = 0x8990, \ - .driver_data = (unsigned long) info } - - static const struct intel_device_info intel_i830_info = { .gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2, .has_overlay = 1, .overlay_needs_physical = 1, @@ -350,118 +331,41 @@ static const struct intel_device_info intel_haswell_m_info = { .has_vebox_ring = 1, }; +/* + * Make sure any device matches here are from most specific to most + * general. For example, since the Quanta match is based on the subsystem + * and subvendor IDs, we need it to come before the more general IVB + * PCI ID matches, otherwise we'll use the wrong info struct above. 
+ */ +#define INTEL_PCI_IDS \ + INTEL_I830_IDS(&intel_i830_info), \ + INTEL_I845G_IDS(&intel_845g_info), \ + INTEL_I85X_IDS(&intel_i85x_info), \ + INTEL_I865G_IDS(&intel_i865g_info), \ + INTEL_I915G_IDS(&intel_i915g_info), \ + INTEL_I915GM_IDS(&intel_i915gm_info), \ + INTEL_I945G_IDS(&intel_i945g_info), \ + INTEL_I945GM_IDS(&intel_i945gm_info), \ + INTEL_I965G_IDS(&intel_i965g_info), \ + INTEL_G33_IDS(&intel_g33_info), \ + INTEL_I965GM_IDS(&intel_i965gm_info), \ + INTEL_GM45_IDS(&intel_gm45_info), \ + INTEL_G45_IDS(&intel_g45_info), \ + INTEL_PINEVIEW_IDS(&intel_pineview_info), \ + INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info), \ + INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info), \ + INTEL_SNB_D_IDS(&intel_sandybridge_d_info), \ + INTEL_SNB_M_IDS(&intel_sandybridge_m_info), \ + INTEL_IVB_Q_IDS(&intel_ivybridge_q_info), /* must be first IVB */ \ + INTEL_IVB_M_IDS(&intel_ivybridge_m_info), \ + INTEL_IVB_D_IDS(&intel_ivybridge_d_info), \ + INTEL_HSW_D_IDS(&intel_haswell_d_info), \ + INTEL_HSW_M_IDS(&intel_haswell_m_info), \ + INTEL_VLV_M_IDS(&intel_valleyview_m_info), \ + INTEL_VLV_D_IDS(&intel_valleyview_d_info) + static const struct pci_device_id pciidlist[] = { /* aka */ - INTEL_VGA_DEVICE(0x3577, &intel_i830_info), /* I830_M */ - INTEL_VGA_DEVICE(0x2562, &intel_845g_info), /* 845_G */ - INTEL_VGA_DEVICE(0x3582, &intel_i85x_info), /* I855_GM */ - INTEL_VGA_DEVICE(0x358e, &intel_i85x_info), - INTEL_VGA_DEVICE(0x2572, &intel_i865g_info), /* I865_G */ - INTEL_VGA_DEVICE(0x2582, &intel_i915g_info), /* I915_G */ - INTEL_VGA_DEVICE(0x258a, &intel_i915g_info), /* E7221_G */ - INTEL_VGA_DEVICE(0x2592, &intel_i915gm_info), /* I915_GM */ - INTEL_VGA_DEVICE(0x2772, &intel_i945g_info), /* I945_G */ - INTEL_VGA_DEVICE(0x27a2, &intel_i945gm_info), /* I945_GM */ - INTEL_VGA_DEVICE(0x27ae, &intel_i945gm_info), /* I945_GME */ - INTEL_VGA_DEVICE(0x2972, &intel_i965g_info), /* I946_GZ */ - INTEL_VGA_DEVICE(0x2982, &intel_i965g_info), /* G35_G */ - INTEL_VGA_DEVICE(0x2992, &intel_i965g_info), /* I965_Q */ - INTEL_VGA_DEVICE(0x29a2, &intel_i965g_info), /* I965_G */ - INTEL_VGA_DEVICE(0x29b2, &intel_g33_info), /* Q35_G */ - INTEL_VGA_DEVICE(0x29c2, &intel_g33_info), /* G33_G */ - INTEL_VGA_DEVICE(0x29d2, &intel_g33_info), /* Q33_G */ - INTEL_VGA_DEVICE(0x2a02, &intel_i965gm_info), /* I965_GM */ - INTEL_VGA_DEVICE(0x2a12, &intel_i965gm_info), /* I965_GME */ - INTEL_VGA_DEVICE(0x2a42, &intel_gm45_info), /* GM45_G */ - INTEL_VGA_DEVICE(0x2e02, &intel_g45_info), /* IGD_E_G */ - INTEL_VGA_DEVICE(0x2e12, &intel_g45_info), /* Q45_G */ - INTEL_VGA_DEVICE(0x2e22, &intel_g45_info), /* G45_G */ - INTEL_VGA_DEVICE(0x2e32, &intel_g45_info), /* G41_G */ - INTEL_VGA_DEVICE(0x2e42, &intel_g45_info), /* B43_G */ - INTEL_VGA_DEVICE(0x2e92, &intel_g45_info), /* B43_G.1 */ - INTEL_VGA_DEVICE(0xa001, &intel_pineview_info), - INTEL_VGA_DEVICE(0xa011, &intel_pineview_info), - INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info), - INTEL_VGA_DEVICE(0x0046, &intel_ironlake_m_info), - INTEL_VGA_DEVICE(0x0102, &intel_sandybridge_d_info), - INTEL_VGA_DEVICE(0x0112, &intel_sandybridge_d_info), - INTEL_VGA_DEVICE(0x0122, &intel_sandybridge_d_info), - INTEL_VGA_DEVICE(0x0106, &intel_sandybridge_m_info), - INTEL_VGA_DEVICE(0x0116, &intel_sandybridge_m_info), - INTEL_VGA_DEVICE(0x0126, &intel_sandybridge_m_info), - INTEL_VGA_DEVICE(0x010A, &intel_sandybridge_d_info), - INTEL_VGA_DEVICE(0x0156, &intel_ivybridge_m_info), /* GT1 mobile */ - INTEL_VGA_DEVICE(0x0166, &intel_ivybridge_m_info), /* GT2 mobile */ - INTEL_VGA_DEVICE(0x0152, 
&intel_ivybridge_d_info), /* GT1 desktop */ - INTEL_VGA_DEVICE(0x0162, &intel_ivybridge_d_info), /* GT2 desktop */ - INTEL_VGA_DEVICE(0x015a, &intel_ivybridge_d_info), /* GT1 server */ - INTEL_QUANTA_VGA_DEVICE(&intel_ivybridge_q_info), /* Quanta transcode */ - INTEL_VGA_DEVICE(0x016a, &intel_ivybridge_d_info), /* GT2 server */ - INTEL_VGA_DEVICE(0x0402, &intel_haswell_d_info), /* GT1 desktop */ - INTEL_VGA_DEVICE(0x0412, &intel_haswell_d_info), /* GT2 desktop */ - INTEL_VGA_DEVICE(0x0422, &intel_haswell_d_info), /* GT3 desktop */ - INTEL_VGA_DEVICE(0x040a, &intel_haswell_d_info), /* GT1 server */ - INTEL_VGA_DEVICE(0x041a, &intel_haswell_d_info), /* GT2 server */ - INTEL_VGA_DEVICE(0x042a, &intel_haswell_d_info), /* GT3 server */ - INTEL_VGA_DEVICE(0x0406, &intel_haswell_m_info), /* GT1 mobile */ - INTEL_VGA_DEVICE(0x0416, &intel_haswell_m_info), /* GT2 mobile */ - INTEL_VGA_DEVICE(0x0426, &intel_haswell_m_info), /* GT2 mobile */ - INTEL_VGA_DEVICE(0x040B, &intel_haswell_d_info), /* GT1 reserved */ - INTEL_VGA_DEVICE(0x041B, &intel_haswell_d_info), /* GT2 reserved */ - INTEL_VGA_DEVICE(0x042B, &intel_haswell_d_info), /* GT3 reserved */ - INTEL_VGA_DEVICE(0x040E, &intel_haswell_d_info), /* GT1 reserved */ - INTEL_VGA_DEVICE(0x041E, &intel_haswell_d_info), /* GT2 reserved */ - INTEL_VGA_DEVICE(0x042E, &intel_haswell_d_info), /* GT3 reserved */ - INTEL_VGA_DEVICE(0x0C02, &intel_haswell_d_info), /* SDV GT1 desktop */ - INTEL_VGA_DEVICE(0x0C12, &intel_haswell_d_info), /* SDV GT2 desktop */ - INTEL_VGA_DEVICE(0x0C22, &intel_haswell_d_info), /* SDV GT3 desktop */ - INTEL_VGA_DEVICE(0x0C0A, &intel_haswell_d_info), /* SDV GT1 server */ - INTEL_VGA_DEVICE(0x0C1A, &intel_haswell_d_info), /* SDV GT2 server */ - INTEL_VGA_DEVICE(0x0C2A, &intel_haswell_d_info), /* SDV GT3 server */ - INTEL_VGA_DEVICE(0x0C06, &intel_haswell_m_info), /* SDV GT1 mobile */ - INTEL_VGA_DEVICE(0x0C16, &intel_haswell_m_info), /* SDV GT2 mobile */ - INTEL_VGA_DEVICE(0x0C26, &intel_haswell_m_info), /* SDV GT3 mobile */ - INTEL_VGA_DEVICE(0x0C0B, &intel_haswell_d_info), /* SDV GT1 reserved */ - INTEL_VGA_DEVICE(0x0C1B, &intel_haswell_d_info), /* SDV GT2 reserved */ - INTEL_VGA_DEVICE(0x0C2B, &intel_haswell_d_info), /* SDV GT3 reserved */ - INTEL_VGA_DEVICE(0x0C0E, &intel_haswell_d_info), /* SDV GT1 reserved */ - INTEL_VGA_DEVICE(0x0C1E, &intel_haswell_d_info), /* SDV GT2 reserved */ - INTEL_VGA_DEVICE(0x0C2E, &intel_haswell_d_info), /* SDV GT3 reserved */ - INTEL_VGA_DEVICE(0x0A02, &intel_haswell_d_info), /* ULT GT1 desktop */ - INTEL_VGA_DEVICE(0x0A12, &intel_haswell_d_info), /* ULT GT2 desktop */ - INTEL_VGA_DEVICE(0x0A22, &intel_haswell_d_info), /* ULT GT3 desktop */ - INTEL_VGA_DEVICE(0x0A0A, &intel_haswell_d_info), /* ULT GT1 server */ - INTEL_VGA_DEVICE(0x0A1A, &intel_haswell_d_info), /* ULT GT2 server */ - INTEL_VGA_DEVICE(0x0A2A, &intel_haswell_d_info), /* ULT GT3 server */ - INTEL_VGA_DEVICE(0x0A06, &intel_haswell_m_info), /* ULT GT1 mobile */ - INTEL_VGA_DEVICE(0x0A16, &intel_haswell_m_info), /* ULT GT2 mobile */ - INTEL_VGA_DEVICE(0x0A26, &intel_haswell_m_info), /* ULT GT3 mobile */ - INTEL_VGA_DEVICE(0x0A0B, &intel_haswell_d_info), /* ULT GT1 reserved */ - INTEL_VGA_DEVICE(0x0A1B, &intel_haswell_d_info), /* ULT GT2 reserved */ - INTEL_VGA_DEVICE(0x0A2B, &intel_haswell_d_info), /* ULT GT3 reserved */ - INTEL_VGA_DEVICE(0x0A0E, &intel_haswell_m_info), /* ULT GT1 reserved */ - INTEL_VGA_DEVICE(0x0A1E, &intel_haswell_m_info), /* ULT GT2 reserved */ - INTEL_VGA_DEVICE(0x0A2E, &intel_haswell_m_info), /* ULT GT3 reserved */ 
- INTEL_VGA_DEVICE(0x0D02, &intel_haswell_d_info), /* CRW GT1 desktop */ - INTEL_VGA_DEVICE(0x0D12, &intel_haswell_d_info), /* CRW GT2 desktop */ - INTEL_VGA_DEVICE(0x0D22, &intel_haswell_d_info), /* CRW GT3 desktop */ - INTEL_VGA_DEVICE(0x0D0A, &intel_haswell_d_info), /* CRW GT1 server */ - INTEL_VGA_DEVICE(0x0D1A, &intel_haswell_d_info), /* CRW GT2 server */ - INTEL_VGA_DEVICE(0x0D2A, &intel_haswell_d_info), /* CRW GT3 server */ - INTEL_VGA_DEVICE(0x0D06, &intel_haswell_m_info), /* CRW GT1 mobile */ - INTEL_VGA_DEVICE(0x0D16, &intel_haswell_m_info), /* CRW GT2 mobile */ - INTEL_VGA_DEVICE(0x0D26, &intel_haswell_m_info), /* CRW GT3 mobile */ - INTEL_VGA_DEVICE(0x0D0B, &intel_haswell_d_info), /* CRW GT1 reserved */ - INTEL_VGA_DEVICE(0x0D1B, &intel_haswell_d_info), /* CRW GT2 reserved */ - INTEL_VGA_DEVICE(0x0D2B, &intel_haswell_d_info), /* CRW GT3 reserved */ - INTEL_VGA_DEVICE(0x0D0E, &intel_haswell_d_info), /* CRW GT1 reserved */ - INTEL_VGA_DEVICE(0x0D1E, &intel_haswell_d_info), /* CRW GT2 reserved */ - INTEL_VGA_DEVICE(0x0D2E, &intel_haswell_d_info), /* CRW GT3 reserved */ - INTEL_VGA_DEVICE(0x0f30, &intel_valleyview_m_info), - INTEL_VGA_DEVICE(0x0f31, &intel_valleyview_m_info), - INTEL_VGA_DEVICE(0x0f32, &intel_valleyview_m_info), - INTEL_VGA_DEVICE(0x0f33, &intel_valleyview_m_info), - INTEL_VGA_DEVICE(0x0157, &intel_valleyview_m_info), - INTEL_VGA_DEVICE(0x0155, &intel_valleyview_d_info), + INTEL_PCI_IDS, {0, 0, 0} }; diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 52a3785a3fdf..35874b3a86dc 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1236,6 +1236,13 @@ typedef struct drm_i915_private { unsigned int fsb_freq, mem_freq, is_ddr3; + /** + * wq - Driver workqueue for GEM. + * + * NOTE: Work items scheduled here are not allowed to grab any modeset + * locks, for otherwise the flushing done in the pageflip code will + * result in deadlocks. + */ struct workqueue_struct *wq; /* Display functions */ diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 2d1cb10d846f..d9e337feef14 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -212,7 +212,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, void *i915_gem_object_alloc(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - return kmem_cache_alloc(dev_priv->slab, GFP_KERNEL | __GFP_ZERO); + return kmem_cache_zalloc(dev_priv->slab, GFP_KERNEL); } void i915_gem_object_free(struct drm_i915_gem_object *obj) @@ -1695,6 +1695,7 @@ static long __i915_gem_shrink(struct drm_i915_private *dev_priv, long target, bool purgeable_only) { + struct list_head still_bound_list; struct drm_i915_gem_object *obj, *next; long count = 0; @@ -1709,23 +1710,55 @@ __i915_gem_shrink(struct drm_i915_private *dev_priv, long target, } } - list_for_each_entry_safe(obj, next, &dev_priv->mm.bound_list, - global_list) { + /* + * As we may completely rewrite the bound list whilst unbinding + * (due to retiring requests) we have to strictly process only + * one element of the list at the time, and recheck the list + * on every iteration. 
+ */ + INIT_LIST_HEAD(&still_bound_list); + while (count < target && !list_empty(&dev_priv->mm.bound_list)) { struct i915_vma *vma, *v; + obj = list_first_entry(&dev_priv->mm.bound_list, + typeof(*obj), global_list); + list_move_tail(&obj->global_list, &still_bound_list); + if (!i915_gem_object_is_purgeable(obj) && purgeable_only) continue; + /* + * Hold a reference whilst we unbind this object, as we may + * end up waiting for and retiring requests. This might + * release the final reference (held by the active list) + * and result in the object being freed from under us. + * in this object being freed. + * + * Note 1: Shrinking the bound list is special since only active + * (and hence bound objects) can contain such limbo objects, so + * we don't need special tricks for shrinking the unbound list. + * The only other place where we have to be careful with active + * objects suddenly disappearing due to retiring requests is the + * eviction code. + * + * Note 2: Even though the bound list doesn't hold a reference + * to the object we can safely grab one here: The final object + * unreferencing and the bound_list are both protected by the + * dev->struct_mutex and so we won't ever be able to observe an + * object on the bound_list with a reference count equals 0. + */ + drm_gem_object_reference(&obj->base); + list_for_each_entry_safe(vma, v, &obj->vma_list, vma_link) if (i915_vma_unbind(vma)) break; - if (!i915_gem_object_put_pages(obj)) { + if (i915_gem_object_put_pages(obj) == 0) count += obj->base.size >> PAGE_SHIFT; - if (count >= target) - return count; - } + + drm_gem_object_unreference(&obj->base); } + list_splice(&still_bound_list, &dev_priv->mm.bound_list); return count; } @@ -1774,7 +1807,6 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj) page_count = obj->base.size / PAGE_SIZE; if (sg_alloc_table(st, page_count, GFP_KERNEL)) { - sg_free_table(st); kfree(st); return -ENOMEM; } diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c index e918b05fcbdd..7d5752fda5f1 100644 --- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c +++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c @@ -42,27 +42,24 @@ static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachme ret = i915_mutex_lock_interruptible(obj->base.dev); if (ret) - return ERR_PTR(ret); + goto err; ret = i915_gem_object_get_pages(obj); - if (ret) { - st = ERR_PTR(ret); - goto out; - } + if (ret) + goto err_unlock; + + i915_gem_object_pin_pages(obj); /* Copy sg so that we make an independent mapping */ st = kmalloc(sizeof(struct sg_table), GFP_KERNEL); if (st == NULL) { - st = ERR_PTR(-ENOMEM); - goto out; + ret = -ENOMEM; + goto err_unpin; } ret = sg_alloc_table(st, obj->pages->nents, GFP_KERNEL); - if (ret) { - kfree(st); - st = ERR_PTR(ret); - goto out; - } + if (ret) + goto err_free; src = obj->pages->sgl; dst = st->sgl; @@ -73,17 +70,23 @@ static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachme } if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) { - sg_free_table(st); - kfree(st); - st = ERR_PTR(-ENOMEM); - goto out; + ret =-ENOMEM; + goto err_free_sg; } - i915_gem_object_pin_pages(obj); - -out: mutex_unlock(&obj->base.dev->struct_mutex); return st; + +err_free_sg: + sg_free_table(st); +err_free: + kfree(st); +err_unpin: + i915_gem_object_unpin_pages(obj); +err_unlock: + mutex_unlock(&obj->base.dev->struct_mutex); +err: + return ERR_PTR(ret); } static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment, diff --git 
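The reworked i915_gem_map_dma_buf() above replaces the single "out:" exit with a goto ladder that unwinds exactly the resources acquired so far (allocated sg table, pinned pages, held mutex). Below is a minimal, self-contained sketch of that error-unwinding style in plain C; the resource names and acquire/release helpers are invented stand-ins, not the i915 functions.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical resources standing in for the mutex, pinned pages and sg table. */
static int acquire_lock(void)   { return 0; }
static void release_lock(void)  { }
static int pin_pages(void)      { return 0; }
static void unpin_pages(void)   { }

static void *map_buffer(int *err)
{
	void *table;
	int ret;

	ret = acquire_lock();
	if (ret)
		goto err_out;

	ret = pin_pages();
	if (ret)
		goto err_unlock;

	table = malloc(64);
	if (!table) {
		ret = -12;		/* -ENOMEM */
		goto err_unpin;
	}

	release_lock();			/* success: keep the pages pinned, drop the lock */
	return table;

err_unpin:
	unpin_pages();
err_unlock:
	release_lock();
err_out:
	*err = ret;
	return NULL;
}

int main(void)
{
	int err = 0;
	void *t = map_buffer(&err);

	printf("map_buffer: %p (err=%d)\n", t, err);
	free(t);
	return 0;
}

Each label releases only what was acquired before the failing step, which is the property the rewritten dma-buf path restores.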
a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 792c52a235ee..bf345777ae9f 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -310,6 +310,9 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, else ret = relocate_entry_gtt(obj, reloc); + if (ret) + return ret; + /* and update the user's relocation entry */ reloc->presumed_offset = target_offset; diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c index 9969d10b80f5..e15a1d90037d 100644 --- a/drivers/gpu/drm/i915/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c @@ -201,6 +201,9 @@ int i915_gem_init_stolen(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; int bios_reserved = 0; + if (dev_priv->gtt.stolen_size == 0) + return 0; + dev_priv->mm.stolen_base = i915_stolen_to_physical(dev); if (dev_priv->mm.stolen_base == 0) return 0; diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index 558e568d5b45..aba9d7498996 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -641,7 +641,7 @@ i915_error_first_batchbuffer(struct drm_i915_private *dev_priv, if (WARN_ON(ring->id != RCS)) return NULL; - obj = ring->private; + obj = ring->scratch.obj; if (acthd >= i915_gem_obj_ggtt_offset(obj) && acthd < i915_gem_obj_ggtt_offset(obj) + obj->base.size) return i915_error_object_create(dev_priv, obj); diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index a03b445ceb5f..83cce0cdb769 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -1027,8 +1027,13 @@ static inline void intel_hpd_irq_handler(struct drm_device *dev, dev_priv->display.hpd_irq_setup(dev); spin_unlock(&dev_priv->irq_lock); - queue_work(dev_priv->wq, - &dev_priv->hotplug_work); + /* + * Our hotplug handler can grab modeset locks (by calling down into the + * fb helpers). Hence it must not be run on our own dev-priv->wq work + * queue for otherwise the flush_work in the pageflip code will + * deadlock. + */ + schedule_work(&dev_priv->hotplug_work); } static void gmbus_irq_handler(struct drm_device *dev) @@ -1655,7 +1660,13 @@ void i915_handle_error(struct drm_device *dev, bool wedged) wake_up_all(&ring->irq_queue); } - queue_work(dev_priv->wq, &dev_priv->gpu_error.work); + /* + * Our reset work can grab modeset locks (since it needs to reset the + * state of outstanding pagelips). Hence it must not be run on our own + * dev-priv->wq work queue for otherwise the flush_work in the pageflip + * code will deadlock. + */ + schedule_work(&dev_priv->gpu_error.work); } static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe) @@ -2027,9 +2038,9 @@ static void i915_hangcheck_elapsed(unsigned long data) for_each_ring(ring, dev_priv, i) { if (ring->hangcheck.score > FIRE) { - DRM_ERROR("%s on %s\n", - stuck[i] ? "stuck" : "no progress", - ring->name); + DRM_INFO("%s on %s\n", + stuck[i] ? 
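The two i915_irq.c hunks above move the hotplug and GPU-reset work items off dev_priv->wq and onto the system workqueue with schedule_work(), because those handlers can take modeset locks while the pageflip code flushes the driver's own queue under the same locks. A hedged kernel-style sketch of the pattern; the work item and handler names are made up:

#include <linux/workqueue.h>

static void my_hotplug_func(struct work_struct *work)
{
	/* may take modeset locks here */
}

static DECLARE_WORK(my_hotplug_work, my_hotplug_func);

static void my_irq_bottom_half(void)
{
	/*
	 * Do not queue_work(driver_wq, ...) for work that can grab locks the
	 * driver later holds while flushing driver_wq; use the system
	 * workqueue so the flush can never end up waiting on itself.
	 */
	schedule_work(&my_hotplug_work);
}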
"stuck" : "no progress", + ring->name); rings_hung++; } } diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index b6a58f720f9a..c159e1a6810f 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -33,21 +33,6 @@ #define _MASKED_BIT_ENABLE(a) (((a) << 16) | (a)) #define _MASKED_BIT_DISABLE(a) ((a) << 16) -/* - * The Bridge device's PCI config space has information about the - * fb aperture size and the amount of pre-reserved memory. - * This is all handled in the intel-gtt.ko module. i915.ko only - * cares about the vga bit for the vga rbiter. - */ -#define INTEL_GMCH_CTRL 0x52 -#define INTEL_GMCH_VGA_DISABLE (1 << 1) -#define SNB_GMCH_CTRL 0x50 -#define SNB_GMCH_GGMS_SHIFT 8 /* GTT Graphics Memory Size */ -#define SNB_GMCH_GGMS_MASK 0x3 -#define SNB_GMCH_GMS_SHIFT 3 /* Graphics Mode Select */ -#define SNB_GMCH_GMS_MASK 0x1f - - /* PCI config space */ #define HPLLCC 0xc0 /* 855 only */ @@ -245,6 +230,7 @@ * address/value pairs. Don't overdue it, though, x <= 2^4 must hold! */ #define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*x-1) +#define MI_STORE_REGISTER_MEM(x) MI_INSTR(0x24, 2*x-1) #define MI_FLUSH_DW MI_INSTR(0x26, 1) /* for GEN6 */ #define MI_FLUSH_DW_STORE_INDEX (1<<21) #define MI_INVALIDATE_TLB (1<<18) @@ -693,6 +679,23 @@ #define FPGA_DBG_RM_NOCLAIM (1<<31) #define DERRMR 0x44050 +#define DERRMR_PIPEA_SCANLINE (1<<0) +#define DERRMR_PIPEA_PRI_FLIP_DONE (1<<1) +#define DERRMR_PIPEA_SPR_FLIP_DONE (1<<2) +#define DERRMR_PIPEA_VBLANK (1<<3) +#define DERRMR_PIPEA_HBLANK (1<<5) +#define DERRMR_PIPEB_SCANLINE (1<<8) +#define DERRMR_PIPEB_PRI_FLIP_DONE (1<<9) +#define DERRMR_PIPEB_SPR_FLIP_DONE (1<<10) +#define DERRMR_PIPEB_VBLANK (1<<11) +#define DERRMR_PIPEB_HBLANK (1<<13) +/* Note that PIPEC is not a simple translation of PIPEA/PIPEB */ +#define DERRMR_PIPEC_SCANLINE (1<<14) +#define DERRMR_PIPEC_PRI_FLIP_DONE (1<<15) +#define DERRMR_PIPEC_SPR_FLIP_DONE (1<<20) +#define DERRMR_PIPEC_VBLANK (1<<21) +#define DERRMR_PIPEC_HBLANK (1<<22) + /* GM45+ chicken bits -- debug workaround bits that may be required * for various sorts of correct behavior. 
The top 16 bits of each are @@ -3310,6 +3313,7 @@ #define MCURSOR_PIPE_A 0x00 #define MCURSOR_PIPE_B (1 << 28) #define MCURSOR_GAMMA_ENABLE (1 << 26) +#define CURSOR_TRICKLE_FEED_DISABLE (1 << 14) #define _CURABASE (dev_priv->info->display_mmio_offset + 0x70084) #define _CURAPOS (dev_priv->info->display_mmio_offset + 0x70088) #define CURSOR_POS_MASK 0x007FF diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c index a777e7f3b0df..c8c4112de110 100644 --- a/drivers/gpu/drm/i915/i915_sysfs.c +++ b/drivers/gpu/drm/i915/i915_sysfs.c @@ -224,6 +224,18 @@ static ssize_t gt_cur_freq_mhz_show(struct device *kdev, return snprintf(buf, PAGE_SIZE, "%d\n", ret); } +static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev, + struct device_attribute *attr, char *buf) +{ + struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev); + struct drm_device *dev = minor->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + + return snprintf(buf, PAGE_SIZE, "%d\n", + vlv_gpu_freq(dev_priv->mem_freq, + dev_priv->rps.rpe_delay)); +} + static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf) { struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev); @@ -366,6 +378,7 @@ static DEVICE_ATTR(gt_cur_freq_mhz, S_IRUGO, gt_cur_freq_mhz_show, NULL); static DEVICE_ATTR(gt_max_freq_mhz, S_IRUGO | S_IWUSR, gt_max_freq_mhz_show, gt_max_freq_mhz_store); static DEVICE_ATTR(gt_min_freq_mhz, S_IRUGO | S_IWUSR, gt_min_freq_mhz_show, gt_min_freq_mhz_store); +static DEVICE_ATTR(vlv_rpe_freq_mhz, S_IRUGO, vlv_rpe_freq_mhz_show, NULL); static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf); static DEVICE_ATTR(gt_RP0_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL); @@ -409,6 +422,14 @@ static const struct attribute *gen6_attrs[] = { NULL, }; +static const struct attribute *vlv_attrs[] = { + &dev_attr_gt_cur_freq_mhz.attr, + &dev_attr_gt_max_freq_mhz.attr, + &dev_attr_gt_min_freq_mhz.attr, + &dev_attr_vlv_rpe_freq_mhz.attr, + NULL, +}; + static ssize_t error_state_read(struct file *filp, struct kobject *kobj, struct bin_attribute *attr, char *buf, loff_t off, size_t count) @@ -492,11 +513,13 @@ void i915_setup_sysfs(struct drm_device *dev) DRM_ERROR("l3 parity sysfs setup failed\n"); } - if (INTEL_INFO(dev)->gen >= 6) { + ret = 0; + if (IS_VALLEYVIEW(dev)) + ret = sysfs_create_files(&dev->primary->kdev.kobj, vlv_attrs); + else if (INTEL_INFO(dev)->gen >= 6) ret = sysfs_create_files(&dev->primary->kdev.kobj, gen6_attrs); - if (ret) - DRM_ERROR("gen6 sysfs setup failed\n"); - } + if (ret) + DRM_ERROR("RPS sysfs setup failed\n"); ret = sysfs_create_bin_file(&dev->primary->kdev.kobj, &error_state_attr); @@ -507,7 +530,10 @@ void i915_setup_sysfs(struct drm_device *dev) void i915_teardown_sysfs(struct drm_device *dev) { sysfs_remove_bin_file(&dev->primary->kdev.kobj, &error_state_attr); - sysfs_remove_files(&dev->primary->kdev.kobj, gen6_attrs); + if (IS_VALLEYVIEW(dev)) + sysfs_remove_files(&dev->primary->kdev.kobj, vlv_attrs); + else + sysfs_remove_files(&dev->primary->kdev.kobj, gen6_attrs); device_remove_bin_file(&dev->primary->kdev, &dpf_attrs); #ifdef CONFIG_PM sysfs_unmerge_group(&dev->primary->kdev.kobj, &rc6_attr_group); diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index b5a3875f22c7..ea9022ef15d5 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c @@ -688,7 +688,7 @@ static void intel_crt_reset(struct drm_connector *connector) 
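The i915_sysfs.c change above registers a Valleyview-specific attribute set (including the new vlv_rpe_freq_mhz file) instead of the gen6 set, and removes whichever set was installed on teardown. A hedged kernel-style sketch of the general "pick one attribute set per platform" pattern; every name here (foo/bar, the show callback, the predicate) is invented for illustration:

#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t foo_freq_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", 0);	/* placeholder value */
}

static DEVICE_ATTR(foo_freq_mhz, S_IRUGO, foo_freq_show, NULL);

static const struct attribute *foo_attrs[] = {
	&dev_attr_foo_freq_mhz.attr,
	NULL,
};

static const struct attribute *bar_attrs[] = {
	&dev_attr_foo_freq_mhz.attr,	/* a different set in practice */
	NULL,
};

static int my_setup_sysfs(struct kobject *kobj, bool is_foo)
{
	/* Register exactly one of the two sets... */
	return sysfs_create_files(kobj, is_foo ? foo_attrs : bar_attrs);
}

static void my_teardown_sysfs(struct kobject *kobj, bool is_foo)
{
	/* ...and remove the same one on teardown. */
	sysfs_remove_files(kobj, is_foo ? foo_attrs : bar_attrs);
}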
struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crt *crt = intel_attached_crt(connector); - if (HAS_PCH_SPLIT(dev)) { + if (INTEL_INFO(dev)->gen >= 5) { u32 adpa; adpa = I915_READ(crt->adpa_reg); diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 38452d82ac7d..2489d0b4c7d2 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -2077,8 +2077,10 @@ static int ironlake_update_plane(struct drm_crtc *crtc, else dspcntr &= ~DISPPLANE_TILED; - /* must disable */ - dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE; + if (IS_HASWELL(dev)) + dspcntr &= ~DISPPLANE_TRICKLE_FEED_DISABLE; + else + dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE; I915_WRITE(reg, dspcntr); @@ -6762,8 +6764,10 @@ static void ivb_update_cursor(struct drm_crtc *crtc, u32 base) cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE); cntl |= CURSOR_MODE_DISABLE; } - if (IS_HASWELL(dev)) + if (IS_HASWELL(dev)) { cntl |= CURSOR_PIPE_CSC_ENABLE; + cntl &= ~CURSOR_TRICKLE_FEED_DISABLE; + } I915_WRITE(CURCNTR_IVB(pipe), cntl); intel_crtc->cursor_visible = visible; @@ -7309,8 +7313,7 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc, } } - pipe_config->adjusted_mode.clock = clock.dot * - pipe_config->pixel_multiplier; + pipe_config->adjusted_mode.clock = clock.dot; } static void ironlake_crtc_clock_get(struct intel_crtc *crtc, @@ -7828,12 +7831,6 @@ err: return ret; } -/* - * On gen7 we currently use the blit ring because (in early silicon at least) - * the render ring doesn't give us interrpts for page flip completion, which - * means clients will hang after the first flip is queued. Fortunately the - * blit ring generates interrupts properly, so use it instead. - */ static int intel_gen7_queue_flip(struct drm_device *dev, struct drm_crtc *crtc, struct drm_framebuffer *fb, @@ -7842,9 +7839,13 @@ static int intel_gen7_queue_flip(struct drm_device *dev, { struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct intel_ring_buffer *ring = &dev_priv->ring[BCS]; + struct intel_ring_buffer *ring; uint32_t plane_bit = 0; - int ret; + int len, ret; + + ring = obj->ring; + if (IS_VALLEYVIEW(dev) || ring == NULL || ring->id != RCS) + ring = &dev_priv->ring[BCS]; ret = intel_pin_and_fence_fb_obj(dev, obj, ring); if (ret) @@ -7866,10 +7867,34 @@ static int intel_gen7_queue_flip(struct drm_device *dev, goto err_unpin; } - ret = intel_ring_begin(ring, 4); + len = 4; + if (ring->id == RCS) + len += 6; + + ret = intel_ring_begin(ring, len); if (ret) goto err_unpin; + /* Unmask the flip-done completion message. Note that the bspec says that + * we should do this for both the BCS and RCS, and that we must not unmask + * more than one flip event at any time (or ensure that one flip message + * can be sent by waiting for flip-done prior to queueing new flips). + * Experimentation says that BCS works despite DERRMR masking all + * flip-done completion events and that unmasking all planes at once + * for the RCS also doesn't appear to drop events. Setting the DERRMR + * to zero does lead to lockups within MI_DISPLAY_FLIP. 
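intel_gen7_queue_flip() above now prefers the ring the object was last written on, falling back to the blitter ring on Valleyview or when the object is idle or not on the render ring. A small self-contained sketch of that selection policy; the enum and helper are illustrative, not the driver's types:

#include <stdio.h>
#include <stdbool.h>

enum ring_id { RCS, BCS, NONE };

/* Pick the ring used to queue a display flip. */
static enum ring_id pick_flip_ring(enum ring_id obj_ring, bool is_valleyview)
{
	if (is_valleyview || obj_ring == NONE || obj_ring != RCS)
		return BCS;	/* blitter ring */
	return RCS;		/* render ring: the object is busy on RCS */
}

int main(void)
{
	printf("%d %d %d\n",
	       pick_flip_ring(RCS, false),	/* render ring */
	       pick_flip_ring(BCS, false),	/* blitter ring */
	       pick_flip_ring(RCS, true));	/* blitter ring on VLV */
	return 0;
}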
+ */ + if (ring->id == RCS) { + intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); + intel_ring_emit(ring, DERRMR); + intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE | + DERRMR_PIPEB_PRI_FLIP_DONE | + DERRMR_PIPEC_PRI_FLIP_DONE)); + intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1)); + intel_ring_emit(ring, DERRMR); + intel_ring_emit(ring, ring->scratch.gtt_offset + 256); + } + intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit); intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode)); intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset); @@ -10022,6 +10047,33 @@ static void i915_disable_vga(struct drm_device *dev) POSTING_READ(vga_reg); } +static void i915_enable_vga_mem(struct drm_device *dev) +{ + /* Enable VGA memory on Intel HD */ + if (HAS_PCH_SPLIT(dev)) { + vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO); + outb(inb(VGA_MSR_READ) | VGA_MSR_MEM_EN, VGA_MSR_WRITE); + vga_set_legacy_decoding(dev->pdev, VGA_RSRC_LEGACY_IO | + VGA_RSRC_LEGACY_MEM | + VGA_RSRC_NORMAL_IO | + VGA_RSRC_NORMAL_MEM); + vga_put(dev->pdev, VGA_RSRC_LEGACY_IO); + } +} + +void i915_disable_vga_mem(struct drm_device *dev) +{ + /* Disable VGA memory on Intel HD */ + if (HAS_PCH_SPLIT(dev)) { + vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO); + outb(inb(VGA_MSR_READ) & ~VGA_MSR_MEM_EN, VGA_MSR_WRITE); + vga_set_legacy_decoding(dev->pdev, VGA_RSRC_LEGACY_IO | + VGA_RSRC_NORMAL_IO | + VGA_RSRC_NORMAL_MEM); + vga_put(dev->pdev, VGA_RSRC_LEGACY_IO); + } +} + void intel_modeset_init_hw(struct drm_device *dev) { intel_init_power_well(dev); @@ -10300,6 +10352,7 @@ void i915_redisable_vga(struct drm_device *dev) if (I915_READ(vga_reg) != VGA_DISP_DISABLE) { DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n"); i915_disable_vga(dev); + i915_disable_vga_mem(dev); } } @@ -10513,6 +10566,8 @@ void intel_modeset_cleanup(struct drm_device *dev) intel_disable_fbc(dev); + i915_enable_vga_mem(dev); + intel_disable_gt_powersave(dev); ironlake_teardown_rc6(dev); diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 176080822a74..a47799e832c6 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -551,7 +551,7 @@ extern int intel_panel_init(struct intel_panel *panel, struct drm_display_mode *fixed_mode); extern void intel_panel_fini(struct intel_panel *panel); -extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode, +extern void intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode, struct drm_display_mode *adjusted_mode); extern void intel_pch_panel_fitting(struct intel_crtc *crtc, struct intel_crtc_config *pipe_config, @@ -792,5 +792,6 @@ extern void hsw_pc8_disable_interrupts(struct drm_device *dev); extern void hsw_pc8_restore_interrupts(struct drm_device *dev); extern void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv); extern void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv); +extern void i915_disable_vga_mem(struct drm_device *dev); #endif /* __INTEL_DRV_H__ */ diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 4d33278e31fb..831a5c021c4b 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -128,8 +128,8 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder) struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); - struct drm_display_mode *fixed_mode = - 
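The MI_LOAD_REGISTER_IMM sequence above programs DERRMR so that only the primary-plane flip-done events are unmasked while every other bit stays set (masked). A quick self-contained check of the value being emitted, using the bit positions from the i915_reg.h hunk earlier in this diff:

#include <stdio.h>
#include <stdint.h>

#define DERRMR_PIPEA_PRI_FLIP_DONE (1u << 1)
#define DERRMR_PIPEB_PRI_FLIP_DONE (1u << 9)
#define DERRMR_PIPEC_PRI_FLIP_DONE (1u << 15)

int main(void)
{
	uint32_t unmask = ~(DERRMR_PIPEA_PRI_FLIP_DONE |
			    DERRMR_PIPEB_PRI_FLIP_DONE |
			    DERRMR_PIPEC_PRI_FLIP_DONE);

	/* Prints 0xffff7dfd: every bit set except bits 1, 9 and 15. */
	printf("DERRMR value: 0x%08x\n", (unsigned)unmask);
	return 0;
}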
lvds_encoder->attached_connector->base.panel.fixed_mode; + const struct drm_display_mode *adjusted_mode = + &crtc->config.adjusted_mode; int pipe = crtc->pipe; u32 temp; @@ -183,9 +183,9 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder) temp &= ~LVDS_ENABLE_DITHER; } temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY); - if (fixed_mode->flags & DRM_MODE_FLAG_NHSYNC) + if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC) temp |= LVDS_HSYNC_POLARITY; - if (fixed_mode->flags & DRM_MODE_FLAG_NVSYNC) + if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC) temp |= LVDS_VSYNC_POLARITY; I915_WRITE(lvds_encoder->reg, temp); diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c index cfb8fb68f09c..119771ff46ab 100644 --- a/drivers/gpu/drm/i915/intel_opregion.c +++ b/drivers/gpu/drm/i915/intel_opregion.c @@ -173,7 +173,7 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp) return ASLE_BACKLIGHT_FAILED; intel_panel_set_backlight(dev, bclp, 255); - iowrite32((bclp*0x64)/0xff | ASLE_CBLV_VALID, &asle->cblv); + iowrite32(DIV_ROUND_UP(bclp * 100, 255) | ASLE_CBLV_VALID, &asle->cblv); return 0; } diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index a43c33bc4a35..42114ecbae0e 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c @@ -36,20 +36,12 @@ #define PCI_LBPC 0xf4 /* legacy/combination backlight modes */ void -intel_fixed_panel_mode(struct drm_display_mode *fixed_mode, +intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode, struct drm_display_mode *adjusted_mode) { - adjusted_mode->hdisplay = fixed_mode->hdisplay; - adjusted_mode->hsync_start = fixed_mode->hsync_start; - adjusted_mode->hsync_end = fixed_mode->hsync_end; - adjusted_mode->htotal = fixed_mode->htotal; + drm_mode_copy(adjusted_mode, fixed_mode); - adjusted_mode->vdisplay = fixed_mode->vdisplay; - adjusted_mode->vsync_start = fixed_mode->vsync_start; - adjusted_mode->vsync_end = fixed_mode->vsync_end; - adjusted_mode->vtotal = fixed_mode->vtotal; - - adjusted_mode->clock = fixed_mode->clock; + drm_mode_set_crtcinfo(adjusted_mode, 0); } /* adjusted_mode has been preset to be the panel's fixed mode */ diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 46056820d1d2..0c115cc4899f 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -3447,14 +3447,24 @@ int intel_enable_rc6(const struct drm_device *dev) static void gen6_enable_rps_interrupts(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; + u32 enabled_intrs; spin_lock_irq(&dev_priv->irq_lock); WARN_ON(dev_priv->rps.pm_iir); snb_enable_pm_irq(dev_priv, GEN6_PM_RPS_EVENTS); I915_WRITE(GEN6_PMIIR, GEN6_PM_RPS_EVENTS); spin_unlock_irq(&dev_priv->irq_lock); + /* only unmask PM interrupts we need. Mask all others. */ - I915_WRITE(GEN6_PMINTRMSK, ~GEN6_PM_RPS_EVENTS); + enabled_intrs = GEN6_PM_RPS_EVENTS; + + /* IVB and SNB hard hangs on looping batchbuffer + * if GEN6_PM_UP_EI_EXPIRED is masked. 
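The intel_opregion.c hunk above replaces (bclp*0x64)/0xff with DIV_ROUND_UP(bclp * 100, 255), so the 0..255 ASLE backlight value maps onto 0..100 without truncating small or near-maximum inputs downward. A small self-contained comparison of the two formulas, using the same DIV_ROUND_UP definition as the kernel:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int vals[] = { 0, 1, 127, 254, 255 };
	unsigned int i;

	for (i = 0; i < sizeof(vals) / sizeof(vals[0]); i++) {
		unsigned int bclp = vals[i];
		unsigned int before = (bclp * 0x64) / 0xff;           /* truncating */
		unsigned int after  = DIV_ROUND_UP(bclp * 100, 255);  /* rounding up */

		printf("bclp=%3u  before=%3u  after=%3u\n", bclp, before, after);
	}
	return 0;
}

For bclp=1 the old formula reports 0 while the new one reports 1, and for bclp=254 it reports 99 versus 100.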
+ */ + if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev)) + enabled_intrs |= GEN6_PM_RP_UP_EI_EXPIRED; + + I915_WRITE(GEN6_PMINTRMSK, ~enabled_intrs); } static void gen6_enable_rps(struct drm_device *dev) @@ -4950,8 +4960,6 @@ static void haswell_init_clock_gating(struct drm_device *dev) I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) | GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB); - g4x_disable_trickle_feed(dev); - /* WaVSRefCountFullforceMissDisable:hsw */ gen7_setup_fixed_func_scheduler(dev_priv); diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index f05cceac5a52..460ee1026fca 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -33,16 +33,6 @@ #include "i915_trace.h" #include "intel_drv.h" -/* - * 965+ support PIPE_CONTROL commands, which provide finer grained control - * over cache flushing. - */ -struct pipe_control { - struct drm_i915_gem_object *obj; - volatile u32 *cpu_page; - u32 gtt_offset; -}; - static inline int ring_space(struct intel_ring_buffer *ring) { int space = (ring->head & HEAD_ADDR) - (ring->tail + I915_RING_FREE_SPACE); @@ -175,8 +165,7 @@ gen4_render_ring_flush(struct intel_ring_buffer *ring, static int intel_emit_post_sync_nonzero_flush(struct intel_ring_buffer *ring) { - struct pipe_control *pc = ring->private; - u32 scratch_addr = pc->gtt_offset + 128; + u32 scratch_addr = ring->scratch.gtt_offset + 128; int ret; @@ -213,8 +202,7 @@ gen6_render_ring_flush(struct intel_ring_buffer *ring, u32 invalidate_domains, u32 flush_domains) { u32 flags = 0; - struct pipe_control *pc = ring->private; - u32 scratch_addr = pc->gtt_offset + 128; + u32 scratch_addr = ring->scratch.gtt_offset + 128; int ret; /* Force SNB workarounds for PIPE_CONTROL flushes */ @@ -306,8 +294,7 @@ gen7_render_ring_flush(struct intel_ring_buffer *ring, u32 invalidate_domains, u32 flush_domains) { u32 flags = 0; - struct pipe_control *pc = ring->private; - u32 scratch_addr = pc->gtt_offset + 128; + u32 scratch_addr = ring->scratch.gtt_offset + 128; int ret; /* @@ -481,68 +468,43 @@ out: static int init_pipe_control(struct intel_ring_buffer *ring) { - struct pipe_control *pc; - struct drm_i915_gem_object *obj; int ret; - if (ring->private) + if (ring->scratch.obj) return 0; - pc = kmalloc(sizeof(*pc), GFP_KERNEL); - if (!pc) - return -ENOMEM; - - obj = i915_gem_alloc_object(ring->dev, 4096); - if (obj == NULL) { + ring->scratch.obj = i915_gem_alloc_object(ring->dev, 4096); + if (ring->scratch.obj == NULL) { DRM_ERROR("Failed to allocate seqno page\n"); ret = -ENOMEM; goto err; } - i915_gem_object_set_cache_level(obj, I915_CACHE_LLC); + i915_gem_object_set_cache_level(ring->scratch.obj, I915_CACHE_LLC); - ret = i915_gem_obj_ggtt_pin(obj, 4096, true, false); + ret = i915_gem_obj_ggtt_pin(ring->scratch.obj, 4096, true, false); if (ret) goto err_unref; - pc->gtt_offset = i915_gem_obj_ggtt_offset(obj); - pc->cpu_page = kmap(sg_page(obj->pages->sgl)); - if (pc->cpu_page == NULL) { + ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(ring->scratch.obj); + ring->scratch.cpu_page = kmap(sg_page(ring->scratch.obj->pages->sgl)); + if (ring->scratch.cpu_page == NULL) { ret = -ENOMEM; goto err_unpin; } DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n", - ring->name, pc->gtt_offset); - - pc->obj = obj; - ring->private = pc; + ring->name, ring->scratch.gtt_offset); return 0; err_unpin: - i915_gem_object_unpin(obj); + i915_gem_object_unpin(ring->scratch.obj); err_unref: - drm_gem_object_unreference(&obj->base); + 
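gen6_enable_rps_interrupts() above builds an enabled_intrs set and writes its complement to GEN6_PMINTRMSK, additionally keeping GEN6_PM_RP_UP_EI_EXPIRED unmasked on SNB/IVB as the workaround comment explains. A tiny self-contained illustration of the "write the complement of the enable set to a mask register" idiom; the bit positions here are placeholders, not the real register layout:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define PM_RPS_EVENTS      (1u << 4)	/* placeholder bit positions */
#define PM_UP_EI_EXPIRED   (1u << 2)

static uint32_t pm_intr_mask(bool needs_ei_workaround)
{
	uint32_t enabled = PM_RPS_EVENTS;

	if (needs_ei_workaround)
		enabled |= PM_UP_EI_EXPIRED;

	return ~enabled;	/* 1 = masked, 0 = delivered */
}

int main(void)
{
	printf("mask (hsw-like): 0x%08x\n", (unsigned)pm_intr_mask(false));
	printf("mask (snb-like): 0x%08x\n", (unsigned)pm_intr_mask(true));
	return 0;
}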
drm_gem_object_unreference(&ring->scratch.obj->base); err: - kfree(pc); return ret; } -static void -cleanup_pipe_control(struct intel_ring_buffer *ring) -{ - struct pipe_control *pc = ring->private; - struct drm_i915_gem_object *obj; - - obj = pc->obj; - - kunmap(sg_page(obj->pages->sgl)); - i915_gem_object_unpin(obj); - drm_gem_object_unreference(&obj->base); - - kfree(pc); -} - static int init_render_ring(struct intel_ring_buffer *ring) { struct drm_device *dev = ring->dev; @@ -607,16 +569,16 @@ static void render_ring_cleanup(struct intel_ring_buffer *ring) { struct drm_device *dev = ring->dev; - if (!ring->private) + if (ring->scratch.obj == NULL) return; - if (HAS_BROKEN_CS_TLB(dev)) - drm_gem_object_unreference(to_gem_object(ring->private)); - - if (INTEL_INFO(dev)->gen >= 5) - cleanup_pipe_control(ring); + if (INTEL_INFO(dev)->gen >= 5) { + kunmap(sg_page(ring->scratch.obj->pages->sgl)); + i915_gem_object_unpin(ring->scratch.obj); + } - ring->private = NULL; + drm_gem_object_unreference(&ring->scratch.obj->base); + ring->scratch.obj = NULL; } static void @@ -742,8 +704,7 @@ do { \ static int pc_render_add_request(struct intel_ring_buffer *ring) { - struct pipe_control *pc = ring->private; - u32 scratch_addr = pc->gtt_offset + 128; + u32 scratch_addr = ring->scratch.gtt_offset + 128; int ret; /* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently @@ -761,7 +722,7 @@ pc_render_add_request(struct intel_ring_buffer *ring) intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_WRITE_FLUSH | PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE); - intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT); + intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT); intel_ring_emit(ring, ring->outstanding_lazy_request); intel_ring_emit(ring, 0); PIPE_CONTROL_FLUSH(ring, scratch_addr); @@ -780,7 +741,7 @@ pc_render_add_request(struct intel_ring_buffer *ring) PIPE_CONTROL_WRITE_FLUSH | PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE | PIPE_CONTROL_NOTIFY); - intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT); + intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT); intel_ring_emit(ring, ring->outstanding_lazy_request); intel_ring_emit(ring, 0); intel_ring_advance(ring); @@ -814,15 +775,13 @@ ring_set_seqno(struct intel_ring_buffer *ring, u32 seqno) static u32 pc_render_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency) { - struct pipe_control *pc = ring->private; - return pc->cpu_page[0]; + return ring->scratch.cpu_page[0]; } static void pc_render_set_seqno(struct intel_ring_buffer *ring, u32 seqno) { - struct pipe_control *pc = ring->private; - pc->cpu_page[0] = seqno; + ring->scratch.cpu_page[0] = seqno; } static bool @@ -1141,8 +1100,7 @@ i830_dispatch_execbuffer(struct intel_ring_buffer *ring, intel_ring_emit(ring, MI_NOOP); intel_ring_advance(ring); } else { - struct drm_i915_gem_object *obj = ring->private; - u32 cs_offset = i915_gem_obj_ggtt_offset(obj); + u32 cs_offset = ring->scratch.gtt_offset; if (len > I830_BATCH_LIMIT) return -ENOSPC; @@ -1835,7 +1793,8 @@ int intel_init_render_ring_buffer(struct drm_device *dev) return ret; } - ring->private = obj; + ring->scratch.obj = obj; + ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(obj); } return intel_init_ring_buffer(dev, ring); diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 432ad5311ba6..68b1ca974d59 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ 
b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -155,7 +155,11 @@ struct intel_ring_buffer { struct intel_ring_hangcheck hangcheck; - void *private; + struct { + struct drm_i915_gem_object *obj; + u32 gtt_offset; + volatile u32 *cpu_page; + } scratch; }; static inline bool diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 317e058fb3cf..85037b9d4934 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -1151,11 +1151,10 @@ static void intel_sdvo_mode_set(struct intel_encoder *intel_encoder) { struct drm_device *dev = intel_encoder->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_crtc *crtc = intel_encoder->base.crtc; - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + struct intel_crtc *crtc = to_intel_crtc(intel_encoder->base.crtc); struct drm_display_mode *adjusted_mode = - &intel_crtc->config.adjusted_mode; - struct drm_display_mode *mode = &intel_crtc->config.requested_mode; + &crtc->config.adjusted_mode; + struct drm_display_mode *mode = &crtc->config.requested_mode; struct intel_sdvo *intel_sdvo = to_sdvo(intel_encoder); u32 sdvox; struct intel_sdvo_in_out_map in_out; @@ -1213,13 +1212,15 @@ static void intel_sdvo_mode_set(struct intel_encoder *intel_encoder) * adjusted_mode. */ intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode); + input_dtd.part1.clock /= crtc->config.pixel_multiplier; + if (intel_sdvo->is_tv || intel_sdvo->is_lvds) input_dtd.part2.sdvo_flags = intel_sdvo->dtd_sdvo_flags; if (!intel_sdvo_set_input_timing(intel_sdvo, &input_dtd)) DRM_INFO("Setting input timings on %s failed\n", SDVO_NAME(intel_sdvo)); - switch (intel_crtc->config.pixel_multiplier) { + switch (crtc->config.pixel_multiplier) { default: WARN(1, "unknown pixel mutlipler specified\n"); case 1: rate = SDVO_CLOCK_RATE_MULT_1X; break; @@ -1252,9 +1253,9 @@ static void intel_sdvo_mode_set(struct intel_encoder *intel_encoder) } if (INTEL_PCH_TYPE(dev) >= PCH_CPT) - sdvox |= SDVO_PIPE_SEL_CPT(intel_crtc->pipe); + sdvox |= SDVO_PIPE_SEL_CPT(crtc->pipe); else - sdvox |= SDVO_PIPE_SEL(intel_crtc->pipe); + sdvox |= SDVO_PIPE_SEL(crtc->pipe); if (intel_sdvo->has_hdmi_audio) sdvox |= SDVO_AUDIO_ENABLE; @@ -1264,7 +1265,7 @@ static void intel_sdvo_mode_set(struct intel_encoder *intel_encoder) } else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) { /* done in crtc_mode_set as it lives inside the dpll register */ } else { - sdvox |= (intel_crtc->config.pixel_multiplier - 1) + sdvox |= (crtc->config.pixel_multiplier - 1) << SDVO_PORT_MULTIPLY_SHIFT; } diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index 78b621cdd108..ad6ec4b39005 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c @@ -260,8 +260,11 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, if (obj->tiling_mode != I915_TILING_NONE) sprctl |= SPRITE_TILED; - /* must disable */ - sprctl |= SPRITE_TRICKLE_FEED_DISABLE; + if (IS_HASWELL(dev)) + sprctl &= ~SPRITE_TRICKLE_FEED_DISABLE; + else + sprctl |= SPRITE_TRICKLE_FEED_DISABLE; + sprctl |= SPRITE_ENABLE; if (IS_HASWELL(dev)) diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index 8f5bc869c023..8649f1c36b00 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -261,7 +261,7 @@ void intel_uncore_init(struct drm_device *dev) } } -void intel_uncore_sanitize(struct drm_device *dev) +static void intel_uncore_forcewake_reset(struct 
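The intel_ringbuffer.h hunk above replaces the opaque void *private with an embedded, named scratch struct, so every user touches the same typed fields instead of casting its own private structure. A self-contained sketch of that refactor with simplified, invented types:

#include <stdio.h>
#include <stdint.h>

struct gem_object { int dummy; };

/* Before: void *private, each caller casts to its own struct.
 * After: the scratch state is embedded and typed. */
struct ring_buffer {
	const char *name;
	struct {
		struct gem_object *obj;
		uint32_t gtt_offset;
		volatile uint32_t *cpu_page;
	} scratch;
};

int main(void)
{
	static uint32_t page[1024];
	struct gem_object obj = { 0 };
	struct ring_buffer ring = { .name = "render" };

	ring.scratch.obj = &obj;
	ring.scratch.gtt_offset = 0x1000;
	ring.scratch.cpu_page = page;
	ring.scratch.cpu_page[0] = 42;	/* e.g. a seqno write */

	printf("%s scratch at 0x%x, seqno=%u\n",
	       ring.name, (unsigned)ring.scratch.gtt_offset,
	       (unsigned)ring.scratch.cpu_page[0]);
	return 0;
}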
drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -272,6 +272,11 @@ void intel_uncore_sanitize(struct drm_device *dev) if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) __gen6_gt_force_wake_mt_reset(dev_priv); } +} + +void intel_uncore_sanitize(struct drm_device *dev) +{ + intel_uncore_forcewake_reset(dev); /* BIOS often leaves RC6 enabled, but disable it for hw init */ intel_disable_gt_powersave(dev); @@ -549,6 +554,8 @@ static int gen6_do_reset(struct drm_device *dev) /* Spin waiting for the device to ack the reset request */ ret = wait_for((__raw_i915_read32(dev_priv, GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500); + intel_uncore_forcewake_reset(dev); + /* If reset with a user forcewake, try to restore, otherwise turn it off */ if (dev_priv->uncore.forcewake_count) dev_priv->uncore.funcs.force_wake_get(dev_priv); diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index 8863644024b7..e893c5362402 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -636,7 +636,8 @@ int nouveau_pmops_resume(struct device *dev) nouveau_fbcon_set_suspend(drm_dev, 0); nouveau_fbcon_zfill_all(drm_dev); - nouveau_display_resume(drm_dev); + if (drm_dev->mode_config.num_crtc) + nouveau_display_resume(drm_dev); nv_suspend_set_printk_level(NV_DBG_DEBUG); return 0; } @@ -671,7 +672,8 @@ static int nouveau_pmops_thaw(struct device *dev) if (drm_dev->mode_config.num_crtc) nouveau_fbcon_set_suspend(drm_dev, 0); nouveau_fbcon_zfill_all(drm_dev); - nouveau_display_resume(drm_dev); + if (drm_dev->mode_config.num_crtc) + nouveau_display_resume(drm_dev); nv_suspend_set_printk_level(NV_DBG_DEBUG); return 0; } @@ -906,7 +908,8 @@ static int nouveau_pmops_runtime_resume(struct device *dev) pci_set_master(pdev); ret = nouveau_do_resume(drm_dev); - nouveau_display_resume(drm_dev); + if (drm_dev->mode_config.num_crtc) + nouveau_display_resume(drm_dev); drm_kms_helper_poll_enable(drm_dev); /* do magic */ nv_mask(device, 0x88488, (1 << 25), (1 << 25)); diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c index dfac7965ea28..32923d2f6002 100644 --- a/drivers/gpu/drm/radeon/atombios_encoders.c +++ b/drivers/gpu/drm/radeon/atombios_encoders.c @@ -707,8 +707,9 @@ atombios_get_encoder_mode(struct drm_encoder *encoder) switch (connector->connector_type) { case DRM_MODE_CONNECTOR_DVII: case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */ - if (drm_detect_hdmi_monitor(radeon_connector->edid) && - radeon_audio) + if ((radeon_connector->audio == RADEON_AUDIO_ENABLE) || + (drm_detect_hdmi_monitor(radeon_connector->edid) && + (radeon_connector->audio == RADEON_AUDIO_AUTO))) return ATOM_ENCODER_MODE_HDMI; else if (radeon_connector->use_digital) return ATOM_ENCODER_MODE_DVI; @@ -718,8 +719,9 @@ atombios_get_encoder_mode(struct drm_encoder *encoder) case DRM_MODE_CONNECTOR_DVID: case DRM_MODE_CONNECTOR_HDMIA: default: - if (drm_detect_hdmi_monitor(radeon_connector->edid) && - radeon_audio) + if ((radeon_connector->audio == RADEON_AUDIO_ENABLE) || + (drm_detect_hdmi_monitor(radeon_connector->edid) && + (radeon_connector->audio == RADEON_AUDIO_AUTO))) return ATOM_ENCODER_MODE_HDMI; else return ATOM_ENCODER_MODE_DVI; @@ -732,8 +734,9 @@ atombios_get_encoder_mode(struct drm_encoder *encoder) if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) || (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) return ATOM_ENCODER_MODE_DP; - else if 
(drm_detect_hdmi_monitor(radeon_connector->edid) && - radeon_audio) + else if ((radeon_connector->audio == RADEON_AUDIO_ENABLE) || + (drm_detect_hdmi_monitor(radeon_connector->edid) && + (radeon_connector->audio == RADEON_AUDIO_AUTO))) return ATOM_ENCODER_MODE_HDMI; else return ATOM_ENCODER_MODE_DVI; @@ -1647,8 +1650,12 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode) atombios_dig_encoder_setup(encoder, ATOM_ENABLE, 0); atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0); atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0); - /* some early dce3.2 boards have a bug in their transmitter control table */ - if ((rdev->family != CHIP_RV710) && (rdev->family != CHIP_RV730)) + /* some dce3.x boards have a bug in their transmitter control table. + * ACTION_ENABLE_OUTPUT can probably be dropped since ACTION_ENABLE + * does the same thing and more. + */ + if ((rdev->family != CHIP_RV710) && (rdev->family != CHIP_RV730) && + (rdev->family != CHIP_RS880)) atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0); } if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) { diff --git a/drivers/gpu/drm/radeon/btc_dpm.c b/drivers/gpu/drm/radeon/btc_dpm.c index 084e69414fd1..05ff315e8e9e 100644 --- a/drivers/gpu/drm/radeon/btc_dpm.c +++ b/drivers/gpu/drm/radeon/btc_dpm.c @@ -2340,12 +2340,6 @@ int btc_dpm_set_power_state(struct radeon_device *rdev) return ret; } - ret = rv770_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_AUTO); - if (ret) { - DRM_ERROR("rv770_dpm_force_performance_level failed\n"); - return ret; - } - return 0; } diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c index 916630fdc796..899627443030 100644 --- a/drivers/gpu/drm/radeon/ci_dpm.c +++ b/drivers/gpu/drm/radeon/ci_dpm.c @@ -4208,6 +4208,7 @@ static void ci_request_link_speed_change_before_state_change(struct radeon_devic pi->pspp_notify_required = false; if (target_link_speed > current_link_speed) { switch (target_link_speed) { +#ifdef CONFIG_ACPI case RADEON_PCIE_GEN3: if (radeon_acpi_pcie_performance_request(rdev, PCIE_PERF_REQ_PECI_GEN3, false) == 0) break; @@ -4217,6 +4218,7 @@ static void ci_request_link_speed_change_before_state_change(struct radeon_devic case RADEON_PCIE_GEN2: if (radeon_acpi_pcie_performance_request(rdev, PCIE_PERF_REQ_PECI_GEN2, false) == 0) break; +#endif default: pi->force_pcie_gen = ci_get_current_pcie_speed(rdev); break; @@ -4248,7 +4250,9 @@ static void ci_notify_link_speed_change_after_state_change(struct radeon_device (ci_get_current_pcie_speed(rdev) > 0)) return; +#ifdef CONFIG_ACPI radeon_acpi_pcie_performance_request(rdev, request, false); +#endif } } @@ -4744,12 +4748,6 @@ int ci_dpm_set_power_state(struct radeon_device *rdev) if (pi->pcie_performance_request) ci_notify_link_speed_change_after_state_change(rdev, new_ps, old_ps); - ret = ci_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_AUTO); - if (ret) { - DRM_ERROR("ci_dpm_force_performance_level failed\n"); - return ret; - } - cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX | RADEON_CG_BLOCK_MC | RADEON_CG_BLOCK_SDMA | diff --git a/drivers/gpu/drm/radeon/ci_smc.c b/drivers/gpu/drm/radeon/ci_smc.c index 53b43dd3cf1e..252e10a41cf5 100644 --- a/drivers/gpu/drm/radeon/ci_smc.c +++ b/drivers/gpu/drm/radeon/ci_smc.c @@ -47,10 +47,11 @@ int ci_copy_bytes_to_smc(struct radeon_device *rdev, u32 smc_start_address, const u8 *src, u32 byte_count, u32 limit) { + unsigned long flags; u32 data, 
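The atombios_encoders.c hunks above switch HDMI mode selection from the global radeon_audio module flag to a per-connector setting: forced on, forced off, or automatic based on EDID detection. A self-contained sketch of that three-way policy; the enum names mirror the diff, while the helper and its boolean input are hypothetical:

#include <stdio.h>
#include <stdbool.h>

enum radeon_audio_setting {
	RADEON_AUDIO_DISABLE,
	RADEON_AUDIO_ENABLE,
	RADEON_AUDIO_AUTO,
};

/* Decide whether the encoder should run in HDMI (audio-capable) mode. */
static bool want_hdmi_mode(enum radeon_audio_setting audio, bool monitor_is_hdmi)
{
	if (audio == RADEON_AUDIO_ENABLE)
		return true;
	if (audio == RADEON_AUDIO_AUTO && monitor_is_hdmi)
		return true;
	return false;	/* RADEON_AUDIO_DISABLE, or a non-HDMI monitor */
}

int main(void)
{
	printf("%d %d %d\n",
	       want_hdmi_mode(RADEON_AUDIO_ENABLE, false),	/* 1 */
	       want_hdmi_mode(RADEON_AUDIO_AUTO, true),		/* 1 */
	       want_hdmi_mode(RADEON_AUDIO_AUTO, false));	/* 0 */
	return 0;
}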
original_data; u32 addr; u32 extra_shift; - int ret; + int ret = 0; if (smc_start_address & 3) return -EINVAL; @@ -59,13 +60,14 @@ int ci_copy_bytes_to_smc(struct radeon_device *rdev, addr = smc_start_address; + spin_lock_irqsave(&rdev->smc_idx_lock, flags); while (byte_count >= 4) { /* SMC address space is BE */ data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3]; ret = ci_set_smc_sram_address(rdev, addr, limit); if (ret) - return ret; + goto done; WREG32(SMC_IND_DATA_0, data); @@ -80,7 +82,7 @@ int ci_copy_bytes_to_smc(struct radeon_device *rdev, ret = ci_set_smc_sram_address(rdev, addr, limit); if (ret) - return ret; + goto done; original_data = RREG32(SMC_IND_DATA_0); @@ -97,11 +99,15 @@ int ci_copy_bytes_to_smc(struct radeon_device *rdev, ret = ci_set_smc_sram_address(rdev, addr, limit); if (ret) - return ret; + goto done; WREG32(SMC_IND_DATA_0, data); } - return 0; + +done: + spin_unlock_irqrestore(&rdev->smc_idx_lock, flags); + + return ret; } void ci_start_smc(struct radeon_device *rdev) @@ -197,6 +203,7 @@ PPSMC_Result ci_wait_for_smc_inactive(struct radeon_device *rdev) int ci_load_smc_ucode(struct radeon_device *rdev, u32 limit) { + unsigned long flags; u32 ucode_start_address; u32 ucode_size; const u8 *src; @@ -219,6 +226,7 @@ int ci_load_smc_ucode(struct radeon_device *rdev, u32 limit) return -EINVAL; src = (const u8 *)rdev->smc_fw->data; + spin_lock_irqsave(&rdev->smc_idx_lock, flags); WREG32(SMC_IND_INDEX_0, ucode_start_address); WREG32_P(SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, ~AUTO_INCREMENT_IND_0); while (ucode_size >= 4) { @@ -231,6 +239,7 @@ int ci_load_smc_ucode(struct radeon_device *rdev, u32 limit) ucode_size -= 4; } WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0); + spin_unlock_irqrestore(&rdev->smc_idx_lock, flags); return 0; } @@ -238,25 +247,29 @@ int ci_load_smc_ucode(struct radeon_device *rdev, u32 limit) int ci_read_smc_sram_dword(struct radeon_device *rdev, u32 smc_address, u32 *value, u32 limit) { + unsigned long flags; int ret; + spin_lock_irqsave(&rdev->smc_idx_lock, flags); ret = ci_set_smc_sram_address(rdev, smc_address, limit); - if (ret) - return ret; + if (ret == 0) + *value = RREG32(SMC_IND_DATA_0); + spin_unlock_irqrestore(&rdev->smc_idx_lock, flags); - *value = RREG32(SMC_IND_DATA_0); - return 0; + return ret; } int ci_write_smc_sram_dword(struct radeon_device *rdev, u32 smc_address, u32 value, u32 limit) { + unsigned long flags; int ret; + spin_lock_irqsave(&rdev->smc_idx_lock, flags); ret = ci_set_smc_sram_address(rdev, smc_address, limit); - if (ret) - return ret; + if (ret == 0) + WREG32(SMC_IND_DATA_0, value); + spin_unlock_irqrestore(&rdev->smc_idx_lock, flags); - WREG32(SMC_IND_DATA_0, value); - return 0; + return ret; } diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index a3bba0587276..adbdb6503b05 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c @@ -77,6 +77,8 @@ static void cik_pcie_gen3_enable(struct radeon_device *rdev); static void cik_program_aspm(struct radeon_device *rdev); static void cik_init_pg(struct radeon_device *rdev); static void cik_init_cg(struct radeon_device *rdev); +static void cik_enable_gui_idle_interrupt(struct radeon_device *rdev, + bool enable); /* get temperature in millidegrees */ int ci_get_temp(struct radeon_device *rdev) @@ -120,20 +122,27 @@ int kv_get_temp(struct radeon_device *rdev) */ u32 cik_pciep_rreg(struct radeon_device *rdev, u32 reg) { + unsigned long flags; u32 r; + spin_lock_irqsave(&rdev->pciep_idx_lock, flags); 
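The ci_smc.c changes above (and the cik/r100/r420 accessors later in this diff) wrap every indexed SMC/PCIE/PLL/MC register access in a spinlock with interrupts disabled, because the index write and the following data read/write must not be interleaved with another user of the same index register. A hedged kernel-style sketch of the accessor pattern; the lock, register offsets and the WREG32/RREG32 helpers are local stand-ins, not the radeon definitions:

#include <linux/spinlock.h>
#include <linux/types.h>

/* Stand-ins for the device's INDEX/DATA pair and its register helpers. */
static u32 mmio[2];
#define IDX_REG		0
#define DATA_REG	1
#define WREG32(off, v)	(mmio[(off)] = (v))
#define RREG32(off)	(mmio[(off)])

static DEFINE_SPINLOCK(idx_lock);

static u32 indexed_rreg(u32 reg)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&idx_lock, flags);
	WREG32(IDX_REG, reg);		/* select the register... */
	val = RREG32(DATA_REG);		/* ...then read it, with no interleaving */
	spin_unlock_irqrestore(&idx_lock, flags);

	return val;
}

static void indexed_wreg(u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&idx_lock, flags);
	WREG32(IDX_REG, reg);
	WREG32(DATA_REG, v);
	spin_unlock_irqrestore(&idx_lock, flags);
}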
WREG32(PCIE_INDEX, reg); (void)RREG32(PCIE_INDEX); r = RREG32(PCIE_DATA); + spin_unlock_irqrestore(&rdev->pciep_idx_lock, flags); return r; } void cik_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v) { + unsigned long flags; + + spin_lock_irqsave(&rdev->pciep_idx_lock, flags); WREG32(PCIE_INDEX, reg); (void)RREG32(PCIE_INDEX); WREG32(PCIE_DATA, v); (void)RREG32(PCIE_DATA); + spin_unlock_irqrestore(&rdev->pciep_idx_lock, flags); } static const u32 spectre_rlc_save_restore_register_list[] = @@ -2722,7 +2731,8 @@ static void cik_gpu_init(struct radeon_device *rdev) } else if ((rdev->pdev->device == 0x1309) || (rdev->pdev->device == 0x130A) || (rdev->pdev->device == 0x130D) || - (rdev->pdev->device == 0x1313)) { + (rdev->pdev->device == 0x1313) || + (rdev->pdev->device == 0x131D)) { rdev->config.cik.max_cu_per_sh = 6; rdev->config.cik.max_backends_per_se = 2; } else if ((rdev->pdev->device == 0x1306) || @@ -4013,6 +4023,8 @@ static int cik_cp_resume(struct radeon_device *rdev) { int r; + cik_enable_gui_idle_interrupt(rdev, false); + r = cik_cp_load_microcode(rdev); if (r) return r; @@ -4024,6 +4036,8 @@ static int cik_cp_resume(struct radeon_device *rdev) if (r) return r; + cik_enable_gui_idle_interrupt(rdev, true); + return 0; } @@ -5376,7 +5390,9 @@ static void cik_enable_hdp_ls(struct radeon_device *rdev, void cik_update_cg(struct radeon_device *rdev, u32 block, bool enable) { + if (block & RADEON_CG_BLOCK_GFX) { + cik_enable_gui_idle_interrupt(rdev, false); /* order matters! */ if (enable) { cik_enable_mgcg(rdev, true); @@ -5385,6 +5401,7 @@ void cik_update_cg(struct radeon_device *rdev, cik_enable_cgcg(rdev, false); cik_enable_mgcg(rdev, false); } + cik_enable_gui_idle_interrupt(rdev, true); } if (block & RADEON_CG_BLOCK_MC) { @@ -5541,7 +5558,7 @@ static void cik_enable_gfx_cgpg(struct radeon_device *rdev, { u32 data, orig; - if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_CG)) { + if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG)) { orig = data = RREG32(RLC_PG_CNTL); data |= GFX_PG_ENABLE; if (orig != data) @@ -5805,7 +5822,7 @@ static void cik_init_pg(struct radeon_device *rdev) if (rdev->pg_flags) { cik_enable_sck_slowdown_on_pu(rdev, true); cik_enable_sck_slowdown_on_pd(rdev, true); - if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_CG) { + if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG) { cik_init_gfx_cgpg(rdev); cik_enable_cp_pg(rdev, true); cik_enable_gds_pg(rdev, true); @@ -5819,7 +5836,7 @@ static void cik_fini_pg(struct radeon_device *rdev) { if (rdev->pg_flags) { cik_update_gfx_pg(rdev, false); - if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_CG) { + if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG) { cik_enable_cp_pg(rdev, false); cik_enable_gds_pg(rdev, false); } @@ -5895,7 +5912,9 @@ static void cik_disable_interrupt_state(struct radeon_device *rdev) u32 tmp; /* gfx ring */ - WREG32(CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE); + tmp = RREG32(CP_INT_CNTL_RING0) & + (CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE); + WREG32(CP_INT_CNTL_RING0, tmp); /* sdma */ tmp = RREG32(SDMA0_CNTL + SDMA0_REGISTER_OFFSET) & ~TRAP_ENABLE; WREG32(SDMA0_CNTL + SDMA0_REGISTER_OFFSET, tmp); @@ -6036,8 +6055,7 @@ static int cik_irq_init(struct radeon_device *rdev) */ int cik_irq_set(struct radeon_device *rdev) { - u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE | - PRIV_INSTR_INT_ENABLE | PRIV_REG_INT_ENABLE; + u32 cp_int_cntl; u32 cp_m1p0, cp_m1p1, cp_m1p2, cp_m1p3; u32 cp_m2p0, cp_m2p1, cp_m2p2, cp_m2p3; u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, 
crtc5 = 0, crtc6 = 0; @@ -6058,6 +6076,10 @@ int cik_irq_set(struct radeon_device *rdev) return 0; } + cp_int_cntl = RREG32(CP_INT_CNTL_RING0) & + (CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE); + cp_int_cntl |= PRIV_INSTR_INT_ENABLE | PRIV_REG_INT_ENABLE; + hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN; hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN; hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN; diff --git a/drivers/gpu/drm/radeon/cypress_dpm.c b/drivers/gpu/drm/radeon/cypress_dpm.c index 95a66db08d9b..91bb470de0a3 100644 --- a/drivers/gpu/drm/radeon/cypress_dpm.c +++ b/drivers/gpu/drm/radeon/cypress_dpm.c @@ -2014,12 +2014,6 @@ int cypress_dpm_set_power_state(struct radeon_device *rdev) if (eg_pi->pcie_performance_request) cypress_notify_link_speed_change_after_state_change(rdev, new_ps, old_ps); - ret = rv770_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_AUTO); - if (ret) { - DRM_ERROR("rv770_dpm_force_performance_level failed\n"); - return ret; - } - return 0; } diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c index 8953255e894b..85a69d2ea3d2 100644 --- a/drivers/gpu/drm/radeon/dce6_afmt.c +++ b/drivers/gpu/drm/radeon/dce6_afmt.c @@ -28,22 +28,30 @@ static u32 dce6_endpoint_rreg(struct radeon_device *rdev, u32 block_offset, u32 reg) { + unsigned long flags; u32 r; + spin_lock_irqsave(&rdev->end_idx_lock, flags); WREG32(AZ_F0_CODEC_ENDPOINT_INDEX + block_offset, reg); r = RREG32(AZ_F0_CODEC_ENDPOINT_DATA + block_offset); + spin_unlock_irqrestore(&rdev->end_idx_lock, flags); + return r; } static void dce6_endpoint_wreg(struct radeon_device *rdev, u32 block_offset, u32 reg, u32 v) { + unsigned long flags; + + spin_lock_irqsave(&rdev->end_idx_lock, flags); if (ASIC_IS_DCE8(rdev)) WREG32(AZ_F0_CODEC_ENDPOINT_INDEX + block_offset, reg); else WREG32(AZ_F0_CODEC_ENDPOINT_INDEX + block_offset, AZ_ENDPOINT_REG_WRITE_EN | AZ_ENDPOINT_REG_INDEX(reg)); WREG32(AZ_F0_CODEC_ENDPOINT_DATA + block_offset, v); + spin_unlock_irqrestore(&rdev->end_idx_lock, flags); } #define RREG32_ENDPOINT(block, reg) dce6_endpoint_rreg(rdev, (block), (reg)) @@ -86,12 +94,12 @@ void dce6_afmt_select_pin(struct drm_encoder *encoder) struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; u32 offset = dig->afmt->offset; - u32 id = dig->afmt->pin->id; if (!dig->afmt->pin) return; - WREG32(AFMT_AUDIO_SRC_CONTROL + offset, AFMT_AUDIO_SRC_SELECT(id)); + WREG32(AFMT_AUDIO_SRC_CONTROL + offset, + AFMT_AUDIO_SRC_SELECT(dig->afmt->pin->id)); } void dce6_afmt_write_speaker_allocation(struct drm_encoder *encoder) diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c index ecd60809db4e..71399065db04 100644 --- a/drivers/gpu/drm/radeon/kv_dpm.c +++ b/drivers/gpu/drm/radeon/kv_dpm.c @@ -40,6 +40,7 @@ static int kv_calculate_dpm_settings(struct radeon_device *rdev); static void kv_enable_new_levels(struct radeon_device *rdev); static void kv_program_nbps_index_settings(struct radeon_device *rdev, struct radeon_ps *new_rps); +static int kv_set_enabled_level(struct radeon_device *rdev, u32 level); static int kv_set_enabled_levels(struct radeon_device *rdev); static int kv_force_dpm_highest(struct radeon_device *rdev); static int kv_force_dpm_lowest(struct radeon_device *rdev); @@ -519,7 +520,7 @@ static int kv_set_dpm_boot_state(struct radeon_device *rdev) static void kv_program_vc(struct radeon_device *rdev) { - WREG32_SMC(CG_FTV_0, 0x3FFFC000); + WREG32_SMC(CG_FTV_0, 
0x3FFFC100); } static void kv_clear_vc(struct radeon_device *rdev) @@ -638,7 +639,10 @@ static int kv_force_lowest_valid(struct radeon_device *rdev) static int kv_unforce_levels(struct radeon_device *rdev) { - return kv_notify_message_to_smu(rdev, PPSMC_MSG_NoForcedLevel); + if (rdev->family == CHIP_KABINI) + return kv_notify_message_to_smu(rdev, PPSMC_MSG_NoForcedLevel); + else + return kv_set_enabled_levels(rdev); } static int kv_update_sclk_t(struct radeon_device *rdev) @@ -667,9 +671,8 @@ static int kv_program_bootup_state(struct radeon_device *rdev) &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk; if (table && table->count) { - for (i = pi->graphics_dpm_level_count - 1; i >= 0; i--) { - if ((table->entries[i].clk == pi->boot_pl.sclk) || - (i == 0)) + for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) { + if (table->entries[i].clk == pi->boot_pl.sclk) break; } @@ -682,9 +685,8 @@ static int kv_program_bootup_state(struct radeon_device *rdev) if (table->num_max_dpm_entries == 0) return -EINVAL; - for (i = pi->graphics_dpm_level_count - 1; i >= 0; i--) { - if ((table->entries[i].sclk_frequency == pi->boot_pl.sclk) || - (i == 0)) + for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) { + if (table->entries[i].sclk_frequency == pi->boot_pl.sclk) break; } @@ -1078,6 +1080,13 @@ static int kv_enable_ulv(struct radeon_device *rdev, bool enable) PPSMC_MSG_EnableULV : PPSMC_MSG_DisableULV); } +static void kv_reset_acp_boot_level(struct radeon_device *rdev) +{ + struct kv_power_info *pi = kv_get_pi(rdev); + + pi->acp_boot_level = 0xff; +} + static void kv_update_current_ps(struct radeon_device *rdev, struct radeon_ps *rps) { @@ -1100,6 +1109,18 @@ static void kv_update_requested_ps(struct radeon_device *rdev, pi->requested_rps.ps_priv = &pi->requested_ps; } +void kv_dpm_enable_bapm(struct radeon_device *rdev, bool enable) +{ + struct kv_power_info *pi = kv_get_pi(rdev); + int ret; + + if (pi->bapm_enable) { + ret = kv_smc_bapm_enable(rdev, enable); + if (ret) + DRM_ERROR("kv_smc_bapm_enable failed\n"); + } +} + int kv_dpm_enable(struct radeon_device *rdev) { struct kv_power_info *pi = kv_get_pi(rdev); @@ -1192,6 +1213,8 @@ int kv_dpm_enable(struct radeon_device *rdev) return ret; } + kv_reset_acp_boot_level(rdev); + if (rdev->irq.installed && r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) { ret = kv_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX); @@ -1203,6 +1226,12 @@ int kv_dpm_enable(struct radeon_device *rdev) radeon_irq_set(rdev); } + ret = kv_smc_bapm_enable(rdev, false); + if (ret) { + DRM_ERROR("kv_smc_bapm_enable failed\n"); + return ret; + } + /* powerdown unused blocks for now */ kv_dpm_powergate_acp(rdev, true); kv_dpm_powergate_samu(rdev, true); @@ -1226,6 +1255,8 @@ void kv_dpm_disable(struct radeon_device *rdev) RADEON_CG_BLOCK_BIF | RADEON_CG_BLOCK_HDP), false); + kv_smc_bapm_enable(rdev, false); + /* powerup blocks */ kv_dpm_powergate_acp(rdev, false); kv_dpm_powergate_samu(rdev, false); @@ -1450,6 +1481,39 @@ static int kv_update_samu_dpm(struct radeon_device *rdev, bool gate) return kv_enable_samu_dpm(rdev, !gate); } +static u8 kv_get_acp_boot_level(struct radeon_device *rdev) +{ + u8 i; + struct radeon_clock_voltage_dependency_table *table = + &rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table; + + for (i = 0; i < table->count; i++) { + if (table->entries[i].clk >= 0) /* XXX */ + break; + } + + if (i >= table->count) + i = table->count - 1; + + return i; +} + +static void kv_update_acp_boot_level(struct radeon_device 
*rdev) +{ + struct kv_power_info *pi = kv_get_pi(rdev); + u8 acp_boot_level; + + if (!pi->caps_stable_p_state) { + acp_boot_level = kv_get_acp_boot_level(rdev); + if (acp_boot_level != pi->acp_boot_level) { + pi->acp_boot_level = acp_boot_level; + kv_send_msg_to_smc_with_parameter(rdev, + PPSMC_MSG_ACPDPM_SetEnabledMask, + (1 << pi->acp_boot_level)); + } + } +} + static int kv_update_acp_dpm(struct radeon_device *rdev, bool gate) { struct kv_power_info *pi = kv_get_pi(rdev); @@ -1461,7 +1525,7 @@ static int kv_update_acp_dpm(struct radeon_device *rdev, bool gate) if (pi->caps_stable_p_state) pi->acp_boot_level = table->count - 1; else - pi->acp_boot_level = 0; + pi->acp_boot_level = kv_get_acp_boot_level(rdev); ret = kv_copy_bytes_to_smc(rdev, pi->dpm_table_start + @@ -1588,13 +1652,11 @@ static void kv_set_valid_clock_range(struct radeon_device *rdev, } } - for (i = pi->graphics_dpm_level_count - 1; i >= 0; i--) { - if ((table->entries[i].clk <= new_ps->levels[new_ps->num_levels -1].sclk) || - (i == 0)) { - pi->highest_valid = i; + for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) { + if (table->entries[i].clk <= new_ps->levels[new_ps->num_levels - 1].sclk) break; - } } + pi->highest_valid = i; if (pi->lowest_valid > pi->highest_valid) { if ((new_ps->levels[0].sclk - table->entries[pi->highest_valid].clk) > @@ -1615,14 +1677,12 @@ static void kv_set_valid_clock_range(struct radeon_device *rdev, } } - for (i = pi->graphics_dpm_level_count - 1; i >= 0; i--) { + for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) { if (table->entries[i].sclk_frequency <= - new_ps->levels[new_ps->num_levels - 1].sclk || - i == 0) { - pi->highest_valid = i; + new_ps->levels[new_ps->num_levels - 1].sclk) break; - } } + pi->highest_valid = i; if (pi->lowest_valid > pi->highest_valid) { if ((new_ps->levels[0].sclk - @@ -1724,6 +1784,14 @@ int kv_dpm_set_power_state(struct radeon_device *rdev) RADEON_CG_BLOCK_BIF | RADEON_CG_BLOCK_HDP), false); + if (pi->bapm_enable) { + ret = kv_smc_bapm_enable(rdev, rdev->pm.dpm.ac_power); + if (ret) { + DRM_ERROR("kv_smc_bapm_enable failed\n"); + return ret; + } + } + if (rdev->family == CHIP_KABINI) { if (pi->enable_dpm) { kv_set_valid_clock_range(rdev, new_ps); @@ -1775,6 +1843,7 @@ int kv_dpm_set_power_state(struct radeon_device *rdev) return ret; } #endif + kv_update_acp_boot_level(rdev); kv_update_sclk_t(rdev); kv_enable_nb_dpm(rdev); } @@ -1785,7 +1854,6 @@ int kv_dpm_set_power_state(struct radeon_device *rdev) RADEON_CG_BLOCK_BIF | RADEON_CG_BLOCK_HDP), true); - rdev->pm.dpm.forced_level = RADEON_DPM_FORCED_LEVEL_AUTO; return 0; } @@ -1806,12 +1874,23 @@ void kv_dpm_setup_asic(struct radeon_device *rdev) void kv_dpm_reset_asic(struct radeon_device *rdev) { - kv_force_lowest_valid(rdev); - kv_init_graphics_levels(rdev); - kv_program_bootup_state(rdev); - kv_upload_dpm_settings(rdev); - kv_force_lowest_valid(rdev); - kv_unforce_levels(rdev); + struct kv_power_info *pi = kv_get_pi(rdev); + + if (rdev->family == CHIP_KABINI) { + kv_force_lowest_valid(rdev); + kv_init_graphics_levels(rdev); + kv_program_bootup_state(rdev); + kv_upload_dpm_settings(rdev); + kv_force_lowest_valid(rdev); + kv_unforce_levels(rdev); + } else { + kv_init_graphics_levels(rdev); + kv_program_bootup_state(rdev); + kv_freeze_sclk_dpm(rdev, true); + kv_upload_dpm_settings(rdev); + kv_freeze_sclk_dpm(rdev, false); + kv_set_enabled_level(rdev, pi->graphics_boot_level); + } } //XXX use sumo_dpm_display_configuration_changed @@ -1871,12 +1950,15 @@ static int kv_force_dpm_highest(struct 
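Several kv_dpm.c loops above are changed from "for (i = count - 1; i >= 0; i--)" with an explicit "(i == 0)" escape to "for (i = count - 1; i > 0; i--)", so the loop simply terminates at index 0 and that entry becomes the fallback when nothing matches. A small self-contained version of that descending-search idiom with an invented clock table:

#include <stdio.h>

/* Find the highest index whose clock is <= target; index 0 is the fallback. */
static unsigned int find_level(const unsigned int *clk, unsigned int count,
			       unsigned int target)
{
	unsigned int i;

	for (i = count - 1; i > 0; i--) {
		if (clk[i] <= target)
			break;
	}
	return i;	/* i == 0 if no entry matched */
}

int main(void)
{
	unsigned int clk[] = { 200, 300, 400, 600 };

	printf("%u %u %u\n",
	       find_level(clk, 4, 500),		/* 2 (400 MHz)   */
	       find_level(clk, 4, 700),		/* 3 (600 MHz)   */
	       find_level(clk, 4, 100));	/* 0 (fallback)  */
	return 0;
}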
radeon_device *rdev) if (ret) return ret; - for (i = SMU7_MAX_LEVELS_GRAPHICS - 1; i >= 0; i--) { + for (i = SMU7_MAX_LEVELS_GRAPHICS - 1; i > 0; i--) { if (enable_mask & (1 << i)) break; } - return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i); + if (rdev->family == CHIP_KABINI) + return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i); + else + return kv_set_enabled_level(rdev, i); } static int kv_force_dpm_lowest(struct radeon_device *rdev) @@ -1893,7 +1975,10 @@ static int kv_force_dpm_lowest(struct radeon_device *rdev) break; } - return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i); + if (rdev->family == CHIP_KABINI) + return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i); + else + return kv_set_enabled_level(rdev, i); } static u8 kv_get_sleep_divider_id_from_clock(struct radeon_device *rdev, @@ -1911,9 +1996,9 @@ static u8 kv_get_sleep_divider_id_from_clock(struct radeon_device *rdev, if (!pi->caps_sclk_ds) return 0; - for (i = KV_MAX_DEEPSLEEP_DIVIDER_ID; i <= 0; i--) { + for (i = KV_MAX_DEEPSLEEP_DIVIDER_ID; i > 0; i--) { temp = sclk / sumo_get_sleep_divider_from_id(i); - if ((temp >= min) || (i == 0)) + if (temp >= min) break; } @@ -2039,12 +2124,12 @@ static void kv_apply_state_adjust_rules(struct radeon_device *rdev, ps->dpmx_nb_ps_lo = 0x1; ps->dpmx_nb_ps_hi = 0x0; } else { - ps->dpm0_pg_nb_ps_lo = 0x1; + ps->dpm0_pg_nb_ps_lo = 0x3; ps->dpm0_pg_nb_ps_hi = 0x0; - ps->dpmx_nb_ps_lo = 0x2; - ps->dpmx_nb_ps_hi = 0x1; + ps->dpmx_nb_ps_lo = 0x3; + ps->dpmx_nb_ps_hi = 0x0; - if (pi->sys_info.nb_dpm_enable && pi->battery_state) { + if (pi->sys_info.nb_dpm_enable) { force_high = (mclk >= pi->sys_info.nbp_memory_clock[3]) || pi->video_start || (rdev->pm.dpm.new_active_crtc_count >= 3) || pi->disable_nb_ps3_in_battery; @@ -2210,6 +2295,15 @@ static void kv_enable_new_levels(struct radeon_device *rdev) } } +static int kv_set_enabled_level(struct radeon_device *rdev, u32 level) +{ + u32 new_mask = (1 << level); + + return kv_send_msg_to_smc_with_parameter(rdev, + PPSMC_MSG_SCLKDPM_SetEnabledMask, + new_mask); +} + static int kv_set_enabled_levels(struct radeon_device *rdev) { struct kv_power_info *pi = kv_get_pi(rdev); diff --git a/drivers/gpu/drm/radeon/kv_dpm.h b/drivers/gpu/drm/radeon/kv_dpm.h index 32bb079572d7..8cef7525d7a8 100644 --- a/drivers/gpu/drm/radeon/kv_dpm.h +++ b/drivers/gpu/drm/radeon/kv_dpm.h @@ -192,6 +192,7 @@ int kv_send_msg_to_smc_with_parameter(struct radeon_device *rdev, int kv_read_smc_sram_dword(struct radeon_device *rdev, u32 smc_address, u32 *value, u32 limit); int kv_smc_dpm_enable(struct radeon_device *rdev, bool enable); +int kv_smc_bapm_enable(struct radeon_device *rdev, bool enable); int kv_copy_bytes_to_smc(struct radeon_device *rdev, u32 smc_start_address, const u8 *src, u32 byte_count, u32 limit); diff --git a/drivers/gpu/drm/radeon/kv_smc.c b/drivers/gpu/drm/radeon/kv_smc.c index 34a226d7e34a..0000b59a6d05 100644 --- a/drivers/gpu/drm/radeon/kv_smc.c +++ b/drivers/gpu/drm/radeon/kv_smc.c @@ -107,6 +107,14 @@ int kv_smc_dpm_enable(struct radeon_device *rdev, bool enable) return kv_notify_message_to_smu(rdev, PPSMC_MSG_DPM_Disable); } +int kv_smc_bapm_enable(struct radeon_device *rdev, bool enable) +{ + if (enable) + return kv_notify_message_to_smu(rdev, PPSMC_MSG_EnableBAPM); + else + return kv_notify_message_to_smu(rdev, PPSMC_MSG_DisableBAPM); +} + int kv_copy_bytes_to_smc(struct radeon_device *rdev, u32 smc_start_address, const u8 *src, u32 byte_count, u32 limit) 
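The kv_dpm.c hunks above make DPM level forcing branch on the ASIC family: Kabini keeps the explicit force/unforce SMU messages, while the other KV parts instead restrict the SCLK DPM enabled mask to a single level (kv_set_enabled_level() builds a one-bit mask). A minimal sketch of that dispatch, assuming only the helpers and message IDs shown in the hunks above; the wrapper name below is hypothetical and not part of the patch:

static int example_kv_force_level(struct radeon_device *rdev, u32 level)
{
	/* Kabini: ask the SMU to pin the requested state directly. */
	if (rdev->family == CHIP_KABINI)
		return kv_send_msg_to_smc_with_parameter(rdev,
							 PPSMC_MSG_DPM_ForceState,
							 level);

	/*
	 * Other KV parts: enable exactly one SCLK level through the mask,
	 * which is what kv_set_enabled_level() does in the hunk above.
	 */
	return kv_send_msg_to_smc_with_parameter(rdev,
						 PPSMC_MSG_SCLKDPM_SetEnabledMask,
						 1 << level);
}

kv_force_dpm_highest() and kv_force_dpm_lowest() in the diff follow this shape, differing only in how the level is picked from the enabled mask; kv_unforce_levels() is the inverse, sending PPSMC_MSG_NoForcedLevel on Kabini and restoring the full mask via kv_set_enabled_levels() elsewhere.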
diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c index f7b625c9e0e9..6c398a456d78 100644 --- a/drivers/gpu/drm/radeon/ni_dpm.c +++ b/drivers/gpu/drm/radeon/ni_dpm.c @@ -3865,12 +3865,6 @@ int ni_dpm_set_power_state(struct radeon_device *rdev) return ret; } - ret = ni_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_AUTO); - if (ret) { - DRM_ERROR("ni_dpm_force_performance_level failed\n"); - return ret; - } - return 0; } diff --git a/drivers/gpu/drm/radeon/ppsmc.h b/drivers/gpu/drm/radeon/ppsmc.h index 682842804bce..5670b8291285 100644 --- a/drivers/gpu/drm/radeon/ppsmc.h +++ b/drivers/gpu/drm/radeon/ppsmc.h @@ -163,6 +163,8 @@ typedef uint8_t PPSMC_Result; #define PPSMC_MSG_VCEPowerON ((uint32_t) 0x10f) #define PPSMC_MSG_DCE_RemoveVoltageAdjustment ((uint32_t) 0x11d) #define PPSMC_MSG_DCE_AllowVoltageAdjustment ((uint32_t) 0x11e) +#define PPSMC_MSG_EnableBAPM ((uint32_t) 0x120) +#define PPSMC_MSG_DisableBAPM ((uint32_t) 0x121) #define PPSMC_MSG_UVD_DPM_Config ((uint32_t) 0x124) diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index 9fc61dd68bc0..24175717307b 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c @@ -2853,21 +2853,28 @@ static void r100_pll_errata_after_data(struct radeon_device *rdev) uint32_t r100_pll_rreg(struct radeon_device *rdev, uint32_t reg) { + unsigned long flags; uint32_t data; + spin_lock_irqsave(&rdev->pll_idx_lock, flags); WREG8(RADEON_CLOCK_CNTL_INDEX, reg & 0x3f); r100_pll_errata_after_index(rdev); data = RREG32(RADEON_CLOCK_CNTL_DATA); r100_pll_errata_after_data(rdev); + spin_unlock_irqrestore(&rdev->pll_idx_lock, flags); return data; } void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) { + unsigned long flags; + + spin_lock_irqsave(&rdev->pll_idx_lock, flags); WREG8(RADEON_CLOCK_CNTL_INDEX, ((reg & 0x3f) | RADEON_PLL_WR_EN)); r100_pll_errata_after_index(rdev); WREG32(RADEON_CLOCK_CNTL_DATA, v); r100_pll_errata_after_data(rdev); + spin_unlock_irqrestore(&rdev->pll_idx_lock, flags); } static void r100_set_safe_registers(struct radeon_device *rdev) diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c index 4e796ecf9ea4..6edf2b3a52b4 100644 --- a/drivers/gpu/drm/radeon/r420.c +++ b/drivers/gpu/drm/radeon/r420.c @@ -160,18 +160,25 @@ void r420_pipes_init(struct radeon_device *rdev) u32 r420_mc_rreg(struct radeon_device *rdev, u32 reg) { + unsigned long flags; u32 r; + spin_lock_irqsave(&rdev->mc_idx_lock, flags); WREG32(R_0001F8_MC_IND_INDEX, S_0001F8_MC_IND_ADDR(reg)); r = RREG32(R_0001FC_MC_IND_DATA); + spin_unlock_irqrestore(&rdev->mc_idx_lock, flags); return r; } void r420_mc_wreg(struct radeon_device *rdev, u32 reg, u32 v) { + unsigned long flags; + + spin_lock_irqsave(&rdev->mc_idx_lock, flags); WREG32(R_0001F8_MC_IND_INDEX, S_0001F8_MC_IND_ADDR(reg) | S_0001F8_MC_IND_WR_EN(1)); WREG32(R_0001FC_MC_IND_DATA, v); + spin_unlock_irqrestore(&rdev->mc_idx_lock, flags); } static void r420_debugfs(struct radeon_device *rdev) diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index ea4d3734e6d9..2a1b1876b431 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -119,6 +119,11 @@ u32 r600_get_xclk(struct radeon_device *rdev) return rdev->clock.spll.reference_freq; } +int r600_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk) +{ + return 0; +} + /* get temperature in millidegrees */ int rv6xx_get_temp(struct radeon_device *rdev) { @@ -1045,20 +1050,27 @@ int 
r600_mc_wait_for_idle(struct radeon_device *rdev) uint32_t rs780_mc_rreg(struct radeon_device *rdev, uint32_t reg) { + unsigned long flags; uint32_t r; + spin_lock_irqsave(&rdev->mc_idx_lock, flags); WREG32(R_0028F8_MC_INDEX, S_0028F8_MC_IND_ADDR(reg)); r = RREG32(R_0028FC_MC_DATA); WREG32(R_0028F8_MC_INDEX, ~C_0028F8_MC_IND_ADDR); + spin_unlock_irqrestore(&rdev->mc_idx_lock, flags); return r; } void rs780_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) { + unsigned long flags; + + spin_lock_irqsave(&rdev->mc_idx_lock, flags); WREG32(R_0028F8_MC_INDEX, S_0028F8_MC_IND_ADDR(reg) | S_0028F8_MC_IND_WR_EN(1)); WREG32(R_0028FC_MC_DATA, v); WREG32(R_0028F8_MC_INDEX, 0x7F); + spin_unlock_irqrestore(&rdev->mc_idx_lock, flags); } static void r600_mc_program(struct radeon_device *rdev) @@ -2092,20 +2104,27 @@ static void r600_gpu_init(struct radeon_device *rdev) */ u32 r600_pciep_rreg(struct radeon_device *rdev, u32 reg) { + unsigned long flags; u32 r; + spin_lock_irqsave(&rdev->pciep_idx_lock, flags); WREG32(PCIE_PORT_INDEX, ((reg) & 0xff)); (void)RREG32(PCIE_PORT_INDEX); r = RREG32(PCIE_PORT_DATA); + spin_unlock_irqrestore(&rdev->pciep_idx_lock, flags); return r; } void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v) { + unsigned long flags; + + spin_lock_irqsave(&rdev->pciep_idx_lock, flags); WREG32(PCIE_PORT_INDEX, ((reg) & 0xff)); (void)RREG32(PCIE_PORT_INDEX); WREG32(PCIE_PORT_DATA, (v)); (void)RREG32(PCIE_PORT_DATA); + spin_unlock_irqrestore(&rdev->pciep_idx_lock, flags); } /* diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c index fa0de46fcc0d..e65f211a7be0 100644 --- a/drivers/gpu/drm/radeon/r600_dpm.c +++ b/drivers/gpu/drm/radeon/r600_dpm.c @@ -1219,30 +1219,20 @@ int r600_parse_extended_power_table(struct radeon_device *rdev) void r600_free_extended_power_table(struct radeon_device *rdev) { - if (rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries) - kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries); - if (rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries) - kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries); - if (rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries) - kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries); - if (rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries) - kfree(rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries); - if (rdev->pm.dpm.dyn_state.cac_leakage_table.entries) - kfree(rdev->pm.dpm.dyn_state.cac_leakage_table.entries); - if (rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) - kfree(rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries); - if (rdev->pm.dpm.dyn_state.ppm_table) - kfree(rdev->pm.dpm.dyn_state.ppm_table); - if (rdev->pm.dpm.dyn_state.cac_tdp_table) - kfree(rdev->pm.dpm.dyn_state.cac_tdp_table); - if (rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries) - kfree(rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries); - if (rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries) - kfree(rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries); - if (rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries) - kfree(rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries); - if (rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries) - kfree(rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries); + struct radeon_dpm_dynamic_state *dyn_state = &rdev->pm.dpm.dyn_state; + + kfree(dyn_state->vddc_dependency_on_sclk.entries); + 
kfree(dyn_state->vddci_dependency_on_mclk.entries); + kfree(dyn_state->vddc_dependency_on_mclk.entries); + kfree(dyn_state->mvdd_dependency_on_mclk.entries); + kfree(dyn_state->cac_leakage_table.entries); + kfree(dyn_state->phase_shedding_limits_table.entries); + kfree(dyn_state->ppm_table); + kfree(dyn_state->cac_tdp_table); + kfree(dyn_state->vce_clock_voltage_dependency_table.entries); + kfree(dyn_state->uvd_clock_voltage_dependency_table.entries); + kfree(dyn_state->samu_clock_voltage_dependency_table.entries); + kfree(dyn_state->acp_clock_voltage_dependency_table.entries); } enum radeon_pcie_gen r600_get_pcie_gen_support(struct radeon_device *rdev, diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h index 454f90a849e4..e673fe26ea84 100644 --- a/drivers/gpu/drm/radeon/r600d.h +++ b/drivers/gpu/drm/radeon/r600d.h @@ -1040,7 +1040,7 @@ # define HDMI0_AVI_INFO_CONT (1 << 1) # define HDMI0_AUDIO_INFO_SEND (1 << 4) # define HDMI0_AUDIO_INFO_CONT (1 << 5) -# define HDMI0_AUDIO_INFO_SOURCE (1 << 6) /* 0 - sound block; 1 - hmdi regs */ +# define HDMI0_AUDIO_INFO_SOURCE (1 << 6) /* 0 - sound block; 1 - hdmi regs */ # define HDMI0_AUDIO_INFO_UPDATE (1 << 7) # define HDMI0_MPEG_INFO_SEND (1 << 8) # define HDMI0_MPEG_INFO_CONT (1 << 9) diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index ff8b564ce2b2..a400ac1c4147 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -181,7 +181,7 @@ extern int radeon_aspm; #define RADEON_CG_SUPPORT_HDP_MGCG (1 << 16) /* PG flags */ -#define RADEON_PG_SUPPORT_GFX_CG (1 << 0) +#define RADEON_PG_SUPPORT_GFX_PG (1 << 0) #define RADEON_PG_SUPPORT_GFX_SMG (1 << 1) #define RADEON_PG_SUPPORT_GFX_DMG (1 << 2) #define RADEON_PG_SUPPORT_UVD (1 << 3) @@ -1778,6 +1778,7 @@ struct radeon_asic { int (*force_performance_level)(struct radeon_device *rdev, enum radeon_dpm_forced_level level); bool (*vblank_too_short)(struct radeon_device *rdev); void (*powergate_uvd)(struct radeon_device *rdev, bool gate); + void (*enable_bapm)(struct radeon_device *rdev, bool enable); } dpm; /* pageflipping */ struct { @@ -2110,6 +2111,28 @@ struct radeon_device { resource_size_t rmmio_size; /* protects concurrent MM_INDEX/DATA based register access */ spinlock_t mmio_idx_lock; + /* protects concurrent SMC based register access */ + spinlock_t smc_idx_lock; + /* protects concurrent PLL register access */ + spinlock_t pll_idx_lock; + /* protects concurrent MC register access */ + spinlock_t mc_idx_lock; + /* protects concurrent PCIE register access */ + spinlock_t pcie_idx_lock; + /* protects concurrent PCIE_PORT register access */ + spinlock_t pciep_idx_lock; + /* protects concurrent PIF register access */ + spinlock_t pif_idx_lock; + /* protects concurrent CG register access */ + spinlock_t cg_idx_lock; + /* protects concurrent UVD register access */ + spinlock_t uvd_idx_lock; + /* protects concurrent RCU register access */ + spinlock_t rcu_idx_lock; + /* protects concurrent DIDT register access */ + spinlock_t didt_idx_lock; + /* protects concurrent ENDPOINT (audio) register access */ + spinlock_t end_idx_lock; void __iomem *rmmio; radeon_rreg_t mc_rreg; radeon_wreg_t mc_wreg; @@ -2277,123 +2300,179 @@ void cik_mm_wdoorbell(struct radeon_device *rdev, u32 offset, u32 v); */ static inline uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg) { + unsigned long flags; uint32_t r; + spin_lock_irqsave(&rdev->pcie_idx_lock, flags); WREG32(RADEON_PCIE_INDEX, ((reg) & rdev->pcie_reg_mask)); r = 
RREG32(RADEON_PCIE_DATA); + spin_unlock_irqrestore(&rdev->pcie_idx_lock, flags); return r; } static inline void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) { + unsigned long flags; + + spin_lock_irqsave(&rdev->pcie_idx_lock, flags); WREG32(RADEON_PCIE_INDEX, ((reg) & rdev->pcie_reg_mask)); WREG32(RADEON_PCIE_DATA, (v)); + spin_unlock_irqrestore(&rdev->pcie_idx_lock, flags); } static inline u32 tn_smc_rreg(struct radeon_device *rdev, u32 reg) { + unsigned long flags; u32 r; + spin_lock_irqsave(&rdev->smc_idx_lock, flags); WREG32(TN_SMC_IND_INDEX_0, (reg)); r = RREG32(TN_SMC_IND_DATA_0); + spin_unlock_irqrestore(&rdev->smc_idx_lock, flags); return r; } static inline void tn_smc_wreg(struct radeon_device *rdev, u32 reg, u32 v) { + unsigned long flags; + + spin_lock_irqsave(&rdev->smc_idx_lock, flags); WREG32(TN_SMC_IND_INDEX_0, (reg)); WREG32(TN_SMC_IND_DATA_0, (v)); + spin_unlock_irqrestore(&rdev->smc_idx_lock, flags); } static inline u32 r600_rcu_rreg(struct radeon_device *rdev, u32 reg) { + unsigned long flags; u32 r; + spin_lock_irqsave(&rdev->rcu_idx_lock, flags); WREG32(R600_RCU_INDEX, ((reg) & 0x1fff)); r = RREG32(R600_RCU_DATA); + spin_unlock_irqrestore(&rdev->rcu_idx_lock, flags); return r; } static inline void r600_rcu_wreg(struct radeon_device *rdev, u32 reg, u32 v) { + unsigned long flags; + + spin_lock_irqsave(&rdev->rcu_idx_lock, flags); WREG32(R600_RCU_INDEX, ((reg) & 0x1fff)); WREG32(R600_RCU_DATA, (v)); + spin_unlock_irqrestore(&rdev->rcu_idx_lock, flags); } static inline u32 eg_cg_rreg(struct radeon_device *rdev, u32 reg) { + unsigned long flags; u32 r; + spin_lock_irqsave(&rdev->cg_idx_lock, flags); WREG32(EVERGREEN_CG_IND_ADDR, ((reg) & 0xffff)); r = RREG32(EVERGREEN_CG_IND_DATA); + spin_unlock_irqrestore(&rdev->cg_idx_lock, flags); return r; } static inline void eg_cg_wreg(struct radeon_device *rdev, u32 reg, u32 v) { + unsigned long flags; + + spin_lock_irqsave(&rdev->cg_idx_lock, flags); WREG32(EVERGREEN_CG_IND_ADDR, ((reg) & 0xffff)); WREG32(EVERGREEN_CG_IND_DATA, (v)); + spin_unlock_irqrestore(&rdev->cg_idx_lock, flags); } static inline u32 eg_pif_phy0_rreg(struct radeon_device *rdev, u32 reg) { + unsigned long flags; u32 r; + spin_lock_irqsave(&rdev->pif_idx_lock, flags); WREG32(EVERGREEN_PIF_PHY0_INDEX, ((reg) & 0xffff)); r = RREG32(EVERGREEN_PIF_PHY0_DATA); + spin_unlock_irqrestore(&rdev->pif_idx_lock, flags); return r; } static inline void eg_pif_phy0_wreg(struct radeon_device *rdev, u32 reg, u32 v) { + unsigned long flags; + + spin_lock_irqsave(&rdev->pif_idx_lock, flags); WREG32(EVERGREEN_PIF_PHY0_INDEX, ((reg) & 0xffff)); WREG32(EVERGREEN_PIF_PHY0_DATA, (v)); + spin_unlock_irqrestore(&rdev->pif_idx_lock, flags); } static inline u32 eg_pif_phy1_rreg(struct radeon_device *rdev, u32 reg) { + unsigned long flags; u32 r; + spin_lock_irqsave(&rdev->pif_idx_lock, flags); WREG32(EVERGREEN_PIF_PHY1_INDEX, ((reg) & 0xffff)); r = RREG32(EVERGREEN_PIF_PHY1_DATA); + spin_unlock_irqrestore(&rdev->pif_idx_lock, flags); return r; } static inline void eg_pif_phy1_wreg(struct radeon_device *rdev, u32 reg, u32 v) { + unsigned long flags; + + spin_lock_irqsave(&rdev->pif_idx_lock, flags); WREG32(EVERGREEN_PIF_PHY1_INDEX, ((reg) & 0xffff)); WREG32(EVERGREEN_PIF_PHY1_DATA, (v)); + spin_unlock_irqrestore(&rdev->pif_idx_lock, flags); } static inline u32 r600_uvd_ctx_rreg(struct radeon_device *rdev, u32 reg) { + unsigned long flags; u32 r; + spin_lock_irqsave(&rdev->uvd_idx_lock, flags); WREG32(R600_UVD_CTX_INDEX, ((reg) & 0x1ff)); r = 
RREG32(R600_UVD_CTX_DATA); + spin_unlock_irqrestore(&rdev->uvd_idx_lock, flags); return r; } static inline void r600_uvd_ctx_wreg(struct radeon_device *rdev, u32 reg, u32 v) { + unsigned long flags; + + spin_lock_irqsave(&rdev->uvd_idx_lock, flags); WREG32(R600_UVD_CTX_INDEX, ((reg) & 0x1ff)); WREG32(R600_UVD_CTX_DATA, (v)); + spin_unlock_irqrestore(&rdev->uvd_idx_lock, flags); } static inline u32 cik_didt_rreg(struct radeon_device *rdev, u32 reg) { + unsigned long flags; u32 r; + spin_lock_irqsave(&rdev->didt_idx_lock, flags); WREG32(CIK_DIDT_IND_INDEX, (reg)); r = RREG32(CIK_DIDT_IND_DATA); + spin_unlock_irqrestore(&rdev->didt_idx_lock, flags); return r; } static inline void cik_didt_wreg(struct radeon_device *rdev, u32 reg, u32 v) { + unsigned long flags; + + spin_lock_irqsave(&rdev->didt_idx_lock, flags); WREG32(CIK_DIDT_IND_INDEX, (reg)); WREG32(CIK_DIDT_IND_DATA, (v)); + spin_unlock_irqrestore(&rdev->didt_idx_lock, flags); } void r100_pll_errata_after_index(struct radeon_device *rdev); @@ -2569,6 +2648,7 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v); #define radeon_dpm_force_performance_level(rdev, l) rdev->asic->dpm.force_performance_level((rdev), (l)) #define radeon_dpm_vblank_too_short(rdev) rdev->asic->dpm.vblank_too_short((rdev)) #define radeon_dpm_powergate_uvd(rdev, g) rdev->asic->dpm.powergate_uvd((rdev), (g)) +#define radeon_dpm_enable_bapm(rdev, e) rdev->asic->dpm.enable_bapm((rdev), (e)) /* Common functions */ /* AGP */ diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index 630853b96841..5003385a7512 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c @@ -1037,6 +1037,7 @@ static struct radeon_asic rv6xx_asic = { .set_pcie_lanes = &r600_set_pcie_lanes, .set_clock_gating = NULL, .get_temperature = &rv6xx_get_temp, + .set_uvd_clocks = &r600_set_uvd_clocks, }, .dpm = { .init = &rv6xx_dpm_init, @@ -1126,6 +1127,7 @@ static struct radeon_asic rs780_asic = { .set_pcie_lanes = NULL, .set_clock_gating = NULL, .get_temperature = &rv6xx_get_temp, + .set_uvd_clocks = &r600_set_uvd_clocks, }, .dpm = { .init = &rs780_dpm_init, @@ -1141,6 +1143,7 @@ static struct radeon_asic rs780_asic = { .get_mclk = &rs780_dpm_get_mclk, .print_power_state = &rs780_dpm_print_power_state, .debugfs_print_current_performance_level = &rs780_dpm_debugfs_print_current_performance_level, + .force_performance_level = &rs780_dpm_force_performance_level, }, .pflip = { .pre_page_flip = &rs600_pre_page_flip, @@ -1791,6 +1794,7 @@ static struct radeon_asic trinity_asic = { .print_power_state = &trinity_dpm_print_power_state, .debugfs_print_current_performance_level = &trinity_dpm_debugfs_print_current_performance_level, .force_performance_level = &trinity_dpm_force_performance_level, + .enable_bapm = &trinity_dpm_enable_bapm, }, .pflip = { .pre_page_flip = &evergreen_pre_page_flip, @@ -2166,6 +2170,7 @@ static struct radeon_asic kv_asic = { .debugfs_print_current_performance_level = &kv_dpm_debugfs_print_current_performance_level, .force_performance_level = &kv_dpm_force_performance_level, .powergate_uvd = &kv_dpm_powergate_uvd, + .enable_bapm = &kv_dpm_enable_bapm, }, .pflip = { .pre_page_flip = &evergreen_pre_page_flip, @@ -2390,7 +2395,7 @@ int radeon_asic_init(struct radeon_device *rdev) RADEON_CG_SUPPORT_HDP_LS | RADEON_CG_SUPPORT_HDP_MGCG; rdev->pg_flags = 0 | - /*RADEON_PG_SUPPORT_GFX_CG | */ + /*RADEON_PG_SUPPORT_GFX_PG | */ RADEON_PG_SUPPORT_SDMA; break; case CHIP_OLAND: @@ -2479,7 +2484,7 @@ int 
radeon_asic_init(struct radeon_device *rdev) RADEON_CG_SUPPORT_HDP_LS | RADEON_CG_SUPPORT_HDP_MGCG; rdev->pg_flags = 0; - /*RADEON_PG_SUPPORT_GFX_CG | + /*RADEON_PG_SUPPORT_GFX_PG | RADEON_PG_SUPPORT_GFX_SMG | RADEON_PG_SUPPORT_GFX_DMG | RADEON_PG_SUPPORT_UVD | @@ -2507,7 +2512,7 @@ int radeon_asic_init(struct radeon_device *rdev) RADEON_CG_SUPPORT_HDP_LS | RADEON_CG_SUPPORT_HDP_MGCG; rdev->pg_flags = 0; - /*RADEON_PG_SUPPORT_GFX_CG | + /*RADEON_PG_SUPPORT_GFX_PG | RADEON_PG_SUPPORT_GFX_SMG | RADEON_PG_SUPPORT_UVD | RADEON_PG_SUPPORT_VCE | diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index 818bbe6b884b..70c29d5e080d 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h @@ -389,6 +389,7 @@ int r600_mc_wait_for_idle(struct radeon_device *rdev); u32 r600_get_xclk(struct radeon_device *rdev); uint64_t r600_get_gpu_clock_counter(struct radeon_device *rdev); int rv6xx_get_temp(struct radeon_device *rdev); +int r600_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk); int r600_dpm_pre_set_power_state(struct radeon_device *rdev); void r600_dpm_post_set_power_state(struct radeon_device *rdev); /* r600 dma */ @@ -428,6 +429,8 @@ void rs780_dpm_print_power_state(struct radeon_device *rdev, struct radeon_ps *ps); void rs780_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev, struct seq_file *m); +int rs780_dpm_force_performance_level(struct radeon_device *rdev, + enum radeon_dpm_forced_level level); /* * rv770,rv730,rv710,rv740 @@ -625,6 +628,7 @@ void trinity_dpm_debugfs_print_current_performance_level(struct radeon_device *r struct seq_file *m); int trinity_dpm_force_performance_level(struct radeon_device *rdev, enum radeon_dpm_forced_level level); +void trinity_dpm_enable_bapm(struct radeon_device *rdev, bool enable); /* DCE6 - SI */ void dce6_bandwidth_update(struct radeon_device *rdev); @@ -781,6 +785,7 @@ void kv_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev, int kv_dpm_force_performance_level(struct radeon_device *rdev, enum radeon_dpm_forced_level level); void kv_dpm_powergate_uvd(struct radeon_device *rdev, bool gate); +void kv_dpm_enable_bapm(struct radeon_device *rdev, bool enable); /* uvd v1.0 */ uint32_t uvd_v1_0_get_rptr(struct radeon_device *rdev, diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index 2399f25ec037..79159b5da05b 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c @@ -396,6 +396,21 @@ static int radeon_connector_set_property(struct drm_connector *connector, struct } } + if (property == rdev->mode_info.audio_property) { + struct radeon_connector *radeon_connector = to_radeon_connector(connector); + /* need to find digital encoder on connector */ + encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS); + if (!encoder) + return 0; + + radeon_encoder = to_radeon_encoder(encoder); + + if (radeon_connector->audio != val) { + radeon_connector->audio = val; + radeon_property_change_mode(&radeon_encoder->base); + } + } + if (property == rdev->mode_info.underscan_property) { /* need to find digital encoder on connector */ encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS); @@ -1420,7 +1435,7 @@ radeon_dp_detect(struct drm_connector *connector, bool force) if (radeon_dp_getdpcd(radeon_connector)) ret = connector_status_connected; } else { - /* try non-aux ddc (DP to DVI/HMDI/etc. 
adapter) */ + /* try non-aux ddc (DP to DVI/HDMI/etc. adapter) */ if (radeon_ddc_probe(radeon_connector, false)) ret = connector_status_connected; } @@ -1489,6 +1504,24 @@ static const struct drm_connector_funcs radeon_dp_connector_funcs = { .force = radeon_dvi_force, }; +static const struct drm_connector_funcs radeon_edp_connector_funcs = { + .dpms = drm_helper_connector_dpms, + .detect = radeon_dp_detect, + .fill_modes = drm_helper_probe_single_connector_modes, + .set_property = radeon_lvds_set_property, + .destroy = radeon_dp_connector_destroy, + .force = radeon_dvi_force, +}; + +static const struct drm_connector_funcs radeon_lvds_bridge_connector_funcs = { + .dpms = drm_helper_connector_dpms, + .detect = radeon_dp_detect, + .fill_modes = drm_helper_probe_single_connector_modes, + .set_property = radeon_lvds_set_property, + .destroy = radeon_dp_connector_destroy, + .force = radeon_dvi_force, +}; + void radeon_add_atom_connector(struct drm_device *dev, uint32_t connector_id, @@ -1580,8 +1613,6 @@ radeon_add_atom_connector(struct drm_device *dev, goto failed; radeon_dig_connector->igp_lane_info = igp_lane_info; radeon_connector->con_priv = radeon_dig_connector; - drm_connector_init(dev, &radeon_connector->base, &radeon_dp_connector_funcs, connector_type); - drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs); if (i2c_bus->valid) { /* add DP i2c bus */ if (connector_type == DRM_MODE_CONNECTOR_eDP) @@ -1598,6 +1629,10 @@ radeon_add_atom_connector(struct drm_device *dev, case DRM_MODE_CONNECTOR_VGA: case DRM_MODE_CONNECTOR_DVIA: default: + drm_connector_init(dev, &radeon_connector->base, + &radeon_dp_connector_funcs, connector_type); + drm_connector_helper_add(&radeon_connector->base, + &radeon_dp_connector_helper_funcs); connector->interlace_allowed = true; connector->doublescan_allowed = true; radeon_connector->dac_load_detect = true; @@ -1610,6 +1645,10 @@ radeon_add_atom_connector(struct drm_device *dev, case DRM_MODE_CONNECTOR_HDMIA: case DRM_MODE_CONNECTOR_HDMIB: case DRM_MODE_CONNECTOR_DisplayPort: + drm_connector_init(dev, &radeon_connector->base, + &radeon_dp_connector_funcs, connector_type); + drm_connector_helper_add(&radeon_connector->base, + &radeon_dp_connector_helper_funcs); drm_object_attach_property(&radeon_connector->base.base, rdev->mode_info.underscan_property, UNDERSCAN_OFF); @@ -1619,6 +1658,9 @@ radeon_add_atom_connector(struct drm_device *dev, drm_object_attach_property(&radeon_connector->base.base, rdev->mode_info.underscan_vborder_property, 0); + drm_object_attach_property(&radeon_connector->base.base, + rdev->mode_info.audio_property, + RADEON_AUDIO_DISABLE); subpixel_order = SubPixelHorizontalRGB; connector->interlace_allowed = true; if (connector_type == DRM_MODE_CONNECTOR_HDMIB) @@ -1634,6 +1676,10 @@ radeon_add_atom_connector(struct drm_device *dev, break; case DRM_MODE_CONNECTOR_LVDS: case DRM_MODE_CONNECTOR_eDP: + drm_connector_init(dev, &radeon_connector->base, + &radeon_lvds_bridge_connector_funcs, connector_type); + drm_connector_helper_add(&radeon_connector->base, + &radeon_dp_connector_helper_funcs); drm_object_attach_property(&radeon_connector->base.base, dev->mode_config.scaling_mode_property, DRM_MODE_SCALE_FULLSCREEN); @@ -1708,6 +1754,11 @@ radeon_add_atom_connector(struct drm_device *dev, rdev->mode_info.underscan_vborder_property, 0); } + if (ASIC_IS_DCE2(rdev)) { + drm_object_attach_property(&radeon_connector->base.base, + rdev->mode_info.audio_property, + RADEON_AUDIO_DISABLE); + } if (connector_type == 
DRM_MODE_CONNECTOR_DVII) { radeon_connector->dac_load_detect = true; drm_object_attach_property(&radeon_connector->base.base, @@ -1748,6 +1799,11 @@ radeon_add_atom_connector(struct drm_device *dev, rdev->mode_info.underscan_vborder_property, 0); } + if (ASIC_IS_DCE2(rdev)) { + drm_object_attach_property(&radeon_connector->base.base, + rdev->mode_info.audio_property, + RADEON_AUDIO_DISABLE); + } subpixel_order = SubPixelHorizontalRGB; connector->interlace_allowed = true; if (connector_type == DRM_MODE_CONNECTOR_HDMIB) @@ -1787,6 +1843,11 @@ radeon_add_atom_connector(struct drm_device *dev, rdev->mode_info.underscan_vborder_property, 0); } + if (ASIC_IS_DCE2(rdev)) { + drm_object_attach_property(&radeon_connector->base.base, + rdev->mode_info.audio_property, + RADEON_AUDIO_DISABLE); + } connector->interlace_allowed = true; /* in theory with a DP to VGA converter... */ connector->doublescan_allowed = false; @@ -1797,7 +1858,7 @@ radeon_add_atom_connector(struct drm_device *dev, goto failed; radeon_dig_connector->igp_lane_info = igp_lane_info; radeon_connector->con_priv = radeon_dig_connector; - drm_connector_init(dev, &radeon_connector->base, &radeon_dp_connector_funcs, connector_type); + drm_connector_init(dev, &radeon_connector->base, &radeon_edp_connector_funcs, connector_type); drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs); if (i2c_bus->valid) { /* add DP i2c bus */ diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c index a56084410372..ac6ece61a476 100644 --- a/drivers/gpu/drm/radeon/radeon_cs.c +++ b/drivers/gpu/drm/radeon/radeon_cs.c @@ -28,6 +28,7 @@ #include <drm/radeon_drm.h> #include "radeon_reg.h" #include "radeon.h" +#include "radeon_trace.h" static int radeon_cs_parser_relocs(struct radeon_cs_parser *p) { @@ -80,9 +81,11 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p) p->relocs[i].lobj.bo = p->relocs[i].robj; p->relocs[i].lobj.written = !!r->write_domain; - /* the first reloc of an UVD job is the - msg and that must be in VRAM */ - if (p->ring == R600_RING_TYPE_UVD_INDEX && i == 0) { + /* the first reloc of an UVD job is the msg and that must be in + VRAM, also but everything into VRAM on AGP cards to avoid + image corruptions */ + if (p->ring == R600_RING_TYPE_UVD_INDEX && + (i == 0 || p->rdev->flags & RADEON_IS_AGP)) { /* TODO: is this still needed for NI+ ? 
*/ p->relocs[i].lobj.domain = RADEON_GEM_DOMAIN_VRAM; @@ -559,6 +562,8 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) return r; } + trace_radeon_cs(&parser); + r = radeon_cs_ib_chunk(rdev, &parser); if (r) { goto out; diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 16cb8792b1e6..e29faa73b574 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -1249,6 +1249,17 @@ int radeon_device_init(struct radeon_device *rdev, /* Registers mapping */ /* TODO: block userspace mapping of io register */ spin_lock_init(&rdev->mmio_idx_lock); + spin_lock_init(&rdev->smc_idx_lock); + spin_lock_init(&rdev->pll_idx_lock); + spin_lock_init(&rdev->mc_idx_lock); + spin_lock_init(&rdev->pcie_idx_lock); + spin_lock_init(&rdev->pciep_idx_lock); + spin_lock_init(&rdev->pif_idx_lock); + spin_lock_init(&rdev->cg_idx_lock); + spin_lock_init(&rdev->uvd_idx_lock); + spin_lock_init(&rdev->rcu_idx_lock); + spin_lock_init(&rdev->didt_idx_lock); + spin_lock_init(&rdev->end_idx_lock); if (rdev->family >= CHIP_BONAIRE) { rdev->rmmio_base = pci_resource_start(rdev->pdev, 5); rdev->rmmio_size = pci_resource_len(rdev->pdev, 5); diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index b055bddaa94c..0d1aa050d41d 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -1172,6 +1172,12 @@ static struct drm_prop_enum_list radeon_underscan_enum_list[] = { UNDERSCAN_AUTO, "auto" }, }; +static struct drm_prop_enum_list radeon_audio_enum_list[] = +{ { RADEON_AUDIO_DISABLE, "off" }, + { RADEON_AUDIO_ENABLE, "on" }, + { RADEON_AUDIO_AUTO, "auto" }, +}; + static int radeon_modeset_create_props(struct radeon_device *rdev) { int sz; @@ -1222,6 +1228,12 @@ static int radeon_modeset_create_props(struct radeon_device *rdev) if (!rdev->mode_info.underscan_vborder_property) return -ENOMEM; + sz = ARRAY_SIZE(radeon_audio_enum_list); + rdev->mode_info.audio_property = + drm_property_create_enum(rdev->ddev, 0, + "audio", + radeon_audio_enum_list, sz); + return 0; } diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index cb4445f55a96..cdd12dcd988b 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -153,7 +153,7 @@ int radeon_benchmarking = 0; int radeon_testing = 0; int radeon_connector_table = 0; int radeon_tv = 1; -int radeon_audio = 0; +int radeon_audio = 1; int radeon_disp_priority = 0; int radeon_hw_i2c = 0; int radeon_pcie_gen2 = -1; diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index d908d8d68f6b..ef63d3f00b2f 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h @@ -247,6 +247,8 @@ struct radeon_mode_info { struct drm_property *underscan_property; struct drm_property *underscan_hborder_property; struct drm_property *underscan_vborder_property; + /* audio */ + struct drm_property *audio_property; /* hardcoded DFP edid from BIOS */ struct edid *bios_hardcoded_edid; int bios_hardcoded_edid_size; @@ -471,6 +473,12 @@ struct radeon_router { u8 cd_mux_state; }; +enum radeon_connector_audio { + RADEON_AUDIO_DISABLE = 0, + RADEON_AUDIO_ENABLE = 1, + RADEON_AUDIO_AUTO = 2 +}; + struct radeon_connector { struct drm_connector base; uint32_t connector_id; @@ -489,6 +497,7 @@ struct radeon_connector { struct radeon_hpd hpd; struct radeon_router router; struct radeon_i2c_chan 
*router_bus; + enum radeon_connector_audio audio; }; struct radeon_framebuffer { diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index d7555369a3e5..87e1d69e8fdb 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c @@ -67,7 +67,16 @@ int radeon_pm_get_type_index(struct radeon_device *rdev, void radeon_pm_acpi_event_handler(struct radeon_device *rdev) { - if (rdev->pm.pm_method == PM_METHOD_PROFILE) { + if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) { + mutex_lock(&rdev->pm.mutex); + if (power_supply_is_system_supplied() > 0) + rdev->pm.dpm.ac_power = true; + else + rdev->pm.dpm.ac_power = false; + if (rdev->asic->dpm.enable_bapm) + radeon_dpm_enable_bapm(rdev, rdev->pm.dpm.ac_power); + mutex_unlock(&rdev->pm.mutex); + } else if (rdev->pm.pm_method == PM_METHOD_PROFILE) { if (rdev->pm.profile == PM_PROFILE_AUTO) { mutex_lock(&rdev->pm.mutex); radeon_pm_update_profile(rdev); @@ -333,7 +342,7 @@ static ssize_t radeon_get_pm_profile(struct device *dev, struct device_attribute *attr, char *buf) { - struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); + struct drm_device *ddev = dev_get_drvdata(dev); struct radeon_device *rdev = ddev->dev_private; int cp = rdev->pm.profile; @@ -349,7 +358,7 @@ static ssize_t radeon_set_pm_profile(struct device *dev, const char *buf, size_t count) { - struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); + struct drm_device *ddev = dev_get_drvdata(dev); struct radeon_device *rdev = ddev->dev_private; mutex_lock(&rdev->pm.mutex); @@ -383,7 +392,7 @@ static ssize_t radeon_get_pm_method(struct device *dev, struct device_attribute *attr, char *buf) { - struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); + struct drm_device *ddev = dev_get_drvdata(dev); struct radeon_device *rdev = ddev->dev_private; int pm = rdev->pm.pm_method; @@ -397,7 +406,7 @@ static ssize_t radeon_set_pm_method(struct device *dev, const char *buf, size_t count) { - struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); + struct drm_device *ddev = dev_get_drvdata(dev); struct radeon_device *rdev = ddev->dev_private; /* we don't support the legacy modes with dpm */ @@ -433,7 +442,7 @@ static ssize_t radeon_get_dpm_state(struct device *dev, struct device_attribute *attr, char *buf) { - struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); + struct drm_device *ddev = dev_get_drvdata(dev); struct radeon_device *rdev = ddev->dev_private; enum radeon_pm_state_type pm = rdev->pm.dpm.user_state; @@ -447,7 +456,7 @@ static ssize_t radeon_set_dpm_state(struct device *dev, const char *buf, size_t count) { - struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); + struct drm_device *ddev = dev_get_drvdata(dev); struct radeon_device *rdev = ddev->dev_private; mutex_lock(&rdev->pm.mutex); @@ -472,7 +481,7 @@ static ssize_t radeon_get_dpm_forced_performance_level(struct device *dev, struct device_attribute *attr, char *buf) { - struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); + struct drm_device *ddev = dev_get_drvdata(dev); struct radeon_device *rdev = ddev->dev_private; enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level; @@ -486,7 +495,7 @@ static ssize_t radeon_set_dpm_forced_performance_level(struct device *dev, const char *buf, size_t count) { - struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); + struct drm_device *ddev = dev_get_drvdata(dev); struct radeon_device *rdev = ddev->dev_private; enum radeon_dpm_forced_level level; int ret = 0; @@ 
-524,7 +533,7 @@ static ssize_t radeon_hwmon_show_temp(struct device *dev, struct device_attribute *attr, char *buf) { - struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); + struct drm_device *ddev = dev_get_drvdata(dev); struct radeon_device *rdev = ddev->dev_private; int temp; @@ -536,6 +545,23 @@ static ssize_t radeon_hwmon_show_temp(struct device *dev, return snprintf(buf, PAGE_SIZE, "%d\n", temp); } +static ssize_t radeon_hwmon_show_temp_thresh(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct radeon_device *rdev = ddev->dev_private; + int hyst = to_sensor_dev_attr(attr)->index; + int temp; + + if (hyst) + temp = rdev->pm.dpm.thermal.min_temp; + else + temp = rdev->pm.dpm.thermal.max_temp; + + return snprintf(buf, PAGE_SIZE, "%d\n", temp); +} + static ssize_t radeon_hwmon_show_name(struct device *dev, struct device_attribute *attr, char *buf) @@ -544,16 +570,37 @@ static ssize_t radeon_hwmon_show_name(struct device *dev, } static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, radeon_hwmon_show_temp, NULL, 0); +static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, radeon_hwmon_show_temp_thresh, NULL, 0); +static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, radeon_hwmon_show_temp_thresh, NULL, 1); static SENSOR_DEVICE_ATTR(name, S_IRUGO, radeon_hwmon_show_name, NULL, 0); static struct attribute *hwmon_attributes[] = { &sensor_dev_attr_temp1_input.dev_attr.attr, + &sensor_dev_attr_temp1_crit.dev_attr.attr, + &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr, &sensor_dev_attr_name.dev_attr.attr, NULL }; +static umode_t hwmon_attributes_visible(struct kobject *kobj, + struct attribute *attr, int index) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct drm_device *ddev = dev_get_drvdata(dev); + struct radeon_device *rdev = ddev->dev_private; + + /* Skip limit attributes if DPM is not enabled */ + if (rdev->pm.pm_method != PM_METHOD_DPM && + (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr || + attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr)) + return 0; + + return attr->mode; +} + static const struct attribute_group hwmon_attrgroup = { .attrs = hwmon_attributes, + .is_visible = hwmon_attributes_visible, }; static int radeon_hwmon_init(struct radeon_device *rdev) @@ -870,10 +917,13 @@ static void radeon_dpm_change_power_state_locked(struct radeon_device *rdev) radeon_dpm_post_set_power_state(rdev); - /* force low perf level for thermal */ - if (rdev->pm.dpm.thermal_active && - rdev->asic->dpm.force_performance_level) { - radeon_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_LOW); + if (rdev->asic->dpm.force_performance_level) { + if (rdev->pm.dpm.thermal_active) + /* force low perf level for thermal */ + radeon_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_LOW); + else + /* otherwise, enable auto */ + radeon_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_AUTO); } done: @@ -1102,9 +1152,10 @@ static int radeon_pm_init_dpm(struct radeon_device *rdev) { int ret; - /* default to performance state */ + /* default to balanced state */ rdev->pm.dpm.state = POWER_STATE_TYPE_BALANCED; rdev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED; + rdev->pm.dpm.forced_level = RADEON_DPM_FORCED_LEVEL_AUTO; rdev->pm.default_sclk = rdev->clock.default_sclk; rdev->pm.default_mclk = rdev->clock.default_mclk; rdev->pm.current_sclk = rdev->clock.default_sclk; diff --git a/drivers/gpu/drm/radeon/radeon_trace.h b/drivers/gpu/drm/radeon/radeon_trace.h index 
eafd8160a155..f7e367815964 100644 --- a/drivers/gpu/drm/radeon/radeon_trace.h +++ b/drivers/gpu/drm/radeon/radeon_trace.h @@ -27,6 +27,26 @@ TRACE_EVENT(radeon_bo_create, TP_printk("bo=%p, pages=%u", __entry->bo, __entry->pages) ); +TRACE_EVENT(radeon_cs, + TP_PROTO(struct radeon_cs_parser *p), + TP_ARGS(p), + TP_STRUCT__entry( + __field(u32, ring) + __field(u32, dw) + __field(u32, fences) + ), + + TP_fast_assign( + __entry->ring = p->ring; + __entry->dw = p->chunks[p->chunk_ib_idx].length_dw; + __entry->fences = radeon_fence_count_emitted( + p->rdev, p->ring); + ), + TP_printk("ring=%u, dw=%u, fences=%u", + __entry->ring, __entry->dw, + __entry->fences) +); + DECLARE_EVENT_CLASS(radeon_fence_request, TP_PROTO(struct drm_device *dev, u32 seqno), @@ -53,13 +73,6 @@ DEFINE_EVENT(radeon_fence_request, radeon_fence_emit, TP_ARGS(dev, seqno) ); -DEFINE_EVENT(radeon_fence_request, radeon_fence_retire, - - TP_PROTO(struct drm_device *dev, u32 seqno), - - TP_ARGS(dev, seqno) -); - DEFINE_EVENT(radeon_fence_request, radeon_fence_wait_begin, TP_PROTO(struct drm_device *dev, u32 seqno), diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c index b8074a8ec75a..9566b5940a5a 100644 --- a/drivers/gpu/drm/radeon/rs400.c +++ b/drivers/gpu/drm/radeon/rs400.c @@ -274,19 +274,26 @@ static void rs400_mc_init(struct radeon_device *rdev) uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg) { + unsigned long flags; uint32_t r; + spin_lock_irqsave(&rdev->mc_idx_lock, flags); WREG32(RS480_NB_MC_INDEX, reg & 0xff); r = RREG32(RS480_NB_MC_DATA); WREG32(RS480_NB_MC_INDEX, 0xff); + spin_unlock_irqrestore(&rdev->mc_idx_lock, flags); return r; } void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) { + unsigned long flags; + + spin_lock_irqsave(&rdev->mc_idx_lock, flags); WREG32(RS480_NB_MC_INDEX, ((reg) & 0xff) | RS480_NB_MC_IND_WR_EN); WREG32(RS480_NB_MC_DATA, (v)); WREG32(RS480_NB_MC_INDEX, 0xff); + spin_unlock_irqrestore(&rdev->mc_idx_lock, flags); } #if defined(CONFIG_DEBUG_FS) diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c index 670b555d2ca2..6acba8017b9a 100644 --- a/drivers/gpu/drm/radeon/rs600.c +++ b/drivers/gpu/drm/radeon/rs600.c @@ -847,16 +847,26 @@ void rs600_bandwidth_update(struct radeon_device *rdev) uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg) { + unsigned long flags; + u32 r; + + spin_lock_irqsave(&rdev->mc_idx_lock, flags); WREG32(R_000070_MC_IND_INDEX, S_000070_MC_IND_ADDR(reg) | S_000070_MC_IND_CITF_ARB0(1)); - return RREG32(R_000074_MC_IND_DATA); + r = RREG32(R_000074_MC_IND_DATA); + spin_unlock_irqrestore(&rdev->mc_idx_lock, flags); + return r; } void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) { + unsigned long flags; + + spin_lock_irqsave(&rdev->mc_idx_lock, flags); WREG32(R_000070_MC_IND_INDEX, S_000070_MC_IND_ADDR(reg) | S_000070_MC_IND_CITF_ARB0(1) | S_000070_MC_IND_WR_EN(1)); WREG32(R_000074_MC_IND_DATA, v); + spin_unlock_irqrestore(&rdev->mc_idx_lock, flags); } static void rs600_debugfs(struct radeon_device *rdev) diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c index d8ddfb34545d..1447d794c22a 100644 --- a/drivers/gpu/drm/radeon/rs690.c +++ b/drivers/gpu/drm/radeon/rs690.c @@ -631,20 +631,27 @@ void rs690_bandwidth_update(struct radeon_device *rdev) uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg) { + unsigned long flags; uint32_t r; + spin_lock_irqsave(&rdev->mc_idx_lock, flags); WREG32(R_000078_MC_INDEX, 
S_000078_MC_IND_ADDR(reg)); r = RREG32(R_00007C_MC_DATA); WREG32(R_000078_MC_INDEX, ~C_000078_MC_IND_ADDR); + spin_unlock_irqrestore(&rdev->mc_idx_lock, flags); return r; } void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) { + unsigned long flags; + + spin_lock_irqsave(&rdev->mc_idx_lock, flags); WREG32(R_000078_MC_INDEX, S_000078_MC_IND_ADDR(reg) | S_000078_MC_IND_WR_EN(1)); WREG32(R_00007C_MC_DATA, v); WREG32(R_000078_MC_INDEX, 0x7F); + spin_unlock_irqrestore(&rdev->mc_idx_lock, flags); } static void rs690_mc_program(struct radeon_device *rdev) diff --git a/drivers/gpu/drm/radeon/rs780_dpm.c b/drivers/gpu/drm/radeon/rs780_dpm.c index d1a1ce73bd45..6af8505cf4d2 100644 --- a/drivers/gpu/drm/radeon/rs780_dpm.c +++ b/drivers/gpu/drm/radeon/rs780_dpm.c @@ -62,9 +62,7 @@ static void rs780_get_pm_mode_parameters(struct radeon_device *rdev) radeon_crtc = to_radeon_crtc(crtc); pi->crtc_id = radeon_crtc->crtc_id; if (crtc->mode.htotal && crtc->mode.vtotal) - pi->refresh_rate = - (crtc->mode.clock * 1000) / - (crtc->mode.htotal * crtc->mode.vtotal); + pi->refresh_rate = drm_mode_vrefresh(&crtc->mode); break; } } @@ -376,9 +374,8 @@ static void rs780_disable_vbios_powersaving(struct radeon_device *rdev) WREG32_P(CG_INTGFX_MISC, 0, ~0xFFF00000); } -static void rs780_force_voltage_to_high(struct radeon_device *rdev) +static void rs780_force_voltage(struct radeon_device *rdev, u16 voltage) { - struct igp_power_info *pi = rs780_get_pi(rdev); struct igp_ps *current_state = rs780_get_ps(rdev->pm.dpm.current_ps); if ((current_state->max_voltage == RS780_VDDC_LEVEL_HIGH) && @@ -390,7 +387,7 @@ static void rs780_force_voltage_to_high(struct radeon_device *rdev) udelay(1); WREG32_P(FVTHROT_PWM_CTRL_REG0, - STARTING_PWM_HIGHTIME(pi->max_voltage), + STARTING_PWM_HIGHTIME(voltage), ~STARTING_PWM_HIGHTIME_MASK); WREG32_P(FVTHROT_PWM_CTRL_REG0, @@ -404,6 +401,26 @@ static void rs780_force_voltage_to_high(struct radeon_device *rdev) WREG32_P(GFX_MACRO_BYPASS_CNTL, 0, ~SPLL_BYPASS_CNTL); } +static void rs780_force_fbdiv(struct radeon_device *rdev, u32 fb_div) +{ + struct igp_ps *current_state = rs780_get_ps(rdev->pm.dpm.current_ps); + + if (current_state->sclk_low == current_state->sclk_high) + return; + + WREG32_P(GFX_MACRO_BYPASS_CNTL, SPLL_BYPASS_CNTL, ~SPLL_BYPASS_CNTL); + + WREG32_P(FVTHROT_FBDIV_REG2, FORCED_FEEDBACK_DIV(fb_div), + ~FORCED_FEEDBACK_DIV_MASK); + WREG32_P(FVTHROT_FBDIV_REG1, STARTING_FEEDBACK_DIV(fb_div), + ~STARTING_FEEDBACK_DIV_MASK); + WREG32_P(FVTHROT_FBDIV_REG1, FORCE_FEEDBACK_DIV, ~FORCE_FEEDBACK_DIV); + + udelay(100); + + WREG32_P(GFX_MACRO_BYPASS_CNTL, 0, ~SPLL_BYPASS_CNTL); +} + static int rs780_set_engine_clock_scaling(struct radeon_device *rdev, struct radeon_ps *new_ps, struct radeon_ps *old_ps) @@ -432,17 +449,13 @@ static int rs780_set_engine_clock_scaling(struct radeon_device *rdev, if (ret) return ret; - WREG32_P(GFX_MACRO_BYPASS_CNTL, SPLL_BYPASS_CNTL, ~SPLL_BYPASS_CNTL); - - WREG32_P(FVTHROT_FBDIV_REG2, FORCED_FEEDBACK_DIV(max_dividers.fb_div), - ~FORCED_FEEDBACK_DIV_MASK); - WREG32_P(FVTHROT_FBDIV_REG1, STARTING_FEEDBACK_DIV(max_dividers.fb_div), - ~STARTING_FEEDBACK_DIV_MASK); - WREG32_P(FVTHROT_FBDIV_REG1, FORCE_FEEDBACK_DIV, ~FORCE_FEEDBACK_DIV); - - udelay(100); + if ((min_dividers.ref_div != max_dividers.ref_div) || + (min_dividers.post_div != max_dividers.post_div) || + (max_dividers.ref_div != current_max_dividers.ref_div) || + (max_dividers.post_div != current_max_dividers.post_div)) + return -EINVAL; - WREG32_P(GFX_MACRO_BYPASS_CNTL, 0, 
~SPLL_BYPASS_CNTL); + rs780_force_fbdiv(rdev, max_dividers.fb_div); if (max_dividers.fb_div > min_dividers.fb_div) { WREG32_P(FVTHROT_FBDIV_REG0, @@ -486,6 +499,9 @@ static void rs780_activate_engine_clk_scaling(struct radeon_device *rdev, (new_state->sclk_low == old_state->sclk_low)) return; + if (new_state->sclk_high == new_state->sclk_low) + return; + rs780_clk_scaling_enable(rdev, true); } @@ -649,7 +665,7 @@ int rs780_dpm_set_power_state(struct radeon_device *rdev) rs780_set_uvd_clock_before_set_eng_clock(rdev, new_ps, old_ps); if (pi->voltage_control) { - rs780_force_voltage_to_high(rdev); + rs780_force_voltage(rdev, pi->max_voltage); mdelay(5); } @@ -717,14 +733,18 @@ static void rs780_parse_pplib_non_clock_info(struct radeon_device *rdev, if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) { rps->vclk = le32_to_cpu(non_clock_info->ulVCLK); rps->dclk = le32_to_cpu(non_clock_info->ulDCLK); - } else if (r600_is_uvd_state(rps->class, rps->class2)) { - rps->vclk = RS780_DEFAULT_VCLK_FREQ; - rps->dclk = RS780_DEFAULT_DCLK_FREQ; } else { rps->vclk = 0; rps->dclk = 0; } + if (r600_is_uvd_state(rps->class, rps->class2)) { + if ((rps->vclk == 0) || (rps->dclk == 0)) { + rps->vclk = RS780_DEFAULT_VCLK_FREQ; + rps->dclk = RS780_DEFAULT_DCLK_FREQ; + } + } + if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) rdev->pm.dpm.boot_ps = rps; if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE) @@ -986,3 +1006,55 @@ void rs780_dpm_debugfs_print_current_performance_level(struct radeon_device *rde seq_printf(m, "power level 1 sclk: %u vddc_index: %d\n", ps->sclk_high, ps->max_voltage); } + +int rs780_dpm_force_performance_level(struct radeon_device *rdev, + enum radeon_dpm_forced_level level) +{ + struct igp_power_info *pi = rs780_get_pi(rdev); + struct radeon_ps *rps = rdev->pm.dpm.current_ps; + struct igp_ps *ps = rs780_get_ps(rps); + struct atom_clock_dividers dividers; + int ret; + + rs780_clk_scaling_enable(rdev, false); + rs780_voltage_scaling_enable(rdev, false); + + if (level == RADEON_DPM_FORCED_LEVEL_HIGH) { + if (pi->voltage_control) + rs780_force_voltage(rdev, pi->max_voltage); + + ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM, + ps->sclk_high, false, &dividers); + if (ret) + return ret; + + rs780_force_fbdiv(rdev, dividers.fb_div); + } else if (level == RADEON_DPM_FORCED_LEVEL_LOW) { + ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM, + ps->sclk_low, false, &dividers); + if (ret) + return ret; + + rs780_force_fbdiv(rdev, dividers.fb_div); + + if (pi->voltage_control) + rs780_force_voltage(rdev, pi->min_voltage); + } else { + if (pi->voltage_control) + rs780_force_voltage(rdev, pi->max_voltage); + + if (ps->sclk_high != ps->sclk_low) { + WREG32_P(FVTHROT_FBDIV_REG1, 0, ~FORCE_FEEDBACK_DIV); + rs780_clk_scaling_enable(rdev, true); + } + + if (pi->voltage_control) { + rs780_voltage_scaling_enable(rdev, true); + rs780_enable_voltage_scaling(rdev, rps); + } + } + + rdev->pm.dpm.forced_level = level; + + return 0; +} diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c index 8ea1573ae820..873eb4b193b4 100644 --- a/drivers/gpu/drm/radeon/rv515.c +++ b/drivers/gpu/drm/radeon/rv515.c @@ -209,19 +209,27 @@ static void rv515_mc_init(struct radeon_device *rdev) uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg) { + unsigned long flags; uint32_t r; + spin_lock_irqsave(&rdev->mc_idx_lock, flags); WREG32(MC_IND_INDEX, 0x7f0000 | (reg & 0xffff)); r = RREG32(MC_IND_DATA); WREG32(MC_IND_INDEX, 0); + spin_unlock_irqrestore(&rdev->mc_idx_lock, flags); + 
return r; } void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) { + unsigned long flags; + + spin_lock_irqsave(&rdev->mc_idx_lock, flags); WREG32(MC_IND_INDEX, 0xff0000 | ((reg) & 0xffff)); WREG32(MC_IND_DATA, (v)); WREG32(MC_IND_INDEX, 0); + spin_unlock_irqrestore(&rdev->mc_idx_lock, flags); } #if defined(CONFIG_DEBUG_FS) diff --git a/drivers/gpu/drm/radeon/rv6xx_dpm.c b/drivers/gpu/drm/radeon/rv6xx_dpm.c index ab1f2016f21e..5811d277a36a 100644 --- a/drivers/gpu/drm/radeon/rv6xx_dpm.c +++ b/drivers/gpu/drm/radeon/rv6xx_dpm.c @@ -1758,8 +1758,6 @@ int rv6xx_dpm_set_power_state(struct radeon_device *rdev) rv6xx_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps); - rdev->pm.dpm.forced_level = RADEON_DPM_FORCED_LEVEL_AUTO; - return 0; } diff --git a/drivers/gpu/drm/radeon/rv770_dpm.c b/drivers/gpu/drm/radeon/rv770_dpm.c index 8cbb85dae5aa..913b025ae9b3 100644 --- a/drivers/gpu/drm/radeon/rv770_dpm.c +++ b/drivers/gpu/drm/radeon/rv770_dpm.c @@ -2064,12 +2064,6 @@ int rv770_dpm_set_power_state(struct radeon_device *rdev) rv770_program_dcodt_after_state_switch(rdev, new_ps, old_ps); rv770_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps); - ret = rv770_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_AUTO); - if (ret) { - DRM_ERROR("rv770_dpm_force_performance_level failed\n"); - return ret; - } - return 0; } @@ -2147,14 +2141,18 @@ static void rv7xx_parse_pplib_non_clock_info(struct radeon_device *rdev, if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) { rps->vclk = le32_to_cpu(non_clock_info->ulVCLK); rps->dclk = le32_to_cpu(non_clock_info->ulDCLK); - } else if (r600_is_uvd_state(rps->class, rps->class2)) { - rps->vclk = RV770_DEFAULT_VCLK_FREQ; - rps->dclk = RV770_DEFAULT_DCLK_FREQ; } else { rps->vclk = 0; rps->dclk = 0; } + if (r600_is_uvd_state(rps->class, rps->class2)) { + if ((rps->vclk == 0) || (rps->dclk == 0)) { + rps->vclk = RV770_DEFAULT_VCLK_FREQ; + rps->dclk = RV770_DEFAULT_DCLK_FREQ; + } + } + if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) rdev->pm.dpm.boot_ps = rps; if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE) diff --git a/drivers/gpu/drm/radeon/rv770_smc.c b/drivers/gpu/drm/radeon/rv770_smc.c index ab95da570215..b2a224407365 100644 --- a/drivers/gpu/drm/radeon/rv770_smc.c +++ b/drivers/gpu/drm/radeon/rv770_smc.c @@ -274,8 +274,8 @@ static const u8 cayman_smc_int_vectors[] = 0x08, 0x72, 0x08, 0x72 }; -int rv770_set_smc_sram_address(struct radeon_device *rdev, - u16 smc_address, u16 limit) +static int rv770_set_smc_sram_address(struct radeon_device *rdev, + u16 smc_address, u16 limit) { u32 addr; @@ -296,9 +296,10 @@ int rv770_copy_bytes_to_smc(struct radeon_device *rdev, u16 smc_start_address, const u8 *src, u16 byte_count, u16 limit) { + unsigned long flags; u32 data, original_data, extra_shift; u16 addr; - int ret; + int ret = 0; if (smc_start_address & 3) return -EINVAL; @@ -307,13 +308,14 @@ int rv770_copy_bytes_to_smc(struct radeon_device *rdev, addr = smc_start_address; + spin_lock_irqsave(&rdev->smc_idx_lock, flags); while (byte_count >= 4) { /* SMC address space is BE */ data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3]; ret = rv770_set_smc_sram_address(rdev, addr, limit); if (ret) - return ret; + goto done; WREG32(SMC_SRAM_DATA, data); @@ -328,7 +330,7 @@ int rv770_copy_bytes_to_smc(struct radeon_device *rdev, ret = rv770_set_smc_sram_address(rdev, addr, limit); if (ret) - return ret; + goto done; original_data = RREG32(SMC_SRAM_DATA); @@ -346,12 +348,15 @@ int rv770_copy_bytes_to_smc(struct radeon_device 
*rdev, ret = rv770_set_smc_sram_address(rdev, addr, limit); if (ret) - return ret; + goto done; WREG32(SMC_SRAM_DATA, data); } - return 0; +done: + spin_unlock_irqrestore(&rdev->smc_idx_lock, flags); + + return ret; } static int rv770_program_interrupt_vectors(struct radeon_device *rdev, @@ -461,12 +466,15 @@ PPSMC_Result rv770_wait_for_smc_inactive(struct radeon_device *rdev) static void rv770_clear_smc_sram(struct radeon_device *rdev, u16 limit) { + unsigned long flags; u16 i; + spin_lock_irqsave(&rdev->smc_idx_lock, flags); for (i = 0; i < limit; i += 4) { rv770_set_smc_sram_address(rdev, i, limit); WREG32(SMC_SRAM_DATA, 0); } + spin_unlock_irqrestore(&rdev->smc_idx_lock, flags); } int rv770_load_smc_ucode(struct radeon_device *rdev, @@ -595,27 +603,29 @@ int rv770_load_smc_ucode(struct radeon_device *rdev, int rv770_read_smc_sram_dword(struct radeon_device *rdev, u16 smc_address, u32 *value, u16 limit) { + unsigned long flags; int ret; + spin_lock_irqsave(&rdev->smc_idx_lock, flags); ret = rv770_set_smc_sram_address(rdev, smc_address, limit); - if (ret) - return ret; - - *value = RREG32(SMC_SRAM_DATA); + if (ret == 0) + *value = RREG32(SMC_SRAM_DATA); + spin_unlock_irqrestore(&rdev->smc_idx_lock, flags); - return 0; + return ret; } int rv770_write_smc_sram_dword(struct radeon_device *rdev, u16 smc_address, u32 value, u16 limit) { + unsigned long flags; int ret; + spin_lock_irqsave(&rdev->smc_idx_lock, flags); ret = rv770_set_smc_sram_address(rdev, smc_address, limit); - if (ret) - return ret; + if (ret == 0) + WREG32(SMC_SRAM_DATA, value); + spin_unlock_irqrestore(&rdev->smc_idx_lock, flags); - WREG32(SMC_SRAM_DATA, value); - - return 0; + return ret; } diff --git a/drivers/gpu/drm/radeon/rv770_smc.h b/drivers/gpu/drm/radeon/rv770_smc.h index f78d92a4b325..3b2c963c4880 100644 --- a/drivers/gpu/drm/radeon/rv770_smc.h +++ b/drivers/gpu/drm/radeon/rv770_smc.h @@ -187,8 +187,6 @@ typedef struct RV770_SMC_STATETABLE RV770_SMC_STATETABLE; #define RV770_SMC_SOFT_REGISTER_uvd_enabled 0x9C #define RV770_SMC_SOFT_REGISTER_is_asic_lombok 0xA0 -int rv770_set_smc_sram_address(struct radeon_device *rdev, - u16 smc_address, u16 limit); int rv770_copy_bytes_to_smc(struct radeon_device *rdev, u16 smc_start_address, const u8 *src, u16 byte_count, u16 limit); diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h index 9fe60e542922..1ae277152cc7 100644 --- a/drivers/gpu/drm/radeon/rv770d.h +++ b/drivers/gpu/drm/radeon/rv770d.h @@ -852,7 +852,7 @@ #define AFMT_VBI_PACKET_CONTROL 0x7608 # define AFMT_GENERIC0_UPDATE (1 << 2) #define AFMT_INFOFRAME_CONTROL0 0x760c -# define AFMT_AUDIO_INFO_SOURCE (1 << 6) /* 0 - sound block; 1 - hmdi regs */ +# define AFMT_AUDIO_INFO_SOURCE (1 << 6) /* 0 - sound block; 1 - hdmi regs */ # define AFMT_AUDIO_INFO_UPDATE (1 << 7) # define AFMT_MPEG_INFO_UPDATE (1 << 10) #define AFMT_GENERIC0_7 0x7610 diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index 3e23b757dcfa..c354c1094967 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c @@ -83,6 +83,8 @@ extern void si_dma_vm_set_page(struct radeon_device *rdev, uint64_t pe, uint64_t addr, unsigned count, uint32_t incr, uint32_t flags); +static void si_enable_gui_idle_interrupt(struct radeon_device *rdev, + bool enable); static const u32 verde_rlc_save_restore_register_list[] = { @@ -3386,6 +3388,8 @@ static int si_cp_resume(struct radeon_device *rdev) u32 rb_bufsz; int r; + si_enable_gui_idle_interrupt(rdev, false); + WREG32(CP_SEM_WAIT_TIMER, 0x0); 
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 3e23b757dcfa..c354c1094967 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -83,6 +83,8 @@ extern void si_dma_vm_set_page(struct radeon_device *rdev,
 			       uint64_t pe,
 			       uint64_t addr, unsigned count,
 			       uint32_t incr, uint32_t flags);
+static void si_enable_gui_idle_interrupt(struct radeon_device *rdev,
+					 bool enable);
 
 static const u32 verde_rlc_save_restore_register_list[] =
 {
@@ -3386,6 +3388,8 @@ static int si_cp_resume(struct radeon_device *rdev)
 	u32 rb_bufsz;
 	int r;
 
+	si_enable_gui_idle_interrupt(rdev, false);
+
 	WREG32(CP_SEM_WAIT_TIMER, 0x0);
 
 	WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);
@@ -3501,6 +3505,8 @@ static int si_cp_resume(struct radeon_device *rdev)
 		rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
 	}
 
+	si_enable_gui_idle_interrupt(rdev, true);
+
 	return 0;
 }
 
@@ -4888,7 +4894,7 @@ static void si_enable_gfx_cgpg(struct radeon_device *rdev,
 {
 	u32 tmp;
 
-	if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_CG)) {
+	if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG)) {
 		tmp = RLC_PUD(0x10) | RLC_PDD(0x10) | RLC_TTPD(0x10) | RLC_MSD(0x10);
 		WREG32(RLC_TTOP_D, tmp);
 
@@ -5250,6 +5256,7 @@ void si_update_cg(struct radeon_device *rdev,
 		  u32 block, bool enable)
 {
 	if (block & RADEON_CG_BLOCK_GFX) {
+		si_enable_gui_idle_interrupt(rdev, false);
 		/* order matters! */
 		if (enable) {
 			si_enable_mgcg(rdev, true);
@@ -5258,6 +5265,7 @@ void si_update_cg(struct radeon_device *rdev,
 			si_enable_cgcg(rdev, false);
 			si_enable_mgcg(rdev, false);
 		}
+		si_enable_gui_idle_interrupt(rdev, true);
 	}
 
 	if (block & RADEON_CG_BLOCK_MC) {
@@ -5408,7 +5416,7 @@ static void si_init_pg(struct radeon_device *rdev)
 			si_init_dma_pg(rdev);
 		}
 		si_init_ao_cu_mask(rdev);
-		if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_CG) {
+		if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG) {
 			si_init_gfx_cgpg(rdev);
 		}
 		si_enable_dma_pg(rdev, true);
@@ -5560,7 +5568,9 @@ static void si_disable_interrupt_state(struct radeon_device *rdev)
 {
 	u32 tmp;
 
-	WREG32(CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
+	tmp = RREG32(CP_INT_CNTL_RING0) &
+		(CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
+	WREG32(CP_INT_CNTL_RING0, tmp);
 	WREG32(CP_INT_CNTL_RING1, 0);
 	WREG32(CP_INT_CNTL_RING2, 0);
 	tmp = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET) & ~TRAP_ENABLE;
@@ -5685,7 +5695,7 @@ static int si_irq_init(struct radeon_device *rdev)
 
 int si_irq_set(struct radeon_device *rdev)
 {
-	u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
+	u32 cp_int_cntl;
 	u32 cp_int_cntl1 = 0, cp_int_cntl2 = 0;
 	u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
 	u32 hpd1 = 0, hpd2 = 0, hpd3 = 0, hpd4 = 0, hpd5 = 0, hpd6 = 0;
@@ -5706,6 +5716,9 @@ int si_irq_set(struct radeon_device *rdev)
 		return 0;
 	}
 
+	cp_int_cntl = RREG32(CP_INT_CNTL_RING0) &
+		(CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
+
 	if (!ASIC_IS_NODCE(rdev)) {
 		hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
 		hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 5be9b4e72350..cfe5d4d28915 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -6075,12 +6075,6 @@ int si_dpm_set_power_state(struct radeon_device *rdev)
 		return ret;
 	}
 
-	ret = si_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_AUTO);
-	if (ret) {
-		DRM_ERROR("si_dpm_force_performance_level failed\n");
-		return ret;
-	}
-
 	si_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
 			    RADEON_CG_BLOCK_MC |
 			    RADEON_CG_BLOCK_SDMA |
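The si.c interrupt hunks above replace hard-coded writes of CP_INT_CNTL_RING0 with a read-modify-write that preserves the context busy/empty enables managed elsewhere. A simplified sketch of that pattern, assuming the TIME_STAMP_INT_ENABLE bit that si_irq_set() programs for the GFX ring:

/* Illustrative sketch of the read-modify-write pattern used above. */
static void example_program_ring0_ints(struct radeon_device *rdev, bool ring_ints)
{
	/* keep only the bits this path does not own ... */
	u32 cp_int_cntl = RREG32(CP_INT_CNTL_RING0) &
		(CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);

	/* ... then OR in the bits it does control */
	if (ring_ints)
		cp_int_cntl |= TIME_STAMP_INT_ENABLE;

	WREG32(CP_INT_CNTL_RING0, cp_int_cntl);
}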
diff --git a/drivers/gpu/drm/radeon/si_smc.c b/drivers/gpu/drm/radeon/si_smc.c
index 5f524c0a541e..d422a1cbf727 100644
--- a/drivers/gpu/drm/radeon/si_smc.c
+++ b/drivers/gpu/drm/radeon/si_smc.c
@@ -29,8 +29,8 @@
 #include "ppsmc.h"
 #include "radeon_ucode.h"
 
-int si_set_smc_sram_address(struct radeon_device *rdev,
-			    u32 smc_address, u32 limit)
+static int si_set_smc_sram_address(struct radeon_device *rdev,
+				   u32 smc_address, u32 limit)
 {
 	if (smc_address & 3)
 		return -EINVAL;
@@ -47,7 +47,8 @@ int si_copy_bytes_to_smc(struct radeon_device *rdev,
 			 u32 smc_start_address,
 			 const u8 *src, u32 byte_count, u32 limit)
 {
-	int ret;
+	unsigned long flags;
+	int ret = 0;
 	u32 data, original_data, addr, extra_shift;
 
 	if (smc_start_address & 3)
@@ -57,13 +58,14 @@ int si_copy_bytes_to_smc(struct radeon_device *rdev,
 
 	addr = smc_start_address;
 
+	spin_lock_irqsave(&rdev->smc_idx_lock, flags);
 	while (byte_count >= 4) {
 		/* SMC address space is BE */
 		data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];
 
 		ret = si_set_smc_sram_address(rdev, addr, limit);
 		if (ret)
-			return ret;
+			goto done;
 
 		WREG32(SMC_IND_DATA_0, data);
 
@@ -78,7 +80,7 @@ int si_copy_bytes_to_smc(struct radeon_device *rdev,
 
 		ret = si_set_smc_sram_address(rdev, addr, limit);
 		if (ret)
-			return ret;
+			goto done;
 
 		original_data = RREG32(SMC_IND_DATA_0);
 
@@ -96,11 +98,15 @@ int si_copy_bytes_to_smc(struct radeon_device *rdev,
 
 		ret = si_set_smc_sram_address(rdev, addr, limit);
 		if (ret)
-			return ret;
+			goto done;
 
 		WREG32(SMC_IND_DATA_0, data);
 	}
-	return 0;
+
+done:
+	spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
+
+	return ret;
 }
 
 void si_start_smc(struct radeon_device *rdev)
@@ -203,6 +209,7 @@ PPSMC_Result si_wait_for_smc_inactive(struct radeon_device *rdev)
 
 int si_load_smc_ucode(struct radeon_device *rdev, u32 limit)
 {
+	unsigned long flags;
 	u32 ucode_start_address;
 	u32 ucode_size;
 	const u8 *src;
@@ -241,6 +248,7 @@ int si_load_smc_ucode(struct radeon_device *rdev, u32 limit)
 		return -EINVAL;
 
 	src = (const u8 *)rdev->smc_fw->data;
+	spin_lock_irqsave(&rdev->smc_idx_lock, flags);
 	WREG32(SMC_IND_INDEX_0, ucode_start_address);
 	WREG32_P(SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, ~AUTO_INCREMENT_IND_0);
 	while (ucode_size >= 4) {
@@ -253,6 +261,7 @@ int si_load_smc_ucode(struct radeon_device *rdev, u32 limit)
 		ucode_size -= 4;
 	}
 	WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0);
+	spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
 
 	return 0;
 }
@@ -260,25 +269,29 @@ int si_load_smc_ucode(struct radeon_device *rdev, u32 limit)
 int si_read_smc_sram_dword(struct radeon_device *rdev,
 			   u32 smc_address, u32 *value, u32 limit)
 {
+	unsigned long flags;
 	int ret;
 
+	spin_lock_irqsave(&rdev->smc_idx_lock, flags);
 	ret = si_set_smc_sram_address(rdev, smc_address, limit);
-	if (ret)
-		return ret;
+	if (ret == 0)
+		*value = RREG32(SMC_IND_DATA_0);
+	spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
 
-	*value = RREG32(SMC_IND_DATA_0);
-	return 0;
+	return ret;
 }
 
 int si_write_smc_sram_dword(struct radeon_device *rdev,
 			    u32 smc_address, u32 value, u32 limit)
 {
+	unsigned long flags;
 	int ret;
 
+	spin_lock_irqsave(&rdev->smc_idx_lock, flags);
 	ret = si_set_smc_sram_address(rdev, smc_address, limit);
-	if (ret)
-		return ret;
+	if (ret == 0)
+		WREG32(SMC_IND_DATA_0, value);
+	spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
 
-	WREG32(SMC_IND_DATA_0, value);
-	return 0;
+	return ret;
 }
diff --git a/drivers/gpu/drm/radeon/sumo_dpm.c b/drivers/gpu/drm/radeon/sumo_dpm.c
index 864761c0120e..96ea6db8bf57 100644
--- a/drivers/gpu/drm/radeon/sumo_dpm.c
+++ b/drivers/gpu/drm/radeon/sumo_dpm.c
@@ -1319,8 +1319,6 @@ int sumo_dpm_set_power_state(struct radeon_device *rdev)
 	if (pi->enable_dpm)
 		sumo_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps);
 
-	rdev->pm.dpm.forced_level = RADEON_DPM_FORCED_LEVEL_AUTO;
-
 	return 0;
 }
 
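A recurring change in the rv6xx, rv770, si and sumo hunks (and in trinity below) is that the set_power_state callbacks no longer force the performance level back to AUTO, so a level forced through sysfs now survives a power-state switch. A caller that wants the old behaviour has to restore it explicitly; a hedged sketch, assuming the radeon_dpm_* asic-callback wrappers from radeon.h (the function name is illustrative):

/* Illustrative sketch only. */
static int example_set_state_and_reset_level(struct radeon_device *rdev)
{
	int ret = radeon_dpm_set_power_state(rdev);

	if (ret)
		return ret;

	/* formerly done inside each asic's set_power_state; now up to the caller */
	return radeon_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_AUTO);
}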
diff --git a/drivers/gpu/drm/radeon/trinity_dpm.c b/drivers/gpu/drm/radeon/trinity_dpm.c
index b07b7b8f1aff..7f998bf1cc9d 100644
--- a/drivers/gpu/drm/radeon/trinity_dpm.c
+++ b/drivers/gpu/drm/radeon/trinity_dpm.c
@@ -1068,6 +1068,17 @@ static void trinity_update_requested_ps(struct radeon_device *rdev,
 	pi->requested_rps.ps_priv = &pi->requested_ps;
 }
 
+void trinity_dpm_enable_bapm(struct radeon_device *rdev, bool enable)
+{
+	struct trinity_power_info *pi = trinity_get_pi(rdev);
+
+	if (pi->enable_bapm) {
+		trinity_acquire_mutex(rdev);
+		trinity_dpm_bapm_enable(rdev, enable);
+		trinity_release_mutex(rdev);
+	}
+}
+
 int trinity_dpm_enable(struct radeon_device *rdev)
 {
 	struct trinity_power_info *pi = trinity_get_pi(rdev);
@@ -1091,6 +1102,7 @@ int trinity_dpm_enable(struct radeon_device *rdev)
 	trinity_program_sclk_dpm(rdev);
 	trinity_start_dpm(rdev);
 	trinity_wait_for_dpm_enabled(rdev);
+	trinity_dpm_bapm_enable(rdev, false);
 	trinity_release_mutex(rdev);
 
 	if (rdev->irq.installed &&
@@ -1116,6 +1128,7 @@ void trinity_dpm_disable(struct radeon_device *rdev)
 		trinity_release_mutex(rdev);
 		return;
 	}
+	trinity_dpm_bapm_enable(rdev, false);
 	trinity_disable_clock_power_gating(rdev);
 	sumo_clear_vc(rdev);
 	trinity_wait_for_level_0(rdev);
@@ -1212,6 +1225,8 @@ int trinity_dpm_set_power_state(struct radeon_device *rdev)
 
 	trinity_acquire_mutex(rdev);
 	if (pi->enable_dpm) {
+		if (pi->enable_bapm)
+			trinity_dpm_bapm_enable(rdev, rdev->pm.dpm.ac_power);
 		trinity_set_uvd_clock_before_set_eng_clock(rdev, new_ps, old_ps);
 		trinity_enable_power_level_0(rdev);
 		trinity_force_level_0(rdev);
@@ -1221,7 +1236,6 @@ int trinity_dpm_set_power_state(struct radeon_device *rdev)
 		trinity_force_level_0(rdev);
 		trinity_unforce_levels(rdev);
 		trinity_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps);
-		rdev->pm.dpm.forced_level = RADEON_DPM_FORCED_LEVEL_AUTO;
 	}
 	trinity_release_mutex(rdev);
 
@@ -1854,6 +1868,7 @@ int trinity_dpm_init(struct radeon_device *rdev)
 	for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++)
 		pi->at[i] = TRINITY_AT_DFLT;
 
+	pi->enable_bapm = true;
 	pi->enable_nbps_policy = true;
 	pi->enable_sclk_ds = true;
 	pi->enable_gfx_power_gating = true;
diff --git a/drivers/gpu/drm/radeon/trinity_dpm.h b/drivers/gpu/drm/radeon/trinity_dpm.h
index e82df071f8b3..c261657750ca 100644
--- a/drivers/gpu/drm/radeon/trinity_dpm.h
+++ b/drivers/gpu/drm/radeon/trinity_dpm.h
@@ -108,6 +108,7 @@ struct trinity_power_info {
 	bool enable_auto_thermal_throttling;
 	bool enable_dpm;
 	bool enable_sclk_ds;
+	bool enable_bapm;
 	bool uvd_dpm;
 	struct radeon_ps current_rps;
 	struct trinity_ps current_ps;
@@ -118,6 +119,7 @@ struct trinity_power_info {
 #define TRINITY_AT_DFLT            30
 
 /* trinity_smc.c */
+int trinity_dpm_bapm_enable(struct radeon_device *rdev, bool enable);
 int trinity_dpm_config(struct radeon_device *rdev, bool enable);
 int trinity_uvd_dpm_config(struct radeon_device *rdev);
 int trinity_dpm_force_state(struct radeon_device *rdev, u32 n);
diff --git a/drivers/gpu/drm/radeon/trinity_smc.c b/drivers/gpu/drm/radeon/trinity_smc.c
index a42d89f1830c..9672bcbc7312 100644
--- a/drivers/gpu/drm/radeon/trinity_smc.c
+++ b/drivers/gpu/drm/radeon/trinity_smc.c
@@ -56,6 +56,14 @@ static int trinity_notify_message_to_smu(struct radeon_device *rdev, u32 id)
 	return 0;
 }
 
+int trinity_dpm_bapm_enable(struct radeon_device *rdev, bool enable)
+{
+	if (enable)
+		return trinity_notify_message_to_smu(rdev, PPSMC_MSG_EnableBAPM);
+	else
+		return trinity_notify_message_to_smu(rdev, PPSMC_MSG_DisableBAPM);
+}
+
 int trinity_dpm_config(struct radeon_device *rdev, bool enable)
 {
 	if (enable)
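The new trinity_dpm_enable_bapm() above hides both the SMU message and the locking: it takes the DPM mutex itself and silently does nothing when pi->enable_bapm is false. A hedged sketch of how an AC/DC power-source event handler might use it (the handler name is illustrative):

/* Illustrative sketch only; trinity_dpm_enable_bapm() is the helper added above. */
static void example_power_source_changed(struct radeon_device *rdev, bool ac_power)
{
	/* keep bidirectional application power management on only while on AC */
	trinity_dpm_enable_bapm(rdev, ac_power);
}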
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index 8dbe9d0ae9a7..8bf646183bac 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -97,7 +97,6 @@ int udl_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	ret = vm_insert_page(vma, (unsigned long)vmf->virtual_address, page);
 	switch (ret) {
 	case -EAGAIN:
-		set_need_resched();
 	case 0:
 	case -ERESTARTSYS:
 		return VM_FAULT_NOPAGE;
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
index e893f6e1937d..af0259708358 100644
--- a/drivers/gpu/vga/vgaarb.c
+++ b/drivers/gpu/vga/vgaarb.c
@@ -257,9 +257,9 @@ static struct vga_device *__vga_tryget(struct vga_device *vgadev,
 		if (!conflict->bridge_has_one_vga) {
 			vga_irq_set_state(conflict, false);
 			flags |= PCI_VGA_STATE_CHANGE_DECODES;
-			if (lwants & (VGA_RSRC_LEGACY_MEM|VGA_RSRC_NORMAL_MEM))
+			if (match & (VGA_RSRC_LEGACY_MEM|VGA_RSRC_NORMAL_MEM))
 				pci_bits |= PCI_COMMAND_MEMORY;
-			if (lwants & (VGA_RSRC_LEGACY_IO|VGA_RSRC_NORMAL_IO))
+			if (match & (VGA_RSRC_LEGACY_IO|VGA_RSRC_NORMAL_IO))
 				pci_bits |= PCI_COMMAND_IO;
 		}
 
@@ -267,11 +267,11 @@ static struct vga_device *__vga_tryget(struct vga_device *vgadev,
 			flags |= PCI_VGA_STATE_CHANGE_BRIDGE;
 
 		pci_set_vga_state(conflict->pdev, false, pci_bits, flags);
-		conflict->owns &= ~lwants;
+		conflict->owns &= ~match;
 		/* If he also owned non-legacy, that is no longer the case */
-		if (lwants & VGA_RSRC_LEGACY_MEM)
+		if (match & VGA_RSRC_LEGACY_MEM)
 			conflict->owns &= ~VGA_RSRC_NORMAL_MEM;
-		if (lwants & VGA_RSRC_LEGACY_IO)
+		if (match & VGA_RSRC_LEGACY_IO)
 			conflict->owns &= ~VGA_RSRC_NORMAL_IO;
 	}
 
@@ -644,10 +644,12 @@ bail:
 static inline void vga_update_device_decodes(struct vga_device *vgadev,
 					     int new_decodes)
 {
-	int old_decodes;
-	struct vga_device *new_vgadev, *conflict;
+	int old_decodes, decodes_removed, decodes_unlocked;
 
 	old_decodes = vgadev->decodes;
+	decodes_removed = ~new_decodes & old_decodes;
+	decodes_unlocked = vgadev->locks & decodes_removed;
+	vgadev->owns &= ~decodes_removed;
 	vgadev->decodes = new_decodes;
 
 	pr_info("vgaarb: device changed decodes: PCI:%s,olddecodes=%s,decodes=%s:owns=%s\n",
@@ -656,31 +658,22 @@ static inline void vga_update_device_decodes(struct vga_device *vgadev,
 		vga_iostate_to_str(vgadev->decodes),
 		vga_iostate_to_str(vgadev->owns));
 
-
-	/* if we own the decodes we should move them along to
-	   another card */
-	if ((vgadev->owns & old_decodes) && (vga_count > 1)) {
-		/* set us to own nothing */
-		vgadev->owns &= ~old_decodes;
-		list_for_each_entry(new_vgadev, &vga_list, list) {
-			if ((new_vgadev != vgadev) &&
-			    (new_vgadev->decodes & VGA_RSRC_LEGACY_MASK)) {
-				pr_info("vgaarb: transferring owner from PCI:%s to PCI:%s\n", pci_name(vgadev->pdev), pci_name(new_vgadev->pdev));
-				conflict = __vga_tryget(new_vgadev, VGA_RSRC_LEGACY_MASK);
-				if (!conflict)
-					__vga_put(new_vgadev, VGA_RSRC_LEGACY_MASK);
-				break;
-			}
-		}
+	/* if we removed locked decodes, lock count goes to zero, and release */
+	if (decodes_unlocked) {
+		if (decodes_unlocked & VGA_RSRC_LEGACY_IO)
+			vgadev->io_lock_cnt = 0;
+		if (decodes_unlocked & VGA_RSRC_LEGACY_MEM)
+			vgadev->mem_lock_cnt = 0;
+		__vga_put(vgadev, decodes_unlocked);
 	}
 
 	/* change decodes counter */
-	if (old_decodes != new_decodes) {
-		if (new_decodes & (VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM))
-			vga_decode_count++;
-		else
-			vga_decode_count--;
-	}
+	if (old_decodes & VGA_RSRC_LEGACY_MASK &&
+	    !(new_decodes & VGA_RSRC_LEGACY_MASK))
+		vga_decode_count--;
+	if (!(old_decodes & VGA_RSRC_LEGACY_MASK) &&
+	    new_decodes & VGA_RSRC_LEGACY_MASK)
+		vga_decode_count++;
 	pr_debug("vgaarb: decoding count now is: %d\n", vga_decode_count);
 }
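In the reworked vga_update_device_decodes() the global vga_decode_count is only adjusted when a device actually starts or stops decoding the legacy VGA ranges, rather than on every decode change. The transition test reduces to comparing the legacy bits of the old and new masks; a small self-contained sketch of that check (modelled in user space, with the VGA_RSRC_* legacy values from include/linux/vgaarb.h):

#include <stdbool.h>

/* Illustrative user-space model of the counting logic above. */
#define VGA_RSRC_LEGACY_IO   0x01
#define VGA_RSRC_LEGACY_MEM  0x02
#define VGA_RSRC_LEGACY_MASK (VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM)

static int example_decode_count_delta(unsigned int old_decodes, unsigned int new_decodes)
{
	bool was_legacy = old_decodes & VGA_RSRC_LEGACY_MASK;
	bool now_legacy = new_decodes & VGA_RSRC_LEGACY_MASK;

	if (was_legacy && !now_legacy)
		return -1;	/* device stopped decoding legacy ranges */
	if (!was_legacy && now_legacy)
		return 1;	/* device started decoding legacy ranges */
	return 0;		/* no transition, count unchanged */
}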