-rw-r--r--  drivers/gpu/drm/drm_atomic.c                      | 39
-rw-r--r--  drivers/gpu/drm/drm_dp_helper.c                   | 99
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h                   | 12
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c        |  2
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c                   |  2
-rw-r--r--  drivers/gpu/drm/i915/intel_csr.c                  |  2
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c              |  8
-rw-r--r--  drivers/gpu/drm/i915/intel_dp_mst.c               |  7
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi.c                  |  9
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c                   | 42
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c  |  1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/nv04.c     |  6
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c   |  2
-rw-r--r--  drivers/gpu/drm/qxl/qxl_display.c                 | 66
-rw-r--r--  drivers/gpu/drm/qxl/qxl_drv.h                     |  2
-rw-r--r--  include/uapi/drm/i915_drm.h                       |  2
16 files changed, 218 insertions, 83 deletions
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 434915448ea0..f7d5166f89b2 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -1515,7 +1515,8 @@ retry:
 			copied_props++;
 		}

-		if (obj->type == DRM_MODE_OBJECT_PLANE && count_props) {
+		if (obj->type == DRM_MODE_OBJECT_PLANE && count_props &&
+		    !(arg->flags & DRM_MODE_ATOMIC_TEST_ONLY)) {
 			plane = obj_to_plane(obj);
 			plane_mask |= (1 << drm_plane_index(plane));
 			plane->old_fb = plane->fb;
@@ -1537,10 +1538,11 @@ retry:
 	}

 	if (arg->flags & DRM_MODE_ATOMIC_TEST_ONLY) {
+		/*
+		 * Unlike commit, check_only does not clean up state.
+		 * Below we call drm_atomic_state_free for it.
+		 */
 		ret = drm_atomic_check_only(state);
-		/* _check_only() does not free state, unlike _commit() */
-		if (!ret)
-			drm_atomic_state_free(state);
 	} else if (arg->flags & DRM_MODE_ATOMIC_NONBLOCK) {
 		ret = drm_atomic_async_commit(state);
 	} else {
@@ -1567,25 +1569,30 @@ out:
 		plane->old_fb = NULL;
 	}

+	if (ret && arg->flags & DRM_MODE_PAGE_FLIP_EVENT) {
+		/*
+		 * TEST_ONLY and PAGE_FLIP_EVENT are mutually exclusive,
+		 * if they weren't, this code should be called on success
+		 * for TEST_ONLY too.
+		 */
+
+		for_each_crtc_in_state(state, crtc, crtc_state, i) {
+			if (!crtc_state->event)
+				continue;
+
+			destroy_vblank_event(dev, file_priv,
+					     crtc_state->event);
+		}
+	}
+
 	if (ret == -EDEADLK) {
 		drm_atomic_state_clear(state);
 		drm_modeset_backoff(&ctx);
 		goto retry;
 	}

-	if (ret) {
-		if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT) {
-			for_each_crtc_in_state(state, crtc, crtc_state, i) {
-				if (!crtc_state->event)
-					continue;
-
-				destroy_vblank_event(dev, file_priv,
-						     crtc_state->event);
-			}
-		}
-
+	if (ret || arg->flags & DRM_MODE_ATOMIC_TEST_ONLY)
 		drm_atomic_state_free(state);
-	}

 	drm_modeset_drop_locks(&ctx);
 	drm_modeset_acquire_fini(&ctx);
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index 80a02a412607..291734e87fca 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -159,6 +159,8 @@ int drm_dp_bw_code_to_link_rate(u8 link_bw)
 }
 EXPORT_SYMBOL(drm_dp_bw_code_to_link_rate);

+#define AUX_RETRY_INTERVAL 500 /* us */
+
 /**
  * DOC: dp helpers
  *
@@ -213,7 +215,7 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request,
 			return -EIO;

 		case DP_AUX_NATIVE_REPLY_DEFER:
-			usleep_range(400, 500);
+			usleep_range(AUX_RETRY_INTERVAL, AUX_RETRY_INTERVAL + 100);
 			break;
 		}
 	}
@@ -422,6 +424,90 @@ static u32 drm_dp_i2c_functionality(struct i2c_adapter *adapter)
 	       I2C_FUNC_10BIT_ADDR;
 }

+#define AUX_PRECHARGE_LEN 10 /* 10 to 16 */
+#define AUX_SYNC_LEN (16 + 4) /* preamble + AUX_SYNC_END */
+#define AUX_STOP_LEN 4
+#define AUX_CMD_LEN 4
+#define AUX_ADDRESS_LEN 20
+#define AUX_REPLY_PAD_LEN 4
+#define AUX_LENGTH_LEN 8
+
+/*
+ * Calculate the duration of the AUX request/reply in usec. Gives the
+ * "best" case estimate, ie. successful while as short as possible.
+ */
+static int drm_dp_aux_req_duration(const struct drm_dp_aux_msg *msg)
+{
+	int len = AUX_PRECHARGE_LEN + AUX_SYNC_LEN + AUX_STOP_LEN +
+		AUX_CMD_LEN + AUX_ADDRESS_LEN + AUX_LENGTH_LEN;
+
+	if ((msg->request & DP_AUX_I2C_READ) == 0)
+		len += msg->size * 8;
+
+	return len;
+}
+
+static int drm_dp_aux_reply_duration(const struct drm_dp_aux_msg *msg)
+{
+	int len = AUX_PRECHARGE_LEN + AUX_SYNC_LEN + AUX_STOP_LEN +
+		AUX_CMD_LEN + AUX_REPLY_PAD_LEN;
+
+	/*
+	 * For read we expect what was asked. For writes there will
+	 * be 0 or 1 data bytes. Assume 0 for the "best" case.
+	 */
+	if (msg->request & DP_AUX_I2C_READ)
+		len += msg->size * 8;
+
+	return len;
+}
+
+#define I2C_START_LEN 1
+#define I2C_STOP_LEN 1
+#define I2C_ADDR_LEN 9 /* ADDRESS + R/W + ACK/NACK */
+#define I2C_DATA_LEN 9 /* DATA + ACK/NACK */
+
+/*
+ * Calculate the length of the i2c transfer in usec, assuming
+ * the i2c bus speed is as specified. Gives the the "worst"
+ * case estimate, ie. successful while as long as possible.
+ * Doesn't account the the "MOT" bit, and instead assumes each
+ * message includes a START, ADDRESS and STOP. Neither does it
+ * account for additional random variables such as clock stretching.
+ */
+static int drm_dp_i2c_msg_duration(const struct drm_dp_aux_msg *msg,
+				   int i2c_speed_khz)
+{
+	/* AUX bitrate is 1MHz, i2c bitrate as specified */
+	return DIV_ROUND_UP((I2C_START_LEN + I2C_ADDR_LEN +
+			     msg->size * I2C_DATA_LEN +
+			     I2C_STOP_LEN) * 1000, i2c_speed_khz);
+}
+
+/*
+ * Deterine how many retries should be attempted to successfully transfer
+ * the specified message, based on the estimated durations of the
+ * i2c and AUX transfers.
+ */
+static int drm_dp_i2c_retry_count(const struct drm_dp_aux_msg *msg,
+			      int i2c_speed_khz)
+{
+	int aux_time_us = drm_dp_aux_req_duration(msg) +
+		drm_dp_aux_reply_duration(msg);
+	int i2c_time_us = drm_dp_i2c_msg_duration(msg, i2c_speed_khz);
+
+	return DIV_ROUND_UP(i2c_time_us, aux_time_us + AUX_RETRY_INTERVAL);
+}
+
+/*
+ * FIXME currently assumes 10 kHz as some real world devices seem
+ * to require it. We should query/set the speed via DPCD if supported.
+ */
+static int dp_aux_i2c_speed_khz __read_mostly = 10;
+module_param_unsafe(dp_aux_i2c_speed_khz, int, 0644);
+MODULE_PARM_DESC(dp_aux_i2c_speed_khz,
+		 "Assumed speed of the i2c bus in kHz, (1-400, default 10)");
+
 /*
  * Transfer a single I2C-over-AUX message and handle various error conditions,
  * retrying the transaction as appropriate. It is assumed that the
@@ -434,13 +520,16 @@ static int drm_dp_i2c_do_msg(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
 {
 	unsigned int retry, defer_i2c;
 	int ret;
-
 	/*
 	 * DP1.2 sections 2.7.7.1.5.6.1 and 2.7.7.1.6.6.1: A DP Source device
 	 * is required to retry at least seven times upon receiving AUX_DEFER
 	 * before giving up the AUX transaction.
+	 *
+	 * We also try to account for the i2c bus speed.
 	 */
-	for (retry = 0, defer_i2c = 0; retry < (7 + defer_i2c); retry++) {
+	int max_retries = max(7, drm_dp_i2c_retry_count(msg, dp_aux_i2c_speed_khz));
+
+	for (retry = 0, defer_i2c = 0; retry < (max_retries + defer_i2c); retry++) {
 		mutex_lock(&aux->hw_mutex);
 		ret = aux->transfer(aux, msg);
 		mutex_unlock(&aux->hw_mutex);
@@ -476,7 +565,7 @@ static int drm_dp_i2c_do_msg(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
 			 * For now just defer for long enough to hopefully be
 			 * safe for all use-cases.
 			 */
-			usleep_range(500, 600);
+			usleep_range(AUX_RETRY_INTERVAL, AUX_RETRY_INTERVAL + 100);
 			continue;

 		default:
@@ -506,7 +595,7 @@ static int drm_dp_i2c_do_msg(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
 			aux->i2c_defer_count++;
 			if (defer_i2c < 7)
 				defer_i2c++;
-			usleep_range(400, 500);
+			usleep_range(AUX_RETRY_INTERVAL, AUX_RETRY_INTERVAL + 100);
 			continue;

 		default:
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 81adf89b92f1..e1db8de52851 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1929,6 +1929,8 @@ struct drm_i915_private {
 			struct skl_wm_values skl_hw;
 			struct vlv_wm_values vlv;
 		};
+
+		uint8_t max_level;
 	} wm;

 	struct i915_runtime_pm pm;
@@ -3384,13 +3386,13 @@ int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);
 #define I915_READ64(reg)	dev_priv->uncore.funcs.mmio_readq(dev_priv, (reg), true)

 #define I915_READ64_2x32(lower_reg, upper_reg) ({			\
-	u32 upper, lower, tmp;						\
-	tmp = I915_READ(upper_reg);					\
+	u32 upper, lower, old_upper, loop = 0;				\
+	upper = I915_READ(upper_reg);					\
 	do {								\
-		upper = tmp;						\
+		old_upper = upper;					\
 		lower = I915_READ(lower_reg);				\
-		tmp = I915_READ(upper_reg);				\
-	} while (upper != tmp);						\
+		upper = I915_READ(upper_reg);				\
+	} while (upper != old_upper && loop++ < 2);			\
 	(u64)upper << 32 | lower; })

 #define POSTING_READ(reg)	(void)I915_READ_NOTRACE(reg)
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 923a3c4bf0b7..a953d4975b8c 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1032,6 +1032,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
 		u32 old_read = obj->base.read_domains;
 		u32 old_write = obj->base.write_domain;

+		obj->dirty = 1; /* be paranoid */
 		obj->base.write_domain = obj->base.pending_write_domain;
 		if (obj->base.write_domain == 0)
 			obj->base.pending_read_domains |= obj->base.read_domains;
@@ -1039,7 +1040,6 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,

 		i915_vma_move_to_active(vma, req);
 		if (obj->base.write_domain) {
-			obj->dirty = 1;
 			i915_gem_request_assign(&obj->last_write_req, req);

 			intel_fb_obj_invalidate(obj, ORIGIN_CS);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index b5fb1430c1d7..5a244ab9395b 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1558,7 +1558,7 @@ static void i9xx_hpd_irq_handler(struct drm_device *dev)
 		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

 		intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
-				   hotplug_trigger, hpd_status_g4x,
+				   hotplug_trigger, hpd_status_i915,
 				   i9xx_port_hotplug_long_detect);
 		intel_hpd_irq_handler(dev, pin_mask, long_mask);
 	}
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c
index ba1ae031e6fd..d0f1b8d833cd 100644
--- a/drivers/gpu/drm/i915/intel_csr.c
+++ b/drivers/gpu/drm/i915/intel_csr.c
@@ -350,7 +350,7 @@ static void finish_csr_load(const struct firmware *fw, void *context)
 	}
 	csr->mmio_count = dmc_header->mmio_count;
 	for (i = 0; i < dmc_header->mmio_count; i++) {
-		if (dmc_header->mmioaddr[i] < CSR_MMIO_START_RANGE &&
+		if (dmc_header->mmioaddr[i] < CSR_MMIO_START_RANGE ||
 		    dmc_header->mmioaddr[i] > CSR_MMIO_END_RANGE) {
 			DRM_ERROR(" Firmware has wrong mmio address 0x%x\n",
 				  dmc_header->mmioaddr[i]);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index ca9278be49f7..8cc9264f7809 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -6305,7 +6305,7 @@ static void intel_connector_check_state(struct intel_connector *connector)
 		      connector->base.name);

 	if (connector->get_hw_state(connector)) {
-		struct drm_encoder *encoder = &connector->encoder->base;
+		struct intel_encoder *encoder = connector->encoder;
 		struct drm_connector_state *conn_state = connector->base.state;

 		I915_STATE_WARN(!crtc,
@@ -6317,13 +6317,13 @@ static void intel_connector_check_state(struct intel_connector *connector)
 		I915_STATE_WARN(!crtc->state->active,
 		      "connector is active, but attached crtc isn't\n");

-		if (!encoder)
+		if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
 			return;

-		I915_STATE_WARN(conn_state->best_encoder != encoder,
+		I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
 			"atomic encoder doesn't match attached encoder\n");

-		I915_STATE_WARN(conn_state->crtc != encoder->crtc,
+		I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
 			"attached encoder crtc differs from connector crtc\n");
 	} else {
 		I915_STATE_WARN(crtc && crtc->state->active,
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index 983553cf8b74..3e4be5a3becd 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -173,6 +173,11 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder)
 		return;
 	}

+	/* MST encoders are bound to a crtc, not to a connector,
+	 * force the mapping here for get_hw_state.
+	 */
+	found->encoder = encoder;
+
 	DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links);

 	intel_mst->port = found->port;
@@ -400,7 +405,7 @@ static const struct drm_encoder_funcs intel_dp_mst_enc_funcs = {

 static bool intel_dp_mst_get_hw_state(struct intel_connector *connector)
 {
-	if (connector->encoder) {
+	if (connector->encoder && connector->base.state->crtc) {
 		enum pipe pipe;
 		if (!connector->encoder->get_hw_state(connector->encoder, &pipe))
 			return false;
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
index 4a601cf90f16..32a6c7184ca4 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -1048,11 +1048,7 @@ void intel_dsi_init(struct drm_device *dev)
 	intel_connector->unregister = intel_connector_unregister;

 	/* Pipe A maps to MIPI DSI port A, pipe B maps to MIPI DSI port C */
-	if (dev_priv->vbt.dsi.config->dual_link) {
-		/* XXX: does dual link work on either pipe? */
-		intel_encoder->crtc_mask = (1 << PIPE_A);
-		intel_dsi->ports = ((1 << PORT_A) | (1 << PORT_C));
-	} else if (dev_priv->vbt.dsi.port == DVO_PORT_MIPIA) {
+	if (dev_priv->vbt.dsi.port == DVO_PORT_MIPIA) {
 		intel_encoder->crtc_mask = (1 << PIPE_A);
 		intel_dsi->ports = (1 << PORT_A);
 	} else if (dev_priv->vbt.dsi.port == DVO_PORT_MIPIC) {
@@ -1060,6 +1056,9 @@ void intel_dsi_init(struct drm_device *dev)
 		intel_dsi->ports = (1 << PORT_C);
 	}

+	if (dev_priv->vbt.dsi.config->dual_link)
+		intel_dsi->ports = ((1 << PORT_A) | (1 << PORT_C));
+
 	/* Create a DSI host (and a device) for each port. */
 	for_each_dsi_port(port, intel_dsi->ports) {
 		struct intel_dsi_host *host;
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index fff0c22682ee..ddbb7ed0a193 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -955,8 +955,6 @@ enum vlv_wm_level {
 	VLV_WM_LEVEL_PM2,
 	VLV_WM_LEVEL_PM5,
 	VLV_WM_LEVEL_DDR_DVFS,
-	CHV_WM_NUM_LEVELS,
-	VLV_WM_NUM_LEVELS = 1,
 };

 /* latency must be in 0.1us units. */
@@ -982,9 +980,13 @@ static void vlv_setup_wm_latency(struct drm_device *dev)
 	/* all latencies in usec */
 	dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM2] = 3;

+	dev_priv->wm.max_level = VLV_WM_LEVEL_PM2;
+
 	if (IS_CHERRYVIEW(dev_priv)) {
 		dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM5] = 12;
 		dev_priv->wm.pri_latency[VLV_WM_LEVEL_DDR_DVFS] = 33;
+
+		dev_priv->wm.max_level = VLV_WM_LEVEL_DDR_DVFS;
 	}
 }

@@ -1137,10 +1139,7 @@ static void vlv_compute_wm(struct intel_crtc *crtc)
 	memset(wm_state, 0, sizeof(*wm_state));

 	wm_state->cxsr = crtc->pipe != PIPE_C && crtc->wm.cxsr_allowed;
-	if (IS_CHERRYVIEW(dev))
-		wm_state->num_levels = CHV_WM_NUM_LEVELS;
-	else
-		wm_state->num_levels = VLV_WM_NUM_LEVELS;
+	wm_state->num_levels = to_i915(dev)->wm.max_level + 1;

 	wm_state->num_active_planes = 0;

@@ -1220,7 +1219,7 @@ static void vlv_compute_wm(struct intel_crtc *crtc)
 	}

 	/* clear any (partially) filled invalid levels */
-	for (level = wm_state->num_levels; level < CHV_WM_NUM_LEVELS; level++) {
+	for (level = wm_state->num_levels; level < to_i915(dev)->wm.max_level + 1; level++) {
 		memset(&wm_state->wm[level], 0, sizeof(wm_state->wm[level]));
 		memset(&wm_state->sr[level], 0, sizeof(wm_state->sr[level]));
 	}
@@ -1324,10 +1323,7 @@ static void vlv_merge_wm(struct drm_device *dev,
 	struct intel_crtc *crtc;
 	int num_active_crtcs = 0;

-	if (IS_CHERRYVIEW(dev))
-		wm->level = VLV_WM_LEVEL_DDR_DVFS;
-	else
-		wm->level = VLV_WM_LEVEL_PM2;
+	wm->level = to_i915(dev)->wm.max_level;
 	wm->cxsr = true;

 	for_each_intel_crtc(dev, crtc) {
@@ -4083,9 +4079,29 @@ void vlv_wm_get_hw_state(struct drm_device *dev)
 		if (val & DSP_MAXFIFO_PM5_ENABLE)
 			wm->level = VLV_WM_LEVEL_PM5;

+		/*
+		 * If DDR DVFS is disabled in the BIOS, Punit
+		 * will never ack the request. So if that happens
+		 * assume we don't have to enable/disable DDR DVFS
+		 * dynamically. To test that just set the REQ_ACK
+		 * bit to poke the Punit, but don't change the
+		 * HIGH/LOW bits so that we don't actually change
+		 * the current state.
+		 */
 		val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
-		if ((val & FORCE_DDR_HIGH_FREQ) == 0)
-			wm->level = VLV_WM_LEVEL_DDR_DVFS;
+		val |= FORCE_DDR_FREQ_REQ_ACK;
+		vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);
+
+		if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
+			      FORCE_DDR_FREQ_REQ_ACK) == 0, 3)) {
+			DRM_DEBUG_KMS("Punit not acking DDR DVFS request, "
+				      "assuming DDR DVFS is disabled\n");
+			dev_priv->wm.max_level = VLV_WM_LEVEL_PM5;
+		} else {
+			val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
+			if ((val & FORCE_DDR_HIGH_FREQ) == 0)
+				wm->level = VLV_WM_LEVEL_DDR_DVFS;
+		}

 		mutex_unlock(&dev_priv->rps.hw_lock);
 	}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
index 9dd1cac81e80..e8eb14e438f4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
@@ -689,6 +689,7 @@ nvkm_device_pci_10de_11e3[] = {

 static const struct nvkm_device_pci_vendor
 nvkm_device_pci_10de_11fc[] = {
+	{ 0x1179, 0x0001, NULL, { .War00C800_0 = true } }, /* Toshiba Tecra W50 */
 	{ 0x17aa, 0x2211, NULL, { .War00C800_0 = true } }, /* Lenovo W541 */
 	{ 0x17aa, 0x221e, NULL, { .War00C800_0 = true } }, /* Lenovo W541 */
 	{}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv04.c
index 426ba0025a8d..85c5b7fea5f5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv04.c
@@ -1048,11 +1048,11 @@ nv04_gr_object_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
 	if (ret == 0) {
 		nvkm_kmap(*pgpuobj);
 		nvkm_wo32(*pgpuobj, 0x00, object->oclass);
-		nvkm_wo32(*pgpuobj, 0x04, 0x00000000);
-		nvkm_wo32(*pgpuobj, 0x08, 0x00000000);
 #ifdef __BIG_ENDIAN
-		nvkm_mo32(*pgpuobj, 0x08, 0x00080000, 0x00080000);
+		nvkm_mo32(*pgpuobj, 0x00, 0x00080000, 0x00080000);
 #endif
+		nvkm_wo32(*pgpuobj, 0x04, 0x00000000);
+		nvkm_wo32(*pgpuobj, 0x08, 0x00000000);
 		nvkm_wo32(*pgpuobj, 0x0c, 0x00000000);
 		nvkm_done(*pgpuobj);
 	}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c
index 07feae620c8d..c233e3f653ce 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c
@@ -326,7 +326,7 @@ gt215_clk_pre(struct nvkm_clk *clk, unsigned long *flags)
 		return -EIO;

 	if (nvkm_msec(device, 2000,
-		u32 tmp = nvkm_rd32(device, 0x002504) & 0x0000003f;
+		u32 tmp = nvkm_rd32(device, 0x00251c) & 0x0000003f;
 		if (tmp == 0x0000003f)
 			break;
 	) < 0)
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index a8dbb3ef4e3c..7c6225c84ba6 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -160,9 +160,35 @@ static int qxl_add_monitors_config_modes(struct drm_connector *connector,
 	*pwidth = head->width;
 	*pheight = head->height;
 	drm_mode_probed_add(connector, mode);
+	/* remember the last custom size for mode validation */
+	qdev->monitors_config_width = mode->hdisplay;
+	qdev->monitors_config_height = mode->vdisplay;
 	return 1;
 }

+static struct mode_size {
+	int w;
+	int h;
+} common_modes[] = {
+	{ 640, 480},
+	{ 720, 480},
+	{ 800, 600},
+	{ 848, 480},
+	{1024, 768},
+	{1152, 768},
+	{1280, 720},
+	{1280, 800},
+	{1280, 854},
+	{1280, 960},
+	{1280, 1024},
+	{1440, 900},
+	{1400, 1050},
+	{1680, 1050},
+	{1600, 1200},
+	{1920, 1080},
+	{1920, 1200}
+};
+
 static int qxl_add_common_modes(struct drm_connector *connector,
                                 unsigned pwidth,
                                 unsigned pheight)
@@ -170,29 +196,6 @@ static int qxl_add_common_modes(struct drm_connector *connector,
 	struct drm_device *dev = connector->dev;
 	struct drm_display_mode *mode = NULL;
 	int i;
-	struct mode_size {
-		int w;
-		int h;
-	} common_modes[] = {
-		{ 640, 480},
-		{ 720, 480},
-		{ 800, 600},
-		{ 848, 480},
-		{1024, 768},
-		{1152, 768},
-		{1280, 720},
-		{1280, 800},
-		{1280, 854},
-		{1280, 960},
-		{1280, 1024},
-		{1440, 900},
-		{1400, 1050},
-		{1680, 1050},
-		{1600, 1200},
-		{1920, 1080},
-		{1920, 1200}
-	};
-
 	for (i = 0; i < ARRAY_SIZE(common_modes); i++) {
 		mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h,
 				    60, false, false, false);
@@ -823,11 +826,22 @@ static int qxl_conn_get_modes(struct drm_connector *connector)
 static int qxl_conn_mode_valid(struct drm_connector *connector,
 			       struct drm_display_mode *mode)
 {
+	struct drm_device *ddev = connector->dev;
+	struct qxl_device *qdev = ddev->dev_private;
+	int i;
+
 	/* TODO: is this called for user defined modes? (xrandr --add-mode)
 	 * TODO: check that the mode fits in the framebuffer */
-	DRM_DEBUG("%s: %dx%d status=%d\n", mode->name, mode->hdisplay,
-		  mode->vdisplay, mode->status);
-	return MODE_OK;
+
+	if(qdev->monitors_config_width == mode->hdisplay &&
+	   qdev->monitors_config_height == mode->vdisplay)
+		return MODE_OK;
+
+	for (i = 0; i < ARRAY_SIZE(common_modes); i++) {
+		if (common_modes[i].w == mode->hdisplay && common_modes[i].h == mode->vdisplay)
+			return MODE_OK;
+	}
+	return MODE_BAD;
 }

 static struct drm_encoder *qxl_best_encoder(struct drm_connector *connector)
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index d8549690801d..01a86948eb8c 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -325,6 +325,8 @@ struct qxl_device {
 	struct work_struct fb_work;

 	struct drm_property *hotplug_mode_update_property;
+	int monitors_config_width;
+	int monitors_config_height;
 };

 /* forward declaration for QXL_INFO_IO */
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index dbd16a2d37db..fd5aa47bd689 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -358,7 +358,7 @@ typedef struct drm_i915_irq_wait {
 #define I915_PARAM_HAS_RESOURCE_STREAMER 36

 typedef struct drm_i915_getparam {
-	s32 param;
+	__s32 param;
 	/*
 	 * WARNING: Using pointers instead of fixed-size u64 means we need to write
 	 * compat32 code. Don't repeat this mistake.