author    Linus Torvalds <torvalds@linux-foundation.org>  2019-06-21 11:03:33 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2019-06-21 11:03:33 -0700
commit    0728f6c3cab107f0aab2c8ded1292dd2cc41a228 (patch)
tree      1a758a878ac1a17102c09585c072a2ab3d0d3740 /drivers/gpu/drm
parent    db54615e21419c3cb4d699a0b0aa16cc44d0e9da (diff)
parent    5eab9cf87b6c261f4e2f6c7623171cc2f5ea1a9c (diff)
Merge tag 'drm-fixes-2019-06-21' of git://anongit.freedesktop.org/drm/drm
Pull drm fixes from Dave Airlie:
 "Just catching up on the week since back from holidays, everything
  seems quite sane.

  core:
   - copy_to_user fix for really legacy codepaths

  vmwgfx:
   - two dma fixes
   - one virt hw interaction fix

  i915:
   - modesetting fix
   - gvt fix

  panfrost:
   - BO unmapping fix

  imx:
   - image converter fixes"

* tag 'drm-fixes-2019-06-21' of git://anongit.freedesktop.org/drm/drm:
  drm/i915: Don't clobber M/N values during fastset check
  drm: return -EFAULT if copy_to_user() fails
  drm/panfrost: Make sure a BO is only unmapped when appropriate
  drm/i915/gvt: ignore unexpected pvinfo write
  gpu: ipu-v3: image-convert: Fix image downsize coefficients
  gpu: ipu-v3: image-convert: Fix input bytesperline for packed formats
  gpu: ipu-v3: image-convert: Fix input bytesperline width/height align
  drm/vmwgfx: fix a warning due to missing dma_parms
  drm/vmwgfx: Honor the sg list segment size limitation
  drm/vmwgfx: Use the backdoor port if the HB port is not available
Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--  drivers/gpu/drm/drm_bufs.c                 |   5
-rw-r--r--  drivers/gpu/drm/drm_ioc32.c                |   5
-rw-r--r--  drivers/gpu/drm/i915/gvt/handlers.c        |  15
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c       |  38
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_gem.c    |   3
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_gem.h    |   1
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_mmu.c    |   8
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c        |   3
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_msg.c        | 146
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c |  10
10 files changed, 182 insertions, 52 deletions
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index bfc419ed9d6c..ceca79b1468d 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -1340,7 +1340,10 @@ static int copy_one_buf(void *data, int count, struct drm_buf_entry *from)
.size = from->buf_size,
.low_mark = from->low_mark,
.high_mark = from->high_mark};
- return copy_to_user(to, &v, offsetof(struct drm_buf_desc, flags));
+
+ if (copy_to_user(to, &v, offsetof(struct drm_buf_desc, flags)))
+ return -EFAULT;
+ return 0;
}
int drm_legacy_infobufs(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
index 374b372da58a..3972ebe48463 100644
--- a/drivers/gpu/drm/drm_ioc32.c
+++ b/drivers/gpu/drm/drm_ioc32.c
@@ -375,7 +375,10 @@ static int copy_one_buf32(void *data, int count, struct drm_buf_entry *from)
.size = from->buf_size,
.low_mark = from->low_mark,
.high_mark = from->high_mark};
- return copy_to_user(to + count, &v, offsetof(drm_buf_desc32_t, flags));
+
+ if (copy_to_user(to + count, &v, offsetof(drm_buf_desc32_t, flags)))
+ return -EFAULT;
+ return 0;
}
static int drm_legacy_infobufs32(struct drm_device *dev, void *data,
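[Editor's note] The two hunks above fix the same bug class: copy_to_user()
returns the number of bytes it failed to copy, not a negative errno, so
returning its result directly hands a positive byte count to the ioctl
layer, which does not treat it as an error. A minimal sketch of the
corrected pattern, using a hypothetical helper (copy_one_word is not part
of the patch):

#include <linux/types.h>
#include <linux/uaccess.h>

/* Hypothetical helper illustrating the pattern above. */
static int copy_one_word(u32 __user *to, u32 val)
{
	/* copy_to_user() returns bytes NOT copied; nonzero means a fault. */
	if (copy_to_user(to, &val, sizeof(val)))
		return -EFAULT;
	return 0;
}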
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index a6ade66349bd..25f78196b964 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -1254,18 +1254,15 @@ static int send_display_ready_uevent(struct intel_vgpu *vgpu, int ready)
static int pvinfo_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
- u32 data;
- int ret;
-
- write_vreg(vgpu, offset, p_data, bytes);
- data = vgpu_vreg(vgpu, offset);
+ u32 data = *(u32 *)p_data;
+ bool invalid_write = false;
switch (offset) {
case _vgtif_reg(display_ready):
send_display_ready_uevent(vgpu, data ? 1 : 0);
break;
case _vgtif_reg(g2v_notify):
- ret = handle_g2v_notification(vgpu, data);
+ handle_g2v_notification(vgpu, data);
break;
/* add xhot and yhot to handled list to avoid error log */
case _vgtif_reg(cursor_x_hot):
@@ -1282,13 +1279,19 @@ static int pvinfo_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
case _vgtif_reg(execlist_context_descriptor_hi):
break;
case _vgtif_reg(rsv5[0])..._vgtif_reg(rsv5[3]):
+ invalid_write = true;
enter_failsafe_mode(vgpu, GVT_FAILSAFE_INSUFFICIENT_RESOURCE);
break;
default:
+ invalid_write = true;
gvt_vgpu_err("invalid pvinfo write offset %x bytes %x data %x\n",
offset, bytes, data);
break;
}
+
+ if (!invalid_write)
+ write_vreg(vgpu, offset, p_data, bytes);
+
return 0;
}
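[Editor's note] The gvt hunk above reorders validation before the commit:
previously write_vreg() updated the emulated register file before the
offset was checked, so an unexpected pvinfo write still landed in the
vreg. A sketch of the validate-then-commit pattern, under hypothetical
names (struct vdev, is_known_offset and commit_vreg are illustrations,
not gvt API):

/* Classify the guest write first; mirror it only when recognised. */
static int pv_reg_write(struct vdev *vdev, unsigned int offset, u32 data)
{
	bool invalid = !is_known_offset(offset);

	if (invalid)
		pr_err("ignoring unexpected pvinfo write: offset %x data %x\n",
		       offset, data);
	else
		commit_vreg(vdev, offset, data);	/* hypothetical commit */

	return 0;	/* the MMIO access itself still completes */
}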
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index b69440cf41ea..75105a2c59ea 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -12005,9 +12005,6 @@ intel_compare_link_m_n(const struct intel_link_m_n *m_n,
m2_n2->gmch_m, m2_n2->gmch_n, !adjust) &&
intel_compare_m_n(m_n->link_m, m_n->link_n,
m2_n2->link_m, m2_n2->link_n, !adjust)) {
- if (adjust)
- *m2_n2 = *m_n;
-
return true;
}
@@ -13149,6 +13146,33 @@ static int calc_watermark_data(struct intel_atomic_state *state)
return 0;
}
+static void intel_crtc_check_fastset(struct intel_crtc_state *old_crtc_state,
+ struct intel_crtc_state *new_crtc_state)
+{
+ struct drm_i915_private *dev_priv =
+ to_i915(new_crtc_state->base.crtc->dev);
+
+ if (!intel_pipe_config_compare(dev_priv, old_crtc_state,
+ new_crtc_state, true))
+ return;
+
+ new_crtc_state->base.mode_changed = false;
+ new_crtc_state->update_pipe = true;
+
+ /*
+ * If we're not doing the full modeset we want to
+ * keep the current M/N values as they may be
+ * sufficiently different to the computed values
+ * to cause problems.
+ *
+ * FIXME: should really copy more fuzzy state here
+ */
+ new_crtc_state->fdi_m_n = old_crtc_state->fdi_m_n;
+ new_crtc_state->dp_m_n = old_crtc_state->dp_m_n;
+ new_crtc_state->dp_m2_n2 = old_crtc_state->dp_m2_n2;
+ new_crtc_state->has_drrs = old_crtc_state->has_drrs;
+}
+
/**
* intel_atomic_check - validate state object
* @dev: drm device
@@ -13197,12 +13221,8 @@ static int intel_atomic_check(struct drm_device *dev,
return ret;
}
- if (intel_pipe_config_compare(dev_priv,
- to_intel_crtc_state(old_crtc_state),
- pipe_config, true)) {
- crtc_state->mode_changed = false;
- pipe_config->update_pipe = true;
- }
+ intel_crtc_check_fastset(to_intel_crtc_state(old_crtc_state),
+ pipe_config);
if (needs_modeset(crtc_state))
any_ms = true;
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.c b/drivers/gpu/drm/panfrost/panfrost_gem.c
index a5528a360ef4..97d64ceedbb4 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gem.c
@@ -19,7 +19,8 @@ static void panfrost_gem_free_object(struct drm_gem_object *obj)
struct panfrost_gem_object *bo = to_panfrost_bo(obj);
struct panfrost_device *pfdev = obj->dev->dev_private;
- panfrost_mmu_unmap(bo);
+ if (bo->is_mapped)
+ panfrost_mmu_unmap(bo);
spin_lock(&pfdev->mm_lock);
drm_mm_remove_node(&bo->node);
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.h b/drivers/gpu/drm/panfrost/panfrost_gem.h
index 045000eb5fcf..6dbcaba020fc 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem.h
+++ b/drivers/gpu/drm/panfrost/panfrost_gem.h
@@ -11,6 +11,7 @@ struct panfrost_gem_object {
struct drm_gem_shmem_object base;
struct drm_mm_node node;
+ bool is_mapped;
};
static inline
diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
index 762b1bd2a8c2..92ac995dd9c6 100644
--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
@@ -156,6 +156,9 @@ int panfrost_mmu_map(struct panfrost_gem_object *bo)
struct sg_table *sgt;
int ret;
+ if (WARN_ON(bo->is_mapped))
+ return 0;
+
sgt = drm_gem_shmem_get_pages_sgt(obj);
if (WARN_ON(IS_ERR(sgt)))
return PTR_ERR(sgt);
@@ -189,6 +192,7 @@ int panfrost_mmu_map(struct panfrost_gem_object *bo)
pm_runtime_mark_last_busy(pfdev->dev);
pm_runtime_put_autosuspend(pfdev->dev);
+ bo->is_mapped = true;
return 0;
}
@@ -203,6 +207,9 @@ void panfrost_mmu_unmap(struct panfrost_gem_object *bo)
size_t unmapped_len = 0;
int ret;
+ if (WARN_ON(!bo->is_mapped))
+ return;
+
dev_dbg(pfdev->dev, "unmap: iova=%llx, len=%zx", iova, len);
ret = pm_runtime_get_sync(pfdev->dev);
@@ -230,6 +237,7 @@ void panfrost_mmu_unmap(struct panfrost_gem_object *bo)
pm_runtime_mark_last_busy(pfdev->dev);
pm_runtime_put_autosuspend(pfdev->dev);
+ bo->is_mapped = false;
}
static void mmu_tlb_inv_context_s1(void *cookie)
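[Editor's note] The three panfrost hunks cooperate: a per-BO is_mapped
flag makes map and unmap strictly paired, so a BO whose mapping never
succeeded (or was already torn down) is not unmapped again on free. A
sketch of the guard pattern under illustrative names (struct demo_bo and
the demo_* helpers are not panfrost API):

struct demo_bo {
	bool is_mapped;
};

static int demo_map(struct demo_bo *bo)
{
	if (WARN_ON(bo->is_mapped))
		return 0;		/* already mapped: nothing to do */
	/* ... establish the MMU mapping here ... */
	bo->is_mapped = true;
	return 0;
}

static void demo_unmap(struct demo_bo *bo)
{
	if (WARN_ON(!bo->is_mapped))
		return;			/* never mapped: nothing to undo */
	/* ... tear the MMU mapping down here ... */
	bo->is_mapped = false;
}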
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 4ff11a0077e1..9506190a0300 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -747,6 +747,9 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
if (unlikely(ret != 0))
goto out_err0;
+ dma_set_max_seg_size(dev->dev, min_t(unsigned int, U32_MAX & PAGE_MASK,
+ SCATTERLIST_MAX_SEGMENT));
+
if (dev_priv->capabilities & SVGA_CAP_GMR2) {
DRM_INFO("Max GMR ids is %u\n",
(unsigned)dev_priv->max_gmr_ids);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
index 8b9270f31409..e4e09d47c5c0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
@@ -136,6 +136,114 @@ static int vmw_close_channel(struct rpc_channel *channel)
return 0;
}
+/**
+ * vmw_port_hb_out - Send the message payload either through the
+ * high-bandwidth port if available, or through the backdoor otherwise.
+ * @channel: The rpc channel.
+ * @msg: NULL-terminated message.
+ * @hb: Whether the high-bandwidth port is available.
+ *
+ * Return: The port status.
+ */
+static unsigned long vmw_port_hb_out(struct rpc_channel *channel,
+ const char *msg, bool hb)
+{
+ unsigned long si, di, eax, ebx, ecx, edx;
+ unsigned long msg_len = strlen(msg);
+
+ if (hb) {
+ unsigned long bp = channel->cookie_high;
+
+ si = (uintptr_t) msg;
+ di = channel->cookie_low;
+
+ VMW_PORT_HB_OUT(
+ (MESSAGE_STATUS_SUCCESS << 16) | VMW_PORT_CMD_HB_MSG,
+ msg_len, si, di,
+ VMW_HYPERVISOR_HB_PORT | (channel->channel_id << 16),
+ VMW_HYPERVISOR_MAGIC, bp,
+ eax, ebx, ecx, edx, si, di);
+
+ return ebx;
+ }
+
+ /* HB port not available. Send the message 4 bytes at a time. */
+ ecx = MESSAGE_STATUS_SUCCESS << 16;
+ while (msg_len && (HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS)) {
+ unsigned int bytes = min_t(size_t, msg_len, 4);
+ unsigned long word = 0;
+
+ memcpy(&word, msg, bytes);
+ msg_len -= bytes;
+ msg += bytes;
+ si = channel->cookie_high;
+ di = channel->cookie_low;
+
+ VMW_PORT(VMW_PORT_CMD_MSG | (MSG_TYPE_SENDPAYLOAD << 16),
+ word, si, di,
+ VMW_HYPERVISOR_PORT | (channel->channel_id << 16),
+ VMW_HYPERVISOR_MAGIC,
+ eax, ebx, ecx, edx, si, di);
+ }
+
+ return ecx;
+}
+
+/**
+ * vmw_port_hb_in - Receive the message payload either through the
+ * high-bandwidth port if available, or through the backdoor otherwise.
+ * @channel: The rpc channel.
+ * @reply: Pointer to buffer holding reply.
+ * @reply_len: Length of the reply.
+ * @hb: Whether the high-bandwidth port is available.
+ *
+ * Return: The port status.
+ */
+static unsigned long vmw_port_hb_in(struct rpc_channel *channel, char *reply,
+ unsigned long reply_len, bool hb)
+{
+ unsigned long si, di, eax, ebx, ecx, edx;
+
+ if (hb) {
+ unsigned long bp = channel->cookie_low;
+
+ si = channel->cookie_high;
+ di = (uintptr_t) reply;
+
+ VMW_PORT_HB_IN(
+ (MESSAGE_STATUS_SUCCESS << 16) | VMW_PORT_CMD_HB_MSG,
+ reply_len, si, di,
+ VMW_HYPERVISOR_HB_PORT | (channel->channel_id << 16),
+ VMW_HYPERVISOR_MAGIC, bp,
+ eax, ebx, ecx, edx, si, di);
+
+ return ebx;
+ }
+
+ /* HB port not available. Retrieve the message 4 bytes at a time. */
+ ecx = MESSAGE_STATUS_SUCCESS << 16;
+ while (reply_len) {
+ unsigned int bytes = min_t(unsigned long, reply_len, 4);
+
+ si = channel->cookie_high;
+ di = channel->cookie_low;
+
+ VMW_PORT(VMW_PORT_CMD_MSG | (MSG_TYPE_RECVPAYLOAD << 16),
+ MESSAGE_STATUS_SUCCESS, si, di,
+ VMW_HYPERVISOR_PORT | (channel->channel_id << 16),
+ VMW_HYPERVISOR_MAGIC,
+ eax, ebx, ecx, edx, si, di);
+
+ if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0)
+ break;
+
+ memcpy(reply, &ebx, bytes);
+ reply_len -= bytes;
+ reply += bytes;
+ }
+
+ return ecx;
+}
/**
@@ -148,11 +256,10 @@ static int vmw_close_channel(struct rpc_channel *channel)
*/
static int vmw_send_msg(struct rpc_channel *channel, const char *msg)
{
- unsigned long eax, ebx, ecx, edx, si, di, bp;
+ unsigned long eax, ebx, ecx, edx, si, di;
size_t msg_len = strlen(msg);
int retries = 0;
-
while (retries < RETRIES) {
retries++;
@@ -166,23 +273,14 @@ static int vmw_send_msg(struct rpc_channel *channel, const char *msg)
VMW_HYPERVISOR_MAGIC,
eax, ebx, ecx, edx, si, di);
- if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0 ||
- (HIGH_WORD(ecx) & MESSAGE_STATUS_HB) == 0) {
- /* Expected success + high-bandwidth. Give up. */
+ if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0) {
+ /* Expected success. Give up. */
return -EINVAL;
}
/* Send msg */
- si = (uintptr_t) msg;
- di = channel->cookie_low;
- bp = channel->cookie_high;
-
- VMW_PORT_HB_OUT(
- (MESSAGE_STATUS_SUCCESS << 16) | VMW_PORT_CMD_HB_MSG,
- msg_len, si, di,
- VMW_HYPERVISOR_HB_PORT | (channel->channel_id << 16),
- VMW_HYPERVISOR_MAGIC, bp,
- eax, ebx, ecx, edx, si, di);
+ ebx = vmw_port_hb_out(channel, msg,
+ !!(HIGH_WORD(ecx) & MESSAGE_STATUS_HB));
if ((HIGH_WORD(ebx) & MESSAGE_STATUS_SUCCESS) != 0) {
return 0;
@@ -211,7 +309,7 @@ STACK_FRAME_NON_STANDARD(vmw_send_msg);
static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
size_t *msg_len)
{
- unsigned long eax, ebx, ecx, edx, si, di, bp;
+ unsigned long eax, ebx, ecx, edx, si, di;
char *reply;
size_t reply_len;
int retries = 0;
@@ -233,8 +331,7 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
VMW_HYPERVISOR_MAGIC,
eax, ebx, ecx, edx, si, di);
- if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0 ||
- (HIGH_WORD(ecx) & MESSAGE_STATUS_HB) == 0) {
+ if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0) {
DRM_ERROR("Failed to get reply size for host message.\n");
return -EINVAL;
}
@@ -252,17 +349,8 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
/* Receive buffer */
- si = channel->cookie_high;
- di = (uintptr_t) reply;
- bp = channel->cookie_low;
-
- VMW_PORT_HB_IN(
- (MESSAGE_STATUS_SUCCESS << 16) | VMW_PORT_CMD_HB_MSG,
- reply_len, si, di,
- VMW_HYPERVISOR_HB_PORT | (channel->channel_id << 16),
- VMW_HYPERVISOR_MAGIC, bp,
- eax, ebx, ecx, edx, si, di);
-
+ ebx = vmw_port_hb_in(channel, reply, reply_len,
+ !!(HIGH_WORD(ecx) & MESSAGE_STATUS_HB));
if ((HIGH_WORD(ebx) & MESSAGE_STATUS_SUCCESS) == 0) {
kfree(reply);
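[Editor's note] vmw_port_hb_out()/vmw_port_hb_in() above let the driver
fall back to the legacy backdoor port when the host does not offer the
high-bandwidth port, streaming the payload one 32-bit word at a time. A
sketch of that chunking loop, assuming a hypothetical send_word() in
place of the VMW_PORT hypercall:

static void send_via_backdoor(const char *msg, size_t msg_len)
{
	while (msg_len) {
		size_t bytes = min_t(size_t, msg_len, 4);
		u32 word = 0;		/* zero pads a short tail */

		memcpy(&word, msg, bytes);
		send_word(word);	/* hypothetical 4-byte port write */
		msg += bytes;
		msg_len -= bytes;
	}
}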
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
index a6ea75b58a83..d8ea3dd10af0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
@@ -441,11 +441,11 @@ static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
if (unlikely(ret != 0))
return ret;
- ret = sg_alloc_table_from_pages(&vmw_tt->sgt, vsgt->pages,
- vsgt->num_pages, 0,
- (unsigned long)
- vsgt->num_pages << PAGE_SHIFT,
- GFP_KERNEL);
+ ret = __sg_alloc_table_from_pages
+ (&vmw_tt->sgt, vsgt->pages, vsgt->num_pages, 0,
+ (unsigned long) vsgt->num_pages << PAGE_SHIFT,
+ dma_get_max_seg_size(dev_priv->dev->dev),
+ GFP_KERNEL);
if (unlikely(ret != 0))
goto out_sg_alloc_fail;
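[Editor's note] The two vmwgfx DMA hunks fit together: vmw_driver_load()
declares the device's maximum DMA segment size up front, and
vmw_ttm_map_dma() switches to __sg_alloc_table_from_pages() so the sg
list is built against that same limit via dma_get_max_seg_size(), rather
than the unbounded default. A condensed sketch (dev stands for the
device's struct device *; error handling elided):

/* At load time: cap segments to a page-aligned 32-bit size. */
dma_set_max_seg_size(dev, min_t(unsigned int, U32_MAX & PAGE_MASK,
				SCATTERLIST_MAX_SEGMENT));

/* When building the sg table: honor that same cap. */
ret = __sg_alloc_table_from_pages(&sgt, pages, num_pages, 0,
				  (unsigned long)num_pages << PAGE_SHIFT,
				  dma_get_max_seg_size(dev), GFP_KERNEL);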