author    Dave Airlie <airlied@redhat.com>    2017-07-21 14:04:10 +1000
committer Dave Airlie <airlied@redhat.com>    2017-07-21 14:04:10 +1000
commit    660f6b5c639228cf53b4eecda86c8dd9055bed53 (patch)
tree      668cf1238b694bfc101f413bd7058383ca92f1ea /drivers/gpu/drm
parent    22a548d04853297b3855dcac2881d0cb52e3b99c (diff)
parent    636c4c3e762b62aa93632c645ca65879285b16e3 (diff)
Merge tag 'drm-misc-fixes-2017-07-20' of git://anongit.freedesktop.org/git/drm-misc into drm-fixes
Core Changes:
- fence: Introduce new fence flag to signify timestamp is populated (Chris)
- mst: Avoid processing incomplete data + fix NULL dereference (Imre)

Driver Changes:
- vc4: Avoid WARN from grabbing a ref from vblank that's not on (Boris)

Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Boris Brezillon <boris.brezillon@free-electrons.com>
Cc: Imre Deak <imre.deak@intel.com>

* tag 'drm-misc-fixes-2017-07-20' of git://anongit.freedesktop.org/git/drm-misc:
  drm/mst: Avoid processing partially received up/down message transactions
  drm/mst: Avoid dereferencing a NULL mstb in drm_dp_mst_handle_up_req()
  drm/mst: Fix error handling during MST sideband message reception
  drm/vc4: Fix VBLANK handling in crtc->enable() path
  dma-buf/fence: Avoid use of uninitialised timestamp
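The three drm/mst patches pulled in here share one defensive pattern: the sideband receive helper now reports failure instead of returning void, and callers throw away any partially assembled transaction before bailing out, so a corrupt or out-of-order chunk is never processed as a complete message. A condensed, hypothetical sketch of that pattern (stand-in types and helpers, not the real drm_dp_mst structures or API):

    #include <stdbool.h>
    #include <string.h>

    /* Simplified stand-in for struct drm_dp_sideband_msg_rx. */
    struct sb_msg_rx {
            bool have_somt;            /* start-of-message-transaction chunk seen */
            bool have_eomt;            /* end-of-message-transaction chunk seen */
            unsigned int curchunk_len;
    };

    /* Chunk parser: reject chunks that arrive without a preceding SOMT. */
    static bool sb_msg_build(struct sb_msg_rx *msg, bool chunk_has_somt)
    {
            if (!chunk_has_somt && !msg->have_somt)
                    return false;      /* out of order / part of a failed transaction */
            msg->have_somt = true;
            return true;
    }

    /* Caller: on any failure, reset the partial message instead of using it. */
    static int handle_one_message(struct sb_msg_rx *msg, bool chunk_has_somt)
    {
            if (!sb_msg_build(msg, chunk_has_somt)) {
                    memset(msg, 0, sizeof(*msg));
                    return 0;          /* nothing to process */
            }
            /* ... keep assembling chunks until have_eomt is set, then dispatch ... */
            return 0;
    }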
Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--  drivers/gpu/drm/drm_dp_mst_topology.c | 41
-rw-r--r--  drivers/gpu/drm/vc4/vc4_crtc.c        | 66
2 files changed, 76 insertions(+), 31 deletions(-)
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index bfd237c15e76..ae5f06895562 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -330,6 +330,13 @@ static bool drm_dp_sideband_msg_build(struct drm_dp_sideband_msg_rx *msg,
return false;
}
+ /*
+ * ignore out-of-order messages or messages that are part of a
+ * failed transaction
+ */
+ if (!recv_hdr.somt && !msg->have_somt)
+ return false;
+
/* get length contained in this portion */
msg->curchunk_len = recv_hdr.msg_len;
msg->curchunk_hdrlen = hdrlen;
@@ -2164,7 +2171,7 @@ out_unlock:
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume);
-static void drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
+static bool drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
{
int len;
u8 replyblock[32];
@@ -2179,12 +2186,12 @@ static void drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
replyblock, len);
if (ret != len) {
DRM_DEBUG_KMS("failed to read DPCD down rep %d %d\n", len, ret);
- return;
+ return false;
}
ret = drm_dp_sideband_msg_build(msg, replyblock, len, true);
if (!ret) {
DRM_DEBUG_KMS("sideband msg build failed %d\n", replyblock[0]);
- return;
+ return false;
}
replylen = msg->curchunk_len + msg->curchunk_hdrlen;
@@ -2196,21 +2203,32 @@ static void drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
ret = drm_dp_dpcd_read(mgr->aux, basereg + curreply,
replyblock, len);
if (ret != len) {
- DRM_DEBUG_KMS("failed to read a chunk\n");
+ DRM_DEBUG_KMS("failed to read a chunk (len %d, ret %d)\n",
+ len, ret);
+ return false;
}
+
ret = drm_dp_sideband_msg_build(msg, replyblock, len, false);
- if (ret == false)
+ if (!ret) {
DRM_DEBUG_KMS("failed to build sideband msg\n");
+ return false;
+ }
+
curreply += len;
replylen -= len;
}
+ return true;
}
static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
{
int ret = 0;
- drm_dp_get_one_sb_msg(mgr, false);
+ if (!drm_dp_get_one_sb_msg(mgr, false)) {
+ memset(&mgr->down_rep_recv, 0,
+ sizeof(struct drm_dp_sideband_msg_rx));
+ return 0;
+ }
if (mgr->down_rep_recv.have_eomt) {
struct drm_dp_sideband_msg_tx *txmsg;
@@ -2266,7 +2284,12 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
{
int ret = 0;
- drm_dp_get_one_sb_msg(mgr, true);
+
+ if (!drm_dp_get_one_sb_msg(mgr, true)) {
+ memset(&mgr->up_req_recv, 0,
+ sizeof(struct drm_dp_sideband_msg_rx));
+ return 0;
+ }
if (mgr->up_req_recv.have_eomt) {
struct drm_dp_sideband_msg_req_body msg;
@@ -2318,7 +2341,9 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n", msg.u.resource_stat.port_number, msg.u.resource_stat.available_pbn);
}
- drm_dp_put_mst_branch_device(mstb);
+ if (mstb)
+ drm_dp_put_mst_branch_device(mstb);
+
memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
}
return ret;
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index 403bbd5f99a9..a12cc7ea99b6 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -520,6 +520,34 @@ static void vc4_crtc_disable(struct drm_crtc *crtc)
SCALER_DISPSTATX_EMPTY);
}
+static void vc4_crtc_update_dlist(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+ struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
+ struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state);
+
+ if (crtc->state->event) {
+ unsigned long flags;
+
+ crtc->state->event->pipe = drm_crtc_index(crtc);
+
+ WARN_ON(drm_crtc_vblank_get(crtc) != 0);
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ vc4_crtc->event = crtc->state->event;
+ crtc->state->event = NULL;
+
+ HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel),
+ vc4_state->mm.start);
+
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ } else {
+ HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel),
+ vc4_state->mm.start);
+ }
+}
+
static void vc4_crtc_enable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
@@ -530,6 +558,12 @@ static void vc4_crtc_enable(struct drm_crtc *crtc)
require_hvs_enabled(dev);
+ /* Enable vblank irq handling before crtc is started otherwise
+ * drm_crtc_vblank_get() fails in vc4_crtc_update_dlist().
+ */
+ drm_crtc_vblank_on(crtc);
+ vc4_crtc_update_dlist(crtc);
+
/* Turn on the scaler, which will wait for vstart to start
* compositing.
*/
@@ -541,9 +575,6 @@ static void vc4_crtc_enable(struct drm_crtc *crtc)
/* Turn on the pixel valve, which will emit the vstart signal. */
CRTC_WRITE(PV_V_CONTROL,
CRTC_READ(PV_V_CONTROL) | PV_VCONTROL_VIDEN);
-
- /* Enable vblank irq handling after crtc is started. */
- drm_crtc_vblank_on(crtc);
}
static bool vc4_crtc_mode_fixup(struct drm_crtc *crtc,
@@ -598,7 +629,6 @@ static void vc4_crtc_atomic_flush(struct drm_crtc *crtc,
{
struct drm_device *dev = crtc->dev;
struct vc4_dev *vc4 = to_vc4_dev(dev);
- struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state);
struct drm_plane *plane;
bool debug_dump_regs = false;
@@ -620,25 +650,15 @@ static void vc4_crtc_atomic_flush(struct drm_crtc *crtc,
WARN_ON_ONCE(dlist_next - dlist_start != vc4_state->mm.size);
- if (crtc->state->event) {
- unsigned long flags;
-
- crtc->state->event->pipe = drm_crtc_index(crtc);
-
- WARN_ON(drm_crtc_vblank_get(crtc) != 0);
-
- spin_lock_irqsave(&dev->event_lock, flags);
- vc4_crtc->event = crtc->state->event;
- crtc->state->event = NULL;
-
- HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel),
- vc4_state->mm.start);
-
- spin_unlock_irqrestore(&dev->event_lock, flags);
- } else {
- HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel),
- vc4_state->mm.start);
- }
+ /* Only update DISPLIST if the CRTC was already running and is not
+ * being disabled.
+ * vc4_crtc_enable() takes care of updating the dlist just after
+ * re-enabling VBLANK interrupts and before enabling the engine.
+ * If the CRTC is being disabled, there's no point in updating this
+ * information.
+ */
+ if (crtc->state->active && old_state->active)
+ vc4_crtc_update_dlist(crtc);
if (debug_dump_regs) {
DRM_INFO("CRTC %d HVS after:\n", drm_crtc_index(crtc));
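The vc4 patch, in turn, reorders things so that vblank interrupt handling is switched on before the display list pointer is armed: drm_crtc_vblank_get() can only take a reference once drm_crtc_vblank_on() has run, which is why the event-arming code moved out of atomic_flush and into the enable path. A minimal sketch of that ordering constraint, using hypothetical stand-in helpers rather than the real DRM/vc4 entry points:

    #include <stdbool.h>
    #include <stdio.h>

    static bool vblank_enabled;

    /* Stand-ins for drm_crtc_vblank_on() / drm_crtc_vblank_get(). */
    static void vblank_on(void)  { vblank_enabled = true; }
    static int  vblank_get(void) { return vblank_enabled ? 0 : -1; }

    /* Arm the flip-completion event and point the hardware at the new dlist. */
    static void update_dlist(void)
    {
            if (vblank_get() != 0)
                    fprintf(stderr, "WARN: vblank ref failed; event would be lost\n");
            /* ... write the new display list start address here ... */
    }

    static void crtc_enable(void)
    {
            vblank_on();         /* enable vblank handling first, as the fix does */
            update_dlist();      /* then arm the event and program the dlist */
            /* ... then start the scaler and pixel valve ... */
    }

    int main(void)
    {
            crtc_enable();
            return 0;
    }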