| author | Leo Li <sunpeng.li@amd.com> | 2019-03-20 09:52:14 -0400 |
| committer | Alex Deucher <alexander.deucher@amd.com> | 2019-06-22 09:34:08 -0500 |
| commit | 4e0cbbbfbc37fd5a8bdf653a1ebb7b271a506e2d (patch) | |
| tree | 0301214ce6d153ff6d79c9fce8c93149fe4bcd5a /drivers/gpu/drm/amd/display | |
| parent | 1b394e92a149db7ec277b7cc995888304149a5b1 (diff) | |
drm/amd/display: Clean up locking in dcn*_apply_ctx_for_surface()
[Why]
dcn*_disable_plane() doesn't unlock the pipe anymore, making the extra
lock unnecessary.
In addition, during full plane updates, all necessary pipes should be
locked and unlocked together when modifying hubp registers, to avoid tearing
in pipe-split setups.
[How]
Remove the redundant locks, and add a helper, lock_all_pipes(), to lock all
pipes at once (sketched below). If an interdependent pipe update is required,
lock all pipes; otherwise, lock only the top pipe of the updated pipe tree.
Signed-off-by: Leo Li <sunpeng.li@amd.com>
Acked-by: Hawking Zhang <Hawking.Zhang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
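
For reference, here is a minimal, self-contained sketch of what the new lock_all_pipes() helper does, inferred from its call sites in the hunks below (lock_all_pipes(dc, context, true/false)). The struct definitions are simplified stand-ins for the real dc types, not the driver's actual headers:

```c
#include <stdbool.h>

/*
 * Illustrative sketch only: the structures below are simplified stand-ins
 * for the dc types, showing the locking pattern behind the patch's
 * lock_all_pipes(dc, context, true/false) calls.
 */
struct timing_generator;

struct tg_funcs {
	bool (*is_tg_enabled)(struct timing_generator *tg);
	void (*lock)(struct timing_generator *tg);
	void (*unlock)(struct timing_generator *tg);
};

struct timing_generator {
	const struct tg_funcs *funcs;
};

struct pipe_ctx {
	struct pipe_ctx *top_pipe;   /* non-NULL for the bottom pipe of a split */
	void *plane_state;           /* stand-in for struct dc_plane_state * */
	struct {
		struct timing_generator *tg;
	} stream_res;
};

struct resource_pool {
	int pipe_count;
};

struct dc {
	struct resource_pool *res_pool;
};

#define MAX_PIPES 6

struct dc_state {
	struct {
		struct pipe_ctx pipe_ctx[MAX_PIPES];
	} res_ctx;
};

/* Lock (or unlock) the OTG behind every enabled top pipe in the new context. */
static void lock_all_pipes(struct dc *dc, struct dc_state *context, bool lock)
{
	int i;

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
		struct timing_generator *tg = pipe_ctx->stream_res.tg;

		/* Skip bottom pipes of a split, planeless pipes, and disabled OTGs. */
		if (pipe_ctx->top_pipe || !pipe_ctx->plane_state ||
		    !tg->funcs->is_tg_enabled(tg))
			continue;

		if (lock)
			tg->funcs->lock(tg);
		else
			tg->funcs->unlock(tg);
	}
}
```

The idea is that only top pipes take the lock, so an OTG shared by a pipe-split pair is locked exactly once, and the hubp updates for both halves of the split land within the same lock window, which is the tearing scenario the [Why] section describes.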
Diffstat (limited to 'drivers/gpu/drm/amd/display')
| -rw-r--r-- | drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 11 |
| -rw-r--r-- | drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c | 46 |
2 files changed, 22 insertions, 35 deletions
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 8a8494207971..9d2437fb90a6 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -2457,6 +2457,11 @@ static void dcn10_apply_ctx_for_surface(
 	if (num_planes > 0)
 		program_all_pipe_in_tree(dc, top_pipe_to_program, context);
 
+#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
+	/* Program secondary blending tree and writeback pipes */
+	if ((stream->num_wb_info > 0) && (dc->hwss.program_all_writeback_pipes_in_tree))
+		dc->hwss.program_all_writeback_pipes_in_tree(dc, stream, context);
+#endif
 	if (interdependent_update)
 		for (i = 0; i < dc->res_pool->pipe_count; i++) {
 			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
@@ -2471,12 +2476,6 @@ static void dcn10_apply_ctx_for_surface(
 				&pipe_ctx->ttu_regs);
 		}
 
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
-	/* Program secondary blending tree and writeback pipes */
-	if ((stream->num_wb_info > 0) && (dc->hwss.program_all_writeback_pipes_in_tree))
-		dc->hwss.program_all_writeback_pipes_in_tree(dc, stream, context);
-#endif
-
 	if (interdependent_update)
 		lock_all_pipes(dc, context, false);
 	else
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
index f2e5e4928119..ccb9f277911a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
@@ -1270,8 +1270,6 @@ static void dcn20_pipe_control_lock(
 	}
 }
 
-
-
 static void dcn20_apply_ctx_for_surface(
 		struct dc *dc,
 		const struct dc_stream_state *stream,
@@ -1282,6 +1280,7 @@ static void dcn20_apply_ctx_for_surface(
 	int i;
 	struct timing_generator *tg;
 	bool removed_pipe[6] = { false };
+	bool interdependent_update = false;
 	struct pipe_ctx *top_pipe_to_program =
 			find_top_pipe_for_stream(dc, context, stream);
 	DC_LOGGER_INIT(dc->ctx->logger);
@@ -1291,7 +1290,13 @@ static void dcn20_apply_ctx_for_surface(
 
 	tg = top_pipe_to_program->stream_res.tg;
 
-	dcn20_pipe_control_lock(dc, top_pipe_to_program, true);
+	interdependent_update = top_pipe_to_program->plane_state &&
+		top_pipe_to_program->plane_state->update_flags.bits.full_update;
+
+	if (interdependent_update)
+		lock_all_pipes(dc, context, true);
+	else
+		dcn20_pipe_control_lock(dc, top_pipe_to_program, true);
 
 	if (num_planes == 0) {
 		/* OTG blank before remove all front end */
@@ -1311,16 +1316,9 @@ static void dcn20_apply_ctx_for_surface(
 		 */
 		if (pipe_ctx->plane_state && !old_pipe_ctx->plane_state) {
 			if (old_pipe_ctx->stream_res.tg == tg &&
-				old_pipe_ctx->plane_res.hubp &&
-				old_pipe_ctx->plane_res.hubp->opp_id != 0xf) {
+			    old_pipe_ctx->plane_res.hubp &&
+			    old_pipe_ctx->plane_res.hubp->opp_id != 0xf)
 				dcn20_disable_plane(dc, old_pipe_ctx);
-
-				/*
-				 * power down fe will unlock when calling reset, need
-				 * to lock it back here. Messy, need rework.
-				 */
-				pipe_ctx->stream_res.tg->funcs->lock(pipe_ctx->stream_res.tg);
-			}
 		}
 
 		if ((!pipe_ctx->plane_state ||
@@ -1343,35 +1341,25 @@ static void dcn20_apply_ctx_for_surface(
 	if ((stream->num_wb_info > 0) && (dc->hwss.program_all_writeback_pipes_in_tree))
 		dc->hwss.program_all_writeback_pipes_in_tree(dc, stream, context);
 
-	dcn20_pipe_control_lock(dc, top_pipe_to_program, false);
-
-	if (top_pipe_to_program->plane_state &&
-			top_pipe_to_program->plane_state->update_flags.bits.full_update)
+	if (interdependent_update)
 		for (i = 0; i < dc->res_pool->pipe_count; i++) {
 			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
 
 			/* Skip inactive pipes and ones already updated */
-			if (!pipe_ctx->stream || pipe_ctx->stream == stream
-					|| !pipe_ctx->plane_state)
+			if (!pipe_ctx->stream || pipe_ctx->stream == stream ||
+			    !pipe_ctx->plane_state || !tg->funcs->is_tg_enabled(tg))
 				continue;
 
-			pipe_ctx->stream_res.tg->funcs->lock(pipe_ctx->stream_res.tg);
-
 			pipe_ctx->plane_res.hubp->funcs->hubp_setup_interdependent(
 				pipe_ctx->plane_res.hubp,
 				&pipe_ctx->dlg_regs,
 				&pipe_ctx->ttu_regs);
 		}
 
-	for (i = 0; i < dc->res_pool->pipe_count; i++) {
-		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
-
-		if (!pipe_ctx->stream || pipe_ctx->stream == stream
-				|| !pipe_ctx->plane_state)
-			continue;
-
-		dcn20_pipe_control_lock(dc, pipe_ctx, false);
-	}
+	if (interdependent_update)
+		lock_all_pipes(dc, context, false);
+	else
+		dcn20_pipe_control_lock(dc, top_pipe_to_program, false);
 
 	for (i = 0; i < dc->res_pool->pipe_count; i++)
 		if (removed_pipe[i])