Diffstat (limited to 'drivers/gpu')
-rw-r--r-- drivers/gpu/Makefile | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c | 31
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 35
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 6
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 15
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 29
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 13
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/cik.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 30
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 8
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c | 6
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c | 13
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/nv.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/psp_v3_1.c | 24
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/si_dpm.c | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/soc15.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vi.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_device.c | 4
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 47
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c | 14
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 8
-rw-r--r-- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc.c | 53
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc.h | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc_hw_types.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c | 17
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 46
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c | 18
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c | 5
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c | 5
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c | 40
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dc_features.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h | 7
-rw-r--r-- drivers/gpu/drm/amd/display/include/dal_asic_id.h | 6
-rw-r--r-- drivers/gpu/drm/amd/display/modules/freesync/freesync.c | 34
-rw-r--r-- drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c | 5
-rw-r--r-- drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h | 28
-rw-r--r-- drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c | 39
-rw-r--r-- drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h | 1
-rw-r--r-- drivers/gpu/drm/amd/powerplay/amd_powerplay.c | 3
-rw-r--r-- drivers/gpu/drm/amd/powerplay/amdgpu_smu.c | 38
-rw-r--r-- drivers/gpu/drm/amd/powerplay/arcturus_ppt.c | 62
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 5
-rw-r--r-- drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h | 3
-rw-r--r-- drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h | 3
-rw-r--r-- drivers/gpu/drm/amd/powerplay/navi10_ppt.c | 26
-rw-r--r-- drivers/gpu/drm/amd/powerplay/renoir_ppt.c | 18
-rw-r--r-- drivers/gpu/drm/amd/powerplay/renoir_ppt.h | 2
-rw-r--r-- drivers/gpu/drm/amd/powerplay/smu_internal.h | 3
-rw-r--r-- drivers/gpu/drm/amd/powerplay/smu_v11_0.c | 59
-rw-r--r-- drivers/gpu/drm/amd/powerplay/vega20_ppt.c | 14
-rw-r--r-- drivers/gpu/drm/arm/display/komeda/komeda_drv.c | 4
-rw-r--r-- drivers/gpu/drm/bochs/bochs_hw.c | 6
-rw-r--r-- drivers/gpu/drm/bridge/analogix/analogix_dp_core.c | 33
-rw-r--r-- drivers/gpu/drm/bridge/synopsys/dw-hdmi.c | 46
-rw-r--r-- drivers/gpu/drm/drm_dp_helper.c | 96
-rw-r--r-- drivers/gpu/drm/drm_dp_mst_topology.c | 184
-rw-r--r-- drivers/gpu/drm/drm_file.c | 141
-rw-r--r-- drivers/gpu/drm/drm_lease.c | 3
-rw-r--r-- drivers/gpu/drm/drm_prime.c | 37
-rw-r--r-- drivers/gpu/drm/exynos/exynos5433_drm_decon.c | 5
-rw-r--r-- drivers/gpu/drm/exynos/exynos7_drm_decon.c | 5
-rw-r--r-- drivers/gpu/drm/exynos/exynos_dp.c | 29
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_dma.c | 28
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_drv.h | 6
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_fimc.c | 5
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_fimd.c | 5
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_g2d.c | 5
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_gsc.c | 5
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_rotator.c | 5
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_scaler.c | 6
-rw-r--r-- drivers/gpu/drm/exynos/exynos_mixer.c | 7
-rw-r--r-- drivers/gpu/drm/i915/.gitignore | 1
-rw-r--r-- drivers/gpu/drm/i915/Makefile | 26
-rw-r--r-- drivers/gpu/drm/i915/display/icl_dsi.c | 167
-rw-r--r-- drivers/gpu/drm/i915/display/intel_atomic_plane.c | 21
-rw-r--r-- drivers/gpu/drm/i915/display/intel_atomic_plane.h | 2
-rw-r--r-- drivers/gpu/drm/i915/display/intel_audio.c | 18
-rw-r--r-- drivers/gpu/drm/i915/display/intel_bw.c | 9
-rw-r--r-- drivers/gpu/drm/i915/display/intel_color.c | 121
-rw-r--r-- drivers/gpu/drm/i915/display/intel_connector.c | 2
-rw-r--r-- drivers/gpu/drm/i915/display/intel_crt.c | 36
-rw-r--r-- drivers/gpu/drm/i915/display/intel_ddi.c | 529
-rw-r--r-- drivers/gpu/drm/i915/display/intel_ddi.h | 3
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display.c | 474
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display.h | 8
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_debugfs.c | 12
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_power.c | 36
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_types.h | 39
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dp.c | 936
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dp.h | 4
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c | 84
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dp_link_training.c | 9
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dp_link_training.h | 4
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dp_mst.c | 150
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dsi.c | 9
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dsi_vbt.c | 11
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dvo.c | 9
-rw-r--r-- drivers/gpu/drm/i915/display/intel_fbc.c | 84
-rw-r--r-- drivers/gpu/drm/i915/display/intel_fbdev.c | 96
-rw-r--r-- drivers/gpu/drm/i915/display/intel_global_state.c | 5
-rw-r--r-- drivers/gpu/drm/i915/display/intel_hdcp.c | 6
-rw-r--r-- drivers/gpu/drm/i915/display/intel_hdcp.h | 4
-rw-r--r-- drivers/gpu/drm/i915/display/intel_hdmi.c | 256
-rw-r--r-- drivers/gpu/drm/i915/display/intel_hotplug.c | 16
-rw-r--r-- drivers/gpu/drm/i915/display/intel_hotplug.h | 3
-rw-r--r-- drivers/gpu/drm/i915/display/intel_lvds.c | 22
-rw-r--r-- drivers/gpu/drm/i915/display/intel_overlay.c | 2
-rw-r--r-- drivers/gpu/drm/i915/display/intel_panel.c | 22
-rw-r--r-- drivers/gpu/drm/i915/display/intel_panel.h | 3
-rw-r--r-- drivers/gpu/drm/i915/display/intel_psr.c | 47
-rw-r--r-- drivers/gpu/drm/i915/display/intel_sdvo.c | 22
-rw-r--r-- drivers/gpu/drm/i915/display/intel_sprite.c | 25
-rw-r--r-- drivers/gpu/drm/i915/display/intel_tc.c | 47
-rw-r--r-- drivers/gpu/drm/i915/display/intel_tv.c | 15
-rw-r--r-- drivers/gpu/drm/i915/display/vlv_dsi.c | 15
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_context.c | 87
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_context.h | 12
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_domain.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c | 381
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_object.c | 1
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_object_types.h | 3
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_pages.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_shrinker.c | 18
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_stolen.c | 4
-rw-r--r-- drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c | 3
-rw-r--r-- drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c | 4
-rw-r--r-- drivers/gpu/drm/i915/gem/selftests/i915_gem_object.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gt/debugfs_engines.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gt/debugfs_gt.c | 15
-rw-r--r-- drivers/gpu/drm/i915/gt/debugfs_gt.h | 9
-rw-r--r-- drivers/gpu/drm/i915/gt/debugfs_gt_pm.c | 10
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_breadcrumbs.c | 6
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_context.c | 7
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_context.h | 5
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_context_types.h | 9
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_engine.h | 2
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_engine_cs.c | 127
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_engine_pm.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_engine_pm.h | 6
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_engine_types.h | 12
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_ggtt.c | 89
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c (renamed from drivers/gpu/drm/i915/i915_gem_fence_reg.c) | 170
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_ggtt_fencing.h (renamed from drivers/gpu/drm/i915/i915_gem_fence_reg.h) | 17
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_gt.c | 3
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_gt_irq.c | 15
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_gt_pm.c | 5
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_gt_requests.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_gtt.h | 5
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_lrc.c | 272
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_rc6.c | 49
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_renderstate.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_reset.c | 21
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_ring.h | 5
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_ring_submission.c | 35
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_rps.c | 118
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_sseu.c | 33
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_timeline.c | 12
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_workarounds.c | 21
-rw-r--r-- drivers/gpu/drm/i915/gt/selftest_gt_pm.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gt/selftest_lrc.c | 117
-rw-r--r-- drivers/gpu/drm/i915/gt/selftest_rc6.c | 68
-rw-r--r-- drivers/gpu/drm/i915/gt/selftest_rps.c | 223
-rw-r--r-- drivers/gpu/drm/i915/gt/selftest_rps.h | 11
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_guc.c | 46
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_guc.h | 7
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_guc_debugfs.c | 42
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_guc_debugfs.h | 14
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c | 14
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_guc_fw.h | 1
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_guc_log.c | 97
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_guc_log.h | 4
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_guc_log_debugfs.c | 124
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_guc_log_debugfs.h | 15
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_huc.c | 53
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_huc.h | 2
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_huc_debugfs.c | 36
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_huc_debugfs.h | 14
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c | 17
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_huc_fw.h | 1
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_uc.c | 35
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_uc.h | 1
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_uc_debugfs.c | 30
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_uc_debugfs.h | 14
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c | 56
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h | 3
-rw-r--r-- drivers/gpu/drm/i915/gvt/aperture_gm.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gvt/cmd_parser.c | 16
-rw-r--r-- drivers/gpu/drm/i915/gvt/display.c | 6
-rw-r--r-- drivers/gpu/drm/i915/gvt/handlers.c | 8
-rw-r--r-- drivers/gpu/drm/i915/gvt/kvmgt.c | 46
-rw-r--r-- drivers/gpu/drm/i915/gvt/opregion.c | 5
-rw-r--r-- drivers/gpu/drm/i915/gvt/scheduler.c | 4
-rw-r--r-- drivers/gpu/drm/i915/gvt/vgpu.c | 12
-rw-r--r-- drivers/gpu/drm/i915/i915_active.c | 137
-rw-r--r-- drivers/gpu/drm/i915/i915_active.h | 14
-rw-r--r-- drivers/gpu/drm/i915/i915_debugfs.c | 298
-rw-r--r-- drivers/gpu/drm/i915/i915_drv.c | 16
-rw-r--r-- drivers/gpu/drm/i915/i915_drv.h | 13
-rw-r--r-- drivers/gpu/drm/i915/i915_gem.c | 20
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_evict.c | 7
-rw-r--r-- drivers/gpu/drm/i915/i915_gpu_error.c | 2
-rw-r--r-- drivers/gpu/drm/i915/i915_irq.c | 8
-rw-r--r-- drivers/gpu/drm/i915/i915_memcpy.c | 5
-rw-r--r-- drivers/gpu/drm/i915/i915_pci.c | 23
-rw-r--r-- drivers/gpu/drm/i915/i915_perf.c | 636
-rw-r--r-- drivers/gpu/drm/i915/i915_perf_types.h | 46
-rw-r--r-- drivers/gpu/drm/i915/i915_pmu.c | 4
-rw-r--r-- drivers/gpu/drm/i915/i915_reg.h | 132
-rw-r--r-- drivers/gpu/drm/i915/i915_request.c | 29
-rw-r--r-- drivers/gpu/drm/i915/i915_request.h | 2
-rw-r--r-- drivers/gpu/drm/i915/i915_scheduler.c | 10
-rw-r--r-- drivers/gpu/drm/i915/i915_sw_fence.c | 2
-rw-r--r-- drivers/gpu/drm/i915/i915_sw_fence_work.c | 5
-rw-r--r-- drivers/gpu/drm/i915/i915_sw_fence_work.h | 23
-rw-r--r-- drivers/gpu/drm/i915/i915_switcheroo.c | 4
-rw-r--r-- drivers/gpu/drm/i915/i915_utils.c | 3
-rw-r--r-- drivers/gpu/drm/i915/i915_vma.c | 122
-rw-r--r-- drivers/gpu/drm/i915/i915_vma.h | 4
-rw-r--r-- drivers/gpu/drm/i915/intel_device_info.c | 41
-rw-r--r-- drivers/gpu/drm/i915/intel_device_info.h | 1
-rw-r--r-- drivers/gpu/drm/i915/intel_dram.c | 3
-rw-r--r-- drivers/gpu/drm/i915/intel_pm.c | 12
-rw-r--r-- drivers/gpu/drm/i915/intel_sideband.c | 5
-rw-r--r-- drivers/gpu/drm/i915/intel_uncore.c | 24
-rw-r--r-- drivers/gpu/drm/i915/intel_uncore.h | 6
-rw-r--r-- drivers/gpu/drm/i915/intel_wakeref.c | 12
-rw-r--r-- drivers/gpu/drm/i915/intel_wakeref.h | 22
-rw-r--r-- drivers/gpu/drm/i915/intel_wopcm.c | 22
-rw-r--r-- drivers/gpu/drm/i915/oa/i915_oa_bdw.c | 90
-rw-r--r-- drivers/gpu/drm/i915/oa/i915_oa_bdw.h | 16
-rw-r--r-- drivers/gpu/drm/i915/oa/i915_oa_bxt.c | 88
-rw-r--r-- drivers/gpu/drm/i915/oa/i915_oa_bxt.h | 16
-rw-r--r-- drivers/gpu/drm/i915/oa/i915_oa_cflgt2.c | 89
-rw-r--r-- drivers/gpu/drm/i915/oa/i915_oa_cflgt2.h | 16
-rw-r--r-- drivers/gpu/drm/i915/oa/i915_oa_cflgt3.c | 89
-rw-r--r-- drivers/gpu/drm/i915/oa/i915_oa_cflgt3.h | 16
-rw-r--r-- drivers/gpu/drm/i915/oa/i915_oa_chv.c | 89
-rw-r--r-- drivers/gpu/drm/i915/oa/i915_oa_chv.h | 16
-rw-r--r-- drivers/gpu/drm/i915/oa/i915_oa_cnl.c | 101
-rw-r--r-- drivers/gpu/drm/i915/oa/i915_oa_cnl.h | 16
-rw-r--r-- drivers/gpu/drm/i915/oa/i915_oa_glk.c | 88
-rw-r--r-- drivers/gpu/drm/i915/oa/i915_oa_glk.h | 16
-rw-r--r-- drivers/gpu/drm/i915/oa/i915_oa_hsw.c | 118
-rw-r--r-- drivers/gpu/drm/i915/oa/i915_oa_hsw.h | 16
-rw-r--r-- drivers/gpu/drm/i915/oa/i915_oa_icl.c | 98
-rw-r--r-- drivers/gpu/drm/i915/oa/i915_oa_icl.h | 16
-rw-r--r-- drivers/gpu/drm/i915/oa/i915_oa_kblgt2.c | 89
-rw-r--r-- drivers/gpu/drm/i915/oa/i915_oa_kblgt2.h | 16
-rw-r--r-- drivers/gpu/drm/i915/oa/i915_oa_kblgt3.c | 89
-rw-r--r-- drivers/gpu/drm/i915/oa/i915_oa_kblgt3.h | 16
-rw-r--r-- drivers/gpu/drm/i915/oa/i915_oa_sklgt2.c | 88
-rw-r--r-- drivers/gpu/drm/i915/oa/i915_oa_sklgt2.h | 16
-rw-r--r-- drivers/gpu/drm/i915/oa/i915_oa_sklgt3.c | 89
-rw-r--r-- drivers/gpu/drm/i915/oa/i915_oa_sklgt3.h | 16
-rw-r--r-- drivers/gpu/drm/i915/oa/i915_oa_sklgt4.c | 89
-rw-r--r-- drivers/gpu/drm/i915/oa/i915_oa_sklgt4.h | 16
-rw-r--r-- drivers/gpu/drm/i915/oa/i915_oa_tgl.c | 121
-rw-r--r-- drivers/gpu/drm/i915/oa/i915_oa_tgl.h | 16
-rw-r--r-- drivers/gpu/drm/i915/selftests/i915_active.c | 12
-rw-r--r-- drivers/gpu/drm/i915/selftests/i915_gem.c | 2
-rw-r--r-- drivers/gpu/drm/i915/selftests/i915_gem_evict.c | 26
-rw-r--r-- drivers/gpu/drm/i915/selftests/i915_gem_gtt.c | 1
-rw-r--r-- drivers/gpu/drm/i915/selftests/i915_perf.c | 98
-rw-r--r-- drivers/gpu/drm/i915/selftests/i915_request.c | 16
-rw-r--r-- drivers/gpu/drm/i915/selftests/intel_memory_region.c | 5
-rw-r--r-- drivers/gpu/drm/i915/selftests/mock_gem_device.c | 6
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_hdmi.c | 54
-rw-r--r-- drivers/gpu/drm/msm/adreno/a5xx_gpu.c | 27
-rw-r--r-- drivers/gpu/drm/msm/adreno/a6xx_gmu.c | 115
-rw-r--r-- drivers/gpu/drm/msm/adreno/a6xx_gmu.h | 6
-rw-r--r-- drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h | 2
-rw-r--r-- drivers/gpu/drm/msm/adreno/adreno_gpu.c | 2
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c | 118
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c | 4
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c | 4
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h | 10
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h | 10
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c | 98
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h | 26
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c | 620
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h | 71
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c | 6
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c | 2
-rw-r--r-- drivers/gpu/drm/msm/edp/edp.c | 4
-rw-r--r-- drivers/gpu/drm/msm/hdmi/hdmi.c | 4
-rw-r--r-- drivers/gpu/drm/msm/msm_drv.c | 6
-rw-r--r-- drivers/gpu/drm/msm/msm_gem.h | 12
-rw-r--r-- drivers/gpu/drm/msm/msm_gpu.c | 28
-rw-r--r-- drivers/gpu/drm/msm/msm_rd.c | 8
-rw-r--r-- drivers/gpu/drm/nouveau/dispnv04/dac.c | 3
-rw-r--r-- drivers/gpu/drm/nouveau/dispnv04/hw.c | 1
-rw-r--r-- drivers/gpu/drm/nouveau/dispnv50/base507c.c | 1
-rw-r--r-- drivers/gpu/drm/nouveau/dispnv50/core507d.c | 1
-rw-r--r-- drivers/gpu/drm/nouveau/dispnv50/corec37d.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/dispnv50/curs507a.c | 21
-rw-r--r-- drivers/gpu/drm/nouveau/dispnv50/cursc37a.c | 9
-rw-r--r-- drivers/gpu/drm/nouveau/dispnv50/disp.c | 1
-rw-r--r-- drivers/gpu/drm/nouveau/dispnv50/ovly827e.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/dispnv50/wndw.h | 1
-rw-r--r-- drivers/gpu/drm/nouveau/include/nvif/device.h | 21
-rw-r--r-- drivers/gpu/drm/nouveau/include/nvif/timer.h | 35
-rw-r--r-- drivers/gpu/drm/nouveau/include/nvif/user.h | 1
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_bo.c | 9
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_dmem.c | 19
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_drm.c | 63
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_drv.h | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_svm.c | 12
-rw-r--r-- drivers/gpu/drm/nouveau/nvif/Kbuild | 1
-rw-r--r-- drivers/gpu/drm/nouveau/nvif/device.c | 14
-rw-r--r-- drivers/gpu/drm/nouveau/nvif/timer.c | 56
-rw-r--r-- drivers/gpu/drm/nouveau/nvif/userc361.c | 14
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c | 26
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp108.c | 3
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/sec2/tu102.c | 16
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowpci.c | 17
-rw-r--r-- drivers/gpu/drm/omapdrm/dss/dss.c | 25
-rw-r--r-- drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c | 25
-rw-r--r-- drivers/gpu/drm/panel/panel-simple.c | 11
-rw-r--r-- drivers/gpu/drm/radeon/.gitignore | 1
-rw-r--r-- drivers/gpu/drm/radeon/radeon_bios.c | 30
-rw-r--r-- drivers/gpu/drm/radeon/radeon_ttm.c | 2
-rw-r--r-- drivers/gpu/drm/radeon/si_dpm.c | 1
-rw-r--r-- drivers/gpu/drm/rockchip/analogix_dp-rockchip.c | 36
-rw-r--r-- drivers/gpu/drm/scheduler/sched_main.c | 2
-rw-r--r-- drivers/gpu/drm/ttm/ttm_bo_vm.c | 110
-rw-r--r-- drivers/gpu/drm/vboxvideo/vbox_drv.c | 4
-rw-r--r-- drivers/gpu/drm/vc4/vc4_hdmi.c | 20
-rw-r--r-- drivers/gpu/drm/vmwgfx/Makefile | 1
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 13
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 12
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c | 76
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_thp.c | 166
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c | 2
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c | 5
-rw-r--r-- drivers/gpu/drm/xen/xen_drm_front.c | 2
-rw-r--r-- drivers/gpu/trace/Kconfig | 4
-rw-r--r-- drivers/gpu/trace/Makefile | 3
-rw-r--r-- drivers/gpu/trace/trace_gpu_mem.c | 13
351 files changed, 7769 insertions, 5611 deletions
diff --git a/drivers/gpu/Makefile b/drivers/gpu/Makefile
index f17d01f076c7..835c88318cec 100644
--- a/drivers/gpu/Makefile
+++ b/drivers/gpu/Makefile
@@ -5,3 +5,4 @@
obj-$(CONFIG_TEGRA_HOST1X) += host1x/
obj-y += drm/ vga/
obj-$(CONFIG_IMX_IPUV3_CORE) += ipu-v3/
+obj-$(CONFIG_TRACE_GPU_MEM) += trace/
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
index 50dff69a0f6e..b1172d93c99c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
@@ -192,30 +192,35 @@ static bool amdgpu_read_bios_from_rom(struct amdgpu_device *adev)
static bool amdgpu_read_platform_bios(struct amdgpu_device *adev)
{
- uint8_t __iomem *bios;
- size_t size;
+ phys_addr_t rom = adev->pdev->rom;
+ size_t romlen = adev->pdev->romlen;
+ void __iomem *bios;
adev->bios = NULL;
- bios = pci_platform_rom(adev->pdev, &size);
- if (!bios) {
+ if (!rom || romlen == 0)
return false;
- }
- adev->bios = kzalloc(size, GFP_KERNEL);
- if (adev->bios == NULL)
+ adev->bios = kzalloc(romlen, GFP_KERNEL);
+ if (!adev->bios)
return false;
- memcpy_fromio(adev->bios, bios, size);
+ bios = ioremap(rom, romlen);
+ if (!bios)
+ goto free_bios;
- if (!check_atom_bios(adev->bios, size)) {
- kfree(adev->bios);
- return false;
- }
+ memcpy_fromio(adev->bios, bios, romlen);
+ iounmap(bios);
- adev->bios_size = size;
+ if (!check_atom_bios(adev->bios, romlen))
+ goto free_bios;
+
+ adev->bios_size = romlen;
return true;
+free_bios:
+ kfree(adev->bios);
+ return false;
}
#ifdef CONFIG_ACPI
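[Editor's note: the hunk above interleaves added and removed lines heavily. For readability, a best-effort reassembly of the post-patch amdgpu_read_platform_bios() from this hunk alone (surrounding context assumed unchanged) is shown below; the function now reads the platform ROM through the PCI core's pdev->rom/pdev->romlen fields, mapping it with ioremap() and copying it out with memcpy_fromio() instead of calling the removed pci_platform_rom() helper.]

static bool amdgpu_read_platform_bios(struct amdgpu_device *adev)
{
	phys_addr_t rom = adev->pdev->rom;
	size_t romlen = adev->pdev->romlen;
	void __iomem *bios;

	adev->bios = NULL;

	/* Nothing to do if the PCI core found no platform ROM. */
	if (!rom || romlen == 0)
		return false;

	adev->bios = kzalloc(romlen, GFP_KERNEL);
	if (!adev->bios)
		return false;

	/* Map the ROM, copy it into the kernel buffer, then unmap. */
	bios = ioremap(rom, romlen);
	if (!bios)
		goto free_bios;

	memcpy_fromio(adev->bios, bios, romlen);
	iounmap(bios);

	if (!check_atom_bios(adev->bios, romlen))
		goto free_bios;

	adev->bios_size = romlen;
	return true;

free_bios:
	kfree(adev->bios);
	return false;
}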
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 6f469facabfb..f84f9e35a73b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2008,8 +2008,24 @@ static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
*/
static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
{
- return !!memcmp(adev->gart.ptr, adev->reset_magic,
- AMDGPU_RESET_MAGIC_NUM);
+ if (memcmp(adev->gart.ptr, adev->reset_magic,
+ AMDGPU_RESET_MAGIC_NUM))
+ return true;
+
+ if (!adev->in_gpu_reset)
+ return false;
+
+ /*
+ * For all ASICs with baco/mode1 reset, the VRAM is
+ * always assumed to be lost.
+ */
+ switch (amdgpu_asic_reset_method(adev)) {
+ case AMD_RESET_METHOD_BACO:
+ case AMD_RESET_METHOD_MODE1:
+ return true;
+ default:
+ return false;
+ }
}
/**
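[Editor's note: reassembled from the hunk above under the same assumption of unchanged context, the post-patch VRAM-lost check reads as follows. The memcmp() test against the reset magic is kept as the primary signal; during a GPU reset, BACO and mode1 resets additionally treat VRAM as always lost.]

static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
{
	/* Reset magic in GART no longer matching means VRAM was lost. */
	if (memcmp(adev->gart.ptr, adev->reset_magic,
		   AMDGPU_RESET_MAGIC_NUM))
		return true;

	if (!adev->in_gpu_reset)
		return false;

	/*
	 * For all ASICs with baco/mode1 reset, the VRAM is
	 * always assumed to be lost.
	 */
	switch (amdgpu_asic_reset_method(adev)) {
	case AMD_RESET_METHOD_BACO:
	case AMD_RESET_METHOD_MODE1:
		return true;
	default:
		return false;
	}
}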
@@ -2742,6 +2758,9 @@ static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
if (adev->asic_reset_res)
goto fail;
+
+ if (adev->mmhub.funcs && adev->mmhub.funcs->reset_ras_error_count)
+ adev->mmhub.funcs->reset_ras_error_count(adev);
} else {
task_barrier_full(&hive->tb);
@@ -3353,6 +3372,9 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
}
}
+ amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
+ amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
+
amdgpu_amdkfd_suspend(adev, !fbcon);
amdgpu_ras_suspend(adev);
@@ -3910,8 +3932,15 @@ static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
}
}
- if (!r && amdgpu_ras_intr_triggered())
+ if (!r && amdgpu_ras_intr_triggered()) {
+ list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
+ if (tmp_adev->mmhub.funcs &&
+ tmp_adev->mmhub.funcs->reset_ras_error_count)
+ tmp_adev->mmhub.funcs->reset_ras_error_count(tmp_adev);
+ }
+
amdgpu_ras_intr_cleared();
+ }
list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
if (need_full_reset) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index bc3cf04a1a94..abe94a55ecad 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -89,9 +89,13 @@ void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
adev->pm.ac_power = true;
else
adev->pm.ac_power = false;
- if (adev->powerplay.pp_funcs->enable_bapm)
+ if (adev->powerplay.pp_funcs &&
+ adev->powerplay.pp_funcs->enable_bapm)
amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power);
mutex_unlock(&adev->pm.mutex);
+
+ if (is_support_sw_smu(adev))
+ smu_set_ac_dc(&adev->smu);
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index dc42086a672b..deaa26808841 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -159,10 +159,6 @@ static int psp_sw_fini(void *handle)
adev->psp.sos_fw = NULL;
release_firmware(adev->psp.asd_fw);
adev->psp.asd_fw = NULL;
- if (adev->psp.cap_fw) {
- release_firmware(adev->psp.cap_fw);
- adev->psp.cap_fw = NULL;
- }
if (adev->psp.ta_fw) {
release_firmware(adev->psp.ta_fw);
adev->psp.ta_fw = NULL;
@@ -250,7 +246,7 @@ psp_cmd_submit_buf(struct psp_context *psp,
DRM_WARN("psp command (0x%X) failed and response status is (0x%X)\n",
psp->cmd_buf_mem->cmd_id,
psp->cmd_buf_mem->resp.status);
- if ((ucode->ucode_id == AMDGPU_UCODE_ID_CAP) || !timeout) {
+ if (!timeout) {
mutex_unlock(&psp->mutex);
return -EINVAL;
}
@@ -822,7 +818,7 @@ static int psp_ras_initialize(struct psp_context *psp)
if (!psp->adev->psp.ta_ras_ucode_size ||
!psp->adev->psp.ta_ras_start_addr) {
- dev_warn(psp->adev->dev, "RAS: ras ta ucode is not available\n");
+ dev_info(psp->adev->dev, "RAS: optional ras ta ucode is not available\n");
return 0;
}
@@ -906,7 +902,7 @@ static int psp_hdcp_initialize(struct psp_context *psp)
if (!psp->adev->psp.ta_hdcp_ucode_size ||
!psp->adev->psp.ta_hdcp_start_addr) {
- dev_warn(psp->adev->dev, "HDCP: hdcp ta ucode is not available\n");
+ dev_info(psp->adev->dev, "HDCP: optional hdcp ta ucode is not available\n");
return 0;
}
@@ -1052,7 +1048,7 @@ static int psp_dtm_initialize(struct psp_context *psp)
if (!psp->adev->psp.ta_dtm_ucode_size ||
!psp->adev->psp.ta_dtm_start_addr) {
- dev_warn(psp->adev->dev, "DTM: dtm ta ucode is not available\n");
+ dev_info(psp->adev->dev, "DTM: optional dtm ta ucode is not available\n");
return 0;
}
@@ -1192,9 +1188,6 @@ static int psp_get_fw_type(struct amdgpu_firmware_info *ucode,
enum psp_gfx_fw_type *type)
{
switch (ucode->ucode_id) {
- case AMDGPU_UCODE_ID_CAP:
- *type = GFX_FW_TYPE_CAP;
- break;
case AMDGPU_UCODE_ID_SDMA0:
*type = GFX_FW_TYPE_SDMA0;
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
index 4a4d8f2ccca2..297435c0c7c1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
@@ -252,9 +252,6 @@ struct psp_context
uint32_t asd_ucode_size;
uint8_t *asd_start_addr;
- /* cap firmware */
- const struct firmware *cap_fw;
-
/* fence buffer */
struct amdgpu_bo *fence_buf_bo;
uint64_t fence_buf_mc_addr;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index 43055a01f35e..ab379b44679c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -281,6 +281,11 @@ static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f, const char __user *
struct ras_debug_if data;
int ret = 0;
+ if (amdgpu_ras_intr_triggered()) {
+ DRM_WARN("RAS WARN: error injection currently inaccessible\n");
+ return size;
+ }
+
ret = amdgpu_ras_debugfs_ctrl_parse_data(f, buf, size, pos, &data);
if (ret)
return -EINVAL;
@@ -394,6 +399,10 @@ static ssize_t amdgpu_ras_sysfs_read(struct device *dev,
.head = obj->head,
};
+ if (amdgpu_ras_intr_triggered())
+ return snprintf(buf, PAGE_SIZE,
+ "Query currently inaccessible\n");
+
if (amdgpu_ras_error_query(obj->adev, &info))
return -EINVAL;
@@ -1415,12 +1424,22 @@ static void amdgpu_ras_do_recovery(struct work_struct *work)
{
struct amdgpu_ras *ras =
container_of(work, struct amdgpu_ras, recovery_work);
+ struct amdgpu_device *remote_adev = NULL;
+ struct amdgpu_device *adev = ras->adev;
+ struct list_head device_list, *device_list_handle = NULL;
+ struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev, false);
+
+ /* Build list of devices to query RAS related errors */
+ if (hive && adev->gmc.xgmi.num_physical_nodes > 1) {
+ device_list_handle = &hive->device_list;
+ } else {
+ list_add_tail(&adev->gmc.xgmi.head, &device_list);
+ device_list_handle = &device_list;
+ }
- /*
- * Query and print non zero error counter per IP block for
- * awareness before recovering GPU.
- */
- amdgpu_ras_log_on_err_counter(ras->adev);
+ list_for_each_entry(remote_adev, device_list_handle, gmc.xgmi.head) {
+ amdgpu_ras_log_on_err_counter(remote_adev);
+ }
if (amdgpu_device_should_recover_gpu(ras->adev))
amdgpu_device_gpu_recover(ras->adev, 0);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index c10ae1cdc1b9..6309ff72bd78 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -770,7 +770,6 @@ struct amdgpu_ttm_tt {
static const uint64_t hmm_range_flags[HMM_PFN_FLAG_MAX] = {
(1 << 0), /* HMM_PFN_VALID */
(1 << 1), /* HMM_PFN_WRITE */
- 0 /* HMM_PFN_DEVICE_PRIVATE */
};
static const uint64_t hmm_range_values[HMM_PFN_VALUE_MAX] = {
@@ -851,7 +850,7 @@ retry:
range->notifier_seq = mmu_interval_read_begin(&bo->notifier);
down_read(&mm->mmap_sem);
- r = hmm_range_fault(range, 0);
+ r = hmm_range_fault(range);
up_read(&mm->mmap_sem);
if (unlikely(r <= 0)) {
/*
@@ -968,7 +967,7 @@ static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)
/* Map SG to device */
r = -ENOMEM;
nents = dma_map_sg(adev->dev, ttm->sg->sgl, ttm->sg->nents, direction);
- if (nents != ttm->sg->nents)
+ if (nents == 0)
goto release_sg;
/* convert SG to linear array of pages and dma addresses */
@@ -1840,9 +1839,11 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
*The reserved vram for memory training must be pinned to the specified
*place on the VRAM, so reserve it early.
*/
- r = amdgpu_ttm_training_reserve_vram_init(adev);
- if (r)
- return r;
+ if (!amdgpu_sriov_vf(adev)) {
+ r = amdgpu_ttm_training_reserve_vram_init(adev);
+ if (r)
+ return r;
+ }
/* allocate memory as required for VGA
* This is used for VGA emulation and pre-OS scanout buffers to
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
index 88f226070229..b0e656409c03 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
@@ -283,8 +283,7 @@ union amdgpu_firmware_header {
* fw loading support
*/
enum AMDGPU_UCODE_ID {
- AMDGPU_UCODE_ID_CAP = 0, /* CAP must be the 1st fw to be loaded */
- AMDGPU_UCODE_ID_SDMA0,
+ AMDGPU_UCODE_ID_SDMA0 = 0,
AMDGPU_UCODE_ID_SDMA1,
AMDGPU_UCODE_ID_SDMA2,
AMDGPU_UCODE_ID_SDMA3,
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index 006f21ef7ddf..62635e58e45e 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -1358,8 +1358,6 @@ static int cik_asic_reset(struct amdgpu_device *adev)
int r;
if (cik_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
- if (!adev->in_suspend)
- amdgpu_inc_vram_lost(adev);
r = amdgpu_dpm_baco_reset(adev);
} else {
r = cik_asic_pci_config_reset(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index 42bbc0070831..f92c158d89a1 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -1113,7 +1113,7 @@ static int gfx_v10_0_mec_init(struct amdgpu_device *adev)
return r;
}
- memset(hpd, 0, adev->gfx.mec.hpd_eop_obj->tbo.mem.size);
+ memset(hpd, 0, mec_hpd_size);
amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
@@ -1940,6 +1940,11 @@ static int gfx_v10_0_rlc_resume(struct amdgpu_device *adev)
if (!amdgpu_sriov_vf(adev)) /* enable RLC SRM */
gfx_v10_0_rlc_enable_srm(adev);
} else {
+ if (amdgpu_sriov_vf(adev)) {
+ gfx_v10_0_init_csb(adev);
+ return 0;
+ }
+
adev->gfx.rlc.funcs->stop(adev);
/* disable CG */
@@ -4099,6 +4104,12 @@ static void gfx_v10_0_update_medium_grain_clock_gating(struct amdgpu_device *ade
/* It is disabled by HW by default */
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
+ /* 0 - Disable some blocks' MGCG */
+ WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, 0xe0000000);
+ WREG32_SOC15(GC, 0, mmCGTT_WD_CLK_CTRL, 0xff000000);
+ WREG32_SOC15(GC, 0, mmCGTT_VGT_CLK_CTRL, 0xff000000);
+ WREG32_SOC15(GC, 0, mmCGTT_IA_CLK_CTRL, 0xff000000);
+
/* 1 - RLC_CGTT_MGCG_OVERRIDE */
def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
@@ -4138,19 +4149,20 @@ static void gfx_v10_0_update_medium_grain_clock_gating(struct amdgpu_device *ade
if (def != data)
WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
- /* 2 - disable MGLS in RLC */
+ /* 2 - disable MGLS in CP */
+ data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
+ if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) {
+ data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
+ WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data);
+ }
+
+ /* 3 - disable MGLS in RLC */
data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) {
data &= ~RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
WREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL, data);
}
- /* 3 - disable MGLS in CP */
- data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
- if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) {
- data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
- WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data);
- }
}
}
@@ -4261,7 +4273,7 @@ static int gfx_v10_0_update_gfx_clock_gating(struct amdgpu_device *adev,
/* === CGCG /CGLS for GFX 3D Only === */
gfx_v10_0_update_3d_clock_gating(adev, enable);
/* === MGCG + MGLS === */
- gfx_v10_0_update_medium_grain_clock_gating(adev, enable);
+ /* gfx_v10_0_update_medium_grain_clock_gating(adev, enable); */
}
if (adev->cg_flags &
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index ba90a14089cf..0c390485bc10 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -1217,6 +1217,8 @@ static void gfx_v9_0_check_fw_write_wait(struct amdgpu_device *adev)
adev->gfx.mec_fw_write_wait = true;
break;
default:
+ adev->gfx.me_fw_write_wait = true;
+ adev->gfx.mec_fw_write_wait = true;
break;
}
}
@@ -1232,6 +1234,8 @@ struct amdgpu_gfxoff_quirk {
static const struct amdgpu_gfxoff_quirk amdgpu_gfxoff_quirk_list[] = {
/* https://bugzilla.kernel.org/show_bug.cgi?id=204689 */
{ 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
+ /* https://bugzilla.kernel.org/show_bug.cgi?id=207171 */
+ { 0x1002, 0x15dd, 0x103c, 0x83e7, 0xd3 },
{ 0, 0, 0, 0, 0 },
};
@@ -1946,7 +1950,7 @@ static int gfx_v9_0_mec_init(struct amdgpu_device *adev)
return r;
}
- memset(hpd, 0, adev->gfx.mec.hpd_eop_obj->tbo.mem.size);
+ memset(hpd, 0, mec_hpd_size);
amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
@@ -4306,7 +4310,7 @@ static const struct soc15_reg_entry vgpr_init_regs_arcturus[] = {
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 },
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 4 },
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 },
- { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0x81 },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0xbf },
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x400000 }, /* 64KB LDS */
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0xffffffff },
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0xffffffff },
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c
index cceb46faf212..dce945ef21a5 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c
@@ -710,14 +710,16 @@ static int gfx_v9_4_query_utc_edc_status(struct amdgpu_device *adev,
sec_count = REG_GET_FIELD(data, VML2_MEM_ECC_CNTL, SEC_COUNT);
if (sec_count) {
- DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i,
+ dev_info(adev->dev,
+ "Instance[%d]: SubBlock %s, SEC %d\n", i,
vml2_mems[i], sec_count);
err_data->ce_count += sec_count;
}
ded_count = REG_GET_FIELD(data, VML2_MEM_ECC_CNTL, DED_COUNT);
if (ded_count) {
- DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i,
+ dev_info(adev->dev,
+ "Instance[%d]: SubBlock %s, DED %d\n", i,
vml2_mems[i], ded_count);
err_data->ue_count += ded_count;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
index 0d413fabd015..c0e3efcb09bf 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
@@ -1539,8 +1539,11 @@ static const struct soc15_reg_entry mmhub_v9_4_edc_cnt_regs[] = {
{ SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT3), 0, 0, 0 },
};
-static int mmhub_v9_4_get_ras_error_count(const struct soc15_reg_entry *reg,
- uint32_t value, uint32_t *sec_count, uint32_t *ded_count)
+static int mmhub_v9_4_get_ras_error_count(struct amdgpu_device *adev,
+ const struct soc15_reg_entry *reg,
+ uint32_t value,
+ uint32_t *sec_count,
+ uint32_t *ded_count)
{
uint32_t i;
uint32_t sec_cnt, ded_cnt;
@@ -1553,7 +1556,7 @@ static int mmhub_v9_4_get_ras_error_count(const struct soc15_reg_entry *reg,
mmhub_v9_4_ras_fields[i].sec_count_mask) >>
mmhub_v9_4_ras_fields[i].sec_count_shift;
if (sec_cnt) {
- DRM_INFO("MMHUB SubBlock %s, SEC %d\n",
+ dev_info(adev->dev, "MMHUB SubBlock %s, SEC %d\n",
mmhub_v9_4_ras_fields[i].name,
sec_cnt);
*sec_count += sec_cnt;
@@ -1563,7 +1566,7 @@ static int mmhub_v9_4_get_ras_error_count(const struct soc15_reg_entry *reg,
mmhub_v9_4_ras_fields[i].ded_count_mask) >>
mmhub_v9_4_ras_fields[i].ded_count_shift;
if (ded_cnt) {
- DRM_INFO("MMHUB SubBlock %s, DED %d\n",
+ dev_info(adev->dev, "MMHUB SubBlock %s, DED %d\n",
mmhub_v9_4_ras_fields[i].name,
ded_cnt);
*ded_count += ded_cnt;
@@ -1588,7 +1591,7 @@ static void mmhub_v9_4_query_ras_error_count(struct amdgpu_device *adev,
reg_value =
RREG32(SOC15_REG_ENTRY_OFFSET(mmhub_v9_4_edc_cnt_regs[i]));
if (reg_value)
- mmhub_v9_4_get_ras_error_count(&mmhub_v9_4_edc_cnt_regs[i],
+ mmhub_v9_4_get_ras_error_count(adev, &mmhub_v9_4_edc_cnt_regs[i],
reg_value, &sec_count, &ded_count);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c
index 033cbbca2072..52318b03c424 100644
--- a/drivers/gpu/drm/amd/amdgpu/nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/nv.c
@@ -351,8 +351,6 @@ static int nv_asic_reset(struct amdgpu_device *adev)
struct smu_context *smu = &adev->smu;
if (nv_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
- if (!adev->in_suspend)
- amdgpu_inc_vram_lost(adev);
ret = smu_baco_enter(smu);
if (ret)
return ret;
@@ -360,8 +358,6 @@ static int nv_asic_reset(struct amdgpu_device *adev)
if (ret)
return ret;
} else {
- if (!adev->in_suspend)
- amdgpu_inc_vram_lost(adev);
ret = nv_asic_mode1_reset(adev);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
index 6ff9a9544110..a44fd6060d5b 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
+++ b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
@@ -246,7 +246,6 @@ enum psp_gfx_fw_type {
GFX_FW_TYPE_SDMA6 = 56, /* SDMA6 MI */
GFX_FW_TYPE_SDMA7 = 57, /* SDMA7 MI */
GFX_FW_TYPE_VCN1 = 58, /* VCN1 MI */
- GFX_FW_TYPE_CAP = 62, /* CAP_FW VG */
GFX_FW_TYPE_MAX
};
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
index 43896f4779b0..735c43c7daab 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
@@ -44,7 +44,6 @@
MODULE_FIRMWARE("amdgpu/vega10_sos.bin");
MODULE_FIRMWARE("amdgpu/vega10_asd.bin");
-MODULE_FIRMWARE("amdgpu/vega10_cap.bin");
MODULE_FIRMWARE("amdgpu/vega12_sos.bin");
MODULE_FIRMWARE("amdgpu/vega12_asd.bin");
@@ -64,7 +63,6 @@ static int psp_v3_1_init_microcode(struct psp_context *psp)
char fw_name[30];
int err = 0;
const struct psp_firmware_header_v1_0 *hdr;
- struct amdgpu_firmware_info *info = NULL;
DRM_DEBUG("\n");
@@ -114,26 +112,6 @@ static int psp_v3_1_init_microcode(struct psp_context *psp)
adev->psp.asd_start_addr = (uint8_t *)hdr +
le32_to_cpu(hdr->header.ucode_array_offset_bytes);
- if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_VEGA10) {
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_cap.bin",
- chip_name);
- err = request_firmware(&adev->psp.cap_fw, fw_name, adev->dev);
- if (err)
- goto out;
-
- err = amdgpu_ucode_validate(adev->psp.cap_fw);
- if (err)
- goto out;
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CAP];
- info->ucode_id = AMDGPU_UCODE_ID_CAP;
- info->fw = adev->psp.cap_fw;
- hdr = (const struct psp_firmware_header_v1_0 *)
- adev->psp.cap_fw->data;
- adev->firmware.fw_size += ALIGN(
- le32_to_cpu(hdr->header.ucode_size_bytes), PAGE_SIZE);
- }
-
return 0;
out:
if (err) {
@@ -144,8 +122,6 @@ out:
adev->psp.sos_fw = NULL;
release_firmware(adev->psp.asd_fw);
adev->psp.asd_fw = NULL;
- release_firmware(adev->psp.cap_fw);
- adev->psp.cap_fw = NULL;
}
return err;
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 9159bd46482b..5f3a5ee2a3f4 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -677,7 +677,7 @@ static uint64_t sdma_v4_0_ring_get_wptr(struct amdgpu_ring *ring)
}
/**
- * sdma_v4_0_ring_set_wptr - commit the write pointer
+ * sdma_v4_0_page_ring_set_wptr - commit the write pointer
*
* @ring: amdgpu ring pointer
*
@@ -977,7 +977,7 @@ static void sdma_v4_0_page_stop(struct amdgpu_device *adev)
}
/**
- * sdma_v_0_ctx_switch_enable - stop the async dma engines context switch
+ * sdma_v4_0_ctx_switch_enable - stop the async dma engines context switch
*
* @adev: amdgpu_device pointer
* @enable: enable/disable the DMA MEs context switch.
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
index 4cb4c891120b..0860e85a2d35 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
@@ -3439,7 +3439,6 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
if (adev->asic_type == CHIP_HAINAN) {
if ((adev->pdev->revision == 0x81) ||
- (adev->pdev->revision == 0x83) ||
(adev->pdev->revision == 0xC3) ||
(adev->pdev->device == 0x6664) ||
(adev->pdev->device == 0x6665) ||
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index a40499d51c93..d42a8d8a0dea 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -569,14 +569,10 @@ static int soc15_asic_reset(struct amdgpu_device *adev)
switch (soc15_asic_reset_method(adev)) {
case AMD_RESET_METHOD_BACO:
- if (!adev->in_suspend)
- amdgpu_inc_vram_lost(adev);
return soc15_asic_baco_reset(adev);
case AMD_RESET_METHOD_MODE2:
return amdgpu_dpm_mode2_reset(adev);
default:
- if (!adev->in_suspend)
- amdgpu_inc_vram_lost(adev);
return soc15_asic_mode1_reset(adev);
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 78b35901643b..3ce10e05d0d6 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -765,8 +765,6 @@ static int vi_asic_reset(struct amdgpu_device *adev)
int r;
if (vi_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
- if (!adev->in_suspend)
- amdgpu_inc_vram_lost(adev);
r = amdgpu_dpm_baco_reset(adev);
} else {
r = vi_asic_pci_config_reset(adev);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index d5386f15c4a5..05bc6d96ec52 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -1112,9 +1112,9 @@ kfd_gtt_out:
return 0;
kfd_gtt_no_free_chunk:
- pr_debug("Allocation failed with mem_obj = %p\n", mem_obj);
+ pr_debug("Allocation failed with mem_obj = %p\n", *mem_obj);
mutex_unlock(&kfd->gtt_sa_lock);
- kfree(mem_obj);
+ kfree(*mem_obj);
return -ENOMEM;
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index a4256780e70e..f7c5cdc10a70 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -524,8 +524,9 @@ static void dm_dcn_crtc_high_irq(void *interrupt_params)
acrtc_state = to_dm_crtc_state(acrtc->base.state);
- DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n", acrtc->crtc_id,
- amdgpu_dm_vrr_active(acrtc_state));
+ DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
+ amdgpu_dm_vrr_active(acrtc_state),
+ acrtc_state->active_planes);
amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
drm_crtc_handle_vblank(&acrtc->base);
@@ -545,7 +546,18 @@ static void dm_dcn_crtc_high_irq(void *interrupt_params)
&acrtc_state->vrr_params.adjust);
}
- if (acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED) {
+ /*
+ * If there aren't any active_planes then DCH HUBP may be clock-gated.
+ * In that case, pageflip completion interrupts won't fire and pageflip
+ * completion events won't get delivered. Prevent this by sending
+ * pending pageflip events from here if a flip is still pending.
+ *
+ * If any planes are enabled, use dm_pflip_high_irq() instead, to
+ * avoid race conditions between flip programming and completion,
+ * which could cause too early flip completion events.
+ */
+ if (acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
+ acrtc_state->active_planes == 0) {
if (acrtc->event) {
drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
acrtc->event = NULL;
@@ -3627,6 +3639,9 @@ fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
case DRM_FORMAT_NV12:
plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
break;
+ case DRM_FORMAT_P010:
+ plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
+ break;
default:
DRM_ERROR(
"Unsupported screen format %s\n",
@@ -4708,10 +4723,10 @@ amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
static int
amdgpu_dm_connector_late_register(struct drm_connector *connector)
{
+#if defined(CONFIG_DEBUG_FS)
struct amdgpu_dm_connector *amdgpu_dm_connector =
to_amdgpu_dm_connector(connector);
-#if defined(CONFIG_DEBUG_FS)
connector_debugfs_init(amdgpu_dm_connector);
#endif
@@ -5523,6 +5538,8 @@ static int get_plane_formats(const struct drm_plane *plane,
if (plane_cap && plane_cap->pixel_format_support.nv12)
formats[num_formats++] = DRM_FORMAT_NV12;
+ if (plane_cap && plane_cap->pixel_format_support.p010)
+ formats[num_formats++] = DRM_FORMAT_P010;
break;
case DRM_PLANE_TYPE_OVERLAY:
@@ -5575,12 +5592,15 @@ static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
}
if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
- plane_cap && plane_cap->pixel_format_support.nv12) {
+ plane_cap &&
+ (plane_cap->pixel_format_support.nv12 ||
+ plane_cap->pixel_format_support.p010)) {
/* This only affects YUV formats. */
drm_plane_create_color_properties(
plane,
BIT(DRM_COLOR_YCBCR_BT601) |
- BIT(DRM_COLOR_YCBCR_BT709),
+ BIT(DRM_COLOR_YCBCR_BT709) |
+ BIT(DRM_COLOR_YCBCR_BT2020),
BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
BIT(DRM_COLOR_YCBCR_FULL_RANGE),
DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
@@ -5909,7 +5929,8 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
adev->mode_info.underscan_vborder_property,
0);
- drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
+ if (!aconnector->mst_port)
+ drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
/* This defaults to the max in the range, but we want 8bpc for non-edp. */
aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
@@ -5928,8 +5949,9 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
&aconnector->base.base,
dm->ddev->mode_config.hdr_output_metadata_property, 0);
- drm_connector_attach_vrr_capable_property(
- &aconnector->base);
+ if (!aconnector->mst_port)
+ drm_connector_attach_vrr_capable_property(&aconnector->base);
+
#ifdef CONFIG_DRM_AMD_DC_HDCP
if (adev->dm.hdcp_workqueue)
drm_connector_attach_content_protection_property(&aconnector->base, true);
@@ -6252,12 +6274,6 @@ static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
y <= -amdgpu_crtc->max_cursor_height)
return 0;
- if (crtc->primary->state) {
- /* avivo cursor are offset into the total surface */
- x += crtc->primary->state->src_x >> 16;
- y += crtc->primary->state->src_y >> 16;
- }
-
if (x < 0) {
xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
x = 0;
@@ -6267,6 +6283,7 @@ static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
y = 0;
}
position->enable = true;
+ position->translate_by_source = true;
position->x = x;
position->y = y;
position->x_hotspot = xorigin;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
index 5b70ed3cdb88..78e1c11d4ae5 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
@@ -192,10 +192,13 @@ void hdcp_update_display(struct hdcp_workqueue *hdcp_work,
&hdcp_work->srm_version);
display->adjust.disable = 0;
- if (content_type == DRM_MODE_HDCP_CONTENT_TYPE0)
+ if (content_type == DRM_MODE_HDCP_CONTENT_TYPE0) {
+ hdcp_w->link.adjust.hdcp1.disable = 0;
hdcp_w->link.adjust.hdcp2.force_type = MOD_HDCP_FORCE_TYPE_0;
- else if (content_type == DRM_MODE_HDCP_CONTENT_TYPE1)
+ } else if (content_type == DRM_MODE_HDCP_CONTENT_TYPE1) {
+ hdcp_w->link.adjust.hdcp1.disable = 1;
hdcp_w->link.adjust.hdcp2.force_type = MOD_HDCP_FORCE_TYPE_1;
+ }
schedule_delayed_work(&hdcp_w->property_validate_dwork,
msecs_to_jiffies(DRM_HDCP_CHECK_PERIOD_MS));
@@ -263,7 +266,7 @@ static void event_callback(struct work_struct *work)
mutex_lock(&hdcp_work->mutex);
- cancel_delayed_work(&hdcp_work->watchdog_timer_dwork);
+ cancel_delayed_work(&hdcp_work->callback_dwork);
mod_hdcp_process_event(&hdcp_work->hdcp, MOD_HDCP_EVENT_CALLBACK,
&hdcp_work->output);
@@ -344,6 +347,8 @@ static void event_watchdog_timer(struct work_struct *work)
mutex_lock(&hdcp_work->mutex);
+ cancel_delayed_work(&hdcp_work->watchdog_timer_dwork);
+
mod_hdcp_process_event(&hdcp_work->hdcp,
MOD_HDCP_EVENT_WATCHDOG_TIMEOUT,
&hdcp_work->output);
@@ -414,7 +419,8 @@ static void update_config(void *handle, struct cp_psp_stream_config *config)
link->dp.rev = aconnector->dc_link->dpcd_caps.dpcd_rev.raw;
link->dp.mst_supported = config->mst_supported;
display->adjust.disable = 1;
- link->adjust.auth_delay = 2;
+ link->adjust.auth_delay = 3;
+ link->adjust.hdcp1.disable = 0;
hdcp_update_display(hdcp_work, link_index, aconnector, DRM_MODE_HDCP_CONTENT_TYPE0, false);
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index d56b758bcce5..3db1ec35d2b4 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -416,6 +416,14 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
drm_connector_attach_encoder(&aconnector->base,
&aconnector->mst_encoder->base);
+ connector->max_bpc_property = master->base.max_bpc_property;
+ if (connector->max_bpc_property)
+ drm_connector_attach_max_bpc_property(connector, 8, 16);
+
+ connector->vrr_capable_property = master->base.vrr_capable_property;
+ if (connector->vrr_capable_property)
+ drm_connector_attach_vrr_capable_property(connector);
+
drm_object_attach_property(
&connector->base,
dev->mode_config.path_property,
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
index ab267ddd4abe..24c5765890fa 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
@@ -643,7 +643,7 @@ static void rn_clk_mgr_helper_populate_bw_params(struct clk_bw_params *bw_params
/* Find lowest DPM, FCLK is filled in reverse order*/
for (i = PP_SMU_NUM_FCLK_DPM_LEVELS - 1; i >= 0; i--) {
- if (clock_table->FClocks[i].Freq != 0) {
+ if (clock_table->FClocks[i].Freq != 0 && clock_table->FClocks[i].Vol != 0) {
j = i;
break;
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 2ffb22177df9..8489f1e56892 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -283,6 +283,8 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc,
int i = 0;
bool ret = false;
+ stream->adjust = *adjust;
+
for (i = 0; i < MAX_PIPES; i++) {
struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
@@ -1360,6 +1362,26 @@ bool dc_commit_state(struct dc *dc, struct dc_state *context)
return (result == DC_OK);
}
+static bool is_flip_pending_in_pipes(struct dc *dc, struct dc_state *context)
+{
+ int i;
+ struct pipe_ctx *pipe;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (!pipe->plane_state)
+ continue;
+
+ /* Must set to false to start with, due to OR in update function */
+ pipe->plane_state->status.is_flip_pending = false;
+ dc->hwss.update_pending_status(pipe);
+ if (pipe->plane_state->status.is_flip_pending)
+ return true;
+ }
+ return false;
+}
+
bool dc_post_update_surfaces_to_stream(struct dc *dc)
{
int i;
@@ -1370,6 +1392,9 @@ bool dc_post_update_surfaces_to_stream(struct dc *dc)
post_surface_trace(dc);
+ if (is_flip_pending_in_pipes(dc, context))
+ return true;
+
for (i = 0; i < dc->res_pool->pipe_count; i++)
if (context->res_ctx.pipe_ctx[i].stream == NULL ||
context->res_ctx.pipe_ctx[i].plane_state == NULL) {
@@ -1703,6 +1728,9 @@ static enum surface_update_type det_surface_update(const struct dc *dc,
if (u->coeff_reduction_factor)
update_flags->bits.coeff_reduction_change = 1;
+ if (u->gamut_remap_matrix)
+ update_flags->bits.gamut_remap_change = 1;
+
if (u->gamma) {
enum surface_pixel_format format = SURFACE_PIXEL_FORMAT_GRPH_BEGIN;
@@ -1728,7 +1756,8 @@ static enum surface_update_type det_surface_update(const struct dc *dc,
if (update_flags->bits.input_csc_change
|| update_flags->bits.coeff_reduction_change
- || update_flags->bits.gamma_change) {
+ || update_flags->bits.gamma_change
+ || update_flags->bits.gamut_remap_change) {
type = UPDATE_TYPE_FULL;
elevate_update_type(&overall_type, type);
}
@@ -1832,8 +1861,9 @@ enum surface_update_type dc_check_update_surfaces_for_stream(
// Else we fallback to mem compare.
} else if (memcmp(&dc->current_state->bw_ctx.bw.dcn.clk, &dc->clk_mgr->clks, offsetof(struct dc_clocks, prev_p_state_change_support)) != 0) {
dc->optimized_required = true;
- } else if (dc->wm_optimized_required)
- dc->optimized_required = true;
+ }
+
+ dc->optimized_required |= dc->wm_optimized_required;
}
return type;
@@ -1973,6 +2003,10 @@ static void copy_surface_update_to_plane(
if (srf_update->coeff_reduction_factor)
surface->coeff_reduction_factor =
*srf_update->coeff_reduction_factor;
+
+ if (srf_update->gamut_remap_matrix)
+ surface->gamut_remap_matrix =
+ *srf_update->gamut_remap_matrix;
}
static void copy_stream_update_to_stream(struct dc *dc,
@@ -2431,7 +2465,7 @@ void dc_commit_updates_for_stream(struct dc *dc,
enum surface_update_type update_type;
struct dc_state *context;
struct dc_context *dc_ctx = dc->ctx;
- int i;
+ int i, j;
stream_status = dc_stream_get_status(stream);
context = dc->current_state;
@@ -2469,6 +2503,17 @@ void dc_commit_updates_for_stream(struct dc *dc,
copy_surface_update_to_plane(surface, &srf_updates[i]);
+ if (update_type >= UPDATE_TYPE_MED) {
+ for (j = 0; j < dc->res_pool->pipe_count; j++) {
+ struct pipe_ctx *pipe_ctx =
+ &context->res_ctx.pipe_ctx[j];
+
+ if (pipe_ctx->plane_state != surface)
+ continue;
+
+ resource_build_scaling_params(pipe_ctx);
+ }
+ }
}
copy_stream_update_to_stream(dc, context, stream, stream_update);
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index 7cbb1efb4f68..aa3c45a69b5e 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -2530,7 +2530,7 @@ static void dp_test_send_phy_test_pattern(struct dc_link *link)
/* get phy test pattern and pattern parameters from DP receiver */
core_link_read_dpcd(
link,
- DP_TEST_PHY_PATTERN,
+ DP_PHY_TEST_PATTERN,
&dpcd_test_pattern.raw,
sizeof(dpcd_test_pattern));
core_link_read_dpcd(
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index 75c7ce4c7581..f4bcc71b2920 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -1077,6 +1077,7 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
* on certain displays, such as the Sharp 4k
*/
pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_30BPP;
+ pipe_ctx->plane_res.scl_data.lb_params.alpha_en = plane_state->per_pixel_alpha;
pipe_ctx->plane_res.scl_data.recout.x += timing->h_border_left;
pipe_ctx->plane_res.scl_data.recout.y += timing->v_border_top;
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index d3ceb39e428e..1935cf6601eb 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -726,6 +726,7 @@ union surface_update_flags {
uint32_t output_tf_change:1;
uint32_t pixel_format_change:1;
uint32_t plane_size_change:1;
+ uint32_t gamut_remap_change:1;
/* Full updates */
uint32_t new_plane:1;
@@ -760,6 +761,7 @@ struct dc_plane_state {
struct dc_csc_transform input_csc_color_matrix;
struct fixed31_32 coeff_reduction_factor;
struct fixed31_32 hdr_mult;
+ struct colorspace_transform gamut_remap_matrix;
// TODO: No longer used, remove
struct dc_hdr_static_metadata hdr_static_ctx;
@@ -839,6 +841,7 @@ struct dc_surface_update {
const struct dc_transfer_func *func_shaper;
const struct dc_3dlut *lut3d_func;
const struct dc_transfer_func *blend_tf;
+ const struct colorspace_transform *gamut_remap_matrix;
};
/*
diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
index 25c50bcab9e9..a8dc3082e3e1 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
@@ -385,6 +385,8 @@ struct dc_cursor_position {
*/
bool enable;
+ /* Translate cursor x/y by the source rectangle for each plane. */
+ bool translate_by_source;
};
struct dc_cursor_mi_param {
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 0976e378659f..c279982947e1 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -2685,6 +2685,23 @@ void dce110_set_cursor_position(struct pipe_ctx *pipe_ctx)
.mirror = pipe_ctx->plane_state->horizontal_mirror
};
+ /**
+ * If the cursor's source viewport is clipped then we need to
+ * translate the cursor to appear in the correct position on
+ * the screen.
+ *
+ * This translation isn't affected by scaling so it needs to be
+ * done *after* we adjust the position for the scale factor.
+ *
+ * This is only done by opt-in for now since there are still
+ * some usecases like tiled display that might enable the
+ * cursor on both streams while expecting dc to clip it.
+ */
+ if (pos_cpy.translate_by_source) {
+ pos_cpy.x += pipe_ctx->plane_state->src_rect.x;
+ pos_cpy.y += pipe_ctx->plane_state->src_rect.y;
+ }
+
if (pipe_ctx->plane_state->address.type
== PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
pos_cpy.enable = false;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 9cc3314966bd..b0357546471b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -2004,6 +2004,12 @@ void dcn10_program_gamut_remap(struct pipe_ctx *pipe_ctx)
for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
adjust.temperature_matrix[i] =
pipe_ctx->stream->gamut_remap_matrix.matrix[i];
+ } else if (pipe_ctx->plane_state &&
+ pipe_ctx->plane_state->gamut_remap_matrix.enable_remap == true) {
+ adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
+ for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
+ adjust.temperature_matrix[i] =
+ pipe_ctx->plane_state->gamut_remap_matrix.matrix[i];
}
pipe_ctx->plane_res.dpp->funcs->dpp_set_gamut_remap(pipe_ctx->plane_res.dpp, &adjust);
@@ -3015,12 +3021,50 @@ void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
int x_pos = pos_cpy.x;
int y_pos = pos_cpy.y;
- // translate cursor from stream space to plane space
+ /**
+ * DC cursor is stream space, HW cursor is plane space and drawn
+ * as part of the framebuffer.
+ *
+ * Cursor position can't be negative, but hotspot can be used to
+ * shift cursor out of the plane bounds. Hotspot must be smaller
+ * than the cursor size.
+ */
+
+ /**
+ * Translate cursor from stream space to plane space.
+ *
+ * If the cursor is scaled then we need to scale the position
+ * to be in the approximately correct place. We can't do anything
+ * about the actual size being incorrect, that's a limitation of
+ * the hardware.
+ */
x_pos = (x_pos - x_plane) * pipe_ctx->plane_state->src_rect.width /
pipe_ctx->plane_state->dst_rect.width;
y_pos = (y_pos - y_plane) * pipe_ctx->plane_state->src_rect.height /
pipe_ctx->plane_state->dst_rect.height;
+ /**
+ * If the cursor's source viewport is clipped then we need to
+ * translate the cursor to appear in the correct position on
+ * the screen.
+ *
+ * This translation isn't affected by scaling so it needs to be
+ * done *after* we adjust the position for the scale factor.
+ *
+ * This is only done by opt-in for now since there are still
+ * some usecases like tiled display that might enable the
+ * cursor on both streams while expecting dc to clip it.
+ */
+ if (pos_cpy.translate_by_source) {
+ x_pos += pipe_ctx->plane_state->src_rect.x;
+ y_pos += pipe_ctx->plane_state->src_rect.y;
+ }
+
+ /**
+ * If the position is negative then we need to add to the hotspot
+ * to shift the cursor outside the plane.
+ */
+
if (x_pos < 0) {
pos_cpy.x_hotspot -= x_pos;
x_pos = 0;
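
For reference, the transform described by the comments above as a standalone sketch; the rect, cursor, and hotspot values are illustrative, not real pipe state:

	#include <stdio.h>

	struct rect { int x, y, width, height; };

	int main(void)
	{
		struct rect src = { 960, 0, 960, 1080 };  /* clipped source viewport */
		struct rect dst = { 0, 0, 1920, 2160 };   /* scaled destination */
		int x_plane = 0, y_plane = 0;             /* plane origin in stream space */
		int x = 1000, y = 500;                    /* stream-space cursor */
		int x_hotspot = 4, y_hotspot = 4;

		/* stream space -> plane space, compensating for the scale factor */
		x = (x - x_plane) * src.width / dst.width;
		y = (y - y_plane) * src.height / dst.height;

		/* opt-in translate_by_source step for clipped viewports */
		x += src.x;
		y += src.y;

		/* negative positions are folded into the hotspot */
		if (x < 0) { x_hotspot -= x; x = 0; }
		if (y < 0) { y_hotspot -= y; y = 0; }

		printf("plane-space cursor (%d, %d), hotspot (%d, %d)\n",
		       x, y, x_hotspot, y_hotspot);   /* (1460, 250), (4, 4) */
		return 0;
	}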
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
index 63acb8ff7462..17d96ec6acd8 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
@@ -343,6 +343,23 @@ void optc1_set_blank_data_double_buffer(struct timing_generator *optc, bool enab
}
/**
+ * optc1_set_timing_double_buffer() - DRR double buffering control
+ *
+ * Sets double buffer point for V_TOTAL, H_TOTAL, VTOTAL_MIN,
+ * VTOTAL_MAX, VTOTAL_MIN_SEL and VTOTAL_MAX_SEL registers.
+ *
+ * Options: any time, start of frame, dp start of frame (range timing)
+ */
+void optc1_set_timing_double_buffer(struct timing_generator *optc, bool enable)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+ uint32_t mode = enable ? 2 : 0;
+
+ REG_UPDATE(OTG_DOUBLE_BUFFER_CONTROL,
+ OTG_RANGE_TIMING_DBUF_UPDATE_MODE, mode);
+}
+
+/**
* unblank_crtc
* Call ASIC Control Object to UnBlank CRTC.
*/
@@ -1353,6 +1370,7 @@ void optc1_clear_optc_underflow(struct timing_generator *optc)
void optc1_tg_init(struct timing_generator *optc)
{
optc1_set_blank_data_double_buffer(optc, true);
+ optc1_set_timing_double_buffer(optc, true);
optc1_clear_optc_underflow(optc);
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
index f277656d5464..9a459a8fe8a0 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
@@ -185,6 +185,7 @@ struct dcn_optc_registers {
SF(OTG0_OTG_GLOBAL_CONTROL0, OTG_MASTER_UPDATE_LOCK_SEL, mask_sh),\
SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_UPDATE_PENDING, mask_sh),\
SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_BLANK_DATA_DOUBLE_BUFFER_EN, mask_sh),\
+ SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_RANGE_TIMING_DBUF_UPDATE_MODE, mask_sh),\
SF(OTG0_OTG_H_TOTAL, OTG_H_TOTAL, mask_sh),\
SF(OTG0_OTG_H_BLANK_START_END, OTG_H_BLANK_START, mask_sh),\
SF(OTG0_OTG_H_BLANK_START_END, OTG_H_BLANK_END, mask_sh),\
@@ -643,6 +644,8 @@ bool optc1_is_optc_underflow_occurred(struct timing_generator *optc);
void optc1_set_blank_data_double_buffer(struct timing_generator *optc, bool enable);
+void optc1_set_timing_double_buffer(struct timing_generator *optc, bool enable);
+
bool optc1_get_otg_active_size(struct timing_generator *optc,
uint32_t *otg_active_width,
uint32_t *otg_active_height);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
index 261bdc3a8218..07265ca7d28c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
@@ -552,7 +552,8 @@ static const struct dc_plane_cap plane_cap = {
.pixel_format_support = {
.argb8888 = true,
.nv12 = true,
- .fp16 = true
+ .fp16 = true,
+ .p010 = true
},
.max_upscale_factor = {
@@ -584,7 +585,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.disable_pplib_clock_request = false,
.disable_pplib_wm_range = false,
.pplib_wm_report_mode = WM_REPORT_DEFAULT,
- .pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
+ .pipe_split_policy = MPC_SPLIT_DYNAMIC,
.force_single_disp_pipe_split = true,
.disable_dcc = DCC_ENABLE,
.voltage_align_fclk = true,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
index 233318260da4..22f421e82733 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
@@ -1373,6 +1373,7 @@ static void dcn20_update_dchubp_dpp(
}
if (pipe_ctx->update_flags.bits.viewport ||
+ (context == dc->current_state && plane_state->update_flags.bits.position_change) ||
(context == dc->current_state && plane_state->update_flags.bits.scaling_change) ||
(context == dc->current_state && pipe_ctx->stream->update_flags.bits.scaling)) {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
index a67395208991..5cdbba0cd873 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
@@ -1012,7 +1012,8 @@ static const struct dc_plane_cap plane_cap = {
.pixel_format_support = {
.argb8888 = true,
.nv12 = true,
- .fp16 = true
+ .fp16 = true,
+ .p010 = true
},
.max_upscale_factor = {
@@ -3342,7 +3343,7 @@ void dcn20_cap_soc_clocks(
void dcn20_update_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb,
struct pp_smu_nv_clock_table *max_clocks, unsigned int *uclk_states, unsigned int num_states)
{
- struct _vcs_dpi_voltage_scaling_st calculated_states[MAX_CLOCK_LIMIT_STATES];
+ struct _vcs_dpi_voltage_scaling_st calculated_states[DC__VOLTAGE_STATES];
int i;
int num_calculated_states = 0;
int min_dcfclk = 0;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
index 51b5910cd05f..b25484aa8222 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
@@ -300,7 +300,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = {
.xfc_bus_transport_time_us = 4,
.xfc_xbuf_latency_tolerance_us = 4,
.use_urgent_burst_bw = 1,
- .num_states = 9
+ .num_states = 8
};
#ifndef MAX
@@ -838,7 +838,8 @@ static const struct dc_plane_cap plane_cap = {
.pixel_format_support = {
.argb8888 = true,
.nv12 = true,
- .fp16 = true
+ .fp16 = true,
+ .p010 = true
},
.max_upscale_factor = {
@@ -1376,21 +1377,8 @@ static void update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param
unsigned int i, j, k;
int closest_clk_lvl;
- // diags does not retrieve proper values from SMU
- // cap states to 5 and make state 5 the max state
- if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) || IS_DIAG_DC(dc->ctx->dce_environment)) {
- dcn2_1_soc.num_states = 5;
-
- dcn2_1_soc.clock_limits[5].state = 5;
- dcn2_1_soc.clock_limits[5].dcfclk_mhz = 810.0;
- dcn2_1_soc.clock_limits[5].fabricclk_mhz = 1600.0;
- dcn2_1_soc.clock_limits[5].dispclk_mhz = 1395.0;
- dcn2_1_soc.clock_limits[5].dppclk_mhz = 1285.0;
- dcn2_1_soc.clock_limits[5].phyclk_mhz = 1325.0;
- dcn2_1_soc.clock_limits[5].socclk_mhz = 953.0;
- dcn2_1_soc.clock_limits[5].dscclk_mhz = 489.0;
- dcn2_1_soc.clock_limits[5].dram_speed_mts = 4266.0;
- } else {
+ // Default clock levels are used for diags, which may lead to overclocking.
+ if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) && !IS_DIAG_DC(dc->ctx->dce_environment)) {
dcn2_1_ip.max_num_otg = pool->base.res_cap->num_timing_generator;
dcn2_1_ip.max_num_dpp = pool->base.pipe_count;
dcn2_1_soc.num_chans = bw_params->num_channels;
@@ -1403,16 +1391,16 @@ static void update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param
dcn2_1_soc.clock_limits[0].dram_speed_mts = clk_table->entries[0].memclk_mhz * 2;
/*
- * Other levels: find cloest DCN clocks that fit the given clock limit using dcfclk
- * as indicater
+ * Other levels: find closest DCN clocks that fit the given clock limit using dcfclk
+ * as indicator
*/
closest_clk_lvl = -1;
/* index currently being filled */
k = 1;
for (i = 1; i < clk_table->num_entries; i++) {
- /* loop backwards, skip duplicate state, +1 because SMU has precision issue */
- for (j = dcn2_1_soc.num_states - 2; j >= k; j--) {
+		/* loop backwards, skip duplicate state */
+ for (j = dcn2_1_soc.num_states - 1; j >= k; j--) {
if ((unsigned int) dcn2_1_soc.clock_limits[j].dcfclk_mhz <= clk_table->entries[i].dcfclk_mhz) {
closest_clk_lvl = j;
break;
@@ -1437,13 +1425,13 @@ static void update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param
k++;
}
}
-
- /* duplicate last level */
- dcn2_1_soc.clock_limits[k] = dcn2_1_soc.clock_limits[k - 1];
- dcn2_1_soc.clock_limits[k].state = k;
- dcn2_1_soc.num_states = k + 1;
+ dcn2_1_soc.num_states = k;
}
+ /* duplicate last level */
+ dcn2_1_soc.clock_limits[dcn2_1_soc.num_states] = dcn2_1_soc.clock_limits[dcn2_1_soc.num_states - 1];
+ dcn2_1_soc.clock_limits[dcn2_1_soc.num_states].state = dcn2_1_soc.num_states;
+
dml_init_instance(&dc->dml, &dcn2_1_soc, &dcn2_1_ip, DML_PROJECT_DCN21);
}
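
The backwards search above is easier to see in isolation; a standalone sketch of the same idea, simplified to start at level 0 instead of k, with illustrative clock values:

	#include <stdio.h>

	int main(void)
	{
		unsigned int dcfclk_mhz[] = { 400, 600, 800, 1000, 1200 };
		int num_states = 5;
		unsigned int entry_dcfclk = 850;   /* one SMU clock-table entry */
		int closest_clk_lvl = -1;
		int j;

		/* pick the highest level that does not exceed the entry */
		for (j = num_states - 1; j >= 0; j--) {
			if (dcfclk_mhz[j] <= entry_dcfclk) {
				closest_clk_lvl = j;
				break;
			}
		}

		if (closest_clk_lvl >= 0)
			printf("closest level for %u MHz: %d (%u MHz)\n",
			       entry_dcfclk, closest_clk_lvl,
			       dcfclk_mhz[closest_clk_lvl]);   /* level 2 (800 MHz) */
		return 0;
	}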
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dc_features.h b/drivers/gpu/drm/amd/display/dc/dml/dc_features.h
index ea4cde952f4f..2a1983324629 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dc_features.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dc_features.h
@@ -29,7 +29,7 @@
#define DC__PRESENT 1
#define DC__PRESENT__1 1
#define DC__NUM_DPP 4
-#define DC__VOLTAGE_STATES 7
+#define DC__VOLTAGE_STATES 9
#define DC__NUM_DPP__4 1
#define DC__NUM_DPP__0_PRESENT 1
#define DC__NUM_DPP__1_PRESENT 1
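
A guard like the following (illustrative, not part of the patch) captures why the bump to 9 is needed: the dcn21 change above writes a duplicated last level at index num_states, one past the 8 populated states:

	#define EXAMPLE_VOLTAGE_STATES	9	/* mirrors DC__VOLTAGE_STATES */
	#define EXAMPLE_DCN21_STATES	8	/* mirrors dcn2_1_soc.num_states */

	_Static_assert(EXAMPLE_VOLTAGE_STATES >= EXAMPLE_DCN21_STATES + 1,
		       "clock_limits[] needs a slot for the duplicated last state");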
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
index dfd3be452766..687010c17324 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
@@ -22,11 +22,12 @@
* Authors: AMD
*
*/
+
+#include "dc_features.h"
+
#ifndef __DISPLAY_MODE_STRUCTS_H__
#define __DISPLAY_MODE_STRUCTS_H__
-#define MAX_CLOCK_LIMIT_STATES 9
-
typedef struct _vcs_dpi_voltage_scaling_st voltage_scaling_st;
typedef struct _vcs_dpi_soc_bounding_box_st soc_bounding_box_st;
typedef struct _vcs_dpi_ip_params_st ip_params_st;
@@ -68,7 +69,7 @@ struct _vcs_dpi_voltage_scaling_st {
};
struct _vcs_dpi_soc_bounding_box_st {
- struct _vcs_dpi_voltage_scaling_st clock_limits[MAX_CLOCK_LIMIT_STATES];
+ struct _vcs_dpi_voltage_scaling_st clock_limits[DC__VOLTAGE_STATES];
unsigned int num_states;
double sr_exit_time_us;
double sr_enter_plus_exit_time_us;
diff --git a/drivers/gpu/drm/amd/display/include/dal_asic_id.h b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
index 8a87d0ed90ae..2359e88d6029 100644
--- a/drivers/gpu/drm/amd/display/include/dal_asic_id.h
+++ b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
@@ -136,6 +136,7 @@
#define RAVEN2_A0 0x81
#define RAVEN1_F0 0xF0
#define RAVEN_UNKNOWN 0xFF
+#define RENOIR_A0 0x91
#ifndef ASICREV_IS_RAVEN
#define ASICREV_IS_RAVEN(eChipRev) ((eChipRev >= RAVEN_A0) && eChipRev < RAVEN_UNKNOWN)
#endif
@@ -171,8 +172,6 @@ enum {
#define ASICREV_IS_NAVI10_P(eChipRev) (eChipRev < NV_NAVI12_P_A0)
#define ASICREV_IS_NAVI12_P(eChipRev) ((eChipRev >= NV_NAVI12_P_A0) && (eChipRev < NV_NAVI14_M_A0))
#define ASICREV_IS_NAVI14_M(eChipRev) ((eChipRev >= NV_NAVI14_M_A0) && (eChipRev < NV_UNKNOWN))
-#define RENOIR_A0 0x91
-#define DEVICE_ID_RENOIR_1636 0x1636 // Renoir
#define ASICREV_IS_RENOIR(eChipRev) ((eChipRev >= RENOIR_A0) && (eChipRev < RAVEN1_F0))
/*
@@ -183,6 +182,9 @@ enum {
#define DEVICE_ID_TEMASH_9839 0x9839
#define DEVICE_ID_TEMASH_983D 0x983D
+/* RENOIR */
+#define DEVICE_ID_RENOIR_1636 0x1636
+
/* Asic Family IDs for different asic family. */
#define FAMILY_CI 120 /* Sea Islands: Hawaii (P), Bonaire (M) */
#define FAMILY_KV 125 /* Fusion => Kaveri: Spectre, Spooky; Kabini: Kalindi */
diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
index 4e542826cd26..c33454a9e0b4 100644
--- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
@@ -734,6 +734,7 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,
{
struct core_freesync *core_freesync = NULL;
unsigned long long nominal_field_rate_in_uhz = 0;
+ unsigned long long rounded_nominal_in_uhz = 0;
unsigned int refresh_range = 0;
unsigned long long min_refresh_in_uhz = 0;
unsigned long long max_refresh_in_uhz = 0;
@@ -750,17 +751,20 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,
min_refresh_in_uhz = in_config->min_refresh_in_uhz;
max_refresh_in_uhz = in_config->max_refresh_in_uhz;
- // Don't allow min > max
- if (min_refresh_in_uhz > max_refresh_in_uhz)
- min_refresh_in_uhz = max_refresh_in_uhz;
-
// Full range may be larger than current video timing, so cap at nominal
if (max_refresh_in_uhz > nominal_field_rate_in_uhz)
max_refresh_in_uhz = nominal_field_rate_in_uhz;
// Full range may be larger than current video timing, so cap at nominal
- if (min_refresh_in_uhz > nominal_field_rate_in_uhz)
- min_refresh_in_uhz = nominal_field_rate_in_uhz;
+ if (min_refresh_in_uhz > max_refresh_in_uhz)
+ min_refresh_in_uhz = max_refresh_in_uhz;
+
+	// If a monitor reports a max refresh of exactly 2x the min, enforce it on the nominal
+ rounded_nominal_in_uhz =
+ div_u64(nominal_field_rate_in_uhz + 50000, 100000) * 100000;
+ if (in_config->max_refresh_in_uhz == (2 * in_config->min_refresh_in_uhz) &&
+ in_config->max_refresh_in_uhz == rounded_nominal_in_uhz)
+ min_refresh_in_uhz = div_u64(nominal_field_rate_in_uhz, 2);
if (!vrr_settings_require_update(core_freesync,
in_config, (unsigned int)min_refresh_in_uhz, (unsigned int)max_refresh_in_uhz,
@@ -792,11 +796,6 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,
refresh_range = in_out_vrr->max_refresh_in_uhz -
in_out_vrr->min_refresh_in_uhz;
- in_out_vrr->btr.margin_in_us = in_out_vrr->max_duration_in_us -
- 2 * in_out_vrr->min_duration_in_us;
- if (in_out_vrr->btr.margin_in_us > BTR_MAX_MARGIN)
- in_out_vrr->btr.margin_in_us = BTR_MAX_MARGIN;
-
in_out_vrr->supported = true;
}
@@ -804,9 +803,14 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,
in_out_vrr->btr.btr_enabled = in_config->btr;
- if (in_out_vrr->max_refresh_in_uhz <
- 2 * in_out_vrr->min_refresh_in_uhz)
+ if (in_out_vrr->max_refresh_in_uhz < (2 * in_out_vrr->min_refresh_in_uhz))
in_out_vrr->btr.btr_enabled = false;
+ else {
+ in_out_vrr->btr.margin_in_us = in_out_vrr->max_duration_in_us -
+ 2 * in_out_vrr->min_duration_in_us;
+ if (in_out_vrr->btr.margin_in_us > BTR_MAX_MARGIN)
+ in_out_vrr->btr.margin_in_us = BTR_MAX_MARGIN;
+ }
in_out_vrr->btr.btr_active = false;
in_out_vrr->btr.inserted_duration_in_us = 0;
@@ -1008,8 +1012,8 @@ unsigned long long mod_freesync_calc_nominal_field_rate(
unsigned int total = stream->timing.h_total * stream->timing.v_total;
/* Calculate nominal field rate for stream, rounded up to nearest integer */
- nominal_field_rate_in_uhz = stream->timing.pix_clk_100hz / 10;
- nominal_field_rate_in_uhz *= 1000ULL * 1000ULL * 1000ULL;
+ nominal_field_rate_in_uhz = stream->timing.pix_clk_100hz;
+ nominal_field_rate_in_uhz *= 100000000ULL;
nominal_field_rate_in_uhz = div_u64(nominal_field_rate_in_uhz, total);
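
A worked example of the math above, as a standalone sketch using CTA-861 1080p60 timing (60 Hz = 60,000,000 uHz), including the 0.1 Hz rounding used by the new 2x-min check:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t pix_clk_100hz = 1485000;    /* 148.5 MHz in 100 Hz units */
		uint64_t total = 2200ULL * 1125;     /* h_total * v_total */

		/* same math as mod_freesync_calc_nominal_field_rate() */
		uint64_t nominal_uhz = pix_clk_100hz * 100000000ULL / total;

		/* rounding to the nearest 0.1 Hz, as in the 2x-min check */
		uint64_t rounded_uhz = (nominal_uhz + 50000) / 100000 * 100000;

		printf("nominal %llu uHz, rounded %llu uHz\n",
		       (unsigned long long)nominal_uhz,
		       (unsigned long long)rounded_uhz);   /* both 60000000 (60 Hz) */
		return 0;
	}

For a 30-60 Hz panel this makes max exactly 2x min and equal to the rounded nominal, so min_refresh_in_uhz is forced to nominal/2.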
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c
index e9fbd94f8635..cc1d3f470b99 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c
@@ -328,8 +328,7 @@ enum mod_hdcp_status mod_hdcp_add_display(struct mod_hdcp *hdcp,
/* add display to connection */
hdcp->connection.link = *link;
*display_container = *display;
- status = mod_hdcp_add_display_to_topology(hdcp, display_container);
-
+ status = mod_hdcp_add_display_to_topology(hdcp, display->index);
if (status != MOD_HDCP_STATUS_SUCCESS)
goto out;
@@ -375,7 +374,7 @@ enum mod_hdcp_status mod_hdcp_remove_display(struct mod_hdcp *hdcp,
status = mod_hdcp_remove_display_from_topology(hdcp, index);
if (status != MOD_HDCP_STATUS_SUCCESS)
goto out;
- memset(display, 0, sizeof(struct mod_hdcp_display));
+ display->state = MOD_HDCP_DISPLAY_INACTIVE;
/* request authentication when connection is not reset */
if (current_state(hdcp) != HDCP_UNINITIALIZED)
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
index 60ff1a0028ac..5cb4546be0ef 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
@@ -328,7 +328,7 @@ void mod_hdcp_dump_binary_message(uint8_t *msg, uint32_t msg_size,
/* psp functions */
enum mod_hdcp_status mod_hdcp_add_display_to_topology(
- struct mod_hdcp *hdcp, struct mod_hdcp_display *display);
+ struct mod_hdcp *hdcp, uint8_t index);
enum mod_hdcp_status mod_hdcp_remove_display_from_topology(
struct mod_hdcp *hdcp, uint8_t index);
enum mod_hdcp_status mod_hdcp_hdcp1_create_session(struct mod_hdcp *hdcp);
@@ -503,6 +503,11 @@ static inline uint8_t is_display_active(struct mod_hdcp_display *display)
return display->state >= MOD_HDCP_DISPLAY_ACTIVE;
}
+static inline uint8_t is_display_added(struct mod_hdcp_display *display)
+{
+ return display->state >= MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED;
+}
+
static inline uint8_t is_display_encryption_enabled(struct mod_hdcp_display *display)
{
return display->state >= MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED;
@@ -510,23 +515,34 @@ static inline uint8_t is_display_encryption_enabled(struct mod_hdcp_display *dis
static inline uint8_t get_active_display_count(struct mod_hdcp *hdcp)
{
- uint8_t active_count = 0;
+ uint8_t added_count = 0;
uint8_t i;
for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++)
if (is_display_active(&hdcp->displays[i]))
- active_count++;
- return active_count;
+ added_count++;
+ return added_count;
+}
+
+static inline uint8_t get_added_display_count(struct mod_hdcp *hdcp)
+{
+ uint8_t added_count = 0;
+ uint8_t i;
+
+ for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++)
+ if (is_display_added(&hdcp->displays[i]))
+ added_count++;
+ return added_count;
}
-static inline struct mod_hdcp_display *get_first_active_display(
+static inline struct mod_hdcp_display *get_first_added_display(
struct mod_hdcp *hdcp)
{
uint8_t i;
struct mod_hdcp_display *display = NULL;
for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++)
- if (is_display_active(&hdcp->displays[i])) {
+ if (is_display_added(&hdcp->displays[i])) {
display = &hdcp->displays[i];
break;
}
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
index f244b72e74e0..37c8c05497d6 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
@@ -129,7 +129,7 @@ static inline uint8_t get_device_count(struct mod_hdcp *hdcp)
static inline enum mod_hdcp_status check_device_count(struct mod_hdcp *hdcp)
{
/* device count must be greater than or equal to tracked hdcp displays */
- return (get_device_count(hdcp) < get_active_display_count(hdcp)) ?
+ return (get_device_count(hdcp) < get_added_display_count(hdcp)) ?
MOD_HDCP_STATUS_HDCP1_DEVICE_COUNT_MISMATCH_FAILURE :
MOD_HDCP_STATUS_SUCCESS;
}
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c
index 549c113abcf7..491c00f48026 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c
@@ -208,7 +208,7 @@ static inline uint8_t get_device_count(struct mod_hdcp *hdcp)
static enum mod_hdcp_status check_device_count(struct mod_hdcp *hdcp)
{
/* device count must be greater than or equal to tracked hdcp displays */
- return (get_device_count(hdcp) < get_active_display_count(hdcp)) ?
+ return (get_device_count(hdcp) < get_added_display_count(hdcp)) ?
MOD_HDCP_STATUS_HDCP2_DEVICE_COUNT_MISMATCH_FAILURE :
MOD_HDCP_STATUS_SUCCESS;
}
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
index 836e47954938..c2929815c3ee 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
@@ -54,7 +54,7 @@ enum mod_hdcp_status mod_hdcp_remove_display_from_topology(
dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.dtm_shared_buf;
- if (!display || !is_display_active(display))
+ if (!display || !is_display_added(display))
return MOD_HDCP_STATUS_DISPLAY_NOT_FOUND;
memset(dtm_cmd, 0, sizeof(struct ta_dtm_shared_memory));
@@ -73,21 +73,25 @@ enum mod_hdcp_status mod_hdcp_remove_display_from_topology(
HDCP_TOP_REMOVE_DISPLAY_TRACE(hdcp, display->index);
return MOD_HDCP_STATUS_SUCCESS;
}

-enum mod_hdcp_status mod_hdcp_add_display_to_topology(
-	struct mod_hdcp *hdcp, struct mod_hdcp_display *display)
+enum mod_hdcp_status mod_hdcp_add_display_to_topology(struct mod_hdcp *hdcp,
+		uint8_t index)
{
struct psp_context *psp = hdcp->config.psp.handle;
struct ta_dtm_shared_memory *dtm_cmd;
+ struct mod_hdcp_display *display =
+ get_active_display_at_index(hdcp, index);
struct mod_hdcp_link *link = &hdcp->connection.link;
if (!psp->dtm_context.dtm_initialized) {
DRM_ERROR("Failed to add display topology, DTM TA is not initialized.");
- display->state = MOD_HDCP_DISPLAY_INACTIVE;
return MOD_HDCP_STATUS_FAILURE;
}
+ if (!display || is_display_added(display))
+ return MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE;
+
dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.dtm_shared_buf;
memset(dtm_cmd, 0, sizeof(struct ta_dtm_shared_memory));
@@ -109,11 +113,10 @@ enum mod_hdcp_status mod_hdcp_add_display_to_topology(
psp_dtm_invoke(psp, dtm_cmd->cmd_id);
- if (dtm_cmd->dtm_status != TA_DTM_STATUS__SUCCESS) {
- display->state = MOD_HDCP_DISPLAY_INACTIVE;
+ if (dtm_cmd->dtm_status != TA_DTM_STATUS__SUCCESS)
return MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE;
- }
+ display->state = MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED;
HDCP_TOP_ADD_DISPLAY_TRACE(hdcp, display->index);
return MOD_HDCP_STATUS_SUCCESS;
@@ -123,7 +126,7 @@ enum mod_hdcp_status mod_hdcp_hdcp1_create_session(struct mod_hdcp *hdcp)
{
struct psp_context *psp = hdcp->config.psp.handle;
- struct mod_hdcp_display *display = get_first_active_display(hdcp);
+ struct mod_hdcp_display *display = get_first_added_display(hdcp);
struct ta_hdcp_shared_memory *hdcp_cmd;
if (!psp->hdcp_context.hdcp_initialized) {
@@ -176,7 +179,7 @@ enum mod_hdcp_status mod_hdcp_hdcp1_destroy_session(struct mod_hdcp *hdcp)
if (is_display_encryption_enabled(
&hdcp->displays[i])) {
hdcp->displays[i].state =
- MOD_HDCP_DISPLAY_ACTIVE;
+ MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED;
HDCP_HDCP1_DISABLED_TRACE(hdcp,
hdcp->displays[i].index);
}
@@ -228,7 +231,7 @@ enum mod_hdcp_status mod_hdcp_hdcp1_enable_encryption(struct mod_hdcp *hdcp)
{
struct psp_context *psp = hdcp->config.psp.handle;
struct ta_hdcp_shared_memory *hdcp_cmd;
- struct mod_hdcp_display *display = get_first_active_display(hdcp);
+ struct mod_hdcp_display *display = get_first_added_display(hdcp);
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
@@ -298,7 +301,8 @@ enum mod_hdcp_status mod_hdcp_hdcp1_enable_dp_stream_encryption(struct mod_hdcp
for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) {
- if (hdcp->displays[i].adjust.disable)
+ if (hdcp->displays[i].state != MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED ||
+ hdcp->displays[i].adjust.disable)
continue;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
@@ -360,7 +364,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_create_session(struct mod_hdcp *hdcp)
{
struct psp_context *psp = hdcp->config.psp.handle;
struct ta_hdcp_shared_memory *hdcp_cmd;
- struct mod_hdcp_display *display = get_first_active_display(hdcp);
+ struct mod_hdcp_display *display = get_first_added_display(hdcp);
if (!psp->hdcp_context.hdcp_initialized) {
DRM_ERROR("Failed to create hdcp session, HDCP TA is not initialized");
@@ -419,7 +423,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_destroy_session(struct mod_hdcp *hdcp)
if (is_display_encryption_enabled(
&hdcp->displays[i])) {
hdcp->displays[i].state =
- MOD_HDCP_DISPLAY_ACTIVE;
+ MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED;
HDCP_HDCP2_DISABLED_TRACE(hdcp,
hdcp->displays[i].index);
}
@@ -658,7 +662,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_enable_encryption(struct mod_hdcp *hdcp)
{
struct psp_context *psp = hdcp->config.psp.handle;
struct ta_hdcp_shared_memory *hdcp_cmd;
- struct mod_hdcp_display *display = get_first_active_display(hdcp);
+ struct mod_hdcp_display *display = get_first_added_display(hdcp);
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
@@ -743,7 +747,8 @@ enum mod_hdcp_status mod_hdcp_hdcp2_enable_dp_stream_encryption(struct mod_hdcp
for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) {
- if (hdcp->displays[i].adjust.disable)
+ if (hdcp->displays[i].state != MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED ||
+ hdcp->displays[i].adjust.disable)
continue;
hdcp_cmd->in_msg.hdcp2_enable_dp_stream_encryption.display_handle = hdcp->displays[i].index;
hdcp_cmd->in_msg.hdcp2_enable_dp_stream_encryption.session_handle = hdcp->auth.id;
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
index eae9309cfb24..c088602bc1a0 100644
--- a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
@@ -117,6 +117,7 @@ enum mod_hdcp_operation_mode {
enum mod_hdcp_display_state {
MOD_HDCP_DISPLAY_INACTIVE = 0,
MOD_HDCP_DISPLAY_ACTIVE,
+ MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED,
MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED
};
diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
index c195575366a3..2a12614a12c2 100644
--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
@@ -1452,7 +1452,8 @@ static int pp_get_asic_baco_state(void *handle, int *state)
if (!hwmgr)
return -EINVAL;
- if (!hwmgr->pm_en || !hwmgr->hwmgr_func->get_asic_baco_state)
+ if (!(hwmgr->not_vf && amdgpu_dpm) ||
+ !hwmgr->hwmgr_func->get_asic_baco_state)
return 0;
mutex_lock(&hwmgr->smu_lock);
diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
index f6d4b0ef46ad..e8b27fab6aa1 100644
--- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
@@ -1154,6 +1154,21 @@ static int smu_smc_table_hw_init(struct smu_context *smu,
}
}
}
+
+ if (smu->ppt_funcs->set_power_source) {
+ /*
+		 * For Navi1X, manually sync the power source with the
+		 * platform, as the PMFW may boot in DC mode.
+ */
+ if (adev->pm.ac_power)
+ ret = smu_set_power_source(smu, SMU_POWER_SOURCE_AC);
+ else
+ ret = smu_set_power_source(smu, SMU_POWER_SOURCE_DC);
+ if (ret) {
+ pr_err("Failed to switch to %s mode!\n", adev->pm.ac_power ? "AC" : "DC");
+ return ret;
+ }
+ }
}
if (adev->asic_type != CHIP_ARCTURUS) {
ret = smu_notify_display_change(smu);
@@ -2072,6 +2087,29 @@ int smu_set_watermarks_for_clock_ranges(struct smu_context *smu,
return 0;
}
+int smu_set_ac_dc(struct smu_context *smu)
+{
+ int ret = 0;
+
+ /* controlled by firmware */
+ if (smu->dc_controlled_by_gpio)
+ return 0;
+
+ mutex_lock(&smu->mutex);
+ if (smu->ppt_funcs->set_power_source) {
+ if (smu->adev->pm.ac_power)
+ ret = smu_set_power_source(smu, SMU_POWER_SOURCE_AC);
+ else
+ ret = smu_set_power_source(smu, SMU_POWER_SOURCE_DC);
+ if (ret)
+ pr_err("Failed to switch to %s mode!\n",
+ smu->adev->pm.ac_power ? "AC" : "DC");
+ }
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
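
A hypothetical call site for the new helper, propagating a platform AC/DC event to the SMU; the handler name is a placeholder, while smu_set_ac_dc(), is_support_sw_smu() and adev->pm.ac_power are real symbols:

	static void example_power_source_changed(struct amdgpu_device *adev, bool on_ac)
	{
		adev->pm.ac_power = on_ac;

		if (is_support_sw_smu(adev))
			smu_set_ac_dc(&adev->smu);   /* no-op when dc_controlled_by_gpio */
	}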
const struct amd_ip_funcs smu_ip_funcs = {
.name = "smu",
.early_init = smu_early_init,
diff --git a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
index c6d3bef15320..1ef0923f7190 100644
--- a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
@@ -35,6 +35,7 @@
#include "arcturus_ppt.h"
#include "smu_v11_0_pptable.h"
#include "arcturus_ppsmc.h"
+#include "nbio/nbio_7_4_offset.h"
#include "nbio/nbio_7_4_sh_mask.h"
#include "amdgpu_xgmi.h"
#include <linux/i2c.h>
@@ -793,8 +794,21 @@ static int arcturus_force_clk_levels(struct smu_context *smu,
struct arcturus_dpm_table *dpm_table;
struct arcturus_single_dpm_table *single_dpm_table;
uint32_t soft_min_level, soft_max_level;
+ uint32_t smu_version;
int ret = 0;
+ ret = smu_get_smc_version(smu, NULL, &smu_version);
+ if (ret) {
+ pr_err("Failed to get smu version!\n");
+ return ret;
+ }
+
+ if (smu_version >= 0x361200) {
+		pr_err("Forcing clock level is not supported with "
+			"SMU firmware 54.18 and onwards\n");
+ return -EOPNOTSUPP;
+ }
+
soft_min_level = mask ? (ffs(mask) - 1) : 0;
soft_max_level = mask ? (fls(mask) - 1) : 0;
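
The 0x361200 cutoff packs the firmware version as major/minor/patch bytes (an SMU11 convention, assumed here rather than stated by the patch); a standalone decode:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t v = 0x361200;

		printf("SMU firmware %u.%u.%u\n",     /* prints 54.18.0 */
		       (v >> 16) & 0xff, (v >> 8) & 0xff, v & 0xff);
		return 0;
	}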
@@ -1511,6 +1525,38 @@ static int arcturus_set_power_profile_mode(struct smu_context *smu,
return 0;
}
+static int arcturus_set_performance_level(struct smu_context *smu,
+ enum amd_dpm_forced_level level)
+{
+ uint32_t smu_version;
+ int ret;
+
+ ret = smu_get_smc_version(smu, NULL, &smu_version);
+ if (ret) {
+ pr_err("Failed to get smu version!\n");
+ return ret;
+ }
+
+ switch (level) {
+ case AMD_DPM_FORCED_LEVEL_HIGH:
+ case AMD_DPM_FORCED_LEVEL_LOW:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
+ if (smu_version >= 0x361200) {
+			pr_err("Forcing clock level is not supported with "
+				"SMU firmware 54.18 and onwards\n");
+ return -EOPNOTSUPP;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return smu_v11_0_set_performance_level(smu, level);
+}
+
static void arcturus_dump_pptable(struct smu_context *smu)
{
struct smu_table_context *table_context = &smu->smu_table;
@@ -2210,6 +2256,18 @@ static void arcturus_i2c_eeprom_control_fini(struct i2c_adapter *control)
i2c_del_adapter(control);
}
+static bool arcturus_is_baco_supported(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t val;
+
+ if (!smu_v11_0_baco_is_support(smu))
+ return false;
+
+ val = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP0);
+ return (val & RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK) ? true : false;
+}
+
static uint32_t arcturus_get_pptable_power_limit(struct smu_context *smu)
{
PPTable_t *pptable = smu->smu_table.driver_pptable;
@@ -2272,7 +2330,7 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
.get_profiling_clk_mask = arcturus_get_profiling_clk_mask,
.get_power_profile_mode = arcturus_get_power_profile_mode,
.set_power_profile_mode = arcturus_set_power_profile_mode,
- .set_performance_level = smu_v11_0_set_performance_level,
+ .set_performance_level = arcturus_set_performance_level,
/* debug (internal used) */
.dump_pptable = arcturus_dump_pptable,
.get_power_limit = arcturus_get_power_limit,
@@ -2321,7 +2379,7 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
.register_irq_handler = smu_v11_0_register_irq_handler,
.set_azalia_d3_pme = smu_v11_0_set_azalia_d3_pme,
.get_max_sustainable_clocks_by_dc = smu_v11_0_get_max_sustainable_clocks_by_dc,
- .baco_is_support= smu_v11_0_baco_is_support,
+	.baco_is_support = arcturus_is_baco_supported,
.baco_get_state = smu_v11_0_baco_get_state,
.baco_set_state = smu_v11_0_baco_set_state,
.baco_enter = smu_v11_0_baco_enter,
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index 7740488999df..4795eb66b2b2 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -3804,9 +3804,12 @@ static int smu7_trim_single_dpm_states(struct pp_hwmgr *hwmgr,
{
uint32_t i;
+ /* force the trim if mclk_switching is disabled to prevent flicker */
+ bool force_trim = (low_limit == high_limit);
for (i = 0; i < dpm_table->count; i++) {
/*skip the trim if od is enabled*/
- if (!hwmgr->od_enabled && (dpm_table->dpm_levels[i].value < low_limit
+ if ((!hwmgr->od_enabled || force_trim)
+ && (dpm_table->dpm_levels[i].value < low_limit
|| dpm_table->dpm_levels[i].value > high_limit))
dpm_table->dpm_levels[i].enabled = false;
else
diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
index 657a6f17e91f..ae2c318dd6fa 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
@@ -408,6 +408,7 @@ struct smu_context
uint32_t smc_if_version;
bool uploading_custom_pp_table;
+ bool dc_controlled_by_gpio;
};
struct i2c_adapter;
@@ -570,6 +571,7 @@ struct pptable_funcs {
int (*override_pcie_parameters)(struct smu_context *smu);
uint32_t (*get_pptable_power_limit)(struct smu_context *smu);
int (*disable_umc_cdr_12gbps_workaround)(struct smu_context *smu);
+ int (*set_power_source)(struct smu_context *smu, enum smu_power_src_type power_src);
};
int smu_load_microcode(struct smu_context *smu);
@@ -718,6 +720,7 @@ int smu_get_dpm_level_range(struct smu_context *smu, enum smu_clk_type clk_type,
enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu);
int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level);
int smu_set_display_count(struct smu_context *smu, uint32_t count);
+int smu_set_ac_dc(struct smu_context *smu);
bool smu_clk_dpm_is_enabled(struct smu_context *smu, enum smu_clk_type clk_type);
const char *smu_get_message_name(struct smu_context *smu, enum smu_message_type type);
const char *smu_get_feature_name(struct smu_context *smu, enum smu_feature_mask feature);
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
index 1c88219fe403..674e426ed59b 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
@@ -267,4 +267,7 @@ uint32_t smu_v11_0_get_max_power_limit(struct smu_context *smu);
int smu_v11_0_set_performance_level(struct smu_context *smu,
enum amd_dpm_forced_level level);
+int smu_v11_0_set_power_source(struct smu_context *smu,
+ enum smu_power_src_type power_src);
+
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
index d66dfa7410b6..15030284b444 100644
--- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
@@ -28,13 +28,15 @@
#include "smu_internal.h"
#include "atomfirmware.h"
#include "amdgpu_atomfirmware.h"
+#include "soc15_common.h"
#include "smu_v11_0.h"
#include "smu11_driver_if_navi10.h"
#include "atom.h"
#include "navi10_ppt.h"
#include "smu_v11_0_pptable.h"
#include "smu_v11_0_ppsmc.h"
-#include "nbio/nbio_7_4_sh_mask.h"
+#include "nbio/nbio_2_3_offset.h"
+#include "nbio/nbio_2_3_sh_mask.h"
#include "asic_reg/mp/mp_11_0_sh_mask.h"
@@ -347,7 +349,6 @@ navi10_get_allowed_feature_mask(struct smu_context *smu,
| FEATURE_MASK(FEATURE_DS_DCEFCLK_BIT)
| FEATURE_MASK(FEATURE_FW_DSTATE_BIT)
| FEATURE_MASK(FEATURE_BACO_BIT)
- | FEATURE_MASK(FEATURE_ACDC_BIT)
| FEATURE_MASK(FEATURE_GFX_SS_BIT)
| FEATURE_MASK(FEATURE_APCC_DFLL_BIT)
| FEATURE_MASK(FEATURE_FW_CTF_BIT)
@@ -391,6 +392,9 @@ navi10_get_allowed_feature_mask(struct smu_context *smu,
if (smu->adev->pg_flags & AMD_PG_SUPPORT_JPEG)
*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_JPEG_PG_BIT);
+ if (smu->dc_controlled_by_gpio)
+ *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_ACDC_BIT);
+
/* disable DPM UCLK and DS SOCCLK on navi10 A0 secure board */
if (is_asic_secure(smu)) {
/* only for navi10 A0 */
@@ -525,6 +529,9 @@ static int navi10_store_powerplay_table(struct smu_context *smu)
table_context->thermal_controller_type = powerplay_table->thermal_controller_type;
+ if (powerplay_table->platform_caps & SMU_11_0_PP_PLATFORM_CAP_HARDWAREDC)
+ smu->dc_controlled_by_gpio = true;
+
mutex_lock(&smu_baco->mutex);
if (powerplay_table->platform_caps & SMU_11_0_PP_PLATFORM_CAP_BACO ||
powerplay_table->platform_caps & SMU_11_0_PP_PLATFORM_CAP_MACO)
@@ -1980,6 +1987,18 @@ static int navi10_setup_od_limits(struct smu_context *smu) {
return 0;
}
+static bool navi10_is_baco_supported(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t val;
+
+ if (!smu_v11_0_baco_is_support(smu))
+ return false;
+
+ val = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP0);
+ return (val & RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK) ? true : false;
+}
+
static int navi10_set_default_od_settings(struct smu_context *smu, bool initialize) {
OverDriveTable_t *od_table, *boot_od_table;
int ret = 0;
@@ -2356,7 +2375,7 @@ static const struct pptable_funcs navi10_ppt_funcs = {
.register_irq_handler = smu_v11_0_register_irq_handler,
.set_azalia_d3_pme = smu_v11_0_set_azalia_d3_pme,
.get_max_sustainable_clocks_by_dc = smu_v11_0_get_max_sustainable_clocks_by_dc,
- .baco_is_support= smu_v11_0_baco_is_support,
+	.baco_is_support = navi10_is_baco_supported,
.baco_get_state = smu_v11_0_baco_get_state,
.baco_set_state = smu_v11_0_baco_set_state,
.baco_enter = smu_v11_0_baco_enter,
@@ -2369,6 +2388,7 @@ static const struct pptable_funcs navi10_ppt_funcs = {
.get_pptable_power_limit = navi10_get_pptable_power_limit,
.run_btc = navi10_run_btc,
.disable_umc_cdr_12gbps_workaround = navi10_disable_umc_cdr_12gbps_workaround,
+ .set_power_source = smu_v11_0_set_power_source,
};
void navi10_set_ppt_funcs(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
index 7bf52ecba01d..ff73a735b888 100644
--- a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
@@ -239,6 +239,7 @@ static int renoir_print_clk_levels(struct smu_context *smu,
uint32_t cur_value = 0, value = 0, count = 0, min = 0, max = 0;
DpmClocks_t *clk_table = smu->smu_table.clocks_table;
SmuMetrics_t metrics;
+ bool cur_value_match_level = false;
if (!clk_table || clk_type >= SMU_CLK_COUNT)
return -EINVAL;
@@ -297,8 +298,13 @@ static int renoir_print_clk_levels(struct smu_context *smu,
GET_DPM_CUR_FREQ(clk_table, clk_type, i, value);
size += sprintf(buf + size, "%d: %uMhz %s\n", i, value,
cur_value == value ? "*" : "");
+ if (cur_value == value)
+ cur_value_match_level = true;
}
+ if (!cur_value_match_level)
+ size += sprintf(buf + size, " %uMhz *\n", cur_value);
+
return size;
}
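
With the fallback added above, the clock listing renders like this when the current clock sits between table levels (values illustrative):

	0: 400Mhz
	1: 933Mhz
	2: 1067Mhz
	   687Mhz *

The indented trailing line marks a current clock that matches no DPM level.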
@@ -887,6 +893,17 @@ static int renoir_read_sensor(struct smu_context *smu,
return ret;
}
+static bool renoir_is_dpm_running(struct smu_context *smu)
+{
+ /*
+	 * Until now, the PMFW hasn't exported the SMU feature mask
+	 * interface for APU SKUs, so just force all features on at
+	 * the early initialization stage.
+	 */
+	return true;
+}
+
static const struct pptable_funcs renoir_ppt_funcs = {
.get_smu_msg_index = renoir_get_smu_msg_index,
.get_smu_clk_index = renoir_get_smu_clk_index,
@@ -927,6 +944,7 @@ static const struct pptable_funcs renoir_ppt_funcs = {
.mode2_reset = smu_v12_0_mode2_reset,
.set_soft_freq_limited_range = smu_v12_0_set_soft_freq_limited_range,
.set_driver_table_location = smu_v12_0_set_driver_table_location,
+ .is_dpm_running = renoir_is_dpm_running,
};
void renoir_set_ppt_funcs(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/powerplay/renoir_ppt.h b/drivers/gpu/drm/amd/powerplay/renoir_ppt.h
index 2a390ddd37dd..89cd6da118a3 100644
--- a/drivers/gpu/drm/amd/powerplay/renoir_ppt.h
+++ b/drivers/gpu/drm/amd/powerplay/renoir_ppt.h
@@ -37,7 +37,7 @@ extern void renoir_set_ppt_funcs(struct smu_context *smu);
freq = table->SocClocks[dpm_level].Freq; \
break; \
case SMU_MCLK: \
- freq = table->MemClocks[dpm_level].Freq; \
+ freq = table->FClocks[dpm_level].Freq; \
break; \
case SMU_DCEFCLK: \
freq = table->DcfClocks[dpm_level].Freq; \
diff --git a/drivers/gpu/drm/amd/powerplay/smu_internal.h b/drivers/gpu/drm/amd/powerplay/smu_internal.h
index 6900877de845..40c35bcc5a0a 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_internal.h
+++ b/drivers/gpu/drm/amd/powerplay/smu_internal.h
@@ -211,4 +211,7 @@ static inline int smu_send_smc_msg(struct smu_context *smu, enum smu_message_typ
#define smu_disable_umc_cdr_12gbps_workaround(smu) \
((smu)->ppt_funcs->disable_umc_cdr_12gbps_workaround ? (smu)->ppt_funcs->disable_umc_cdr_12gbps_workaround((smu)) : 0)
+#define smu_set_power_source(smu, power_src) \
+ ((smu)->ppt_funcs->set_power_source ? (smu)->ppt_funcs->set_power_source((smu), (power_src)) : 0)
+
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
index 4fd77c7cfc80..655ba4fb05dc 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
@@ -42,8 +42,6 @@
#include "asic_reg/thm/thm_11_0_2_sh_mask.h"
#include "asic_reg/mp/mp_11_0_offset.h"
#include "asic_reg/mp/mp_11_0_sh_mask.h"
-#include "asic_reg/nbio/nbio_7_4_offset.h"
-#include "asic_reg/nbio/nbio_7_4_sh_mask.h"
#include "asic_reg/smuio/smuio_11_0_0_offset.h"
#include "asic_reg/smuio/smuio_11_0_0_sh_mask.h"
@@ -1525,6 +1523,13 @@ int smu_v11_0_set_xgmi_pstate(struct smu_context *smu,
return ret;
}
+static int smu_v11_0_ack_ac_dc_interrupt(struct smu_context *smu)
+{
+ return smu_send_smc_msg(smu,
+ SMU_MSG_ReenableAcDcInterrupt,
+ NULL);
+}
+
#define THM_11_0__SRCID__THM_DIG_THERM_L2H 0 /* ASIC_TEMP > CG_THERMAL_INT.DIG_THERM_INTH */
#define THM_11_0__SRCID__THM_DIG_THERM_H2L 1 /* ASIC_TEMP < CG_THERMAL_INT.DIG_THERM_INTL */
@@ -1558,6 +1563,9 @@ static int smu_v11_0_irq_process(struct amdgpu_device *adev,
break;
}
+ } else if (client_id == SOC15_IH_CLIENTID_MP1) {
+ if (src_id == 0xfe)
+ smu_v11_0_ack_ac_dc_interrupt(&adev->smu);
}
return 0;
@@ -1597,6 +1605,12 @@ int smu_v11_0_register_irq_handler(struct smu_context *smu)
if (ret)
return ret;
+ ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_MP1,
+ 0xfe,
+ irq_src);
+ if (ret)
+ return ret;
+
return ret;
}
@@ -1646,9 +1660,7 @@ static int smu_v11_0_baco_set_armd3_sequence(struct smu_context *smu, enum smu_v
bool smu_v11_0_baco_is_support(struct smu_context *smu)
{
- struct amdgpu_device *adev = smu->adev;
struct smu_baco_context *smu_baco = &smu->smu_baco;
- uint32_t val;
bool baco_support;
mutex_lock(&smu_baco->mutex);
@@ -1663,11 +1675,7 @@ bool smu_v11_0_baco_is_support(struct smu_context *smu)
!smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT))
return false;
- val = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP0);
- if (val & RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK)
- return true;
-
- return false;
+ return true;
}
enum smu_baco_state smu_v11_0_baco_get_state(struct smu_context *smu)
@@ -1684,11 +1692,9 @@ enum smu_baco_state smu_v11_0_baco_get_state(struct smu_context *smu)
int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state)
{
-
struct smu_baco_context *smu_baco = &smu->smu_baco;
struct amdgpu_device *adev = smu->adev;
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
- uint32_t bif_doorbell_intr_cntl;
uint32_t data;
int ret = 0;
@@ -1697,14 +1703,7 @@ int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state)
mutex_lock(&smu_baco->mutex);
- bif_doorbell_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL);
-
if (state == SMU_BACO_STATE_ENTER) {
- bif_doorbell_intr_cntl = REG_SET_FIELD(bif_doorbell_intr_cntl,
- BIF_DOORBELL_INT_CNTL,
- DOORBELL_INTERRUPT_DISABLE, 1);
- WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl);
-
if (!ras || !ras->supported) {
data = RREG32_SOC15(THM, 0, mmTHM_BACO_CNTL);
data |= 0x80000000;
@@ -1719,10 +1718,11 @@ int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state)
if (ret)
goto out;
- bif_doorbell_intr_cntl = REG_SET_FIELD(bif_doorbell_intr_cntl,
- BIF_DOORBELL_INT_CNTL,
- DOORBELL_INTERRUPT_DISABLE, 0);
- WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl);
+ if (ras && ras->supported) {
+ ret = smu_send_smc_msg(smu, SMU_MSG_PrepareMp1ForUnload, NULL);
+ if (ret)
+ goto out;
+ }
/* clear vbios scratch 6 and 7 for coming asic reinit */
WREG32(adev->bios_scratch_reg_offset + 6, 0);
@@ -1939,3 +1939,18 @@ int smu_v11_0_set_performance_level(struct smu_context *smu,
return ret;
}
+int smu_v11_0_set_power_source(struct smu_context *smu,
+ enum smu_power_src_type power_src)
+{
+ int pwr_source;
+
+ pwr_source = smu_power_get_index(smu, (uint32_t)power_src);
+ if (pwr_source < 0)
+ return -EINVAL;
+
+ return smu_send_smc_msg_with_param(smu,
+ SMU_MSG_NotifyPowerSource,
+ pwr_source,
+ NULL);
+}
+
diff --git a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
index 49ff3756bd9f..3f1044326dcb 100644
--- a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
@@ -35,6 +35,7 @@
#include "vega20_ppt.h"
#include "vega20_pptable.h"
#include "vega20_ppsmc.h"
+#include "nbio/nbio_7_4_offset.h"
#include "nbio/nbio_7_4_sh_mask.h"
#include "asic_reg/thm/thm_11_0_2_offset.h"
#include "asic_reg/thm/thm_11_0_2_sh_mask.h"
@@ -3174,6 +3175,17 @@ static int vega20_update_pcie_parameters(struct smu_context *smu,
return ret;
}
+static bool vega20_is_baco_supported(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t val;
+
+ if (!smu_v11_0_baco_is_support(smu))
+ return false;
+
+ val = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP0);
+ return (val & RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK) ? true : false;
+}
static const struct pptable_funcs vega20_ppt_funcs = {
.tables_init = vega20_tables_init,
@@ -3262,7 +3274,7 @@ static const struct pptable_funcs vega20_ppt_funcs = {
.register_irq_handler = smu_v11_0_register_irq_handler,
.set_azalia_d3_pme = smu_v11_0_set_azalia_d3_pme,
.get_max_sustainable_clocks_by_dc = smu_v11_0_get_max_sustainable_clocks_by_dc,
- .baco_is_support= smu_v11_0_baco_is_support,
+	.baco_is_support = vega20_is_baco_supported,
.baco_get_state = smu_v11_0_baco_get_state,
.baco_set_state = smu_v11_0_baco_set_state,
.baco_enter = smu_v11_0_baco_enter,
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_drv.c b/drivers/gpu/drm/arm/display/komeda/komeda_drv.c
index ea5cd1e17304..e7933930a657 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_drv.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_drv.c
@@ -146,14 +146,14 @@ static const struct of_device_id komeda_of_match[] = {
MODULE_DEVICE_TABLE(of, komeda_of_match);
-static int komeda_rt_pm_suspend(struct device *dev)
+static int __maybe_unused komeda_rt_pm_suspend(struct device *dev)
{
struct komeda_drv *mdrv = dev_get_drvdata(dev);
return komeda_dev_suspend(mdrv->mdev);
}
-static int komeda_rt_pm_resume(struct device *dev)
+static int __maybe_unused komeda_rt_pm_resume(struct device *dev)
{
struct komeda_drv *mdrv = dev_get_drvdata(dev);
diff --git a/drivers/gpu/drm/bochs/bochs_hw.c b/drivers/gpu/drm/bochs/bochs_hw.c
index 952199cc0462..dce4672e3fc8 100644
--- a/drivers/gpu/drm/bochs/bochs_hw.c
+++ b/drivers/gpu/drm/bochs/bochs_hw.c
@@ -157,10 +157,8 @@ int bochs_hw_init(struct drm_device *dev)
size = min(size, mem);
}
- if (pci_request_region(pdev, 0, "bochs-drm") != 0) {
- DRM_ERROR("Cannot request framebuffer\n");
- return -EBUSY;
- }
+ if (pci_request_region(pdev, 0, "bochs-drm") != 0)
+ DRM_WARN("Cannot request framebuffer, boot fb still active?\n");
bochs->fb_map = ioremap(addr, size);
if (bochs->fb_map == NULL) {
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
index 9ded2cef57dd..76736fb8ed94 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
@@ -1652,8 +1652,7 @@ static ssize_t analogix_dpaux_transfer(struct drm_dp_aux *aux,
}
struct analogix_dp_device *
-analogix_dp_bind(struct device *dev, struct drm_device *drm_dev,
- struct analogix_dp_plat_data *plat_data)
+analogix_dp_probe(struct device *dev, struct analogix_dp_plat_data *plat_data)
{
struct platform_device *pdev = to_platform_device(dev);
struct analogix_dp_device *dp;
@@ -1756,22 +1755,30 @@ analogix_dp_bind(struct device *dev, struct drm_device *drm_dev,
irq_flags, "analogix-dp", dp);
if (ret) {
dev_err(&pdev->dev, "failed to request irq\n");
- goto err_disable_pm_runtime;
+ return ERR_PTR(ret);
}
disable_irq(dp->irq);
+ return dp;
+}
+EXPORT_SYMBOL_GPL(analogix_dp_probe);
+
+int analogix_dp_bind(struct analogix_dp_device *dp, struct drm_device *drm_dev)
+{
+ int ret;
+
dp->drm_dev = drm_dev;
dp->encoder = dp->plat_data->encoder;
dp->aux.name = "DP-AUX";
dp->aux.transfer = analogix_dpaux_transfer;
- dp->aux.dev = &pdev->dev;
+ dp->aux.dev = dp->dev;
ret = drm_dp_aux_register(&dp->aux);
if (ret)
- return ERR_PTR(ret);
+ return ret;
- pm_runtime_enable(dev);
+ pm_runtime_enable(dp->dev);
ret = analogix_dp_create_bridge(drm_dev, dp);
if (ret) {
@@ -1779,13 +1786,12 @@ analogix_dp_bind(struct device *dev, struct drm_device *drm_dev,
goto err_disable_pm_runtime;
}
- return dp;
+ return 0;
err_disable_pm_runtime:
-	pm_runtime_disable(dev);
-
-	return ERR_PTR(ret);
+	pm_runtime_disable(dp->dev);
+
+	return ret;
}
EXPORT_SYMBOL_GPL(analogix_dp_bind);
@@ -1802,10 +1808,15 @@ void analogix_dp_unbind(struct analogix_dp_device *dp)
drm_dp_aux_unregister(&dp->aux);
pm_runtime_disable(dp->dev);
- clk_disable_unprepare(dp->clock);
}
EXPORT_SYMBOL_GPL(analogix_dp_unbind);
+void analogix_dp_remove(struct analogix_dp_device *dp)
+{
+ clk_disable_unprepare(dp->clock);
+}
+EXPORT_SYMBOL_GPL(analogix_dp_remove);
+
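
A hypothetical consumer of the split probe/bind API above; the driver, struct and ops names are placeholders (the in-tree users are the Exynos and Rockchip DP drivers). Resources are claimed at probe time without a drm_device; registration happens at component-bind time:

	static int example_dp_probe(struct platform_device *pdev)
	{
		struct example_dp *ctx = dev_get_drvdata(&pdev->dev);

		ctx->adp = analogix_dp_probe(&pdev->dev, &ctx->plat_data);
		if (IS_ERR(ctx->adp))
			return PTR_ERR(ctx->adp);

		return component_add(&pdev->dev, &example_dp_component_ops);
	}

	static int example_dp_bind(struct device *dev, struct device *master,
				   void *data)
	{
		struct example_dp *ctx = dev_get_drvdata(dev);

		return analogix_dp_bind(ctx->adp, data);   /* data is the drm_device */
	}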
#ifdef CONFIG_PM
int analogix_dp_suspend(struct analogix_dp_device *dp)
{
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
index ec3b06433d98..30681398cfb0 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
@@ -1659,28 +1659,34 @@ static void hdmi_config_AVI(struct dw_hdmi *hdmi, struct drm_display_mode *mode)
frame.colorspace = HDMI_COLORSPACE_RGB;
/* Set up colorimetry */
- switch (hdmi->hdmi_data.enc_out_encoding) {
- case V4L2_YCBCR_ENC_601:
- if (hdmi->hdmi_data.enc_in_encoding == V4L2_YCBCR_ENC_XV601)
- frame.colorimetry = HDMI_COLORIMETRY_EXTENDED;
- else
+ if (!hdmi_bus_fmt_is_rgb(hdmi->hdmi_data.enc_out_bus_format)) {
+ switch (hdmi->hdmi_data.enc_out_encoding) {
+ case V4L2_YCBCR_ENC_601:
+ if (hdmi->hdmi_data.enc_in_encoding == V4L2_YCBCR_ENC_XV601)
+ frame.colorimetry = HDMI_COLORIMETRY_EXTENDED;
+ else
+ frame.colorimetry = HDMI_COLORIMETRY_ITU_601;
+ frame.extended_colorimetry =
+ HDMI_EXTENDED_COLORIMETRY_XV_YCC_601;
+ break;
+ case V4L2_YCBCR_ENC_709:
+ if (hdmi->hdmi_data.enc_in_encoding == V4L2_YCBCR_ENC_XV709)
+ frame.colorimetry = HDMI_COLORIMETRY_EXTENDED;
+ else
+ frame.colorimetry = HDMI_COLORIMETRY_ITU_709;
+ frame.extended_colorimetry =
+ HDMI_EXTENDED_COLORIMETRY_XV_YCC_709;
+ break;
+ default: /* Carries no data */
frame.colorimetry = HDMI_COLORIMETRY_ITU_601;
+ frame.extended_colorimetry =
+ HDMI_EXTENDED_COLORIMETRY_XV_YCC_601;
+ break;
+ }
+ } else {
+ frame.colorimetry = HDMI_COLORIMETRY_NONE;
frame.extended_colorimetry =
- HDMI_EXTENDED_COLORIMETRY_XV_YCC_601;
- break;
- case V4L2_YCBCR_ENC_709:
- if (hdmi->hdmi_data.enc_in_encoding == V4L2_YCBCR_ENC_XV709)
- frame.colorimetry = HDMI_COLORIMETRY_EXTENDED;
- else
- frame.colorimetry = HDMI_COLORIMETRY_ITU_709;
- frame.extended_colorimetry =
- HDMI_EXTENDED_COLORIMETRY_XV_YCC_709;
- break;
- default: /* Carries no data */
- frame.colorimetry = HDMI_COLORIMETRY_ITU_601;
- frame.extended_colorimetry =
- HDMI_EXTENDED_COLORIMETRY_XV_YCC_601;
- break;
+ HDMI_EXTENDED_COLORIMETRY_XV_YCC_601;
}
/*
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index c6fbe6e6bc9d..612a59ec8116 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -1238,6 +1238,8 @@ static const struct dpcd_quirk dpcd_quirk_list[] = {
{ OUI(0x00, 0x00, 0x00), DEVICE_ID('C', 'H', '7', '5', '1', '1'), false, BIT(DP_DPCD_QUIRK_NO_SINK_COUNT) },
/* Synaptics DP1.4 MST hubs can support DSC without virtual DPCD */
{ OUI(0x90, 0xCC, 0x24), DEVICE_ID_ANY, true, BIT(DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD) },
+ /* Apple MacBookPro 2017 15 inch eDP Retina panel reports too low DP_MAX_LINK_RATE */
+ { OUI(0x00, 0x10, 0xfa), DEVICE_ID(101, 68, 21, 101, 98, 97), false, BIT(DP_DPCD_QUIRK_CAN_DO_MAX_LINK_RATE_3_24_GBPS) },
};
#undef OUI
@@ -1533,3 +1535,97 @@ int drm_dp_dsc_sink_supported_input_bpcs(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_S
return num_bpc;
}
EXPORT_SYMBOL(drm_dp_dsc_sink_supported_input_bpcs);
+
+/**
+ * drm_dp_get_phy_test_pattern() - get the requested pattern from the sink.
+ * @aux: DisplayPort AUX channel
+ * @data: DP phy compliance test parameters.
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int drm_dp_get_phy_test_pattern(struct drm_dp_aux *aux,
+ struct drm_dp_phy_test_params *data)
+{
+ int err;
+ u8 rate, lanes;
+
+ err = drm_dp_dpcd_readb(aux, DP_TEST_LINK_RATE, &rate);
+ if (err < 0)
+ return err;
+ data->link_rate = drm_dp_bw_code_to_link_rate(rate);
+
+ err = drm_dp_dpcd_readb(aux, DP_TEST_LANE_COUNT, &lanes);
+ if (err < 0)
+ return err;
+ data->num_lanes = lanes & DP_MAX_LANE_COUNT_MASK;
+
+ if (lanes & DP_ENHANCED_FRAME_CAP)
+ data->enhanced_frame_cap = true;
+
+ err = drm_dp_dpcd_readb(aux, DP_PHY_TEST_PATTERN, &data->phy_pattern);
+ if (err < 0)
+ return err;
+
+ switch (data->phy_pattern) {
+ case DP_PHY_TEST_PATTERN_80BIT_CUSTOM:
+ err = drm_dp_dpcd_read(aux, DP_TEST_80BIT_CUSTOM_PATTERN_7_0,
+ &data->custom80, sizeof(data->custom80));
+ if (err < 0)
+ return err;
+
+ break;
+ case DP_PHY_TEST_PATTERN_CP2520:
+ err = drm_dp_dpcd_read(aux, DP_TEST_HBR2_SCRAMBLER_RESET,
+ &data->hbr2_reset,
+ sizeof(data->hbr2_reset));
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_dp_get_phy_test_pattern);
+
+/**
+ * drm_dp_set_phy_test_pattern() - set the pattern to the sink.
+ * @aux: DisplayPort AUX channel
+ * @data: DP phy compliance test parameters.
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int drm_dp_set_phy_test_pattern(struct drm_dp_aux *aux,
+ struct drm_dp_phy_test_params *data, u8 dp_rev)
+{
+ int err, i;
+ u8 link_config[2];
+ u8 test_pattern;
+
+ link_config[0] = drm_dp_link_rate_to_bw_code(data->link_rate);
+ link_config[1] = data->num_lanes;
+ if (data->enhanced_frame_cap)
+ link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+ err = drm_dp_dpcd_write(aux, DP_LINK_BW_SET, link_config, 2);
+ if (err < 0)
+ return err;
+
+ test_pattern = data->phy_pattern;
+ if (dp_rev < 0x12) {
+ test_pattern = (test_pattern << 2) &
+ DP_LINK_QUAL_PATTERN_11_MASK;
+ err = drm_dp_dpcd_writeb(aux, DP_TRAINING_PATTERN_SET,
+ test_pattern);
+ if (err < 0)
+ return err;
+ } else {
+ for (i = 0; i < data->num_lanes; i++) {
+ err = drm_dp_dpcd_writeb(aux,
+ DP_LINK_QUAL_LANE0_SET + i,
+ test_pattern);
+ if (err < 0)
+ return err;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_dp_set_phy_test_pattern);
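For context, the two helpers above are meant to be chained by a source driver servicing a PHY compliance request, retraining the link in between. A minimal sketch, assuming a hypothetical handler name; only drm_dp_get_phy_test_pattern(), drm_dp_set_phy_test_pattern() and struct drm_dp_phy_test_params come from this patch:

static int example_handle_phy_test_request(struct drm_dp_aux *aux, u8 dp_rev)
{
	struct drm_dp_phy_test_params params;
	int ret;

	/* Read the requested rate, lane count and pattern from the sink. */
	ret = drm_dp_get_phy_test_pattern(aux, &params);
	if (ret < 0)
		return ret;

	/* A real driver would retrain the link here at params.link_rate
	 * and params.num_lanes before emitting the pattern. */

	/* Program the requested pattern back to the sink. */
	return drm_dp_set_phy_test_pattern(aux, &params, dp_rev);
}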
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 6529b479645c..13213c4b77d1 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -1928,7 +1928,7 @@ static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port,
return parent_lct + 1;
}
-static bool drm_dp_mst_is_dp_mst_end_device(u8 pdt, bool mcs)
+static bool drm_dp_mst_is_end_device(u8 pdt, bool mcs)
{
switch (pdt) {
case DP_PEER_DEVICE_DP_LEGACY_CONV:
@@ -1958,13 +1958,13 @@ drm_dp_port_set_pdt(struct drm_dp_mst_port *port, u8 new_pdt,
/* Teardown the old pdt, if there is one */
if (port->pdt != DP_PEER_DEVICE_NONE) {
- if (drm_dp_mst_is_dp_mst_end_device(port->pdt, port->mcs)) {
+ if (drm_dp_mst_is_end_device(port->pdt, port->mcs)) {
/*
* If the new PDT would also have an i2c bus,
* don't bother with reregistering it
*/
if (new_pdt != DP_PEER_DEVICE_NONE &&
- drm_dp_mst_is_dp_mst_end_device(new_pdt, new_mcs)) {
+ drm_dp_mst_is_end_device(new_pdt, new_mcs)) {
port->pdt = new_pdt;
port->mcs = new_mcs;
return 0;
@@ -1984,7 +1984,7 @@ drm_dp_port_set_pdt(struct drm_dp_mst_port *port, u8 new_pdt,
port->mcs = new_mcs;
if (port->pdt != DP_PEER_DEVICE_NONE) {
- if (drm_dp_mst_is_dp_mst_end_device(port->pdt, port->mcs)) {
+ if (drm_dp_mst_is_end_device(port->pdt, port->mcs)) {
/* add i2c over sideband */
ret = drm_dp_mst_register_i2c_bus(&port->aux);
} else {
@@ -2163,7 +2163,7 @@ drm_dp_mst_port_add_connector(struct drm_dp_mst_branch *mstb,
}
if (port->pdt != DP_PEER_DEVICE_NONE &&
- drm_dp_mst_is_dp_mst_end_device(port->pdt, port->mcs)) {
+ drm_dp_mst_is_end_device(port->pdt, port->mcs)) {
port->cached_edid = drm_get_edid(port->connector,
&port->aux.ddc);
drm_connector_set_tile_property(port->connector);
@@ -2293,14 +2293,18 @@ drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb,
mutex_unlock(&mgr->lock);
}
- if (old_ddps != port->ddps) {
- if (port->ddps) {
- if (!port->input) {
- drm_dp_send_enum_path_resources(mgr, mstb,
- port);
- }
+ /*
+ * Reprobe PBN caps both on hotplug and when re-probing the link
+ * for our parent mstb
+ */
+ if (old_ddps != port->ddps || !created) {
+ if (port->ddps && !port->input) {
+ ret = drm_dp_send_enum_path_resources(mgr, mstb,
+ port);
+ if (ret == 1)
+ changed = true;
} else {
- port->available_pbn = 0;
+ port->full_pbn = 0;
}
}
@@ -2392,11 +2396,10 @@ drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb,
port->ddps = conn_stat->displayport_device_plug_status;
if (old_ddps != port->ddps) {
- if (port->ddps) {
- dowork = true;
- } else {
- port->available_pbn = 0;
- }
+ if (port->ddps && !port->input)
+ drm_dp_send_enum_path_resources(mgr, mstb, port);
+ else
+ port->full_pbn = 0;
}
new_pdt = port->input ? DP_PEER_DEVICE_NONE : conn_stat->peer_device_type;
@@ -2547,13 +2550,6 @@ static int drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mg
if (port->input || !port->ddps)
continue;
- if (!port->available_pbn) {
- drm_modeset_lock(&mgr->base.lock, NULL);
- drm_dp_send_enum_path_resources(mgr, mstb, port);
- drm_modeset_unlock(&mgr->base.lock);
- changed = true;
- }
-
if (port->mstb)
mstb_child = drm_dp_mst_topology_get_mstb_validated(
mgr, port->mstb);
@@ -2984,6 +2980,7 @@ drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
if (ret > 0) {
+ ret = 0;
path_res = &txmsg->reply.u.path_resources;
if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
@@ -2996,14 +2993,22 @@ drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
path_res->port_number,
path_res->full_payload_bw_number,
path_res->avail_payload_bw_number);
- port->available_pbn =
- path_res->avail_payload_bw_number;
+
+ /*
+ * If something changed, make sure we send a
+ * hotplug
+ */
+ if (port->full_pbn != path_res->full_payload_bw_number ||
+ port->fec_capable != path_res->fec_capable)
+ ret = 1;
+
+ port->full_pbn = path_res->full_payload_bw_number;
port->fec_capable = path_res->fec_capable;
}
}
kfree(txmsg);
- return 0;
+ return ret;
}
static struct drm_dp_mst_port *drm_dp_get_last_connected_port_to_mstb(struct drm_dp_mst_branch *mstb)
@@ -3577,13 +3582,9 @@ drm_dp_mst_topology_mgr_invalidate_mstb(struct drm_dp_mst_branch *mstb)
/* The link address will need to be re-sent on resume */
mstb->link_address_sent = false;
- list_for_each_entry(port, &mstb->ports, next) {
- /* The PBN for each port will also need to be re-probed */
- port->available_pbn = 0;
-
+ list_for_each_entry(port, &mstb->ports, next)
if (port->mstb)
drm_dp_mst_topology_mgr_invalidate_mstb(port->mstb);
- }
}
/**
@@ -4861,41 +4862,102 @@ static bool drm_dp_mst_port_downstream_of_branch(struct drm_dp_mst_port *port,
return false;
}
-static inline
-int drm_dp_mst_atomic_check_bw_limit(struct drm_dp_mst_branch *branch,
- struct drm_dp_mst_topology_state *mst_state)
+static int
+drm_dp_mst_atomic_check_port_bw_limit(struct drm_dp_mst_port *port,
+ struct drm_dp_mst_topology_state *state);
+
+static int
+drm_dp_mst_atomic_check_mstb_bw_limit(struct drm_dp_mst_branch *mstb,
+ struct drm_dp_mst_topology_state *state)
{
- struct drm_dp_mst_port *port;
struct drm_dp_vcpi_allocation *vcpi;
- int pbn_limit = 0, pbn_used = 0;
+ struct drm_dp_mst_port *port;
+ int pbn_used = 0, ret;
+ bool found = false;
- list_for_each_entry(port, &branch->ports, next) {
- if (port->mstb)
- if (drm_dp_mst_atomic_check_bw_limit(port->mstb, mst_state))
- return -ENOSPC;
+ /* Check that we have at least one port in our state that's downstream
+ * of this branch; otherwise we can skip this branch
+ */
+ list_for_each_entry(vcpi, &state->vcpis, next) {
+ if (!vcpi->pbn ||
+ !drm_dp_mst_port_downstream_of_branch(vcpi->port, mstb))
+ continue;
- if (port->available_pbn > 0)
- pbn_limit = port->available_pbn;
+ found = true;
+ break;
}
- DRM_DEBUG_ATOMIC("[MST BRANCH:%p] branch has %d PBN available\n",
- branch, pbn_limit);
+ if (!found)
+ return 0;
- list_for_each_entry(vcpi, &mst_state->vcpis, next) {
- if (!vcpi->pbn)
- continue;
+ if (mstb->port_parent)
+ DRM_DEBUG_ATOMIC("[MSTB:%p] [MST PORT:%p] Checking bandwidth limits on [MSTB:%p]\n",
+ mstb->port_parent->parent, mstb->port_parent,
+ mstb);
+ else
+ DRM_DEBUG_ATOMIC("[MSTB:%p] Checking bandwidth limits\n",
+ mstb);
+
+ list_for_each_entry(port, &mstb->ports, next) {
+ ret = drm_dp_mst_atomic_check_port_bw_limit(port, state);
+ if (ret < 0)
+ return ret;
+
+ pbn_used += ret;
+ }
+
+ return pbn_used;
+}
+
+static int
+drm_dp_mst_atomic_check_port_bw_limit(struct drm_dp_mst_port *port,
+ struct drm_dp_mst_topology_state *state)
+{
+ struct drm_dp_vcpi_allocation *vcpi;
+ int pbn_used = 0;
+
+ if (port->pdt == DP_PEER_DEVICE_NONE)
+ return 0;
+
+ if (drm_dp_mst_is_end_device(port->pdt, port->mcs)) {
+ bool found = false;
+
+ list_for_each_entry(vcpi, &state->vcpis, next) {
+ if (vcpi->port != port)
+ continue;
+ if (!vcpi->pbn)
+ return 0;
+
+ found = true;
+ break;
+ }
+ if (!found)
+ return 0;
- if (drm_dp_mst_port_downstream_of_branch(vcpi->port, branch))
- pbn_used += vcpi->pbn;
+ /* This should never happen, as it means we tried to
+ * set a mode before querying the full_pbn
+ */
+ if (WARN_ON(!port->full_pbn))
+ return -EINVAL;
+
+ pbn_used = vcpi->pbn;
+ } else {
+ pbn_used = drm_dp_mst_atomic_check_mstb_bw_limit(port->mstb,
+ state);
+ if (pbn_used <= 0)
+ return pbn_used;
}
- DRM_DEBUG_ATOMIC("[MST BRANCH:%p] branch used %d PBN\n",
- branch, pbn_used);
- if (pbn_used > pbn_limit) {
- DRM_DEBUG_ATOMIC("[MST BRANCH:%p] No available bandwidth\n",
- branch);
+ if (pbn_used > port->full_pbn) {
+ DRM_DEBUG_ATOMIC("[MSTB:%p] [MST PORT:%p] required PBN of %d exceeds port limit of %d\n",
+ port->parent, port, pbn_used,
+ port->full_pbn);
return -ENOSPC;
}
- return 0;
+
+ DRM_DEBUG_ATOMIC("[MSTB:%p] [MST PORT:%p] uses %d out of %d PBN\n",
+ port->parent, port, pbn_used, port->full_pbn);
+
+ return pbn_used;
}
static inline int
@@ -5093,9 +5155,15 @@ int drm_dp_mst_atomic_check(struct drm_atomic_state *state)
ret = drm_dp_mst_atomic_check_vcpi_alloc_limit(mgr, mst_state);
if (ret)
break;
- ret = drm_dp_mst_atomic_check_bw_limit(mgr->mst_primary, mst_state);
- if (ret)
+
+ mutex_lock(&mgr->lock);
+ ret = drm_dp_mst_atomic_check_mstb_bw_limit(mgr->mst_primary,
+ mst_state);
+ mutex_unlock(&mgr->lock);
+ if (ret < 0)
break;
+ else
+ ret = 0;
}
return ret;
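Driver-facing behaviour is unchanged: drm_dp_mst_atomic_check() keeps its signature, so existing callers pick up the recursive full_pbn accounting for free. A minimal sketch of the usual call site (the hook name is illustrative):

static int example_atomic_check(struct drm_device *dev,
				struct drm_atomic_state *state)
{
	int ret;

	ret = drm_atomic_helper_check(dev, state);
	if (ret)
		return ret;

	/* Now walks the whole topology under mgr->lock and returns
	 * -ENOSPC if any port's PBN budget would be exceeded. */
	return drm_dp_mst_atomic_check(state);
}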
diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
index c4c704e01961..eb009d3ab48f 100644
--- a/drivers/gpu/drm/drm_file.c
+++ b/drivers/gpu/drm/drm_file.c
@@ -48,6 +48,11 @@
#include "drm_internal.h"
#include "drm_legacy.h"
+#if defined(CONFIG_MMU) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
+#include <uapi/asm/mman.h>
+#include <drm/drm_vma_manager.h>
+#endif
+
/* from BKL pushdown */
DEFINE_MUTEX(drm_global_mutex);
@@ -872,3 +877,139 @@ struct file *mock_drm_getfile(struct drm_minor *minor, unsigned int flags)
return file;
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(mock_drm_getfile);
+
+#ifdef CONFIG_MMU
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+/*
+ * drm_addr_inflate() attempts to construct an aligned area by inflating
+ * the area size and skipping the unaligned start of the area.
+ * adapted from shmem_get_unmapped_area()
+ */
+static unsigned long drm_addr_inflate(unsigned long addr,
+ unsigned long len,
+ unsigned long pgoff,
+ unsigned long flags,
+ unsigned long huge_size)
+{
+ unsigned long offset, inflated_len;
+ unsigned long inflated_addr;
+ unsigned long inflated_offset;
+
+ offset = (pgoff << PAGE_SHIFT) & (huge_size - 1);
+ if (offset && offset + len < 2 * huge_size)
+ return addr;
+ if ((addr & (huge_size - 1)) == offset)
+ return addr;
+
+ inflated_len = len + huge_size - PAGE_SIZE;
+ if (inflated_len > TASK_SIZE)
+ return addr;
+ if (inflated_len < len)
+ return addr;
+
+ inflated_addr = current->mm->get_unmapped_area(NULL, 0, inflated_len,
+ 0, flags);
+ if (IS_ERR_VALUE(inflated_addr))
+ return addr;
+ if (inflated_addr & ~PAGE_MASK)
+ return addr;
+
+ inflated_offset = inflated_addr & (huge_size - 1);
+ inflated_addr += offset - inflated_offset;
+ if (inflated_offset > offset)
+ inflated_addr += huge_size;
+
+ if (inflated_addr > TASK_SIZE - len)
+ return addr;
+
+ return inflated_addr;
+}
+
+/**
+ * drm_get_unmapped_area() - Get an unused user-space virtual memory area
+ * suitable for huge page table entries.
+ * @file: The struct file representing the address space being mmap()'d.
+ * @uaddr: Start address suggested by user-space.
+ * @len: Length of the area.
+ * @pgoff: The page offset into the address space.
+ * @flags: mmap flags
+ * @mgr: The address space manager used by the drm driver. This argument can
+ * probably be removed at some point when all drivers use the same
+ * address space manager.
+ *
+ * This function attempts to find an unused user-space virtual memory area
+ * that can accommodate the size we want to map, and that is properly
+ * aligned to facilitate huge page table entries matching actual
+ * huge pages or huge page aligned memory in buffer objects. Buffer objects
+ * are assumed to start at huge page boundary pfns (io memory) or be
+ * populated by huge pages aligned to the start of the buffer object
+ * (system- or coherent memory). Adapted from shmem_get_unmapped_area.
+ *
+ * Return: aligned user-space address.
+ */
+unsigned long drm_get_unmapped_area(struct file *file,
+ unsigned long uaddr, unsigned long len,
+ unsigned long pgoff, unsigned long flags,
+ struct drm_vma_offset_manager *mgr)
+{
+ unsigned long addr;
+ unsigned long inflated_addr;
+ struct drm_vma_offset_node *node;
+
+ if (len > TASK_SIZE)
+ return -ENOMEM;
+
+ /*
+ * @pgoff is the file page-offset, the huge page boundaries of
+ * which typically align to physical address huge page boundaries.
+ * That's not true for DRM, however, where physical address huge
+ * page boundaries instead are aligned with the offset from
+ * buffer object start. So adjust @pgoff to be the offset from
+ * buffer object start.
+ */
+ drm_vma_offset_lock_lookup(mgr);
+ node = drm_vma_offset_lookup_locked(mgr, pgoff, 1);
+ if (node)
+ pgoff -= node->vm_node.start;
+ drm_vma_offset_unlock_lookup(mgr);
+
+ addr = current->mm->get_unmapped_area(file, uaddr, len, pgoff, flags);
+ if (IS_ERR_VALUE(addr))
+ return addr;
+ if (addr & ~PAGE_MASK)
+ return addr;
+ if (addr > TASK_SIZE - len)
+ return addr;
+
+ if (len < HPAGE_PMD_SIZE)
+ return addr;
+ if (flags & MAP_FIXED)
+ return addr;
+ /*
+ * Our priority is to support MAP_SHARED mapped hugely;
+ * and support MAP_PRIVATE mapped hugely too, until it is COWed.
+ * But if the caller specified an address hint, respect that as before.
+ */
+ if (uaddr)
+ return addr;
+
+ inflated_addr = drm_addr_inflate(addr, len, pgoff, flags,
+ HPAGE_PMD_SIZE);
+
+ if (IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) &&
+ len >= HPAGE_PUD_SIZE)
+ inflated_addr = drm_addr_inflate(inflated_addr, len, pgoff,
+ flags, HPAGE_PUD_SIZE);
+ return inflated_addr;
+}
+#else /* CONFIG_TRANSPARENT_HUGEPAGE */
+unsigned long drm_get_unmapped_area(struct file *file,
+ unsigned long uaddr, unsigned long len,
+ unsigned long pgoff, unsigned long flags,
+ struct drm_vma_offset_manager *mgr)
+{
+ return current->mm->get_unmapped_area(file, uaddr, len, pgoff, flags);
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+EXPORT_SYMBOL_GPL(drm_get_unmapped_area);
+#endif /* CONFIG_MMU */
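Wiring this up is left to individual drivers. A minimal sketch, assuming a driver-private device struct with an embedded drm_device and a drm_vma_offset_manager (the example_* names are hypothetical):

static unsigned long example_get_unmapped_area(struct file *file,
					       unsigned long uaddr,
					       unsigned long len,
					       unsigned long pgoff,
					       unsigned long flags)
{
	struct drm_file *file_priv = file->private_data;
	/* example_device and its vma_manager member are assumed names */
	struct example_device *edev =
		container_of(file_priv->minor->dev, struct example_device, drm);

	return drm_get_unmapped_area(file, uaddr, len, pgoff, flags,
				     &edev->vma_manager);
}

The driver then points .get_unmapped_area in its file_operations at this wrapper. To make the inflation concrete: for a 4 MiB mapping with HPAGE_PMD_SIZE = 2 MiB, drm_addr_inflate() asks the mm for 4 MiB + 2 MiB - 4 KiB and slides the returned start forward to a suitably aligned offset, so the whole object can be mapped with PMD entries.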
diff --git a/drivers/gpu/drm/drm_lease.c b/drivers/gpu/drm/drm_lease.c
index b481cafdde28..825abe38201a 100644
--- a/drivers/gpu/drm/drm_lease.c
+++ b/drivers/gpu/drm/drm_lease.c
@@ -542,10 +542,12 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
}
DRM_DEBUG_LEASE("Creating lease\n");
+ /* lessee will take ownership of the leases */
lessee = drm_lease_create(lessor, &leases);
if (IS_ERR(lessee)) {
ret = PTR_ERR(lessee);
+ idr_destroy(&leases);
goto out_leases;
}
@@ -580,7 +582,6 @@ out_lessee:
out_leases:
put_unused_fd(fd);
- idr_destroy(&leases);
DRM_DEBUG_LEASE("drm_mode_create_lease_ioctl failed: %d\n", ret);
return ret;
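The ownership rule stated by the new comment, distilled into a sketch of the control flow above (not new API):

	idr_init(&leases);
	/* ... fill &leases with the leased object ids ... */

	lessee = drm_lease_create(lessor, &leases);
	if (IS_ERR(lessee)) {
		idr_destroy(&leases);	/* still the caller's on failure */
		ret = PTR_ERR(lessee);
		goto out_leases;	/* must not destroy it a second time */
	}
	/* from here on the lessee owns &leases and will free it */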
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 86d9b0e45c8c..282774e469ac 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -962,27 +962,40 @@ int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
unsigned count;
struct scatterlist *sg;
struct page *page;
- u32 len, index;
+ u32 page_len, page_index;
dma_addr_t addr;
+ u32 dma_len, dma_index;
- index = 0;
+ /*
+ * Scatterlist elements contain both pages and DMA addresses, but
+ * one should not assume a 1:1 relation between them. The sg->length is
+ * the size of the physical memory chunk described by the sg->page,
+ * while sg_dma_len(sg) is the size of the DMA (IO virtual) chunk
+ * described by the sg_dma_address(sg).
+ */
+ page_index = 0;
+ dma_index = 0;
for_each_sg(sgt->sgl, sg, sgt->nents, count) {
- len = sg->length;
+ page_len = sg->length;
page = sg_page(sg);
+ dma_len = sg_dma_len(sg);
addr = sg_dma_address(sg);
- while (len > 0) {
- if (WARN_ON(index >= max_entries))
+ while (pages && page_len > 0) {
+ if (WARN_ON(page_index >= max_entries))
return -1;
- if (pages)
- pages[index] = page;
- if (addrs)
- addrs[index] = addr;
-
+ pages[page_index] = page;
page++;
+ page_len -= PAGE_SIZE;
+ page_index++;
+ }
+ while (addrs && dma_len > 0) {
+ if (WARN_ON(dma_index >= max_entries))
+ return -1;
+ addrs[dma_index] = addr;
addr += PAGE_SIZE;
- len -= PAGE_SIZE;
- index++;
+ dma_len -= PAGE_SIZE;
+ dma_index++;
}
}
return 0;
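One practical consequence, sketched below: a caller that needs only one of the two arrays can pass NULL for the other, and only the corresponding walk runs. The obj_size and sgt variables stand in for whatever the importing driver has at hand:

	int npages = obj_size >> PAGE_SHIFT;
	struct page **pages;
	int ret;

	pages = kvmalloc_array(npages, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	/* NULL addrs: only the CPU-page walk runs, no DMA mapping assumed */
	ret = drm_prime_sg_to_page_addr_arrays(sgt, pages, NULL, npages);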
diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
index 8428ae12dfa5..1f79bc2a881e 100644
--- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
@@ -55,6 +55,7 @@ static const char * const decon_clks_name[] = {
struct decon_context {
struct device *dev;
struct drm_device *drm_dev;
+ void *dma_priv;
struct exynos_drm_crtc *crtc;
struct exynos_drm_plane planes[WINDOWS_NR];
struct exynos_drm_plane_config configs[WINDOWS_NR];
@@ -644,7 +645,7 @@ static int decon_bind(struct device *dev, struct device *master, void *data)
decon_clear_channels(ctx->crtc);
- return exynos_drm_register_dma(drm_dev, dev);
+ return exynos_drm_register_dma(drm_dev, dev, &ctx->dma_priv);
}
static void decon_unbind(struct device *dev, struct device *master, void *data)
@@ -654,7 +655,7 @@ static void decon_unbind(struct device *dev, struct device *master, void *data)
decon_atomic_disable(ctx->crtc);
/* detach this sub driver from iommu mapping if supported. */
- exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev);
+ exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev, &ctx->dma_priv);
}
static const struct component_ops decon_component_ops = {
diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
index e7b58097ccdc..f2d87a7445c7 100644
--- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
@@ -40,6 +40,7 @@
struct decon_context {
struct device *dev;
struct drm_device *drm_dev;
+ void *dma_priv;
struct exynos_drm_crtc *crtc;
struct exynos_drm_plane planes[WINDOWS_NR];
struct exynos_drm_plane_config configs[WINDOWS_NR];
@@ -127,13 +128,13 @@ static int decon_ctx_initialize(struct decon_context *ctx,
decon_clear_channels(ctx->crtc);
- return exynos_drm_register_dma(drm_dev, ctx->dev);
+ return exynos_drm_register_dma(drm_dev, ctx->dev, &ctx->dma_priv);
}
static void decon_ctx_remove(struct decon_context *ctx)
{
/* detach this sub driver from iommu mapping if supported. */
- exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev);
+ exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev, &ctx->dma_priv);
}
static u32 decon_calc_clkdiv(struct decon_context *ctx,
diff --git a/drivers/gpu/drm/exynos/exynos_dp.c b/drivers/gpu/drm/exynos/exynos_dp.c
index a61482af2998..9ac51b6ab34b 100644
--- a/drivers/gpu/drm/exynos/exynos_dp.c
+++ b/drivers/gpu/drm/exynos/exynos_dp.c
@@ -156,15 +156,8 @@ static int exynos_dp_bind(struct device *dev, struct device *master, void *data)
struct drm_device *drm_dev = data;
int ret;
- dp->dev = dev;
dp->drm_dev = drm_dev;
- dp->plat_data.dev_type = EXYNOS_DP;
- dp->plat_data.power_on_start = exynos_dp_poweron;
- dp->plat_data.power_off = exynos_dp_poweroff;
- dp->plat_data.attach = exynos_dp_bridge_attach;
- dp->plat_data.get_modes = exynos_dp_get_modes;
-
if (!dp->plat_data.panel && !dp->ptn_bridge) {
ret = exynos_dp_dt_parse_panel(dp);
if (ret)
@@ -181,13 +174,11 @@ static int exynos_dp_bind(struct device *dev, struct device *master, void *data)
dp->plat_data.encoder = encoder;
- dp->adp = analogix_dp_bind(dev, dp->drm_dev, &dp->plat_data);
- if (IS_ERR(dp->adp)) {
+ ret = analogix_dp_bind(dp->adp, dp->drm_dev);
+ if (ret)
dp->encoder.funcs->destroy(&dp->encoder);
- return PTR_ERR(dp->adp);
- }
- return 0;
+ return ret;
}
static void exynos_dp_unbind(struct device *dev, struct device *master,
@@ -218,6 +209,7 @@ static int exynos_dp_probe(struct platform_device *pdev)
if (!dp)
return -ENOMEM;
+ dp->dev = dev;
/*
* We just use the drvdata until driver run into component
* add function, and then we would set drvdata to null, so
@@ -243,16 +235,29 @@ static int exynos_dp_probe(struct platform_device *pdev)
/* The remote port can be either a panel or a bridge */
dp->plat_data.panel = panel;
+ dp->plat_data.dev_type = EXYNOS_DP;
+ dp->plat_data.power_on_start = exynos_dp_poweron;
+ dp->plat_data.power_off = exynos_dp_poweroff;
+ dp->plat_data.attach = exynos_dp_bridge_attach;
+ dp->plat_data.get_modes = exynos_dp_get_modes;
dp->plat_data.skip_connector = !!bridge;
+
dp->ptn_bridge = bridge;
out:
+ dp->adp = analogix_dp_probe(dev, &dp->plat_data);
+ if (IS_ERR(dp->adp))
+ return PTR_ERR(dp->adp);
+
return component_add(&pdev->dev, &exynos_dp_ops);
}
static int exynos_dp_remove(struct platform_device *pdev)
{
+ struct exynos_dp_device *dp = platform_get_drvdata(pdev);
+
component_del(&pdev->dev, &exynos_dp_ops);
+ analogix_dp_remove(dp->adp);
return 0;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dma.c b/drivers/gpu/drm/exynos/exynos_drm_dma.c
index 9ebc02768847..619f81435c1b 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dma.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dma.c
@@ -58,7 +58,7 @@ static inline void clear_dma_max_seg_size(struct device *dev)
* mapping.
*/
static int drm_iommu_attach_device(struct drm_device *drm_dev,
- struct device *subdrv_dev)
+ struct device *subdrv_dev, void **dma_priv)
{
struct exynos_drm_private *priv = drm_dev->dev_private;
int ret;
@@ -74,7 +74,14 @@ static int drm_iommu_attach_device(struct drm_device *drm_dev,
return ret;
if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)) {
- if (to_dma_iommu_mapping(subdrv_dev))
+ /*
+ * Keep the original DMA mapping of the sub-device and
+ * restore it on Exynos DRM detach, otherwise the DMA
+ * framework considers it IOMMU-less during the next
+ * probe (in case of deferred probe or modular build).
+ */
+ *dma_priv = to_dma_iommu_mapping(subdrv_dev);
+ if (*dma_priv)
arm_iommu_detach_device(subdrv_dev);
ret = arm_iommu_attach_device(subdrv_dev, priv->mapping);
@@ -98,19 +105,21 @@ static int drm_iommu_attach_device(struct drm_device *drm_dev,
* mapping
*/
static void drm_iommu_detach_device(struct drm_device *drm_dev,
- struct device *subdrv_dev)
+ struct device *subdrv_dev, void **dma_priv)
{
struct exynos_drm_private *priv = drm_dev->dev_private;
- if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU))
+ if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)) {
arm_iommu_detach_device(subdrv_dev);
- else if (IS_ENABLED(CONFIG_IOMMU_DMA))
+ arm_iommu_attach_device(subdrv_dev, *dma_priv);
+ } else if (IS_ENABLED(CONFIG_IOMMU_DMA))
iommu_detach_device(priv->mapping, subdrv_dev);
clear_dma_max_seg_size(subdrv_dev);
}
-int exynos_drm_register_dma(struct drm_device *drm, struct device *dev)
+int exynos_drm_register_dma(struct drm_device *drm, struct device *dev,
+ void **dma_priv)
{
struct exynos_drm_private *priv = drm->dev_private;
@@ -137,13 +146,14 @@ int exynos_drm_register_dma(struct drm_device *drm, struct device *dev)
priv->mapping = mapping;
}
- return drm_iommu_attach_device(drm, dev);
+ return drm_iommu_attach_device(drm, dev, dma_priv);
}
-void exynos_drm_unregister_dma(struct drm_device *drm, struct device *dev)
+void exynos_drm_unregister_dma(struct drm_device *drm, struct device *dev,
+ void **dma_priv)
{
if (IS_ENABLED(CONFIG_EXYNOS_IOMMU))
- drm_iommu_detach_device(drm, dev);
+ drm_iommu_detach_device(drm, dev, dma_priv);
}
void exynos_drm_cleanup_dma(struct drm_device *drm)
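Every sub-driver converted below follows the same shape; in outline (the example_* names are hypothetical, the two exynos_drm_*_dma() calls are the real API):

struct example_context {
	struct drm_device *drm_dev;
	void *dma_priv;		/* saved ARM DMA mapping, if any */
};

static int example_bind(struct example_context *ctx,
			struct drm_device *drm_dev, struct device *dev)
{
	ctx->drm_dev = drm_dev;
	return exynos_drm_register_dma(drm_dev, dev, &ctx->dma_priv);
}

static void example_unbind(struct example_context *ctx, struct device *dev)
{
	/* restores the mapping stashed in ctx->dma_priv at bind time */
	exynos_drm_unregister_dma(ctx->drm_dev, dev, &ctx->dma_priv);
}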
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index d4d21d8cfb90..6ae9056e7a18 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -223,8 +223,10 @@ static inline bool is_drm_iommu_supported(struct drm_device *drm_dev)
return priv->mapping ? true : false;
}
-int exynos_drm_register_dma(struct drm_device *drm, struct device *dev);
-void exynos_drm_unregister_dma(struct drm_device *drm, struct device *dev);
+int exynos_drm_register_dma(struct drm_device *drm, struct device *dev,
+ void **dma_priv);
+void exynos_drm_unregister_dma(struct drm_device *drm, struct device *dev,
+ void **dma_priv);
void exynos_drm_cleanup_dma(struct drm_device *drm);
#ifdef CONFIG_DRM_EXYNOS_DPI
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
index 8ea2e1d77802..29ab8be8604c 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -97,6 +97,7 @@ struct fimc_scaler {
struct fimc_context {
struct exynos_drm_ipp ipp;
struct drm_device *drm_dev;
+ void *dma_priv;
struct device *dev;
struct exynos_drm_ipp_task *task;
struct exynos_drm_ipp_formats *formats;
@@ -1133,7 +1134,7 @@ static int fimc_bind(struct device *dev, struct device *master, void *data)
ctx->drm_dev = drm_dev;
ipp->drm_dev = drm_dev;
- exynos_drm_register_dma(drm_dev, dev);
+ exynos_drm_register_dma(drm_dev, dev, &ctx->dma_priv);
exynos_drm_ipp_register(dev, ipp, &ipp_funcs,
DRM_EXYNOS_IPP_CAP_CROP | DRM_EXYNOS_IPP_CAP_ROTATE |
@@ -1153,7 +1154,7 @@ static void fimc_unbind(struct device *dev, struct device *master,
struct exynos_drm_ipp *ipp = &ctx->ipp;
exynos_drm_ipp_unregister(dev, ipp);
- exynos_drm_unregister_dma(drm_dev, dev);
+ exynos_drm_unregister_dma(drm_dev, dev, &ctx->dma_priv);
}
static const struct component_ops fimc_component_ops = {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 21aec38702fc..bb67cad8371f 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -167,6 +167,7 @@ static struct fimd_driver_data exynos5420_fimd_driver_data = {
struct fimd_context {
struct device *dev;
struct drm_device *drm_dev;
+ void *dma_priv;
struct exynos_drm_crtc *crtc;
struct exynos_drm_plane planes[WINDOWS_NR];
struct exynos_drm_plane_config configs[WINDOWS_NR];
@@ -1090,7 +1091,7 @@ static int fimd_bind(struct device *dev, struct device *master, void *data)
if (is_drm_iommu_supported(drm_dev))
fimd_clear_channels(ctx->crtc);
- return exynos_drm_register_dma(drm_dev, dev);
+ return exynos_drm_register_dma(drm_dev, dev, &ctx->dma_priv);
}
static void fimd_unbind(struct device *dev, struct device *master,
@@ -1100,7 +1101,7 @@ static void fimd_unbind(struct device *dev, struct device *master,
fimd_atomic_disable(ctx->crtc);
- exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev);
+ exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev, &ctx->dma_priv);
if (ctx->encoder)
exynos_dpi_remove(ctx->encoder);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index 2a3382d43bc9..fcee33a43aca 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -232,6 +232,7 @@ struct g2d_runqueue_node {
struct g2d_data {
struct device *dev;
+ void *dma_priv;
struct clk *gate_clk;
void __iomem *regs;
int irq;
@@ -1409,7 +1410,7 @@ static int g2d_bind(struct device *dev, struct device *master, void *data)
return ret;
}
- ret = exynos_drm_register_dma(drm_dev, dev);
+ ret = exynos_drm_register_dma(drm_dev, dev, &g2d->dma_priv);
if (ret < 0) {
dev_err(dev, "failed to enable iommu.\n");
g2d_fini_cmdlist(g2d);
@@ -1434,7 +1435,7 @@ static void g2d_unbind(struct device *dev, struct device *master, void *data)
priv->g2d_dev = NULL;
cancel_work_sync(&g2d->runqueue_work);
- exynos_drm_unregister_dma(g2d->drm_dev, dev);
+ exynos_drm_unregister_dma(g2d->drm_dev, dev, &g2d->dma_priv);
}
static const struct component_ops g2d_component_ops = {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
index 88b6fcaa20be..45e9aee8366a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -97,6 +97,7 @@ struct gsc_scaler {
struct gsc_context {
struct exynos_drm_ipp ipp;
struct drm_device *drm_dev;
+ void *dma_priv;
struct device *dev;
struct exynos_drm_ipp_task *task;
struct exynos_drm_ipp_formats *formats;
@@ -1169,7 +1170,7 @@ static int gsc_bind(struct device *dev, struct device *master, void *data)
ctx->drm_dev = drm_dev;
ipp->drm_dev = drm_dev;
- exynos_drm_register_dma(drm_dev, dev);
+ exynos_drm_register_dma(drm_dev, dev, &ctx->dma_priv);
exynos_drm_ipp_register(dev, ipp, &ipp_funcs,
DRM_EXYNOS_IPP_CAP_CROP | DRM_EXYNOS_IPP_CAP_ROTATE |
@@ -1189,7 +1190,7 @@ static void gsc_unbind(struct device *dev, struct device *master,
struct exynos_drm_ipp *ipp = &ctx->ipp;
exynos_drm_ipp_unregister(dev, ipp);
- exynos_drm_unregister_dma(drm_dev, dev);
+ exynos_drm_unregister_dma(drm_dev, dev, &ctx->dma_priv);
}
static const struct component_ops gsc_component_ops = {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
index b98482990d1a..dafa87b82052 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
@@ -56,6 +56,7 @@ struct rot_variant {
struct rot_context {
struct exynos_drm_ipp ipp;
struct drm_device *drm_dev;
+ void *dma_priv;
struct device *dev;
void __iomem *regs;
struct clk *clock;
@@ -243,7 +244,7 @@ static int rotator_bind(struct device *dev, struct device *master, void *data)
rot->drm_dev = drm_dev;
ipp->drm_dev = drm_dev;
- exynos_drm_register_dma(drm_dev, dev);
+ exynos_drm_register_dma(drm_dev, dev, &rot->dma_priv);
exynos_drm_ipp_register(dev, ipp, &ipp_funcs,
DRM_EXYNOS_IPP_CAP_CROP | DRM_EXYNOS_IPP_CAP_ROTATE,
@@ -261,7 +262,7 @@ static void rotator_unbind(struct device *dev, struct device *master,
struct exynos_drm_ipp *ipp = &rot->ipp;
exynos_drm_ipp_unregister(dev, ipp);
- exynos_drm_unregister_dma(rot->drm_dev, rot->dev);
+ exynos_drm_unregister_dma(rot->drm_dev, rot->dev, &rot->dma_priv);
}
static const struct component_ops rotator_component_ops = {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_scaler.c b/drivers/gpu/drm/exynos/exynos_drm_scaler.c
index 497973e9b2c5..93c43c8d914e 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_scaler.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_scaler.c
@@ -39,6 +39,7 @@ struct scaler_data {
struct scaler_context {
struct exynos_drm_ipp ipp;
struct drm_device *drm_dev;
+ void *dma_priv;
struct device *dev;
void __iomem *regs;
struct clk *clock[SCALER_MAX_CLK];
@@ -450,7 +451,7 @@ static int scaler_bind(struct device *dev, struct device *master, void *data)
scaler->drm_dev = drm_dev;
ipp->drm_dev = drm_dev;
- exynos_drm_register_dma(drm_dev, dev);
+ exynos_drm_register_dma(drm_dev, dev, &scaler->dma_priv);
exynos_drm_ipp_register(dev, ipp, &ipp_funcs,
DRM_EXYNOS_IPP_CAP_CROP | DRM_EXYNOS_IPP_CAP_ROTATE |
@@ -470,7 +471,8 @@ static void scaler_unbind(struct device *dev, struct device *master,
struct exynos_drm_ipp *ipp = &scaler->ipp;
exynos_drm_ipp_unregister(dev, ipp);
- exynos_drm_unregister_dma(scaler->drm_dev, scaler->dev);
+ exynos_drm_unregister_dma(scaler->drm_dev, scaler->dev,
+ &scaler->dma_priv);
}
static const struct component_ops scaler_component_ops = {
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index 38ae9c32feef..21b726baedea 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -94,6 +94,7 @@ struct mixer_context {
struct platform_device *pdev;
struct device *dev;
struct drm_device *drm_dev;
+ void *dma_priv;
struct exynos_drm_crtc *crtc;
struct exynos_drm_plane planes[MIXER_WIN_NR];
unsigned long flags;
@@ -894,12 +895,14 @@ static int mixer_initialize(struct mixer_context *mixer_ctx,
}
}
- return exynos_drm_register_dma(drm_dev, mixer_ctx->dev);
+ return exynos_drm_register_dma(drm_dev, mixer_ctx->dev,
+ &mixer_ctx->dma_priv);
}
static void mixer_ctx_remove(struct mixer_context *mixer_ctx)
{
- exynos_drm_unregister_dma(mixer_ctx->drm_dev, mixer_ctx->dev);
+ exynos_drm_unregister_dma(mixer_ctx->drm_dev, mixer_ctx->dev,
+ &mixer_ctx->dma_priv);
}
static int mixer_enable_vblank(struct exynos_drm_crtc *crtc)
diff --git a/drivers/gpu/drm/i915/.gitignore b/drivers/gpu/drm/i915/.gitignore
index d9a77f3b59b2..81972dce1aff 100644
--- a/drivers/gpu/drm/i915/.gitignore
+++ b/drivers/gpu/drm/i915/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
*.hdrtest
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 9f887a86e555..44c506b7e117 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -28,9 +28,6 @@ subdir-ccflags-$(CONFIG_DRM_I915_WERROR) += -Werror
CFLAGS_i915_pci.o = $(call cc-disable-warning, override-init)
CFLAGS_display/intel_fbdev.o = $(call cc-disable-warning, override-init)
-subdir-ccflags-y += \
- $(call as-instr,movntdqa (%eax)$(comma)%xmm0,-DCONFIG_AS_MOVNTDQA)
-
subdir-ccflags-y += -I$(srctree)/$(src)
# Please keep these build lists sorted!
@@ -92,6 +89,7 @@ gt-y += \
gt/intel_engine_pool.o \
gt/intel_engine_user.o \
gt/intel_ggtt.o \
+ gt/intel_ggtt_fencing.o \
gt/intel_gt.o \
gt/intel_gt_irq.o \
gt/intel_gt_pm.o \
@@ -153,7 +151,6 @@ i915-y += \
i915_buddy.o \
i915_cmd_parser.o \
i915_gem_evict.o \
- i915_gem_fence_reg.o \
i915_gem_gtt.o \
i915_gem.o \
i915_globals.o \
@@ -167,14 +164,18 @@ i915-y += \
# general-purpose microcontroller (GuC) support
i915-y += gt/uc/intel_uc.o \
+ gt/uc/intel_uc_debugfs.o \
gt/uc/intel_uc_fw.o \
gt/uc/intel_guc.o \
gt/uc/intel_guc_ads.o \
gt/uc/intel_guc_ct.o \
+ gt/uc/intel_guc_debugfs.o \
gt/uc/intel_guc_fw.o \
gt/uc/intel_guc_log.o \
+ gt/uc/intel_guc_log_debugfs.o \
gt/uc/intel_guc_submission.o \
gt/uc/intel_huc.o \
+ gt/uc/intel_huc_debugfs.o \
gt/uc/intel_huc_fw.o
# modesetting core code
@@ -243,23 +244,6 @@ i915-y += \
display/vlv_dsi.o \
display/vlv_dsi_pll.o
-# perf code
-i915-y += \
- oa/i915_oa_hsw.o \
- oa/i915_oa_bdw.o \
- oa/i915_oa_chv.o \
- oa/i915_oa_sklgt2.o \
- oa/i915_oa_sklgt3.o \
- oa/i915_oa_sklgt4.o \
- oa/i915_oa_bxt.o \
- oa/i915_oa_kblgt2.o \
- oa/i915_oa_kblgt3.o \
- oa/i915_oa_glk.o \
- oa/i915_oa_cflgt2.o \
- oa/i915_oa_cflgt3.o \
- oa/i915_oa_cnl.o \
- oa/i915_oa_icl.o \
- oa/i915_oa_tgl.o
i915-y += i915_perf.o
# Post-mortem debug and GPU hang state capture
diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
index 17cee6f80d8b..99a25c0bb08f 100644
--- a/drivers/gpu/drm/i915/display/icl_dsi.c
+++ b/drivers/gpu/drm/i915/display/icl_dsi.c
@@ -186,16 +186,19 @@ static int dsi_send_pkt_hdr(struct intel_dsi_host *host,
static int dsi_send_pkt_payld(struct intel_dsi_host *host,
struct mipi_dsi_packet pkt)
{
+ struct intel_dsi *intel_dsi = host->intel_dsi;
+ struct drm_i915_private *i915 = to_i915(intel_dsi->base.base.dev);
+
/* payload queue can accept *256 bytes*, check limit */
if (pkt.payload_length > MAX_PLOAD_CREDIT * 4) {
- DRM_ERROR("payload size exceeds max queue limit\n");
+ drm_err(&i915->drm, "payload size exceeds max queue limit\n");
return -1;
}
/* load data into command payload queue */
if (!add_payld_to_queue(host, pkt.payload,
pkt.payload_length)) {
- DRM_ERROR("adding payload to queue failed\n");
+ drm_err(&i915->drm, "adding payload to queue failed\n");
return -1;
}
@@ -744,6 +747,18 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder,
tmp |= VIDEO_MODE_SYNC_PULSE;
break;
}
+ } else {
+ /*
+ * FIXME: Retrieve this info from VBT.
+ * As per the spec, when the DSI transcoder is operating
+ * in TE GATE mode, TE comes from a GPIO,
+ * which is the UTIL PIN for DSI 0.
+ * We also assume this GPIO is not used for
+ * any other purpose.
+ */
+ tmp &= ~OP_MODE_MASK;
+ tmp |= CMD_MODE_TE_GATE;
+ tmp |= TE_SOURCE_GPIO;
}
intel_de_write(dev_priv, DSI_TRANS_FUNC_CONF(dsi_trans), tmp);
@@ -837,14 +852,33 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
}
hactive = adjusted_mode->crtc_hdisplay;
- htotal = DIV_ROUND_UP(adjusted_mode->crtc_htotal * mul, div);
+
+ if (is_vid_mode(intel_dsi))
+ htotal = DIV_ROUND_UP(adjusted_mode->crtc_htotal * mul, div);
+ else
+ htotal = DIV_ROUND_UP((hactive + 160) * mul, div);
+
hsync_start = DIV_ROUND_UP(adjusted_mode->crtc_hsync_start * mul, div);
hsync_end = DIV_ROUND_UP(adjusted_mode->crtc_hsync_end * mul, div);
hsync_size = hsync_end - hsync_start;
hback_porch = (adjusted_mode->crtc_htotal -
adjusted_mode->crtc_hsync_end);
vactive = adjusted_mode->crtc_vdisplay;
- vtotal = adjusted_mode->crtc_vtotal;
+
+ if (is_vid_mode(intel_dsi)) {
+ vtotal = adjusted_mode->crtc_vtotal;
+ } else {
+ int bpp, line_time_us, byte_clk_period_ns;
+
+ if (crtc_state->dsc.compression_enable)
+ bpp = crtc_state->dsc.compressed_bpp;
+ else
+ bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
+
+ byte_clk_period_ns = 1000000 / afe_clk(encoder, crtc_state);
+ line_time_us = (htotal * (bpp / 8) * byte_clk_period_ns) / (1000 * intel_dsi->lane_count);
+ vtotal = vactive + DIV_ROUND_UP(400, line_time_us);
+ }
vsync_start = adjusted_mode->crtc_vsync_start;
vsync_end = adjusted_mode->crtc_vsync_end;
vsync_shift = hsync_start - htotal / 2;
@@ -873,7 +907,7 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
}
/* TRANS_HSYNC register to be programmed only for video mode */
- if (intel_dsi->operation_mode == INTEL_DSI_VIDEO_MODE) {
+ if (is_vid_mode(intel_dsi)) {
if (intel_dsi->video_mode_format ==
VIDEO_MODE_NON_BURST_WITH_SYNC_PULSE) {
/* BSPEC: hsync size should be at least 16 pixels */
@@ -916,22 +950,27 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
if (vsync_start < vactive)
drm_err(&dev_priv->drm, "vsync_start less than vactive\n");
- /* program TRANS_VSYNC register */
- for_each_dsi_port(port, intel_dsi->ports) {
- dsi_trans = dsi_port_to_transcoder(port);
- intel_de_write(dev_priv, VSYNC(dsi_trans),
- (vsync_start - 1) | ((vsync_end - 1) << 16));
+ /* program TRANS_VSYNC register for video mode only */
+ if (is_vid_mode(intel_dsi)) {
+ for_each_dsi_port(port, intel_dsi->ports) {
+ dsi_trans = dsi_port_to_transcoder(port);
+ intel_de_write(dev_priv, VSYNC(dsi_trans),
+ (vsync_start - 1) | ((vsync_end - 1) << 16));
+ }
}
/*
- * FIXME: It has to be programmed only for interlaced
+ * FIXME: It has to be programmed only for video modes and interlaced
* modes. Put the check condition here once interlaced
* info available as described above.
* program TRANS_VSYNCSHIFT register
*/
- for_each_dsi_port(port, intel_dsi->ports) {
- dsi_trans = dsi_port_to_transcoder(port);
- intel_de_write(dev_priv, VSYNCSHIFT(dsi_trans), vsync_shift);
+ if (is_vid_mode(intel_dsi)) {
+ for_each_dsi_port(port, intel_dsi->ports) {
+ dsi_trans = dsi_port_to_transcoder(port);
+ intel_de_write(dev_priv, VSYNCSHIFT(dsi_trans),
+ vsync_shift);
+ }
}
/* program TRANS_VBLANK register, should be same as vtotal programmed */
@@ -1016,6 +1055,32 @@ static void gen11_dsi_setup_timeouts(struct intel_encoder *encoder,
}
}
+static void gen11_dsi_config_util_pin(struct intel_encoder *encoder,
+ bool enable)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ u32 tmp;
+
+ /*
+ * The UTIL pin is used as the TE input for DSI0;
+ * for dual link/DSI1, TE comes from the slave DSI1
+ * through a GPIO.
+ */
+ if (is_vid_mode(intel_dsi) || (intel_dsi->ports & BIT(PORT_B)))
+ return;
+
+ tmp = intel_de_read(dev_priv, UTIL_PIN_CTL);
+
+ if (enable) {
+ tmp |= UTIL_PIN_DIRECTION_INPUT;
+ tmp |= UTIL_PIN_ENABLE;
+ } else {
+ tmp &= ~UTIL_PIN_ENABLE;
+ }
+ intel_de_write(dev_priv, UTIL_PIN_CTL, tmp);
+}
+
static void
gen11_dsi_enable_port_and_phy(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
@@ -1037,6 +1102,9 @@ gen11_dsi_enable_port_and_phy(struct intel_encoder *encoder,
/* setup D-PHY timings */
gen11_dsi_setup_dphy_timings(encoder, crtc_state);
+ /* Set up the UTIL pin, since the transcoder takes its TE events from GPIO */
+ gen11_dsi_config_util_pin(encoder, true);
+
/* step 4h: setup DSI protocol timeouts */
gen11_dsi_setup_timeouts(encoder, crtc_state);
@@ -1088,7 +1156,8 @@ static void gen11_dsi_powerup_panel(struct intel_encoder *encoder)
wait_for_cmds_dispatched_to_panel(encoder);
}
-static void gen11_dsi_pre_pll_enable(struct intel_encoder *encoder,
+static void gen11_dsi_pre_pll_enable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
@@ -1099,7 +1168,8 @@ static void gen11_dsi_pre_pll_enable(struct intel_encoder *encoder,
gen11_dsi_program_esc_clk_div(encoder, crtc_state);
}
-static void gen11_dsi_pre_enable(struct intel_encoder *encoder,
+static void gen11_dsi_pre_enable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
@@ -1118,7 +1188,8 @@ static void gen11_dsi_pre_enable(struct intel_encoder *encoder,
gen11_dsi_set_transcoder_timings(encoder, pipe_config);
}
-static void gen11_dsi_enable(struct intel_encoder *encoder,
+static void gen11_dsi_enable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
@@ -1180,6 +1251,15 @@ static void gen11_dsi_deconfigure_trancoder(struct intel_encoder *encoder)
enum transcoder dsi_trans;
u32 tmp;
+ /* disable periodic update mode */
+ if (is_cmd_mode(intel_dsi)) {
+ for_each_dsi_port(port, intel_dsi->ports) {
+ tmp = intel_de_read(dev_priv, DSI_CMD_FRMCTL(port));
+ tmp &= ~DSI_PERIODIC_FRAME_UPDATE_ENABLE;
+ intel_de_write(dev_priv, DSI_CMD_FRMCTL(port), tmp);
+ }
+ }
+
/* put dsi link in ULPS */
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
@@ -1264,7 +1344,8 @@ static void gen11_dsi_disable_io_power(struct intel_encoder *encoder)
}
}
-static void gen11_dsi_disable(struct intel_encoder *encoder,
+static void gen11_dsi_disable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
@@ -1286,11 +1367,14 @@ static void gen11_dsi_disable(struct intel_encoder *encoder,
/* step3: disable port */
gen11_dsi_disable_port(encoder);
+ gen11_dsi_config_util_pin(encoder, false);
+
/* step4: disable IO power */
gen11_dsi_disable_io_power(encoder);
}
-static void gen11_dsi_post_disable(struct intel_encoder *encoder,
+static void gen11_dsi_post_disable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
@@ -1347,6 +1431,22 @@ static void gen11_dsi_get_timings(struct intel_encoder *encoder,
adjusted_mode->crtc_vblank_end = adjusted_mode->crtc_vtotal;
}
+static bool gen11_dsi_is_periodic_cmd_mode(struct intel_dsi *intel_dsi)
+{
+ struct drm_device *dev = intel_dsi->base.base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ enum transcoder dsi_trans;
+ u32 val;
+
+ if (intel_dsi->ports == BIT(PORT_B))
+ dsi_trans = TRANSCODER_DSI_1;
+ else
+ dsi_trans = TRANSCODER_DSI_0;
+
+ val = intel_de_read(dev_priv, DSI_TRANS_FUNC_CONF(dsi_trans));
+ return (val & DSI_PERIODIC_FRAME_UPDATE_ENABLE);
+}
+
static void gen11_dsi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
@@ -1367,6 +1467,10 @@ static void gen11_dsi_get_config(struct intel_encoder *encoder,
gen11_dsi_get_timings(encoder, pipe_config);
pipe_config->output_types |= BIT(INTEL_OUTPUT_DSI);
pipe_config->pipe_bpp = bdw_get_pipemisc_bpp(crtc);
+
+ if (gen11_dsi_is_periodic_cmd_mode(intel_dsi))
+ pipe_config->hw.adjusted_mode.private_flags |=
+ I915_MODE_FLAG_DSI_PERIODIC_CMD_MODE;
}
static int gen11_dsi_dsc_compute_config(struct intel_encoder *encoder,
@@ -1417,6 +1521,7 @@ static int gen11_dsi_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi,
base);
struct intel_connector *intel_connector = intel_dsi->attached_connector;
@@ -1446,10 +1551,32 @@ static int gen11_dsi_compute_config(struct intel_encoder *encoder,
pipe_config->clock_set = true;
if (gen11_dsi_dsc_compute_config(encoder, pipe_config))
- DRM_DEBUG_KMS("Attempting to use DSC failed\n");
+ drm_dbg_kms(&i915->drm, "Attempting to use DSC failed\n");
pipe_config->port_clock = afe_clk(encoder, pipe_config) / 5;
+ /* We do not operate in periodic command mode */
+ pipe_config->hw.adjusted_mode.private_flags &=
+ ~I915_MODE_FLAG_DSI_PERIODIC_CMD_MODE;
+
+ /*
+ * In TE GATE command mode, we
+ * receive TE from the slave if
+ * dual link is enabled.
+ */
+ if (is_cmd_mode(intel_dsi)) {
+ if (intel_dsi->ports == (BIT(PORT_B) | BIT(PORT_A)))
+ pipe_config->hw.adjusted_mode.private_flags |=
+ I915_MODE_FLAG_DSI_USE_TE1 |
+ I915_MODE_FLAG_DSI_USE_TE0;
+ else if (intel_dsi->ports == BIT(PORT_B))
+ pipe_config->hw.adjusted_mode.private_flags |=
+ I915_MODE_FLAG_DSI_USE_TE1;
+ else
+ pipe_config->hw.adjusted_mode.private_flags |=
+ I915_MODE_FLAG_DSI_USE_TE0;
+ }
+
return 0;
}
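To make the command-mode vtotal computation above concrete, with illustrative numbers: an AFE clock of 500000 kHz gives byte_clk_period_ns = 1000000 / 500000 = 2; with bpp = 24, four lanes and htotal = 1200, line_time_us = (1200 * 3 * 2) / (1000 * 4) = 1 after integer division, so vtotal = vactive + DIV_ROUND_UP(400, 1) = vactive + 400 lines of blanking headroom for the periodic command-mode frame update.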
diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
index 457b258683d3..25dfeb3197aa 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
@@ -264,6 +264,20 @@ void intel_plane_copy_uapi_to_hw_state(struct intel_plane_state *plane_state,
plane_state->hw.color_range = from_plane_state->uapi.color_range;
}
+void intel_plane_set_invisible(struct intel_crtc_state *crtc_state,
+ struct intel_plane_state *plane_state)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+
+ crtc_state->active_planes &= ~BIT(plane->id);
+ crtc_state->nv12_planes &= ~BIT(plane->id);
+ crtc_state->c8_planes &= ~BIT(plane->id);
+ crtc_state->data_rate[plane->id] = 0;
+ crtc_state->min_cdclk[plane->id] = 0;
+
+ plane_state->uapi.visible = false;
+}
+
int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_state,
struct intel_crtc_state *new_crtc_state,
const struct intel_plane_state *old_plane_state,
@@ -273,12 +287,7 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_
const struct drm_framebuffer *fb = new_plane_state->hw.fb;
int ret;
- new_crtc_state->active_planes &= ~BIT(plane->id);
- new_crtc_state->nv12_planes &= ~BIT(plane->id);
- new_crtc_state->c8_planes &= ~BIT(plane->id);
- new_crtc_state->data_rate[plane->id] = 0;
- new_crtc_state->min_cdclk[plane->id] = 0;
- new_plane_state->uapi.visible = false;
+ intel_plane_set_invisible(new_crtc_state, new_plane_state);
if (!new_plane_state->hw.crtc && !old_plane_state->hw.crtc)
return 0;
diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.h b/drivers/gpu/drm/i915/display/intel_atomic_plane.h
index a6bbf42bae1f..59dd1fbb02ea 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic_plane.h
+++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.h
@@ -52,5 +52,7 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat
int intel_plane_calc_min_cdclk(struct intel_atomic_state *state,
struct intel_plane *plane,
bool *need_cdclk_calc);
+void intel_plane_set_invisible(struct intel_crtc_state *crtc_state,
+ struct intel_plane_state *plane_state);
#endif /* __INTEL_ATOMIC_PLANE_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c
index 62f234f641de..57b80971ae78 100644
--- a/drivers/gpu/drm/i915/display/intel_audio.c
+++ b/drivers/gpu/drm/i915/display/intel_audio.c
@@ -252,14 +252,16 @@ static u32 audio_config_hdmi_pixel_clock(const struct intel_crtc_state *crtc_sta
i = ARRAY_SIZE(hdmi_audio_clock);
if (i == ARRAY_SIZE(hdmi_audio_clock)) {
- DRM_DEBUG_KMS("HDMI audio pixel clock setting for %d not found, falling back to defaults\n",
- adjusted_mode->crtc_clock);
+ drm_dbg_kms(&dev_priv->drm,
+ "HDMI audio pixel clock setting for %d not found, falling back to defaults\n",
+ adjusted_mode->crtc_clock);
i = 1;
}
- DRM_DEBUG_KMS("Configuring HDMI audio for pixel clock %d (0x%08x)\n",
- hdmi_audio_clock[i].clock,
- hdmi_audio_clock[i].config);
+ drm_dbg_kms(&dev_priv->drm,
+ "Configuring HDMI audio for pixel clock %d (0x%08x)\n",
+ hdmi_audio_clock[i].clock,
+ hdmi_audio_clock[i].config);
return hdmi_audio_clock[i].config;
}
@@ -891,7 +893,7 @@ static unsigned long i915_audio_component_get_power(struct device *kdev)
ret = intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
if (dev_priv->audio_power_refcount++ == 0) {
- if (IS_TIGERLAKE(dev_priv) || IS_ICELAKE(dev_priv)) {
+ if (INTEL_GEN(dev_priv) >= 9) {
intel_de_write(dev_priv, AUD_FREQ_CNTRL,
dev_priv->audio_freq_cntrl);
drm_dbg_kms(&dev_priv->drm,
@@ -931,7 +933,7 @@ static void i915_audio_component_codec_wake_override(struct device *kdev,
unsigned long cookie;
u32 tmp;
- if (!IS_GEN(dev_priv, 9))
+ if (INTEL_GEN(dev_priv) < 9)
return;
cookie = i915_audio_component_get_power(kdev);
@@ -1173,7 +1175,7 @@ static void i915_audio_component_init(struct drm_i915_private *dev_priv)
return;
}
- if (IS_TIGERLAKE(dev_priv) || IS_ICELAKE(dev_priv)) {
+ if (INTEL_GEN(dev_priv) >= 9) {
dev_priv->audio_freq_cntrl = intel_de_read(dev_priv,
AUD_FREQ_CNTRL);
drm_dbg_kms(&dev_priv->drm,
diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c
index 58b264bc318d..88f367eb28ea 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.c
+++ b/drivers/gpu/drm/i915/display/intel_bw.c
@@ -338,16 +338,17 @@ void intel_bw_crtc_update(struct intel_bw_state *bw_state,
const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
bw_state->data_rate[crtc->pipe] =
intel_bw_crtc_data_rate(crtc_state);
bw_state->num_active_planes[crtc->pipe] =
intel_bw_crtc_num_active_planes(crtc_state);
- DRM_DEBUG_KMS("pipe %c data rate %u num active planes %u\n",
- pipe_name(crtc->pipe),
- bw_state->data_rate[crtc->pipe],
- bw_state->num_active_planes[crtc->pipe]);
+ drm_dbg_kms(&i915->drm, "pipe %c data rate %u num active planes %u\n",
+ pipe_name(crtc->pipe),
+ bw_state->data_rate[crtc->pipe],
+ bw_state->num_active_planes[crtc->pipe]);
}
static unsigned int intel_bw_num_active_planes(struct drm_i915_private *dev_priv,
diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c
index c1cce93a1c25..98ece9cd7cdd 100644
--- a/drivers/gpu/drm/i915/display/intel_color.c
+++ b/drivers/gpu/drm/i915/display/intel_color.c
@@ -460,6 +460,16 @@ static void ilk_lut_10_pack(struct drm_color_lut *entry, u32 val)
entry->blue = intel_color_lut_pack(REG_FIELD_GET(PREC_PALETTE_BLUE_MASK, val), 10);
}
+static void icl_lut_multi_seg_pack(struct drm_color_lut *entry, u32 ldw, u32 udw)
+{
+ entry->red = REG_FIELD_GET(PAL_PREC_MULTI_SEG_RED_UDW_MASK, udw) << 6 |
+ REG_FIELD_GET(PAL_PREC_MULTI_SEG_RED_LDW_MASK, ldw);
+ entry->green = REG_FIELD_GET(PAL_PREC_MULTI_SEG_GREEN_UDW_MASK, udw) << 6 |
+ REG_FIELD_GET(PAL_PREC_MULTI_SEG_GREEN_LDW_MASK, ldw);
+ entry->blue = REG_FIELD_GET(PAL_PREC_MULTI_SEG_BLUE_UDW_MASK, udw) << 6 |
+ REG_FIELD_GET(PAL_PREC_MULTI_SEG_BLUE_LDW_MASK, ldw);
+}
+
static void i9xx_color_commit(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
@@ -893,7 +903,7 @@ icl_load_gcmax(const struct intel_crtc_state *crtc_state,
struct intel_dsb *dsb = intel_dsb_get(crtc);
enum pipe pipe = crtc->pipe;
- /* Fixme: LUT entries are 16 bit only, so we can prog 0xFFFF max */
+ /* FIXME LUT entries are 16 bit only, so we can prog 0xFFFF max */
intel_dsb_reg_write(dsb, PREC_PAL_GC_MAX(pipe, 0), color->red);
intel_dsb_reg_write(dsb, PREC_PAL_GC_MAX(pipe, 1), color->green);
intel_dsb_reg_write(dsb, PREC_PAL_GC_MAX(pipe, 2), color->blue);
@@ -1630,6 +1640,24 @@ static int glk_gamma_precision(const struct intel_crtc_state *crtc_state)
}
}
+static int icl_gamma_precision(const struct intel_crtc_state *crtc_state)
+{
+ if ((crtc_state->gamma_mode & POST_CSC_GAMMA_ENABLE) == 0)
+ return 0;
+
+ switch (crtc_state->gamma_mode & GAMMA_MODE_MODE_MASK) {
+ case GAMMA_MODE_MODE_8BIT:
+ return 8;
+ case GAMMA_MODE_MODE_10BIT:
+ return 10;
+ case GAMMA_MODE_MODE_12BIT_MULTI_SEGMENTED:
+ return 16;
+ default:
+ MISSING_CASE(crtc_state->gamma_mode);
+ return 0;
+ }
+}
+
int intel_color_get_gamma_bit_precision(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
@@ -1641,7 +1669,9 @@ int intel_color_get_gamma_bit_precision(const struct intel_crtc_state *crtc_stat
else
return i9xx_gamma_precision(crtc_state);
} else {
- if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
+ if (INTEL_GEN(dev_priv) >= 11)
+ return icl_gamma_precision(crtc_state);
+ else if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
return glk_gamma_precision(crtc_state);
else if (IS_IRONLAKE(dev_priv))
return ilk_gamma_precision(crtc_state);
@@ -1658,9 +1688,9 @@ static bool err_check(struct drm_color_lut *lut1,
((abs((long)lut2->green - lut1->green)) <= err);
}
-static bool intel_color_lut_entry_equal(struct drm_color_lut *lut1,
- struct drm_color_lut *lut2,
- int lut_size, u32 err)
+static bool intel_color_lut_entries_equal(struct drm_color_lut *lut1,
+ struct drm_color_lut *lut2,
+ int lut_size, u32 err)
{
int i;
@@ -1690,16 +1720,8 @@ bool intel_color_lut_equal(struct drm_property_blob *blob1,
lut_size2 = drm_color_lut_size(blob2);
/* check sw and hw lut size */
- switch (gamma_mode) {
- case GAMMA_MODE_MODE_8BIT:
- case GAMMA_MODE_MODE_10BIT:
- if (lut_size1 != lut_size2)
- return false;
- break;
- default:
- MISSING_CASE(gamma_mode);
- return false;
- }
+ if (lut_size1 != lut_size2)
+ return false;
lut1 = blob1->data;
lut2 = blob2->data;
@@ -1707,11 +1729,16 @@ bool intel_color_lut_equal(struct drm_property_blob *blob1,
err = 0xffff >> bit_precision;
/* check sw and hw lut entry to be equal */
- switch (gamma_mode) {
+ switch (gamma_mode & GAMMA_MODE_MODE_MASK) {
case GAMMA_MODE_MODE_8BIT:
case GAMMA_MODE_MODE_10BIT:
- if (!intel_color_lut_entry_equal(lut1, lut2,
- lut_size2, err))
+ if (!intel_color_lut_entries_equal(lut1, lut2,
+ lut_size2, err))
+ return false;
+ break;
+ case GAMMA_MODE_MODE_12BIT_MULTI_SEGMENTED:
+ if (!intel_color_lut_entries_equal(lut1, lut2,
+ 9, err))
return false;
break;
default:
@@ -1946,6 +1973,63 @@ static void glk_read_luts(struct intel_crtc_state *crtc_state)
crtc_state->hw.gamma_lut = glk_read_lut_10(crtc, PAL_PREC_INDEX_VALUE(0));
}
+static struct drm_property_blob *
+icl_read_lut_multi_segment(struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ int i, lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size;
+ enum pipe pipe = crtc->pipe;
+ struct drm_property_blob *blob;
+ struct drm_color_lut *lut;
+
+ blob = drm_property_create_blob(&dev_priv->drm,
+ sizeof(struct drm_color_lut) * lut_size,
+ NULL);
+ if (IS_ERR(blob))
+ return NULL;
+
+ lut = blob->data;
+
+ intel_de_write(dev_priv, PREC_PAL_MULTI_SEG_INDEX(pipe),
+ PAL_PREC_AUTO_INCREMENT);
+
+ for (i = 0; i < 9; i++) {
+ u32 ldw = intel_de_read(dev_priv, PREC_PAL_MULTI_SEG_DATA(pipe));
+ u32 udw = intel_de_read(dev_priv, PREC_PAL_MULTI_SEG_DATA(pipe));
+
+ icl_lut_multi_seg_pack(&lut[i], ldw, udw);
+ }
+
+ intel_de_write(dev_priv, PREC_PAL_MULTI_SEG_INDEX(pipe), 0);
+
+ /*
+ * FIXME: readouts from the PAL_PREC_DATA register aren't giving
+ * correct values for the fine and coarse segments, so the
+ * readout is restricted to the super-fine segment for now.
+ */
+
+ return blob;
+}
+
+static void icl_read_luts(struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
+ if ((crtc_state->gamma_mode & POST_CSC_GAMMA_ENABLE) == 0)
+ return;
+
+ switch (crtc_state->gamma_mode & GAMMA_MODE_MODE_MASK) {
+ case GAMMA_MODE_MODE_8BIT:
+ crtc_state->hw.gamma_lut = ilk_read_lut_8(crtc);
+ break;
+ case GAMMA_MODE_MODE_12BIT_MULTI_SEGMENTED:
+ crtc_state->hw.gamma_lut = icl_read_lut_multi_segment(crtc);
+ break;
+ default:
+ crtc_state->hw.gamma_lut = glk_read_lut_10(crtc, PAL_PREC_INDEX_VALUE(0));
+ }
+}
+
void intel_color_init(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@@ -1989,6 +2073,7 @@ void intel_color_init(struct intel_crtc *crtc)
if (INTEL_GEN(dev_priv) >= 11) {
dev_priv->display.load_luts = icl_load_luts;
+ dev_priv->display.read_luts = icl_read_luts;
} else if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv)) {
dev_priv->display.load_luts = glk_load_luts;
dev_priv->display.read_luts = glk_read_luts;
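A quick arithmetic check of icl_lut_multi_seg_pack() above (field widths per the mask names, assumed 6 low and 10 high bits per channel): each colour channel is reassembled as (udw_field << 6) | ldw_field, so udw red = 0x3ff with ldw red = 0x3f yields (0x3ff << 6) | 0x3f = 0xffff, i.e. full scale at the 16-bit precision that icl_gamma_precision() reports for the multi-segmented mode.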
diff --git a/drivers/gpu/drm/i915/display/intel_connector.c b/drivers/gpu/drm/i915/display/intel_connector.c
index 903e49659f56..98ec2ea86c7c 100644
--- a/drivers/gpu/drm/i915/display/intel_connector.c
+++ b/drivers/gpu/drm/i915/display/intel_connector.c
@@ -290,7 +290,7 @@ intel_attach_colorspace_property(struct drm_connector *connector)
return;
break;
default:
- DRM_DEBUG_KMS("Colorspace property not supported\n");
+ MISSING_CASE(connector->connector_type);
return;
}
diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c
index 78f9b6cde810..a59ecbed0004 100644
--- a/drivers/gpu/drm/i915/display/intel_crt.c
+++ b/drivers/gpu/drm/i915/display/intel_crt.c
@@ -203,27 +203,31 @@ static void intel_crt_set_dpms(struct intel_encoder *encoder,
intel_de_write(dev_priv, crt->adpa_reg, adpa);
}
-static void intel_disable_crt(struct intel_encoder *encoder,
+static void intel_disable_crt(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
intel_crt_set_dpms(encoder, old_crtc_state, DRM_MODE_DPMS_OFF);
}
-static void pch_disable_crt(struct intel_encoder *encoder,
+static void pch_disable_crt(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
}
-static void pch_post_disable_crt(struct intel_encoder *encoder,
+static void pch_post_disable_crt(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- intel_disable_crt(encoder, old_crtc_state, old_conn_state);
+ intel_disable_crt(state, encoder, old_crtc_state, old_conn_state);
}
-static void hsw_disable_crt(struct intel_encoder *encoder,
+static void hsw_disable_crt(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
@@ -234,7 +238,8 @@ static void hsw_disable_crt(struct intel_encoder *encoder,
intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
}
-static void hsw_post_disable_crt(struct intel_encoder *encoder,
+static void hsw_post_disable_crt(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
@@ -250,19 +255,20 @@ static void hsw_post_disable_crt(struct intel_encoder *encoder,
intel_ddi_disable_pipe_clock(old_crtc_state);
- pch_post_disable_crt(encoder, old_crtc_state, old_conn_state);
+ pch_post_disable_crt(state, encoder, old_crtc_state, old_conn_state);
lpt_disable_pch_transcoder(dev_priv);
lpt_disable_iclkip(dev_priv);
- intel_ddi_fdi_post_disable(encoder, old_crtc_state, old_conn_state);
+ intel_ddi_fdi_post_disable(state, encoder, old_crtc_state, old_conn_state);
drm_WARN_ON(&dev_priv->drm, !old_crtc_state->has_pch_encoder);
intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
}
-static void hsw_pre_pll_enable_crt(struct intel_encoder *encoder,
+static void hsw_pre_pll_enable_crt(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
@@ -273,7 +279,8 @@ static void hsw_pre_pll_enable_crt(struct intel_encoder *encoder,
intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
}
-static void hsw_pre_enable_crt(struct intel_encoder *encoder,
+static void hsw_pre_enable_crt(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
@@ -290,7 +297,8 @@ static void hsw_pre_enable_crt(struct intel_encoder *encoder,
intel_ddi_enable_pipe_clock(crtc_state);
}
-static void hsw_enable_crt(struct intel_encoder *encoder,
+static void hsw_enable_crt(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
@@ -314,7 +322,8 @@ static void hsw_enable_crt(struct intel_encoder *encoder,
intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
}
-static void intel_enable_crt(struct intel_encoder *encoder,
+static void intel_enable_crt(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
@@ -594,7 +603,8 @@ static struct edid *intel_crt_get_edid(struct drm_connector *connector,
edid = drm_get_edid(connector, i2c);
if (!edid && !intel_gmbus_is_forced_bit(i2c)) {
- DRM_DEBUG_KMS("CRT GMBUS EDID read failed, retry using GPIO bit-banging\n");
+ drm_dbg_kms(connector->dev,
+ "CRT GMBUS EDID read failed, retry using GPIO bit-banging\n");
intel_gmbus_force_bit(i2c, true);
edid = drm_get_edid(connector, i2c);
intel_gmbus_force_bit(i2c, false);
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index 73d0f4648c06..be6c61bcbc9c 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -568,7 +568,7 @@ static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_hdmi[] = {
{ 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 850 3.0 */
};
-static const struct cnl_ddi_buf_trans ehl_combo_phy_ddi_translations_hbr2_hbr3[] = {
+static const struct cnl_ddi_buf_trans ehl_combo_phy_ddi_translations_dp[] = {
/* NT mV Trans mV db */
{ 0xA, 0x33, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
{ 0xA, 0x47, 0x36, 0x00, 0x09 }, /* 350 500 3.1 */
@@ -583,23 +583,51 @@ static const struct cnl_ddi_buf_trans ehl_combo_phy_ddi_translations_hbr2_hbr3[]
};
struct icl_mg_phy_ddi_buf_trans {
- u32 cri_txdeemph_override_5_0;
u32 cri_txdeemph_override_11_6;
+ u32 cri_txdeemph_override_5_0;
u32 cri_txdeemph_override_17_12;
};
-static const struct icl_mg_phy_ddi_buf_trans icl_mg_phy_ddi_translations[] = {
+static const struct icl_mg_phy_ddi_buf_trans icl_mg_phy_ddi_translations_rbr_hbr[] = {
+ /* Voltage swing pre-emphasis */
+ { 0x18, 0x00, 0x00 }, /* 0 0 */
+ { 0x1D, 0x00, 0x05 }, /* 0 1 */
+ { 0x24, 0x00, 0x0C }, /* 0 2 */
+ { 0x2B, 0x00, 0x14 }, /* 0 3 */
+ { 0x21, 0x00, 0x00 }, /* 1 0 */
+ { 0x2B, 0x00, 0x08 }, /* 1 1 */
+ { 0x30, 0x00, 0x0F }, /* 1 2 */
+ { 0x31, 0x00, 0x03 }, /* 2 0 */
+ { 0x34, 0x00, 0x0B }, /* 2 1 */
+ { 0x3F, 0x00, 0x00 }, /* 3 0 */
+};
+
+static const struct icl_mg_phy_ddi_buf_trans icl_mg_phy_ddi_translations_hbr2_hbr3[] = {
/* Voltage swing pre-emphasis */
- { 0x0, 0x1B, 0x00 }, /* 0 0 */
- { 0x0, 0x23, 0x08 }, /* 0 1 */
- { 0x0, 0x2D, 0x12 }, /* 0 2 */
- { 0x0, 0x00, 0x00 }, /* 0 3 */
- { 0x0, 0x23, 0x00 }, /* 1 0 */
- { 0x0, 0x2B, 0x09 }, /* 1 1 */
- { 0x0, 0x2E, 0x11 }, /* 1 2 */
- { 0x0, 0x2F, 0x00 }, /* 2 0 */
- { 0x0, 0x33, 0x0C }, /* 2 1 */
- { 0x0, 0x00, 0x00 }, /* 3 0 */
+ { 0x18, 0x00, 0x00 }, /* 0 0 */
+ { 0x1D, 0x00, 0x05 }, /* 0 1 */
+ { 0x24, 0x00, 0x0C }, /* 0 2 */
+ { 0x2B, 0x00, 0x14 }, /* 0 3 */
+ { 0x26, 0x00, 0x00 }, /* 1 0 */
+ { 0x2C, 0x00, 0x07 }, /* 1 1 */
+ { 0x33, 0x00, 0x0C }, /* 1 2 */
+ { 0x2E, 0x00, 0x00 }, /* 2 0 */
+ { 0x36, 0x00, 0x09 }, /* 2 1 */
+ { 0x3F, 0x00, 0x00 }, /* 3 0 */
+};
+
+static const struct icl_mg_phy_ddi_buf_trans icl_mg_phy_ddi_translations_hdmi[] = {
+ /* HDMI Preset VS Pre-emph */
+ { 0x1A, 0x0, 0x0 }, /* 1 400mV 0dB */
+ { 0x20, 0x0, 0x0 }, /* 2 500mV 0dB */
+ { 0x29, 0x0, 0x0 }, /* 3 650mV 0dB */
+ { 0x32, 0x0, 0x0 }, /* 4 800mV 0dB */
+ { 0x3F, 0x0, 0x0 }, /* 5 1000mV 0dB */
+ { 0x3A, 0x0, 0x5 }, /* 6 Full -1.5 dB */
+ { 0x39, 0x0, 0x6 }, /* 7 Full -1.8 dB */
+ { 0x38, 0x0, 0x7 }, /* 8 Full -2 dB */
+ { 0x37, 0x0, 0x8 }, /* 9 Full -2.5 dB */
+ { 0x36, 0x0, 0x9 }, /* 10 Full -3 dB */
};
struct tgl_dkl_phy_ddi_buf_trans {
@@ -943,13 +971,29 @@ icl_get_combo_buf_trans(struct drm_i915_private *dev_priv, int type, int rate,
return icl_combo_phy_ddi_translations_dp_hbr2;
}
+static const struct icl_mg_phy_ddi_buf_trans *
+icl_get_mg_buf_trans(struct drm_i915_private *dev_priv, int type, int rate,
+ int *n_entries)
+{
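+ /* Pick the MG PHY table by output type first, then by DP link rate. */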
+ if (type == INTEL_OUTPUT_HDMI) {
+ *n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations_hdmi);
+ return icl_mg_phy_ddi_translations_hdmi;
+ } else if (rate > 270000) {
+ *n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations_hbr2_hbr3);
+ return icl_mg_phy_ddi_translations_hbr2_hbr3;
+ }
+
+ *n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations_rbr_hbr);
+ return icl_mg_phy_ddi_translations_rbr_hbr;
+}
+
static const struct cnl_ddi_buf_trans *
ehl_get_combo_buf_trans(struct drm_i915_private *dev_priv, int type, int rate,
int *n_entries)
{
- if (type == INTEL_OUTPUT_DP && rate > 270000) {
- *n_entries = ARRAY_SIZE(ehl_combo_phy_ddi_translations_hbr2_hbr3);
- return ehl_combo_phy_ddi_translations_hbr2_hbr3;
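+ /* Anything that is neither HDMI nor eDP here is DP; HDMI and eDP fall through to the ICL tables. */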
+ if (type != INTEL_OUTPUT_HDMI && type != INTEL_OUTPUT_EDP) {
+ *n_entries = ARRAY_SIZE(ehl_combo_phy_ddi_translations_dp);
+ return ehl_combo_phy_ddi_translations_dp;
}
return icl_get_combo_buf_trans(dev_priv, type, rate, n_entries);
@@ -959,7 +1003,7 @@ static const struct cnl_ddi_buf_trans *
tgl_get_combo_buf_trans(struct drm_i915_private *dev_priv, int type, int rate,
int *n_entries)
{
- if (type != INTEL_OUTPUT_DP) {
+ if (type == INTEL_OUTPUT_HDMI || type == INTEL_OUTPUT_EDP) {
return icl_get_combo_buf_trans(dev_priv, type, rate, n_entries);
} else if (rate > 270000) {
*n_entries = ARRAY_SIZE(tgl_combo_phy_ddi_translations_dp_hbr2);
@@ -988,7 +1032,8 @@ static int intel_ddi_hdmi_level(struct intel_encoder *encoder)
icl_get_combo_buf_trans(dev_priv, INTEL_OUTPUT_HDMI,
0, &n_entries);
else
- n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations);
+ icl_get_mg_buf_trans(dev_priv, INTEL_OUTPUT_HDMI, 0,
+ &n_entries);
default_entry = n_entries - 1;
} else if (IS_CANNONLAKE(dev_priv)) {
cnl_get_buf_trans_hdmi(dev_priv, &n_entries);
@@ -1102,7 +1147,8 @@ static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
if (intel_de_read(dev_priv, reg) & DDI_BUF_IS_IDLE)
return;
}
- DRM_ERROR("Timeout waiting for DDI BUF %c idle bit\n", port_name(port));
+ drm_err(&dev_priv->drm, "Timeout waiting for DDI BUF %c idle bit\n",
+ port_name(port));
}
static u32 hsw_pll_to_ddi_pll_sel(const struct intel_shared_dpll *pll)
@@ -1249,7 +1295,8 @@ void hsw_fdi_link_train(struct intel_encoder *encoder,
temp = intel_de_read(dev_priv, DP_TP_STATUS(PORT_E));
if (temp & DP_TP_STATUS_AUTOTRAIN_DONE) {
- DRM_DEBUG_KMS("FDI link training done on step %d\n", i);
+ drm_dbg_kms(&dev_priv->drm,
+ "FDI link training done on step %d\n", i);
break;
}
@@ -1258,7 +1305,7 @@ void hsw_fdi_link_train(struct intel_encoder *encoder,
* Results in less fireworks from the state checker.
*/
if (i == ARRAY_SIZE(hsw_ddi_translations_fdi) * 2 - 1) {
- DRM_ERROR("FDI link training failed!\n");
+ drm_err(&dev_priv->drm, "FDI link training failed!\n");
break;
}
@@ -1450,6 +1497,14 @@ void intel_ddi_set_dp_msa(const struct intel_crtc_state *crtc_state,
intel_de_write(dev_priv, TRANS_MSA_MISC(cpu_transcoder), temp);
}
+static u32 bdw_trans_port_sync_master_select(enum transcoder master_transcoder)
+{
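+ /* The hardware field encodes the EDP transcoder as 0 and transcoders A onwards as 1.. */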
+ if (master_transcoder == TRANSCODER_EDP)
+ return 0;
+ else
+ return master_transcoder + 1;
+}
+
/*
* Returns the TRANS_DDI_FUNC_CTL value based on CRTC state.
*
@@ -1550,6 +1605,15 @@ intel_ddi_transcoder_func_reg_val_get(const struct intel_crtc_state *crtc_state)
temp |= DDI_PORT_WIDTH(crtc_state->lane_count);
}
+ if (IS_GEN_RANGE(dev_priv, 8, 10) &&
+ crtc_state->master_transcoder != INVALID_TRANSCODER) {
+ u8 master_select =
+ bdw_trans_port_sync_master_select(crtc_state->master_transcoder);
+
+ temp |= TRANS_DDI_PORT_SYNC_ENABLE |
+ TRANS_DDI_PORT_SYNC_MASTER_SELECT(master_select);
+ }
+
return temp;
}
@@ -1558,12 +1622,28 @@ void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state)
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
- u32 temp;
+ u32 ctl;
+
+ if (INTEL_GEN(dev_priv) >= 11) {
+ enum transcoder master_transcoder = crtc_state->master_transcoder;
+ u32 ctl2 = 0;
+
+ if (master_transcoder != INVALID_TRANSCODER) {
+ u8 master_select =
+ bdw_trans_port_sync_master_select(master_transcoder);
+
+ ctl2 |= PORT_SYNC_MODE_ENABLE |
+ PORT_SYNC_MODE_MASTER_SELECT(master_select);
+ }
+
+ intel_de_write(dev_priv,
+ TRANS_DDI_FUNC_CTL2(cpu_transcoder), ctl2);
+ }
+
+ ctl = intel_ddi_transcoder_func_reg_val_get(crtc_state);
- temp = intel_ddi_transcoder_func_reg_val_get(crtc_state);
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST))
- temp |= TRANS_DDI_DP_VC_PAYLOAD_ALLOC;
- intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder), temp);
+ ctl |= TRANS_DDI_DP_VC_PAYLOAD_ALLOC;
+ intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder), ctl);
}
/*
@@ -1576,11 +1656,11 @@ intel_ddi_config_transcoder_func(const struct intel_crtc_state *crtc_state)
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
- u32 temp;
+ u32 ctl;
- temp = intel_ddi_transcoder_func_reg_val_get(crtc_state);
- temp &= ~TRANS_DDI_FUNC_ENABLE;
- intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder), temp);
+ ctl = intel_ddi_transcoder_func_reg_val_get(crtc_state);
+ ctl &= ~TRANS_DDI_FUNC_ENABLE;
+ intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder), ctl);
}
void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state)
@@ -1588,24 +1668,35 @@ void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
- u32 val;
+ u32 ctl;
+
+ if (INTEL_GEN(dev_priv) >= 11)
+ intel_de_write(dev_priv,
+ TRANS_DDI_FUNC_CTL2(cpu_transcoder), 0);
+
+ ctl = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder));
- val = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder));
- val &= ~TRANS_DDI_FUNC_ENABLE;
+ ctl &= ~TRANS_DDI_FUNC_ENABLE;
+
+ if (IS_GEN_RANGE(dev_priv, 8, 10))
+ ctl &= ~(TRANS_DDI_PORT_SYNC_ENABLE |
+ TRANS_DDI_PORT_SYNC_MASTER_SELECT_MASK);
if (INTEL_GEN(dev_priv) >= 12) {
if (!intel_dp_mst_is_master_trans(crtc_state)) {
- val &= ~(TGL_TRANS_DDI_PORT_MASK |
+ ctl &= ~(TGL_TRANS_DDI_PORT_MASK |
TRANS_DDI_MODE_SELECT_MASK);
}
} else {
- val &= ~(TRANS_DDI_PORT_MASK | TRANS_DDI_MODE_SELECT_MASK);
+ ctl &= ~(TRANS_DDI_PORT_MASK | TRANS_DDI_MODE_SELECT_MASK);
}
- intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder), val);
+
+ intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder), ctl);
if (dev_priv->quirks & QUIRK_INCREASE_DDI_DISABLED_TIME &&
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
- DRM_DEBUG_KMS("Quirk Increase DDI disabled time\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Quirk Increase DDI disabled time\n");
/* Quirk time at 100ms for reliable operation */
msleep(100);
}
@@ -1666,7 +1757,7 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
goto out;
}
- if (HAS_TRANSCODER_EDP(dev_priv) && port == PORT_A)
+ if (HAS_TRANSCODER(dev_priv, TRANSCODER_EDP) && port == PORT_A)
cpu_transcoder = TRANSCODER_EDP;
else
cpu_transcoder = (enum transcoder) pipe;
@@ -1728,7 +1819,7 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
if (!(tmp & DDI_BUF_CTL_ENABLE))
goto out;
- if (HAS_TRANSCODER_EDP(dev_priv) && port == PORT_A) {
+ if (HAS_TRANSCODER(dev_priv, TRANSCODER_EDP) && port == PORT_A) {
tmp = intel_de_read(dev_priv,
TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
@@ -1786,20 +1877,23 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
}
if (!*pipe_mask)
- DRM_DEBUG_KMS("No pipe for [ENCODER:%d:%s] found\n",
- encoder->base.base.id, encoder->base.name);
+ drm_dbg_kms(&dev_priv->drm,
+ "No pipe for [ENCODER:%d:%s] found\n",
+ encoder->base.base.id, encoder->base.name);
if (!mst_pipe_mask && hweight8(*pipe_mask) > 1) {
- DRM_DEBUG_KMS("Multiple pipes for [ENCODER:%d:%s] (pipe_mask %02x)\n",
- encoder->base.base.id, encoder->base.name,
- *pipe_mask);
+ drm_dbg_kms(&dev_priv->drm,
+ "Multiple pipes for [ENCODER:%d:%s] (pipe_mask %02x)\n",
+ encoder->base.base.id, encoder->base.name,
+ *pipe_mask);
*pipe_mask = BIT(ffs(*pipe_mask) - 1);
}
if (mst_pipe_mask && mst_pipe_mask != *pipe_mask)
- DRM_DEBUG_KMS("Conflicting MST and non-MST state for [ENCODER:%d:%s] (pipe_mask %02x mst_pipe_mask %02x)\n",
- encoder->base.base.id, encoder->base.name,
- *pipe_mask, mst_pipe_mask);
+ drm_dbg_kms(&dev_priv->drm,
+ "Conflicting MST and non-MST state for [ENCODER:%d:%s] (pipe_mask %02x mst_pipe_mask %02x)\n",
+ encoder->base.base.id, encoder->base.name,
+ *pipe_mask, mst_pipe_mask);
else
*is_dp_mst = mst_pipe_mask;
@@ -1809,9 +1903,9 @@ out:
if ((tmp & (BXT_PHY_CMNLANE_POWERDOWN_ACK |
BXT_PHY_LANE_POWERDOWN_ACK |
BXT_PHY_LANE_ENABLED)) != BXT_PHY_LANE_ENABLED)
- DRM_ERROR("[ENCODER:%d:%s] enabled but PHY powered down? "
- "(PHY_CTL %08x)\n", encoder->base.base.id,
- encoder->base.name, tmp);
+ drm_err(&dev_priv->drm,
+ "[ENCODER:%d:%s] enabled but PHY powered down? (PHY_CTL %08x)\n",
+ encoder->base.base.id, encoder->base.name, tmp);
}
intel_display_power_put(dev_priv, encoder->power_domain, wakeref);
@@ -1869,7 +1963,11 @@ static void intel_ddi_get_power_domains(struct intel_encoder *encoder,
return;
dig_port = enc_to_dig_port(encoder);
- intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain);
+
+ if (!intel_phy_is_tc(dev_priv, phy) ||
+ dig_port->tc_mode != TC_PORT_TBT_ALT)
+ intel_display_power_get(dev_priv,
+ dig_port->ddi_io_power_domain);
/*
* AUX power is only needed for (e)DP mode, and for HDMI mode on TC
@@ -1973,7 +2071,7 @@ static void skl_ddi_set_iboost(struct intel_encoder *encoder,
/* Make sure that the requested I_boost is valid */
if (iboost && iboost != 0x1 && iboost != 0x3 && iboost != 0x7) {
- DRM_ERROR("Invalid I_boost value %u\n", iboost);
+ drm_err(&dev_priv->drm, "Invalid I_boost value %u\n", iboost);
return;
}
@@ -2032,7 +2130,8 @@ u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder)
icl_get_combo_buf_trans(dev_priv, encoder->type,
intel_dp->link_rate, &n_entries);
else
- n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations);
+ icl_get_mg_buf_trans(dev_priv, encoder->type,
+ intel_dp->link_rate, &n_entries);
} else if (IS_CANNONLAKE(dev_priv)) {
if (encoder->type == INTEL_OUTPUT_EDP)
cnl_get_buf_trans_edp(dev_priv, &n_entries);
@@ -2232,7 +2331,9 @@ static void icl_ddi_combo_vswing_program(struct drm_i915_private *dev_priv,
return;
if (level >= n_entries) {
- DRM_DEBUG_KMS("DDI translation not found for level %d. Using %d instead.", level, n_entries - 1);
+ drm_dbg_kms(&dev_priv->drm,
+ "DDI translation not found for level %d. Using %d instead.",
+ level, n_entries - 1);
level = n_entries - 1;
}
@@ -2345,21 +2446,28 @@ static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
}
static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
- int link_clock,
- u32 level)
+ int link_clock, u32 level,
+ enum intel_output_type type)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum tc_port tc_port = intel_port_to_tc(dev_priv, encoder->port);
const struct icl_mg_phy_ddi_buf_trans *ddi_translations;
u32 n_entries, val;
- int ln;
+ int ln, rate = 0;
+
+ if (type != INTEL_OUTPUT_HDMI) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ rate = intel_dp->link_rate;
+ }
+
- n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations);
- ddi_translations = icl_mg_phy_ddi_translations;
+ ddi_translations = icl_get_mg_buf_trans(dev_priv, type, rate,
+ &n_entries);
/* The table does not have values for level 3 and level 9. */
if (level >= n_entries || level == 3 || level == 9) {
- DRM_DEBUG_KMS("DDI translation not found for level %d. Using %d instead.",
- level, n_entries - 2);
+ drm_dbg_kms(&dev_priv->drm,
+ "DDI translation not found for level %d. Using %d instead.",
+ level, n_entries - 2);
level = n_entries - 2;
}
@@ -2478,7 +2586,8 @@ static void icl_ddi_vswing_sequence(struct intel_encoder *encoder,
if (intel_phy_is_combo(dev_priv, phy))
icl_combo_phy_ddi_vswing_sequence(encoder, level, type);
else
- icl_mg_phy_ddi_vswing_sequence(encoder, link_clock, level);
+ icl_mg_phy_ddi_vswing_sequence(encoder, link_clock, level,
+ type);
}
static void
@@ -2693,8 +2802,9 @@ static void icl_sanitize_port_clk_off(struct drm_i915_private *dev_priv,
if (drm_WARN_ON(&dev_priv->drm, ddi_clk_needed))
continue;
- DRM_NOTE("PHY %c is disabled/in DSI mode with an ungated DDI clock, gate it\n",
- phy_name(phy));
+ drm_notice(&dev_priv->drm,
+ "PHY %c is disabled/in DSI mode with an ungated DDI clock, gate it\n",
+ phy_name(phy));
val |= icl_dpclka_cfgcr0_clk_off(dev_priv, phy);
intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, val);
}
@@ -2931,11 +3041,14 @@ icl_program_mg_dp_mode(struct intel_digital_port *intel_dig_port,
static void intel_dp_sink_set_fec_ready(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
if (!crtc_state->fec_enable)
return;
if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_FEC_CONFIGURATION, DP_FEC_READY) <= 0)
- DRM_DEBUG_KMS("Failed to set FEC_READY in the sink\n");
+ drm_dbg_kms(&i915->drm,
+ "Failed to set FEC_READY in the sink\n");
}
static void intel_ddi_enable_fec(struct intel_encoder *encoder,
@@ -2955,7 +3068,8 @@ static void intel_ddi_enable_fec(struct intel_encoder *encoder,
if (intel_de_wait_for_set(dev_priv, intel_dp->regs.dp_tp_status,
DP_TP_STATUS_FEC_ENABLE_LIVE, 1))
- DRM_ERROR("Timed out waiting for FEC Enable Status\n");
+ drm_err(&dev_priv->drm,
+ "Timed out waiting for FEC Enable Status\n");
}
static void intel_ddi_disable_fec_state(struct intel_encoder *encoder,
@@ -2975,7 +3089,8 @@ static void intel_ddi_disable_fec_state(struct intel_encoder *encoder,
intel_de_posting_read(dev_priv, intel_dp->regs.dp_tp_ctl);
}
-static void tgl_ddi_pre_enable_dp(struct intel_encoder *encoder,
+static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
@@ -3115,7 +3230,8 @@ static void tgl_ddi_pre_enable_dp(struct intel_encoder *encoder,
intel_dsc_enable(encoder, crtc_state);
}
-static void hsw_ddi_pre_enable_dp(struct intel_encoder *encoder,
+static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
@@ -3188,16 +3304,17 @@ static void hsw_ddi_pre_enable_dp(struct intel_encoder *encoder,
intel_dsc_enable(encoder, crtc_state);
}
-static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
+static void intel_ddi_pre_enable_dp(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
if (INTEL_GEN(dev_priv) >= 12)
- tgl_ddi_pre_enable_dp(encoder, crtc_state, conn_state);
+ tgl_ddi_pre_enable_dp(state, encoder, crtc_state, conn_state);
else
- hsw_ddi_pre_enable_dp(encoder, crtc_state, conn_state);
+ hsw_ddi_pre_enable_dp(state, encoder, crtc_state, conn_state);
/* MST will call a setting of MSA after an allocating of Virtual Channel
* from MST encoder pre_enable callback.
@@ -3209,7 +3326,8 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
}
}
-static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
+static void intel_ddi_pre_enable_hdmi(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
@@ -3249,7 +3367,8 @@ static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
crtc_state, conn_state);
}
-static void intel_ddi_pre_enable(struct intel_encoder *encoder,
+static void intel_ddi_pre_enable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
@@ -3278,12 +3397,14 @@ static void intel_ddi_pre_enable(struct intel_encoder *encoder,
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
- intel_ddi_pre_enable_hdmi(encoder, crtc_state, conn_state);
+ intel_ddi_pre_enable_hdmi(state, encoder, crtc_state,
+ conn_state);
} else {
struct intel_lspcon *lspcon =
enc_to_intel_lspcon(encoder);
- intel_ddi_pre_enable_dp(encoder, crtc_state, conn_state);
+ intel_ddi_pre_enable_dp(state, encoder, crtc_state,
+ conn_state);
if (lspcon->active) {
struct intel_digital_port *dig_port =
enc_to_dig_port(encoder);
@@ -3326,7 +3447,8 @@ static void intel_disable_ddi_buf(struct intel_encoder *encoder,
intel_wait_ddi_buf_idle(dev_priv, port);
}
-static void intel_ddi_post_disable_dp(struct intel_encoder *encoder,
+static void intel_ddi_post_disable_dp(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
@@ -3382,7 +3504,8 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder,
intel_ddi_clk_disable(encoder);
}
-static void intel_ddi_post_disable_hdmi(struct intel_encoder *encoder,
+static void intel_ddi_post_disable_hdmi(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
@@ -3405,22 +3528,8 @@ static void intel_ddi_post_disable_hdmi(struct intel_encoder *encoder,
intel_dp_dual_mode_set_tmds_output(intel_hdmi, false);
}
-static void icl_disable_transcoder_port_sync(const struct intel_crtc_state *old_crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-
- if (old_crtc_state->master_transcoder == INVALID_TRANSCODER)
- return;
-
- DRM_DEBUG_KMS("Disabling Transcoder Port Sync on Slave Transcoder %s\n",
- transcoder_name(old_crtc_state->cpu_transcoder));
-
- intel_de_write(dev_priv,
- TRANS_DDI_FUNC_CTL2(old_crtc_state->cpu_transcoder), 0);
-}
-
-static void intel_ddi_post_disable(struct intel_encoder *encoder,
+static void intel_ddi_post_disable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
@@ -3434,9 +3543,6 @@ static void intel_ddi_post_disable(struct intel_encoder *encoder,
intel_disable_pipe(old_crtc_state);
- if (INTEL_GEN(dev_priv) >= 11)
- icl_disable_transcoder_port_sync(old_crtc_state);
-
intel_ddi_disable_transcoder_func(old_crtc_state);
intel_dsc_disable(old_crtc_state);
@@ -3461,11 +3567,11 @@ static void intel_ddi_post_disable(struct intel_encoder *encoder,
*/
if (intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_HDMI))
- intel_ddi_post_disable_hdmi(encoder,
- old_crtc_state, old_conn_state);
+ intel_ddi_post_disable_hdmi(state, encoder, old_crtc_state,
+ old_conn_state);
else
- intel_ddi_post_disable_dp(encoder,
- old_crtc_state, old_conn_state);
+ intel_ddi_post_disable_dp(state, encoder, old_crtc_state,
+ old_conn_state);
if (INTEL_GEN(dev_priv) >= 11)
icl_unmap_plls_to_ports(encoder);
@@ -3478,7 +3584,8 @@ static void intel_ddi_post_disable(struct intel_encoder *encoder,
intel_tc_port_put_link(dig_port);
}
-void intel_ddi_fdi_post_disable(struct intel_encoder *encoder,
+void intel_ddi_fdi_post_disable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
@@ -3512,7 +3619,43 @@ void intel_ddi_fdi_post_disable(struct intel_encoder *encoder,
intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), val);
}
-static void intel_enable_ddi_dp(struct intel_encoder *encoder,
+static void trans_port_sync_stop_link_train(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ const struct drm_connector_state *conn_state;
+ struct drm_connector *conn;
+ int i;
+
+ if (!crtc_state->sync_mode_slaves_mask)
+ return;
+
+ for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
+ struct intel_encoder *slave_encoder =
+ to_intel_encoder(conn_state->best_encoder);
+ struct intel_crtc *slave_crtc = to_intel_crtc(conn_state->crtc);
+ const struct intel_crtc_state *slave_crtc_state;
+
+ if (!slave_crtc)
+ continue;
+
+ slave_crtc_state =
+ intel_atomic_get_new_crtc_state(state, slave_crtc);
+
+ if (slave_crtc_state->master_transcoder !=
+ crtc_state->cpu_transcoder)
+ continue;
+
+ intel_dp_stop_link_train(enc_to_intel_dp(slave_encoder));
+ }
+
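+ /* Presumably a settle delay for the slaves before stopping link training on the master. */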
+ usleep_range(200, 400);
+
+ intel_dp_stop_link_train(enc_to_intel_dp(encoder));
+}
+
+static void intel_enable_ddi_dp(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
@@ -3531,6 +3674,8 @@ static void intel_enable_ddi_dp(struct intel_encoder *encoder,
if (crtc_state->has_audio)
intel_audio_codec_enable(encoder, crtc_state, conn_state);
+
+ trans_port_sync_stop_link_train(state, encoder, crtc_state);
}
static i915_reg_t
@@ -3553,7 +3698,8 @@ gen9_chicken_trans_reg_by_port(struct drm_i915_private *dev_priv,
return CHICKEN_TRANS(trans[port]);
}
-static void intel_enable_ddi_hdmi(struct intel_encoder *encoder,
+static void intel_enable_ddi_hdmi(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
@@ -3565,9 +3711,9 @@ static void intel_enable_ddi_hdmi(struct intel_encoder *encoder,
if (!intel_hdmi_handle_sink_scrambling(encoder, connector,
crtc_state->hdmi_high_tmds_clock_ratio,
crtc_state->hdmi_scrambling))
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Failed to configure sink "
- "scrambling/TMDS bit clock ratio\n",
- connector->base.id, connector->name);
+ drm_dbg_kms(&dev_priv->drm,
+ "[CONNECTOR:%d:%s] Failed to configure sink scrambling/TMDS bit clock ratio\n",
+ connector->base.id, connector->name);
/* Display WA #1143: skl,kbl,cfl */
if (IS_GEN9_BC(dev_priv)) {
@@ -3615,7 +3761,8 @@ static void intel_enable_ddi_hdmi(struct intel_encoder *encoder,
intel_audio_codec_enable(encoder, crtc_state, conn_state);
}
-static void intel_enable_ddi(struct intel_encoder *encoder,
+static void intel_enable_ddi(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
@@ -3626,9 +3773,9 @@ static void intel_enable_ddi(struct intel_encoder *encoder,
intel_crtc_vblank_on(crtc_state);
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
- intel_enable_ddi_hdmi(encoder, crtc_state, conn_state);
+ intel_enable_ddi_hdmi(state, encoder, crtc_state, conn_state);
else
- intel_enable_ddi_dp(encoder, crtc_state, conn_state);
+ intel_enable_ddi_dp(state, encoder, crtc_state, conn_state);
/* Enable hdcp if it's desired */
if (conn_state->content_protection ==
@@ -3638,7 +3785,8 @@ static void intel_enable_ddi(struct intel_encoder *encoder,
(u8)conn_state->hdcp_content_type);
}
-static void intel_disable_ddi_dp(struct intel_encoder *encoder,
+static void intel_disable_ddi_dp(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
@@ -3658,10 +3806,12 @@ static void intel_disable_ddi_dp(struct intel_encoder *encoder,
false);
}
-static void intel_disable_ddi_hdmi(struct intel_encoder *encoder,
+static void intel_disable_ddi_hdmi(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct drm_connector *connector = old_conn_state->connector;
if (old_crtc_state->has_audio)
@@ -3670,23 +3820,28 @@ static void intel_disable_ddi_hdmi(struct intel_encoder *encoder,
if (!intel_hdmi_handle_sink_scrambling(encoder, connector,
false, false))
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Failed to reset sink scrambling/TMDS bit clock ratio\n",
- connector->base.id, connector->name);
+ drm_dbg_kms(&i915->drm,
+ "[CONNECTOR:%d:%s] Failed to reset sink scrambling/TMDS bit clock ratio\n",
+ connector->base.id, connector->name);
}
-static void intel_disable_ddi(struct intel_encoder *encoder,
+static void intel_disable_ddi(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
intel_hdcp_disable(to_intel_connector(old_conn_state->connector));
if (intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_HDMI))
- intel_disable_ddi_hdmi(encoder, old_crtc_state, old_conn_state);
+ intel_disable_ddi_hdmi(state, encoder, old_crtc_state,
+ old_conn_state);
else
- intel_disable_ddi_dp(encoder, old_crtc_state, old_conn_state);
+ intel_disable_ddi_dp(state, encoder, old_crtc_state,
+ old_conn_state);
}
-static void intel_ddi_update_pipe_dp(struct intel_encoder *encoder,
+static void intel_ddi_update_pipe_dp(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
@@ -3697,18 +3852,20 @@ static void intel_ddi_update_pipe_dp(struct intel_encoder *encoder,
intel_psr_update(intel_dp, crtc_state);
intel_edp_drrs_enable(intel_dp, crtc_state);
- intel_panel_update_backlight(encoder, crtc_state, conn_state);
+ intel_panel_update_backlight(state, encoder, crtc_state, conn_state);
}
-static void intel_ddi_update_pipe(struct intel_encoder *encoder,
+static void intel_ddi_update_pipe(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
- intel_ddi_update_pipe_dp(encoder, crtc_state, conn_state);
+ intel_ddi_update_pipe_dp(state, encoder, crtc_state,
+ conn_state);
- intel_hdcp_update_pipe(encoder, crtc_state, conn_state);
+ intel_hdcp_update_pipe(state, encoder, crtc_state, conn_state);
}
static void
@@ -3737,7 +3894,8 @@ intel_ddi_update_complete(struct intel_atomic_state *state,
}
static void
-intel_ddi_pre_pll_enable(struct intel_encoder *encoder,
+intel_ddi_pre_pll_enable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
@@ -3837,6 +3995,66 @@ void intel_ddi_compute_min_voltage_level(struct drm_i915_private *dev_priv,
crtc_state->min_voltage_level = 2;
}
+static enum transcoder bdw_transcoder_master_readout(struct drm_i915_private *dev_priv,
+ enum transcoder cpu_transcoder)
+{
+ u32 master_select;
+
+ if (INTEL_GEN(dev_priv) >= 11) {
+ u32 ctl2 = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL2(cpu_transcoder));
+
+ if ((ctl2 & PORT_SYNC_MODE_ENABLE) == 0)
+ return INVALID_TRANSCODER;
+
+ master_select = REG_FIELD_GET(PORT_SYNC_MODE_MASTER_SELECT_MASK, ctl2);
+ } else {
+ u32 ctl = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder));
+
+ if ((ctl & TRANS_DDI_PORT_SYNC_ENABLE) == 0)
+ return INVALID_TRANSCODER;
+
+ master_select = REG_FIELD_GET(TRANS_DDI_PORT_SYNC_MASTER_SELECT_MASK, ctl);
+ }
+
+ if (master_select == 0)
+ return TRANSCODER_EDP;
+ else
+ return master_select - 1;
+}
+
+static void bdw_get_trans_port_sync_config(struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ u32 transcoders = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
+ BIT(TRANSCODER_C) | BIT(TRANSCODER_D);
+ enum transcoder cpu_transcoder;
+
+ crtc_state->master_transcoder =
+ bdw_transcoder_master_readout(dev_priv, crtc_state->cpu_transcoder);
+
+ for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, transcoders) {
+ enum intel_display_power_domain power_domain;
+ intel_wakeref_t trans_wakeref;
+
+ power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
+ trans_wakeref = intel_display_power_get_if_enabled(dev_priv,
+ power_domain);
+
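+ /* Skip transcoders whose power wells are off; their registers can't be read. */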
+ if (!trans_wakeref)
+ continue;
+
+ if (bdw_transcoder_master_readout(dev_priv, cpu_transcoder) ==
+ crtc_state->cpu_transcoder)
+ crtc_state->sync_mode_slaves_mask |= BIT(cpu_transcoder);
+
+ intel_display_power_put(dev_priv, power_domain, trans_wakeref);
+ }
+
+ drm_WARN_ON(&dev_priv->drm,
+ crtc_state->master_transcoder != INVALID_TRANSCODER &&
+ crtc_state->sync_mode_slaves_mask);
+}
+
void intel_ddi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
@@ -3922,9 +4140,10 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
pipe_config->fec_enable =
intel_de_read(dev_priv, dp_tp_ctl) & DP_TP_CTL_FEC_ENABLE;
- DRM_DEBUG_KMS("[ENCODER:%d:%s] Fec status: %u\n",
- encoder->base.base.id, encoder->base.name,
- pipe_config->fec_enable);
+ drm_dbg_kms(&dev_priv->drm,
+ "[ENCODER:%d:%s] Fec status: %u\n",
+ encoder->base.base.id, encoder->base.name,
+ pipe_config->fec_enable);
}
break;
@@ -3961,8 +4180,9 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
* up by the BIOS, and thus we can't get the mode at module
* load.
*/
- DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
- pipe_config->pipe_bpp, dev_priv->vbt.edp.bpp);
+ drm_dbg_kms(&dev_priv->drm,
+ "pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
+ pipe_config->pipe_bpp, dev_priv->vbt.edp.bpp);
dev_priv->vbt.edp.bpp = pipe_config->pipe_bpp;
}
@@ -3988,6 +4208,9 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
intel_read_infoframe(encoder, pipe_config,
HDMI_INFOFRAME_TYPE_DRM,
&pipe_config->infoframes.drm);
+
+ if (INTEL_GEN(dev_priv) >= 8)
+ bdw_get_trans_port_sync_config(pipe_config);
}
static enum intel_output_type
@@ -4017,7 +4240,7 @@ static int intel_ddi_compute_config(struct intel_encoder *encoder,
enum port port = encoder->port;
int ret;
- if (HAS_TRANSCODER_EDP(dev_priv) && port == PORT_A)
+ if (HAS_TRANSCODER(dev_priv, TRANSCODER_EDP) && port == PORT_A)
pipe_config->cpu_transcoder = TRANSCODER_EDP;
if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI)) {
@@ -4089,7 +4312,11 @@ intel_ddi_port_sync_transcoders(const struct intel_crtc_state *ref_crtc_state,
u8 transcoders = 0;
int i;
- if (INTEL_GEN(dev_priv) < 11)
+ /*
+ * We don't enable port sync on BDW due to missing workarounds and
+ * because the modeset sequence hasn't been adjusted for it yet.
+ */
+ if (INTEL_GEN(dev_priv) < 9)
return 0;
if (!intel_crtc_has_type(ref_crtc_state, INTEL_OUTPUT_DP))
@@ -4121,12 +4348,13 @@ static int intel_ddi_compute_config_late(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct drm_connector *connector = conn_state->connector;
u8 port_sync_transcoders = 0;
- DRM_DEBUG_KMS("[ENCODER:%d:%s] [CRTC:%d:%s]",
- encoder->base.base.id, encoder->base.name,
- crtc_state->uapi.crtc->base.id, crtc_state->uapi.crtc->name);
+ drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] [CRTC:%d:%s]",
+ encoder->base.base.id, encoder->base.name,
+ crtc_state->uapi.crtc->base.id, crtc_state->uapi.crtc->name);
if (connector->has_tile)
port_sync_transcoders = intel_ddi_port_sync_transcoders(crtc_state,
@@ -4265,7 +4493,8 @@ static int intel_hdmi_reset_link(struct intel_encoder *encoder,
ret = drm_scdc_readb(adapter, SCDC_TMDS_CONFIG, &config);
if (ret < 0) {
- DRM_ERROR("Failed to read TMDS config: %d\n", ret);
+ drm_err(&dev_priv->drm, "Failed to read TMDS config: %d\n",
+ ret);
return 0;
}
@@ -4289,15 +4518,17 @@ static int intel_hdmi_reset_link(struct intel_encoder *encoder,
static enum intel_hotplug_state
intel_ddi_hotplug(struct intel_encoder *encoder,
- struct intel_connector *connector,
- bool irq_received)
+ struct intel_connector *connector)
{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+ enum phy phy = intel_port_to_phy(i915, encoder->port);
+ bool is_tc = intel_phy_is_tc(i915, phy);
struct drm_modeset_acquire_ctx ctx;
enum intel_hotplug_state state;
int ret;
- state = intel_encoder_hotplug(encoder, connector, irq_received);
+ state = intel_encoder_hotplug(encoder, connector);
drm_modeset_acquire_init(&ctx, 0);
@@ -4335,8 +4566,15 @@ intel_ddi_hotplug(struct intel_encoder *encoder,
* valid EDID. To solve this schedule another detection cycle if this
* time around we didn't detect any change in the sink's connection
* status.
+ *
+ * Type-c connectors which get their HPD signal deasserted then
+ * reasserted, without unplugging/replugging the sink from the
+ * connector, introduce a delay until the AUX channel communication
+ * becomes functional. Retry the detection for 5 seconds on type-c
+ * connectors to account for this delay.
*/
- if (state == INTEL_HOTPLUG_UNCHANGED && irq_received &&
+ if (state == INTEL_HOTPLUG_UNCHANGED &&
+ connector->hotplug_retries < (is_tc ? 5 : 1) &&
!dig_port->dp.is_mst)
state = INTEL_HOTPLUG_RETRY;
@@ -4411,7 +4649,8 @@ intel_ddi_max_lanes(struct intel_digital_port *intel_dport)
* so we use the proper lane count for our calculations.
*/
if (intel_ddi_a_force_4_lanes(intel_dport)) {
- DRM_DEBUG_KMS("Forcing DDI_A_4_LANES for port A\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Forcing DDI_A_4_LANES for port A\n");
intel_dport->saved_port_bits |= DDI_A_4_LANES;
max_lanes = 4;
}
@@ -4439,12 +4678,14 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
init_dp = true;
init_lspcon = true;
init_hdmi = false;
- DRM_DEBUG_KMS("VBT says port %c has lspcon\n", port_name(port));
+ drm_dbg_kms(&dev_priv->drm, "VBT says port %c has lspcon\n",
+ port_name(port));
}
if (!init_dp && !init_hdmi) {
- DRM_DEBUG_KMS("VBT says port %c is not DVI/HDMI/DP compatible, respect it\n",
- port_name(port));
+ drm_dbg_kms(&dev_priv->drm,
+ "VBT says port %c is not DVI/HDMI/DP compatible, respect it\n",
+ port_name(port));
return;
}
@@ -4523,14 +4764,16 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
if (init_lspcon) {
if (lspcon_init(intel_dig_port))
/* TODO: handle hdmi info frame part */
- DRM_DEBUG_KMS("LSPCON init success on port %c\n",
- port_name(port));
+ drm_dbg_kms(&dev_priv->drm,
+ "LSPCON init success on port %c\n",
+ port_name(port));
else
/*
* LSPCON init failed, but DP init succeeded, so
* let's try to drive it as a DP++ port.
*/
- DRM_ERROR("LSPCON init failed on port %c\n",
+ drm_err(&dev_priv->drm,
+ "LSPCON init failed on port %c\n",
port_name(port));
}
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.h b/drivers/gpu/drm/i915/display/intel_ddi.h
index 55fd72b901fe..de4cd877c002 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.h
+++ b/drivers/gpu/drm/i915/display/intel_ddi.h
@@ -17,7 +17,8 @@ struct intel_dp;
struct intel_dpll_hw_state;
struct intel_encoder;
-void intel_ddi_fdi_post_disable(struct intel_encoder *intel_encoder,
+void intel_ddi_fdi_post_disable(struct intel_atomic_state *state,
+ struct intel_encoder *intel_encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state);
void hsw_fdi_link_train(struct intel_encoder *encoder,
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 8f23c4d51c33..af5b4055b38a 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -525,7 +525,7 @@ skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable)
intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS));
}
-/* Wa_2006604312:icl */
+/* Wa_2006604312:icl,ehl */
static void
icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
bool enable)
@@ -544,17 +544,23 @@ needs_modeset(const struct intel_crtc_state *state)
return drm_atomic_crtc_needs_modeset(&state->uapi);
}
-bool
-is_trans_port_sync_mode(const struct intel_crtc_state *crtc_state)
+static bool
+is_trans_port_sync_slave(const struct intel_crtc_state *crtc_state)
{
- return (crtc_state->master_transcoder != INVALID_TRANSCODER ||
- crtc_state->sync_mode_slaves_mask);
+ return crtc_state->master_transcoder != INVALID_TRANSCODER;
}
static bool
-is_trans_port_sync_slave(const struct intel_crtc_state *crtc_state)
+is_trans_port_sync_master(const struct intel_crtc_state *crtc_state)
{
- return crtc_state->master_transcoder != INVALID_TRANSCODER;
+ return crtc_state->sync_mode_slaves_mask != 0;
+}
+
+bool
+is_trans_port_sync_mode(const struct intel_crtc_state *crtc_state)
+{
+ return is_trans_port_sync_master(crtc_state) ||
+ is_trans_port_sync_slave(crtc_state);
}
/*
@@ -620,45 +626,43 @@ int chv_calc_dpll_params(int refclk, struct dpll *clock)
return clock->dot / 5;
}
-#define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0)
-
/*
* Returns whether the given set of divisors are valid for a given refclk with
* the given connectors.
*/
-static bool intel_PLL_is_valid(struct drm_i915_private *dev_priv,
+static bool intel_pll_is_valid(struct drm_i915_private *dev_priv,
const struct intel_limit *limit,
const struct dpll *clock)
{
- if (clock->n < limit->n.min || limit->n.max < clock->n)
- INTELPllInvalid("n out of range\n");
- if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
- INTELPllInvalid("p1 out of range\n");
- if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
- INTELPllInvalid("m2 out of range\n");
- if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
- INTELPllInvalid("m1 out of range\n");
+ if (clock->n < limit->n.min || limit->n.max < clock->n)
+ return false;
+ if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
+ return false;
+ if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
+ return false;
+ if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
+ return false;
if (!IS_PINEVIEW(dev_priv) && !IS_VALLEYVIEW(dev_priv) &&
!IS_CHERRYVIEW(dev_priv) && !IS_GEN9_LP(dev_priv))
if (clock->m1 <= clock->m2)
- INTELPllInvalid("m1 <= m2\n");
+ return false;
if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
!IS_GEN9_LP(dev_priv)) {
if (clock->p < limit->p.min || limit->p.max < clock->p)
- INTELPllInvalid("p out of range\n");
+ return false;
if (clock->m < limit->m.min || limit->m.max < clock->m)
- INTELPllInvalid("m out of range\n");
+ return false;
}
if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
- INTELPllInvalid("vco out of range\n");
+ return false;
/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
* connector, etc., rather than just a single range.
*/
if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
- INTELPllInvalid("dot out of range\n");
+ return false;
return true;
}
@@ -725,7 +729,7 @@ i9xx_find_best_dpll(const struct intel_limit *limit,
int this_err;
i9xx_calc_dpll_params(refclk, &clock);
- if (!intel_PLL_is_valid(to_i915(dev),
+ if (!intel_pll_is_valid(to_i915(dev),
limit,
&clock))
continue;
@@ -781,7 +785,7 @@ pnv_find_best_dpll(const struct intel_limit *limit,
int this_err;
pnv_calc_dpll_params(refclk, &clock);
- if (!intel_PLL_is_valid(to_i915(dev),
+ if (!intel_pll_is_valid(to_i915(dev),
limit,
&clock))
continue;
@@ -842,7 +846,7 @@ g4x_find_best_dpll(const struct intel_limit *limit,
int this_err;
i9xx_calc_dpll_params(refclk, &clock);
- if (!intel_PLL_is_valid(to_i915(dev),
+ if (!intel_pll_is_valid(to_i915(dev),
limit,
&clock))
continue;
@@ -939,7 +943,7 @@ vlv_find_best_dpll(const struct intel_limit *limit,
vlv_calc_dpll_params(refclk, &clock);
- if (!intel_PLL_is_valid(to_i915(dev),
+ if (!intel_pll_is_valid(to_i915(dev),
limit,
&clock))
continue;
@@ -1008,7 +1012,7 @@ chv_find_best_dpll(const struct intel_limit *limit,
chv_calc_dpll_params(refclk, &clock);
- if (!intel_PLL_is_valid(to_i915(dev), limit, &clock))
+ if (!intel_pll_is_valid(to_i915(dev), limit, &clock))
continue;
if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
@@ -2910,6 +2914,7 @@ intel_fb_plane_get_subsampling(int *hsub, int *vsub,
static int
intel_fb_check_ccs_xy(struct drm_framebuffer *fb, int ccs_plane, int x, int y)
{
+ struct drm_i915_private *i915 = to_i915(fb->dev);
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
int main_plane;
int hsub, vsub;
@@ -2938,7 +2943,8 @@ intel_fb_check_ccs_xy(struct drm_framebuffer *fb, int ccs_plane, int x, int y)
* x/y offsets must match between CCS and the main surface.
*/
if (main_x != ccs_x || main_y != ccs_y) {
- DRM_DEBUG_KMS("Bad CCS x/y (main %d,%d ccs %d,%d) full (main %d,%d ccs %d,%d)\n",
+ drm_dbg_kms(&i915->drm,
+ "Bad CCS x/y (main %d,%d ccs %d,%d) full (main %d,%d ccs %d,%d)\n",
main_x, main_y,
ccs_x, ccs_y,
intel_fb->normal[main_plane].x,
@@ -3336,6 +3342,8 @@ int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
return DRM_FORMAT_RGB565;
case PLANE_CTL_FORMAT_NV12:
return DRM_FORMAT_NV12;
+ case PLANE_CTL_FORMAT_XYUV:
+ return DRM_FORMAT_XYUV8888;
case PLANE_CTL_FORMAT_P010:
return DRM_FORMAT_P010;
case PLANE_CTL_FORMAT_P012:
@@ -4580,6 +4588,8 @@ static u32 skl_plane_ctl_format(u32 pixel_format)
case DRM_FORMAT_XRGB16161616F:
case DRM_FORMAT_ARGB16161616F:
return PLANE_CTL_FORMAT_XRGB_16161616F;
+ case DRM_FORMAT_XYUV8888:
+ return PLANE_CTL_FORMAT_XYUV;
case DRM_FORMAT_YUYV:
return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
case DRM_FORMAT_YVYU:
@@ -4998,37 +5008,6 @@ static void icl_set_pipe_chicken(struct intel_crtc *crtc)
intel_de_write(dev_priv, PIPE_CHICKEN(pipe), tmp);
}
-static void icl_enable_trans_port_sync(const struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- u32 trans_ddi_func_ctl2_val;
- u8 master_select;
-
- /*
- * Configure the master select and enable Transcoder Port Sync for
- * Slave CRTCs transcoder.
- */
- if (crtc_state->master_transcoder == INVALID_TRANSCODER)
- return;
-
- if (crtc_state->master_transcoder == TRANSCODER_EDP)
- master_select = 0;
- else
- master_select = crtc_state->master_transcoder + 1;
-
- /* Set the master select bits for Tranascoder Port Sync */
- trans_ddi_func_ctl2_val = (PORT_SYNC_MODE_MASTER_SELECT(master_select) &
- PORT_SYNC_MODE_MASTER_SELECT_MASK) <<
- PORT_SYNC_MODE_MASTER_SELECT_SHIFT;
- /* Enable Transcoder Port Sync */
- trans_ddi_func_ctl2_val |= PORT_SYNC_MODE_ENABLE;
-
- intel_de_write(dev_priv,
- TRANS_DDI_FUNC_CTL2(crtc_state->cpu_transcoder),
- trans_ddi_func_ctl2_val);
-}
-
static void intel_fdi_normal_train(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
@@ -6200,6 +6179,7 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
case DRM_FORMAT_UYVY:
case DRM_FORMAT_VYUY:
case DRM_FORMAT_NV12:
+ case DRM_FORMAT_XYUV8888:
case DRM_FORMAT_P010:
case DRM_FORMAT_P012:
case DRM_FORMAT_P016:
@@ -6463,8 +6443,8 @@ static bool needs_scalerclk_wa(const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
- /* Wa_2006604312:icl */
- if (crtc_state->scaler_state.scaler_users > 0 && IS_ICELAKE(dev_priv))
+ /* Wa_2006604312:icl,ehl */
+ if (crtc_state->scaler_state.scaler_users > 0 && IS_GEN(dev_priv, 11))
return true;
return false;
@@ -6534,7 +6514,7 @@ static void intel_pre_plane_update(struct intel_atomic_state *state,
needs_nv12_wa(new_crtc_state))
skl_wa_827(dev_priv, pipe, true);
- /* Wa_2006604312:icl */
+ /* Wa_2006604312:icl,ehl */
if (!needs_scalerclk_wa(old_crtc_state) &&
needs_scalerclk_wa(new_crtc_state))
icl_wa_scalerclkgating(dev_priv, pipe, true);
@@ -6720,7 +6700,8 @@ static void intel_encoders_pre_pll_enable(struct intel_atomic_state *state,
continue;
if (encoder->pre_pll_enable)
- encoder->pre_pll_enable(encoder, crtc_state, conn_state);
+ encoder->pre_pll_enable(state, encoder,
+ crtc_state, conn_state);
}
}
@@ -6741,7 +6722,8 @@ static void intel_encoders_pre_enable(struct intel_atomic_state *state,
continue;
if (encoder->pre_enable)
- encoder->pre_enable(encoder, crtc_state, conn_state);
+ encoder->pre_enable(state, encoder,
+ crtc_state, conn_state);
}
}
@@ -6762,7 +6744,8 @@ static void intel_encoders_enable(struct intel_atomic_state *state,
continue;
if (encoder->enable)
- encoder->enable(encoder, crtc_state, conn_state);
+ encoder->enable(state, encoder,
+ crtc_state, conn_state);
intel_opregion_notify_encoder(encoder, true);
}
}
@@ -6785,7 +6768,8 @@ static void intel_encoders_disable(struct intel_atomic_state *state,
intel_opregion_notify_encoder(encoder, false);
if (encoder->disable)
- encoder->disable(encoder, old_crtc_state, old_conn_state);
+ encoder->disable(state, encoder,
+ old_crtc_state, old_conn_state);
}
}
@@ -6806,7 +6790,8 @@ static void intel_encoders_post_disable(struct intel_atomic_state *state,
continue;
if (encoder->post_disable)
- encoder->post_disable(encoder, old_crtc_state, old_conn_state);
+ encoder->post_disable(state, encoder,
+ old_crtc_state, old_conn_state);
}
}
@@ -6827,7 +6812,8 @@ static void intel_encoders_post_pll_disable(struct intel_atomic_state *state,
continue;
if (encoder->post_pll_disable)
- encoder->post_pll_disable(encoder, old_crtc_state, old_conn_state);
+ encoder->post_pll_disable(state, encoder,
+ old_crtc_state, old_conn_state);
}
}
@@ -6848,7 +6834,8 @@ static void intel_encoders_update_pipe(struct intel_atomic_state *state,
continue;
if (encoder->update_pipe)
- encoder->update_pipe(encoder, crtc_state, conn_state);
+ encoder->update_pipe(state, encoder,
+ crtc_state, conn_state);
}
}
@@ -7037,9 +7024,6 @@ static void hsw_crtc_enable(struct intel_atomic_state *state,
if (!transcoder_is_dsi(cpu_transcoder))
intel_set_pipe_timings(new_crtc_state);
- if (INTEL_GEN(dev_priv) >= 11)
- icl_enable_trans_port_sync(new_crtc_state);
-
intel_set_pipe_src_size(new_crtc_state);
if (cpu_transcoder != TRANSCODER_EDP &&
@@ -9398,7 +9382,6 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
pipe_config->shared_dpll = NULL;
- pipe_config->master_transcoder = INVALID_TRANSCODER;
ret = false;
@@ -10622,7 +10605,6 @@ static bool ilk_get_pipe_config(struct intel_crtc *crtc,
pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
pipe_config->shared_dpll = NULL;
- pipe_config->master_transcoder = INVALID_TRANSCODER;
ret = false;
tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe));
@@ -10891,7 +10873,7 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
panel_transcoder_mask |=
BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1);
- if (HAS_TRANSCODER_EDP(dev_priv))
+ if (HAS_TRANSCODER(dev_priv, TRANSCODER_EDP))
panel_transcoder_mask |= BIT(TRANSCODER_EDP);
/*
@@ -11085,61 +11067,6 @@ static void hsw_get_ddi_port_state(struct intel_crtc *crtc,
}
}
-static enum transcoder transcoder_master_readout(struct drm_i915_private *dev_priv,
- enum transcoder cpu_transcoder)
-{
- u32 trans_port_sync, master_select;
-
- trans_port_sync = intel_de_read(dev_priv,
- TRANS_DDI_FUNC_CTL2(cpu_transcoder));
-
- if ((trans_port_sync & PORT_SYNC_MODE_ENABLE) == 0)
- return INVALID_TRANSCODER;
-
- master_select = trans_port_sync &
- PORT_SYNC_MODE_MASTER_SELECT_MASK;
- if (master_select == 0)
- return TRANSCODER_EDP;
- else
- return master_select - 1;
-}
-
-static void icl_get_trans_port_sync_config(struct intel_crtc_state *crtc_state)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
- u32 transcoders;
- enum transcoder cpu_transcoder;
-
- crtc_state->master_transcoder = transcoder_master_readout(dev_priv,
- crtc_state->cpu_transcoder);
-
- transcoders = BIT(TRANSCODER_A) |
- BIT(TRANSCODER_B) |
- BIT(TRANSCODER_C) |
- BIT(TRANSCODER_D);
- for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, transcoders) {
- enum intel_display_power_domain power_domain;
- intel_wakeref_t trans_wakeref;
-
- power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
- trans_wakeref = intel_display_power_get_if_enabled(dev_priv,
- power_domain);
-
- if (!trans_wakeref)
- continue;
-
- if (transcoder_master_readout(dev_priv, cpu_transcoder) ==
- crtc_state->cpu_transcoder)
- crtc_state->sync_mode_slaves_mask |= BIT(cpu_transcoder);
-
- intel_display_power_put(dev_priv, power_domain, trans_wakeref);
- }
-
- drm_WARN_ON(&dev_priv->drm,
- crtc_state->master_transcoder != INVALID_TRANSCODER &&
- crtc_state->sync_mode_slaves_mask);
-}
-
static bool hsw_get_pipe_config(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
@@ -11271,10 +11198,6 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
pipe_config->pixel_multiplier = 1;
}
- if (INTEL_GEN(dev_priv) >= 11 &&
- !transcoder_is_dsi(pipe_config->cpu_transcoder))
- icl_get_trans_port_sync_config(pipe_config);
-
out:
for_each_power_domain(power_domain, power_domain_mask)
intel_display_power_put(dev_priv,
@@ -12377,10 +12300,8 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat
* only combine the results from all planes in the current place?
*/
if (!is_crtc_enabled) {
- plane_state->uapi.visible = visible = false;
- crtc_state->active_planes &= ~BIT(plane->id);
- crtc_state->data_rate[plane->id] = 0;
- crtc_state->min_cdclk[plane->id] = 0;
+ intel_plane_set_invisible(crtc_state, plane_state);
+ visible = false;
}
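The four bookkeeping assignments removed here are folded into the new intel_plane_set_invisible() helper. A minimal sketch of what that helper must do, inferred purely from the lines it replaces (the real helper is defined elsewhere in the patch):

    void intel_plane_set_invisible(struct intel_crtc_state *crtc_state,
                                   struct intel_plane_state *plane_state)
    {
        struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);

        crtc_state->active_planes &= ~BIT(plane->id);
        crtc_state->data_rate[plane->id] = 0;
        crtc_state->min_cdclk[plane->id] = 0;
        plane_state->uapi.visible = false;
    }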
if (!was_visible && !visible)
@@ -12886,16 +12807,17 @@ compute_baseline_pipe_bpp(struct intel_crtc *crtc,
return 0;
}
-static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
+static void intel_dump_crtc_timings(struct drm_i915_private *i915,
+ const struct drm_display_mode *mode)
{
- DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, "
- "type: 0x%x flags: 0x%x\n",
- mode->crtc_clock,
- mode->crtc_hdisplay, mode->crtc_hsync_start,
- mode->crtc_hsync_end, mode->crtc_htotal,
- mode->crtc_vdisplay, mode->crtc_vsync_start,
- mode->crtc_vsync_end, mode->crtc_vtotal,
- mode->type, mode->flags);
+ drm_dbg_kms(&i915->drm, "crtc timings: %d %d %d %d %d %d %d %d %d, "
+ "type: 0x%x flags: 0x%x\n",
+ mode->crtc_clock,
+ mode->crtc_hdisplay, mode->crtc_hsync_start,
+ mode->crtc_hsync_end, mode->crtc_htotal,
+ mode->crtc_vdisplay, mode->crtc_vsync_start,
+ mode->crtc_vsync_end, mode->crtc_vtotal,
+ mode->type, mode->flags);
}
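This hunk is representative of a conversion applied throughout the patch: the device-less DRM_DEBUG_KMS() is replaced by drm_dbg_kms(), which takes a struct drm_device pointer so each log line is tagged with the originating device. That is why intel_dump_crtc_timings() grows an i915 parameter. In condensed form:

    /* before: no device context in the message */
    DRM_DEBUG_KMS("crtc timings: ...\n");

    /* after: the caller threads a drm_i915_private through so the
     * device-aware logger can be used */
    drm_dbg_kms(&i915->drm, "crtc timings: ...\n");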
static inline void
@@ -13042,6 +12964,11 @@ static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
transcoder_name(pipe_config->cpu_transcoder),
pipe_config->pipe_bpp, pipe_config->dither);
+ drm_dbg_kms(&dev_priv->drm,
+ "port sync: master transcoder: %s, slave transcoder bitmask = 0x%x\n",
+ transcoder_name(pipe_config->master_transcoder),
+ pipe_config->sync_mode_slaves_mask);
+
if (pipe_config->has_pch_encoder)
intel_dump_m_n_config(pipe_config, "fdi",
pipe_config->fdi_lanes,
@@ -13079,7 +13006,7 @@ static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
drm_mode_debug_printmodeline(&pipe_config->hw.mode);
drm_dbg_kms(&dev_priv->drm, "adjusted mode:\n");
drm_mode_debug_printmodeline(&pipe_config->hw.adjusted_mode);
- intel_dump_crtc_timings(&pipe_config->hw.adjusted_mode);
+ intel_dump_crtc_timings(dev_priv, &pipe_config->hw.adjusted_mode);
drm_dbg_kms(&dev_priv->drm,
"port clock: %d, pipe src size: %dx%d, pixel rate %d\n",
pipe_config->port_clock,
@@ -14748,8 +14675,8 @@ static int intel_atomic_check(struct drm_device *dev,
/* Catch I915_MODE_FLAG_INHERITED */
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
- if (new_crtc_state->hw.mode.private_flags !=
- old_crtc_state->hw.mode.private_flags)
+ if (new_crtc_state->uapi.mode.private_flags !=
+ old_crtc_state->uapi.mode.private_flags)
new_crtc_state->uapi.mode_changed = true;
}
@@ -14999,11 +14926,13 @@ static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state,
}
static void commit_pipe_config(struct intel_atomic_state *state,
- struct intel_crtc_state *old_crtc_state,
- struct intel_crtc_state *new_crtc_state)
+ struct intel_crtc *crtc)
{
- struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ const struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
bool modeset = needs_modeset(new_crtc_state);
/*
@@ -15029,22 +14958,35 @@ static void commit_pipe_config(struct intel_atomic_state *state,
dev_priv->display.atomic_update_watermarks(state, crtc);
}
-static void intel_update_crtc(struct intel_crtc *crtc,
- struct intel_atomic_state *state,
- struct intel_crtc_state *old_crtc_state,
- struct intel_crtc_state *new_crtc_state)
+static void intel_enable_crtc(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- bool modeset = needs_modeset(new_crtc_state);
+ const struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
- if (modeset) {
- intel_crtc_update_active_timings(new_crtc_state);
+ if (!needs_modeset(new_crtc_state))
+ return;
- dev_priv->display.crtc_enable(state, crtc);
+ intel_crtc_update_active_timings(new_crtc_state);
- /* vblanks work again, re-enable pipe CRC. */
- intel_crtc_enable_pipe_crc(crtc);
- } else {
+ dev_priv->display.crtc_enable(state, crtc);
+
+ /* vblanks work again, re-enable pipe CRC. */
+ intel_crtc_enable_pipe_crc(crtc);
+}
+
+static void intel_update_crtc(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ bool modeset = needs_modeset(new_crtc_state);
+
+ if (!modeset) {
if (new_crtc_state->preload_luts &&
(new_crtc_state->uapi.color_mgmt_changed ||
new_crtc_state->update_pipe))
@@ -15064,7 +15006,7 @@ static void intel_update_crtc(struct intel_crtc *crtc,
/* Perform vblank evasion around commit operation */
intel_pipe_update_start(new_crtc_state);
- commit_pipe_config(state, old_crtc_state, new_crtc_state);
+ commit_pipe_config(state, crtc);
if (INTEL_GEN(dev_priv) >= 9)
skl_update_planes_on_crtc(state, crtc);
@@ -15084,18 +15026,6 @@ static void intel_update_crtc(struct intel_crtc *crtc,
intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
}
-static struct intel_crtc *intel_get_slave_crtc(const struct intel_crtc_state *new_crtc_state)
-{
- struct drm_i915_private *dev_priv = to_i915(new_crtc_state->uapi.crtc->dev);
- enum transcoder slave_transcoder;
-
- drm_WARN_ON(&dev_priv->drm,
- !is_power_of_2(new_crtc_state->sync_mode_slaves_mask));
-
- slave_transcoder = ffs(new_crtc_state->sync_mode_slaves_mask) - 1;
- return intel_get_crtc_for_pipe(dev_priv,
- (enum pipe)slave_transcoder);
-}
static void intel_old_crtc_state_disables(struct intel_atomic_state *state,
struct intel_crtc_state *old_crtc_state,
@@ -15171,129 +15101,19 @@ static void intel_commit_modeset_disables(struct intel_atomic_state *state)
static void intel_commit_modeset_enables(struct intel_atomic_state *state)
{
+ struct intel_crtc_state *new_crtc_state;
struct intel_crtc *crtc;
- struct intel_crtc_state *old_crtc_state, *new_crtc_state;
int i;
- for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
if (!new_crtc_state->hw.active)
continue;
- intel_update_crtc(crtc, state, old_crtc_state,
- new_crtc_state);
+ intel_enable_crtc(state, crtc);
+ intel_update_crtc(state, crtc);
}
}
-static void intel_crtc_enable_trans_port_sync(struct intel_crtc *crtc,
- struct intel_atomic_state *state,
- struct intel_crtc_state *new_crtc_state)
-{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
-
- intel_crtc_update_active_timings(new_crtc_state);
- dev_priv->display.crtc_enable(state, crtc);
- intel_crtc_enable_pipe_crc(crtc);
-}
-
-static void intel_set_dp_tp_ctl_normal(struct intel_crtc *crtc,
- struct intel_atomic_state *state)
-{
- struct drm_connector *uninitialized_var(conn);
- struct drm_connector_state *conn_state;
- struct intel_dp *intel_dp;
- int i;
-
- for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
- if (conn_state->crtc == &crtc->base)
- break;
- }
- intel_dp = intel_attached_dp(to_intel_connector(conn));
- intel_dp_stop_link_train(intel_dp);
-}
-
-/*
- * TODO: This is only called from port sync and it is identical to what will be
- * executed again in intel_update_crtc() over port sync pipes
- */
-static void intel_post_crtc_enable_updates(struct intel_crtc *crtc,
- struct intel_atomic_state *state)
-{
- struct intel_crtc_state *new_crtc_state =
- intel_atomic_get_new_crtc_state(state, crtc);
- struct intel_crtc_state *old_crtc_state =
- intel_atomic_get_old_crtc_state(state, crtc);
- bool modeset = needs_modeset(new_crtc_state);
-
- if (new_crtc_state->update_pipe && !new_crtc_state->enable_fbc)
- intel_fbc_disable(crtc);
- else
- intel_fbc_enable(state, crtc);
-
- /* Perform vblank evasion around commit operation */
- intel_pipe_update_start(new_crtc_state);
- commit_pipe_config(state, old_crtc_state, new_crtc_state);
- skl_update_planes_on_crtc(state, crtc);
- intel_pipe_update_end(new_crtc_state);
-
- /*
- * We usually enable FIFO underrun interrupts as part of the
- * CRTC enable sequence during modesets. But when we inherit a
- * valid pipe configuration from the BIOS we need to take care
- * of enabling them on the CRTC's first fastset.
- */
- if (new_crtc_state->update_pipe && !modeset &&
- old_crtc_state->hw.mode.private_flags & I915_MODE_FLAG_INHERITED)
- intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
-}
-
-static void intel_update_trans_port_sync_crtcs(struct intel_crtc *crtc,
- struct intel_atomic_state *state,
- struct intel_crtc_state *old_crtc_state,
- struct intel_crtc_state *new_crtc_state)
-{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
- struct intel_crtc *slave_crtc = intel_get_slave_crtc(new_crtc_state);
- struct intel_crtc_state *new_slave_crtc_state =
- intel_atomic_get_new_crtc_state(state, slave_crtc);
- struct intel_crtc_state *old_slave_crtc_state =
- intel_atomic_get_old_crtc_state(state, slave_crtc);
-
- drm_WARN_ON(&i915->drm, !slave_crtc || !new_slave_crtc_state ||
- !old_slave_crtc_state);
-
- drm_dbg_kms(&i915->drm,
- "Updating Transcoder Port Sync Master CRTC = %d %s and Slave CRTC %d %s\n",
- crtc->base.base.id, crtc->base.name,
- slave_crtc->base.base.id, slave_crtc->base.name);
-
- /* Enable seq for slave with with DP_TP_CTL left Idle until the
- * master is ready
- */
- intel_crtc_enable_trans_port_sync(slave_crtc,
- state,
- new_slave_crtc_state);
-
- /* Enable seq for master with with DP_TP_CTL left Idle */
- intel_crtc_enable_trans_port_sync(crtc,
- state,
- new_crtc_state);
-
- /* Set Slave's DP_TP_CTL to Normal */
- intel_set_dp_tp_ctl_normal(slave_crtc,
- state);
-
- /* Set Master's DP_TP_CTL To Normal */
- usleep_range(200, 400);
- intel_set_dp_tp_ctl_normal(crtc,
- state);
-
- /* Now do the post crtc enable for all master and slaves */
- intel_post_crtc_enable_updates(slave_crtc,
- state);
- intel_post_crtc_enable_updates(crtc,
- state);
-}
-
static void icl_dbuf_slice_pre_update(struct intel_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
@@ -15365,8 +15185,7 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state)
entries[pipe] = new_crtc_state->wm.skl.ddb;
update_pipes &= ~BIT(pipe);
- intel_update_crtc(crtc, state, old_crtc_state,
- new_crtc_state);
+ intel_update_crtc(state, crtc);
/*
* If this is an already active pipe, its DDB changed,
@@ -15381,67 +15200,62 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state)
}
}
+ update_pipes = modeset_pipes;
+
/*
* Enable all pipes that need a modeset and do not depend on other
* pipes
*/
- for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
- new_crtc_state, i) {
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
enum pipe pipe = crtc->pipe;
if ((modeset_pipes & BIT(pipe)) == 0)
continue;
if (intel_dp_mst_is_slave_trans(new_crtc_state) ||
- is_trans_port_sync_slave(new_crtc_state))
+ is_trans_port_sync_master(new_crtc_state))
continue;
- drm_WARN_ON(&dev_priv->drm, skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
- entries, I915_MAX_PIPES, pipe));
-
- entries[pipe] = new_crtc_state->wm.skl.ddb;
modeset_pipes &= ~BIT(pipe);
- if (is_trans_port_sync_mode(new_crtc_state)) {
- struct intel_crtc *slave_crtc;
+ intel_enable_crtc(state, crtc);
+ }
- intel_update_trans_port_sync_crtcs(crtc, state,
- old_crtc_state,
- new_crtc_state);
+ /*
+ * Then we enable all remaining pipes that depend on other
+ * pipes: MST slaves and port sync masters.
+ */
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
+ enum pipe pipe = crtc->pipe;
- slave_crtc = intel_get_slave_crtc(new_crtc_state);
- /* TODO: update entries[] of slave */
- modeset_pipes &= ~BIT(slave_crtc->pipe);
+ if ((modeset_pipes & BIT(pipe)) == 0)
+ continue;
- } else {
- intel_update_crtc(crtc, state, old_crtc_state,
- new_crtc_state);
- }
+ modeset_pipes &= ~BIT(pipe);
+
+ intel_enable_crtc(state, crtc);
}
/*
- * Finally enable all pipes that needs a modeset and depends on
- * other pipes, right now it is only MST slaves as both port sync slave
- * and master are enabled together
+ * Finally we do the plane updates/etc. for all pipes that got enabled.
*/
- for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
- new_crtc_state, i) {
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
enum pipe pipe = crtc->pipe;
- if ((modeset_pipes & BIT(pipe)) == 0)
+ if ((update_pipes & BIT(pipe)) == 0)
continue;
drm_WARN_ON(&dev_priv->drm, skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
entries, I915_MAX_PIPES, pipe));
entries[pipe] = new_crtc_state->wm.skl.ddb;
- modeset_pipes &= ~BIT(pipe);
+ update_pipes &= ~BIT(pipe);
- intel_update_crtc(crtc, state, old_crtc_state, new_crtc_state);
+ intel_update_crtc(state, crtc);
}
drm_WARN_ON(&dev_priv->drm, modeset_pipes);
-
+ drm_WARN_ON(&dev_priv->drm, update_pipes);
}
static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
@@ -18261,11 +18075,12 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
best_encoder = connector->base.state->best_encoder;
connector->base.state->best_encoder = &encoder->base;
+ /* FIXME NULL atomic state passed! */
if (encoder->disable)
- encoder->disable(encoder, crtc_state,
+ encoder->disable(NULL, encoder, crtc_state,
connector->base.state);
if (encoder->post_disable)
- encoder->post_disable(encoder, crtc_state,
+ encoder->post_disable(NULL, encoder, crtc_state,
connector->base.state);
connector->base.state->best_encoder = best_encoder;
@@ -18802,15 +18617,6 @@ void intel_modeset_driver_remove_noirq(struct drm_i915_private *i915)
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
-static bool
-has_transcoder(struct drm_i915_private *dev_priv, enum transcoder cpu_transcoder)
-{
- if (cpu_transcoder == TRANSCODER_EDP)
- return HAS_TRANSCODER_EDP(dev_priv);
- else
- return INTEL_INFO(dev_priv)->pipe_mask & BIT(cpu_transcoder);
-}
-
struct intel_display_error_state {
u32 power_well_driver;
@@ -18919,7 +18725,7 @@ intel_display_capture_error_state(struct drm_i915_private *dev_priv)
for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
enum transcoder cpu_transcoder = transcoders[i];
- if (!has_transcoder(dev_priv, cpu_transcoder))
+ if (!HAS_TRANSCODER(dev_priv, cpu_transcoder))
continue;
error->transcoder[i].available = true;
diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h
index adb1225a3480..cc7f287804d7 100644
--- a/drivers/gpu/drm/i915/display/intel_display.h
+++ b/drivers/gpu/drm/i915/display/intel_display.h
@@ -320,9 +320,13 @@ enum phy_fia {
for_each_pipe(__dev_priv, __p) \
for_each_if((__mask) & BIT(__p))
-#define for_each_cpu_transcoder_masked(__dev_priv, __t, __mask) \
+#define for_each_cpu_transcoder(__dev_priv, __t) \
for ((__t) = 0; (__t) < I915_MAX_TRANSCODERS; (__t)++) \
- for_each_if ((__mask) & (1 << (__t)))
+ for_each_if (INTEL_INFO(__dev_priv)->cpu_transcoder_mask & BIT(__t))
+
+#define for_each_cpu_transcoder_masked(__dev_priv, __t, __mask) \
+ for_each_cpu_transcoder(__dev_priv, __t) \
+ for_each_if ((__mask) & BIT(__t))
#define for_each_universal_plane(__dev_priv, __pipe, __p) \
for ((__p) = 0; \
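for_each_cpu_transcoder() is the new base iterator: it walks only the transcoders present on the device, per INTEL_INFO()->cpu_transcoder_mask, and the existing _masked variant is now layered on top of it, so a caller-supplied mask can no longer select a transcoder the hardware lacks. A hypothetical caller, for illustration only:

    enum transcoder cpu_transcoder;

    /* visit every transcoder this platform actually has */
    for_each_cpu_transcoder(dev_priv, cpu_transcoder)
        drm_dbg_kms(&dev_priv->drm, "transcoder %s present\n",
                    transcoder_name(cpu_transcoder));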
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
index 9f736420d83f..bdeea2e02642 100644
--- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
@@ -1320,6 +1320,16 @@ static int i915_displayport_test_data_show(struct seq_file *m, void *data)
intel_dp->compliance.test_data.vdisplay);
seq_printf(m, "bpc: %u\n",
intel_dp->compliance.test_data.bpc);
+ } else if (intel_dp->compliance.test_type ==
+ DP_TEST_LINK_PHY_TEST_PATTERN) {
+ seq_printf(m, "pattern: %d\n",
+ intel_dp->compliance.test_data.phytest.phy_pattern);
+ seq_printf(m, "Number of lanes: %d\n",
+ intel_dp->compliance.test_data.phytest.num_lanes);
+ seq_printf(m, "Link Rate: %d\n",
+ intel_dp->compliance.test_data.phytest.link_rate);
+ seq_printf(m, "level: %02x\n",
+ intel_dp->train_set[0]);
}
} else
seq_puts(m, "0");
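With this addition, a pending DP_TEST_LINK_PHY_TEST_PATTERN request renders along these lines in the debugfs file (values below are invented, purely to show the format strings above in action):

    pattern: 1
    Number of lanes: 4
    Link Rate: 162000
    level: 02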
@@ -1352,7 +1362,7 @@ static int i915_displayport_test_type_show(struct seq_file *m, void *data)
if (encoder && connector->status == connector_status_connected) {
intel_dp = enc_to_intel_dp(encoder);
- seq_printf(m, "%02lx", intel_dp->compliance.test_type);
+ seq_printf(m, "%02lx\n", intel_dp->compliance.test_type);
} else
seq_puts(m, "0");
}
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
index 246e406bb385..03bdde19c8c9 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power.c
@@ -1873,20 +1873,27 @@ __async_put_domains_state_ok(struct i915_power_domains *power_domains)
static void print_power_domains(struct i915_power_domains *power_domains,
const char *prefix, u64 mask)
{
+ struct drm_i915_private *i915 = container_of(power_domains,
+ struct drm_i915_private,
+ power_domains);
enum intel_display_power_domain domain;
- DRM_DEBUG_DRIVER("%s (%lu):\n", prefix, hweight64(mask));
+ drm_dbg(&i915->drm, "%s (%lu):\n", prefix, hweight64(mask));
for_each_power_domain(domain, mask)
- DRM_DEBUG_DRIVER("%s use_count %d\n",
- intel_display_power_domain_str(domain),
- power_domains->domain_use_count[domain]);
+ drm_dbg(&i915->drm, "%s use_count %d\n",
+ intel_display_power_domain_str(domain),
+ power_domains->domain_use_count[domain]);
}
static void
print_async_put_domains_state(struct i915_power_domains *power_domains)
{
- DRM_DEBUG_DRIVER("async_put_wakeref %u\n",
- power_domains->async_put_wakeref);
+ struct drm_i915_private *i915 = container_of(power_domains,
+ struct drm_i915_private,
+ power_domains);
+
+ drm_dbg(&i915->drm, "async_put_wakeref %u\n",
+ power_domains->async_put_wakeref);
print_power_domains(power_domains, "async_put_domains[0]",
power_domains->async_put_domains[0]);
@@ -4140,7 +4147,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
{
.name = "AUX D TBT1",
.domains = TGL_AUX_D_TBT1_IO_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
+ .ops = &icl_tc_phy_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
@@ -4151,7 +4158,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
{
.name = "AUX E TBT2",
.domains = TGL_AUX_E_TBT2_IO_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
+ .ops = &icl_tc_phy_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
@@ -4162,7 +4169,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
{
.name = "AUX F TBT3",
.domains = TGL_AUX_F_TBT3_IO_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
+ .ops = &icl_tc_phy_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
@@ -4173,7 +4180,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
{
.name = "AUX G TBT4",
.domains = TGL_AUX_G_TBT4_IO_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
+ .ops = &icl_tc_phy_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
@@ -4184,7 +4191,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
{
.name = "AUX H TBT5",
.domains = TGL_AUX_H_TBT5_IO_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
+ .ops = &icl_tc_phy_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
@@ -4195,7 +4202,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
{
.name = "AUX I TBT6",
.domains = TGL_AUX_I_TBT6_IO_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
+ .ops = &icl_tc_phy_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
@@ -4480,7 +4487,8 @@ void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
drm_WARN(&dev_priv->drm, hweight8(req_slices) > max_slices,
"Invalid number of dbuf slices requested\n");
- DRM_DEBUG_KMS("Updating dbuf slices to 0x%x\n", req_slices);
+ drm_dbg_kms(&dev_priv->drm, "Updating dbuf slices to 0x%x\n",
+ req_slices);
/*
* Might be running this in parallel to gen9_dc_off_power_well_enable
@@ -5016,7 +5024,7 @@ static void tgl_bw_buddy_init(struct drm_i915_private *dev_priv)
const struct buddy_page_mask *table;
int i;
- if (IS_TGL_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_A0))
+ if (IS_TGL_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_B0))
/* Wa_1409767108: tgl */
table = wa_1409767108_buddy_page_masks;
else
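Widening the range from A0-A0 to A0-B0 keeps Wa_1409767108 applied on B0 steppings as well. IS_TGL_REVID(p, since, until) is an inclusive check on the PCI revision ID; its assumed shape (the real macro lives in i915_drv.h):

    #define IS_TGL_REVID(p, since, until) \
        (IS_TIGERLAKE(p) && \
         INTEL_REVID(p) >= (since) && INTEL_REVID(p) <= (until))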
diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index e67474a71761..ba8c08145c88 100644
--- a/drivers/gpu/drm/i915/display/intel_display_types.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -132,8 +132,7 @@ struct intel_encoder {
u16 cloneable;
u8 pipe_mask;
enum intel_hotplug_state (*hotplug)(struct intel_encoder *encoder,
- struct intel_connector *connector,
- bool irq_received);
+ struct intel_connector *connector);
enum intel_output_type (*compute_output_type)(struct intel_encoder *,
struct intel_crtc_state *,
struct drm_connector_state *);
@@ -146,28 +145,35 @@ struct intel_encoder {
void (*update_prepare)(struct intel_atomic_state *,
struct intel_encoder *,
struct intel_crtc *);
- void (*pre_pll_enable)(struct intel_encoder *,
+ void (*pre_pll_enable)(struct intel_atomic_state *,
+ struct intel_encoder *,
const struct intel_crtc_state *,
const struct drm_connector_state *);
- void (*pre_enable)(struct intel_encoder *,
+ void (*pre_enable)(struct intel_atomic_state *,
+ struct intel_encoder *,
const struct intel_crtc_state *,
const struct drm_connector_state *);
- void (*enable)(struct intel_encoder *,
+ void (*enable)(struct intel_atomic_state *,
+ struct intel_encoder *,
const struct intel_crtc_state *,
const struct drm_connector_state *);
void (*update_complete)(struct intel_atomic_state *,
struct intel_encoder *,
struct intel_crtc *);
- void (*disable)(struct intel_encoder *,
+ void (*disable)(struct intel_atomic_state *,
+ struct intel_encoder *,
const struct intel_crtc_state *,
const struct drm_connector_state *);
- void (*post_disable)(struct intel_encoder *,
+ void (*post_disable)(struct intel_atomic_state *,
+ struct intel_encoder *,
const struct intel_crtc_state *,
const struct drm_connector_state *);
- void (*post_pll_disable)(struct intel_encoder *,
+ void (*post_pll_disable)(struct intel_atomic_state *,
+ struct intel_encoder *,
const struct intel_crtc_state *,
const struct drm_connector_state *);
- void (*update_pipe)(struct intel_encoder *,
+ void (*update_pipe)(struct intel_atomic_state *,
+ struct intel_encoder *,
const struct intel_crtc_state *,
const struct drm_connector_state *);
/* Read out the current hw state of this connector, returning true if
@@ -425,6 +431,9 @@ struct intel_connector {
struct edid *edid;
struct edid *detect_edid;
+ /* Number of times hotplug detection was tried after an HPD interrupt */
+ int hotplug_retries;
+
/* since POLL and HPD connectors may use the same HPD line keep the native
state of connector->polled in case hotplug storm detection changes it */
u8 polled;
@@ -640,6 +649,16 @@ struct intel_crtc_scaler_state {
#define I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP (1<<1)
/* Flag to use the scanline counter instead of the pixel counter */
#define I915_MODE_FLAG_USE_SCANLINE_COUNTER (1<<2)
+/*
+ * The TE0 or TE1 flag is set if the crtc has a DSI encoder operating
+ * in command mode.
+ * Flag to use TE from DSI0 instead of the VBI in command mode
+ */
+#define I915_MODE_FLAG_DSI_USE_TE0 (1<<3)
+/* Flag to use TE from DSI1 instead of VBI in command mode */
+#define I915_MODE_FLAG_DSI_USE_TE1 (1<<4)
+/* Flag to indicate mipi dsi periodic command mode where we do not get TE */
+#define I915_MODE_FLAG_DSI_PERIODIC_CMD_MODE (1<<5)
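Like the I915_MODE_FLAG_* bits above them, these travel in mode->private_flags. A hypothetical consumer, only to illustrate the intended usage:

    /* illustrative only: choose the vblank substitute for a
     * command-mode DSI pipe */
    if (mode->private_flags & I915_MODE_FLAG_DSI_USE_TE0) {
        /* wait for the tearing effect signal from DSI0, not the VBI */
    } else if (mode->private_flags & I915_MODE_FLAG_DSI_PERIODIC_CMD_MODE) {
        /* no TE at all; the panel refreshes on its own cadence */
    }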
struct intel_wm_level {
bool enable;
@@ -1015,6 +1034,7 @@ struct intel_crtc_state {
union hdmi_infoframe spd;
union hdmi_infoframe hdmi;
union hdmi_infoframe drm;
+ struct drm_dp_vsc_sdp vsc;
} infoframes;
/* HDMI scrambling status */
@@ -1238,6 +1258,7 @@ struct intel_dp_compliance_data {
u8 video_pattern;
u16 hdisplay, vdisplay;
u8 bpc;
+ struct drm_dp_phy_test_params phytest;
};
struct intel_dp_compliance {
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index 0a417cd2af2b..d4fcc9583869 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -164,6 +164,17 @@ static void intel_dp_set_sink_rates(struct intel_dp *intel_dp)
};
int i, max_rate;
+ if (drm_dp_has_quirk(&intel_dp->desc, 0,
+ DP_DPCD_QUIRK_CAN_DO_MAX_LINK_RATE_3_24_GBPS)) {
+ /* Needed, e.g., for Apple MBP 2017, 15 inch eDP Retina panel */
+ static const int quirk_rates[] = { 162000, 270000, 324000 };
+
+ memcpy(intel_dp->sink_rates, quirk_rates, sizeof(quirk_rates));
+ intel_dp->num_sink_rates = ARRAY_SIZE(quirk_rates);
+
+ return;
+ }
+
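The quirk short-circuits the usual derivation from DP_MAX_LINK_RATE below. DPCD link-bw codes encode the per-lane rate in units of 0.27 Gbps, i.e. drm_dp_bw_code_to_link_rate() is essentially code * 27000 in kHz, which is where the three quirk values come from:

    /* 0x06 * 27000 = 162000 kHz (RBR)
     * 0x0a * 27000 = 270000 kHz (HBR)
     * 0x0c * 27000 = 324000 kHz, the 3.24 Gbps maximum of the quirked panel */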
max_rate = drm_dp_bw_code_to_link_rate(intel_dp->dpcd[DP_MAX_LINK_RATE]);
for (i = 0; i < ARRAY_SIZE(dp_rates); i++) {
@@ -452,6 +463,7 @@ static bool intel_dp_can_link_train_fallback_for_edp(struct intel_dp *intel_dp,
int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
int link_rate, u8 lane_count)
{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
int index;
index = intel_dp_rate_index(intel_dp->common_rates,
@@ -462,7 +474,8 @@ int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
!intel_dp_can_link_train_fallback_for_edp(intel_dp,
intel_dp->common_rates[index - 1],
lane_count)) {
- DRM_DEBUG_KMS("Retrying Link training for eDP with same parameters\n");
+ drm_dbg_kms(&i915->drm,
+ "Retrying Link training for eDP with same parameters\n");
return 0;
}
intel_dp->max_link_rate = intel_dp->common_rates[index - 1];
@@ -472,13 +485,14 @@ int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
!intel_dp_can_link_train_fallback_for_edp(intel_dp,
intel_dp_max_common_rate(intel_dp),
lane_count >> 1)) {
- DRM_DEBUG_KMS("Retrying Link training for eDP with same parameters\n");
+ drm_dbg_kms(&i915->drm,
+ "Retrying Link training for eDP with same parameters\n");
return 0;
}
intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);
intel_dp->max_link_lane_count = lane_count >> 1;
} else {
- DRM_ERROR("Link Training Unsuccessful\n");
+ drm_err(&i915->drm, "Link Training Unsuccessful\n");
return -1;
}
@@ -553,6 +567,7 @@ static u16 intel_dp_dsc_get_output_bpp(struct drm_i915_private *i915,
static u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
int mode_clock, int mode_hdisplay)
{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
u8 min_slice_count, i;
int max_slice_width;
@@ -565,8 +580,9 @@ static u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
max_slice_width = drm_dp_dsc_sink_max_slice_width(intel_dp->dsc_dpcd);
if (max_slice_width < DP_DSC_MIN_SLICE_WIDTH_VALUE) {
- DRM_DEBUG_KMS("Unsupported slice width %d by DP DSC Sink device\n",
- max_slice_width);
+ drm_dbg_kms(&i915->drm,
+ "Unsupported slice width %d by DP DSC Sink device\n",
+ max_slice_width);
return 0;
}
/* Also take into account max slice width */
@@ -584,7 +600,8 @@ static u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
return valid_dsc_slicecount[i];
}
- DRM_DEBUG_KMS("Unsupported Slice Count %d\n", min_slice_count);
+ drm_dbg_kms(&i915->drm, "Unsupported Slice Count %d\n",
+ min_slice_count);
return 0;
}
@@ -1374,7 +1391,7 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
* lowest possible wakeup latency and so prevent the cpu from going into
* deep sleep states.
*/
- pm_qos_update_request(&i915->pm_qos, 0);
+ cpu_latency_qos_update_request(&i915->pm_qos, 0);
intel_dp_check_edp(intel_dp);
@@ -1507,7 +1524,7 @@ done:
ret = recv_bytes;
out:
- pm_qos_update_request(&i915->pm_qos, PM_QOS_DEFAULT_VALUE);
+ cpu_latency_qos_update_request(&i915->pm_qos, PM_QOS_DEFAULT_VALUE);
if (vdd)
edp_panel_vdd_off(intel_dp, false);
@@ -1832,6 +1849,7 @@ static void snprintf_int_array(char *str, size_t len,
static void intel_dp_print_rates(struct intel_dp *intel_dp)
{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
char str[128]; /* FIXME: too big for stack? */
if (!drm_debug_enabled(DRM_UT_KMS))
@@ -1839,15 +1857,15 @@ static void intel_dp_print_rates(struct intel_dp *intel_dp)
snprintf_int_array(str, sizeof(str),
intel_dp->source_rates, intel_dp->num_source_rates);
- DRM_DEBUG_KMS("source rates: %s\n", str);
+ drm_dbg_kms(&i915->drm, "source rates: %s\n", str);
snprintf_int_array(str, sizeof(str),
intel_dp->sink_rates, intel_dp->num_sink_rates);
- DRM_DEBUG_KMS("sink rates: %s\n", str);
+ drm_dbg_kms(&i915->drm, "sink rates: %s\n", str);
snprintf_int_array(str, sizeof(str),
intel_dp->common_rates, intel_dp->num_common_rates);
- DRM_DEBUG_KMS("common rates: %s\n", str);
+ drm_dbg_kms(&i915->drm, "common rates: %s\n", str);
}
int
@@ -1954,6 +1972,8 @@ intel_dp_adjust_compliance_config(struct intel_dp *intel_dp,
struct intel_crtc_state *pipe_config,
struct link_config_limits *limits)
{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
/* For DP Compliance we override the computed bpp for the pipe */
if (intel_dp->compliance.test_data.bpc != 0) {
int bpp = 3 * intel_dp->compliance.test_data.bpc;
@@ -1961,7 +1981,7 @@ intel_dp_adjust_compliance_config(struct intel_dp *intel_dp,
limits->min_bpp = limits->max_bpp = bpp;
pipe_config->dither_force_disable = bpp == 6 * 3;
- DRM_DEBUG_KMS("Setting pipe_bpp to %d\n", bpp);
+ drm_dbg_kms(&i915->drm, "Setting pipe_bpp to %d\n", bpp);
}
/* Use values requested by Compliance Test Request */
@@ -2055,6 +2075,7 @@ static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc)
static int intel_dp_dsc_compute_params(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state)
{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
u8 line_buf_depth;
@@ -2089,7 +2110,8 @@ static int intel_dp_dsc_compute_params(struct intel_encoder *encoder,
line_buf_depth = drm_dp_dsc_sink_line_buf_depth(intel_dp->dsc_dpcd);
if (!line_buf_depth) {
- DRM_DEBUG_KMS("DSC Sink Line Buffer Depth invalid\n");
+ drm_dbg_kms(&i915->drm,
+ "DSC Sink Line Buffer Depth invalid\n");
return -EINVAL;
}
@@ -2114,7 +2136,8 @@ static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
- struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
+ const struct drm_display_mode *adjusted_mode =
+ &pipe_config->hw.adjusted_mode;
u8 dsc_max_bpc;
int pipe_bpp;
int ret;
@@ -2229,7 +2252,9 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
- struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ const struct drm_display_mode *adjusted_mode =
+ &pipe_config->hw.adjusted_mode;
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct link_config_limits limits;
int common_len;
@@ -2264,11 +2289,11 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
intel_dp_adjust_compliance_config(intel_dp, pipe_config, &limits);
- DRM_DEBUG_KMS("DP link computation with max lane count %i "
- "max rate %d max bpp %d pixel clock %iKHz\n",
- limits.max_lane_count,
- intel_dp->common_rates[limits.max_clock],
- limits.max_bpp, adjusted_mode->crtc_clock);
+ drm_dbg_kms(&i915->drm, "DP link computation with max lane count %i "
+ "max rate %d max bpp %d pixel clock %iKHz\n",
+ limits.max_lane_count,
+ intel_dp->common_rates[limits.max_clock],
+ limits.max_bpp, adjusted_mode->crtc_clock);
/*
* Optimize for slow and wide. This is the place to add alternative
@@ -2277,7 +2302,7 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, &limits);
/* enable compression if the mode doesn't fit available BW */
- DRM_DEBUG_KMS("Force DSC en = %d\n", intel_dp->force_dsc_en);
+ drm_dbg_kms(&i915->drm, "Force DSC en = %d\n", intel_dp->force_dsc_en);
if (ret || intel_dp->force_dsc_en) {
ret = intel_dp_dsc_compute_config(intel_dp, pipe_config,
conn_state, &limits);
@@ -2286,26 +2311,29 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
}
if (pipe_config->dsc.compression_enable) {
- DRM_DEBUG_KMS("DP lane count %d clock %d Input bpp %d Compressed bpp %d\n",
- pipe_config->lane_count, pipe_config->port_clock,
- pipe_config->pipe_bpp,
- pipe_config->dsc.compressed_bpp);
-
- DRM_DEBUG_KMS("DP link rate required %i available %i\n",
- intel_dp_link_required(adjusted_mode->crtc_clock,
- pipe_config->dsc.compressed_bpp),
- intel_dp_max_data_rate(pipe_config->port_clock,
- pipe_config->lane_count));
+ drm_dbg_kms(&i915->drm,
+ "DP lane count %d clock %d Input bpp %d Compressed bpp %d\n",
+ pipe_config->lane_count, pipe_config->port_clock,
+ pipe_config->pipe_bpp,
+ pipe_config->dsc.compressed_bpp);
+
+ drm_dbg_kms(&i915->drm,
+ "DP link rate required %i available %i\n",
+ intel_dp_link_required(adjusted_mode->crtc_clock,
+ pipe_config->dsc.compressed_bpp),
+ intel_dp_max_data_rate(pipe_config->port_clock,
+ pipe_config->lane_count));
} else {
- DRM_DEBUG_KMS("DP lane count %d clock %d bpp %d\n",
- pipe_config->lane_count, pipe_config->port_clock,
- pipe_config->pipe_bpp);
+ drm_dbg_kms(&i915->drm, "DP lane count %d clock %d bpp %d\n",
+ pipe_config->lane_count, pipe_config->port_clock,
+ pipe_config->pipe_bpp);
- DRM_DEBUG_KMS("DP link rate required %i available %i\n",
- intel_dp_link_required(adjusted_mode->crtc_clock,
- pipe_config->pipe_bpp),
- intel_dp_max_data_rate(pipe_config->port_clock,
- pipe_config->lane_count));
+ drm_dbg_kms(&i915->drm,
+ "DP link rate required %i available %i\n",
+ intel_dp_link_required(adjusted_mode->crtc_clock,
+ pipe_config->pipe_bpp),
+ intel_dp_max_data_rate(pipe_config->port_clock,
+ pipe_config->lane_count));
}
return 0;
}
@@ -2315,6 +2343,7 @@ intel_dp_ycbcr420_config(struct intel_dp *intel_dp,
struct drm_connector *connector,
struct intel_crtc_state *crtc_state)
{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
const struct drm_display_info *info = &connector->display_info;
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
@@ -2331,7 +2360,8 @@ intel_dp_ycbcr420_config(struct intel_dp *intel_dp,
/* YCBCR 420 output conversion needs a scaler */
ret = skl_update_scaler_crtc(crtc_state);
if (ret) {
- DRM_DEBUG_KMS("Scaler allocation for output failed\n");
+ drm_dbg_kms(&i915->drm,
+ "Scaler allocation for output failed\n");
return ret;
}
@@ -2384,6 +2414,128 @@ static bool intel_dp_port_has_audio(struct drm_i915_private *dev_priv,
return true;
}
+static void intel_dp_compute_vsc_colorimetry(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state,
+ struct drm_dp_vsc_sdp *vsc)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+
+ /*
+ * Prepare VSC Header for SU as per DP 1.4 spec, Table 2-118
+ * VSC SDP supporting 3D stereo, PSR2, and Pixel Encoding/
+ * Colorimetry Format indication.
+ */
+ vsc->revision = 0x5;
+ vsc->length = 0x13;
+
+ /* DP 1.4a spec, Table 2-120 */
+ switch (crtc_state->output_format) {
+ case INTEL_OUTPUT_FORMAT_YCBCR444:
+ vsc->pixelformat = DP_PIXELFORMAT_YUV444;
+ break;
+ case INTEL_OUTPUT_FORMAT_YCBCR420:
+ vsc->pixelformat = DP_PIXELFORMAT_YUV420;
+ break;
+ case INTEL_OUTPUT_FORMAT_RGB:
+ default:
+ vsc->pixelformat = DP_PIXELFORMAT_RGB;
+ }
+
+ switch (conn_state->colorspace) {
+ case DRM_MODE_COLORIMETRY_BT709_YCC:
+ vsc->colorimetry = DP_COLORIMETRY_BT709_YCC;
+ break;
+ case DRM_MODE_COLORIMETRY_XVYCC_601:
+ vsc->colorimetry = DP_COLORIMETRY_XVYCC_601;
+ break;
+ case DRM_MODE_COLORIMETRY_XVYCC_709:
+ vsc->colorimetry = DP_COLORIMETRY_XVYCC_709;
+ break;
+ case DRM_MODE_COLORIMETRY_SYCC_601:
+ vsc->colorimetry = DP_COLORIMETRY_SYCC_601;
+ break;
+ case DRM_MODE_COLORIMETRY_OPYCC_601:
+ vsc->colorimetry = DP_COLORIMETRY_OPYCC_601;
+ break;
+ case DRM_MODE_COLORIMETRY_BT2020_CYCC:
+ vsc->colorimetry = DP_COLORIMETRY_BT2020_CYCC;
+ break;
+ case DRM_MODE_COLORIMETRY_BT2020_RGB:
+ vsc->colorimetry = DP_COLORIMETRY_BT2020_RGB;
+ break;
+ case DRM_MODE_COLORIMETRY_BT2020_YCC:
+ vsc->colorimetry = DP_COLORIMETRY_BT2020_YCC;
+ break;
+ case DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65:
+ case DRM_MODE_COLORIMETRY_DCI_P3_RGB_THEATER:
+ vsc->colorimetry = DP_COLORIMETRY_DCI_P3_RGB;
+ break;
+ default:
+ /*
+ * RGB->YCBCR color conversion uses the BT.709
+ * color space.
+ */
+ if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
+ vsc->colorimetry = DP_COLORIMETRY_BT709_YCC;
+ else
+ vsc->colorimetry = DP_COLORIMETRY_DEFAULT;
+ break;
+ }
+
+ vsc->bpc = crtc_state->pipe_bpp / 3;
+
+ /* only RGB pixelformat supports 6 bpc */
+ drm_WARN_ON(&dev_priv->drm,
+ vsc->bpc == 6 && vsc->pixelformat != DP_PIXELFORMAT_RGB);
+
+ /* all YCbCr are always limited range */
+ vsc->dynamic_range = DP_DYNAMIC_RANGE_CTA;
+ vsc->content_type = DP_CONTENT_TYPE_NOT_DEFINED;
+}
+
+static void intel_dp_compute_vsc_sdp(struct intel_dp *intel_dp,
+ struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ struct drm_dp_vsc_sdp *vsc = &crtc_state->infoframes.vsc;
+
+ /* When PSR is enabled, VSC SDP is handled by PSR routine */
+ if (intel_psr_enabled(intel_dp))
+ return;
+
+ if (!intel_dp_needs_vsc_sdp(crtc_state, conn_state))
+ return;
+
+ crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
+ vsc->sdp_type = DP_SDP_VSC;
+ intel_dp_compute_vsc_colorimetry(crtc_state, conn_state,
+ &crtc_state->infoframes.vsc);
+}
+
+static void
+intel_dp_compute_hdr_metadata_infoframe_sdp(struct intel_dp *intel_dp,
+ struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ int ret;
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct hdmi_drm_infoframe *drm_infoframe = &crtc_state->infoframes.drm.drm;
+
+ if (!conn_state->hdr_output_metadata)
+ return;
+
+ ret = drm_hdmi_infoframe_set_hdr_metadata(drm_infoframe, conn_state);
+
+ if (ret) {
+ drm_dbg_kms(&dev_priv->drm, "couldn't set HDR metadata in infoframe\n");
+ return;
+ }
+
+ crtc_state->infoframes.enable |=
+ intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA);
+}
+
int
intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
@@ -2489,6 +2641,8 @@ intel_dp_compute_config(struct intel_encoder *encoder,
intel_dp_set_clock(encoder, pipe_config);
intel_psr_compute_config(intel_dp, pipe_config);
+ intel_dp_compute_vsc_sdp(intel_dp, pipe_config, conn_state);
+ intel_dp_compute_hdr_metadata_infoframe_sdp(intel_dp, pipe_config, conn_state);
return 0;
}
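The two new calls split SDP handling into a compute step at atomic-check time and a write step at commit time. The assumed flow, using only names introduced by this patch:

    /* atomic check:
     *   intel_dp_compute_config()
     *     -> intel_dp_compute_vsc_sdp()                    (fills infoframes.vsc)
     *     -> intel_dp_compute_hdr_metadata_infoframe_sdp() (fills infoframes.drm)
     * commit:
     *   intel_dp_set_infoframes()
     *     -> intel_write_dp_sdp(DP_SDP_VSC)
     *     -> intel_write_dp_sdp(HDMI_PACKET_TYPE_GAMUT_METADATA)
     */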
@@ -2633,22 +2787,27 @@ static void wait_panel_status(struct intel_dp *intel_dp,
static void wait_panel_on(struct intel_dp *intel_dp)
{
- DRM_DEBUG_KMS("Wait for panel power on\n");
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
+ drm_dbg_kms(&i915->drm, "Wait for panel power on\n");
wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
}
static void wait_panel_off(struct intel_dp *intel_dp)
{
- DRM_DEBUG_KMS("Wait for panel power off time\n");
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
+ drm_dbg_kms(&i915->drm, "Wait for panel power off time\n");
wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
}
static void wait_panel_power_cycle(struct intel_dp *intel_dp)
{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
ktime_t panel_power_on_time;
s64 panel_power_off_duration;
- DRM_DEBUG_KMS("Wait for panel power cycle\n");
+ drm_dbg_kms(&i915->drm, "Wait for panel power cycle\n");
/* take the difference of current time and panel power off time
* and then make the panel wait for t11_t12 if needed. */
@@ -3012,11 +3171,12 @@ void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(conn_state->best_encoder));
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
if (!intel_dp_is_edp(intel_dp))
return;
- DRM_DEBUG_KMS("\n");
+ drm_dbg_kms(&i915->drm, "\n");
intel_panel_enable_backlight(crtc_state, conn_state);
_intel_edp_backlight_on(intel_dp);
@@ -3050,11 +3210,12 @@ static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
void intel_edp_backlight_off(const struct drm_connector_state *old_conn_state)
{
struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(old_conn_state->best_encoder));
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
if (!intel_dp_is_edp(intel_dp))
return;
- DRM_DEBUG_KMS("\n");
+ drm_dbg_kms(&i915->drm, "\n");
_intel_edp_backlight_off(intel_dp);
intel_panel_disable_backlight(old_conn_state);
@@ -3067,6 +3228,7 @@ void intel_edp_backlight_off(const struct drm_connector_state *old_conn_state)
static void intel_edp_backlight_power(struct intel_connector *connector,
bool enable)
{
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_dp *intel_dp = intel_attached_dp(connector);
intel_wakeref_t wakeref;
bool is_enabled;
@@ -3077,8 +3239,8 @@ static void intel_edp_backlight_power(struct intel_connector *connector,
if (is_enabled == enable)
return;
- DRM_DEBUG_KMS("panel power control backlight %s\n",
- enable ? "enable" : "disable");
+ drm_dbg_kms(&i915->drm, "panel power control backlight %s\n",
+ enable ? "enable" : "disable");
if (enable)
_intel_edp_backlight_on(intel_dp);
@@ -3188,6 +3350,7 @@ void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
bool enable)
{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
int ret;
if (!crtc_state->dsc.compression_enable)
@@ -3196,13 +3359,15 @@ void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp,
ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_DSC_ENABLE,
enable ? DP_DECOMPRESSION_EN : 0);
if (ret < 0)
- DRM_DEBUG_KMS("Failed to %s sink decompression state\n",
- enable ? "enable" : "disable");
+ drm_dbg_kms(&i915->drm,
+ "Failed to %s sink decompression state\n",
+ enable ? "enable" : "disable");
}
/* If the sink supports it, try to set the power state appropriately */
void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
int ret, i;
/* Should have a valid DPCD by this point */
@@ -3235,8 +3400,8 @@ void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
}
if (ret != 1)
- DRM_DEBUG_KMS("failed to %s sink power state\n",
- mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
+ drm_dbg_kms(&i915->drm, "failed to %s sink power state\n",
+ mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
}
static bool cpt_dp_port_selected(struct drm_i915_private *dev_priv,
@@ -3393,7 +3558,8 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
}
}
-static void intel_disable_dp(struct intel_encoder *encoder,
+static void intel_disable_dp(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
@@ -3413,21 +3579,24 @@ static void intel_disable_dp(struct intel_encoder *encoder,
intel_edp_panel_off(intel_dp);
}
-static void g4x_disable_dp(struct intel_encoder *encoder,
+static void g4x_disable_dp(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- intel_disable_dp(encoder, old_crtc_state, old_conn_state);
+ intel_disable_dp(state, encoder, old_crtc_state, old_conn_state);
}
-static void vlv_disable_dp(struct intel_encoder *encoder,
+static void vlv_disable_dp(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- intel_disable_dp(encoder, old_crtc_state, old_conn_state);
+ intel_disable_dp(state, encoder, old_crtc_state, old_conn_state);
}
-static void g4x_post_disable_dp(struct intel_encoder *encoder,
+static void g4x_post_disable_dp(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
@@ -3447,14 +3616,16 @@ static void g4x_post_disable_dp(struct intel_encoder *encoder,
ilk_edp_pll_off(intel_dp, old_crtc_state);
}
-static void vlv_post_disable_dp(struct intel_encoder *encoder,
+static void vlv_post_disable_dp(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
intel_dp_link_down(encoder, old_crtc_state);
}
-static void chv_post_disable_dp(struct intel_encoder *encoder,
+static void chv_post_disable_dp(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
@@ -3580,7 +3751,8 @@ static void intel_dp_enable_port(struct intel_dp *intel_dp,
intel_de_posting_read(dev_priv, intel_dp->output_reg);
}
-static void intel_enable_dp(struct intel_encoder *encoder,
+static void intel_enable_dp(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
@@ -3626,22 +3798,25 @@ static void intel_enable_dp(struct intel_encoder *encoder,
}
}
-static void g4x_enable_dp(struct intel_encoder *encoder,
+static void g4x_enable_dp(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
- intel_enable_dp(encoder, pipe_config, conn_state);
+ intel_enable_dp(state, encoder, pipe_config, conn_state);
intel_edp_backlight_on(pipe_config, conn_state);
}
-static void vlv_enable_dp(struct intel_encoder *encoder,
+static void vlv_enable_dp(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
intel_edp_backlight_on(pipe_config, conn_state);
}
-static void g4x_pre_enable_dp(struct intel_encoder *encoder,
+static void g4x_pre_enable_dp(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
@@ -3761,16 +3936,18 @@ static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder,
intel_dp_init_panel_power_sequencer_registers(intel_dp, true);
}
-static void vlv_pre_enable_dp(struct intel_encoder *encoder,
+static void vlv_pre_enable_dp(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
vlv_phy_pre_encoder_enable(encoder, pipe_config);
- intel_enable_dp(encoder, pipe_config, conn_state);
+ intel_enable_dp(state, encoder, pipe_config, conn_state);
}
-static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder,
+static void vlv_dp_pre_pll_enable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
@@ -3779,19 +3956,21 @@ static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder,
vlv_phy_pre_pll_enable(encoder, pipe_config);
}
-static void chv_pre_enable_dp(struct intel_encoder *encoder,
+static void chv_pre_enable_dp(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
chv_phy_pre_encoder_enable(encoder, pipe_config);
- intel_enable_dp(encoder, pipe_config, conn_state);
+ intel_enable_dp(state, encoder, pipe_config, conn_state);
/* Second common lane will stay alive on its own now */
chv_phy_release_cl2_override(encoder);
}
-static void chv_dp_pre_pll_enable(struct intel_encoder *encoder,
+static void chv_dp_pre_pll_enable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
@@ -3800,7 +3979,8 @@ static void chv_dp_pre_pll_enable(struct intel_encoder *encoder,
chv_phy_pre_pll_enable(encoder, pipe_config);
}
-static void chv_dp_post_pll_disable(struct intel_encoder *encoder,
+static void chv_dp_post_pll_disable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
@@ -4319,6 +4499,7 @@ intel_dp_link_down(struct intel_encoder *encoder,
static void
intel_dp_extended_receiver_capabilities(struct intel_dp *intel_dp)
{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
u8 dpcd_ext[6];
/*
@@ -4334,20 +4515,22 @@ intel_dp_extended_receiver_capabilities(struct intel_dp *intel_dp)
if (drm_dp_dpcd_read(&intel_dp->aux, DP_DP13_DPCD_REV,
&dpcd_ext, sizeof(dpcd_ext)) != sizeof(dpcd_ext)) {
- DRM_ERROR("DPCD failed read at extended capabilities\n");
+ drm_err(&i915->drm,
+ "DPCD failed read at extended capabilities\n");
return;
}
if (intel_dp->dpcd[DP_DPCD_REV] > dpcd_ext[DP_DPCD_REV]) {
- DRM_DEBUG_KMS("DPCD extended DPCD rev less than base DPCD rev\n");
+ drm_dbg_kms(&i915->drm,
+ "DPCD extended DPCD rev less than base DPCD rev\n");
return;
}
if (!memcmp(intel_dp->dpcd, dpcd_ext, sizeof(dpcd_ext)))
return;
- DRM_DEBUG_KMS("Base DPCD: %*ph\n",
- (int)sizeof(intel_dp->dpcd), intel_dp->dpcd);
+ drm_dbg_kms(&i915->drm, "Base DPCD: %*ph\n",
+ (int)sizeof(intel_dp->dpcd), intel_dp->dpcd);
memcpy(intel_dp->dpcd, dpcd_ext, sizeof(dpcd_ext));
}
@@ -4355,13 +4538,16 @@ intel_dp_extended_receiver_capabilities(struct intel_dp *intel_dp)
bool
intel_dp_read_dpcd(struct intel_dp *intel_dp)
{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
if (drm_dp_dpcd_read(&intel_dp->aux, 0x000, intel_dp->dpcd,
sizeof(intel_dp->dpcd)) < 0)
return false; /* aux transfer failed */
intel_dp_extended_receiver_capabilities(intel_dp);
- DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
+ drm_dbg_kms(&i915->drm, "DPCD: %*ph\n", (int)sizeof(intel_dp->dpcd),
+ intel_dp->dpcd);
return intel_dp->dpcd[DP_DPCD_REV] != 0;
}
@@ -4378,6 +4564,8 @@ bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp)
static void intel_dp_get_dsc_sink_cap(struct intel_dp *intel_dp)
{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
/*
* Clear the cached register set to avoid using stale values
* for the sinks that do not support DSC.
@@ -4393,20 +4581,23 @@ static void intel_dp_get_dsc_sink_cap(struct intel_dp *intel_dp)
if (drm_dp_dpcd_read(&intel_dp->aux, DP_DSC_SUPPORT,
intel_dp->dsc_dpcd,
sizeof(intel_dp->dsc_dpcd)) < 0)
- DRM_ERROR("Failed to read DPCD register 0x%x\n",
- DP_DSC_SUPPORT);
+ drm_err(&i915->drm,
+ "Failed to read DPCD register 0x%x\n",
+ DP_DSC_SUPPORT);
- DRM_DEBUG_KMS("DSC DPCD: %*ph\n",
- (int)sizeof(intel_dp->dsc_dpcd),
- intel_dp->dsc_dpcd);
+ drm_dbg_kms(&i915->drm, "DSC DPCD: %*ph\n",
+ (int)sizeof(intel_dp->dsc_dpcd),
+ intel_dp->dsc_dpcd);
/* FEC is supported only on DP 1.4 */
if (!intel_dp_is_edp(intel_dp) &&
drm_dp_dpcd_readb(&intel_dp->aux, DP_FEC_CAPABILITY,
&intel_dp->fec_capable) < 0)
- DRM_ERROR("Failed to read FEC DPCD register\n");
+ drm_err(&i915->drm,
+ "Failed to read FEC DPCD register\n");
- DRM_DEBUG_KMS("FEC CAPABILITY: %x\n", intel_dp->fec_capable);
+ drm_dbg_kms(&i915->drm, "FEC CAPABILITY: %x\n",
+ intel_dp->fec_capable);
}
}
@@ -4580,14 +4771,16 @@ intel_dp_can_mst(struct intel_dp *intel_dp)
static void
intel_dp_configure_mst(struct intel_dp *intel_dp)
{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
struct intel_encoder *encoder =
&dp_to_dig_port(intel_dp)->base;
bool sink_can_mst = intel_dp_sink_can_mst(intel_dp);
- DRM_DEBUG_KMS("[ENCODER:%d:%s] MST support: port: %s, sink: %s, modparam: %s\n",
- encoder->base.base.id, encoder->base.name,
- yesno(intel_dp->can_mst), yesno(sink_can_mst),
- yesno(i915_modparams.enable_dp_mst));
+ drm_dbg_kms(&i915->drm,
+ "[ENCODER:%d:%s] MST support: port: %s, sink: %s, modparam: %s\n",
+ encoder->base.base.id, encoder->base.name,
+ yesno(intel_dp->can_mst), yesno(sink_can_mst),
+ yesno(i915_modparams.enable_dp_mst));
if (!intel_dp->can_mst)
return;
@@ -4633,6 +4826,205 @@ intel_dp_needs_vsc_sdp(const struct intel_crtc_state *crtc_state,
return false;
}
+static ssize_t intel_dp_vsc_sdp_pack(const struct drm_dp_vsc_sdp *vsc,
+ struct dp_sdp *sdp, size_t size)
+{
+ size_t length = sizeof(struct dp_sdp);
+
+ if (size < length)
+ return -ENOSPC;
+
+ memset(sdp, 0, size);
+
+ /*
+ * Prepare VSC Header for SU as per DP 1.4a spec, Table 2-119
+ * VSC SDP Header Bytes
+ */
+ sdp->sdp_header.HB0 = 0; /* Secondary-Data Packet ID = 0 */
+ sdp->sdp_header.HB1 = vsc->sdp_type; /* Secondary-data Packet Type */
+ sdp->sdp_header.HB2 = vsc->revision; /* Revision Number */
+ sdp->sdp_header.HB3 = vsc->length; /* Number of Valid Data Bytes */
+
+ /* VSC SDP Payload for DB16 through DB18 */
+ /* Pixel Encoding and Colorimetry Formats */
+ sdp->db[16] = (vsc->pixelformat & 0xf) << 4; /* DB16[7:4] */
+ sdp->db[16] |= vsc->colorimetry & 0xf; /* DB16[3:0] */
+
+ switch (vsc->bpc) {
+ case 6:
+ /* 6bpc: 0x0 */
+ break;
+ case 8:
+ sdp->db[17] = 0x1; /* DB17[3:0] */
+ break;
+ case 10:
+ sdp->db[17] = 0x2;
+ break;
+ case 12:
+ sdp->db[17] = 0x3;
+ break;
+ case 16:
+ sdp->db[17] = 0x4;
+ break;
+ default:
+ MISSING_CASE(vsc->bpc);
+ break;
+ }
+ /* Dynamic Range and Component Bit Depth */
+ if (vsc->dynamic_range == DP_DYNAMIC_RANGE_CTA)
+ sdp->db[17] |= 0x80; /* DB17[7] */
+
+ /* Content Type */
+ sdp->db[18] = vsc->content_type & 0x7;
+
+ return length;
+}
+
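As a concrete check of the packing above: an 8 bpc RGB stream with CTA dynamic range and default colorimetry (all values follow from the switch/if logic in this function) produces:

    sdp->db[16] = 0x00;  /* RGB (0x0) in DB16[7:4], DEFAULT (0x0) in DB16[3:0] */
    sdp->db[17] = 0x81;  /* 8 bpc -> 0x1 in DB17[3:0]; CTA range sets DB17[7] */
    sdp->db[18] = 0x00;  /* content type not defined */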
+static ssize_t
+intel_dp_hdr_metadata_infoframe_sdp_pack(const struct hdmi_drm_infoframe *drm_infoframe,
+ struct dp_sdp *sdp,
+ size_t size)
+{
+ size_t length = sizeof(struct dp_sdp);
+ const int infoframe_size = HDMI_INFOFRAME_HEADER_SIZE + HDMI_DRM_INFOFRAME_SIZE;
+ unsigned char buf[HDMI_INFOFRAME_HEADER_SIZE + HDMI_DRM_INFOFRAME_SIZE];
+ ssize_t len;
+
+ if (size < length)
+ return -ENOSPC;
+
+ memset(sdp, 0, size);
+
+ len = hdmi_drm_infoframe_pack_only(drm_infoframe, buf, sizeof(buf));
+ if (len < 0) {
+ DRM_DEBUG_KMS("buffer size is smaller than hdr metadata infoframe\n");
+ return -ENOSPC;
+ }
+
+ if (len != infoframe_size) {
+ DRM_DEBUG_KMS("wrong static hdr metadata size\n");
+ return -ENOSPC;
+ }
+
+ /*
+ * Set up the infoframe sdp packet for HDR static metadata.
+ * Prepare VSC Header for SU as per DP 1.4a spec,
+ * Table 2-100 and Table 2-101
+ */
+
+ /* Secondary-Data Packet ID, 00h for non-Audio INFOFRAME */
+ sdp->sdp_header.HB0 = 0;
+ /*
+ * Packet Type 80h + Non-audio INFOFRAME Type value
+ * HDMI_INFOFRAME_TYPE_DRM: 0x87
+ * - 80h + Non-audio INFOFRAME Type value
+ * - InfoFrame Type: 0x07
+ * [CTA-861-G Table-42 Dynamic Range and Mastering InfoFrame]
+ */
+ sdp->sdp_header.HB1 = drm_infoframe->type;
+ /*
+ * Least Significant Eight Bits of (Data Byte Count - 1)
+ * infoframe_size - 1
+ */
+ sdp->sdp_header.HB2 = 0x1D;
+ /* INFOFRAME SDP Version Number */
+ sdp->sdp_header.HB3 = (0x13 << 2);
+ /* CTA Header Byte 2 (INFOFRAME Version Number) */
+ sdp->db[0] = drm_infoframe->version;
+ /* CTA Header Byte 3 (Length of INFOFRAME): HDMI_DRM_INFOFRAME_SIZE */
+ sdp->db[1] = drm_infoframe->length;
+ /*
+ * Copy HDMI_DRM_INFOFRAME_SIZE size from a buffer after
+ * HDMI_INFOFRAME_HEADER_SIZE
+ */
+ BUILD_BUG_ON(sizeof(sdp->db) < HDMI_DRM_INFOFRAME_SIZE + 2);
+ memcpy(&sdp->db[2], &buf[HDMI_INFOFRAME_HEADER_SIZE],
+ HDMI_DRM_INFOFRAME_SIZE);
+
+ /*
+ * Size of DP infoframe sdp packet for HDR static metadata consists of
+ * - DP SDP Header(struct dp_sdp_header): 4 bytes
+ * - Two Data Blocks: 2 bytes
+ * CTA Header Byte2 (INFOFRAME Version Number)
+ * CTA Header Byte3 (Length of INFOFRAME)
+ * - HDMI_DRM_INFOFRAME_SIZE: 26 bytes
+ *
+ * Prior to GEN11, the GMP register size is identical to the DP HDR
+ * static metadata infoframe size. GEN11+ uses a larger register;
+ * write_infoframe will pad the remaining bytes.
+ */
+ return sizeof(struct dp_sdp_header) + 2 + HDMI_DRM_INFOFRAME_SIZE;
+}
+
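The magic numbers in the header bytes follow directly from the sizes involved; with HDMI_INFOFRAME_HEADER_SIZE = 4 and HDMI_DRM_INFOFRAME_SIZE = 26:

    /* infoframe_size           = 4 + 26     = 30 bytes
     * HB2 = infoframe_size - 1 = 29         = 0x1D
     * returned SDP length      = 4 + 2 + 26 = 32 bytes
     *   (dp_sdp_header + two CTA header bytes + DRM infoframe payload) */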
+static void intel_write_dp_sdp(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ unsigned int type)
+{
+ struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct dp_sdp sdp = {};
+ ssize_t len;
+
+ if ((crtc_state->infoframes.enable &
+ intel_hdmi_infoframe_enable(type)) == 0)
+ return;
+
+ switch (type) {
+ case DP_SDP_VSC:
+ len = intel_dp_vsc_sdp_pack(&crtc_state->infoframes.vsc, &sdp,
+ sizeof(sdp));
+ break;
+ case HDMI_PACKET_TYPE_GAMUT_METADATA:
+ len = intel_dp_hdr_metadata_infoframe_sdp_pack(&crtc_state->infoframes.drm.drm,
+ &sdp, sizeof(sdp));
+ break;
+ default:
+ MISSING_CASE(type);
+ return;
+ }
+
+ if (drm_WARN_ON(&dev_priv->drm, len < 0))
+ return;
+
+ intel_dig_port->write_infoframe(encoder, crtc_state, type, &sdp, len);
+}
+
+void intel_dp_set_infoframes(struct intel_encoder *encoder,
+ bool enable,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ i915_reg_t reg = HSW_TVIDEO_DIP_CTL(crtc_state->cpu_transcoder);
+ u32 dip_enable = VIDEO_DIP_ENABLE_AVI_HSW | VIDEO_DIP_ENABLE_GCP_HSW |
+ VIDEO_DIP_ENABLE_VS_HSW | VIDEO_DIP_ENABLE_GMP_HSW |
+ VIDEO_DIP_ENABLE_SPD_HSW | VIDEO_DIP_ENABLE_DRM_GLK;
+ u32 val = intel_de_read(dev_priv, reg);
+
+ /* TODO: Add DSC case (DIP_ENABLE_PPS) */
+ /* When PSR is enabled, this routine doesn't disable VSC DIP */
+ if (intel_psr_enabled(intel_dp))
+ val &= ~dip_enable;
+ else
+ val &= ~(dip_enable | VIDEO_DIP_ENABLE_VSC_HSW);
+
+ if (!enable) {
+ intel_de_write(dev_priv, reg, val);
+ intel_de_posting_read(dev_priv, reg);
+ return;
+ }
+
+ intel_de_write(dev_priv, reg, val);
+ intel_de_posting_read(dev_priv, reg);
+
+ /* When PSR is enabled, VSC SDP is handled by PSR routine */
+ if (!intel_psr_enabled(intel_dp))
+ intel_write_dp_sdp(encoder, crtc_state, DP_SDP_VSC);
+
+ intel_write_dp_sdp(encoder, crtc_state, HDMI_PACKET_TYPE_GAMUT_METADATA);
+}
+
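
The DIP control handling above is a plain read-modify-write. A compact model of the PSR special case, with made-up bit positions standing in for the real VIDEO_DIP_ENABLE_* values:

    #include <stdio.h>
    #include <stdint.h>

    /* Illustrative bit positions; the real VIDEO_DIP_ENABLE_* values differ. */
    #define DIP_AVI (1u << 0)
    #define DIP_GCP (1u << 1)
    #define DIP_VS  (1u << 2)
    #define DIP_GMP (1u << 3)
    #define DIP_SPD (1u << 4)
    #define DIP_DRM (1u << 5)
    #define DIP_VSC (1u << 6)

    static uint32_t dip_ctl_mask(uint32_t val, int psr_enabled)
    {
    	uint32_t dip_enable = DIP_AVI | DIP_GCP | DIP_VS | DIP_GMP |
    			      DIP_SPD | DIP_DRM;

    	/* When PSR owns the VSC DIP, leave its enable bit untouched. */
    	if (psr_enabled)
    		val &= ~dip_enable;
    	else
    		val &= ~(dip_enable | DIP_VSC);

    	return val; /* write_infoframe() then re-enables individual DIPs */
    }

    int main(void)
    {
    	printf("0x%02x\n", dip_ctl_mask(0x7f, 0)); /* 0x00: everything cleared */
    	printf("0x%02x\n", dip_ctl_mask(0x7f, 1)); /* 0x40: VSC bit preserved */
    	return 0;
    }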
static void
intel_dp_setup_vsc_sdp(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
@@ -4762,6 +5154,7 @@ intel_dp_setup_hdr_metadata_infoframe_sdp(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct dp_sdp infoframe_sdp = {};
struct hdmi_drm_infoframe drm_infoframe = {};
@@ -4772,18 +5165,20 @@ intel_dp_setup_hdr_metadata_infoframe_sdp(struct intel_dp *intel_dp,
ret = drm_hdmi_infoframe_set_hdr_metadata(&drm_infoframe, conn_state);
if (ret) {
- DRM_DEBUG_KMS("couldn't set HDR metadata in infoframe\n");
+ drm_dbg_kms(&i915->drm,
+ "couldn't set HDR metadata in infoframe\n");
return;
}
len = hdmi_drm_infoframe_pack_only(&drm_infoframe, buf, sizeof(buf));
if (len < 0) {
- DRM_DEBUG_KMS("buffer size is smaller than hdr metadata infoframe\n");
+ drm_dbg_kms(&i915->drm,
+ "buffer size is smaller than hdr metadata infoframe\n");
return;
}
if (len != infoframe_size) {
- DRM_DEBUG_KMS("wrong static hdr metadata size\n");
+ drm_dbg_kms(&i915->drm, "wrong static hdr metadata size\n");
return;
}
@@ -4861,6 +5256,7 @@ void intel_dp_hdr_metadata_enable(struct intel_dp *intel_dp,
static u8 intel_dp_autotest_link_training(struct intel_dp *intel_dp)
{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
int status = 0;
int test_link_rate;
u8 test_lane_count, test_link_bw;
@@ -4872,7 +5268,7 @@ static u8 intel_dp_autotest_link_training(struct intel_dp *intel_dp)
&test_lane_count);
if (status <= 0) {
- DRM_DEBUG_KMS("Lane count read failed\n");
+ drm_dbg_kms(&i915->drm, "Lane count read failed\n");
return DP_TEST_NAK;
}
test_lane_count &= DP_MAX_LANE_COUNT_MASK;
@@ -4880,7 +5276,7 @@ static u8 intel_dp_autotest_link_training(struct intel_dp *intel_dp)
status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LINK_RATE,
&test_link_bw);
if (status <= 0) {
- DRM_DEBUG_KMS("Link Rate read failed\n");
+ drm_dbg_kms(&i915->drm, "Link Rate read failed\n");
return DP_TEST_NAK;
}
test_link_rate = drm_dp_bw_code_to_link_rate(test_link_bw);
@@ -4898,6 +5294,7 @@ static u8 intel_dp_autotest_link_training(struct intel_dp *intel_dp)
static u8 intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
u8 test_pattern;
u8 test_misc;
__be16 h_width, v_height;
@@ -4907,7 +5304,7 @@ static u8 intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_PATTERN,
&test_pattern);
if (status <= 0) {
- DRM_DEBUG_KMS("Test pattern read failed\n");
+ drm_dbg_kms(&i915->drm, "Test pattern read failed\n");
return DP_TEST_NAK;
}
if (test_pattern != DP_COLOR_RAMP)
@@ -4916,21 +5313,21 @@ static u8 intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_H_WIDTH_HI,
&h_width, 2);
if (status <= 0) {
- DRM_DEBUG_KMS("H Width read failed\n");
+ drm_dbg_kms(&i915->drm, "H Width read failed\n");
return DP_TEST_NAK;
}
status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_V_HEIGHT_HI,
&v_height, 2);
if (status <= 0) {
- DRM_DEBUG_KMS("V Height read failed\n");
+ drm_dbg_kms(&i915->drm, "V Height read failed\n");
return DP_TEST_NAK;
}
status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_MISC0,
&test_misc);
if (status <= 0) {
- DRM_DEBUG_KMS("TEST MISC read failed\n");
+ drm_dbg_kms(&i915->drm, "TEST MISC read failed\n");
return DP_TEST_NAK;
}
if ((test_misc & DP_TEST_COLOR_FORMAT_MASK) != DP_COLOR_FORMAT_RGB)
@@ -4959,6 +5356,7 @@ static u8 intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
static u8 intel_dp_autotest_edid(struct intel_dp *intel_dp)
{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
u8 test_result = DP_TEST_ACK;
struct intel_connector *intel_connector = intel_dp->attached_connector;
struct drm_connector *connector = &intel_connector->base;
@@ -4975,9 +5373,10 @@ static u8 intel_dp_autotest_edid(struct intel_dp *intel_dp)
*/
if (intel_dp->aux.i2c_nack_count > 0 ||
intel_dp->aux.i2c_defer_count > 0)
- DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
- intel_dp->aux.i2c_nack_count,
- intel_dp->aux.i2c_defer_count);
+ drm_dbg_kms(&i915->drm,
+ "EDID read had %d NACKs, %d DEFERs\n",
+ intel_dp->aux.i2c_nack_count,
+ intel_dp->aux.i2c_defer_count);
intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_FAILSAFE;
} else {
struct edid *block = intel_connector->detect_edid;
@@ -4989,7 +5388,8 @@ static u8 intel_dp_autotest_edid(struct intel_dp *intel_dp)
if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_EDID_CHECKSUM,
block->checksum) <= 0)
- DRM_DEBUG_KMS("Failed to write EDID checksum\n");
+ drm_dbg_kms(&i915->drm,
+ "Failed to write EDID checksum\n");
test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_PREFERRED;
@@ -5001,43 +5401,217 @@ static u8 intel_dp_autotest_edid(struct intel_dp *intel_dp)
return test_result;
}
+static u8 intel_dp_prepare_phytest(struct intel_dp *intel_dp)
+{
+ struct drm_dp_phy_test_params *data =
+ &intel_dp->compliance.test_data.phytest;
+
+ if (drm_dp_get_phy_test_pattern(&intel_dp->aux, data)) {
+ DRM_DEBUG_KMS("DP Phy Test pattern AUX read failure\n");
+ return DP_TEST_NAK;
+ }
+
+ /*
+ * link_mst is set to false to avoid executing MST-related code
+ * during compliance testing.
+ */
+ intel_dp->link_mst = false;
+
+ return DP_TEST_ACK;
+}
+
+static void intel_dp_phy_pattern_update(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv =
+ to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_dp_phy_test_params *data =
+ &intel_dp->compliance.test_data.phytest;
+ struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
+ enum pipe pipe = crtc->pipe;
+ u32 pattern_val;
+
+ switch (data->phy_pattern) {
+ case DP_PHY_TEST_PATTERN_NONE:
+ DRM_DEBUG_KMS("Disable Phy Test Pattern\n");
+ intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 0x0);
+ break;
+ case DP_PHY_TEST_PATTERN_D10_2:
+ DRM_DEBUG_KMS("Set D10.2 Phy Test Pattern\n");
+ intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
+ DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_D10_2);
+ break;
+ case DP_PHY_TEST_PATTERN_ERROR_COUNT:
+ DRM_DEBUG_KMS("Set Error Count Phy Test Pattern\n");
+ intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
+ DDI_DP_COMP_CTL_ENABLE |
+ DDI_DP_COMP_CTL_SCRAMBLED_0);
+ break;
+ case DP_PHY_TEST_PATTERN_PRBS7:
+ DRM_DEBUG_KMS("Set PRBS7 Phy Test Pattern\n");
+ intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
+ DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_PRBS7);
+ break;
+ case DP_PHY_TEST_PATTERN_80BIT_CUSTOM:
+ /*
+ * FIXME: Ideally the pattern should come from DPCD 0x250. Since
+ * the current DPR-100 firmware cannot set it, hardcode it here
+ * for the compliance test.
+ */
+ DRM_DEBUG_KMS("Set 80Bit Custom Phy Test Pattern 0x3e0f83e0 0x0f83e0f8 0x0000f83e\n");
+ pattern_val = 0x3e0f83e0;
+ intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 0), pattern_val);
+ pattern_val = 0x0f83e0f8;
+ intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 1), pattern_val);
+ pattern_val = 0x0000f83e;
+ intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 2), pattern_val);
+ intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
+ DDI_DP_COMP_CTL_ENABLE |
+ DDI_DP_COMP_CTL_CUSTOM80);
+ break;
+ case DP_PHY_TEST_PATTERN_CP2520:
+ /*
+ * FIXME: Ideally the pattern should come from DPCD 0x24A. Since
+ * the current DPR-100 firmware cannot set it, hardcode it here
+ * for the compliance test.
+ */
+ DRM_DEBUG_KMS("Set HBR2 compliance Phy Test Pattern\n");
+ pattern_val = 0xFB;
+ intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
+ DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_HBR2 |
+ pattern_val);
+ break;
+ default:
+ WARN(1, "Invalid Phy Test Pattern\n");
+ }
+}
+
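
The three hardcoded words encode the CTS 80-bit custom pattern (five zeros then five ones, repeating). A quick self-check, assuming the hardware consumes bit 0 of DDI_DP_COMP_PAT word 0 first:

    #include <assert.h>
    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
    	/* The writes to DDI_DP_COMP_PAT(pipe, 0..2) above, low word first. */
    	const uint32_t words[3] = { 0x3e0f83e0, 0x0f83e0f8, 0x0000f83e };
    	int i;

    	for (i = 0; i < 80; i++) {
    		int bit = (words[i / 32] >> (i % 32)) & 1;

    		/* five zeros, then five ones, repeating every ten bits */
    		assert(bit == ((i % 10) >= 5));
    	}
    	puts("80-bit custom pattern ok");
    	return 0;
    }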
+static void
+intel_dp_autotest_phy_ddi_disable(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
+ enum pipe pipe = crtc->pipe;
+ u32 trans_ddi_func_ctl_value, trans_conf_value, dp_tp_ctl_value;
+
+ trans_ddi_func_ctl_value = intel_de_read(dev_priv,
+ TRANS_DDI_FUNC_CTL(pipe));
+ trans_conf_value = intel_de_read(dev_priv, PIPECONF(pipe));
+ dp_tp_ctl_value = intel_de_read(dev_priv, TGL_DP_TP_CTL(pipe));
+
+ trans_ddi_func_ctl_value &= ~(TRANS_DDI_FUNC_ENABLE |
+ TGL_TRANS_DDI_PORT_MASK);
+ trans_conf_value &= ~PIPECONF_ENABLE;
+ dp_tp_ctl_value &= ~DP_TP_CTL_ENABLE;
+
+ intel_de_write(dev_priv, PIPECONF(pipe), trans_conf_value);
+ intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(pipe),
+ trans_ddi_func_ctl_value);
+ intel_de_write(dev_priv, TGL_DP_TP_CTL(pipe), dp_tp_ctl_value);
+}
+
+static void
+intel_dp_autotest_phy_ddi_enable(struct intel_dp *intel_dp, uint8_t lane_cnt)
+{
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ enum port port = intel_dig_port->base.port;
+ struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
+ enum pipe pipe = crtc->pipe;
+ u32 trans_ddi_func_ctl_value, trans_conf_value, dp_tp_ctl_value;
+
+ trans_ddi_func_ctl_value = intel_de_read(dev_priv,
+ TRANS_DDI_FUNC_CTL(pipe));
+ trans_conf_value = intel_de_read(dev_priv, PIPECONF(pipe));
+ dp_tp_ctl_value = intel_de_read(dev_priv, TGL_DP_TP_CTL(pipe));
+
+ trans_ddi_func_ctl_value |= TRANS_DDI_FUNC_ENABLE |
+ TGL_TRANS_DDI_SELECT_PORT(port);
+ trans_conf_value |= PIPECONF_ENABLE;
+ dp_tp_ctl_value |= DP_TP_CTL_ENABLE;
+
+ intel_de_write(dev_priv, PIPECONF(pipe), trans_conf_value);
+ intel_de_write(dev_priv, TGL_DP_TP_CTL(pipe), dp_tp_ctl_value);
+ intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(pipe),
+ trans_ddi_func_ctl_value);
+}
+
+void intel_dp_process_phy_request(struct intel_dp *intel_dp)
+{
+ struct drm_dp_phy_test_params *data =
+ &intel_dp->compliance.test_data.phytest;
+ u8 link_status[DP_LINK_STATUS_SIZE];
+
+ if (!intel_dp_get_link_status(intel_dp, link_status)) {
+ DRM_DEBUG_KMS("failed to get link status\n");
+ return;
+ }
+
+ /* retrieve vswing & pre-emphasis setting */
+ intel_dp_get_adjust_train(intel_dp, link_status);
+
+ intel_dp_autotest_phy_ddi_disable(intel_dp);
+
+ intel_dp_set_signal_levels(intel_dp);
+
+ intel_dp_phy_pattern_update(intel_dp);
+
+ intel_dp_autotest_phy_ddi_enable(intel_dp, data->num_lanes);
+
+ drm_dp_set_phy_test_pattern(&intel_dp->aux, data,
+ link_status[DP_DPCD_REV]);
+}
+
static u8 intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
{
u8 test_result = DP_TEST_NAK;
+
+ test_result = intel_dp_prepare_phytest(intel_dp);
+ if (test_result != DP_TEST_ACK)
+ DRM_ERROR("Phy test preparation failed\n");
+
+ intel_dp_process_phy_request(intel_dp);
+
return test_result;
}
static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
u8 response = DP_TEST_NAK;
u8 request = 0;
int status;
status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_REQUEST, &request);
if (status <= 0) {
- DRM_DEBUG_KMS("Could not read test request from sink\n");
+ drm_dbg_kms(&i915->drm,
+ "Could not read test request from sink\n");
goto update_status;
}
switch (request) {
case DP_TEST_LINK_TRAINING:
- DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
+ drm_dbg_kms(&i915->drm, "LINK_TRAINING test requested\n");
response = intel_dp_autotest_link_training(intel_dp);
break;
case DP_TEST_LINK_VIDEO_PATTERN:
- DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
+ drm_dbg_kms(&i915->drm, "TEST_PATTERN test requested\n");
response = intel_dp_autotest_video_pattern(intel_dp);
break;
case DP_TEST_LINK_EDID_READ:
- DRM_DEBUG_KMS("EDID test requested\n");
+ drm_dbg_kms(&i915->drm, "EDID test requested\n");
response = intel_dp_autotest_edid(intel_dp);
break;
case DP_TEST_LINK_PHY_TEST_PATTERN:
- DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
+ drm_dbg_kms(&i915->drm, "PHY_PATTERN test requested\n");
response = intel_dp_autotest_phy_pattern(intel_dp);
break;
default:
- DRM_DEBUG_KMS("Invalid test request '%02x'\n", request);
+ drm_dbg_kms(&i915->drm, "Invalid test request '%02x'\n",
+ request);
break;
}
@@ -5047,12 +5621,14 @@ static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
update_status:
status = drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, response);
if (status <= 0)
- DRM_DEBUG_KMS("Could not write test response to sink\n");
+ drm_dbg_kms(&i915->drm,
+ "Could not write test response to sink\n");
}
static int
intel_dp_check_mst_status(struct intel_dp *intel_dp)
{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
bool bret;
if (intel_dp->is_mst) {
@@ -5069,12 +5645,13 @@ go_again:
/* check link status - esi[10] = 0x200c */
if (intel_dp->active_mst_links > 0 &&
!drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
- DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
+ drm_dbg_kms(&i915->drm,
+ "channel EQ not ok, retraining\n");
intel_dp_start_link_train(intel_dp);
intel_dp_stop_link_train(intel_dp);
}
- DRM_DEBUG_KMS("got esi %3ph\n", esi);
+ drm_dbg_kms(&i915->drm, "got esi %3ph\n", esi);
ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
if (handled) {
@@ -5090,7 +5667,8 @@ go_again:
bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
if (bret == true) {
- DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
+ drm_dbg_kms(&i915->drm,
+ "got esi2 %3ph\n", esi);
goto go_again;
}
} else
@@ -5098,7 +5676,8 @@ go_again:
return ret;
} else {
- DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
+ drm_dbg_kms(&i915->drm,
+ "failed to get ESI - device may have failed\n");
intel_dp->is_mst = false;
drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
intel_dp->is_mst);
@@ -5220,14 +5799,13 @@ int intel_dp_retrain_link(struct intel_encoder *encoder,
*/
static enum intel_hotplug_state
intel_dp_hotplug(struct intel_encoder *encoder,
- struct intel_connector *connector,
- bool irq_received)
+ struct intel_connector *connector)
{
struct drm_modeset_acquire_ctx ctx;
enum intel_hotplug_state state;
int ret;
- state = intel_encoder_hotplug(encoder, connector, irq_received);
+ state = intel_encoder_hotplug(encoder, connector);
drm_modeset_acquire_init(&ctx, 0);
@@ -5251,7 +5829,7 @@ intel_dp_hotplug(struct intel_encoder *encoder,
* Keeping it consistent with intel_ddi_hotplug() and
* intel_hdmi_hotplug().
*/
- if (state == INTEL_HOTPLUG_UNCHANGED && irq_received)
+ if (state == INTEL_HOTPLUG_UNCHANGED && !connector->hotplug_retries)
state = INTEL_HOTPLUG_RETRY;
return state;
@@ -5259,6 +5837,7 @@ intel_dp_hotplug(struct intel_encoder *encoder,
static void intel_dp_check_service_irq(struct intel_dp *intel_dp)
{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
u8 val;
if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
@@ -5277,7 +5856,7 @@ static void intel_dp_check_service_irq(struct intel_dp *intel_dp)
intel_hdcp_handle_cp_irq(intel_dp->attached_connector);
if (val & DP_SINK_SPECIFIC_IRQ)
- DRM_DEBUG_DRIVER("Sink specific irq unhandled\n");
+ drm_dbg_kms(&i915->drm, "Sink specific irq unhandled\n");
}
/*
@@ -5344,6 +5923,7 @@ intel_dp_short_pulse(struct intel_dp *intel_dp)
static enum drm_connector_status
intel_dp_detect_dpcd(struct intel_dp *intel_dp)
{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);
u8 *dpcd = intel_dp->dpcd;
u8 type;
@@ -5391,7 +5971,7 @@ intel_dp_detect_dpcd(struct intel_dp *intel_dp)
}
/* Anything else is out of spec, warn and ignore */
- DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
+ drm_dbg_kms(&i915->drm, "Broken DP branch device, ignoring\n");
return connector_status_disconnected;
}
@@ -5863,6 +6443,7 @@ static int intel_dp_get_modes(struct drm_connector *connector)
static int
intel_dp_connector_register(struct drm_connector *connector)
{
+ struct drm_i915_private *i915 = to_i915(connector->dev);
struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
int ret;
@@ -5872,8 +6453,8 @@ intel_dp_connector_register(struct drm_connector *connector)
intel_connector_debugfs_add(connector);
- DRM_DEBUG_KMS("registering %s bus for %s\n",
- intel_dp->aux.name, connector->kdev->kobj.name);
+ drm_dbg_kms(&i915->drm, "registering %s bus for %s\n",
+ intel_dp->aux.name, connector->kdev->kobj.name);
intel_dp->aux.dev = connector->kdev;
ret = drm_dp_aux_register(&intel_dp->aux);
@@ -5959,6 +6540,7 @@ static
int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
u8 *an)
{
+ struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(&intel_dig_port->base.base));
static const struct drm_dp_aux_msg msg = {
.request = DP_AUX_NATIVE_WRITE,
@@ -5973,8 +6555,9 @@ int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
dpcd_ret = drm_dp_dpcd_write(&intel_dig_port->dp.aux, DP_AUX_HDCP_AN,
an, DRM_HDCP_AN_LEN);
if (dpcd_ret != DRM_HDCP_AN_LEN) {
- DRM_DEBUG_KMS("Failed to write An over DP/AUX (%zd)\n",
- dpcd_ret);
+ drm_dbg_kms(&i915->drm,
+ "Failed to write An over DP/AUX (%zd)\n",
+ dpcd_ret);
return dpcd_ret >= 0 ? -EIO : dpcd_ret;
}
@@ -5990,17 +6573,19 @@ int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
rxbuf, sizeof(rxbuf),
DP_AUX_CH_CTL_AUX_AKSV_SELECT);
if (ret < 0) {
- DRM_DEBUG_KMS("Write Aksv over DP/AUX failed (%d)\n", ret);
+ drm_dbg_kms(&i915->drm,
+ "Write Aksv over DP/AUX failed (%d)\n", ret);
return ret;
} else if (ret == 0) {
- DRM_DEBUG_KMS("Aksv write over DP/AUX was empty\n");
+ drm_dbg_kms(&i915->drm, "Aksv write over DP/AUX was empty\n");
return -EIO;
}
reply = (rxbuf[0] >> 4) & DP_AUX_NATIVE_REPLY_MASK;
if (reply != DP_AUX_NATIVE_REPLY_ACK) {
- DRM_DEBUG_KMS("Aksv write: no DP_AUX_NATIVE_REPLY_ACK %x\n",
- reply);
+ drm_dbg_kms(&i915->drm,
+ "Aksv write: no DP_AUX_NATIVE_REPLY_ACK %x\n",
+ reply);
return -EIO;
}
return 0;
@@ -6009,11 +6594,14 @@ int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
static int intel_dp_hdcp_read_bksv(struct intel_digital_port *intel_dig_port,
u8 *bksv)
{
+ struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
ssize_t ret;
+
ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BKSV, bksv,
DRM_HDCP_KSV_LEN);
if (ret != DRM_HDCP_KSV_LEN) {
- DRM_DEBUG_KMS("Read Bksv from DP/AUX failed (%zd)\n", ret);
+ drm_dbg_kms(&i915->drm,
+ "Read Bksv from DP/AUX failed (%zd)\n", ret);
return ret >= 0 ? -EIO : ret;
}
return 0;
@@ -6022,7 +6610,9 @@ static int intel_dp_hdcp_read_bksv(struct intel_digital_port *intel_dig_port,
static int intel_dp_hdcp_read_bstatus(struct intel_digital_port *intel_dig_port,
u8 *bstatus)
{
+ struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
ssize_t ret;
+
/*
* For some reason the HDMI and DP HDCP specs call this register
* definition by different names. In the HDMI spec, it's called BSTATUS,
@@ -6031,7 +6621,8 @@ static int intel_dp_hdcp_read_bstatus(struct intel_digital_port *intel_dig_port,
ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BINFO,
bstatus, DRM_HDCP_BSTATUS_LEN);
if (ret != DRM_HDCP_BSTATUS_LEN) {
- DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret);
+ drm_dbg_kms(&i915->drm,
+ "Read bstatus from DP/AUX failed (%zd)\n", ret);
return ret >= 0 ? -EIO : ret;
}
return 0;
@@ -6041,12 +6632,14 @@ static
int intel_dp_hdcp_read_bcaps(struct intel_digital_port *intel_dig_port,
u8 *bcaps)
{
+ struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
ssize_t ret;
ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BCAPS,
bcaps, 1);
if (ret != 1) {
- DRM_DEBUG_KMS("Read bcaps from DP/AUX failed (%zd)\n", ret);
+ drm_dbg_kms(&i915->drm,
+ "Read bcaps from DP/AUX failed (%zd)\n", ret);
return ret >= 0 ? -EIO : ret;
}
@@ -6072,11 +6665,14 @@ static
int intel_dp_hdcp_read_ri_prime(struct intel_digital_port *intel_dig_port,
u8 *ri_prime)
{
+ struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
ssize_t ret;
+
ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_RI_PRIME,
ri_prime, DRM_HDCP_RI_LEN);
if (ret != DRM_HDCP_RI_LEN) {
- DRM_DEBUG_KMS("Read Ri' from DP/AUX failed (%zd)\n", ret);
+ drm_dbg_kms(&i915->drm, "Read Ri' from DP/AUX failed (%zd)\n",
+ ret);
return ret >= 0 ? -EIO : ret;
}
return 0;
@@ -6086,12 +6682,15 @@ static
int intel_dp_hdcp_read_ksv_ready(struct intel_digital_port *intel_dig_port,
bool *ksv_ready)
{
+ struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
ssize_t ret;
u8 bstatus;
+
ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BSTATUS,
&bstatus, 1);
if (ret != 1) {
- DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret);
+ drm_dbg_kms(&i915->drm,
+ "Read bstatus from DP/AUX failed (%zd)\n", ret);
return ret >= 0 ? -EIO : ret;
}
*ksv_ready = bstatus & DP_BSTATUS_READY;
@@ -6102,6 +6701,7 @@ static
int intel_dp_hdcp_read_ksv_fifo(struct intel_digital_port *intel_dig_port,
int num_downstream, u8 *ksv_fifo)
{
+ struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
ssize_t ret;
int i;
@@ -6113,8 +6713,9 @@ int intel_dp_hdcp_read_ksv_fifo(struct intel_digital_port *intel_dig_port,
ksv_fifo + i * DRM_HDCP_KSV_LEN,
len);
if (ret != len) {
- DRM_DEBUG_KMS("Read ksv[%d] from DP/AUX failed (%zd)\n",
- i, ret);
+ drm_dbg_kms(&i915->drm,
+ "Read ksv[%d] from DP/AUX failed (%zd)\n",
+ i, ret);
return ret >= 0 ? -EIO : ret;
}
}
@@ -6125,6 +6726,7 @@ static
int intel_dp_hdcp_read_v_prime_part(struct intel_digital_port *intel_dig_port,
int i, u32 *part)
{
+ struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
ssize_t ret;
if (i >= DRM_HDCP_V_PRIME_NUM_PARTS)
@@ -6134,7 +6736,8 @@ int intel_dp_hdcp_read_v_prime_part(struct intel_digital_port *intel_dig_port,
DP_AUX_HDCP_V_PRIME(i), part,
DRM_HDCP_V_PRIME_PART_LEN);
if (ret != DRM_HDCP_V_PRIME_PART_LEN) {
- DRM_DEBUG_KMS("Read v'[%d] from DP/AUX failed (%zd)\n", i, ret);
+ drm_dbg_kms(&i915->drm,
+ "Read v'[%d] from DP/AUX failed (%zd)\n", i, ret);
return ret >= 0 ? -EIO : ret;
}
return 0;
@@ -6151,13 +6754,15 @@ int intel_dp_hdcp_toggle_signalling(struct intel_digital_port *intel_dig_port,
static
bool intel_dp_hdcp_check_link(struct intel_digital_port *intel_dig_port)
{
+ struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
ssize_t ret;
u8 bstatus;
ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BSTATUS,
&bstatus, 1);
if (ret != 1) {
- DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret);
+ drm_dbg_kms(&i915->drm,
+ "Read bstatus from DP/AUX failed (%zd)\n", ret);
return false;
}
@@ -6232,13 +6837,15 @@ static inline
int intel_dp_hdcp2_read_rx_status(struct intel_digital_port *intel_dig_port,
u8 *rx_status)
{
+ struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
ssize_t ret;
ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
DP_HDCP_2_2_REG_RXSTATUS_OFFSET, rx_status,
HDCP_2_2_DP_RXSTATUS_LEN);
if (ret != HDCP_2_2_DP_RXSTATUS_LEN) {
- DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret);
+ drm_dbg_kms(&i915->drm,
+ "Read bstatus from DP/AUX failed (%zd)\n", ret);
return ret >= 0 ? -EIO : ret;
}
@@ -6282,6 +6889,7 @@ static ssize_t
intel_dp_hdcp2_wait_for_msg(struct intel_digital_port *intel_dig_port,
const struct hdcp2_dp_msg_data *hdcp2_msg_data)
{
+ struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
struct intel_dp *dp = &intel_dig_port->dp;
struct intel_hdcp *hdcp = &dp->attached_connector->hdcp;
u8 msg_id = hdcp2_msg_data->msg_id;
@@ -6313,8 +6921,9 @@ intel_dp_hdcp2_wait_for_msg(struct intel_digital_port *intel_dig_port,
}
if (ret)
- DRM_DEBUG_KMS("msg_id %d, ret %d, timeout(mSec): %d\n",
- hdcp2_msg_data->msg_id, ret, timeout);
+ drm_dbg_kms(&i915->drm,
+ "msg_id %d, ret %d, timeout(mSec): %d\n",
+ hdcp2_msg_data->msg_id, ret, timeout);
return ret;
}
@@ -6400,6 +7009,7 @@ static
int intel_dp_hdcp2_read_msg(struct intel_digital_port *intel_dig_port,
u8 msg_id, void *buf, size_t size)
{
+ struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
unsigned int offset;
u8 *byte = buf;
ssize_t ret, bytes_to_recv, len;
@@ -6433,7 +7043,8 @@ int intel_dp_hdcp2_read_msg(struct intel_digital_port *intel_dig_port,
ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, offset,
(void *)byte, len);
if (ret < 0) {
- DRM_DEBUG_KMS("msg_id %d, ret %zd\n", msg_id, ret);
+ drm_dbg_kms(&i915->drm, "msg_id %d, ret %zd\n",
+ msg_id, ret);
return ret;
}
@@ -6724,7 +7335,11 @@ static int intel_dp_connector_atomic_check(struct drm_connector *conn,
if (ret)
return ret;
- if (INTEL_GEN(dev_priv) < 11)
+ /*
+ * We don't enable port sync on BDW due to missing workarounds and
+ * because the modeset sequence hasn't been adjusted appropriately.
+ */
+ if (INTEL_GEN(dev_priv) < 9)
return 0;
if (!intel_connector_needs_modeset(state, conn))
@@ -6763,28 +7378,45 @@ static const struct drm_encoder_funcs intel_dp_enc_funcs = {
.destroy = intel_dp_encoder_destroy,
};
+static bool intel_edp_have_power(struct intel_dp *intel_dp)
+{
+ intel_wakeref_t wakeref;
+ bool have_power = false;
+
+ with_pps_lock(intel_dp, wakeref) {
+ have_power = edp_have_panel_power(intel_dp) &&
+ edp_have_panel_vdd(intel_dp);
+ }
+
+ return have_power;
+}
+
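
with_pps_lock() is one of i915's scoped-guard macros: a one-iteration for-loop that acquires the lock (and a wakeref) on entry and releases it when the body finishes. A generic sketch of that shape, with hypothetical guard_get()/guard_put() helpers:

    #include <stdio.h>

    static int  guard_get(int *lock) { *lock = 1; return 1; }	/* acquire */
    static void guard_put(int *lock) { *lock = 0; }		/* release */

    /* One-iteration for-loop: body runs exactly once while the guard is held. */
    #define with_guard(lock, tok) \
    	for ((tok) = guard_get(lock); (tok); guard_put(lock), (tok) = 0)

    int main(void)
    {
    	int lock = 0, tok;

    	with_guard(&lock, tok)
    		printf("inside:  lock=%d\n", lock);	/* lock=1 */

    	printf("outside: lock=%d\n", lock);		/* lock=0 */
    	return 0;
    }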
enum irqreturn
intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
{
+ struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
struct intel_dp *intel_dp = &intel_dig_port->dp;
- if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
+ if (intel_dig_port->base.type == INTEL_OUTPUT_EDP &&
+ (long_hpd || !intel_edp_have_power(intel_dp))) {
/*
- * vdd off can generate a long pulse on eDP which
+ * vdd off can generate a long/short pulse on eDP which
* would require vdd on to handle it, and thus we
* would end up in an endless cycle of
- * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
+ * "vdd off -> long/short hpd -> vdd on -> detect -> vdd off -> ..."
*/
- DRM_DEBUG_KMS("ignoring long hpd on eDP [ENCODER:%d:%s]\n",
- intel_dig_port->base.base.base.id,
- intel_dig_port->base.base.name);
+ drm_dbg_kms(&i915->drm,
+ "ignoring %s hpd on eDP [ENCODER:%d:%s]\n",
+ long_hpd ? "long" : "short",
+ intel_dig_port->base.base.base.id,
+ intel_dig_port->base.base.name);
return IRQ_HANDLED;
}
- DRM_DEBUG_KMS("got hpd irq on [ENCODER:%d:%s] - %s\n",
- intel_dig_port->base.base.base.id,
- intel_dig_port->base.base.name,
- long_hpd ? "long" : "short");
+ drm_dbg_kms(&i915->drm, "got hpd irq on [ENCODER:%d:%s] - %s\n",
+ intel_dig_port->base.base.base.id,
+ intel_dig_port->base.base.name,
+ long_hpd ? "long" : "short");
if (long_hpd) {
intel_dp->reset_link_params = true;
@@ -6797,8 +7429,10 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
* If we were in MST mode, and device is not
* there, get out of MST mode
*/
- DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n",
- intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
+ drm_dbg_kms(&i915->drm,
+ "MST device may have disappeared %d vs %d\n",
+ intel_dp->is_mst,
+ intel_dp->mst_mgr.mst_state);
intel_dp->is_mst = false;
drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
intel_dp->is_mst);
diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h
index 0c7be8ed1423..6659ce15a693 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.h
+++ b/drivers/gpu/drm/i915/display/intel_dp.h
@@ -114,7 +114,11 @@ void intel_dp_vsc_enable(struct intel_dp *intel_dp,
void intel_dp_hdr_metadata_enable(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
+void intel_dp_set_infoframes(struct intel_encoder *encoder, bool enable,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state);
bool intel_digital_port_connected(struct intel_encoder *encoder);
+void intel_dp_process_phy_request(struct intel_dp *intel_dp);
static inline unsigned int intel_dp_unused_lane_mask(int lane_count)
{
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
index 3e706bb850a8..4b916468540f 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
@@ -27,6 +27,7 @@
static void set_aux_backlight_enable(struct intel_dp *intel_dp, bool enable)
{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
u8 reg_val = 0;
/* Early return when display use other mechanism to enable backlight. */
@@ -35,8 +36,8 @@ static void set_aux_backlight_enable(struct intel_dp *intel_dp, bool enable)
if (drm_dp_dpcd_readb(&intel_dp->aux, DP_EDP_DISPLAY_CONTROL_REGISTER,
&reg_val) < 0) {
- DRM_DEBUG_KMS("Failed to read DPCD register 0x%x\n",
- DP_EDP_DISPLAY_CONTROL_REGISTER);
+ drm_dbg_kms(&i915->drm, "Failed to read DPCD register 0x%x\n",
+ DP_EDP_DISPLAY_CONTROL_REGISTER);
return;
}
if (enable)
@@ -46,8 +47,8 @@ static void set_aux_backlight_enable(struct intel_dp *intel_dp, bool enable)
if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_EDP_DISPLAY_CONTROL_REGISTER,
reg_val) != 1) {
- DRM_DEBUG_KMS("Failed to %s aux backlight\n",
- enable ? "enable" : "disable");
+ drm_dbg_kms(&i915->drm, "Failed to %s aux backlight\n",
+ enable ? "enable" : "disable");
}
}
@@ -58,6 +59,7 @@ static void set_aux_backlight_enable(struct intel_dp *intel_dp, bool enable)
static u32 intel_dp_aux_get_backlight(struct intel_connector *connector)
{
struct intel_dp *intel_dp = intel_attached_dp(connector);
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
u8 read_val[2] = { 0x0 };
u8 mode_reg;
u16 level = 0;
@@ -65,8 +67,9 @@ static u32 intel_dp_aux_get_backlight(struct intel_connector *connector)
if (drm_dp_dpcd_readb(&intel_dp->aux,
DP_EDP_BACKLIGHT_MODE_SET_REGISTER,
&mode_reg) != 1) {
- DRM_DEBUG_KMS("Failed to read the DPCD register 0x%x\n",
- DP_EDP_BACKLIGHT_MODE_SET_REGISTER);
+ drm_dbg_kms(&i915->drm,
+ "Failed to read the DPCD register 0x%x\n",
+ DP_EDP_BACKLIGHT_MODE_SET_REGISTER);
return 0;
}
@@ -80,8 +83,8 @@ static u32 intel_dp_aux_get_backlight(struct intel_connector *connector)
if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB,
&read_val, sizeof(read_val)) < 0) {
- DRM_DEBUG_KMS("Failed to read DPCD register 0x%x\n",
- DP_EDP_BACKLIGHT_BRIGHTNESS_MSB);
+ drm_dbg_kms(&i915->drm, "Failed to read DPCD register 0x%x\n",
+ DP_EDP_BACKLIGHT_BRIGHTNESS_MSB);
return 0;
}
level = read_val[0];
@@ -100,6 +103,7 @@ intel_dp_aux_set_backlight(const struct drm_connector_state *conn_state, u32 lev
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct intel_dp *intel_dp = intel_attached_dp(connector);
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
u8 vals[2] = { 0x0 };
vals[0] = level;
@@ -111,7 +115,8 @@ intel_dp_aux_set_backlight(const struct drm_connector_state *conn_state, u32 lev
}
if (drm_dp_dpcd_write(&intel_dp->aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB,
vals, sizeof(vals)) < 0) {
- DRM_DEBUG_KMS("Failed to write aux backlight level\n");
+ drm_dbg_kms(&i915->drm,
+ "Failed to write aux backlight level\n");
return;
}
}
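
The branch elided between these two hunks presumably splits the level across both bytes on panels that advertise a 16-bit brightness register; a sketch of that split (8-bit panels keep the level in vals[0] as shown above):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
    	uint32_t level = 0x1234;	/* requested brightness (example) */
    	uint8_t vals[2] = { 0x0 };

    	vals[0] = level;	/* 8-bit panels: LSB only */

    	/* 16-bit capable panels use both brightness registers */
    	vals[0] = (level & 0xff00) >> 8;	/* ..._BRIGHTNESS_MSB */
    	vals[1] = level & 0xff;			/* ..._BRIGHTNESS_LSB */

    	printf("MSB=0x%02x LSB=0x%02x\n", vals[0], vals[1]);
    	return 0;
    }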
@@ -133,7 +138,8 @@ static bool intel_dp_aux_set_pwm_freq(struct intel_connector *connector)
freq = dev_priv->vbt.backlight.pwm_freq_hz;
if (!freq) {
- DRM_DEBUG_KMS("Use panel default backlight frequency\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Use panel default backlight frequency\n");
return false;
}
@@ -146,13 +152,14 @@ static bool intel_dp_aux_set_pwm_freq(struct intel_connector *connector)
fxp_max = DIV_ROUND_CLOSEST(fxp * 5, 4);
if (fxp_min > fxp_actual || fxp_actual > fxp_max) {
- DRM_DEBUG_KMS("Actual frequency out of range\n");
+ drm_dbg_kms(&dev_priv->drm, "Actual frequency out of range\n");
return false;
}
if (drm_dp_dpcd_writeb(&intel_dp->aux,
DP_EDP_BACKLIGHT_FREQ_SET, (u8) f) < 0) {
- DRM_DEBUG_KMS("Failed to write aux backlight freq\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Failed to write aux backlight freq\n");
return false;
}
return true;
@@ -163,13 +170,14 @@ static void intel_dp_aux_enable_backlight(const struct intel_crtc_state *crtc_st
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct intel_dp *intel_dp = intel_attached_dp(connector);
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
struct intel_panel *panel = &connector->panel;
u8 dpcd_buf, new_dpcd_buf, edp_backlight_mode;
if (drm_dp_dpcd_readb(&intel_dp->aux,
DP_EDP_BACKLIGHT_MODE_SET_REGISTER, &dpcd_buf) != 1) {
- DRM_DEBUG_KMS("Failed to read DPCD register 0x%x\n",
- DP_EDP_BACKLIGHT_MODE_SET_REGISTER);
+ drm_dbg_kms(&i915->drm, "Failed to read DPCD register 0x%x\n",
+ DP_EDP_BACKLIGHT_MODE_SET_REGISTER);
return;
}
@@ -186,7 +194,8 @@ static void intel_dp_aux_enable_backlight(const struct intel_crtc_state *crtc_st
if (drm_dp_dpcd_writeb(&intel_dp->aux,
DP_EDP_PWMGEN_BIT_COUNT,
panel->backlight.pwmgen_bit_count) < 0)
- DRM_DEBUG_KMS("Failed to write aux pwmgen bit count\n");
+ drm_dbg_kms(&i915->drm,
+ "Failed to write aux pwmgen bit count\n");
break;
@@ -203,7 +212,8 @@ static void intel_dp_aux_enable_backlight(const struct intel_crtc_state *crtc_st
if (new_dpcd_buf != dpcd_buf) {
if (drm_dp_dpcd_writeb(&intel_dp->aux,
DP_EDP_BACKLIGHT_MODE_SET_REGISTER, new_dpcd_buf) < 0) {
- DRM_DEBUG_KMS("Failed to write aux backlight mode\n");
+ drm_dbg_kms(&i915->drm,
+ "Failed to write aux backlight mode\n");
}
}
@@ -237,9 +247,11 @@ static u32 intel_dp_aux_calc_max_backlight(struct intel_connector *connector)
* minimum value will be applied automatically, so no need to check that.
*/
freq = i915->vbt.backlight.pwm_freq_hz;
- DRM_DEBUG_KMS("VBT defined backlight frequency %u Hz\n", freq);
+ drm_dbg_kms(&i915->drm, "VBT defined backlight frequency %u Hz\n",
+ freq);
if (!freq) {
- DRM_DEBUG_KMS("Use panel default backlight frequency\n");
+ drm_dbg_kms(&i915->drm,
+ "Use panel default backlight frequency\n");
return max_backlight;
}
@@ -254,12 +266,14 @@ static u32 intel_dp_aux_calc_max_backlight(struct intel_connector *connector)
*/
if (drm_dp_dpcd_readb(&intel_dp->aux,
DP_EDP_PWMGEN_BIT_COUNT_CAP_MIN, &pn_min) != 1) {
- DRM_DEBUG_KMS("Failed to read pwmgen bit count cap min\n");
+ drm_dbg_kms(&i915->drm,
+ "Failed to read pwmgen bit count cap min\n");
return max_backlight;
}
if (drm_dp_dpcd_readb(&intel_dp->aux,
DP_EDP_PWMGEN_BIT_COUNT_CAP_MAX, &pn_max) != 1) {
- DRM_DEBUG_KMS("Failed to read pwmgen bit count cap max\n");
+ drm_dbg_kms(&i915->drm,
+ "Failed to read pwmgen bit count cap max\n");
return max_backlight;
}
pn_min &= DP_EDP_PWMGEN_BIT_COUNT_MASK;
@@ -268,7 +282,8 @@ static u32 intel_dp_aux_calc_max_backlight(struct intel_connector *connector)
fxp_min = DIV_ROUND_CLOSEST(fxp * 3, 4);
fxp_max = DIV_ROUND_CLOSEST(fxp * 5, 4);
if (fxp_min < (1 << pn_min) || (255 << pn_max) < fxp_max) {
- DRM_DEBUG_KMS("VBT defined backlight frequency out of range\n");
+ drm_dbg_kms(&i915->drm,
+ "VBT defined backlight frequency out of range\n");
return max_backlight;
}
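
The fxp values are the PWM divider in 27 MHz units (DP_EDP_BACKLIGHT_FREQ_BASE_KHZ): the sink divides the base clock by (f << pn) with f at most 255, and the driver accepts any divider within ±25% of the VBT target. A worked check with assumed sink caps:

    #include <stdio.h>

    #define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

    int main(void)
    {
    	unsigned int base_hz = 27000 * 1000;	/* 27 MHz PWM generator base */
    	unsigned int freq = 200;		/* VBT PWM frequency, Hz (example) */
    	unsigned int pn_min = 4, pn_max = 10;	/* assumed sink caps */

    	unsigned int fxp = DIV_ROUND_CLOSEST(base_hz, freq);	/* 135000 */
    	unsigned int fxp_min = DIV_ROUND_CLOSEST(fxp * 3, 4);	/* -25%: 101250 */
    	unsigned int fxp_max = DIV_ROUND_CLOSEST(fxp * 5, 4);	/* +25%: 168750 */

    	/* the divider is (f << pn), f <= 255: reject targets no pn can bracket */
    	if (fxp_min < (1u << pn_min) || (255u << pn_max) < fxp_max)
    		puts("VBT defined backlight frequency out of range");
    	else
    		puts("representable");	/* prints this for the values above */
    	return 0;
    }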
@@ -279,10 +294,11 @@ static u32 intel_dp_aux_calc_max_backlight(struct intel_connector *connector)
break;
}
- DRM_DEBUG_KMS("Using eDP pwmgen bit count of %d\n", pn);
+ drm_dbg_kms(&i915->drm, "Using eDP pwmgen bit count of %d\n", pn);
if (drm_dp_dpcd_writeb(&intel_dp->aux,
DP_EDP_PWMGEN_BIT_COUNT, pn) < 0) {
- DRM_DEBUG_KMS("Failed to write aux pwmgen bit count\n");
+ drm_dbg_kms(&i915->drm,
+ "Failed to write aux pwmgen bit count\n");
return max_backlight;
}
panel->backlight.pwmgen_bit_count = pn;
@@ -312,6 +328,7 @@ static bool
intel_dp_aux_display_control_capable(struct intel_connector *connector)
{
struct intel_dp *intel_dp = intel_attached_dp(connector);
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
/* Check the eDP Display control capabilities registers to determine if
* the panel can support backlight control over the aux channel
@@ -319,7 +336,7 @@ intel_dp_aux_display_control_capable(struct intel_connector *connector)
if (intel_dp->edp_dpcd[1] & DP_EDP_TCON_BACKLIGHT_ADJUSTMENT_CAP &&
(intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_AUX_SET_CAP) &&
!(intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_PWM_PIN_CAP)) {
- DRM_DEBUG_KMS("AUX Backlight Control Supported!\n");
+ drm_dbg_kms(&i915->drm, "AUX Backlight Control Supported!\n");
return true;
}
return false;
@@ -329,8 +346,7 @@ int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector)
{
struct intel_panel *panel = &intel_connector->panel;
struct intel_dp *intel_dp = enc_to_intel_dp(intel_connector->encoder);
- struct drm_device *dev = intel_connector->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
if (i915_modparams.enable_dpcd_backlight == 0 ||
!intel_dp_aux_display_control_capable(intel_connector))
@@ -340,17 +356,17 @@ int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector)
* There are a lot of machines that don't advertise the backlight
* control interface to use properly in their VBIOS, :\
*/
- if (dev_priv->vbt.backlight.type !=
+ if (i915->vbt.backlight.type !=
INTEL_BACKLIGHT_VESA_EDP_AUX_INTERFACE &&
!drm_dp_has_quirk(&intel_dp->desc, intel_dp->edid_quirks,
DP_QUIRK_FORCE_DPCD_BACKLIGHT)) {
- DRM_DEV_INFO(dev->dev,
- "Panel advertises DPCD backlight support, but "
- "VBT disagrees. If your backlight controls "
- "don't work try booting with "
- "i915.enable_dpcd_backlight=1. If your machine "
- "needs this, please file a _new_ bug report on "
- "drm/i915, see " FDO_BUG_URL " for details.\n");
+ drm_info(&i915->drm,
+ "Panel advertises DPCD backlight support, but "
+ "VBT disagrees. If your backlight controls "
+ "don't work try booting with "
+ "i915.enable_dpcd_backlight=1. If your machine "
+ "needs this, please file a _new_ bug report on "
+ "drm/i915, see " FDO_BUG_URL " for details.\n");
return -ENODEV;
}
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
index a7defb37ab00..e4f1843170b7 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
@@ -34,9 +34,8 @@ intel_dp_dump_link_status(const u8 link_status[DP_LINK_STATUS_SIZE])
link_status[3], link_status[4], link_status[5]);
}
-static void
-intel_get_adjust_train(struct intel_dp *intel_dp,
- const u8 link_status[DP_LINK_STATUS_SIZE])
+void intel_dp_get_adjust_train(struct intel_dp *intel_dp,
+ const u8 link_status[DP_LINK_STATUS_SIZE])
{
u8 v = 0;
u8 p = 0;
@@ -219,7 +218,7 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
/* Update training set as requested by target */
- intel_get_adjust_train(intel_dp, link_status);
+ intel_dp_get_adjust_train(intel_dp, link_status);
if (!intel_dp_update_link_train(intel_dp)) {
drm_err(&i915->drm,
"failed to update link training\n");
@@ -338,7 +337,7 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
}
/* Update training set as requested by target */
- intel_get_adjust_train(intel_dp, link_status);
+ intel_dp_get_adjust_train(intel_dp, link_status);
if (!intel_dp_update_link_train(intel_dp)) {
drm_err(&i915->drm,
"failed to update link training\n");
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.h b/drivers/gpu/drm/i915/display/intel_dp_link_training.h
index 174566adcc92..01f1dabbb060 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.h
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.h
@@ -6,8 +6,12 @@
#ifndef __INTEL_DP_LINK_TRAINING_H__
#define __INTEL_DP_LINK_TRAINING_H__
+#include <drm/drm_dp_helper.h>
+
struct intel_dp;
+void intel_dp_get_adjust_train(struct intel_dp *intel_dp,
+ const u8 link_status[DP_LINK_STATUS_SIZE]);
void intel_dp_start_link_train(struct intel_dp *intel_dp);
void intel_dp_stop_link_train(struct intel_dp *intel_dp);
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index 35debce71366..a83f910d8e15 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -47,9 +47,9 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
struct intel_dp *intel_dp = &intel_mst->primary->dp;
struct intel_connector *connector =
to_intel_connector(conn_state->connector);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
- void *port = connector->port;
bool constant_n = drm_dp_has_quirk(&intel_dp->desc, 0,
DP_DPCD_QUIRK_CONSTANT_N);
int bpp, slots = -EINVAL;
@@ -65,7 +65,8 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
false);
slots = drm_dp_atomic_find_vcpi_slots(state, &intel_dp->mst_mgr,
- port, crtc_state->pbn, 0);
+ connector->port,
+ crtc_state->pbn, 0);
if (slots == -EDEADLK)
return slots;
if (slots >= 0)
@@ -73,7 +74,8 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
}
if (slots < 0) {
- DRM_DEBUG_KMS("failed finding vcpi slots:%d\n", slots);
+ drm_dbg_kms(&i915->drm, "failed finding vcpi slots:%d\n",
+ slots);
return slots;
}
@@ -88,56 +90,10 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
return 0;
}
-/*
- * Iterate over all connectors and return the smallest transcoder in the MST
- * stream
- */
-static enum transcoder
-intel_dp_mst_master_trans_compute(struct intel_atomic_state *state,
- struct intel_dp *mst_port)
-{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- struct intel_digital_connector_state *conn_state;
- struct intel_connector *connector;
- enum pipe ret = I915_MAX_PIPES;
- int i;
-
- if (INTEL_GEN(dev_priv) < 12)
- return INVALID_TRANSCODER;
-
- for_each_new_intel_connector_in_state(state, connector, conn_state, i) {
- struct intel_crtc_state *crtc_state;
- struct intel_crtc *crtc;
-
- if (connector->mst_port != mst_port || !conn_state->base.crtc)
- continue;
-
- crtc = to_intel_crtc(conn_state->base.crtc);
- crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
- if (!crtc_state->uapi.active)
- continue;
-
- /*
- * Using crtc->pipe because crtc_state->cpu_transcoder is
- * computed, so others CRTCs could have non-computed
- * cpu_transcoder
- */
- if (crtc->pipe < ret)
- ret = crtc->pipe;
- }
-
- if (ret == I915_MAX_PIPES)
- return INVALID_TRANSCODER;
-
- /* Simple cast works because TGL don't have a eDP transcoder */
- return (enum transcoder)ret;
-}
-
static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
- struct intel_atomic_state *state = to_intel_atomic_state(conn_state->state);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
struct intel_dp *intel_dp = &intel_mst->primary->dp;
@@ -147,7 +103,6 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
to_intel_digital_connector_state(conn_state);
const struct drm_display_mode *adjusted_mode =
&pipe_config->hw.adjusted_mode;
- void *port = connector->port;
struct link_config_limits limits;
int ret;
@@ -200,7 +155,56 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
intel_ddi_compute_min_voltage_level(dev_priv, pipe_config);
- pipe_config->mst_master_transcoder = intel_dp_mst_master_trans_compute(state, intel_dp);
+ return 0;
+}
+
+/*
+ * Iterate over all connectors and return a mask of
+ * all CPU transcoders streaming over the same DP link.
+ */
+static unsigned int
+intel_dp_mst_transcoder_mask(struct intel_atomic_state *state,
+ struct intel_dp *mst_port)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ const struct intel_digital_connector_state *conn_state;
+ struct intel_connector *connector;
+ u8 transcoders = 0;
+ int i;
+
+ if (INTEL_GEN(dev_priv) < 12)
+ return 0;
+
+ for_each_new_intel_connector_in_state(state, connector, conn_state, i) {
+ const struct intel_crtc_state *crtc_state;
+ struct intel_crtc *crtc;
+
+ if (connector->mst_port != mst_port || !conn_state->base.crtc)
+ continue;
+
+ crtc = to_intel_crtc(conn_state->base.crtc);
+ crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
+
+ if (!crtc_state->hw.active)
+ continue;
+
+ transcoders |= BIT(crtc_state->cpu_transcoder);
+ }
+
+ return transcoders;
+}
+
+static int intel_dp_mst_compute_config_late(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct intel_atomic_state *state = to_intel_atomic_state(conn_state->state);
+ struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
+ struct intel_dp *intel_dp = &intel_mst->primary->dp;
+
+ /* lowest numbered transcoder will be designated master */
+ crtc_state->mst_master_transcoder =
+ ffs(intel_dp_mst_transcoder_mask(state, intel_dp)) - 1;
return 0;
}
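
ffs() is 1-based, so the lowest set bit becomes the master transcoder index, and an empty mask (pre-GEN12, or no active streams) yields -1, which lines up with the driver's invalid-transcoder sentinel. A small demonstration:

    #include <stdio.h>
    #include <strings.h>	/* ffs() */

    int main(void)
    {
    	/* streams on transcoders B (bit 1) and C (bit 2) */
    	unsigned int transcoders = (1u << 1) | (1u << 2);

    	/* ffs() is 1-based, so the lowest set bit index is ffs() - 1 */
    	printf("master = %d\n", ffs(transcoders) - 1);	/* 1, i.e. transcoder B */

    	/* empty mask: ffs(0) == 0, so the result is -1 (no master) */
    	printf("master = %d\n", ffs(0) - 1);		/* -1 */
    	return 0;
    }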
@@ -312,7 +316,8 @@ intel_dp_mst_atomic_check(struct drm_connector *connector,
return ret;
}
-static void intel_mst_disable_dp(struct intel_encoder *encoder,
+static void intel_mst_disable_dp(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
@@ -321,22 +326,25 @@ static void intel_mst_disable_dp(struct intel_encoder *encoder,
struct intel_dp *intel_dp = &intel_dig_port->dp;
struct intel_connector *connector =
to_intel_connector(old_conn_state->connector);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
int ret;
- DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links);
+ drm_dbg_kms(&i915->drm, "active links %d\n",
+ intel_dp->active_mst_links);
drm_dp_mst_reset_vcpi_slots(&intel_dp->mst_mgr, connector->port);
ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr);
if (ret) {
- DRM_DEBUG_KMS("failed to update payload %d\n", ret);
+ drm_dbg_kms(&i915->drm, "failed to update payload %d\n", ret);
}
if (old_crtc_state->has_audio)
intel_audio_codec_disable(encoder,
old_crtc_state, old_conn_state);
}
-static void intel_mst_post_disable_dp(struct intel_encoder *encoder,
+static void intel_mst_post_disable_dp(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
@@ -370,7 +378,8 @@ static void intel_mst_post_disable_dp(struct intel_encoder *encoder,
if (intel_de_wait_for_set(dev_priv, intel_dp->regs.dp_tp_status,
DP_TP_STATUS_ACT_SENT, 1))
- DRM_ERROR("Timed out waiting for ACT sent when disabling\n");
+ drm_err(&dev_priv->drm,
+ "Timed out waiting for ACT sent when disabling\n");
drm_dp_check_act_status(&intel_dp->mst_mgr);
drm_dp_mst_deallocate_vcpi(&intel_dp->mst_mgr, connector->port);
@@ -401,13 +410,15 @@ static void intel_mst_post_disable_dp(struct intel_encoder *encoder,
intel_mst->connector = NULL;
if (last_mst_stream)
- intel_dig_port->base.post_disable(&intel_dig_port->base,
+ intel_dig_port->base.post_disable(state, &intel_dig_port->base,
old_crtc_state, NULL);
- DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links);
+ drm_dbg_kms(&dev_priv->drm, "active links %d\n",
+ intel_dp->active_mst_links);
}
-static void intel_mst_pre_pll_enable_dp(struct intel_encoder *encoder,
+static void intel_mst_pre_pll_enable_dp(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
@@ -416,11 +427,12 @@ static void intel_mst_pre_pll_enable_dp(struct intel_encoder *encoder,
struct intel_dp *intel_dp = &intel_dig_port->dp;
if (intel_dp->active_mst_links == 0)
- intel_dig_port->base.pre_pll_enable(&intel_dig_port->base,
+ intel_dig_port->base.pre_pll_enable(state, &intel_dig_port->base,
pipe_config, NULL);
}
-static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
+static void intel_mst_pre_enable_dp(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
@@ -444,7 +456,8 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
INTEL_GEN(dev_priv) >= 12 && first_mst_stream &&
!intel_dp_mst_is_master_trans(pipe_config));
- DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links);
+ drm_dbg_kms(&dev_priv->drm, "active links %d\n",
+ intel_dp->active_mst_links);
if (first_mst_stream)
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
@@ -452,7 +465,7 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
drm_dp_send_power_updown_phy(&intel_dp->mst_mgr, connector->port, true);
if (first_mst_stream)
- intel_dig_port->base.pre_enable(&intel_dig_port->base,
+ intel_dig_port->base.pre_enable(state, &intel_dig_port->base,
pipe_config, NULL);
ret = drm_dp_mst_allocate_vcpi(&intel_dp->mst_mgr,
@@ -460,7 +473,7 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
pipe_config->pbn,
pipe_config->dp_m_n.tu);
if (!ret)
- DRM_ERROR("failed to allocate vcpi\n");
+ drm_err(&dev_priv->drm, "failed to allocate vcpi\n");
intel_dp->active_mst_links++;
temp = intel_de_read(dev_priv, intel_dp->regs.dp_tp_status);
@@ -483,7 +496,8 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
intel_dp_set_m_n(pipe_config, M1_N1);
}
-static void intel_mst_enable_dp(struct intel_encoder *encoder,
+static void intel_mst_enable_dp(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
@@ -498,11 +512,12 @@ static void intel_mst_enable_dp(struct intel_encoder *encoder,
intel_crtc_vblank_on(pipe_config);
- DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links);
+ drm_dbg_kms(&dev_priv->drm, "active links %d\n",
+ intel_dp->active_mst_links);
if (intel_de_wait_for_set(dev_priv, intel_dp->regs.dp_tp_status,
DP_TP_STATUS_ACT_SENT, 1))
- DRM_ERROR("Timed out waiting for ACT sent\n");
+ drm_err(&dev_priv->drm, "Timed out waiting for ACT sent\n");
drm_dp_check_act_status(&intel_dp->mst_mgr);
@@ -785,6 +800,7 @@ intel_dp_create_fake_mst_encoder(struct intel_digital_port *intel_dig_port, enum
intel_encoder->pipe_mask = ~0;
intel_encoder->compute_config = intel_dp_mst_compute_config;
+ intel_encoder->compute_config_late = intel_dp_mst_compute_config_late;
intel_encoder->disable = intel_mst_disable_dp;
intel_encoder->post_disable = intel_mst_post_disable_dp;
intel_encoder->pre_pll_enable = intel_mst_pre_pll_enable_dp;
diff --git a/drivers/gpu/drm/i915/display/intel_dsi.c b/drivers/gpu/drm/i915/display/intel_dsi.c
index a2a937109a5a..afa4e6817e8c 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi.c
@@ -31,20 +31,21 @@ int intel_dsi_tlpx_ns(const struct intel_dsi *intel_dsi)
int intel_dsi_get_modes(struct drm_connector *connector)
{
+ struct drm_i915_private *i915 = to_i915(connector->dev);
struct intel_connector *intel_connector = to_intel_connector(connector);
struct drm_display_mode *mode;
- DRM_DEBUG_KMS("\n");
+ drm_dbg_kms(&i915->drm, "\n");
if (!intel_connector->panel.fixed_mode) {
- DRM_DEBUG_KMS("no fixed mode\n");
+ drm_dbg_kms(&i915->drm, "no fixed mode\n");
return 0;
}
mode = drm_mode_duplicate(connector->dev,
intel_connector->panel.fixed_mode);
if (!mode) {
- DRM_DEBUG_KMS("drm_mode_duplicate failed\n");
+ drm_dbg_kms(&i915->drm, "drm_mode_duplicate failed\n");
return 0;
}
@@ -60,7 +61,7 @@ enum drm_mode_status intel_dsi_mode_valid(struct drm_connector *connector,
const struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
- DRM_DEBUG_KMS("\n");
+ drm_dbg_kms(&dev_priv->drm, "\n");
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
return MODE_NO_DBLESCAN;
diff --git a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
index 574dcfec9577..3c9c05478a03 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
@@ -453,8 +453,7 @@ static inline void i2c_acpi_find_adapter(struct intel_dsi *intel_dsi,
static const u8 *mipi_exec_i2c(struct intel_dsi *intel_dsi, const u8 *data)
{
- struct drm_device *drm_dev = intel_dsi->base.base.dev;
- struct device *dev = &drm_dev->pdev->dev;
+ struct drm_i915_private *i915 = to_i915(intel_dsi->base.base.dev);
struct i2c_adapter *adapter;
struct i2c_msg msg;
int ret;
@@ -471,7 +470,7 @@ static const u8 *mipi_exec_i2c(struct intel_dsi *intel_dsi, const u8 *data)
adapter = i2c_get_adapter(intel_dsi->i2c_bus_num);
if (!adapter) {
- DRM_DEV_ERROR(dev, "Cannot find a valid i2c bus for xfer\n");
+ drm_err(&i915->drm, "Cannot find a valid i2c bus for xfer\n");
goto err_bus;
}
@@ -489,9 +488,9 @@ static const u8 *mipi_exec_i2c(struct intel_dsi *intel_dsi, const u8 *data)
ret = i2c_transfer(adapter, &msg, 1);
if (ret < 0)
- DRM_DEV_ERROR(dev,
- "Failed to xfer payload of size (%u) to reg (%u)\n",
- payload_size, reg_offset);
+ drm_err(&i915->drm,
+ "Failed to xfer payload of size (%u) to reg (%u)\n",
+ payload_size, reg_offset);
kfree(payload_data);
err_alloc:
diff --git a/drivers/gpu/drm/i915/display/intel_dvo.c b/drivers/gpu/drm/i915/display/intel_dvo.c
index 341d5ce8b062..5cd09034519b 100644
--- a/drivers/gpu/drm/i915/display/intel_dvo.c
+++ b/drivers/gpu/drm/i915/display/intel_dvo.c
@@ -183,7 +183,8 @@ static void intel_dvo_get_config(struct intel_encoder *encoder,
pipe_config->hw.adjusted_mode.crtc_clock = pipe_config->port_clock;
}
-static void intel_disable_dvo(struct intel_encoder *encoder,
+static void intel_disable_dvo(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
@@ -197,7 +198,8 @@ static void intel_disable_dvo(struct intel_encoder *encoder,
intel_de_read(dev_priv, dvo_reg);
}
-static void intel_enable_dvo(struct intel_encoder *encoder,
+static void intel_enable_dvo(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
@@ -272,7 +274,8 @@ static int intel_dvo_compute_config(struct intel_encoder *encoder,
return 0;
}
-static void intel_dvo_pre_enable(struct intel_encoder *encoder,
+static void intel_dvo_pre_enable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c
index 2e5d835a9eaa..56bcd6c52a02 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.c
+++ b/drivers/gpu/drm/i915/display/intel_fbc.c
@@ -104,7 +104,7 @@ static void i8xx_fbc_deactivate(struct drm_i915_private *dev_priv)
/* Wait for compressing bit to clear */
if (intel_de_wait_for_clear(dev_priv, FBC_STATUS,
FBC_STAT_COMPRESSING, 10)) {
- DRM_DEBUG_KMS("FBC idle timed out\n");
+ drm_dbg_kms(&dev_priv->drm, "FBC idle timed out\n");
return;
}
}
@@ -485,7 +485,8 @@ static int intel_fbc_alloc_cfb(struct drm_i915_private *dev_priv,
if (!ret)
goto err_llb;
else if (ret > 1) {
- DRM_INFO("Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. Try to increase stolen memory size if available in BIOS.\n");
+ drm_info(&dev_priv->drm,
+ "Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. Try to increase stolen memory size if available in BIOS.\n");
}
@@ -521,8 +522,9 @@ static int intel_fbc_alloc_cfb(struct drm_i915_private *dev_priv,
dev_priv->dsm.start + compressed_llb->start);
}
- DRM_DEBUG_KMS("reserved %llu bytes of contiguous stolen space for FBC, threshold: %d\n",
- fbc->compressed_fb.size, fbc->threshold);
+ drm_dbg_kms(&dev_priv->drm,
+ "reserved %llu bytes of contiguous stolen space for FBC, threshold: %d\n",
+ fbc->compressed_fb.size, fbc->threshold);
return 0;
@@ -531,7 +533,7 @@ err_fb:
i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_fb);
err_llb:
if (drm_mm_initialized(&dev_priv->mm.stolen))
- pr_info_once("drm: not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
+ drm_info_once(&dev_priv->drm, "not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
return -ENOSPC;
}
@@ -606,6 +608,19 @@ static bool pixel_format_is_valid(struct drm_i915_private *dev_priv,
}
}
+static bool rotation_is_valid(struct drm_i915_private *dev_priv,
+ u32 pixel_format, unsigned int rotation)
+{
+ if (INTEL_GEN(dev_priv) >= 9 && pixel_format == DRM_FORMAT_RGB565 &&
+ drm_rotation_90_or_270(rotation))
+ return false;
+ else if (INTEL_GEN(dev_priv) <= 4 && !IS_G4X(dev_priv) &&
+ rotation != DRM_MODE_ROTATE_0)
+ return false;
+
+ return true;
+}
+
/*
* For some reason, the hardware tracking starts looking at whatever we
* programmed as the display plane base address register. It does not look at
@@ -640,6 +655,22 @@ static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc)
return effective_w <= max_w && effective_h <= max_h;
}
+static bool tiling_is_valid(struct drm_i915_private *dev_priv,
+ uint64_t modifier)
+{
+ switch (modifier) {
+ case DRM_FORMAT_MOD_LINEAR:
+ if (INTEL_GEN(dev_priv) >= 9)
+ return true;
+ return false;
+ case I915_FORMAT_MOD_X_TILED:
+ case I915_FORMAT_MOD_Y_TILED:
+ return true;
+ default:
+ return false;
+ }
+}
+
static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
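
The two helpers added above factor the plane checks out of intel_fbc_can_activate(): rotation_is_valid() folds the old gen2-4 any-rotation restriction together with a new gen9+ RGB565 90/270 limit, and tiling_is_valid() makes the acceptable modifiers explicit. What the latter accepts, restated as a fragment (the cache/fbc names match intel_fbc_can_activate() further down):

/* DRM_FORMAT_MOD_LINEAR	gen9+ only
 * I915_FORMAT_MOD_X_TILED	all generations
 * I915_FORMAT_MOD_Y_TILED	all generations
 * anything else		rejected
 */
if (!tiling_is_valid(dev_priv, cache->fb.modifier)) {
	fbc->no_fbc_reason = "tiling unsupported";
	return false;
}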
@@ -673,6 +704,7 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
cache->fb.format = fb->format;
cache->fb.stride = fb->pitches[0];
+ cache->fb.modifier = fb->modifier;
drm_WARN_ON(&dev_priv->drm, plane_state->flags & PLANE_HAS_FENCE &&
!plane_state->vma->fence);
@@ -746,29 +778,39 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
return false;
}
- /* The use of a CPU fence is mandatory in order to detect writes
- * by the CPU to the scanout and trigger updates to the FBC.
+ /* The use of a CPU fence is one of two ways to detect writes by the
+ * CPU to the scanout and trigger updates to the FBC.
+ *
+ * The other method is software tracking (see
+ * intel_fbc_invalidate/flush()): it manually notifies FBC to nuke
+ * the current compressed buffer and recompress it.
*
* Note that it is possible for a tiled surface to be unmappable (and
- * so have no fence associated with it) due to aperture constaints
+ * so have no fence associated with it) due to aperture constraints
* at the time of pinning.
*
* FIXME with 90/270 degree rotation we should use the fence on
* the normal GTT view (the rotated view doesn't even have a
* fence). Would need changes to the FBC fence Y offset as well.
- * For now this will effecively disable FBC with 90/270 degree
+ * For now this will effectively disable FBC with 90/270 degree
* rotation.
*/
- if (cache->fence_id < 0) {
+ if (INTEL_GEN(dev_priv) < 9 && cache->fence_id < 0) {
fbc->no_fbc_reason = "framebuffer not tiled or fenced";
return false;
}
- if (INTEL_GEN(dev_priv) <= 4 && !IS_G4X(dev_priv) &&
- cache->plane.rotation != DRM_MODE_ROTATE_0) {
+
+ if (!rotation_is_valid(dev_priv, cache->fb.format->format,
+ cache->plane.rotation)) {
fbc->no_fbc_reason = "rotation unsupported";
return false;
}
+ if (!tiling_is_valid(dev_priv, cache->fb.modifier)) {
+ fbc->no_fbc_reason = "tiling unsupported";
+ return false;
+ }
+
if (!stride_is_valid(dev_priv, cache->fb.stride)) {
fbc->no_fbc_reason = "framebuffer stride not supported";
return false;
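
The fence check is the functional core of this hunk: before gen9 a CPU fence remains mandatory, while newer hardware may rely on the software tracking described in the comment above. A minimal restatement of the new gate; the helper name is illustrative, not from the patch:

/* True when activation must bail for lack of a fence: only
 * pre-gen9 hardware strictly needs one. */
static bool fbc_missing_required_fence(struct drm_i915_private *i915,
				       int fence_id)
{
	return INTEL_GEN(i915) < 9 && fence_id < 0;
}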
@@ -948,7 +990,8 @@ static void __intel_fbc_disable(struct drm_i915_private *dev_priv)
drm_WARN_ON(&dev_priv->drm, !fbc->crtc);
drm_WARN_ON(&dev_priv->drm, fbc->active);
- DRM_DEBUG_KMS("Disabling FBC on pipe %c\n", pipe_name(crtc->pipe));
+ drm_dbg_kms(&dev_priv->drm, "Disabling FBC on pipe %c\n",
+ pipe_name(crtc->pipe));
__intel_fbc_cleanup_cfb(dev_priv);
@@ -1176,7 +1219,8 @@ void intel_fbc_enable(struct intel_atomic_state *state,
else
cache->gen9_wa_cfb_stride = 0;
- DRM_DEBUG_KMS("Enabling FBC on pipe %c\n", pipe_name(crtc->pipe));
+ drm_dbg_kms(&dev_priv->drm, "Enabling FBC on pipe %c\n",
+ pipe_name(crtc->pipe));
fbc->no_fbc_reason = "FBC enabled but not active yet\n";
fbc->crtc = crtc;
@@ -1238,7 +1282,7 @@ static void intel_fbc_underrun_work_fn(struct work_struct *work)
if (fbc->underrun_detected || !fbc->crtc)
goto out;
- DRM_DEBUG_KMS("Disabling FBC due to FIFO underrun.\n");
+ drm_dbg_kms(&dev_priv->drm, "Disabling FBC due to FIFO underrun.\n");
fbc->underrun_detected = true;
intel_fbc_deactivate(dev_priv, "FIFO underrun");
@@ -1264,7 +1308,8 @@ int intel_fbc_reset_underrun(struct drm_i915_private *dev_priv)
return ret;
if (dev_priv->fbc.underrun_detected) {
- DRM_DEBUG_KMS("Re-allowing FBC after fifo underrun\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Re-allowing FBC after fifo underrun\n");
dev_priv->fbc.no_fbc_reason = "FIFO underrun cleared";
}
@@ -1335,7 +1380,8 @@ static bool need_fbc_vtd_wa(struct drm_i915_private *dev_priv)
/* WaFbcTurnOffFbcWhenHyperVisorIsUsed:skl,bxt */
if (intel_vtd_active() &&
(IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))) {
- DRM_INFO("Disabling framebuffer compression (FBC) to prevent screen flicker with VT-d enabled\n");
+ drm_info(&dev_priv->drm,
+ "Disabling framebuffer compression (FBC) to prevent screen flicker with VT-d enabled\n");
return true;
}
@@ -1363,8 +1409,8 @@ void intel_fbc_init(struct drm_i915_private *dev_priv)
mkwrite_device_info(dev_priv)->display.has_fbc = false;
i915_modparams.enable_fbc = intel_sanitize_fbc_option(dev_priv);
- DRM_DEBUG_KMS("Sanitized enable_fbc value: %d\n",
- i915_modparams.enable_fbc);
+ drm_dbg_kms(&dev_priv->drm, "Sanitized enable_fbc value: %d\n",
+ i915_modparams.enable_fbc);
if (!HAS_FBC(dev_priv)) {
fbc->no_fbc_reason = "unsupported by this chipset";
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c
index 3bc804212a99..bd39eb6a21b8 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/display/intel_fbdev.c
@@ -146,7 +146,7 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
if (IS_ERR(obj))
obj = i915_gem_object_create_shmem(dev_priv, size);
if (IS_ERR(obj)) {
- DRM_ERROR("failed to allocate framebuffer\n");
+ drm_err(&dev_priv->drm, "failed to allocate framebuffer\n");
return PTR_ERR(obj);
}
@@ -183,21 +183,23 @@ static int intelfb_create(struct drm_fb_helper *helper,
if (intel_fb &&
(sizes->fb_width > intel_fb->base.width ||
sizes->fb_height > intel_fb->base.height)) {
- DRM_DEBUG_KMS("BIOS fb too small (%dx%d), we require (%dx%d),"
- " releasing it\n",
- intel_fb->base.width, intel_fb->base.height,
- sizes->fb_width, sizes->fb_height);
+ drm_dbg_kms(&dev_priv->drm,
+ "BIOS fb too small (%dx%d), we require (%dx%d),"
+ " releasing it\n",
+ intel_fb->base.width, intel_fb->base.height,
+ sizes->fb_width, sizes->fb_height);
drm_framebuffer_put(&intel_fb->base);
intel_fb = ifbdev->fb = NULL;
}
if (!intel_fb || drm_WARN_ON(dev, !intel_fb_obj(&intel_fb->base))) {
- DRM_DEBUG_KMS("no BIOS fb, allocating a new one\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "no BIOS fb, allocating a new one\n");
ret = intelfb_alloc(helper, sizes);
if (ret)
return ret;
intel_fb = ifbdev->fb;
} else {
- DRM_DEBUG_KMS("re-using BIOS fb\n");
+ drm_dbg_kms(&dev_priv->drm, "re-using BIOS fb\n");
prealloc = true;
sizes->fb_width = intel_fb->base.width;
sizes->fb_height = intel_fb->base.height;
@@ -220,7 +222,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
info = drm_fb_helper_alloc_fbi(helper);
if (IS_ERR(info)) {
- DRM_ERROR("Failed to allocate fb_info\n");
+ drm_err(&dev_priv->drm, "Failed to allocate fb_info\n");
ret = PTR_ERR(info);
goto out_unpin;
}
@@ -240,7 +242,8 @@ static int intelfb_create(struct drm_fb_helper *helper,
vaddr = i915_vma_pin_iomap(vma);
if (IS_ERR(vaddr)) {
- DRM_ERROR("Failed to remap framebuffer into virtual memory\n");
+ drm_err(&dev_priv->drm,
+ "Failed to remap framebuffer into virtual memory\n");
ret = PTR_ERR(vaddr);
goto out_unpin;
}
@@ -258,9 +261,9 @@ static int intelfb_create(struct drm_fb_helper *helper,
/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
- DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x\n",
- ifbdev->fb->base.width, ifbdev->fb->base.height,
- i915_ggtt_offset(vma));
+ drm_dbg_kms(&dev_priv->drm, "allocated %dx%d fb: 0x%08x\n",
+ ifbdev->fb->base.width, ifbdev->fb->base.height,
+ i915_ggtt_offset(vma));
ifbdev->vma = vma;
ifbdev->vma_flags = flags;
@@ -309,6 +312,7 @@ static void intel_fbdev_destroy(struct intel_fbdev *ifbdev)
static bool intel_fbdev_init_bios(struct drm_device *dev,
struct intel_fbdev *ifbdev)
{
+ struct drm_i915_private *i915 = to_i915(dev);
struct intel_framebuffer *fb = NULL;
struct drm_crtc *crtc;
struct intel_crtc *intel_crtc;
@@ -321,21 +325,24 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
intel_crtc = to_intel_crtc(crtc);
if (!crtc->state->active || !obj) {
- DRM_DEBUG_KMS("pipe %c not active or no fb, skipping\n",
- pipe_name(intel_crtc->pipe));
+ drm_dbg_kms(&i915->drm,
+ "pipe %c not active or no fb, skipping\n",
+ pipe_name(intel_crtc->pipe));
continue;
}
if (obj->base.size > max_size) {
- DRM_DEBUG_KMS("found possible fb from plane %c\n",
- pipe_name(intel_crtc->pipe));
+ drm_dbg_kms(&i915->drm,
+ "found possible fb from plane %c\n",
+ pipe_name(intel_crtc->pipe));
fb = to_intel_framebuffer(crtc->primary->state->fb);
max_size = obj->base.size;
}
}
if (!fb) {
- DRM_DEBUG_KMS("no active fbs found, not using BIOS config\n");
+ drm_dbg_kms(&i915->drm,
+ "no active fbs found, not using BIOS config\n");
goto out;
}
@@ -346,13 +353,14 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
intel_crtc = to_intel_crtc(crtc);
if (!crtc->state->active) {
- DRM_DEBUG_KMS("pipe %c not active, skipping\n",
- pipe_name(intel_crtc->pipe));
+ drm_dbg_kms(&i915->drm,
+ "pipe %c not active, skipping\n",
+ pipe_name(intel_crtc->pipe));
continue;
}
- DRM_DEBUG_KMS("checking plane %c for BIOS fb\n",
- pipe_name(intel_crtc->pipe));
+ drm_dbg_kms(&i915->drm, "checking plane %c for BIOS fb\n",
+ pipe_name(intel_crtc->pipe));
/*
* See if the plane fb we found above will fit on this
@@ -362,9 +370,10 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
cur_size = crtc->state->adjusted_mode.crtc_hdisplay;
cur_size = cur_size * fb->base.format->cpp[0];
if (fb->base.pitches[0] < cur_size) {
- DRM_DEBUG_KMS("fb not wide enough for plane %c (%d vs %d)\n",
- pipe_name(intel_crtc->pipe),
- cur_size, fb->base.pitches[0]);
+ drm_dbg_kms(&i915->drm,
+ "fb not wide enough for plane %c (%d vs %d)\n",
+ pipe_name(intel_crtc->pipe),
+ cur_size, fb->base.pitches[0]);
fb = NULL;
break;
}
@@ -372,28 +381,32 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
cur_size = crtc->state->adjusted_mode.crtc_vdisplay;
cur_size = intel_fb_align_height(&fb->base, 0, cur_size);
cur_size *= fb->base.pitches[0];
- DRM_DEBUG_KMS("pipe %c area: %dx%d, bpp: %d, size: %d\n",
- pipe_name(intel_crtc->pipe),
- crtc->state->adjusted_mode.crtc_hdisplay,
- crtc->state->adjusted_mode.crtc_vdisplay,
- fb->base.format->cpp[0] * 8,
- cur_size);
+ drm_dbg_kms(&i915->drm,
+ "pipe %c area: %dx%d, bpp: %d, size: %d\n",
+ pipe_name(intel_crtc->pipe),
+ crtc->state->adjusted_mode.crtc_hdisplay,
+ crtc->state->adjusted_mode.crtc_vdisplay,
+ fb->base.format->cpp[0] * 8,
+ cur_size);
if (cur_size > max_size) {
- DRM_DEBUG_KMS("fb not big enough for plane %c (%d vs %d)\n",
- pipe_name(intel_crtc->pipe),
- cur_size, max_size);
+ drm_dbg_kms(&i915->drm,
+ "fb not big enough for plane %c (%d vs %d)\n",
+ pipe_name(intel_crtc->pipe),
+ cur_size, max_size);
fb = NULL;
break;
}
- DRM_DEBUG_KMS("fb big enough for plane %c (%d >= %d)\n",
- pipe_name(intel_crtc->pipe),
- max_size, cur_size);
+ drm_dbg_kms(&i915->drm,
+ "fb big enough for plane %c (%d >= %d)\n",
+ pipe_name(intel_crtc->pipe),
+ max_size, cur_size);
}
if (!fb) {
- DRM_DEBUG_KMS("BIOS fb not suitable for all pipes, not using\n");
+ drm_dbg_kms(&i915->drm,
+ "BIOS fb not suitable for all pipes, not using\n");
goto out;
}
@@ -415,7 +428,7 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
}
- DRM_DEBUG_KMS("using BIOS fb for initial console\n");
+ drm_dbg_kms(&i915->drm, "using BIOS fb for initial console\n");
return true;
out:
@@ -522,8 +535,9 @@ void intel_fbdev_fini(struct drm_i915_private *dev_priv)
* processing, fbdev will perform a full connector reprobe if a hotplug event
* was received while HPD was suspended.
*/
-static void intel_fbdev_hpd_set_suspend(struct intel_fbdev *ifbdev, int state)
+static void intel_fbdev_hpd_set_suspend(struct drm_i915_private *i915, int state)
{
+ struct intel_fbdev *ifbdev = i915->fbdev;
bool send_hpd = false;
mutex_lock(&ifbdev->hpd_lock);
@@ -533,7 +547,7 @@ static void intel_fbdev_hpd_set_suspend(struct intel_fbdev *ifbdev, int state)
mutex_unlock(&ifbdev->hpd_lock);
if (send_hpd) {
- DRM_DEBUG_KMS("Handling delayed fbcon HPD event\n");
+ drm_dbg_kms(&i915->drm, "Handling delayed fbcon HPD event\n");
drm_fb_helper_hotplug_event(&ifbdev->helper);
}
}
@@ -588,7 +602,7 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous
drm_fb_helper_set_suspend(&ifbdev->helper, state);
console_unlock();
- intel_fbdev_hpd_set_suspend(ifbdev, state);
+ intel_fbdev_hpd_set_suspend(dev_priv, state);
}
void intel_fbdev_output_poll_changed(struct drm_device *dev)
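
intel_fbdev_hpd_set_suspend() now takes the i915 device instead of the fbdev, so its debug message can be device-specific; the ifbdev pointer is derived inside. A sketch reconstructed around the visible lines (the suspend/waiting bookkeeping between the lock calls is assumed, not shown in the hunk):

static void example_hpd_set_suspend(struct drm_i915_private *i915, int state)
{
	struct intel_fbdev *ifbdev = i915->fbdev;	/* derived, not passed */
	bool send_hpd;

	mutex_lock(&ifbdev->hpd_lock);
	ifbdev->hpd_suspended = state == FBINFO_STATE_SUSPENDED;
	send_hpd = !ifbdev->hpd_suspended && ifbdev->hpd_waiting;
	ifbdev->hpd_waiting = false;
	mutex_unlock(&ifbdev->hpd_lock);

	if (send_hpd) {
		drm_dbg_kms(&i915->drm, "Handling delayed fbcon HPD event\n");
		drm_fb_helper_hotplug_event(&ifbdev->helper);
	}
}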
diff --git a/drivers/gpu/drm/i915/display/intel_global_state.c b/drivers/gpu/drm/i915/display/intel_global_state.c
index a0cc894c3868..6f72feb14f3e 100644
--- a/drivers/gpu/drm/i915/display/intel_global_state.c
+++ b/drivers/gpu/drm/i915/display/intel_global_state.c
@@ -71,6 +71,7 @@ struct intel_global_state *
intel_atomic_get_global_obj_state(struct intel_atomic_state *state,
struct intel_global_obj *obj)
{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
int index, num_objs, i;
size_t size;
struct __intel_global_objs_state *arr;
@@ -106,8 +107,8 @@ intel_atomic_get_global_obj_state(struct intel_atomic_state *state,
state->num_global_objs = num_objs;
- DRM_DEBUG_ATOMIC("Added new global object %p state %p to %p\n",
- obj, obj_state, state);
+ drm_dbg_atomic(&i915->drm, "Added new global object %p state %p to %p\n",
+ obj, obj_state, state);
return obj_state;
}
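
Same logging conversion; the only variation across this diff is how the device is reached. The derivations seen so far, as a fragment with illustrative variable names:

struct drm_i915_private *a = to_i915(connector->dev);		/* from a connector */
struct drm_i915_private *b = to_i915(encoder->base.dev);	/* from an encoder */
struct drm_i915_private *c = to_i915(state->base.dev);		/* from atomic state */
struct drm_i915_private *d = dp_to_i915(intel_dp);		/* from an intel_dp */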
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c
index ee0f27ea2810..d3ad10653b2e 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.c
@@ -1391,6 +1391,7 @@ static
int hdcp2_propagate_stream_management_info(struct intel_connector *connector)
{
struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
union {
struct hdcp2_rep_stream_manage stream_manage;
@@ -1431,7 +1432,7 @@ int hdcp2_propagate_stream_management_info(struct intel_connector *connector)
hdcp->seq_num_m++;
if (hdcp->seq_num_m > HDCP_2_2_SEQ_NUM_MAX) {
- DRM_DEBUG_KMS("seq_num_m roll over.\n");
+ drm_dbg_kms(&i915->drm, "seq_num_m roll over.\n");
return -1;
}
@@ -2075,7 +2076,8 @@ int intel_hdcp_disable(struct intel_connector *connector)
return ret;
}
-void intel_hdcp_update_pipe(struct intel_encoder *encoder,
+void intel_hdcp_update_pipe(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.h b/drivers/gpu/drm/i915/display/intel_hdcp.h
index 7c12ad609b1f..86bbaec120cc 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.h
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.h
@@ -11,6 +11,7 @@
struct drm_connector;
struct drm_connector_state;
struct drm_i915_private;
+struct intel_atomic_state;
struct intel_connector;
struct intel_crtc_state;
struct intel_encoder;
@@ -26,7 +27,8 @@ int intel_hdcp_init(struct intel_connector *connector,
int intel_hdcp_enable(struct intel_connector *connector,
enum transcoder cpu_transcoder, u8 content_type);
int intel_hdcp_disable(struct intel_connector *connector);
-void intel_hdcp_update_pipe(struct intel_encoder *encoder,
+void intel_hdcp_update_pipe(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port);
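
Header side of the hook-signature change: an incomplete type is sufficient for a pointer parameter, so a one-line forward declaration spares intel_hdcp.h any new include. The resulting declaration:

struct intel_atomic_state;	/* forward declaration only */

void intel_hdcp_update_pipe(struct intel_atomic_state *state,
			    struct intel_encoder *encoder,
			    const struct intel_crtc_state *crtc_state,
			    const struct drm_connector_state *conn_state);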
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c
index 39930232b253..6b1bc955124c 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.c
@@ -707,13 +707,15 @@ void intel_read_infoframe(struct intel_encoder *encoder,
/* see comment above for the reason for this offset */
ret = hdmi_infoframe_unpack(frame, buffer + 1, sizeof(buffer) - 1);
if (ret) {
- DRM_DEBUG_KMS("Failed to unpack infoframe type 0x%02x\n", type);
+ drm_dbg_kms(encoder->base.dev,
+ "Failed to unpack infoframe type 0x%02x\n", type);
return;
}
if (frame->any.type != type)
- DRM_DEBUG_KMS("Found the wrong infoframe type 0x%x (expected 0x%02x)\n",
- frame->any.type, type);
+ drm_dbg_kms(encoder->base.dev,
+ "Found the wrong infoframe type 0x%x (expected 0x%02x)\n",
+ frame->any.type, type);
}
static bool
@@ -853,7 +855,8 @@ intel_hdmi_compute_drm_infoframe(struct intel_encoder *encoder,
ret = drm_hdmi_infoframe_set_hdr_metadata(frame, conn_state);
if (ret < 0) {
- DRM_DEBUG_KMS("couldn't set HDR metadata in infoframe\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "couldn't set HDR metadata in infoframe\n");
return false;
}
@@ -893,8 +896,9 @@ static void g4x_set_infoframes(struct intel_encoder *encoder,
if (!(val & VIDEO_DIP_ENABLE))
return;
if (port != (val & VIDEO_DIP_PORT_MASK)) {
- DRM_DEBUG_KMS("video DIP still enabled on port %c\n",
- (val & VIDEO_DIP_PORT_MASK) >> 29);
+ drm_dbg_kms(&dev_priv->drm,
+ "video DIP still enabled on port %c\n",
+ (val & VIDEO_DIP_PORT_MASK) >> 29);
return;
}
val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI |
@@ -906,8 +910,9 @@ static void g4x_set_infoframes(struct intel_encoder *encoder,
if (port != (val & VIDEO_DIP_PORT_MASK)) {
if (val & VIDEO_DIP_ENABLE) {
- DRM_DEBUG_KMS("video DIP already enabled on port %c\n",
- (val & VIDEO_DIP_PORT_MASK) >> 29);
+ drm_dbg_kms(&dev_priv->drm,
+ "video DIP already enabled on port %c\n",
+ (val & VIDEO_DIP_PORT_MASK) >> 29);
return;
}
val &= ~VIDEO_DIP_PORT_MASK;
@@ -1264,8 +1269,8 @@ void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable)
if (hdmi->dp_dual_mode.type < DRM_DP_DUAL_MODE_TYPE2_DVI)
return;
- DRM_DEBUG_KMS("%s DP dual mode adaptor TMDS output\n",
- enable ? "Enabling" : "Disabling");
+ drm_dbg_kms(&dev_priv->drm, "%s DP dual mode adaptor TMDS output\n",
+ enable ? "Enabling" : "Disabling");
drm_dp_dual_mode_set_tmds_output(hdmi->dp_dual_mode.type,
adapter, enable);
@@ -1346,13 +1351,14 @@ int intel_hdmi_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
ret = intel_hdmi_hdcp_write(intel_dig_port, DRM_HDCP_DDC_AN, an,
DRM_HDCP_AN_LEN);
if (ret) {
- DRM_DEBUG_KMS("Write An over DDC failed (%d)\n", ret);
+ drm_dbg_kms(&i915->drm, "Write An over DDC failed (%d)\n",
+ ret);
return ret;
}
ret = intel_gmbus_output_aksv(adapter);
if (ret < 0) {
- DRM_DEBUG_KMS("Failed to output aksv (%d)\n", ret);
+ drm_dbg_kms(&i915->drm, "Failed to output aksv (%d)\n", ret);
return ret;
}
return 0;
@@ -1361,11 +1367,14 @@ int intel_hdmi_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
static int intel_hdmi_hdcp_read_bksv(struct intel_digital_port *intel_dig_port,
u8 *bksv)
{
+ struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
+
int ret;
ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_BKSV, bksv,
DRM_HDCP_KSV_LEN);
if (ret)
- DRM_DEBUG_KMS("Read Bksv over DDC failed (%d)\n", ret);
+ drm_dbg_kms(&i915->drm, "Read Bksv over DDC failed (%d)\n",
+ ret);
return ret;
}
@@ -1373,11 +1382,14 @@ static
int intel_hdmi_hdcp_read_bstatus(struct intel_digital_port *intel_dig_port,
u8 *bstatus)
{
+ struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
+
int ret;
ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_BSTATUS,
bstatus, DRM_HDCP_BSTATUS_LEN);
if (ret)
- DRM_DEBUG_KMS("Read bstatus over DDC failed (%d)\n", ret);
+ drm_dbg_kms(&i915->drm, "Read bstatus over DDC failed (%d)\n",
+ ret);
return ret;
}
@@ -1385,12 +1397,14 @@ static
int intel_hdmi_hdcp_repeater_present(struct intel_digital_port *intel_dig_port,
bool *repeater_present)
{
+ struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
int ret;
u8 val;
ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_BCAPS, &val, 1);
if (ret) {
- DRM_DEBUG_KMS("Read bcaps over DDC failed (%d)\n", ret);
+ drm_dbg_kms(&i915->drm, "Read bcaps over DDC failed (%d)\n",
+ ret);
return ret;
}
*repeater_present = val & DRM_HDCP_DDC_BCAPS_REPEATER_PRESENT;
@@ -1401,11 +1415,14 @@ static
int intel_hdmi_hdcp_read_ri_prime(struct intel_digital_port *intel_dig_port,
u8 *ri_prime)
{
+ struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
+
int ret;
ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_RI_PRIME,
ri_prime, DRM_HDCP_RI_LEN);
if (ret)
- DRM_DEBUG_KMS("Read Ri' over DDC failed (%d)\n", ret);
+ drm_dbg_kms(&i915->drm, "Read Ri' over DDC failed (%d)\n",
+ ret);
return ret;
}
@@ -1413,12 +1430,14 @@ static
int intel_hdmi_hdcp_read_ksv_ready(struct intel_digital_port *intel_dig_port,
bool *ksv_ready)
{
+ struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
int ret;
u8 val;
ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_BCAPS, &val, 1);
if (ret) {
- DRM_DEBUG_KMS("Read bcaps over DDC failed (%d)\n", ret);
+ drm_dbg_kms(&i915->drm, "Read bcaps over DDC failed (%d)\n",
+ ret);
return ret;
}
*ksv_ready = val & DRM_HDCP_DDC_BCAPS_KSV_FIFO_READY;
@@ -1429,11 +1448,13 @@ static
int intel_hdmi_hdcp_read_ksv_fifo(struct intel_digital_port *intel_dig_port,
int num_downstream, u8 *ksv_fifo)
{
+ struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
int ret;
ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_KSV_FIFO,
ksv_fifo, num_downstream * DRM_HDCP_KSV_LEN);
if (ret) {
- DRM_DEBUG_KMS("Read ksv fifo over DDC failed (%d)\n", ret);
+ drm_dbg_kms(&i915->drm,
+ "Read ksv fifo over DDC failed (%d)\n", ret);
return ret;
}
return 0;
@@ -1443,6 +1464,7 @@ static
int intel_hdmi_hdcp_read_v_prime_part(struct intel_digital_port *intel_dig_port,
int i, u32 *part)
{
+ struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
int ret;
if (i >= DRM_HDCP_V_PRIME_NUM_PARTS)
@@ -1451,7 +1473,8 @@ int intel_hdmi_hdcp_read_v_prime_part(struct intel_digital_port *intel_dig_port,
ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_V_PRIME(i),
part, DRM_HDCP_V_PRIME_PART_LEN);
if (ret)
- DRM_DEBUG_KMS("Read V'[%d] over DDC failed (%d)\n", i, ret);
+ drm_dbg_kms(&i915->drm, "Read V'[%d] over DDC failed (%d)\n",
+ i, ret);
return ret;
}
@@ -1474,12 +1497,14 @@ static int kbl_repositioning_enc_en_signal(struct intel_connector *connector)
ret = intel_ddi_toggle_hdcp_signalling(&intel_dig_port->base, false);
if (ret) {
- DRM_ERROR("Disable HDCP signalling failed (%d)\n", ret);
+ drm_err(&dev_priv->drm,
+ "Disable HDCP signalling failed (%d)\n", ret);
return ret;
}
ret = intel_ddi_toggle_hdcp_signalling(&intel_dig_port->base, true);
if (ret) {
- DRM_ERROR("Enable HDCP signalling failed (%d)\n", ret);
+ drm_err(&dev_priv->drm,
+ "Enable HDCP signalling failed (%d)\n", ret);
return ret;
}
@@ -1500,8 +1525,8 @@ int intel_hdmi_hdcp_toggle_signalling(struct intel_digital_port *intel_dig_port,
ret = intel_ddi_toggle_hdcp_signalling(&intel_dig_port->base, enable);
if (ret) {
- DRM_ERROR("%s HDCP signalling failed (%d)\n",
- enable ? "Enable" : "Disable", ret);
+ drm_err(&dev_priv->drm, "%s HDCP signalling failed (%d)\n",
+ enable ? "Enable" : "Disable", ret);
return ret;
}
@@ -1536,10 +1561,13 @@ bool intel_hdmi_hdcp_check_link(struct intel_digital_port *intel_dig_port)
intel_de_write(i915, HDCP_RPRIME(i915, cpu_transcoder, port), ri.reg);
/* Wait for Ri prime match */
- if (wait_for(intel_de_read(i915, HDCP_STATUS(i915, cpu_transcoder, port)) &
+ if (wait_for((intel_de_read(i915, HDCP_STATUS(i915, cpu_transcoder, port)) &
+ (HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC)) ==
(HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1)) {
- DRM_ERROR("Ri' mismatch detected, link check failed (%x)\n",
- intel_de_read(i915, HDCP_STATUS(i915, cpu_transcoder, port)));
+ drm_err(&i915->drm,
+ "Ri' mismatch detected, link check failed (%x)\n",
+ intel_de_read(i915, HDCP_STATUS(i915, cpu_transcoder,
+ port)));
return false;
}
return true;
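
The wait_for() rewrite above is a logic fix riding along with the logging conversion: the old condition ANDed the status register with the mask and succeeded as soon as either bit was set, while the new form masks and compares so that both HDCP_STATUS_RI_MATCH and HDCP_STATUS_ENC must be set. The pattern in isolation:

u32 mask = HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC;
u32 val = intel_de_read(i915, HDCP_STATUS(i915, cpu_transcoder, port));
bool old_cond = val & mask;		/* true if either bit is set */
bool new_cond = (val & mask) == mask;	/* true only if both are set */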
@@ -1588,16 +1616,18 @@ static int get_hdcp2_msg_timeout(u8 msg_id, bool is_paired)
}
static inline
-int hdcp2_detect_msg_availability(struct intel_digital_port *intel_digital_port,
+int hdcp2_detect_msg_availability(struct intel_digital_port *intel_dig_port,
u8 msg_id, bool *msg_ready,
ssize_t *msg_sz)
{
+ struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
u8 rx_status[HDCP_2_2_HDMI_RXSTATUS_LEN];
int ret;
- ret = intel_hdmi_hdcp2_read_rx_status(intel_digital_port, rx_status);
+ ret = intel_hdmi_hdcp2_read_rx_status(intel_dig_port, rx_status);
if (ret < 0) {
- DRM_DEBUG_KMS("rx_status read failed. Err %d\n", ret);
+ drm_dbg_kms(&i915->drm, "rx_status read failed. Err %d\n",
+ ret);
return ret;
}
@@ -1617,6 +1647,7 @@ static ssize_t
intel_hdmi_hdcp2_wait_for_msg(struct intel_digital_port *intel_dig_port,
u8 msg_id, bool paired)
{
+ struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
bool msg_ready = false;
int timeout, ret;
ssize_t msg_sz = 0;
@@ -1631,8 +1662,8 @@ intel_hdmi_hdcp2_wait_for_msg(struct intel_digital_port *intel_dig_port,
!ret && msg_ready && msg_sz, timeout * 1000,
1000, 5 * 1000);
if (ret)
- DRM_DEBUG_KMS("msg_id: %d, ret: %d, timeout: %d\n",
- msg_id, ret, timeout);
+ drm_dbg_kms(&i915->drm, "msg_id: %d, ret: %d, timeout: %d\n",
+ msg_id, ret, timeout);
return ret ? ret : msg_sz;
}
@@ -1651,6 +1682,7 @@ static
int intel_hdmi_hdcp2_read_msg(struct intel_digital_port *intel_dig_port,
u8 msg_id, void *buf, size_t size)
{
+ struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
struct intel_hdmi *hdmi = &intel_dig_port->hdmi;
struct intel_hdcp *hdcp = &hdmi->attached_connector->hdcp;
unsigned int offset;
@@ -1666,15 +1698,17 @@ int intel_hdmi_hdcp2_read_msg(struct intel_digital_port *intel_dig_port,
* available buffer.
*/
if (ret > size) {
- DRM_DEBUG_KMS("msg_sz(%zd) is more than exp size(%zu)\n",
- ret, size);
+ drm_dbg_kms(&i915->drm,
+ "msg_sz(%zd) is more than exp size(%zu)\n",
+ ret, size);
return -1;
}
offset = HDCP_2_2_HDMI_REG_RD_MSG_OFFSET;
ret = intel_hdmi_hdcp_read(intel_dig_port, offset, buf, ret);
if (ret)
- DRM_DEBUG_KMS("Failed to read msg_id: %d(%zd)\n", msg_id, ret);
+ drm_dbg_kms(&i915->drm, "Failed to read msg_id: %d(%zd)\n",
+ msg_id, ret);
return ret;
}
@@ -1870,15 +1904,17 @@ static void intel_enable_hdmi_audio(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
- drm_WARN_ON(encoder->base.dev, !pipe_config->has_hdmi_sink);
- DRM_DEBUG_DRIVER("Enabling HDMI audio on pipe %c\n",
- pipe_name(crtc->pipe));
+ drm_WARN_ON(&i915->drm, !pipe_config->has_hdmi_sink);
+ drm_dbg_kms(&i915->drm, "Enabling HDMI audio on pipe %c\n",
+ pipe_name(crtc->pipe));
intel_audio_codec_enable(encoder, pipe_config, conn_state);
}
-static void g4x_enable_hdmi(struct intel_encoder *encoder,
+static void g4x_enable_hdmi(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
@@ -1900,7 +1936,8 @@ static void g4x_enable_hdmi(struct intel_encoder *encoder,
intel_enable_hdmi_audio(encoder, pipe_config, conn_state);
}
-static void ibx_enable_hdmi(struct intel_encoder *encoder,
+static void ibx_enable_hdmi(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
@@ -1951,7 +1988,8 @@ static void ibx_enable_hdmi(struct intel_encoder *encoder,
intel_enable_hdmi_audio(encoder, pipe_config, conn_state);
}
-static void cpt_enable_hdmi(struct intel_encoder *encoder,
+static void cpt_enable_hdmi(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
@@ -2004,13 +2042,15 @@ static void cpt_enable_hdmi(struct intel_encoder *encoder,
intel_enable_hdmi_audio(encoder, pipe_config, conn_state);
}
-static void vlv_enable_hdmi(struct intel_encoder *encoder,
+static void vlv_enable_hdmi(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
}
-static void intel_disable_hdmi(struct intel_encoder *encoder,
+static void intel_disable_hdmi(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
@@ -2068,7 +2108,8 @@ static void intel_disable_hdmi(struct intel_encoder *encoder,
intel_dp_dual_mode_set_tmds_output(intel_hdmi, false);
}
-static void g4x_disable_hdmi(struct intel_encoder *encoder,
+static void g4x_disable_hdmi(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
@@ -2076,10 +2117,11 @@ static void g4x_disable_hdmi(struct intel_encoder *encoder,
intel_audio_codec_disable(encoder,
old_crtc_state, old_conn_state);
- intel_disable_hdmi(encoder, old_crtc_state, old_conn_state);
+ intel_disable_hdmi(state, encoder, old_crtc_state, old_conn_state);
}
-static void pch_disable_hdmi(struct intel_encoder *encoder,
+static void pch_disable_hdmi(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
@@ -2088,11 +2130,12 @@ static void pch_disable_hdmi(struct intel_encoder *encoder,
old_crtc_state, old_conn_state);
}
-static void pch_post_disable_hdmi(struct intel_encoder *encoder,
+static void pch_post_disable_hdmi(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- intel_disable_hdmi(encoder, old_crtc_state, old_conn_state);
+ intel_disable_hdmi(state, encoder, old_crtc_state, old_conn_state);
}
static int intel_hdmi_source_max_tmds_clock(struct intel_encoder *encoder)
@@ -2289,10 +2332,12 @@ static bool
intel_hdmi_ycbcr420_config(struct drm_connector *connector,
struct intel_crtc_state *config)
{
+ struct drm_i915_private *i915 = to_i915(connector->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(config->uapi.crtc);
if (!connector->ycbcr_420_allowed) {
- DRM_ERROR("Platform doesn't support YCBCR420 output\n");
+ drm_err(&i915->drm,
+ "Platform doesn't support YCBCR420 output\n");
return false;
}
@@ -2300,7 +2345,8 @@ intel_hdmi_ycbcr420_config(struct drm_connector *connector,
/* YCBCR 420 output conversion needs a scaler */
if (skl_update_scaler_crtc(config)) {
- DRM_DEBUG_KMS("Scaler allocation for output failed\n");
+ drm_dbg_kms(&i915->drm,
+ "Scaler allocation for output failed\n");
return false;
}
@@ -2341,6 +2387,7 @@ static int intel_hdmi_compute_bpc(struct intel_encoder *encoder,
static int intel_hdmi_compute_clock(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state)
{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
@@ -2365,13 +2412,15 @@ static int intel_hdmi_compute_clock(struct intel_encoder *encoder,
if (crtc_state->pipe_bpp > bpc * 3)
crtc_state->pipe_bpp = bpc * 3;
- DRM_DEBUG_KMS("picking %d bpc for HDMI output (pipe bpp: %d)\n",
- bpc, crtc_state->pipe_bpp);
+ drm_dbg_kms(&i915->drm,
+ "picking %d bpc for HDMI output (pipe bpp: %d)\n",
+ bpc, crtc_state->pipe_bpp);
if (hdmi_port_clock_valid(intel_hdmi, crtc_state->port_clock,
false, crtc_state->has_hdmi_sink) != MODE_OK) {
- DRM_DEBUG_KMS("unsupported HDMI clock (%d kHz), rejecting mode\n",
- crtc_state->port_clock);
+ drm_dbg_kms(&i915->drm,
+ "unsupported HDMI clock (%d kHz), rejecting mode\n",
+ crtc_state->port_clock);
return -EINVAL;
}
@@ -2434,7 +2483,8 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder,
if (drm_mode_is_420_only(&connector->display_info, adjusted_mode)) {
if (!intel_hdmi_ycbcr420_config(connector, pipe_config)) {
- DRM_ERROR("Can't support YCBCR420 output\n");
+ drm_err(&dev_priv->drm,
+ "Can't support YCBCR420 output\n");
return -EINVAL;
}
}
@@ -2474,25 +2524,26 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder,
}
}
- intel_hdmi_compute_gcp_infoframe(encoder, pipe_config, conn_state);
+ intel_hdmi_compute_gcp_infoframe(encoder, pipe_config,
+ conn_state);
if (!intel_hdmi_compute_avi_infoframe(encoder, pipe_config, conn_state)) {
- DRM_DEBUG_KMS("bad AVI infoframe\n");
+ drm_dbg_kms(&dev_priv->drm, "bad AVI infoframe\n");
return -EINVAL;
}
if (!intel_hdmi_compute_spd_infoframe(encoder, pipe_config, conn_state)) {
- DRM_DEBUG_KMS("bad SPD infoframe\n");
+ drm_dbg_kms(&dev_priv->drm, "bad SPD infoframe\n");
return -EINVAL;
}
if (!intel_hdmi_compute_hdmi_infoframe(encoder, pipe_config, conn_state)) {
- DRM_DEBUG_KMS("bad HDMI infoframe\n");
+ drm_dbg_kms(&dev_priv->drm, "bad HDMI infoframe\n");
return -EINVAL;
}
if (!intel_hdmi_compute_drm_infoframe(encoder, pipe_config, conn_state)) {
- DRM_DEBUG_KMS("bad DRM infoframe\n");
+ drm_dbg_kms(&dev_priv->drm, "bad DRM infoframe\n");
return -EINVAL;
}
@@ -2542,7 +2593,8 @@ intel_hdmi_dp_dual_mode_detect(struct drm_connector *connector, bool has_edid)
*/
if (has_edid && !connector->override_edid &&
intel_bios_is_port_dp_dual_mode(dev_priv, port)) {
- DRM_DEBUG_KMS("Assuming DP dual mode adaptor presence based on VBT\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Assuming DP dual mode adaptor presence based on VBT\n");
type = DRM_DP_DUAL_MODE_TYPE1_DVI;
} else {
type = DRM_DP_DUAL_MODE_NONE;
@@ -2556,9 +2608,10 @@ intel_hdmi_dp_dual_mode_detect(struct drm_connector *connector, bool has_edid)
hdmi->dp_dual_mode.max_tmds_clock =
drm_dp_dual_mode_max_tmds_clock(type, adapter);
- DRM_DEBUG_KMS("DP dual mode adaptor (%s) detected (max TMDS clock: %d kHz)\n",
- drm_dp_get_dual_mode_type_name(type),
- hdmi->dp_dual_mode.max_tmds_clock);
+ drm_dbg_kms(&dev_priv->drm,
+ "DP dual mode adaptor (%s) detected (max TMDS clock: %d kHz)\n",
+ drm_dp_get_dual_mode_type_name(type),
+ hdmi->dp_dual_mode.max_tmds_clock);
}
static bool
@@ -2578,7 +2631,8 @@ intel_hdmi_set_edid(struct drm_connector *connector)
edid = drm_get_edid(connector, i2c);
if (!edid && !intel_gmbus_is_forced_bit(i2c)) {
- DRM_DEBUG_KMS("HDMI GMBUS EDID read failed, retry using GPIO bit-banging\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "HDMI GMBUS EDID read failed, retry using GPIO bit-banging\n");
intel_gmbus_force_bit(i2c, true);
edid = drm_get_edid(connector, i2c);
intel_gmbus_force_bit(i2c, false);
@@ -2610,8 +2664,8 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
struct intel_encoder *encoder = &hdmi_to_dig_port(intel_hdmi)->base;
intel_wakeref_t wakeref;
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
- connector->base.id, connector->name);
+ drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n",
+ connector->base.id, connector->name);
wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
@@ -2642,8 +2696,10 @@ out:
static void
intel_hdmi_force(struct drm_connector *connector)
{
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
- connector->base.id, connector->name);
+ struct drm_i915_private *i915 = to_i915(connector->dev);
+
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s]\n",
+ connector->base.id, connector->name);
intel_hdmi_unset_edid(connector);
@@ -2664,7 +2720,8 @@ static int intel_hdmi_get_modes(struct drm_connector *connector)
return intel_connector_update_modes(connector, edid);
}
-static void intel_hdmi_pre_enable(struct intel_encoder *encoder,
+static void intel_hdmi_pre_enable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
@@ -2678,7 +2735,8 @@ static void intel_hdmi_pre_enable(struct intel_encoder *encoder,
pipe_config, conn_state);
}
-static void vlv_hdmi_pre_enable(struct intel_encoder *encoder,
+static void vlv_hdmi_pre_enable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
@@ -2695,12 +2753,13 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder,
pipe_config->has_infoframe,
pipe_config, conn_state);
- g4x_enable_hdmi(encoder, pipe_config, conn_state);
+ g4x_enable_hdmi(state, encoder, pipe_config, conn_state);
vlv_wait_port_ready(dev_priv, dport, 0x0);
}
-static void vlv_hdmi_pre_pll_enable(struct intel_encoder *encoder,
+static void vlv_hdmi_pre_pll_enable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
@@ -2709,7 +2768,8 @@ static void vlv_hdmi_pre_pll_enable(struct intel_encoder *encoder,
vlv_phy_pre_pll_enable(encoder, pipe_config);
}
-static void chv_hdmi_pre_pll_enable(struct intel_encoder *encoder,
+static void chv_hdmi_pre_pll_enable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
@@ -2718,14 +2778,16 @@ static void chv_hdmi_pre_pll_enable(struct intel_encoder *encoder,
chv_phy_pre_pll_enable(encoder, pipe_config);
}
-static void chv_hdmi_post_pll_disable(struct intel_encoder *encoder,
+static void chv_hdmi_post_pll_disable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
chv_phy_post_pll_disable(encoder, old_crtc_state);
}
-static void vlv_hdmi_post_disable(struct intel_encoder *encoder,
+static void vlv_hdmi_post_disable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
@@ -2733,7 +2795,8 @@ static void vlv_hdmi_post_disable(struct intel_encoder *encoder,
vlv_phy_reset_lanes(encoder, old_crtc_state);
}
-static void chv_hdmi_post_disable(struct intel_encoder *encoder,
+static void chv_hdmi_post_disable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
@@ -2748,7 +2811,8 @@ static void chv_hdmi_post_disable(struct intel_encoder *encoder,
vlv_dpio_put(dev_priv);
}
-static void chv_hdmi_pre_enable(struct intel_encoder *encoder,
+static void chv_hdmi_pre_enable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
@@ -2766,7 +2830,7 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder,
pipe_config->has_infoframe,
pipe_config, conn_state);
- g4x_enable_hdmi(encoder, pipe_config, conn_state);
+ g4x_enable_hdmi(state, encoder, pipe_config, conn_state);
vlv_wait_port_ready(dev_priv, dport, 0x0);
@@ -2785,6 +2849,7 @@ intel_hdmi_get_i2c_adapter(struct drm_connector *connector)
static void intel_hdmi_create_i2c_symlink(struct drm_connector *connector)
{
+ struct drm_i915_private *i915 = to_i915(connector->dev);
struct i2c_adapter *adapter = intel_hdmi_get_i2c_adapter(connector);
struct kobject *i2c_kobj = &adapter->dev.kobj;
struct kobject *connector_kobj = &connector->kdev->kobj;
@@ -2792,7 +2857,7 @@ static void intel_hdmi_create_i2c_symlink(struct drm_connector *connector)
ret = sysfs_create_link(connector_kobj, i2c_kobj, i2c_kobj->name);
if (ret)
- DRM_ERROR("Failed to create i2c symlink (%d)\n", ret);
+ drm_err(&i915->drm, "Failed to create i2c symlink (%d)\n", ret);
}
static void intel_hdmi_remove_i2c_symlink(struct drm_connector *connector)
@@ -2921,9 +2986,10 @@ bool intel_hdmi_handle_sink_scrambling(struct intel_encoder *encoder,
if (!sink_scrambling->supported)
return true;
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s] scrambling=%s, TMDS bit clock ratio=1/%d\n",
- connector->base.id, connector->name,
- yesno(scrambling), high_tmds_clock_ratio ? 40 : 10);
+ drm_dbg_kms(&dev_priv->drm,
+ "[CONNECTOR:%d:%s] scrambling=%s, TMDS bit clock ratio=1/%d\n",
+ connector->base.id, connector->name,
+ yesno(scrambling), high_tmds_clock_ratio ? 40 : 10);
/* Set TMDS bit clock ratio to 1/40 or 1/10, and enable/disable scrambling */
return drm_scdc_set_high_tmds_clock_ratio(adapter,
@@ -3065,8 +3131,9 @@ static u8 intel_hdmi_ddc_pin(struct intel_encoder *encoder)
ddc_pin = intel_bios_alternate_ddc_pin(encoder);
if (ddc_pin) {
- DRM_DEBUG_KMS("Using DDC pin 0x%x for port %c (VBT)\n",
- ddc_pin, port_name(port));
+ drm_dbg_kms(&dev_priv->drm,
+ "Using DDC pin 0x%x for port %c (VBT)\n",
+ ddc_pin, port_name(port));
return ddc_pin;
}
@@ -3083,8 +3150,9 @@ static u8 intel_hdmi_ddc_pin(struct intel_encoder *encoder)
else
ddc_pin = g4x_port_to_ddc_pin(dev_priv, port);
- DRM_DEBUG_KMS("Using DDC pin 0x%x for port %c (platform default)\n",
- ddc_pin, port_name(port));
+ drm_dbg_kms(&dev_priv->drm,
+ "Using DDC pin 0x%x for port %c (platform default)\n",
+ ddc_pin, port_name(port));
return ddc_pin;
}
@@ -3141,8 +3209,9 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
enum port port = intel_encoder->port;
struct cec_connector_info conn_info;
- DRM_DEBUG_KMS("Adding HDMI connector on [ENCODER:%d:%s]\n",
- intel_encoder->base.base.id, intel_encoder->base.name);
+ drm_dbg_kms(&dev_priv->drm,
+ "Adding HDMI connector on [ENCODER:%d:%s]\n",
+ intel_encoder->base.base.id, intel_encoder->base.name);
if (INTEL_GEN(dev_priv) < 12 && drm_WARN_ON(dev, port == PORT_A))
return;
@@ -3186,7 +3255,8 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
int ret = intel_hdcp_init(intel_connector,
&intel_hdmi_hdcp_shim);
if (ret)
- DRM_DEBUG_KMS("HDCP init failed, skipping.\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "HDCP init failed, skipping.\n");
}
/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
@@ -3205,16 +3275,16 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
cec_notifier_conn_register(dev->dev, port_identifier(port),
&conn_info);
if (!intel_hdmi->cec_notifier)
- DRM_DEBUG_KMS("CEC notifier get failed\n");
+ drm_dbg_kms(&dev_priv->drm, "CEC notifier get failed\n");
}
static enum intel_hotplug_state
intel_hdmi_hotplug(struct intel_encoder *encoder,
- struct intel_connector *connector, bool irq_received)
+ struct intel_connector *connector)
{
enum intel_hotplug_state state;
- state = intel_encoder_hotplug(encoder, connector, irq_received);
+ state = intel_encoder_hotplug(encoder, connector);
/*
* On many platforms the HDMI live state signal is known to be
@@ -3228,7 +3298,7 @@ intel_hdmi_hotplug(struct intel_encoder *encoder,
* time around we didn't detect any change in the sink's connection
* status.
*/
- if (state == INTEL_HOTPLUG_UNCHANGED && irq_received)
+ if (state == INTEL_HOTPLUG_UNCHANGED && !connector->hotplug_retries)
state = INTEL_HOTPLUG_RETRY;
return state;
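
The irq_received argument is replaced by a per-connector retry counter: the hotplug work function (see the intel_hotplug.c hunks below) zeroes it on a fresh HPD event and increments it on a retry-driven re-run, so the one-shot HDMI retry fires on the first pass instead of depending on whether an IRQ happened to be seen. The encoder-side decision, mirroring the hunk above:

state = intel_encoder_hotplug(encoder, connector);

/* retry the unreliable HDMI live-state probe exactly once: only
 * when this run was triggered by a real hotplug event */
if (state == INTEL_HOTPLUG_UNCHANGED && !connector->hotplug_retries)
	state = INTEL_HOTPLUG_RETRY;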
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.c b/drivers/gpu/drm/i915/display/intel_hotplug.c
index a091442efba4..4f6f560e093e 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/display/intel_hotplug.c
@@ -270,8 +270,7 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
enum intel_hotplug_state
intel_encoder_hotplug(struct intel_encoder *encoder,
- struct intel_connector *connector,
- bool irq_received)
+ struct intel_connector *connector)
{
struct drm_device *dev = connector->base.dev;
enum drm_connector_status old_status;
@@ -392,12 +391,17 @@ static void i915_hotplug_work_func(struct work_struct *work)
struct intel_encoder *encoder =
intel_attached_encoder(connector);
+ if (hpd_event_bits & hpd_bit)
+ connector->hotplug_retries = 0;
+ else
+ connector->hotplug_retries++;
+
drm_dbg_kms(&dev_priv->drm,
- "Connector %s (pin %i) received hotplug event.\n",
- connector->base.name, pin);
+ "Connector %s (pin %i) received hotplug event. (retry %d)\n",
+ connector->base.name, pin,
+ connector->hotplug_retries);
- switch (encoder->hotplug(encoder, connector,
- hpd_event_bits & hpd_bit)) {
+ switch (encoder->hotplug(encoder, connector)) {
case INTEL_HOTPLUG_UNCHANGED:
break;
case INTEL_HOTPLUG_CHANGED:
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.h b/drivers/gpu/drm/i915/display/intel_hotplug.h
index 1e6b4fda2900..777b0743257e 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug.h
+++ b/drivers/gpu/drm/i915/display/intel_hotplug.h
@@ -15,8 +15,7 @@ enum port;
void intel_hpd_poll_init(struct drm_i915_private *dev_priv);
enum intel_hotplug_state intel_encoder_hotplug(struct intel_encoder *encoder,
- struct intel_connector *connector,
- bool irq_received);
+ struct intel_connector *connector);
void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
u32 pin_mask, u32 long_mask);
void intel_hpd_init(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/display/intel_lvds.c b/drivers/gpu/drm/i915/display/intel_lvds.c
index 9a067effcfa0..fe591f82163e 100644
--- a/drivers/gpu/drm/i915/display/intel_lvds.c
+++ b/drivers/gpu/drm/i915/display/intel_lvds.c
@@ -220,7 +220,8 @@ static void intel_lvds_pps_init_hw(struct drm_i915_private *dev_priv,
REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, pps->divider) | REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(pps->t4, 1000) + 1));
}
-static void intel_pre_enable_lvds(struct intel_encoder *encoder,
+static void intel_pre_enable_lvds(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
@@ -301,7 +302,8 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder,
/*
* Sets the power state for the panel.
*/
-static void intel_enable_lvds(struct intel_encoder *encoder,
+static void intel_enable_lvds(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
@@ -323,7 +325,8 @@ static void intel_enable_lvds(struct intel_encoder *encoder,
intel_panel_enable_backlight(pipe_config, conn_state);
}
-static void intel_disable_lvds(struct intel_encoder *encoder,
+static void intel_disable_lvds(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
@@ -341,28 +344,31 @@ static void intel_disable_lvds(struct intel_encoder *encoder,
intel_de_posting_read(dev_priv, lvds_encoder->reg);
}
-static void gmch_disable_lvds(struct intel_encoder *encoder,
+static void gmch_disable_lvds(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
intel_panel_disable_backlight(old_conn_state);
- intel_disable_lvds(encoder, old_crtc_state, old_conn_state);
+ intel_disable_lvds(state, encoder, old_crtc_state, old_conn_state);
}
-static void pch_disable_lvds(struct intel_encoder *encoder,
+static void pch_disable_lvds(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
intel_panel_disable_backlight(old_conn_state);
}
-static void pch_post_disable_lvds(struct intel_encoder *encoder,
+static void pch_post_disable_lvds(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- intel_disable_lvds(encoder, old_crtc_state, old_conn_state);
+ intel_disable_lvds(state, encoder, old_crtc_state, old_conn_state);
}
static enum drm_mode_status
diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c
index 481187223101..6e1d66323223 100644
--- a/drivers/gpu/drm/i915/display/intel_overlay.c
+++ b/drivers/gpu/drm/i915/display/intel_overlay.c
@@ -1342,7 +1342,7 @@ void intel_overlay_setup(struct drm_i915_private *dev_priv)
if (!HAS_OVERLAY(dev_priv))
return;
- engine = dev_priv->engine[RCS0];
+ engine = dev_priv->gt.engine[RCS0];
if (!engine || !engine->kernel_context)
return;
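
A separate cleanup rides along in intel_overlay.c: engines are now reached through the gt substructure rather than directly off the device. One-line sketch:

struct intel_engine_cs *engine = dev_priv->gt.engine[RCS0];	/* was dev_priv->engine[RCS0] */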
diff --git a/drivers/gpu/drm/i915/display/intel_panel.c b/drivers/gpu/drm/i915/display/intel_panel.c
index 276f43870802..08bfecfbe681 100644
--- a/drivers/gpu/drm/i915/display/intel_panel.c
+++ b/drivers/gpu/drm/i915/display/intel_panel.c
@@ -684,9 +684,10 @@ static void
intel_panel_actually_set_backlight(const struct drm_connector_state *conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
- DRM_DEBUG_DRIVER("set backlight PWM = %d\n", level);
+ drm_dbg_kms(&i915->drm, "set backlight PWM = %d\n", level);
level = intel_panel_compute_brightness(connector, level);
panel->backlight.set(conn_state, level);
@@ -867,8 +868,8 @@ void intel_panel_disable_backlight(const struct drm_connector_state *old_conn_st
* another client is not activated.
*/
if (dev_priv->drm.switch_power_state == DRM_SWITCH_POWER_CHANGING) {
- drm_dbg(&dev_priv->drm,
- "Skipping backlight disable on vga switch\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Skipping backlight disable on vga switch\n");
return;
}
@@ -1244,7 +1245,7 @@ static u32 intel_panel_get_backlight(struct intel_connector *connector)
mutex_unlock(&dev_priv->backlight_lock);
- drm_dbg(&dev_priv->drm, "get backlight PWM = %d\n", val);
+ drm_dbg_kms(&dev_priv->drm, "get backlight PWM = %d\n", val);
return val;
}
@@ -1335,6 +1336,7 @@ static const struct backlight_ops intel_backlight_device_ops = {
int intel_backlight_device_register(struct intel_connector *connector)
{
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
struct backlight_properties props;
@@ -1374,14 +1376,15 @@ int intel_backlight_device_register(struct intel_connector *connector)
&intel_backlight_device_ops, &props);
if (IS_ERR(panel->backlight.device)) {
- DRM_ERROR("Failed to register backlight: %ld\n",
- PTR_ERR(panel->backlight.device));
+ drm_err(&i915->drm, "Failed to register backlight: %ld\n",
+ PTR_ERR(panel->backlight.device));
panel->backlight.device = NULL;
return -ENODEV;
}
- DRM_DEBUG_KMS("Connector %s backlight sysfs interface registered\n",
- connector->base.name);
+ drm_dbg_kms(&i915->drm,
+ "Connector %s backlight sysfs interface registered\n",
+ connector->base.name);
return 0;
}
@@ -1931,7 +1934,8 @@ static int pwm_setup_backlight(struct intel_connector *connector,
return 0;
}
-void intel_panel_update_backlight(struct intel_encoder *encoder,
+void intel_panel_update_backlight(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
diff --git a/drivers/gpu/drm/i915/display/intel_panel.h b/drivers/gpu/drm/i915/display/intel_panel.h
index cedeea443336..11f2f6b628d8 100644
--- a/drivers/gpu/drm/i915/display/intel_panel.h
+++ b/drivers/gpu/drm/i915/display/intel_panel.h
@@ -37,7 +37,8 @@ int intel_panel_setup_backlight(struct drm_connector *connector,
enum pipe pipe);
void intel_panel_enable_backlight(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
-void intel_panel_update_backlight(struct intel_encoder *encoder,
+void intel_panel_update_backlight(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
void intel_panel_disable_backlight(const struct drm_connector_state *old_conn_state);
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index fd9b146e3aba..a0569fdfeb16 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -137,41 +137,42 @@ static void psr_irq_control(struct drm_i915_private *dev_priv)
intel_de_write(dev_priv, imr_reg, val);
}
-static void psr_event_print(u32 val, bool psr2_enabled)
+static void psr_event_print(struct drm_i915_private *i915,
+ u32 val, bool psr2_enabled)
{
- DRM_DEBUG_KMS("PSR exit events: 0x%x\n", val);
+ drm_dbg_kms(&i915->drm, "PSR exit events: 0x%x\n", val);
if (val & PSR_EVENT_PSR2_WD_TIMER_EXPIRE)
- DRM_DEBUG_KMS("\tPSR2 watchdog timer expired\n");
+ drm_dbg_kms(&i915->drm, "\tPSR2 watchdog timer expired\n");
if ((val & PSR_EVENT_PSR2_DISABLED) && psr2_enabled)
- DRM_DEBUG_KMS("\tPSR2 disabled\n");
+ drm_dbg_kms(&i915->drm, "\tPSR2 disabled\n");
if (val & PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN)
- DRM_DEBUG_KMS("\tSU dirty FIFO underrun\n");
+ drm_dbg_kms(&i915->drm, "\tSU dirty FIFO underrun\n");
if (val & PSR_EVENT_SU_CRC_FIFO_UNDERRUN)
- DRM_DEBUG_KMS("\tSU CRC FIFO underrun\n");
+ drm_dbg_kms(&i915->drm, "\tSU CRC FIFO underrun\n");
if (val & PSR_EVENT_GRAPHICS_RESET)
- DRM_DEBUG_KMS("\tGraphics reset\n");
+ drm_dbg_kms(&i915->drm, "\tGraphics reset\n");
if (val & PSR_EVENT_PCH_INTERRUPT)
- DRM_DEBUG_KMS("\tPCH interrupt\n");
+ drm_dbg_kms(&i915->drm, "\tPCH interrupt\n");
if (val & PSR_EVENT_MEMORY_UP)
- DRM_DEBUG_KMS("\tMemory up\n");
+ drm_dbg_kms(&i915->drm, "\tMemory up\n");
if (val & PSR_EVENT_FRONT_BUFFER_MODIFY)
- DRM_DEBUG_KMS("\tFront buffer modification\n");
+ drm_dbg_kms(&i915->drm, "\tFront buffer modification\n");
if (val & PSR_EVENT_WD_TIMER_EXPIRE)
- DRM_DEBUG_KMS("\tPSR watchdog timer expired\n");
+ drm_dbg_kms(&i915->drm, "\tPSR watchdog timer expired\n");
if (val & PSR_EVENT_PIPE_REGISTERS_UPDATE)
- DRM_DEBUG_KMS("\tPIPE registers updated\n");
+ drm_dbg_kms(&i915->drm, "\tPIPE registers updated\n");
if (val & PSR_EVENT_REGISTER_UPDATE)
- DRM_DEBUG_KMS("\tRegister updated\n");
+ drm_dbg_kms(&i915->drm, "\tRegister updated\n");
if (val & PSR_EVENT_HDCP_ENABLE)
- DRM_DEBUG_KMS("\tHDCP enabled\n");
+ drm_dbg_kms(&i915->drm, "\tHDCP enabled\n");
if (val & PSR_EVENT_KVMR_SESSION_ENABLE)
- DRM_DEBUG_KMS("\tKVMR session enabled\n");
+ drm_dbg_kms(&i915->drm, "\tKVMR session enabled\n");
if (val & PSR_EVENT_VBI_ENABLE)
- DRM_DEBUG_KMS("\tVBI enabled\n");
+ drm_dbg_kms(&i915->drm, "\tVBI enabled\n");
if (val & PSR_EVENT_LPSP_MODE_EXIT)
- DRM_DEBUG_KMS("\tLPSP mode exited\n");
+ drm_dbg_kms(&i915->drm, "\tLPSP mode exited\n");
if ((val & PSR_EVENT_PSR_DISABLE) && !psr2_enabled)
- DRM_DEBUG_KMS("\tPSR disabled\n");
+ drm_dbg_kms(&i915->drm, "\tPSR disabled\n");
}
void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
@@ -209,7 +210,7 @@ void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
intel_de_write(dev_priv, PSR_EVENT(cpu_transcoder),
val);
- psr_event_print(val, psr2_enabled);
+ psr_event_print(dev_priv, val, psr2_enabled);
}
}
@@ -249,18 +250,21 @@ static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp)
static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp)
{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
u8 val = 8; /* assume the worst if we can't read the value */
if (drm_dp_dpcd_readb(&intel_dp->aux,
DP_SYNCHRONIZATION_LATENCY_IN_SINK, &val) == 1)
val &= DP_MAX_RESYNC_FRAME_COUNT_MASK;
else
- DRM_DEBUG_KMS("Unable to get sink synchronization latency, assuming 8 frames\n");
+ drm_dbg_kms(&i915->drm,
+ "Unable to get sink synchronization latency, assuming 8 frames\n");
return val;
}
static u16 intel_dp_get_su_x_granulartiy(struct intel_dp *intel_dp)
{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
u16 val;
ssize_t r;
@@ -273,7 +277,8 @@ static u16 intel_dp_get_su_x_granulartiy(struct intel_dp *intel_dp)
r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_X_GRANULARITY, &val, 2);
if (r != 2)
- DRM_DEBUG_KMS("Unable to read DP_PSR2_SU_X_GRANULARITY\n");
+ drm_dbg_kms(&i915->drm,
+ "Unable to read DP_PSR2_SU_X_GRANULARITY\n");
/*
* Spec says that if the value read is 0 the default granularity should
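The logging conversions above follow one mechanical pattern: derive the drm_i915_private once at the top of the function, then route every message through drm_dbg_kms()/drm_err() so the output is prefixed with the originating device, which matters on multi-GPU systems. A minimal sketch of the shape (the helper name is illustrative; dp_to_i915() is the real accessor used in the hunk above):

#include <drm/drm_print.h>

static void example_log_sink_latency(struct intel_dp *intel_dp, u8 frames)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	/* device-aware replacement for the old global DRM_DEBUG_KMS() */
	drm_dbg_kms(&i915->drm, "sink sync latency: %u frames\n", frames);
}
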
diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c
index 637d8fe2f8c2..bc6c26818e15 100644
--- a/drivers/gpu/drm/i915/display/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/display/intel_sdvo.c
@@ -1430,7 +1430,8 @@ static void intel_sdvo_update_props(struct intel_sdvo *intel_sdvo,
#undef UPDATE_PROPERTY
}
-static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
+static void intel_sdvo_pre_enable(struct intel_atomic_state *state,
+ struct intel_encoder *intel_encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
@@ -1727,7 +1728,8 @@ static void intel_sdvo_enable_audio(struct intel_sdvo *intel_sdvo,
SDVO_AUDIO_PRESENCE_DETECT);
}
-static void intel_disable_sdvo(struct intel_encoder *encoder,
+static void intel_disable_sdvo(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *conn_state)
{
@@ -1775,20 +1777,23 @@ static void intel_disable_sdvo(struct intel_encoder *encoder,
}
}
-static void pch_disable_sdvo(struct intel_encoder *encoder,
+static void pch_disable_sdvo(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
}
-static void pch_post_disable_sdvo(struct intel_encoder *encoder,
+static void pch_post_disable_sdvo(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- intel_disable_sdvo(encoder, old_crtc_state, old_conn_state);
+ intel_disable_sdvo(state, encoder, old_crtc_state, old_conn_state);
}
-static void intel_enable_sdvo(struct intel_encoder *encoder,
+static void intel_enable_sdvo(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
@@ -1934,12 +1939,11 @@ static void intel_sdvo_enable_hotplug(struct intel_encoder *encoder)
static enum intel_hotplug_state
intel_sdvo_hotplug(struct intel_encoder *encoder,
- struct intel_connector *connector,
- bool irq_received)
+ struct intel_connector *connector)
{
intel_sdvo_enable_hotplug(encoder);
- return intel_encoder_hotplug(encoder, connector, irq_received);
+ return intel_encoder_hotplug(encoder, connector);
}
static bool
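Every encoder enable/disable hook in this series gains struct intel_atomic_state * as a new first parameter, and intel_sdvo_hotplug() drops the irq_received argument to match the updated intel_encoder_hotplug() prototype. A sketch of the new hook shape (the function name is hypothetical):

static void example_encoder_enable(struct intel_atomic_state *state,
				   struct intel_encoder *encoder,
				   const struct intel_crtc_state *crtc_state,
				   const struct drm_connector_state *conn_state)
{
	/*
	 * With the commit-wide atomic state in hand, a hook can look up
	 * other objects' old/new state during the commit; hooks that do
	 * not need it, like the SDVO ones above, simply ignore it.
	 */
}
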
diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c
index deda351719db..0000ec7055f7 100644
--- a/drivers/gpu/drm/i915/display/intel_sprite.c
+++ b/drivers/gpu/drm/i915/display/intel_sprite.c
@@ -2503,6 +2503,7 @@ static const u32 skl_plane_formats[] = {
DRM_FORMAT_YVYU,
DRM_FORMAT_UYVY,
DRM_FORMAT_VYUY,
+ DRM_FORMAT_XYUV8888,
};
static const u32 skl_planar_formats[] = {
@@ -2521,6 +2522,7 @@ static const u32 skl_planar_formats[] = {
DRM_FORMAT_UYVY,
DRM_FORMAT_VYUY,
DRM_FORMAT_NV12,
+ DRM_FORMAT_XYUV8888,
};
static const u32 glk_planar_formats[] = {
@@ -2539,6 +2541,7 @@ static const u32 glk_planar_formats[] = {
DRM_FORMAT_UYVY,
DRM_FORMAT_VYUY,
DRM_FORMAT_NV12,
+ DRM_FORMAT_XYUV8888,
DRM_FORMAT_P010,
DRM_FORMAT_P012,
DRM_FORMAT_P016,
@@ -2562,6 +2565,7 @@ static const u32 icl_sdr_y_plane_formats[] = {
DRM_FORMAT_Y210,
DRM_FORMAT_Y212,
DRM_FORMAT_Y216,
+ DRM_FORMAT_XYUV8888,
DRM_FORMAT_XVYU2101010,
DRM_FORMAT_XVYU12_16161616,
DRM_FORMAT_XVYU16161616,
@@ -2589,6 +2593,7 @@ static const u32 icl_sdr_uv_plane_formats[] = {
DRM_FORMAT_Y210,
DRM_FORMAT_Y212,
DRM_FORMAT_Y216,
+ DRM_FORMAT_XYUV8888,
DRM_FORMAT_XVYU2101010,
DRM_FORMAT_XVYU12_16161616,
DRM_FORMAT_XVYU16161616,
@@ -2620,6 +2625,7 @@ static const u32 icl_hdr_plane_formats[] = {
DRM_FORMAT_Y210,
DRM_FORMAT_Y212,
DRM_FORMAT_Y216,
+ DRM_FORMAT_XYUV8888,
DRM_FORMAT_XVYU2101010,
DRM_FORMAT_XVYU12_16161616,
DRM_FORMAT_XVYU16161616,
@@ -2790,6 +2796,7 @@ static bool skl_plane_format_mod_supported(struct drm_plane *_plane,
case DRM_FORMAT_UYVY:
case DRM_FORMAT_VYUY:
case DRM_FORMAT_NV12:
+ case DRM_FORMAT_XYUV8888:
case DRM_FORMAT_P010:
case DRM_FORMAT_P012:
case DRM_FORMAT_P016:
@@ -2817,19 +2824,25 @@ static bool skl_plane_format_mod_supported(struct drm_plane *_plane,
}
}
-static bool gen12_plane_supports_mc_ccs(enum plane_id plane_id)
+static bool gen12_plane_supports_mc_ccs(struct drm_i915_private *dev_priv,
+ enum plane_id plane_id)
{
+ /* Wa_14010477008:tgl[a0..c0] */
+ if (IS_TGL_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_C0))
+ return false;
+
return plane_id < PLANE_SPRITE4;
}
static bool gen12_plane_format_mod_supported(struct drm_plane *_plane,
u32 format, u64 modifier)
{
+ struct drm_i915_private *dev_priv = to_i915(_plane->dev);
struct intel_plane *plane = to_intel_plane(_plane);
switch (modifier) {
case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
- if (!gen12_plane_supports_mc_ccs(plane->id))
+ if (!gen12_plane_supports_mc_ccs(dev_priv, plane->id))
return false;
/* fall through */
case DRM_FORMAT_MOD_LINEAR:
@@ -2854,6 +2867,7 @@ static bool gen12_plane_format_mod_supported(struct drm_plane *_plane,
case DRM_FORMAT_UYVY:
case DRM_FORMAT_VYUY:
case DRM_FORMAT_NV12:
+ case DRM_FORMAT_XYUV8888:
case DRM_FORMAT_P010:
case DRM_FORMAT_P012:
case DRM_FORMAT_P016:
@@ -2998,9 +3012,10 @@ static const u32 *icl_get_plane_formats(struct drm_i915_private *dev_priv,
}
}
-static const u64 *gen12_get_plane_modifiers(enum plane_id plane_id)
+static const u64 *gen12_get_plane_modifiers(struct drm_i915_private *dev_priv,
+ enum plane_id plane_id)
{
- if (gen12_plane_supports_mc_ccs(plane_id))
+ if (gen12_plane_supports_mc_ccs(dev_priv, plane_id))
return gen12_plane_format_modifiers_mc_ccs;
else
return gen12_plane_format_modifiers_rc_ccs;
@@ -3070,7 +3085,7 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv,
plane->has_ccs = skl_plane_has_ccs(dev_priv, pipe, plane_id);
if (INTEL_GEN(dev_priv) >= 12) {
- modifiers = gen12_get_plane_modifiers(plane_id);
+ modifiers = gen12_get_plane_modifiers(dev_priv, plane_id);
plane_funcs = &gen12_plane_funcs;
} else {
if (plane->has_ccs)
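Threading dev_priv into gen12_plane_supports_mc_ccs() exists solely to apply Wa_14010477008, which disables multi-colour CCS on early Tiger Lake steppings; the modifier list and the format checks then inherit the restriction automatically. A sketch of the revision-bounded gate in isolation (helper name hypothetical, macro as used in the hunk above):

static bool example_supports_mc_ccs(struct drm_i915_private *i915)
{
	/* Wa_14010477008:tgl[a0..c0] -- only early steppings are affected */
	if (IS_TGL_REVID(i915, TGL_REVID_A0, TGL_REVID_C0))
		return false;

	return true;
}
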
diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c
index 9b850c11aa78..275618bedf32 100644
--- a/drivers/gpu/drm/i915/display/intel_tc.c
+++ b/drivers/gpu/drm/i915/display/intel_tc.c
@@ -152,6 +152,7 @@ void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port,
static void tc_port_fixup_legacy_flag(struct intel_digital_port *dig_port,
u32 live_status_mask)
{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
u32 valid_hpd_mask;
if (dig_port->tc_legacy_port)
@@ -164,8 +165,9 @@ static void tc_port_fixup_legacy_flag(struct intel_digital_port *dig_port,
return;
/* If live status mismatches the VBT flag, trust the live status. */
- DRM_ERROR("Port %s: live status %08x mismatches the legacy port flag, fix flag\n",
- dig_port->tc_port_name, live_status_mask);
+ drm_err(&i915->drm,
+ "Port %s: live status %08x mismatches the legacy port flag, fix flag\n",
+ dig_port->tc_port_name, live_status_mask);
dig_port->tc_legacy_port = !dig_port->tc_legacy_port;
}
@@ -233,8 +235,7 @@ static bool icl_tc_phy_set_safe_mode(struct intel_digital_port *dig_port,
if (val == 0xffffffff) {
drm_dbg_kms(&i915->drm,
"Port %s: PHY in TCCOLD, can't set safe-mode to %s\n",
- dig_port->tc_port_name,
- enableddisabled(enable));
+ dig_port->tc_port_name, enableddisabled(enable));
return false;
}
@@ -286,11 +287,12 @@ static bool icl_tc_phy_is_in_safe_mode(struct intel_digital_port *dig_port)
static void icl_tc_phy_connect(struct intel_digital_port *dig_port,
int required_lanes)
{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
int max_lanes;
if (!icl_tc_phy_status_complete(dig_port)) {
- DRM_DEBUG_KMS("Port %s: PHY not ready\n",
- dig_port->tc_port_name);
+ drm_dbg_kms(&i915->drm, "Port %s: PHY not ready\n",
+ dig_port->tc_port_name);
goto out_set_tbt_alt_mode;
}
@@ -311,15 +313,16 @@ static void icl_tc_phy_connect(struct intel_digital_port *dig_port,
* became disconnected. Not necessary for legacy mode.
*/
if (!(tc_port_live_status_mask(dig_port) & BIT(TC_PORT_DP_ALT))) {
- DRM_DEBUG_KMS("Port %s: PHY sudden disconnect\n",
- dig_port->tc_port_name);
+ drm_dbg_kms(&i915->drm, "Port %s: PHY sudden disconnect\n",
+ dig_port->tc_port_name);
goto out_set_safe_mode;
}
if (max_lanes < required_lanes) {
- DRM_DEBUG_KMS("Port %s: PHY max lanes %d < required lanes %d\n",
- dig_port->tc_port_name,
- max_lanes, required_lanes);
+ drm_dbg_kms(&i915->drm,
+ "Port %s: PHY max lanes %d < required lanes %d\n",
+ dig_port->tc_port_name,
+ max_lanes, required_lanes);
goto out_set_safe_mode;
}
@@ -357,15 +360,17 @@ static void icl_tc_phy_disconnect(struct intel_digital_port *dig_port)
static bool icl_tc_phy_is_connected(struct intel_digital_port *dig_port)
{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+
if (!icl_tc_phy_status_complete(dig_port)) {
- DRM_DEBUG_KMS("Port %s: PHY status not complete\n",
- dig_port->tc_port_name);
+ drm_dbg_kms(&i915->drm, "Port %s: PHY status not complete\n",
+ dig_port->tc_port_name);
return dig_port->tc_mode == TC_PORT_TBT_ALT;
}
if (icl_tc_phy_is_in_safe_mode(dig_port)) {
- DRM_DEBUG_KMS("Port %s: PHY still in safe mode\n",
- dig_port->tc_port_name);
+ drm_dbg_kms(&i915->drm, "Port %s: PHY still in safe mode\n",
+ dig_port->tc_port_name);
return false;
}
@@ -438,6 +443,7 @@ intel_tc_port_link_init_refcount(struct intel_digital_port *dig_port,
void intel_tc_port_sanitize(struct intel_digital_port *dig_port)
{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
struct intel_encoder *encoder = &dig_port->base;
int active_links = 0;
@@ -451,8 +457,9 @@ void intel_tc_port_sanitize(struct intel_digital_port *dig_port)
if (active_links) {
if (!icl_tc_phy_is_connected(dig_port))
- DRM_DEBUG_KMS("Port %s: PHY disconnected with %d active link(s)\n",
- dig_port->tc_port_name, active_links);
+ drm_dbg_kms(&i915->drm,
+ "Port %s: PHY disconnected with %d active link(s)\n",
+ dig_port->tc_port_name, active_links);
intel_tc_port_link_init_refcount(dig_port, active_links);
goto out;
@@ -462,9 +469,9 @@ void intel_tc_port_sanitize(struct intel_digital_port *dig_port)
icl_tc_phy_connect(dig_port, 1);
out:
- DRM_DEBUG_KMS("Port %s: sanitize mode (%s)\n",
- dig_port->tc_port_name,
- tc_port_mode_name(dig_port->tc_mode));
+ drm_dbg_kms(&i915->drm, "Port %s: sanitize mode (%s)\n",
+ dig_port->tc_port_name,
+ tc_port_mode_name(dig_port->tc_mode));
mutex_unlock(&dig_port->tc_lock);
}
diff --git a/drivers/gpu/drm/i915/display/intel_tv.c b/drivers/gpu/drm/i915/display/intel_tv.c
index d2e3a3a323e9..fbe12aad7d58 100644
--- a/drivers/gpu/drm/i915/display/intel_tv.c
+++ b/drivers/gpu/drm/i915/display/intel_tv.c
@@ -914,7 +914,8 @@ intel_tv_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe)
}
static void
-intel_enable_tv(struct intel_encoder *encoder,
+intel_enable_tv(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
@@ -930,7 +931,8 @@ intel_enable_tv(struct intel_encoder *encoder,
}
static void
-intel_disable_tv(struct intel_encoder *encoder,
+intel_disable_tv(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
@@ -1414,7 +1416,8 @@ static void set_color_conversion(struct drm_i915_private *dev_priv,
(color_conversion->bv << 16) | color_conversion->av);
}
-static void intel_tv_pre_enable(struct intel_encoder *encoder,
+static void intel_tv_pre_enable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
@@ -1698,13 +1701,13 @@ intel_tv_detect(struct drm_connector *connector,
struct drm_modeset_acquire_ctx *ctx,
bool force)
{
+ struct drm_i915_private *i915 = to_i915(connector->dev);
struct intel_tv *intel_tv = intel_attached_tv(to_intel_connector(connector));
enum drm_connector_status status;
int type;
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s] force=%d\n",
- connector->base.id, connector->name,
- force);
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] force=%d\n",
+ connector->base.id, connector->name, force);
if (force) {
struct intel_load_detect_pipe tmp;
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c
index f4c362dc6e15..4e18d4627065 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi.c
@@ -759,7 +759,8 @@ static void intel_dsi_unprepare(struct intel_encoder *encoder);
* DSI port enable has to be done before pipe and plane enable, so we do it in
* the pre_enable hook instead of the enable hook.
*/
-static void intel_dsi_pre_enable(struct intel_encoder *encoder,
+static void intel_dsi_pre_enable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
@@ -858,7 +859,8 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder,
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_ON);
}
-static void bxt_dsi_enable(struct intel_encoder *encoder,
+static void bxt_dsi_enable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
@@ -871,14 +873,16 @@ static void bxt_dsi_enable(struct intel_encoder *encoder,
* DSI port disable has to be done after pipe and plane disable, so we do it in
* the post_disable hook.
*/
-static void intel_dsi_disable(struct intel_encoder *encoder,
+static void intel_dsi_disable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
- DRM_DEBUG_KMS("\n");
+ drm_dbg_kms(&i915->drm, "\n");
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_OFF);
intel_panel_disable_backlight(old_conn_state);
@@ -906,7 +910,8 @@ static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
vlv_dsi_clear_device_ready(encoder);
}
-static void intel_dsi_post_disable(struct intel_encoder *encoder,
+static void intel_dsi_post_disable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index 026999b34abd..11d9135cf21a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -570,23 +570,19 @@ static void engines_idle_release(struct i915_gem_context *ctx,
engines->ctx = i915_gem_context_get(ctx);
for_each_gem_engine(ce, engines, it) {
- struct dma_fence *fence;
- int err = 0;
+ int err;
/* serialises with execbuf */
- RCU_INIT_POINTER(ce->gem_context, NULL);
+ set_bit(CONTEXT_CLOSED_BIT, &ce->flags);
if (!intel_context_pin_if_active(ce))
continue;
- fence = i915_active_fence_get(&ce->timeline->last_request);
- if (fence) {
- err = i915_sw_fence_await_dma_fence(&engines->fence,
- fence, 0,
- GFP_KERNEL);
- dma_fence_put(fence);
- }
+ /* Wait until context is finally scheduled out and retired */
+ err = i915_sw_fence_await_active(&engines->fence,
+ &ce->active,
+ I915_ACTIVE_AWAIT_BARRIER);
intel_context_unpin(ce);
- if (err < 0)
+ if (err)
goto kill;
}
@@ -757,21 +753,46 @@ err_free:
return ERR_PTR(err);
}
+static inline struct i915_gem_engines *
+__context_engines_await(const struct i915_gem_context *ctx)
+{
+ struct i915_gem_engines *engines;
+
+ rcu_read_lock();
+ do {
+ engines = rcu_dereference(ctx->engines);
+ GEM_BUG_ON(!engines);
+
+ if (unlikely(!i915_sw_fence_await(&engines->fence)))
+ continue;
+
+ if (likely(engines == rcu_access_pointer(ctx->engines)))
+ break;
+
+ i915_sw_fence_complete(&engines->fence);
+ } while (1);
+ rcu_read_unlock();
+
+ return engines;
+}
+
static int
context_apply_all(struct i915_gem_context *ctx,
int (*fn)(struct intel_context *ce, void *data),
void *data)
{
struct i915_gem_engines_iter it;
+ struct i915_gem_engines *e;
struct intel_context *ce;
int err = 0;
- for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
+ e = __context_engines_await(ctx);
+ for_each_gem_engine(ce, e, it) {
err = fn(ce, data);
if (err)
break;
}
- i915_gem_context_unlock_engines(ctx);
+ i915_sw_fence_complete(&e->fence);
return err;
}
@@ -786,11 +807,13 @@ static int __apply_ppgtt(struct intel_context *ce, void *vm)
static struct i915_address_space *
__set_ppgtt(struct i915_gem_context *ctx, struct i915_address_space *vm)
{
- struct i915_address_space *old = i915_gem_context_vm(ctx);
+ struct i915_address_space *old;
+ old = rcu_replace_pointer(ctx->vm,
+ i915_vm_open(vm),
+ lockdep_is_held(&ctx->mutex));
GEM_BUG_ON(old && i915_vm_is_4lvl(vm) != i915_vm_is_4lvl(old));
- rcu_assign_pointer(ctx->vm, i915_vm_open(vm));
context_apply_all(ctx, __apply_ppgtt, vm);
return old;
@@ -1069,30 +1092,6 @@ static void cb_retire(struct i915_active *base)
kfree(cb);
}
-static inline struct i915_gem_engines *
-__context_engines_await(const struct i915_gem_context *ctx)
-{
- struct i915_gem_engines *engines;
-
- rcu_read_lock();
- do {
- engines = rcu_dereference(ctx->engines);
- if (unlikely(!engines))
- break;
-
- if (unlikely(!i915_sw_fence_await(&engines->fence)))
- continue;
-
- if (likely(engines == rcu_access_pointer(ctx->engines)))
- break;
-
- i915_sw_fence_complete(&engines->fence);
- } while (1);
- rcu_read_unlock();
-
- return engines;
-}
-
I915_SELFTEST_DECLARE(static intel_engine_mask_t context_barrier_inject_fault);
static int context_barrier_task(struct i915_gem_context *ctx,
intel_engine_mask_t engines,
@@ -1401,10 +1400,10 @@ static int get_ringsize(struct i915_gem_context *ctx,
return 0;
}
-static int
-user_to_context_sseu(struct drm_i915_private *i915,
- const struct drm_i915_gem_context_param_sseu *user,
- struct intel_sseu *context)
+int
+i915_gem_user_to_context_sseu(struct drm_i915_private *i915,
+ const struct drm_i915_gem_context_param_sseu *user,
+ struct intel_sseu *context)
{
const struct sseu_dev_info *device = &RUNTIME_INFO(i915)->sseu;
@@ -1539,7 +1538,7 @@ static int set_sseu(struct i915_gem_context *ctx,
goto out_ce;
}
- ret = user_to_context_sseu(i915, &user_sseu, &sseu);
+ ret = i915_gem_user_to_context_sseu(i915, &user_sseu, &sseu);
if (ret)
goto out_ce;
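__context_engines_await() is the load-bearing piece here: it takes a stable reference to the engines array while holding only the RCU read lock. The trick is acquire-then-recheck; if the pointer changed between the dereference and the fence await, the reference is dropped and the loop retries. A stand-alone sketch of the same pattern using a kref in place of the i915_sw_fence (all names hypothetical; assumes the slot is never NULL, matching the GEM_BUG_ON above):

#include <linux/kref.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct snapshot {
	struct kref ref;
};

static void snapshot_release(struct kref *ref)
{
	kfree(container_of(ref, struct snapshot, ref));
}

static struct snapshot *snapshot_await(struct snapshot __rcu **slot)
{
	struct snapshot *s;

	rcu_read_lock();
	do {
		s = rcu_dereference(*slot);

		/* object already dying? a replacement is being installed */
		if (!kref_get_unless_zero(&s->ref))
			continue;

		/* recheck: did the slot change under us? */
		if (likely(s == rcu_access_pointer(*slot)))
			break;

		kref_put(&s->ref, snapshot_release);
	} while (1);
	rcu_read_unlock();

	return s;
}
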
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.h b/drivers/gpu/drm/i915/gem/i915_gem_context.h
index 57b7ae2893e1..3702b2fb27ab 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.h
@@ -192,12 +192,16 @@ i915_gem_context_unlock_engines(struct i915_gem_context *ctx)
static inline struct intel_context *
i915_gem_context_get_engine(struct i915_gem_context *ctx, unsigned int idx)
{
- struct intel_context *ce = ERR_PTR(-EINVAL);
+ struct intel_context *ce;
rcu_read_lock(); {
struct i915_gem_engines *e = rcu_dereference(ctx->engines);
- if (likely(idx < e->num_engines && e->engines[idx]))
+ if (unlikely(!e)) /* context was closed! */
+ ce = ERR_PTR(-ENOENT);
+ else if (likely(idx < e->num_engines && e->engines[idx]))
ce = intel_context_get(e->engines[idx]);
+ else
+ ce = ERR_PTR(-EINVAL);
} rcu_read_unlock();
return ce;
@@ -221,4 +225,8 @@ i915_gem_engines_iter_next(struct i915_gem_engines_iter *it);
struct i915_lut_handle *i915_lut_handle_alloc(void);
void i915_lut_handle_free(struct i915_lut_handle *lut);
+int i915_gem_user_to_context_sseu(struct drm_i915_private *i915,
+ const struct drm_i915_gem_context_param_sseu *user,
+ struct intel_sseu *context);
+
#endif /* !__I915_GEM_CONTEXT_H__ */
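The reworked i915_gem_context_get_engine() lets callers tell a racing context close (-ENOENT, engines already torn down) apart from a bad index (-EINVAL, a userspace bug). A hypothetical caller exploiting the split:

static int example_use_engine(struct i915_gem_context *ctx, unsigned int idx)
{
	struct intel_context *ce;

	ce = i915_gem_context_get_engine(ctx, idx);
	if (IS_ERR(ce)) {
		/* treat a concurrent close as benign, propagate the rest */
		return PTR_ERR(ce) == -ENOENT ? 0 : PTR_ERR(ce);
	}

	/* ... use ce ... */
	intel_context_put(ce);
	return 0;
}
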
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_domain.c b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
index 0cc40e77bbd2..af43e82f45c7 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_domain.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
@@ -369,7 +369,7 @@ static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
struct i915_vma *vma;
GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
- if (!atomic_read(&obj->bind_count))
+ if (list_empty(&obj->vma.list))
return;
mutex_lock(&i915->ggtt.vm.mutex);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index d3f4f28e9468..517898aa634c 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -40,6 +40,11 @@ struct eb_vma {
u32 handle;
};
+struct eb_vma_array {
+ struct kref kref;
+ struct eb_vma vma[];
+};
+
enum {
FORCE_CPU_RELOC = 1,
FORCE_GTT_RELOC,
@@ -52,7 +57,6 @@ enum {
#define __EXEC_OBJECT_NEEDS_MAP BIT(29)
#define __EXEC_OBJECT_NEEDS_BIAS BIT(28)
#define __EXEC_OBJECT_INTERNAL_FLAGS (~0u << 28) /* all of the above */
-#define __EXEC_OBJECT_RESERVED (__EXEC_OBJECT_HAS_PIN | __EXEC_OBJECT_HAS_FENCE)
#define __EXEC_HAS_RELOC BIT(31)
#define __EXEC_INTERNAL_FLAGS (~0u << 31)
@@ -283,6 +287,7 @@ struct i915_execbuffer {
*/
int lut_size;
struct hlist_head *buckets; /** ht for relocation handles */
+ struct eb_vma_array *array;
};
static inline bool eb_use_cmdparser(const struct i915_execbuffer *eb)
@@ -292,8 +297,62 @@ static inline bool eb_use_cmdparser(const struct i915_execbuffer *eb)
eb->args->batch_len);
}
+static struct eb_vma_array *eb_vma_array_create(unsigned int count)
+{
+ struct eb_vma_array *arr;
+
+ arr = kvmalloc(struct_size(arr, vma, count), GFP_KERNEL | __GFP_NOWARN);
+ if (!arr)
+ return NULL;
+
+ kref_init(&arr->kref);
+ arr->vma[0].vma = NULL;
+
+ return arr;
+}
+
+static inline void eb_unreserve_vma(struct eb_vma *ev)
+{
+ struct i915_vma *vma = ev->vma;
+
+ if (unlikely(ev->flags & __EXEC_OBJECT_HAS_FENCE))
+ __i915_vma_unpin_fence(vma);
+
+ if (ev->flags & __EXEC_OBJECT_HAS_PIN)
+ __i915_vma_unpin(vma);
+
+ ev->flags &= ~(__EXEC_OBJECT_HAS_PIN |
+ __EXEC_OBJECT_HAS_FENCE);
+}
+
+static void eb_vma_array_destroy(struct kref *kref)
+{
+ struct eb_vma_array *arr = container_of(kref, typeof(*arr), kref);
+ struct eb_vma *ev = arr->vma;
+
+ while (ev->vma) {
+ eb_unreserve_vma(ev);
+ i915_vma_put(ev->vma);
+ ev++;
+ }
+
+ kvfree(arr);
+}
+
+static void eb_vma_array_put(struct eb_vma_array *arr)
+{
+ kref_put(&arr->kref, eb_vma_array_destroy);
+}
+
static int eb_create(struct i915_execbuffer *eb)
{
+ /* Allocate an extra slot for use by the command parser + sentinel */
+ eb->array = eb_vma_array_create(eb->buffer_count + 2);
+ if (!eb->array)
+ return -ENOMEM;
+
+ eb->vma = eb->array->vma;
+
if (!(eb->args->flags & I915_EXEC_HANDLE_LUT)) {
unsigned int size = 1 + ilog2(eb->buffer_count);
@@ -327,8 +386,10 @@ static int eb_create(struct i915_execbuffer *eb)
break;
} while (--size);
- if (unlikely(!size))
+ if (unlikely(!size)) {
+ eb_vma_array_put(eb->array);
return -ENOMEM;
+ }
eb->lut_size = size;
} else {
@@ -368,6 +429,32 @@ eb_vma_misplaced(const struct drm_i915_gem_exec_object2 *entry,
return false;
}
+static u64 eb_pin_flags(const struct drm_i915_gem_exec_object2 *entry,
+ unsigned int exec_flags)
+{
+ u64 pin_flags = 0;
+
+ if (exec_flags & EXEC_OBJECT_NEEDS_GTT)
+ pin_flags |= PIN_GLOBAL;
+
+ /*
+ * Wa32bitGeneralStateOffset & Wa32bitInstructionBaseOffset,
+ * limit address to the first 4GBs for unflagged objects.
+ */
+ if (!(exec_flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS))
+ pin_flags |= PIN_ZONE_4G;
+
+ if (exec_flags & __EXEC_OBJECT_NEEDS_MAP)
+ pin_flags |= PIN_MAPPABLE;
+
+ if (exec_flags & EXEC_OBJECT_PINNED)
+ pin_flags |= entry->offset | PIN_OFFSET_FIXED;
+ else if (exec_flags & __EXEC_OBJECT_NEEDS_BIAS)
+ pin_flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;
+
+ return pin_flags;
+}
+
static inline bool
eb_pin_vma(struct i915_execbuffer *eb,
const struct drm_i915_gem_exec_object2 *entry,
@@ -385,8 +472,19 @@ eb_pin_vma(struct i915_execbuffer *eb,
if (unlikely(ev->flags & EXEC_OBJECT_NEEDS_GTT))
pin_flags |= PIN_GLOBAL;
- if (unlikely(i915_vma_pin(vma, 0, 0, pin_flags)))
- return false;
+ /* Attempt to reuse the current location if available */
+ if (unlikely(i915_vma_pin(vma, 0, 0, pin_flags))) {
+ if (entry->flags & EXEC_OBJECT_PINNED)
+ return false;
+
+		/* Failing that, pick any _free_ space if suitable */
+ if (unlikely(i915_vma_pin(vma,
+ entry->pad_to_size,
+ entry->alignment,
+ eb_pin_flags(entry, ev->flags) |
+ PIN_USER | PIN_NOEVICT)))
+ return false;
+ }
if (unlikely(ev->flags & EXEC_OBJECT_NEEDS_FENCE)) {
if (unlikely(i915_vma_pin_fence(vma))) {
@@ -402,26 +500,6 @@ eb_pin_vma(struct i915_execbuffer *eb,
return !eb_vma_misplaced(entry, vma, ev->flags);
}
-static inline void __eb_unreserve_vma(struct i915_vma *vma, unsigned int flags)
-{
- GEM_BUG_ON(!(flags & __EXEC_OBJECT_HAS_PIN));
-
- if (unlikely(flags & __EXEC_OBJECT_HAS_FENCE))
- __i915_vma_unpin_fence(vma);
-
- __i915_vma_unpin(vma);
-}
-
-static inline void
-eb_unreserve_vma(struct eb_vma *ev)
-{
- if (!(ev->flags & __EXEC_OBJECT_HAS_PIN))
- return;
-
- __eb_unreserve_vma(ev->vma, ev->flags);
- ev->flags &= ~__EXEC_OBJECT_RESERVED;
-}
-
static int
eb_validate_vma(struct i915_execbuffer *eb,
struct drm_i915_gem_exec_object2 *entry,
@@ -481,7 +559,7 @@ eb_add_vma(struct i915_execbuffer *eb,
GEM_BUG_ON(i915_vma_is_closed(vma));
- ev->vma = i915_vma_get(vma);
+ ev->vma = vma;
ev->exec = entry;
ev->flags = entry->flags;
@@ -547,28 +625,9 @@ static int eb_reserve_vma(const struct i915_execbuffer *eb,
u64 pin_flags)
{
struct drm_i915_gem_exec_object2 *entry = ev->exec;
- unsigned int exec_flags = ev->flags;
struct i915_vma *vma = ev->vma;
int err;
- if (exec_flags & EXEC_OBJECT_NEEDS_GTT)
- pin_flags |= PIN_GLOBAL;
-
- /*
- * Wa32bitGeneralStateOffset & Wa32bitInstructionBaseOffset,
- * limit address to the first 4GBs for unflagged objects.
- */
- if (!(exec_flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS))
- pin_flags |= PIN_ZONE_4G;
-
- if (exec_flags & __EXEC_OBJECT_NEEDS_MAP)
- pin_flags |= PIN_MAPPABLE;
-
- if (exec_flags & EXEC_OBJECT_PINNED)
- pin_flags |= entry->offset | PIN_OFFSET_FIXED;
- else if (exec_flags & __EXEC_OBJECT_NEEDS_BIAS)
- pin_flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;
-
if (drm_mm_node_allocated(&vma->node) &&
eb_vma_misplaced(entry, vma, ev->flags)) {
err = i915_vma_unbind(vma);
@@ -578,7 +637,7 @@ static int eb_reserve_vma(const struct i915_execbuffer *eb,
err = i915_vma_pin(vma,
entry->pad_to_size, entry->alignment,
- pin_flags);
+ eb_pin_flags(entry, ev->flags) | pin_flags);
if (err)
return err;
@@ -587,7 +646,7 @@ static int eb_reserve_vma(const struct i915_execbuffer *eb,
eb->args->flags |= __EXEC_HAS_RELOC;
}
- if (unlikely(exec_flags & EXEC_OBJECT_NEEDS_FENCE)) {
+ if (unlikely(ev->flags & EXEC_OBJECT_NEEDS_FENCE)) {
err = i915_vma_pin_fence(vma);
if (unlikely(err)) {
i915_vma_unpin(vma);
@@ -595,10 +654,10 @@ static int eb_reserve_vma(const struct i915_execbuffer *eb,
}
if (vma->fence)
- exec_flags |= __EXEC_OBJECT_HAS_FENCE;
+ ev->flags |= __EXEC_OBJECT_HAS_FENCE;
}
- ev->flags = exec_flags | __EXEC_OBJECT_HAS_PIN;
+ ev->flags |= __EXEC_OBJECT_HAS_PIN;
GEM_BUG_ON(eb_vma_misplaced(entry, vma, ev->flags));
return 0;
@@ -728,77 +787,117 @@ static int eb_select_context(struct i915_execbuffer *eb)
return 0;
}
-static int eb_lookup_vmas(struct i915_execbuffer *eb)
+static int __eb_add_lut(struct i915_execbuffer *eb,
+ u32 handle, struct i915_vma *vma)
{
- struct radix_tree_root *handles_vma = &eb->gem_context->handles_vma;
- struct drm_i915_gem_object *obj;
- unsigned int i, batch;
+ struct i915_gem_context *ctx = eb->gem_context;
+ struct i915_lut_handle *lut;
int err;
- if (unlikely(i915_gem_context_is_closed(eb->gem_context)))
- return -ENOENT;
+ lut = i915_lut_handle_alloc();
+ if (unlikely(!lut))
+ return -ENOMEM;
- INIT_LIST_HEAD(&eb->relocs);
- INIT_LIST_HEAD(&eb->unbound);
+ i915_vma_get(vma);
+ if (!atomic_fetch_inc(&vma->open_count))
+ i915_vma_reopen(vma);
+ lut->handle = handle;
+ lut->ctx = ctx;
+
+ /* Check that the context hasn't been closed in the meantime */
+ err = -EINTR;
+ if (!mutex_lock_interruptible(&ctx->mutex)) {
+ err = -ENOENT;
+ if (likely(!i915_gem_context_is_closed(ctx)))
+ err = radix_tree_insert(&ctx->handles_vma, handle, vma);
+ if (err == 0) { /* And nor has this handle */
+ struct drm_i915_gem_object *obj = vma->obj;
+
+ i915_gem_object_lock(obj);
+ if (idr_find(&eb->file->object_idr, handle) == obj) {
+ list_add(&lut->obj_link, &obj->lut_list);
+ } else {
+ radix_tree_delete(&ctx->handles_vma, handle);
+ err = -ENOENT;
+ }
+ i915_gem_object_unlock(obj);
+ }
+ mutex_unlock(&ctx->mutex);
+ }
+ if (unlikely(err))
+ goto err;
- batch = eb_batch_index(eb);
+ return 0;
- for (i = 0; i < eb->buffer_count; i++) {
- u32 handle = eb->exec[i].handle;
- struct i915_lut_handle *lut;
+err:
+ atomic_dec(&vma->open_count);
+ i915_vma_put(vma);
+ i915_lut_handle_free(lut);
+ return err;
+}
+
+static struct i915_vma *eb_lookup_vma(struct i915_execbuffer *eb, u32 handle)
+{
+ do {
+ struct drm_i915_gem_object *obj;
struct i915_vma *vma;
+ int err;
- vma = radix_tree_lookup(handles_vma, handle);
+ rcu_read_lock();
+ vma = radix_tree_lookup(&eb->gem_context->handles_vma, handle);
+ if (likely(vma))
+ vma = i915_vma_tryget(vma);
+ rcu_read_unlock();
if (likely(vma))
- goto add_vma;
+ return vma;
obj = i915_gem_object_lookup(eb->file, handle);
- if (unlikely(!obj)) {
- err = -ENOENT;
- goto err_vma;
- }
+ if (unlikely(!obj))
+ return ERR_PTR(-ENOENT);
vma = i915_vma_instance(obj, eb->context->vm, NULL);
if (IS_ERR(vma)) {
- err = PTR_ERR(vma);
- goto err_obj;
+ i915_gem_object_put(obj);
+ return vma;
}
- lut = i915_lut_handle_alloc();
- if (unlikely(!lut)) {
- err = -ENOMEM;
- goto err_obj;
- }
+ err = __eb_add_lut(eb, handle, vma);
+ if (likely(!err))
+ return vma;
- err = radix_tree_insert(handles_vma, handle, vma);
- if (unlikely(err)) {
- i915_lut_handle_free(lut);
- goto err_obj;
- }
+ i915_gem_object_put(obj);
+ if (err != -EEXIST)
+ return ERR_PTR(err);
+ } while (1);
+}
+
+static int eb_lookup_vmas(struct i915_execbuffer *eb)
+{
+ unsigned int batch = eb_batch_index(eb);
+ unsigned int i;
+ int err = 0;
- /* transfer ref to lut */
- if (!atomic_fetch_inc(&vma->open_count))
- i915_vma_reopen(vma);
- lut->handle = handle;
- lut->ctx = eb->gem_context;
+ INIT_LIST_HEAD(&eb->relocs);
+ INIT_LIST_HEAD(&eb->unbound);
- i915_gem_object_lock(obj);
- list_add(&lut->obj_link, &obj->lut_list);
- i915_gem_object_unlock(obj);
+ for (i = 0; i < eb->buffer_count; i++) {
+ struct i915_vma *vma;
+
+ vma = eb_lookup_vma(eb, eb->exec[i].handle);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ break;
+ }
-add_vma:
err = eb_validate_vma(eb, &eb->exec[i], vma);
- if (unlikely(err))
- goto err_vma;
+ if (unlikely(err)) {
+ i915_vma_put(vma);
+ break;
+ }
eb_add_vma(eb, i, batch, vma);
}
- return 0;
-
-err_obj:
- i915_gem_object_put(obj);
-err_vma:
eb->vma[i].vma = NULL;
return err;
}
@@ -823,31 +922,13 @@ eb_get_vma(const struct i915_execbuffer *eb, unsigned long handle)
}
}
-static void eb_release_vmas(const struct i915_execbuffer *eb)
-{
- const unsigned int count = eb->buffer_count;
- unsigned int i;
-
- for (i = 0; i < count; i++) {
- struct eb_vma *ev = &eb->vma[i];
- struct i915_vma *vma = ev->vma;
-
- if (!vma)
- break;
-
- eb->vma[i].vma = NULL;
-
- if (ev->flags & __EXEC_OBJECT_HAS_PIN)
- __eb_unreserve_vma(vma, ev->flags);
-
- i915_vma_put(vma);
- }
-}
-
static void eb_destroy(const struct i915_execbuffer *eb)
{
GEM_BUG_ON(eb->reloc_cache.rq);
+ if (eb->array)
+ eb_vma_array_put(eb->array);
+
if (eb->lut_size > 0)
kfree(eb->buckets);
}
@@ -896,11 +977,13 @@ static inline struct i915_ggtt *cache_to_ggtt(struct reloc_cache *cache)
static void reloc_gpu_flush(struct reloc_cache *cache)
{
- GEM_BUG_ON(cache->rq_size >= cache->rq->batch->obj->base.size / sizeof(u32));
+ struct drm_i915_gem_object *obj = cache->rq->batch->obj;
+
+ GEM_BUG_ON(cache->rq_size >= obj->base.size / sizeof(u32));
cache->rq_cmd[cache->rq_size] = MI_BATCH_BUFFER_END;
- __i915_gem_object_flush_map(cache->rq->batch->obj, 0, cache->rq_size);
- i915_gem_object_unpin_map(cache->rq->batch->obj);
+ __i915_gem_object_flush_map(obj, 0, sizeof(u32) * (cache->rq_size + 1));
+ i915_gem_object_unpin_map(obj);
intel_gt_chipset_flush(cache->rq->engine->gt);
@@ -1218,6 +1301,17 @@ static u32 *reloc_gpu(struct i915_execbuffer *eb,
return cmd;
}
+static inline bool use_reloc_gpu(struct i915_vma *vma)
+{
+ if (DBG_FORCE_RELOC == FORCE_GPU_RELOC)
+ return true;
+
+ if (DBG_FORCE_RELOC)
+ return false;
+
+ return !dma_resv_test_signaled_rcu(vma->resv, true);
+}
+
static u64
relocate_entry(struct i915_vma *vma,
const struct drm_i915_gem_relocation_entry *reloc,
@@ -1229,9 +1323,7 @@ relocate_entry(struct i915_vma *vma,
bool wide = eb->reloc_cache.use_64bit_reloc;
void *vaddr;
- if (!eb->reloc_cache.vaddr &&
- (DBG_FORCE_RELOC == FORCE_GPU_RELOC ||
- !dma_resv_test_signaled_rcu(vma->resv, true))) {
+ if (!eb->reloc_cache.vaddr && use_reloc_gpu(vma)) {
const unsigned int gen = eb->reloc_cache.gen;
unsigned int len;
u32 *batch;
@@ -1409,12 +1501,11 @@ static int eb_relocate_vma(struct i915_execbuffer *eb, struct eb_vma *ev)
{
#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
struct drm_i915_gem_relocation_entry stack[N_RELOC(512)];
- struct drm_i915_gem_relocation_entry __user *urelocs;
const struct drm_i915_gem_exec_object2 *entry = ev->exec;
- unsigned int remain;
+ struct drm_i915_gem_relocation_entry __user *urelocs =
+ u64_to_user_ptr(entry->relocs_ptr);
+ unsigned long remain = entry->relocation_count;
- urelocs = u64_to_user_ptr(entry->relocs_ptr);
- remain = entry->relocation_count;
if (unlikely(remain > N_RELOC(ULONG_MAX)))
return -EINVAL;
@@ -1423,13 +1514,13 @@ static int eb_relocate_vma(struct i915_execbuffer *eb, struct eb_vma *ev)
* to read. However, if the array is not writable the user loses
* the updated relocation values.
*/
- if (unlikely(!access_ok(urelocs, remain*sizeof(*urelocs))))
+ if (unlikely(!access_ok(urelocs, remain * sizeof(*urelocs))))
return -EFAULT;
do {
struct drm_i915_gem_relocation_entry *r = stack;
unsigned int count =
- min_t(unsigned int, remain, ARRAY_SIZE(stack));
+ min_t(unsigned long, remain, ARRAY_SIZE(stack));
unsigned int copied;
/*
@@ -1477,10 +1568,8 @@ static int eb_relocate_vma(struct i915_execbuffer *eb, struct eb_vma *ev)
* can read from this userspace address.
*/
offset = gen8_canonical_addr(offset & ~UPDATE);
- if (unlikely(__put_user(offset, &urelocs[r-stack].presumed_offset))) {
- remain = -EFAULT;
- goto out;
- }
+ __put_user(offset,
+ &urelocs[r - stack].presumed_offset);
}
} while (r++, --count);
urelocs += ARRAY_SIZE(stack);
@@ -1494,9 +1583,7 @@ static int eb_relocate(struct i915_execbuffer *eb)
{
int err;
- mutex_lock(&eb->gem_context->mutex);
err = eb_lookup_vmas(eb);
- mutex_unlock(&eb->gem_context->mutex);
if (err)
return err;
@@ -1597,19 +1684,15 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb)
err = i915_vma_move_to_active(vma, eb->request, flags);
i915_vma_unlock(vma);
-
- __eb_unreserve_vma(vma, flags);
- i915_vma_put(vma);
-
- ev->vma = NULL;
+ eb_unreserve_vma(ev);
}
ww_acquire_fini(&acquire);
+ eb_vma_array_put(fetch_and_zero(&eb->array));
+
if (unlikely(err))
goto err_skip;
- eb->exec = NULL;
-
/* Unconditionally flush any chipset caches (for streaming writes). */
intel_gt_chipset_flush(eb->engine->gt);
return 0;
@@ -1784,7 +1867,7 @@ static int eb_parse_pipeline(struct i915_execbuffer *eb,
dma_resv_add_excl_fence(shadow->resv, &pw->base.dma);
dma_resv_unlock(shadow->resv);
- dma_fence_work_commit(&pw->base);
+ dma_fence_work_commit_imm(&pw->base);
return 0;
err_batch_unlock:
@@ -1861,6 +1944,7 @@ static int eb_parse(struct i915_execbuffer *eb)
eb->vma[eb->buffer_count].vma = i915_vma_get(shadow);
eb->vma[eb->buffer_count].flags = __EXEC_OBJECT_HAS_PIN;
eb->batch = &eb->vma[eb->buffer_count++];
+ eb->vma[eb->buffer_count].vma = NULL;
eb->trampoline = trampoline;
eb->batch_start_offset = 0;
@@ -2316,7 +2400,7 @@ static void eb_request_add(struct i915_execbuffer *eb)
prev = __i915_request_commit(rq);
/* Check that the context wasn't destroyed before submission */
- if (likely(rcu_access_pointer(eb->context->gem_context))) {
+ if (likely(!intel_context_is_closed(eb->context))) {
attr = eb->gem_context->sched;
/*
@@ -2348,9 +2432,7 @@ static void eb_request_add(struct i915_execbuffer *eb)
__i915_request_skip(rq);
}
- local_bh_disable();
__i915_request_queue(rq, &attr);
- local_bh_enable(); /* Kick the execlists tasklet if just scheduled */
/* Try to clean up the client's timeline after submitting the request */
if (prev)
@@ -2386,8 +2468,6 @@ i915_gem_do_execbuffer(struct drm_device *dev,
args->flags |= __EXEC_HAS_RELOC;
eb.exec = exec;
- eb.vma = (struct eb_vma *)(exec + args->buffer_count + 1);
- eb.vma[0].vma = NULL;
eb.invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
reloc_cache_init(&eb.reloc_cache, eb.i915);
@@ -2594,8 +2674,6 @@ err_parse:
if (batch->private)
intel_engine_pool_put(batch->private);
err_vma:
- if (eb.exec)
- eb_release_vmas(&eb);
if (eb.trampoline)
i915_vma_unpin(eb.trampoline);
eb_unpin_engine(&eb);
@@ -2615,7 +2693,7 @@ err_in_fence:
static size_t eb_element_size(void)
{
- return sizeof(struct drm_i915_gem_exec_object2) + sizeof(struct eb_vma);
+ return sizeof(struct drm_i915_gem_exec_object2);
}
static bool check_buffer_count(size_t count)
@@ -2671,7 +2749,7 @@ i915_gem_execbuffer_ioctl(struct drm_device *dev, void *data,
/* Copy in the exec list from userland */
exec_list = kvmalloc_array(count, sizeof(*exec_list),
__GFP_NOWARN | GFP_KERNEL);
- exec2_list = kvmalloc_array(count + 1, eb_element_size(),
+ exec2_list = kvmalloc_array(count, eb_element_size(),
__GFP_NOWARN | GFP_KERNEL);
if (exec_list == NULL || exec2_list == NULL) {
drm_dbg(&i915->drm,
@@ -2749,8 +2827,7 @@ i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data,
if (err)
return err;
- /* Allocate an extra slot for use by the command parser */
- exec2_list = kvmalloc_array(count + 1, eb_element_size(),
+ exec2_list = kvmalloc_array(count, eb_element_size(),
__GFP_NOWARN | GFP_KERNEL);
if (exec2_list == NULL) {
drm_dbg(&i915->drm, "Failed to allocate exec list for %zd buffers\n",
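Most of the execbuffer churn serves one structural change: the eb_vma array moves out of the exec2_list over-allocation into its own kref-counted object, terminated by a NULL sentinel rather than a stored count, so it can be released independently of the ioctl's temporary allocations (callers reserve the extra slots up front, as eb_create() does above with buffer_count + 2 for the command parser plus the sentinel). A stand-alone sketch of the container idea (types and names hypothetical):

#include <linux/kref.h>
#include <linux/mm.h>
#include <linux/overflow.h>
#include <linux/slab.h>

struct item_array {
	struct kref kref;
	void *item[];	/* populated slots, then one NULL sentinel */
};

static struct item_array *item_array_create(unsigned int count)
{
	struct item_array *arr;

	arr = kvmalloc(struct_size(arr, item, count),
		       GFP_KERNEL | __GFP_NOWARN);
	if (!arr)
		return NULL;

	kref_init(&arr->kref);
	arr->item[0] = NULL;	/* sentinel: nothing populated yet */
	return arr;
}

static void item_array_destroy(struct kref *kref)
{
	struct item_array *arr = container_of(kref, typeof(*arr), kref);

	/* a real user walks item[] up to the sentinel, releasing each */
	kvfree(arr);
}

static void item_array_put(struct item_array *arr)
{
	kref_put(&arr->kref, item_array_destroy);
}
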
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index 5da9f9e534b9..3f01cdd1a39b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -206,7 +206,6 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
}
obj->mmo.offsets = RB_ROOT;
- GEM_BUG_ON(atomic_read(&obj->bind_count));
GEM_BUG_ON(obj->userfault_count);
GEM_BUG_ON(!list_empty(&obj->lut_list));
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index a0b10bcd8d8a..54ee658bb168 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -179,9 +179,6 @@ struct drm_i915_gem_object {
#define TILING_MASK (FENCE_MINIMUM_STRIDE - 1)
#define STRIDE_MASK (~TILING_MASK)
- /** Count of VMA actually bound by this object */
- atomic_t bind_count;
-
struct {
/*
* Protects the pages and their use. Do not use directly, but
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index 24f4cadea114..5d855fcd5c0f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -199,8 +199,6 @@ int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
if (i915_gem_object_has_pinned_pages(obj))
return -EBUSY;
- GEM_BUG_ON(atomic_read(&obj->bind_count));
-
/* May be called by shrinker from within get_pages() (on another bo) */
mutex_lock(&obj->mm.lock);
if (unlikely(atomic_read(&obj->mm.pages_pin_count))) {
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
index 03e5eb4c99d1..5b65ce738b16 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
@@ -27,18 +27,6 @@ static bool can_release_pages(struct drm_i915_gem_object *obj)
return false;
/*
- * Only report true if by unbinding the object and putting its pages
- * we can actually make forward progress towards freeing physical
- * pages.
- *
- * If the pages are pinned for any other reason than being bound
- * to the GPU, simply unbinding from the GPU is not going to succeed
- * in releasing our pin count on the pages themselves.
- */
- if (atomic_read(&obj->mm.pages_pin_count) > atomic_read(&obj->bind_count))
- return false;
-
- /*
* We can only return physical pages to the system if we can either
* discard the contents (because the user has marked them as being
* purgeable) or if we can move their contents out to swap.
@@ -54,6 +42,8 @@ static bool unsafe_drop_pages(struct drm_i915_gem_object *obj,
flags = 0;
if (shrink & I915_SHRINK_ACTIVE)
flags = I915_GEM_OBJECT_UNBIND_ACTIVE;
+ if (!(shrink & I915_SHRINK_BOUND))
+ flags = I915_GEM_OBJECT_UNBIND_TEST;
if (i915_gem_object_unbind(obj, flags) == 0)
__i915_gem_object_put_pages(obj);
@@ -194,10 +184,6 @@ i915_gem_shrink(struct drm_i915_private *i915,
i915_gem_object_is_framebuffer(obj))
continue;
- if (!(shrink & I915_SHRINK_BOUND) &&
- atomic_read(&obj->bind_count))
- continue;
-
if (!can_release_pages(obj))
continue;
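The bind_count removal runs through several files here: instead of maintaining a separate atomic that mirrors how many VMAs are bound, binding state is derived where it is needed, either directly from the object's VMA list or, in the shrinker, by asking the unbind path itself via the new I915_GEM_OBJECT_UNBIND_TEST flag. An illustrative helper for the list-based form (name hypothetical; mirrors the i915_gem_domain.c hunk above):

static bool example_object_is_bound(struct drm_i915_gem_object *obj)
{
	/* the VMA list is the single source of truth: bound iff non-empty */
	return !list_empty(&obj->vma.list);
}
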
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
index 5557dfa83a7b..dc250278bd2c 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
@@ -381,14 +381,14 @@ static int i915_gem_init_stolen(struct drm_i915_private *i915)
mutex_init(&i915->mm.stolen_lock);
if (intel_vgpu_active(i915)) {
- dev_notice(i915->drm.dev,
+ drm_notice(&i915->drm,
"%s, disabling use of stolen memory\n",
"iGVT-g active");
return 0;
}
if (intel_vtd_active() && INTEL_GEN(i915) < 8) {
- dev_notice(i915->drm.dev,
+ drm_notice(&i915->drm,
"%s, disabling use of stolen memory\n",
"DMAR active");
return 0;
diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c b/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c
index fa16f2c3f3ac..2b46c6530da9 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c
@@ -88,8 +88,7 @@ static void huge_put_pages(struct drm_i915_gem_object *obj,
}
static const struct drm_i915_gem_object_ops huge_ops = {
- .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
- I915_GEM_OBJECT_IS_SHRINKABLE,
+ .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE,
.get_pages = huge_get_pages,
.put_pages = huge_put_pages,
};
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
index 54b86cf7f5d2..f4f933240b39 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
@@ -1925,7 +1925,7 @@ static int mock_context_barrier(void *arg)
goto out;
}
- rq = igt_request_alloc(ctx, i915->engine[RCS0]);
+ rq = igt_request_alloc(ctx, i915->gt.engine[RCS0]);
if (IS_ERR(rq)) {
pr_err("Request allocation failed!\n");
goto out;
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
index 43912e9b683d..ef7abcb3f4ee 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
@@ -1156,9 +1156,6 @@ static int __igt_mmap_revoke(struct drm_i915_private *i915,
if (err)
goto out_unmap;
- GEM_BUG_ON(mmo->mmap_type == I915_MMAP_TYPE_GTT &&
- !atomic_read(&obj->bind_count));
-
err = check_present(addr, obj->base.size);
if (err) {
pr_err("%s: was not present\n", obj->mm.region->name);
@@ -1175,7 +1172,6 @@ static int __igt_mmap_revoke(struct drm_i915_private *i915,
pr_err("Failed to unbind object!\n");
goto out_unmap;
}
- GEM_BUG_ON(atomic_read(&obj->bind_count));
if (type != I915_MMAP_TYPE_GTT) {
__i915_gem_object_put_pages(obj);
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_object.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object.c
index 2b6db6f799de..faa5b6d91795 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object.c
@@ -14,7 +14,7 @@ static int igt_gem_object(void *arg)
{
struct drm_i915_private *i915 = arg;
struct drm_i915_gem_object *obj;
- int err = -ENOMEM;
+ int err;
/* Basic test to ensure we can create an object */
diff --git a/drivers/gpu/drm/i915/gt/debugfs_engines.c b/drivers/gpu/drm/i915/gt/debugfs_engines.c
index 6a5e9ab20b94..5e3725e62241 100644
--- a/drivers/gpu/drm/i915/gt/debugfs_engines.c
+++ b/drivers/gpu/drm/i915/gt/debugfs_engines.c
@@ -32,5 +32,5 @@ void debugfs_engines_register(struct intel_gt *gt, struct dentry *root)
{ "engines", &engines_fops },
};
- debugfs_gt_register_files(gt, root, files, ARRAY_SIZE(files));
+ intel_gt_debugfs_register_files(root, files, ARRAY_SIZE(files), gt);
}
diff --git a/drivers/gpu/drm/i915/gt/debugfs_gt.c b/drivers/gpu/drm/i915/gt/debugfs_gt.c
index 75255aaacaed..1de5fbaa1cf9 100644
--- a/drivers/gpu/drm/i915/gt/debugfs_gt.c
+++ b/drivers/gpu/drm/i915/gt/debugfs_gt.c
@@ -9,6 +9,7 @@
#include "debugfs_engines.h"
#include "debugfs_gt.h"
#include "debugfs_gt_pm.h"
+#include "uc/intel_uc_debugfs.h"
#include "i915_drv.h"
void debugfs_gt_register(struct intel_gt *gt)
@@ -24,17 +25,19 @@ void debugfs_gt_register(struct intel_gt *gt)
debugfs_engines_register(gt, root);
debugfs_gt_pm_register(gt, root);
+
+ intel_uc_debugfs_register(&gt->uc, root);
}
-void debugfs_gt_register_files(struct intel_gt *gt,
- struct dentry *root,
- const struct debugfs_gt_file *files,
- unsigned long count)
+void intel_gt_debugfs_register_files(struct dentry *root,
+ const struct debugfs_gt_file *files,
+ unsigned long count, void *data)
{
while (count--) {
- if (!files->eval || files->eval(gt))
+ umode_t mode = files->fops->write ? 0644 : 0444;
+ if (!files->eval || files->eval(data))
debugfs_create_file(files->name,
- 0444, root, gt,
+ mode, root, data,
files->fops);
files++;
diff --git a/drivers/gpu/drm/i915/gt/debugfs_gt.h b/drivers/gpu/drm/i915/gt/debugfs_gt.h
index 4ea0f06cda8f..f77540f727e9 100644
--- a/drivers/gpu/drm/i915/gt/debugfs_gt.h
+++ b/drivers/gpu/drm/i915/gt/debugfs_gt.h
@@ -28,12 +28,11 @@ void debugfs_gt_register(struct intel_gt *gt);
struct debugfs_gt_file {
const char *name;
const struct file_operations *fops;
- bool (*eval)(const struct intel_gt *gt);
+ bool (*eval)(void *data);
};
-void debugfs_gt_register_files(struct intel_gt *gt,
- struct dentry *root,
- const struct debugfs_gt_file *files,
- unsigned long count);
+void intel_gt_debugfs_register_files(struct dentry *root,
+ const struct debugfs_gt_file *files,
+ unsigned long count, void *data);
#endif /* DEBUGFS_GT_H */
diff --git a/drivers/gpu/drm/i915/gt/debugfs_gt_pm.c b/drivers/gpu/drm/i915/gt/debugfs_gt_pm.c
index 059c9e5c002e..aab30d908072 100644
--- a/drivers/gpu/drm/i915/gt/debugfs_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/debugfs_gt_pm.c
@@ -506,8 +506,10 @@ static int llc_show(struct seq_file *m, void *data)
return 0;
}
-static bool llc_eval(const struct intel_gt *gt)
+static bool llc_eval(void *data)
{
+ struct intel_gt *gt = data;
+
return HAS_LLC(gt->i915);
}
@@ -580,8 +582,10 @@ static int rps_boost_show(struct seq_file *m, void *data)
return 0;
}
-static bool rps_eval(const struct intel_gt *gt)
+static bool rps_eval(void *data)
{
+ struct intel_gt *gt = data;
+
return HAS_RPS(gt->i915);
}
@@ -597,5 +601,5 @@ void debugfs_gt_pm_register(struct intel_gt *gt, struct dentry *root)
{ "rps_boost", &rps_boost_fops, rps_eval },
};
- debugfs_gt_register_files(gt, root, files, ARRAY_SIZE(files));
+ intel_gt_debugfs_register_files(root, files, ARRAY_SIZE(files), gt);
}
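debugfs_gt_register_files() becomes intel_gt_debugfs_register_files(): the table now carries an opaque void *data cookie instead of hard-coding intel_gt (so the uc code can reuse it), and files that implement .write are registered 0644 rather than a blanket 0444. A hypothetical user of the generalised helper (reusing rps_boost_fops from the hunk above purely for illustration):

static bool example_eval(void *data)
{
	struct intel_gt *gt = data;

	return HAS_RPS(gt->i915);
}

static void example_register(struct intel_gt *gt, struct dentry *root)
{
	static const struct debugfs_gt_file files[] = {
		{ "example", &rps_boost_fops, example_eval },
	};

	intel_gt_debugfs_register_files(root, files, ARRAY_SIZE(files), gt);
}
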
diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
index cbad7fe722ce..cbedba857d43 100644
--- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
@@ -64,7 +64,7 @@ static void __intel_breadcrumbs_disarm_irq(struct intel_breadcrumbs *b)
if (!--b->irq_enabled)
irq_disable(engine);
- b->irq_armed = false;
+ WRITE_ONCE(b->irq_armed, false);
intel_gt_pm_put_async(engine->gt);
}
@@ -73,7 +73,7 @@ void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine)
struct intel_breadcrumbs *b = &engine->breadcrumbs;
unsigned long flags;
- if (!b->irq_armed)
+ if (!READ_ONCE(b->irq_armed))
return;
spin_lock_irqsave(&b->irq_lock, flags);
@@ -233,7 +233,7 @@ static bool __intel_breadcrumbs_arm_irq(struct intel_breadcrumbs *b)
* which we can add a new waiter and avoid the cost of re-enabling
* the irq.
*/
- b->irq_armed = true;
+ WRITE_ONCE(b->irq_armed, true);
/*
* Since we are waiting on a request, the GPU should be busy
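irq_armed is written under b->irq_lock but polled locklessly on the disarm path, so the accesses are annotated: every lockless read pairs a READ_ONCE() with a WRITE_ONCE() at the store, preventing the compiler from tearing, fusing, or re-reading the flag. A minimal sketch of the pairing (helper name hypothetical):

static bool example_maybe_disarm(struct intel_breadcrumbs *b)
{
	/* lockless early-out: pairs with WRITE_ONCE() under b->irq_lock */
	if (!READ_ONCE(b->irq_armed))
		return false;

	/* slow path: take b->irq_lock and re-examine under the lock */
	return true;
}
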
diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
index 01474d3a558b..74ddb49b2941 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.c
+++ b/drivers/gpu/drm/i915/gt/intel_context.c
@@ -97,6 +97,8 @@ int __intel_context_do_pin(struct intel_context *ce)
{
int err;
+ GEM_BUG_ON(intel_context_is_closed(ce));
+
if (unlikely(!test_bit(CONTEXT_ALLOC_BIT, &ce->flags))) {
err = intel_context_alloc_state(ce);
if (err)
@@ -112,6 +114,11 @@ int __intel_context_do_pin(struct intel_context *ce)
goto out_release;
}
+ if (unlikely(intel_context_is_closed(ce))) {
+ err = -ENOENT;
+ goto out_unlock;
+ }
+
if (likely(!atomic_add_unless(&ce->pin_count, 1, 0))) {
err = intel_context_active_acquire(ce);
if (unlikely(err))
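Pinning a closed context is now rejected twice: a GEM_BUG_ON() for callers that should already hold a reference, and a locked re-check that turns a racing close into -ENOENT. The shape is the classic check/lock/recheck idiom (sketch; assumes intel_context's pin_mutex as used in this file):

static int example_do_pin(struct intel_context *ce)
{
	int err = 0;

	GEM_BUG_ON(intel_context_is_closed(ce));

	if (mutex_lock_interruptible(&ce->pin_mutex))
		return -EINTR;

	/* the context may have been closed while we waited for the lock */
	if (unlikely(intel_context_is_closed(ce))) {
		err = -ENOENT;
		goto out_unlock;
	}

	/* ... pin for real ... */

out_unlock:
	mutex_unlock(&ce->pin_mutex);
	return err;
}
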
diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h
index 18efad255124..07be021882cc 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.h
+++ b/drivers/gpu/drm/i915/gt/intel_context.h
@@ -173,6 +173,11 @@ static inline bool intel_context_is_barrier(const struct intel_context *ce)
return test_bit(CONTEXT_BARRIER_BIT, &ce->flags);
}
+static inline bool intel_context_is_closed(const struct intel_context *ce)
+{
+ return test_bit(CONTEXT_CLOSED_BIT, &ce->flags);
+}
+
static inline bool intel_context_use_semaphores(const struct intel_context *ce)
{
return test_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h
index 0f3b68b95c56..07cb83a0d017 100644
--- a/drivers/gpu/drm/i915/gt/intel_context_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_context_types.h
@@ -62,10 +62,11 @@ struct intel_context {
#define CONTEXT_BARRIER_BIT 0
#define CONTEXT_ALLOC_BIT 1
#define CONTEXT_VALID_BIT 2
-#define CONTEXT_USE_SEMAPHORES 3
-#define CONTEXT_BANNED 4
-#define CONTEXT_FORCE_SINGLE_SUBMISSION 5
-#define CONTEXT_NOPREEMPT 6
+#define CONTEXT_CLOSED_BIT 3
+#define CONTEXT_USE_SEMAPHORES 4
+#define CONTEXT_BANNED 5
+#define CONTEXT_FORCE_SINGLE_SUBMISSION 6
+#define CONTEXT_NOPREEMPT 7
u32 *lrc_reg_state;
u64 lrc_desc;
diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h
index b469de0dd9b6..d9ee64e2ef79 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine.h
@@ -199,6 +199,8 @@ void intel_engine_cleanup(struct intel_engine_cs *engine);
int intel_engines_init_mmio(struct intel_gt *gt);
int intel_engines_init(struct intel_gt *gt);
+void intel_engine_free_request_pool(struct intel_engine_cs *engine);
+
void intel_engines_release(struct intel_gt *gt);
void intel_engines_free(struct intel_gt *gt);
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index 3aa8a652c16d..b1f8527f02c8 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -347,8 +347,6 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id)
gt->engine_class[info->class][info->instance] = engine;
gt->engine[id] = engine;
- i915->engine[id] = engine;
-
return 0;
}
@@ -425,17 +423,27 @@ void intel_engines_release(struct intel_gt *gt)
engine->release = NULL;
memset(&engine->reset, 0, sizeof(engine->reset));
-
- gt->i915->engine[id] = NULL;
}
}
+void intel_engine_free_request_pool(struct intel_engine_cs *engine)
+{
+ if (!engine->request_pool)
+ return;
+
+ kmem_cache_free(i915_request_slab_cache(), engine->request_pool);
+}
+
void intel_engines_free(struct intel_gt *gt)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
+ /* Free the requests! dma-resv keeps fences around for an eternity */
+ rcu_barrier();
+
for_each_engine(engine, gt, id) {
+ intel_engine_free_request_pool(engine);
kfree(engine);
gt->engine[id] = NULL;
}
@@ -1225,6 +1233,49 @@ static void print_request(struct drm_printer *m,
name);
}
+static struct intel_timeline *get_timeline(struct i915_request *rq)
+{
+ struct intel_timeline *tl;
+
+ /*
+ * Even though we are holding the engine->active.lock here, there
+ * is no control over the submission queue per-se and we are
+ * inspecting the active state at a random point in time, with an
+ * unknown queue. Play safe and make sure the timeline remains valid.
+ * (Only being used for pretty printing, one extra kref shouldn't
+ * cause a camel stampede!)
+ */
+ rcu_read_lock();
+ tl = rcu_dereference(rq->timeline);
+ if (!kref_get_unless_zero(&tl->kref))
+ tl = NULL;
+ rcu_read_unlock();
+
+ return tl;
+}
+
+static int print_ring(char *buf, int sz, struct i915_request *rq)
+{
+ int len = 0;
+
+ if (!i915_request_signaled(rq)) {
+ struct intel_timeline *tl = get_timeline(rq);
+
+ len = scnprintf(buf, sz,
+ "ring:{start:%08x, hwsp:%08x, seqno:%08x, runtime:%llums}, ",
+ i915_ggtt_offset(rq->ring->vma),
+ tl ? tl->hwsp_offset : 0,
+ hwsp_seqno(rq),
+ DIV_ROUND_CLOSEST_ULL(intel_context_get_total_runtime_ns(rq->context),
+ 1000 * 1000));
+
+ if (tl)
+ intel_timeline_put(tl);
+ }
+
+ return len;
+}
+
static void hexdump(struct drm_printer *m, const void *buf, size_t len)
{
const size_t rowsize = 8 * sizeof(u32);
@@ -1254,27 +1305,6 @@ static void hexdump(struct drm_printer *m, const void *buf, size_t len)
}
}
-static struct intel_timeline *get_timeline(struct i915_request *rq)
-{
- struct intel_timeline *tl;
-
- /*
- * Even though we are holding the engine->active.lock here, there
- * is no control over the submission queue per-se and we are
- * inspecting the active state at a random point in time, with an
- * unknown queue. Play safe and make sure the timeline remains valid.
- * (Only being used for pretty printing, one extra kref shouldn't
- * cause a camel stampede!)
- */
- rcu_read_lock();
- tl = rcu_dereference(rq->timeline);
- if (!kref_get_unless_zero(&tl->kref))
- tl = NULL;
- rcu_read_unlock();
-
- return tl;
-}
-
static const char *repr_timer(const struct timer_list *t)
{
if (!READ_ONCE(t->expires))
@@ -1295,6 +1325,12 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
if (engine->id == RENDER_CLASS && IS_GEN_RANGE(dev_priv, 4, 7))
drm_printf(m, "\tCCID: 0x%08x\n", ENGINE_READ(engine, CCID));
+ if (HAS_EXECLISTS(dev_priv)) {
+ drm_printf(m, "\tEL_STAT_HI: 0x%08x\n",
+ ENGINE_READ(engine, RING_EXECLIST_STATUS_HI));
+ drm_printf(m, "\tEL_STAT_LO: 0x%08x\n",
+ ENGINE_READ(engine, RING_EXECLIST_STATUS_LO));
+ }
drm_printf(m, "\tRING_START: 0x%08x\n",
ENGINE_READ(engine, RING_START));
drm_printf(m, "\tRING_HEAD: 0x%08x\n",
@@ -1387,39 +1423,24 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
int len;
len = scnprintf(hdr, sizeof(hdr),
- "\t\tActive[%d]: ",
- (int)(port - execlists->active));
- if (!i915_request_signaled(rq)) {
- struct intel_timeline *tl = get_timeline(rq);
-
- len += scnprintf(hdr + len, sizeof(hdr) - len,
- "ring:{start:%08x, hwsp:%08x, seqno:%08x, runtime:%llums}, ",
- i915_ggtt_offset(rq->ring->vma),
- tl ? tl->hwsp_offset : 0,
- hwsp_seqno(rq),
- DIV_ROUND_CLOSEST_ULL(intel_context_get_total_runtime_ns(rq->context),
- 1000 * 1000));
-
- if (tl)
- intel_timeline_put(tl);
- }
+ "\t\tActive[%d]: ccid:%08x, ",
+ (int)(port - execlists->active),
+ upper_32_bits(rq->context->lrc_desc));
+ len += print_ring(hdr + len, sizeof(hdr) - len, rq);
scnprintf(hdr + len, sizeof(hdr) - len, "rq: ");
print_request(m, rq, hdr);
}
for (port = execlists->pending; (rq = *port); port++) {
- struct intel_timeline *tl = get_timeline(rq);
- char hdr[80];
-
- snprintf(hdr, sizeof(hdr),
- "\t\tPending[%d] ring:{start:%08x, hwsp:%08x, seqno:%08x}, rq: ",
- (int)(port - execlists->pending),
- i915_ggtt_offset(rq->ring->vma),
- tl ? tl->hwsp_offset : 0,
- hwsp_seqno(rq));
- print_request(m, rq, hdr);
+ char hdr[160];
+ int len;
- if (tl)
- intel_timeline_put(tl);
+ len = scnprintf(hdr, sizeof(hdr),
+ "\t\tPending[%d]: ccid:%08x, ",
+ (int)(port - execlists->pending),
+ upper_32_bits(rq->context->lrc_desc));
+ len += print_ring(hdr + len, sizeof(hdr) - len, rq);
+ scnprintf(hdr + len, sizeof(hdr) - len, "rq: ");
+ print_request(m, rq, hdr);
}
rcu_read_unlock();
execlists_active_unlock_bh(execlists);
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
index dd825718e4e5..5136c8bf112d 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
@@ -31,7 +31,7 @@ static bool next_heartbeat(struct intel_engine_cs *engine)
delay = msecs_to_jiffies_timeout(delay);
if (delay >= HZ)
delay = round_jiffies_up_relative(delay);
- schedule_delayed_work(&engine->heartbeat.work, delay);
+ mod_delayed_work(system_wq, &engine->heartbeat.work, delay);
return true;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
index b6cf284e3a2d..3be679741d22 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
@@ -181,7 +181,7 @@ static bool switch_to_kernel_context(struct intel_engine_cs *engine)
* Ergo, if we put ourselves on the timelines.active_list
* (see intel_timeline_enter()) before we increment the
* engine->wakeref.count, we may see the request completion and retire
- * it causing an undeflow of the engine->wakeref.
+ * it causing an underflow of the engine->wakeref.
*/
flags = __timeline_mark_lock(ce);
GEM_BUG_ON(atomic_read(&ce->timeline->active_count) < 0);
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.h b/drivers/gpu/drm/i915/gt/intel_engine_pm.h
index e52c2b0cb245..418df0a13145 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.h
@@ -37,6 +37,12 @@ static inline void intel_engine_pm_put_async(struct intel_engine_cs *engine)
intel_wakeref_put_async(&engine->wakeref);
}
+static inline void intel_engine_pm_put_delay(struct intel_engine_cs *engine,
+ unsigned long delay)
+{
+ intel_wakeref_put_delay(&engine->wakeref, delay);
+}
+
static inline void intel_engine_pm_flush(struct intel_engine_cs *engine)
{
intel_wakeref_unlock_wait(&engine->wakeref);
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index 80cdde712842..01d4bd781a2f 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -157,6 +157,15 @@ struct intel_engine_execlists {
struct i915_priolist default_priolist;
/**
+ * @yield: CCID at the time of the last semaphore-wait interrupt.
+ *
+ * Instead of leaving a semaphore busy-spinning on an engine, we would
+ * like to switch to another ready context, i.e. yielding the semaphore
+ * timeslice.
+ */
+ u32 yield;
+
+ /**
* @error_interrupt: CS Master EIR
*
* The CS generates an interrupt when it detects an error. We capture
@@ -308,6 +317,9 @@ struct intel_engine_cs {
struct list_head hold; /* ready requests, but on hold */
} active;
+ /* keep a request in reserve for a [pm] barrier under oom */
+ struct i915_request *request_pool;
+
struct llist_head barrier_tasks;
struct intel_context *kernel_context; /* pinned */
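The allocation-side consumer of request_pool lives in i915_request.c and is outside the hunks shown here; the sketch below is only an illustration of the idea, with request_alloc_with_reserve() being a hypothetical name (i915_request_slab_cache() is real and used above):
static struct i915_request *
request_alloc_with_reserve(struct intel_engine_cs *engine, gfp_t gfp)
{
	struct i915_request *rq;

	/* Normal path: allocate from the shared request slab. */
	rq = kmem_cache_alloc(i915_request_slab_cache(), gfp);
	if (rq)
		return rq;

	/*
	 * Under oom, atomically claim the single request held in
	 * reserve so the engine-pm barrier can still make progress.
	 */
	return xchg(&engine->request_pool, NULL);
}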
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c
index aed498a0d032..eebd1190506f 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c
@@ -65,7 +65,7 @@ static int ggtt_init_hw(struct i915_ggtt *ggtt)
ggtt->mappable_end);
}
- i915_ggtt_init_fences(ggtt);
+ intel_ggtt_init_fences(ggtt);
return 0;
}
@@ -191,10 +191,11 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
enum i915_cache_level level,
u32 flags)
{
- struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
- struct sgt_iter sgt_iter;
- gen8_pte_t __iomem *gtt_entries;
const gen8_pte_t pte_encode = gen8_ggtt_pte_encode(0, level, 0);
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+ gen8_pte_t __iomem *gte;
+ gen8_pte_t __iomem *end;
+ struct sgt_iter iter;
dma_addr_t addr;
/*
@@ -202,10 +203,17 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
* not to allow the user to override access to a read only page.
*/
- gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm;
- gtt_entries += vma->node.start / I915_GTT_PAGE_SIZE;
- for_each_sgt_daddr(addr, sgt_iter, vma->pages)
- gen8_set_pte(gtt_entries++, pte_encode | addr);
+ gte = (gen8_pte_t __iomem *)ggtt->gsm;
+ gte += vma->node.start / I915_GTT_PAGE_SIZE;
+ end = gte + vma->node.size / I915_GTT_PAGE_SIZE;
+
+ for_each_sgt_daddr(addr, iter, vma->pages)
+ gen8_set_pte(gte++, pte_encode | addr);
+ GEM_BUG_ON(gte > end);
+
+ /* Fill the allocated but "unused" space beyond the end of the buffer */
+ while (gte < end)
+ gen8_set_pte(gte++, vm->scratch[0].encode);
/*
* We want to flush the TLBs only after we're certain all the PTE
@@ -241,13 +249,22 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
u32 flags)
{
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
- gen6_pte_t __iomem *entries = (gen6_pte_t __iomem *)ggtt->gsm;
- unsigned int i = vma->node.start / I915_GTT_PAGE_SIZE;
+ gen6_pte_t __iomem *gte;
+ gen6_pte_t __iomem *end;
struct sgt_iter iter;
dma_addr_t addr;
+ gte = (gen6_pte_t __iomem *)ggtt->gsm;
+ gte += vma->node.start / I915_GTT_PAGE_SIZE;
+ end = gte + vma->node.size / I915_GTT_PAGE_SIZE;
+
for_each_sgt_daddr(addr, iter, vma->pages)
- iowrite32(vm->pte_encode(addr, level, flags), &entries[i++]);
+ iowrite32(vm->pte_encode(addr, level, flags), gte++);
+ GEM_BUG_ON(gte > end);
+
+ /* Fill the allocated but "unused" space beyond the end of the buffer */
+ while (gte < end)
+ iowrite32(vm->scratch[0].encode, gte++);
/*
* We want to flush the TLBs only after we're certain all the PTE
@@ -698,11 +715,13 @@ static void ggtt_cleanup_hw(struct i915_ggtt *ggtt)
*/
void i915_ggtt_driver_release(struct drm_i915_private *i915)
{
+ struct i915_ggtt *ggtt = &i915->ggtt;
struct pagevec *pvec;
- fini_aliasing_ppgtt(&i915->ggtt);
+ fini_aliasing_ppgtt(ggtt);
- ggtt_cleanup_hw(&i915->ggtt);
+ intel_ggtt_fini_fences(ggtt);
+ ggtt_cleanup_hw(ggtt);
pvec = &i915->mm.wc_stash.pvec;
if (pvec->nr) {
@@ -767,13 +786,13 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
else
ggtt->gsm = ioremap_wc(phys_addr, size);
if (!ggtt->gsm) {
- DRM_ERROR("Failed to map the ggtt page table\n");
+ drm_err(&i915->drm, "Failed to map the ggtt page table\n");
return -ENOMEM;
}
ret = setup_scratch_page(&ggtt->vm, GFP_DMA32);
if (ret) {
- DRM_ERROR("Scratch setup failed\n");
+ drm_err(&i915->drm, "Scratch setup failed\n");
/* iounmap will also get called at remove, but meh */
iounmap(ggtt->gsm);
return ret;
@@ -833,7 +852,8 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
if (!err)
err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(39));
if (err)
- DRM_ERROR("Can't set DMA mask/consistent mask (%d)\n", err);
+ drm_err(&i915->drm,
+ "Can't set DMA mask/consistent mask (%d)\n", err);
pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
if (IS_CHERRYVIEW(i915))
@@ -980,7 +1000,8 @@ static int gen6_gmch_probe(struct i915_ggtt *ggtt)
* just a coarse sanity check.
*/
if (ggtt->mappable_end < (64<<20) || ggtt->mappable_end > (512<<20)) {
- DRM_ERROR("Unknown GMADR size (%pa)\n", &ggtt->mappable_end);
+ drm_err(&i915->drm, "Unknown GMADR size (%pa)\n",
+ &ggtt->mappable_end);
return -ENXIO;
}
@@ -988,7 +1009,8 @@ static int gen6_gmch_probe(struct i915_ggtt *ggtt)
if (!err)
err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40));
if (err)
- DRM_ERROR("Can't set DMA mask/consistent mask (%d)\n", err);
+ drm_err(&i915->drm,
+ "Can't set DMA mask/consistent mask (%d)\n", err);
pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
size = gen6_get_total_gtt_size(snb_gmch_ctl);
@@ -1035,7 +1057,7 @@ static int i915_gmch_probe(struct i915_ggtt *ggtt)
ret = intel_gmch_probe(i915->bridge_dev, i915->drm.pdev, NULL);
if (!ret) {
- DRM_ERROR("failed to set up gmch\n");
+ drm_err(&i915->drm, "failed to set up gmch\n");
return -EIO;
}
@@ -1058,7 +1080,7 @@ static int i915_gmch_probe(struct i915_ggtt *ggtt)
ggtt->vm.vma_ops.clear_pages = clear_pages;
if (unlikely(ggtt->do_idle_maps))
- dev_notice(i915->drm.dev,
+ drm_notice(&i915->drm,
"Applying Ironlake quirks for intel_iommu\n");
return 0;
@@ -1083,26 +1105,29 @@ static int ggtt_probe_hw(struct i915_ggtt *ggtt, struct intel_gt *gt)
return ret;
if ((ggtt->vm.total - 1) >> 32) {
- DRM_ERROR("We never expected a Global GTT with more than 32bits"
- " of address space! Found %lldM!\n",
- ggtt->vm.total >> 20);
+ drm_err(&i915->drm,
+ "We never expected a Global GTT with more than 32bits"
+ " of address space! Found %lldM!\n",
+ ggtt->vm.total >> 20);
ggtt->vm.total = 1ULL << 32;
ggtt->mappable_end =
min_t(u64, ggtt->mappable_end, ggtt->vm.total);
}
if (ggtt->mappable_end > ggtt->vm.total) {
- DRM_ERROR("mappable aperture extends past end of GGTT,"
- " aperture=%pa, total=%llx\n",
- &ggtt->mappable_end, ggtt->vm.total);
+ drm_err(&i915->drm,
+ "mappable aperture extends past end of GGTT,"
+ " aperture=%pa, total=%llx\n",
+ &ggtt->mappable_end, ggtt->vm.total);
ggtt->mappable_end = ggtt->vm.total;
}
/* GMADR is the PCI mmio aperture into the global GTT. */
- DRM_DEBUG_DRIVER("GGTT size = %lluM\n", ggtt->vm.total >> 20);
- DRM_DEBUG_DRIVER("GMADR size = %lluM\n", (u64)ggtt->mappable_end >> 20);
- DRM_DEBUG_DRIVER("DSM size = %lluM\n",
- (u64)resource_size(&intel_graphics_stolen_res) >> 20);
+ drm_dbg(&i915->drm, "GGTT size = %lluM\n", ggtt->vm.total >> 20);
+ drm_dbg(&i915->drm, "GMADR size = %lluM\n",
+ (u64)ggtt->mappable_end >> 20);
+ drm_dbg(&i915->drm, "DSM size = %lluM\n",
+ (u64)resource_size(&intel_graphics_stolen_res) >> 20);
return 0;
}
@@ -1120,7 +1145,7 @@ int i915_ggtt_probe_hw(struct drm_i915_private *i915)
return ret;
if (intel_vtd_active())
- dev_info(i915->drm.dev, "VT-d active for gfx access\n");
+ drm_info(&i915->drm, "VT-d active for gfx access\n");
return 0;
}
@@ -1195,6 +1220,8 @@ void i915_ggtt_resume(struct i915_ggtt *ggtt)
if (INTEL_GEN(ggtt->vm.i915) >= 8)
setup_private_pat(ggtt->vm.gt->uncore);
+
+ intel_ggtt_restore_fences(ggtt);
}
static struct scatterlist *
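Both insert_entries paths above follow the same bounded-fill idiom; a minimal, self-contained model of it (simplified types, no MMIO accessors):
/* Model of the bounded PTE fill: real entries first, scratch for the rest. */
static void fill_ptes(u64 *gte, u64 *end, const u64 *addrs, unsigned int n,
		      u64 scratch)
{
	unsigned int i;

	for (i = 0; i < n && gte < end; i++)
		*gte++ = addrs[i];	/* valid translations for the object */

	while (gte < end)
		*gte++ = scratch;	/* guard the rounded-up remainder */
}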
diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.c b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
index d152b648c73c..7fb36b12fe7a 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence_reg.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
@@ -68,8 +68,7 @@ static struct intel_uncore *fence_to_uncore(struct i915_fence_reg *fence)
return fence->ggtt->vm.gt->uncore;
}
-static void i965_write_fence_reg(struct i915_fence_reg *fence,
- struct i915_vma *vma)
+static void i965_write_fence_reg(struct i915_fence_reg *fence)
{
i915_reg_t fence_reg_lo, fence_reg_hi;
int fence_pitch_shift;
@@ -87,18 +86,16 @@ static void i965_write_fence_reg(struct i915_fence_reg *fence,
}
val = 0;
- if (vma) {
- unsigned int stride = i915_gem_object_get_stride(vma->obj);
+ if (fence->tiling) {
+ unsigned int stride = fence->stride;
- GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
- GEM_BUG_ON(!IS_ALIGNED(vma->node.start, I965_FENCE_PAGE));
- GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I965_FENCE_PAGE));
GEM_BUG_ON(!IS_ALIGNED(stride, 128));
- val = (vma->node.start + vma->fence_size - I965_FENCE_PAGE) << 32;
- val |= vma->node.start;
+ val = fence->start + fence->size - I965_FENCE_PAGE;
+ val <<= 32;
+ val |= fence->start;
val |= (u64)((stride / 128) - 1) << fence_pitch_shift;
- if (i915_gem_object_get_tiling(vma->obj) == I915_TILING_Y)
+ if (fence->tiling == I915_TILING_Y)
val |= BIT(I965_FENCE_TILING_Y_SHIFT);
val |= I965_FENCE_REG_VALID;
}
@@ -125,21 +122,15 @@ static void i965_write_fence_reg(struct i915_fence_reg *fence,
}
}
-static void i915_write_fence_reg(struct i915_fence_reg *fence,
- struct i915_vma *vma)
+static void i915_write_fence_reg(struct i915_fence_reg *fence)
{
u32 val;
val = 0;
- if (vma) {
- unsigned int tiling = i915_gem_object_get_tiling(vma->obj);
+ if (fence->tiling) {
+ unsigned int stride = fence->stride;
+ unsigned int tiling = fence->tiling;
bool is_y_tiled = tiling == I915_TILING_Y;
- unsigned int stride = i915_gem_object_get_stride(vma->obj);
-
- GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
- GEM_BUG_ON(vma->node.start & ~I915_FENCE_START_MASK);
- GEM_BUG_ON(!is_power_of_2(vma->fence_size));
- GEM_BUG_ON(!IS_ALIGNED(vma->node.start, vma->fence_size));
if (is_y_tiled && HAS_128_BYTE_Y_TILING(fence_to_i915(fence)))
stride /= 128;
@@ -147,10 +138,10 @@ static void i915_write_fence_reg(struct i915_fence_reg *fence,
stride /= 512;
GEM_BUG_ON(!is_power_of_2(stride));
- val = vma->node.start;
+ val = fence->start;
if (is_y_tiled)
val |= BIT(I830_FENCE_TILING_Y_SHIFT);
- val |= I915_FENCE_SIZE_BITS(vma->fence_size);
+ val |= I915_FENCE_SIZE_BITS(fence->size);
val |= ilog2(stride) << I830_FENCE_PITCH_SHIFT;
val |= I830_FENCE_REG_VALID;
@@ -165,25 +156,18 @@ static void i915_write_fence_reg(struct i915_fence_reg *fence,
}
}
-static void i830_write_fence_reg(struct i915_fence_reg *fence,
- struct i915_vma *vma)
+static void i830_write_fence_reg(struct i915_fence_reg *fence)
{
u32 val;
val = 0;
- if (vma) {
- unsigned int stride = i915_gem_object_get_stride(vma->obj);
-
- GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
- GEM_BUG_ON(vma->node.start & ~I830_FENCE_START_MASK);
- GEM_BUG_ON(!is_power_of_2(vma->fence_size));
- GEM_BUG_ON(!is_power_of_2(stride / 128));
- GEM_BUG_ON(!IS_ALIGNED(vma->node.start, vma->fence_size));
+ if (fence->tiling) {
+ unsigned int stride = fence->stride;
- val = vma->node.start;
- if (i915_gem_object_get_tiling(vma->obj) == I915_TILING_Y)
+ val = fence->start;
+ if (fence->tiling == I915_TILING_Y)
val |= BIT(I830_FENCE_TILING_Y_SHIFT);
- val |= I830_FENCE_SIZE_BITS(vma->fence_size);
+ val |= I830_FENCE_SIZE_BITS(fence->size);
val |= ilog2(stride / 128) << I830_FENCE_PITCH_SHIFT;
val |= I830_FENCE_REG_VALID;
}
@@ -197,8 +181,7 @@ static void i830_write_fence_reg(struct i915_fence_reg *fence,
}
}
-static void fence_write(struct i915_fence_reg *fence,
- struct i915_vma *vma)
+static void fence_write(struct i915_fence_reg *fence)
{
struct drm_i915_private *i915 = fence_to_i915(fence);
@@ -209,18 +192,21 @@ static void fence_write(struct i915_fence_reg *fence,
*/
if (IS_GEN(i915, 2))
- i830_write_fence_reg(fence, vma);
+ i830_write_fence_reg(fence);
else if (IS_GEN(i915, 3))
- i915_write_fence_reg(fence, vma);
+ i915_write_fence_reg(fence);
else
- i965_write_fence_reg(fence, vma);
+ i965_write_fence_reg(fence);
/*
* Access through the fenced region afterwards is
* ordered by the posting reads whilst writing the registers.
*/
+}
- fence->dirty = false;
+static bool gpu_uses_fence_registers(struct i915_fence_reg *fence)
+{
+ return INTEL_GEN(fence_to_i915(fence)) < 4;
}
static int fence_update(struct i915_fence_reg *fence,
@@ -232,27 +218,32 @@ static int fence_update(struct i915_fence_reg *fence,
struct i915_vma *old;
int ret;
+ fence->tiling = 0;
if (vma) {
+ GEM_BUG_ON(!i915_gem_object_get_stride(vma->obj) ||
+ !i915_gem_object_get_tiling(vma->obj));
+
if (!i915_vma_is_map_and_fenceable(vma))
return -EINVAL;
- if (drm_WARN(&uncore->i915->drm,
- !i915_gem_object_get_stride(vma->obj) ||
- !i915_gem_object_get_tiling(vma->obj),
- "bogus fence setup with stride: 0x%x, tiling mode: %i\n",
- i915_gem_object_get_stride(vma->obj),
- i915_gem_object_get_tiling(vma->obj)))
- return -EINVAL;
+ if (gpu_uses_fence_registers(fence)) {
+ /* implicit 'unfenced' GPU blits */
+ ret = i915_vma_sync(vma);
+ if (ret)
+ return ret;
+ }
- ret = i915_vma_sync(vma);
- if (ret)
- return ret;
+ fence->start = vma->node.start;
+ fence->size = vma->fence_size;
+ fence->stride = i915_gem_object_get_stride(vma->obj);
+ fence->tiling = i915_gem_object_get_tiling(vma->obj);
}
+ WRITE_ONCE(fence->dirty, false);
old = xchg(&fence->vma, NULL);
if (old) {
/* XXX Ideally we would move the waiting to outside the mutex */
- ret = i915_vma_sync(old);
+ ret = i915_active_wait(&fence->active);
if (ret) {
fence->vma = old;
return ret;
@@ -276,7 +267,7 @@ static int fence_update(struct i915_fence_reg *fence,
/*
* We only need to update the register itself if the device is awake.
* If the device is currently powered down, we will defer the write
- * to the runtime resume, see i915_gem_restore_fences().
+ * to the runtime resume, see intel_ggtt_restore_fences().
*
* This only works for removing the fence register, on acquisition
* the caller must hold the rpm wakeref. The fence register must
@@ -290,7 +281,7 @@ static int fence_update(struct i915_fence_reg *fence,
}
WRITE_ONCE(fence->vma, vma);
- fence_write(fence, vma);
+ fence_write(fence);
if (vma) {
vma->fence = fence;
@@ -307,23 +298,26 @@ static int fence_update(struct i915_fence_reg *fence,
*
* This function force-removes any fence from the given object, which is useful
* if the kernel wants to do untiled GTT access.
- *
- * Returns:
- *
- * 0 on success, negative error code on failure.
*/
-int i915_vma_revoke_fence(struct i915_vma *vma)
+void i915_vma_revoke_fence(struct i915_vma *vma)
{
struct i915_fence_reg *fence = vma->fence;
+ intel_wakeref_t wakeref;
lockdep_assert_held(&vma->vm->mutex);
if (!fence)
- return 0;
+ return;
- if (atomic_read(&fence->pin_count))
- return -EBUSY;
+ GEM_BUG_ON(fence->vma != vma);
+ GEM_BUG_ON(!i915_active_is_idle(&fence->active));
+ GEM_BUG_ON(atomic_read(&fence->pin_count));
- return fence_update(fence, NULL);
+ fence->tiling = 0;
+ WRITE_ONCE(fence->vma, NULL);
+ vma->fence = NULL;
+
+ with_intel_runtime_pm_if_in_use(fence_to_uncore(fence)->rpm, wakeref)
+ fence_write(fence);
}
static struct i915_fence_reg *fence_find(struct i915_ggtt *ggtt)
@@ -487,34 +481,19 @@ void i915_unreserve_fence(struct i915_fence_reg *fence)
}
/**
- * i915_gem_restore_fences - restore fence state
+ * intel_ggtt_restore_fences - restore fence state
* @ggtt: Global GTT
*
* Restore the hw fence state to match the software tracking again, to be called
* after a gpu reset and on resume. Note that on runtime suspend we only cancel
* the fences, to be reacquired by the user later.
*/
-void i915_gem_restore_fences(struct i915_ggtt *ggtt)
+void intel_ggtt_restore_fences(struct i915_ggtt *ggtt)
{
int i;
- rcu_read_lock(); /* keep obj alive as we dereference */
- for (i = 0; i < ggtt->num_fences; i++) {
- struct i915_fence_reg *reg = &ggtt->fence_regs[i];
- struct i915_vma *vma = READ_ONCE(reg->vma);
-
- GEM_BUG_ON(vma && vma->fence != reg);
-
- /*
- * Commit delayed tiling changes if we have an object still
- * attached to the fence, otherwise just clear the fence.
- */
- if (vma && !i915_gem_object_is_tiled(vma->obj))
- vma = NULL;
-
- fence_write(reg, vma);
- }
- rcu_read_unlock();
+ for (i = 0; i < ggtt->num_fences; i++)
+ fence_write(&ggtt->fence_regs[i]);
}
/**
@@ -746,7 +725,7 @@ static void detect_bit_6_swizzle(struct i915_ggtt *ggtt)
* bit 17 of its physical address and therefore being interpreted differently
* by the GPU.
*/
-static void i915_gem_swizzle_page(struct page *page)
+static void swizzle_page(struct page *page)
{
char temp[64];
char *vaddr;
@@ -791,7 +770,7 @@ i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj,
for_each_sgt_page(page, sgt_iter, pages) {
char new_bit_17 = page_to_phys(page) >> 17;
if ((new_bit_17 & 0x1) != (test_bit(i, obj->bit_17) != 0)) {
- i915_gem_swizzle_page(page);
+ swizzle_page(page);
set_page_dirty(page);
}
i++;
@@ -836,7 +815,7 @@ i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj,
}
}
-void i915_ggtt_init_fences(struct i915_ggtt *ggtt)
+void intel_ggtt_init_fences(struct i915_ggtt *ggtt)
{
struct drm_i915_private *i915 = ggtt->vm.i915;
struct intel_uncore *uncore = ggtt->vm.gt->uncore;
@@ -864,18 +843,37 @@ void i915_ggtt_init_fences(struct i915_ggtt *ggtt)
if (intel_vgpu_active(i915))
num_fences = intel_uncore_read(uncore,
vgtif_reg(avail_rs.fence_num));
+ ggtt->fence_regs = kcalloc(num_fences,
+ sizeof(*ggtt->fence_regs),
+ GFP_KERNEL);
+ if (!ggtt->fence_regs)
+ num_fences = 0;
/* Initialize fence registers to zero */
for (i = 0; i < num_fences; i++) {
struct i915_fence_reg *fence = &ggtt->fence_regs[i];
+ i915_active_init(&fence->active, NULL, NULL);
fence->ggtt = ggtt;
fence->id = i;
list_add_tail(&fence->link, &ggtt->fence_list);
}
ggtt->num_fences = num_fences;
- i915_gem_restore_fences(ggtt);
+ intel_ggtt_restore_fences(ggtt);
+}
+
+void intel_ggtt_fini_fences(struct i915_ggtt *ggtt)
+{
+ int i;
+
+ for (i = 0; i < ggtt->num_fences; i++) {
+ struct i915_fence_reg *fence = &ggtt->fence_regs[i];
+
+ i915_active_fini(&fence->active);
+ }
+
+ kfree(ggtt->fence_regs);
}
void intel_gt_init_swizzling(struct intel_gt *gt)
diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.h b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.h
index 7bd521cd7cd7..9eef679e1311 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence_reg.h
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.h
@@ -22,12 +22,14 @@
*
*/
-#ifndef __I915_FENCE_REG_H__
-#define __I915_FENCE_REG_H__
+#ifndef __INTEL_GGTT_FENCING_H__
+#define __INTEL_GGTT_FENCING_H__
#include <linux/list.h>
#include <linux/types.h>
+#include "i915_active.h"
+
struct drm_i915_gem_object;
struct i915_ggtt;
struct i915_vma;
@@ -41,6 +43,7 @@ struct i915_fence_reg {
struct i915_ggtt *ggtt;
struct i915_vma *vma;
atomic_t pin_count;
+ struct i915_active active;
int id;
/**
* Whether the tiling parameters for the currently
@@ -51,20 +54,24 @@ struct i915_fence_reg {
* command (such as BLT on gen2/3), as a "fence".
*/
bool dirty;
+ u32 start;
+ u32 size;
+ u32 tiling;
+ u32 stride;
};
-/* i915_gem_fence_reg.c */
struct i915_fence_reg *i915_reserve_fence(struct i915_ggtt *ggtt);
void i915_unreserve_fence(struct i915_fence_reg *fence);
-void i915_gem_restore_fences(struct i915_ggtt *ggtt);
+void intel_ggtt_restore_fences(struct i915_ggtt *ggtt);
void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj,
struct sg_table *pages);
void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj,
struct sg_table *pages);
-void i915_ggtt_init_fences(struct i915_ggtt *ggtt);
+void intel_ggtt_init_fences(struct i915_ggtt *ggtt);
+void intel_ggtt_fini_fences(struct i915_ggtt *ggtt);
void intel_gt_init_swizzling(struct intel_gt *gt);
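The new start/size/tiling/stride fields snapshot everything fence_write() needs at fence_update() time, which is what allows the register to be rewritten without a live vma, e.g. on resume:
/*
 * With the snapshot in place, restoring fences after reset/resume is a
 * plain replay of saved state, as in intel_ggtt_restore_fences():
 *
 *	for (i = 0; i < ggtt->num_fences; i++)
 *		fence_write(&ggtt->fence_regs[i]);
 */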
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
index d09f7596cb98..1c99cc72305a 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt.c
@@ -635,8 +635,7 @@ void intel_gt_driver_remove(struct intel_gt *gt)
{
__intel_gt_disable(gt);
- intel_uc_fini_hw(&gt->uc);
- intel_uc_fini(&gt->uc);
+ intel_uc_driver_remove(&gt->uc);
intel_engines_release(gt);
}
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_irq.c b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
index f0e7fd95165a..0cc7dd54f4f9 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_irq.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
@@ -39,6 +39,15 @@ cs_irq_handler(struct intel_engine_cs *engine, u32 iir)
}
}
+ if (iir & GT_WAIT_SEMAPHORE_INTERRUPT) {
+ WRITE_ONCE(engine->execlists.yield,
+ ENGINE_READ_FW(engine, RING_EXECLIST_STATUS_HI));
+ ENGINE_TRACE(engine, "semaphore yield: %08x\n",
+ engine->execlists.yield);
+ if (del_timer(&engine->execlists.timer))
+ tasklet = true;
+ }
+
if (iir & GT_CONTEXT_SWITCH_INTERRUPT)
tasklet = true;
@@ -228,7 +237,8 @@ void gen11_gt_irq_postinstall(struct intel_gt *gt)
const u32 irqs =
GT_CS_MASTER_ERROR_INTERRUPT |
GT_RENDER_USER_INTERRUPT |
- GT_CONTEXT_SWITCH_INTERRUPT;
+ GT_CONTEXT_SWITCH_INTERRUPT |
+ GT_WAIT_SEMAPHORE_INTERRUPT;
struct intel_uncore *uncore = gt->uncore;
const u32 dmask = irqs << 16 | irqs;
const u32 smask = irqs << 16;
@@ -366,7 +376,8 @@ void gen8_gt_irq_postinstall(struct intel_gt *gt)
const u32 irqs =
GT_CS_MASTER_ERROR_INTERRUPT |
GT_RENDER_USER_INTERRUPT |
- GT_CONTEXT_SWITCH_INTERRUPT;
+ GT_CONTEXT_SWITCH_INTERRUPT |
+ GT_WAIT_SEMAPHORE_INTERRUPT;
const u32 gt_interrupts[] = {
irqs << GEN8_RCS_IRQ_SHIFT | irqs << GEN8_BCS_IRQ_SHIFT,
irqs << GEN8_VCS0_IRQ_SHIFT | irqs << GEN8_VCS1_IRQ_SHIFT,
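Taken together with the execlists.yield field and timeslice_yield() in intel_lrc.c, the interrupt and scheduler sides pair up as summarized below (illustrative note, not patch content):
/*
 * Semaphore-yield handshake across this series:
 *
 *   interrupt (cs_irq_handler)           scheduler (timeslice_yield)
 *   --------------------------           ---------------------------
 *   yield = RING_EXECLIST_STATUS_HI  ->  expired |= (rq ccid == yield)
 *   del_timer() + kick tasklet           defer_active() on expiry
 *
 * RING_EXECLIST_STATUS_HI holds the CCID of the context that missed a
 * semaphore; dequeue compares it against upper_32_bits(lrc_desc) to
 * decide whether the active context should give up its timeslice.
 */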
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
index 8b653c0f5e5f..3e8a56c7d818 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
@@ -204,7 +204,7 @@ int intel_gt_resume(struct intel_gt *gt)
/* Only when the HW is re-initialised, can we replay the requests */
err = intel_gt_init_hw(gt);
if (err) {
- dev_err(gt->i915->drm.dev,
+ drm_err(&gt->i915->drm,
"Failed to initialize GPU, declaring it wedged!\n");
goto err_wedged;
}
@@ -220,7 +220,7 @@ int intel_gt_resume(struct intel_gt *gt)
intel_engine_pm_put(engine);
if (err) {
- dev_err(gt->i915->drm.dev,
+ drm_err(&gt->i915->drm,
"Failed to restart %s (%d)\n",
engine->name, err);
goto err_wedged;
@@ -324,6 +324,7 @@ int intel_gt_runtime_resume(struct intel_gt *gt)
{
GT_TRACE(gt, "\n");
intel_gt_init_swizzling(gt);
+ intel_ggtt_restore_fences(gt->ggtt);
return intel_uc_runtime_resume(&gt->uc);
}
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_requests.c b/drivers/gpu/drm/i915/gt/intel_gt_requests.c
index 24c99d0838af..835ec184763e 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_requests.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_requests.c
@@ -38,7 +38,7 @@ static bool flush_submission(struct intel_gt *gt)
for_each_engine(engine, gt, id) {
intel_engine_flush_submission(engine);
active |= flush_work(&engine->retire_work);
- active |= flush_work(&engine->wakeref.work);
+ active |= flush_delayed_work(&engine->wakeref.work);
}
return active;
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.h b/drivers/gpu/drm/i915/gt/intel_gtt.h
index b3116fe8d180..d93ebdf3fa0e 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.h
@@ -26,7 +26,6 @@
#include <drm/drm_mm.h>
#include "gt/intel_reset.h"
-#include "i915_gem_fence_reg.h"
#include "i915_selftest.h"
#include "i915_vma_types.h"
@@ -135,6 +134,8 @@ typedef u64 gen8_pte_t;
#define GEN8_PDE_IPS_64K BIT(11)
#define GEN8_PDE_PS_2M BIT(7)
+struct i915_fence_reg;
+
#define for_each_sgt_daddr(__dp, __iter, __sgt) \
__for_each_sgt_daddr(__dp, __iter, __sgt, I915_GTT_PAGE_SIZE)
@@ -333,7 +334,7 @@ struct i915_ggtt {
u32 pin_bias;
unsigned int num_fences;
- struct i915_fence_reg fence_regs[I915_MAX_NUM_FENCES];
+ struct i915_fence_reg *fence_regs;
struct list_head fence_list;
/**
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 112531b29f59..6fbad5e2343f 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -238,6 +238,17 @@ __execlists_update_reg_state(const struct intel_context *ce,
const struct intel_engine_cs *engine,
u32 head);
+static u32 intel_context_get_runtime(const struct intel_context *ce)
+{
+ /*
+ * We can use either ppHWSP[16] which is recorded before the context
+ * switch (and so excludes the cost of context switches) or use the
+ * value from the context image itself, which is saved/restored earlier
+ * and so includes the cost of the save.
+ */
+ return READ_ONCE(ce->lrc_reg_state[CTX_TIMESTAMP]);
+}
+
static void mark_eio(struct i915_request *rq)
{
if (i915_request_completed(rq))
@@ -1154,6 +1165,7 @@ static void restore_default_state(struct intel_context *ce,
engine->context_size - PAGE_SIZE);
execlists_init_reg_state(regs, ce, engine, ce->ring, false);
+ ce->runtime.last = intel_context_get_runtime(ce);
}
static void reset_active(struct i915_request *rq,
@@ -1195,17 +1207,6 @@ static void reset_active(struct i915_request *rq,
ce->lrc_desc |= CTX_DESC_FORCE_RESTORE;
}
-static u32 intel_context_get_runtime(const struct intel_context *ce)
-{
- /*
- * We can use either ppHWSP[16] which is recorded before the context
- * switch (and so excludes the cost of context switches) or use the
- * value from the context image itself, which is saved/restored earlier
- * and so includes the cost of the save.
- */
- return READ_ONCE(ce->lrc_reg_state[CTX_TIMESTAMP]);
-}
-
static void st_update_runtime_underflow(struct intel_context *ce, s32 dt)
{
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
@@ -1415,6 +1416,23 @@ static inline void write_desc(struct intel_engine_execlists *execlists, u64 desc
}
}
+static __maybe_unused char *
+dump_port(char *buf, int buflen, const char *prefix, struct i915_request *rq)
+{
+ if (!rq)
+ return "";
+
+ snprintf(buf, buflen, "%s%llx:%lld%s prio %d",
+ prefix,
+ rq->fence.context, rq->fence.seqno,
+ i915_request_completed(rq) ? "!" :
+ i915_request_started(rq) ? "*" :
+ "",
+ rq_prio(rq));
+
+ return buf;
+}
+
static __maybe_unused void
trace_ports(const struct intel_engine_execlists *execlists,
const char *msg,
@@ -1422,18 +1440,14 @@ trace_ports(const struct intel_engine_execlists *execlists,
{
const struct intel_engine_cs *engine =
container_of(execlists, typeof(*engine), execlists);
+ char __maybe_unused p0[40], p1[40];
if (!ports[0])
return;
- ENGINE_TRACE(engine, "%s { %llx:%lld%s, %llx:%lld }\n", msg,
- ports[0]->fence.context,
- ports[0]->fence.seqno,
- i915_request_completed(ports[0]) ? "!" :
- i915_request_started(ports[0]) ? "*" :
- "",
- ports[1] ? ports[1]->fence.context : 0,
- ports[1] ? ports[1]->fence.seqno : 0);
+ ENGINE_TRACE(engine, "%s { %s%s }\n", msg,
+ dump_port(p0, sizeof(p0), "", ports[0]),
+ dump_port(p1, sizeof(p1), ", ", ports[1]));
}
static inline bool
@@ -1663,7 +1677,7 @@ static bool virtual_matches(const struct virtual_engine *ve,
}
static void virtual_xfer_breadcrumbs(struct virtual_engine *ve,
- struct intel_engine_cs *engine)
+ struct i915_request *rq)
{
struct intel_engine_cs *old = ve->siblings[0];
@@ -1671,9 +1685,19 @@ static void virtual_xfer_breadcrumbs(struct virtual_engine *ve,
spin_lock(&old->breadcrumbs.irq_lock);
if (!list_empty(&ve->context.signal_link)) {
- list_move_tail(&ve->context.signal_link,
- &engine->breadcrumbs.signalers);
- intel_engine_signal_breadcrumbs(engine);
+ list_del_init(&ve->context.signal_link);
+
+ /*
+ * We cannot acquire the new engine->breadcrumbs.irq_lock
+ * (as we are holding a breadcrumbs.irq_lock already),
+ * so attach this request to the signaler on submission.
+ * The queued irq_work will occur when we finally drop
+ * the engine->active.lock after dequeue.
+ */
+ set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &rq->fence.flags);
+
+ /* Also transfer the pending irq_work for the old breadcrumb. */
+ intel_engine_signal_breadcrumbs(rq->engine);
}
spin_unlock(&old->breadcrumbs.irq_lock);
}
@@ -1744,7 +1768,8 @@ static void defer_active(struct intel_engine_cs *engine)
}
static bool
-need_timeslice(struct intel_engine_cs *engine, const struct i915_request *rq)
+need_timeslice(const struct intel_engine_cs *engine,
+ const struct i915_request *rq)
{
int hint;
@@ -1758,6 +1783,32 @@ need_timeslice(struct intel_engine_cs *engine, const struct i915_request *rq)
return hint >= effective_prio(rq);
}
+static bool
+timeslice_yield(const struct intel_engine_execlists *el,
+ const struct i915_request *rq)
+{
+ /*
+ * Once bitten, forever smitten!
+ *
+ * If the active context ever busy-waited on a semaphore,
+ * it will be treated as a hog until the end of its timeslice (i.e.
+ * until it is scheduled out and replaced by a new submission,
+ * possibly even its own lite-restore). The HW only sends an interrupt
+ * on the first miss, and we do know if that semaphore has been
+ * signaled, or even if it is now stuck on another semaphore. Play
+ * safe, yield if it might be stuck -- it will be given a fresh
+ * timeslice in the near future.
+ */
+ return upper_32_bits(rq->context->lrc_desc) == READ_ONCE(el->yield);
+}
+
+static bool
+timeslice_expired(const struct intel_engine_execlists *el,
+ const struct i915_request *rq)
+{
+ return timer_expired(&el->timer) || timeslice_yield(el, rq);
+}
+
static int
switch_prio(struct intel_engine_cs *engine, const struct i915_request *rq)
{
@@ -1773,8 +1824,7 @@ timeslice(const struct intel_engine_cs *engine)
return READ_ONCE(engine->props.timeslice_duration_ms);
}
-static unsigned long
-active_timeslice(const struct intel_engine_cs *engine)
+static unsigned long active_timeslice(const struct intel_engine_cs *engine)
{
const struct intel_engine_execlists *execlists = &engine->execlists;
const struct i915_request *rq = *execlists->active;
@@ -1790,16 +1840,25 @@ active_timeslice(const struct intel_engine_cs *engine)
static void set_timeslice(struct intel_engine_cs *engine)
{
+ unsigned long duration;
+
if (!intel_engine_has_timeslices(engine))
return;
- set_timer_ms(&engine->execlists.timer, active_timeslice(engine));
+ duration = active_timeslice(engine);
+ ENGINE_TRACE(engine, "bump timeslicing, interval:%lu", duration);
+
+ set_timer_ms(&engine->execlists.timer, duration);
}
static void start_timeslice(struct intel_engine_cs *engine)
{
struct intel_engine_execlists *execlists = &engine->execlists;
- int prio = queue_prio(execlists);
+ const int prio = queue_prio(execlists);
+ unsigned long duration;
+
+ if (!intel_engine_has_timeslices(engine))
+ return;
WRITE_ONCE(execlists->switch_priority_hint, prio);
if (prio == INT_MIN)
@@ -1808,7 +1867,12 @@ static void start_timeslice(struct intel_engine_cs *engine)
if (timer_pending(&execlists->timer))
return;
- set_timer_ms(&execlists->timer, timeslice(engine));
+ duration = timeslice(engine);
+ ENGINE_TRACE(engine,
+ "start timeslicing, prio:%d, interval:%lu",
+ prio, duration);
+
+ set_timer_ms(&execlists->timer, duration);
}
static void record_preemption(struct intel_engine_execlists *execlists)
@@ -1905,11 +1969,26 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
* of trouble.
*/
active = READ_ONCE(execlists->active);
- while ((last = *active) && i915_request_completed(last))
- active++;
- if (last) {
+ /*
+ * In theory we can skip over completed contexts that have not
+ * yet been processed by events (as those events are in flight):
+ *
+ * while ((last = *active) && i915_request_completed(last))
+ * active++;
+ *
+ * However, the GPU cannot handle this as it will ultimately
+ * find itself trying to jump back into a context it has just
+ * completed and barf.
+ */
+
+ if ((last = *active)) {
if (need_preempt(engine, last, rb)) {
+ if (i915_request_completed(last)) {
+ tasklet_hi_schedule(&execlists->tasklet);
+ return;
+ }
+
ENGINE_TRACE(engine,
"preempting last=%llx:%lld, prio=%d, hint=%d\n",
last->fence.context,
@@ -1936,13 +2015,19 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
last = NULL;
} else if (need_timeslice(engine, last) &&
- timer_expired(&engine->execlists.timer)) {
+ timeslice_expired(execlists, last)) {
+ if (i915_request_completed(last)) {
+ tasklet_hi_schedule(&execlists->tasklet);
+ return;
+ }
+
ENGINE_TRACE(engine,
- "expired last=%llx:%lld, prio=%d, hint=%d\n",
+ "expired last=%llx:%lld, prio=%d, hint=%d, yield?=%s\n",
last->fence.context,
last->fence.seqno,
last->sched.attr.priority,
- execlists->queue_priority_hint);
+ execlists->queue_priority_hint,
+ yesno(timeslice_yield(execlists, last)));
ring_set_paused(engine, 1);
defer_active(engine);
@@ -2045,7 +2130,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
engine);
if (!list_empty(&ve->context.signals))
- virtual_xfer_breadcrumbs(ve, engine);
+ virtual_xfer_breadcrumbs(ve, rq);
/*
* Move the bound engine to the top of the list
@@ -2203,6 +2288,7 @@ done:
}
clear_ports(port + 1, last_port - port);
+ WRITE_ONCE(execlists->yield, -1);
execlists_submit_ports(engine);
set_preempt_timeout(engine, *active);
} else {
@@ -2298,6 +2384,13 @@ gen8_csb_parse(const struct intel_engine_execlists *execlists, const u32 *csb)
return *csb & (GEN8_CTX_STATUS_IDLE_ACTIVE | GEN8_CTX_STATUS_PREEMPTED);
}
+static inline void flush_hwsp(const struct i915_request *rq)
+{
+ mb();
+ clflush((void *)READ_ONCE(rq->hwsp_seqno));
+ mb();
+}
+
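A note on the barrier pairing in flush_hwsp(): the clflush evicts the cacheline holding the breadcrumb so a re-read of *rq->hwsp_seqno observes the latest write from the GPU, and the mb() on either side orders the flush against the surrounding accesses. Usage sketch, mirroring the debug path further down in process_csb():
	flush_hwsp(rq);
	if (i915_request_completed(rq))
		return; /* the write was merely slow to become visible */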
static void process_csb(struct intel_engine_cs *engine)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
@@ -2374,8 +2467,6 @@ static void process_csb(struct intel_engine_cs *engine)
if (promote) {
struct i915_request * const *old = execlists->active;
- GEM_BUG_ON(!assert_pending_valid(execlists, "promote"));
-
ring_set_paused(engine, 0);
/* Point active to the new ELSP; prevent overwriting */
@@ -2388,6 +2479,7 @@ static void process_csb(struct intel_engine_cs *engine)
execlists_schedule_out(*old++);
/* switch pending to inflight */
+ GEM_BUG_ON(!assert_pending_valid(execlists, "promote"));
memcpy(execlists->inflight,
execlists->pending,
execlists_num_ports(execlists) *
@@ -2409,13 +2501,24 @@ static void process_csb(struct intel_engine_cs *engine)
* user interrupt and CSB is processed.
*/
if (GEM_SHOW_DEBUG() &&
- !i915_request_completed(*execlists->active) &&
- !reset_in_progress(execlists)) {
- struct i915_request *rq __maybe_unused =
- *execlists->active;
+ !i915_request_completed(*execlists->active)) {
+ struct i915_request *rq = *execlists->active;
const u32 *regs __maybe_unused =
rq->context->lrc_reg_state;
+ /*
+ * Flush the breadcrumb before crying foul.
+ *
+ * Since we have hit this on icl and seen the
+ * breadcrumb advance as we print out the debug
+ * info (so the problem corrected itself without
+ * lasting damage), and we know that icl suffers
+ * from missing global observation points in
+ * execlists, presume that affects even more
+ * coherency.
+ */
+ flush_hwsp(rq);
+
ENGINE_TRACE(engine,
"ring:{start:0x%08x, head:%04x, tail:%04x, ctl:%08x, mode:%08x}\n",
ENGINE_READ(engine, RING_START),
@@ -2436,7 +2539,10 @@ static void process_csb(struct intel_engine_cs *engine)
regs[CTX_RING_HEAD],
regs[CTX_RING_TAIL]);
- GEM_BUG_ON("context completed before request");
+ /* Still? Declare it kaput! */
+ if (!i915_request_completed(rq) &&
+ !reset_in_progress(execlists))
+ GEM_BUG_ON("context completed before request");
}
execlists_schedule_out(*execlists->active++);
@@ -2726,6 +2832,45 @@ err_cap:
return NULL;
}
+static struct i915_request *
+active_context(struct intel_engine_cs *engine, u32 ccid)
+{
+ const struct intel_engine_execlists * const el = &engine->execlists;
+ struct i915_request * const *port, *rq;
+
+ /*
+ * Use the most recent result from process_csb(), but just in case
+ * we trigger an error (via interrupt) before the first CS event has
+ * been written, peek at the next submission.
+ */
+
+ for (port = el->active; (rq = *port); port++) {
+ if (upper_32_bits(rq->context->lrc_desc) == ccid) {
+ ENGINE_TRACE(engine,
+ "ccid found at active:%zd\n",
+ port - el->active);
+ return rq;
+ }
+ }
+
+ for (port = el->pending; (rq = *port); port++) {
+ if (upper_32_bits(rq->context->lrc_desc) == ccid) {
+ ENGINE_TRACE(engine,
+ "ccid found at pending:%zd\n",
+ port - el->pending);
+ return rq;
+ }
+ }
+
+ ENGINE_TRACE(engine, "ccid:%x not found\n", ccid);
+ return NULL;
+}
+
+static u32 active_ccid(struct intel_engine_cs *engine)
+{
+ return ENGINE_READ_FW(engine, RING_EXECLIST_STATUS_HI);
+}
+
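Why active_context() searches both arrays: the CS error interrupt can arrive before the corresponding CSB event has been processed, so the software view (execlists_active()) may lag the hardware. In short:
/*
 * el->active  - most recent result from process_csb()
 * el->pending - submission whose CS events are still in flight
 * Matching the CCID from RING_EXECLIST_STATUS_HI against both closes
 * that window for execlists_capture() below.
 */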
static bool execlists_capture(struct intel_engine_cs *engine)
{
struct execlists_capture *cap;
@@ -2743,7 +2888,7 @@ static bool execlists_capture(struct intel_engine_cs *engine)
return true;
spin_lock_irq(&engine->active.lock);
- cap->rq = execlists_active(&engine->execlists);
+ cap->rq = active_context(engine, active_ccid(engine));
if (cap->rq) {
cap->rq = active_request(cap->rq->context->timeline, cap->rq);
cap->rq = i915_request_get_rcu(cap->rq);
@@ -2891,10 +3036,14 @@ static void __submit_queue_imm(struct intel_engine_cs *engine)
if (reset_in_progress(execlists))
return; /* defer until we restart the engine following reset */
- if (execlists->tasklet.func == execlists_submission_tasklet)
- __execlists_submission_tasklet(engine);
- else
- tasklet_hi_schedule(&execlists->tasklet);
+ /* Hopefully we clear execlists->pending[] to let us through */
+ if (READ_ONCE(execlists->pending[0]) &&
+ tasklet_trylock(&execlists->tasklet)) {
+ process_csb(engine);
+ tasklet_unlock(&execlists->tasklet);
+ }
+
+ __execlists_submission_tasklet(engine);
}
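The rewritten direct-submission path above is easier to follow with the intent spelled out (editorial gloss, not patch content):
/*
 * If a previous ELSP write is still pending, try to consume its CSB
 * events first -- tasklet_trylock() excludes a concurrently running
 * tasklet -- so that pending[] is clear and the new submission can be
 * written to the ELSP immediately instead of bouncing via the tasklet.
 */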
static void submit_queue(struct intel_engine_cs *engine,
@@ -2980,7 +3129,7 @@ check_redzone(const void *vaddr, const struct intel_engine_cs *engine)
vaddr += engine->context_size;
if (memchr_inv(vaddr, CONTEXT_REDZONE, I915_GTT_PAGE_SIZE))
- dev_err_once(engine->i915->drm.dev,
+ drm_err_once(&engine->i915->drm,
"%s context redzone overwritten!\n",
engine->name);
}
@@ -3432,7 +3581,8 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
ret = lrc_setup_wa_ctx(engine);
if (ret) {
- DRM_DEBUG_DRIVER("Failed to setup context WA page: %d\n", ret);
+ drm_dbg(&engine->i915->drm,
+ "Failed to setup context WA page: %d\n", ret);
return ret;
}
@@ -3475,7 +3625,7 @@ static void enable_error_interrupt(struct intel_engine_cs *engine)
status = ENGINE_READ(engine, RING_ESR);
if (unlikely(status)) {
- dev_err(engine->i915->drm.dev,
+ drm_err(&engine->i915->drm,
"engine '%s' resumed still in error: %08x\n",
engine->name, status);
__intel_gt_reset(engine->gt, engine->mask);
@@ -3539,7 +3689,8 @@ static bool unexpected_starting_state(struct intel_engine_cs *engine)
bool unexpected = false;
if (ENGINE_READ_FW(engine, RING_MI_MODE) & STOP_RING) {
- DRM_DEBUG_DRIVER("STOP_RING still set in RING_MI_MODE\n");
+ drm_dbg(&engine->i915->drm,
+ "STOP_RING still set in RING_MI_MODE\n");
unexpected = true;
}
@@ -3599,6 +3750,7 @@ static void execlists_reset_prepare(struct intel_engine_cs *engine)
*
* FIXME: Wa for more modern gens needs to be validated
*/
+ ring_set_paused(engine, 1);
intel_engine_stop_cs(engine);
}
@@ -4439,6 +4591,7 @@ logical_ring_default_irqs(struct intel_engine_cs *engine)
engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << shift;
engine->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift;
engine->irq_keep_mask |= GT_CS_MASTER_ERROR_INTERRUPT << shift;
+ engine->irq_keep_mask |= GT_WAIT_SEMAPHORE_INTERRUPT << shift;
}
static void rcs_submission_override(struct intel_engine_cs *engine)
@@ -4483,7 +4636,7 @@ int intel_execlists_submission_setup(struct intel_engine_cs *engine)
* because we only expect rare glitches but nothing
* critical to prevent us from using GPU
*/
- DRM_ERROR("WA batch buffer initialization failed\n");
+ drm_err(&i915->drm, "WA batch buffer initialization failed\n");
if (HAS_LOGICAL_RING_ELSQ(i915)) {
execlists->submit_reg = uncore->regs +
@@ -4565,6 +4718,7 @@ static void init_common_reg_state(u32 * const regs,
regs[CTX_CONTEXT_CONTROL] = ctl;
regs[CTX_RING_CTL] = RING_CTL_SIZE(ring->size) | RING_VALID;
+ regs[CTX_TIMESTAMP] = 0;
}
static void init_wa_bb_reg_state(u32 * const regs,
@@ -4658,7 +4812,8 @@ populate_lr_context(struct intel_context *ce,
vaddr = i915_gem_object_pin_map(ctx_obj, I915_MAP_WB);
if (IS_ERR(vaddr)) {
ret = PTR_ERR(vaddr);
- DRM_DEBUG_DRIVER("Could not map object pages! (%d)\n", ret);
+ drm_dbg(&engine->i915->drm,
+ "Could not map object pages! (%d)\n", ret);
return ret;
}
@@ -4751,7 +4906,8 @@ static int __execlists_context_alloc(struct intel_context *ce,
ret = populate_lr_context(ce, ctx_obj, engine, ring);
if (ret) {
- DRM_DEBUG_DRIVER("Failed to populate LRC: %d\n", ret);
+ drm_dbg(&engine->i915->drm,
+ "Failed to populate LRC: %d\n", ret);
goto error_ring_free;
}
@@ -4804,6 +4960,8 @@ static void virtual_context_destroy(struct kref *kref)
__execlists_context_fini(&ve->context);
intel_context_fini(&ve->context);
+ intel_engine_free_request_pool(&ve->base);
+
kfree(ve->bonds);
kfree(ve);
}
@@ -4984,10 +5142,8 @@ static void virtual_submission_tasklet(unsigned long data)
submit_engine:
GEM_BUG_ON(RB_EMPTY_NODE(&node->rb));
node->prio = prio;
- if (first && prio > sibling->execlists.queue_priority_hint) {
- sibling->execlists.queue_priority_hint = prio;
+ if (first && prio > sibling->execlists.queue_priority_hint)
tasklet_hi_schedule(&sibling->execlists.tasklet);
- }
spin_unlock(&sibling->active.lock);
}
diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.c b/drivers/gpu/drm/i915/gt/intel_rc6.c
index 66c07c32745c..1c1923ec8be7 100644
--- a/drivers/gpu/drm/i915/gt/intel_rc6.c
+++ b/drivers/gpu/drm/i915/gt/intel_rc6.c
@@ -246,16 +246,18 @@ static void gen6_rc6_enable(struct intel_rc6 *rc6)
ret = sandybridge_pcode_read(i915, GEN6_PCODE_READ_RC6VIDS,
&rc6vids, NULL);
if (IS_GEN(i915, 6) && ret) {
- DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n");
+ drm_dbg(&i915->drm, "Couldn't check for BIOS workaround\n");
} else if (IS_GEN(i915, 6) &&
(GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
- DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
- GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
+ drm_dbg(&i915->drm,
+ "You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
+ GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
rc6vids &= 0xffff00;
rc6vids |= GEN6_ENCODE_RC6_VID(450);
ret = sandybridge_pcode_write(i915, GEN6_PCODE_WRITE_RC6VIDS, rc6vids);
if (ret)
- DRM_ERROR("Couldn't fix incorrect rc6 voltage\n");
+ drm_err(&i915->drm,
+ "Couldn't fix incorrect rc6 voltage\n");
}
}
@@ -263,14 +265,15 @@ static void gen6_rc6_enable(struct intel_rc6 *rc6)
static int chv_rc6_init(struct intel_rc6 *rc6)
{
struct intel_uncore *uncore = rc6_to_uncore(rc6);
+ struct drm_i915_private *i915 = rc6_to_i915(rc6);
resource_size_t pctx_paddr, paddr;
resource_size_t pctx_size = 32 * SZ_1K;
u32 pcbr;
pcbr = intel_uncore_read(uncore, VLV_PCBR);
if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) {
- DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
- paddr = rc6_to_i915(rc6)->dsm.end + 1 - pctx_size;
+ drm_dbg(&i915->drm, "BIOS didn't set up PCBR, fixing up\n");
+ paddr = i915->dsm.end + 1 - pctx_size;
GEM_BUG_ON(paddr > U32_MAX);
pctx_paddr = (paddr & ~4095);
@@ -304,7 +307,7 @@ static int vlv_rc6_init(struct intel_rc6 *rc6)
goto out;
}
- DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
+ drm_dbg(&i915->drm, "BIOS didn't set up PCBR, fixing up\n");
/*
* From the Gunit register HAS:
@@ -316,7 +319,8 @@ static int vlv_rc6_init(struct intel_rc6 *rc6)
*/
pctx = i915_gem_object_create_stolen(i915, pctx_size);
if (IS_ERR(pctx)) {
- DRM_DEBUG("not enough stolen space for PCTX, disabling\n");
+ drm_dbg(&i915->drm,
+ "not enough stolen space for PCTX, disabling\n");
return PTR_ERR(pctx);
}
@@ -398,14 +402,14 @@ static bool bxt_check_bios_rc6_setup(struct intel_rc6 *rc6)
rc_sw_target = intel_uncore_read(uncore, GEN6_RC_STATE);
rc_sw_target &= RC_SW_TARGET_STATE_MASK;
rc_sw_target >>= RC_SW_TARGET_STATE_SHIFT;
- DRM_DEBUG_DRIVER("BIOS enabled RC states: "
+ drm_dbg(&i915->drm, "BIOS enabled RC states: "
"HW_CTRL %s HW_RC6 %s SW_TARGET_STATE %x\n",
onoff(rc_ctl & GEN6_RC_CTL_HW_ENABLE),
onoff(rc_ctl & GEN6_RC_CTL_RC6_ENABLE),
rc_sw_target);
if (!(intel_uncore_read(uncore, RC6_LOCATION) & RC6_CTX_IN_DRAM)) {
- DRM_DEBUG_DRIVER("RC6 Base location not set properly.\n");
+ drm_dbg(&i915->drm, "RC6 Base location not set properly.\n");
enable_rc6 = false;
}
@@ -417,7 +421,7 @@ static bool bxt_check_bios_rc6_setup(struct intel_rc6 *rc6)
intel_uncore_read(uncore, RC6_CTX_BASE) & RC6_CTX_BASE_MASK;
if (!(rc6_ctx_base >= i915->dsm_reserved.start &&
rc6_ctx_base + PAGE_SIZE < i915->dsm_reserved.end)) {
- DRM_DEBUG_DRIVER("RC6 Base address not as expected.\n");
+ drm_dbg(&i915->drm, "RC6 Base address not as expected.\n");
enable_rc6 = false;
}
@@ -425,24 +429,25 @@ static bool bxt_check_bios_rc6_setup(struct intel_rc6 *rc6)
(intel_uncore_read(uncore, PWRCTX_MAXCNT_VCSUNIT0) & IDLE_TIME_MASK) > 1 &&
(intel_uncore_read(uncore, PWRCTX_MAXCNT_BCSUNIT) & IDLE_TIME_MASK) > 1 &&
(intel_uncore_read(uncore, PWRCTX_MAXCNT_VECSUNIT) & IDLE_TIME_MASK) > 1)) {
- DRM_DEBUG_DRIVER("Engine Idle wait time not set properly.\n");
+ drm_dbg(&i915->drm,
+ "Engine Idle wait time not set properly.\n");
enable_rc6 = false;
}
if (!intel_uncore_read(uncore, GEN8_PUSHBUS_CONTROL) ||
!intel_uncore_read(uncore, GEN8_PUSHBUS_ENABLE) ||
!intel_uncore_read(uncore, GEN8_PUSHBUS_SHIFT)) {
- DRM_DEBUG_DRIVER("Pushbus not setup properly.\n");
+ drm_dbg(&i915->drm, "Pushbus not setup properly.\n");
enable_rc6 = false;
}
if (!intel_uncore_read(uncore, GEN6_GFXPAUSE)) {
- DRM_DEBUG_DRIVER("GFX pause not setup properly.\n");
+ drm_dbg(&i915->drm, "GFX pause not setup properly.\n");
enable_rc6 = false;
}
if (!intel_uncore_read(uncore, GEN8_MISC_CTRL0)) {
- DRM_DEBUG_DRIVER("GPM control not setup properly.\n");
+ drm_dbg(&i915->drm, "GPM control not setup properly.\n");
enable_rc6 = false;
}
@@ -463,7 +468,7 @@ static bool rc6_supported(struct intel_rc6 *rc6)
return false;
if (IS_GEN9_LP(i915) && !bxt_check_bios_rc6_setup(rc6)) {
- dev_notice(i915->drm.dev,
+ drm_notice(&i915->drm,
"RC6 and powersaving disabled by BIOS\n");
return false;
}
@@ -495,7 +500,7 @@ static bool pctx_corrupted(struct intel_rc6 *rc6)
if (intel_uncore_read(rc6_to_uncore(rc6), GEN8_RC6_CTX_INFO))
return false;
- dev_notice(i915->drm.dev,
+ drm_notice(&i915->drm,
"RC6 context corruption, disabling runtime power management\n");
return true;
}
@@ -603,6 +608,7 @@ void intel_rc6_unpark(struct intel_rc6 *rc6)
void intel_rc6_park(struct intel_rc6 *rc6)
{
struct intel_uncore *uncore = rc6_to_uncore(rc6);
+ unsigned int target;
if (!rc6->enabled)
return;
@@ -617,7 +623,14 @@ void intel_rc6_park(struct intel_rc6 *rc6)
/* Turn off the HW timers and go directly to rc6 */
set(uncore, GEN6_RC_CONTROL, GEN6_RC_CTL_RC6_ENABLE);
- set(uncore, GEN6_RC_STATE, 0x4 << RC_SW_TARGET_STATE_SHIFT);
+
+ if (HAS_RC6pp(rc6_to_i915(rc6)))
+ target = 0x6; /* deepest rc6 */
+ else if (HAS_RC6p(rc6_to_i915(rc6)))
+ target = 0x5; /* deep rc6 */
+ else
+ target = 0x4; /* normal rc6 */
+ set(uncore, GEN6_RC_STATE, target << RC_SW_TARGET_STATE_SHIFT);
}
void intel_rc6_disable(struct intel_rc6 *rc6)
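For reference, the target values chosen above map as follows (derived from the HAS_RC6p/HAS_RC6pp checks in the hunk):
/*
 * GEN6_RC_STATE software target encodings used on park:
 *   0x4 - RC6   (normal, as before)
 *   0x5 - RC6p  (deep,    when HAS_RC6p)
 *   0x6 - RC6pp (deepest, when HAS_RC6pp)
 * The deepest state the platform supports is now requested.
 */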
diff --git a/drivers/gpu/drm/i915/gt/intel_renderstate.c b/drivers/gpu/drm/i915/gt/intel_renderstate.c
index 5954ecc3207f..26e78db33675 100644
--- a/drivers/gpu/drm/i915/gt/intel_renderstate.c
+++ b/drivers/gpu/drm/i915/gt/intel_renderstate.c
@@ -102,7 +102,7 @@ static int render_state_setup(struct intel_renderstate *so,
}
if (rodata->reloc[reloc_index] != -1) {
- DRM_ERROR("only %d relocs resolved\n", reloc_index);
+ drm_err(&i915->drm, "only %d relocs resolved\n", reloc_index);
goto err;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index 8b170c1876b3..39070b514e65 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -88,6 +88,11 @@ static bool mark_guilty(struct i915_request *rq)
bool banned;
int i;
+ if (intel_context_is_closed(rq->context)) {
+ intel_context_set_banned(rq->context);
+ return true;
+ }
+
rcu_read_lock();
ctx = rcu_dereference(rq->context->gem_context);
if (ctx && !kref_get_unless_zero(&ctx->ref))
@@ -104,7 +109,7 @@ static bool mark_guilty(struct i915_request *rq)
goto out;
}
- dev_notice(ctx->i915->drm.dev,
+ drm_notice(&ctx->i915->drm,
"%s context reset due to GPU hang\n",
ctx->name);
@@ -750,7 +755,7 @@ static int gt_reset(struct intel_gt *gt, intel_engine_mask_t stalled_mask)
for_each_engine(engine, gt, id)
__intel_engine_reset(engine, stalled_mask & engine->mask);
- i915_gem_restore_fences(gt->ggtt);
+ intel_ggtt_restore_fences(gt->ggtt);
return err;
}
@@ -1026,7 +1031,7 @@ void intel_gt_reset(struct intel_gt *gt,
goto unlock;
if (reason)
- dev_notice(gt->i915->drm.dev,
+ drm_notice(&gt->i915->drm,
"Resetting chip for %s\n", reason);
atomic_inc(&gt->i915->gpu_error.reset_count);
@@ -1034,7 +1039,7 @@ void intel_gt_reset(struct intel_gt *gt,
if (!intel_has_gpu_reset(gt)) {
if (i915_modparams.reset)
- dev_err(gt->i915->drm.dev, "GPU reset not supported\n");
+ drm_err(&gt->i915->drm, "GPU reset not supported\n");
else
drm_dbg(&gt->i915->drm, "GPU reset disabled\n");
goto error;
@@ -1044,7 +1049,7 @@ void intel_gt_reset(struct intel_gt *gt,
intel_runtime_pm_disable_interrupts(gt->i915);
if (do_reset(gt, stalled_mask)) {
- dev_err(gt->i915->drm.dev, "Failed to reset chip\n");
+ drm_err(&gt->i915->drm, "Failed to reset chip\n");
goto taint;
}
@@ -1106,7 +1111,7 @@ static inline int intel_gt_reset_engine(struct intel_engine_cs *engine)
/**
* intel_engine_reset - reset GPU engine to recover from a hang
* @engine: engine to reset
- * @msg: reason for GPU reset; or NULL for no dev_notice()
+ * @msg: reason for GPU reset; or NULL for no drm_notice()
*
* Reset a specific GPU engine. Useful if a hang is detected.
* Returns zero on successful reset or otherwise an error code.
@@ -1131,7 +1136,7 @@ int intel_engine_reset(struct intel_engine_cs *engine, const char *msg)
reset_prepare_engine(engine);
if (msg)
- dev_notice(engine->i915->drm.dev,
+ drm_notice(&engine->i915->drm,
"Resetting %s for %s\n", engine->name, msg);
atomic_inc(&engine->i915->gpu_error.reset_engine_count[engine->uabi_class]);
@@ -1376,7 +1381,7 @@ static void intel_wedge_me(struct work_struct *work)
{
struct intel_wedge_me *w = container_of(work, typeof(*w), work.work);
- dev_err(w->gt->i915->drm.dev,
+ drm_err(&w->gt->i915->drm,
"%s timed out, cancelling all in-flight rendering.\n",
w->name);
intel_gt_set_wedged(w->gt);
diff --git a/drivers/gpu/drm/i915/gt/intel_ring.h b/drivers/gpu/drm/i915/gt/intel_ring.h
index 5bdce24994aa..cc0ebca65167 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring.h
+++ b/drivers/gpu/drm/i915/gt/intel_ring.h
@@ -88,6 +88,8 @@ static inline u32 intel_ring_offset(const struct i915_request *rq, void *addr)
static inline void
assert_ring_tail_valid(const struct intel_ring *ring, unsigned int tail)
{
+ unsigned int head = READ_ONCE(ring->head);
+
GEM_BUG_ON(!intel_ring_offset_valid(ring, tail));
/*
@@ -105,8 +107,7 @@ assert_ring_tail_valid(const struct intel_ring *ring, unsigned int tail)
* into the same cacheline as ring->head.
*/
#define cacheline(a) round_down(a, CACHELINE_BYTES)
- GEM_BUG_ON(cacheline(tail) == cacheline(ring->head) &&
- tail < ring->head);
+ GEM_BUG_ON(cacheline(tail) == cacheline(head) && tail < head);
#undef cacheline
}
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
index 1424582e4a9b..d015f7b8b28e 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
@@ -577,8 +577,9 @@ static void flush_cs_tlb(struct intel_engine_cs *engine)
RING_INSTPM(engine->mmio_base),
INSTPM_SYNC_FLUSH, 0,
1000))
- DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
- engine->name);
+ drm_err(&dev_priv->drm,
+ "%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
+ engine->name);
}
static void ring_setup_status_page(struct intel_engine_cs *engine)
@@ -601,8 +602,9 @@ static bool stop_ring(struct intel_engine_cs *engine)
MODE_IDLE,
MODE_IDLE,
1000)) {
- DRM_ERROR("%s : timed out trying to stop ring\n",
- engine->name);
+ drm_err(&dev_priv->drm,
+ "%s : timed out trying to stop ring\n",
+ engine->name);
/*
* Sometimes we observe that the idle flag is not
@@ -661,22 +663,23 @@ static int xcs_resume(struct intel_engine_cs *engine)
/* WaClearRingBufHeadRegAtInit:ctg,elk */
if (!stop_ring(engine)) {
/* G45 ring initialization often fails to reset head to zero */
- DRM_DEBUG_DRIVER("%s head not reset to zero "
+ drm_dbg(&dev_priv->drm, "%s head not reset to zero "
+ "ctl %08x head %08x tail %08x start %08x\n",
+ engine->name,
+ ENGINE_READ(engine, RING_CTL),
+ ENGINE_READ(engine, RING_HEAD),
+ ENGINE_READ(engine, RING_TAIL),
+ ENGINE_READ(engine, RING_START));
+
+ if (!stop_ring(engine)) {
+ drm_err(&dev_priv->drm,
+ "failed to set %s head to zero "
"ctl %08x head %08x tail %08x start %08x\n",
engine->name,
ENGINE_READ(engine, RING_CTL),
ENGINE_READ(engine, RING_HEAD),
ENGINE_READ(engine, RING_TAIL),
ENGINE_READ(engine, RING_START));
-
- if (!stop_ring(engine)) {
- DRM_ERROR("failed to set %s head to zero "
- "ctl %08x head %08x tail %08x start %08x\n",
- engine->name,
- ENGINE_READ(engine, RING_CTL),
- ENGINE_READ(engine, RING_HEAD),
- ENGINE_READ(engine, RING_TAIL),
- ENGINE_READ(engine, RING_START));
ret = -EIO;
goto out;
}
@@ -719,7 +722,7 @@ static int xcs_resume(struct intel_engine_cs *engine)
RING_CTL(engine->mmio_base),
RING_VALID, RING_VALID,
50)) {
- DRM_ERROR("%s initialization failed "
+ drm_err(&dev_priv->drm, "%s initialization failed "
"ctl %08x (valid? %d) head %08x [%08x] tail %08x [%08x] start %08x [expected %08x]\n",
engine->name,
ENGINE_READ(engine, RING_CTL),
@@ -2088,7 +2091,7 @@ int intel_ring_submission_setup(struct intel_engine_cs *engine)
GEM_BUG_ON(timeline->hwsp_ggtt != engine->status_page.vma);
- if (IS_GEN(engine->i915, 7) && engine->class == RENDER_CLASS) {
+ if (IS_HASWELL(engine->i915) && engine->class == RENDER_CLASS) {
err = gen7_ctx_switch_bb_init(engine);
if (err)
goto err_ring_unpin;
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c
index 87f9638d2cbf..4dcfae16a7ce 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps.c
+++ b/drivers/gpu/drm/i915/gt/intel_rps.c
@@ -81,13 +81,14 @@ static void rps_enable_interrupts(struct intel_rps *rps)
events = (GEN6_PM_RP_UP_THRESHOLD |
GEN6_PM_RP_DOWN_THRESHOLD |
GEN6_PM_RP_DOWN_TIMEOUT);
-
WRITE_ONCE(rps->pm_events, events);
+
spin_lock_irq(&gt->irq_lock);
gen6_gt_pm_enable_irq(gt, rps->pm_events);
spin_unlock_irq(&gt->irq_lock);
- set(gt->uncore, GEN6_PMINTRMSK, rps_pm_mask(rps, rps->cur_freq));
+ intel_uncore_write(gt->uncore,
+ GEN6_PMINTRMSK, rps_pm_mask(rps, rps->last_freq));
}
static void gen6_rps_reset_interrupts(struct intel_rps *rps)
@@ -120,7 +121,9 @@ static void rps_disable_interrupts(struct intel_rps *rps)
struct intel_gt *gt = rps_to_gt(rps);
WRITE_ONCE(rps->pm_events, 0);
- set(gt->uncore, GEN6_PMINTRMSK, rps_pm_sanitize_mask(rps, ~0u));
+
+ intel_uncore_write(gt->uncore,
+ GEN6_PMINTRMSK, rps_pm_sanitize_mask(rps, ~0u));
spin_lock_irq(&gt->irq_lock);
gen6_gt_pm_disable_irq(gt, GEN6_PM_RPS_EVENTS);
@@ -183,14 +186,12 @@ static void gen5_rps_init(struct intel_rps *rps)
fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
MEMMODE_FSTART_SHIFT;
- DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
- fmax, fmin, fstart);
+ drm_dbg(&i915->drm, "fmax: %d, fmin: %d, fstart: %d\n",
+ fmax, fmin, fstart);
rps->min_freq = fmax;
+ rps->efficient_freq = fstart;
rps->max_freq = fmin;
-
- rps->idle_freq = rps->min_freq;
- rps->cur_freq = rps->idle_freq;
}
static unsigned long
@@ -453,7 +454,8 @@ static bool gen5_rps_enable(struct intel_rps *rps)
if (wait_for_atomic((intel_uncore_read(uncore, MEMSWCTL) &
MEMCTL_CMD_STS) == 0, 10))
- DRM_ERROR("stuck trying to change perf mode\n");
+ drm_err(&uncore->i915->drm,
+ "stuck trying to change perf mode\n");
mdelay(1);
gen5_rps_set(rps, rps->cur_freq);
@@ -712,8 +714,6 @@ static int rps_set(struct intel_rps *rps, u8 val, bool update)
void intel_rps_unpark(struct intel_rps *rps)
{
- u8 freq;
-
if (!rps->enabled)
return;
@@ -725,9 +725,10 @@ void intel_rps_unpark(struct intel_rps *rps)
WRITE_ONCE(rps->active, true);
- freq = max(rps->cur_freq, rps->efficient_freq),
- freq = clamp(freq, rps->min_freq_softlimit, rps->max_freq_softlimit);
- intel_rps_set(rps, freq);
+ intel_rps_set(rps,
+ clamp(rps->cur_freq,
+ rps->min_freq_softlimit,
+ rps->max_freq_softlimit));
rps->last_adj = 0;
@@ -770,6 +771,19 @@ void intel_rps_park(struct intel_rps *rps)
intel_uncore_forcewake_get(rps_to_uncore(rps), FORCEWAKE_MEDIA);
rps_set(rps, rps->idle_freq, false);
intel_uncore_forcewake_put(rps_to_uncore(rps), FORCEWAKE_MEDIA);
+
+ /*
+ * Since we will try to restart from the previously requested
+ * frequency on unparking, treat this idle point as a downclock
+ * interrupt and reduce the frequency for resume. If we park/unpark
+ * more frequently than the rps worker can run, we will not respond
+ * to any EI and never see a change in frequency.
+ *
+ * (Note we accommodate Cherryview's limitation of only using an
+ * even bin by applying it to all.)
+ */
+ rps->cur_freq =
+ max_t(int, round_down(rps->cur_freq - 1, 2), rps->min_freq);
}
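
To make the comment above concrete, here is the parking arithmetic as a worked example, with hypothetical local stand-ins for the kernel's round_down() and max_t() helpers so the sketch compiles on its own:

#include <stdio.h>

/* Local stand-ins for the kernel helpers (round_down needs a power of two). */
#define round_down(x, y) ((x) & ~((y) - 1))
#define max_t(type, a, b) ((type)(a) > (type)(b) ? (type)(a) : (type)(b))

static int parked_freq(int cur, int min)
{
        /* Drop one bin, round down to an even bin (the Cherryview rule,
         * applied everywhere), but never below the minimum. */
        return max_t(int, round_down(cur - 1, 2), min);
}

int main(void)
{
        printf("%d\n", parked_freq(11, 3));  /* 10: next even bin down */
        printf("%d\n", parked_freq(4, 6));   /* 6: clamped to min_freq */
        return 0;
}
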
void intel_rps_boost(struct i915_request *rq)
@@ -880,12 +894,13 @@ static void gen6_rps_init(struct intel_rps *rps)
static bool rps_reset(struct intel_rps *rps)
{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
/* force a reset */
rps->power.mode = -1;
rps->last_freq = -1;
if (rps_set(rps, rps->min_freq, true)) {
- DRM_ERROR("Failed to reset RPS to initial values\n");
+ drm_err(&i915->drm, "Failed to reset RPS to initial values\n");
return false;
}
@@ -1036,8 +1051,8 @@ static bool chv_rps_enable(struct intel_rps *rps)
drm_WARN_ONCE(&i915->drm, (val & GPLLENABLE) == 0,
"GPLL not enabled\n");
- DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
- DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
+ drm_dbg(&i915->drm, "GPLL enabled? %s\n", yesno(val & GPLLENABLE));
+ drm_dbg(&i915->drm, "GPU status: 0x%08x\n", val);
return rps_reset(rps);
}
@@ -1134,8 +1149,8 @@ static bool vlv_rps_enable(struct intel_rps *rps)
drm_WARN_ONCE(&i915->drm, (val & GPLLENABLE) == 0,
"GPLL not enabled\n");
- DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
- DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
+ drm_dbg(&i915->drm, "GPLL enabled? %s\n", yesno(val & GPLLENABLE));
+ drm_dbg(&i915->drm, "GPU status: 0x%08x\n", val);
return rps_reset(rps);
}
@@ -1292,7 +1307,8 @@ static void vlv_init_gpll_ref_freq(struct intel_rps *rps)
CCK_GPLL_CLOCK_CONTROL,
i915->czclk_freq);
- DRM_DEBUG_DRIVER("GPLL reference freq: %d kHz\n", rps->gpll_ref_freq);
+ drm_dbg(&i915->drm, "GPLL reference freq: %d kHz\n",
+ rps->gpll_ref_freq);
}
static void vlv_rps_init(struct intel_rps *rps)
@@ -1320,28 +1336,24 @@ static void vlv_rps_init(struct intel_rps *rps)
i915->mem_freq = 1333;
break;
}
- DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", i915->mem_freq);
+ drm_dbg(&i915->drm, "DDR speed: %d MHz\n", i915->mem_freq);
rps->max_freq = vlv_rps_max_freq(rps);
rps->rp0_freq = rps->max_freq;
- DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
- intel_gpu_freq(rps, rps->max_freq),
- rps->max_freq);
+ drm_dbg(&i915->drm, "max GPU freq: %d MHz (%u)\n",
+ intel_gpu_freq(rps, rps->max_freq), rps->max_freq);
rps->efficient_freq = vlv_rps_rpe_freq(rps);
- DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
- intel_gpu_freq(rps, rps->efficient_freq),
- rps->efficient_freq);
+ drm_dbg(&i915->drm, "RPe GPU freq: %d MHz (%u)\n",
+ intel_gpu_freq(rps, rps->efficient_freq), rps->efficient_freq);
rps->rp1_freq = vlv_rps_guar_freq(rps);
- DRM_DEBUG_DRIVER("RP1(Guar Freq) GPU freq: %d MHz (%u)\n",
- intel_gpu_freq(rps, rps->rp1_freq),
- rps->rp1_freq);
+ drm_dbg(&i915->drm, "RP1(Guar Freq) GPU freq: %d MHz (%u)\n",
+ intel_gpu_freq(rps, rps->rp1_freq), rps->rp1_freq);
rps->min_freq = vlv_rps_min_freq(rps);
- DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
- intel_gpu_freq(rps, rps->min_freq),
- rps->min_freq);
+ drm_dbg(&i915->drm, "min GPU freq: %d MHz (%u)\n",
+ intel_gpu_freq(rps, rps->min_freq), rps->min_freq);
vlv_iosf_sb_put(i915,
BIT(VLV_IOSF_SB_PUNIT) |
@@ -1371,28 +1383,24 @@ static void chv_rps_init(struct intel_rps *rps)
i915->mem_freq = 1600;
break;
}
- DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", i915->mem_freq);
+ drm_dbg(&i915->drm, "DDR speed: %d MHz\n", i915->mem_freq);
rps->max_freq = chv_rps_max_freq(rps);
rps->rp0_freq = rps->max_freq;
- DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
- intel_gpu_freq(rps, rps->max_freq),
- rps->max_freq);
+ drm_dbg(&i915->drm, "max GPU freq: %d MHz (%u)\n",
+ intel_gpu_freq(rps, rps->max_freq), rps->max_freq);
rps->efficient_freq = chv_rps_rpe_freq(rps);
- DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
- intel_gpu_freq(rps, rps->efficient_freq),
- rps->efficient_freq);
+ drm_dbg(&i915->drm, "RPe GPU freq: %d MHz (%u)\n",
+ intel_gpu_freq(rps, rps->efficient_freq), rps->efficient_freq);
rps->rp1_freq = chv_rps_guar_freq(rps);
- DRM_DEBUG_DRIVER("RP1(Guar) GPU freq: %d MHz (%u)\n",
- intel_gpu_freq(rps, rps->rp1_freq),
- rps->rp1_freq);
+ drm_dbg(&i915->drm, "RP1(Guar) GPU freq: %d MHz (%u)\n",
+ intel_gpu_freq(rps, rps->rp1_freq), rps->rp1_freq);
rps->min_freq = chv_rps_min_freq(rps);
- DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
- intel_gpu_freq(rps, rps->min_freq),
- rps->min_freq);
+ drm_dbg(&i915->drm, "min GPU freq: %d MHz (%u)\n",
+ intel_gpu_freq(rps, rps->min_freq), rps->min_freq);
vlv_iosf_sb_put(i915,
BIT(VLV_IOSF_SB_PUNIT) |
@@ -1455,6 +1463,7 @@ static void rps_work(struct work_struct *work)
{
struct intel_rps *rps = container_of(work, typeof(*rps), work);
struct intel_gt *gt = rps_to_gt(rps);
+ struct drm_i915_private *i915 = rps_to_i915(rps);
bool client_boost = false;
int new_freq, adj, min, max;
u32 pm_iir = 0;
@@ -1530,7 +1539,7 @@ static void rps_work(struct work_struct *work)
new_freq = clamp_t(int, new_freq, min, max);
if (intel_rps_set(rps, new_freq)) {
- DRM_DEBUG_DRIVER("Failed to set new GPU frequency\n");
+ drm_dbg(&i915->drm, "Failed to set new GPU frequency\n");
rps->last_adj = 0;
}
@@ -1652,9 +1661,10 @@ void intel_rps_init(struct intel_rps *rps)
sandybridge_pcode_read(i915, GEN6_READ_OC_PARAMS,
&params, NULL);
if (params & BIT(31)) { /* OC supported */
- DRM_DEBUG_DRIVER("Overclocking supported, max: %dMHz, overclock: %dMHz\n",
- (rps->max_freq & 0xff) * 50,
- (params & 0xff) * 50);
+ drm_dbg(&i915->drm,
+ "Overclocking supported, max: %dMHz, overclock: %dMHz\n",
+ (rps->max_freq & 0xff) * 50,
+ (params & 0xff) * 50);
rps->max_freq = params & 0xff;
}
}
@@ -1662,7 +1672,9 @@ void intel_rps_init(struct intel_rps *rps)
/* Finally allow us to boost to max by default */
rps->boost_freq = rps->max_freq;
rps->idle_freq = rps->min_freq;
- rps->cur_freq = rps->idle_freq;
+
+ /* Start in the middle, from here we will autotune based on workload */
+ rps->cur_freq = rps->efficient_freq;
rps->pm_intrmsk_mbz = 0;
@@ -1914,3 +1926,7 @@ bool i915_gpu_turbo_disable(void)
return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftest_rps.c"
+#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.c b/drivers/gpu/drm/i915/gt/intel_sseu.c
index 74f793423231..d173271c7397 100644
--- a/drivers/gpu/drm/i915/gt/intel_sseu.c
+++ b/drivers/gpu/drm/i915/gt/intel_sseu.c
@@ -65,7 +65,6 @@ u32 intel_sseu_make_rpcs(struct drm_i915_private *i915,
{
const struct sseu_dev_info *sseu = &RUNTIME_INFO(i915)->sseu;
bool subslice_pg = sseu->has_subslice_pg;
- struct intel_sseu ctx_sseu;
u8 slices, subslices;
u32 rpcs = 0;
@@ -78,31 +77,13 @@ u32 intel_sseu_make_rpcs(struct drm_i915_private *i915,
/*
* If i915/perf is active, we want a stable powergating configuration
- * on the system.
- *
- * We could choose full enablement, but on ICL we know there are use
- * cases which disable slices for functional, apart for performance
- * reasons. So in this case we select a known stable subset.
+ * on the system. Use the configuration pinned by i915/perf.
*/
- if (!i915->perf.exclusive_stream) {
- ctx_sseu = *req_sseu;
- } else {
- ctx_sseu = intel_sseu_from_device_info(sseu);
-
- if (IS_GEN(i915, 11)) {
- /*
- * We only need subslice count so it doesn't matter
- * which ones we select - just turn off low bits in the
- * amount of half of all available subslices per slice.
- */
- ctx_sseu.subslice_mask =
- ~(~0 << (hweight8(ctx_sseu.subslice_mask) / 2));
- ctx_sseu.slice_mask = 0x1;
- }
- }
+ if (i915->perf.exclusive_stream)
+ req_sseu = &i915->perf.sseu;
- slices = hweight8(ctx_sseu.slice_mask);
- subslices = hweight8(ctx_sseu.subslice_mask);
+ slices = hweight8(req_sseu->slice_mask);
+ subslices = hweight8(req_sseu->subslice_mask);
/*
* Since the SScount bitfield in GEN8_R_PWR_CLK_STATE is only three bits
@@ -175,13 +156,13 @@ u32 intel_sseu_make_rpcs(struct drm_i915_private *i915,
if (sseu->has_eu_pg) {
u32 val;
- val = ctx_sseu.min_eus_per_subslice << GEN8_RPCS_EU_MIN_SHIFT;
+ val = req_sseu->min_eus_per_subslice << GEN8_RPCS_EU_MIN_SHIFT;
GEM_BUG_ON(val & ~GEN8_RPCS_EU_MIN_MASK);
val &= GEN8_RPCS_EU_MIN_MASK;
rpcs |= val;
- val = ctx_sseu.max_eus_per_subslice << GEN8_RPCS_EU_MAX_SHIFT;
+ val = req_sseu->max_eus_per_subslice << GEN8_RPCS_EU_MAX_SHIFT;
GEM_BUG_ON(val & ~GEN8_RPCS_EU_MAX_MASK);
val &= GEN8_RPCS_EU_MAX_MASK;
diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.c b/drivers/gpu/drm/i915/gt/intel_timeline.c
index 91debbc97c9a..3779c2ae0d65 100644
--- a/drivers/gpu/drm/i915/gt/intel_timeline.c
+++ b/drivers/gpu/drm/i915/gt/intel_timeline.c
@@ -119,6 +119,15 @@ static void __idle_hwsp_free(struct intel_timeline_hwsp *hwsp, int cacheline)
spin_unlock_irqrestore(&gt->hwsp_lock, flags);
}
+static void __rcu_cacheline_free(struct rcu_head *rcu)
+{
+ struct intel_timeline_cacheline *cl =
+ container_of(rcu, typeof(*cl), rcu);
+
+ i915_active_fini(&cl->active);
+ kfree(cl);
+}
+
static void __idle_cacheline_free(struct intel_timeline_cacheline *cl)
{
GEM_BUG_ON(!i915_active_is_idle(&cl->active));
@@ -127,8 +136,7 @@ static void __idle_cacheline_free(struct intel_timeline_cacheline *cl)
i915_vma_put(cl->hwsp->vma);
__idle_hwsp_free(cl->hwsp, ptr_unmask_bits(cl->vaddr, CACHELINE_BITS));
- i915_active_fini(&cl->active);
- kfree_rcu(cl, rcu);
+ call_rcu(&cl->rcu, __rcu_cacheline_free);
}
__i915_active_call
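
The substitution of call_rcu() for kfree_rcu() above is needed because kfree_rcu() can only free the memory after the grace period, whereas the cacheline also needs i915_active_fini() run at that point. A toy model of the shape (no real RCU; the callback is invoked immediately purely to show the container_of() recovery plus the extra teardown step):

#include <stddef.h>
#include <stdlib.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct rcu_head_stub {
        void (*func)(struct rcu_head_stub *);
};

struct cacheline {
        int active;                     /* stands in for cl->active */
        struct rcu_head_stub rcu;
};

static void cacheline_free(struct rcu_head_stub *rcu)
{
        struct cacheline *cl = container_of(rcu, struct cacheline, rcu);

        cl->active = 0;                 /* teardown kfree_rcu() could not run */
        free(cl);
}

/* The kernel defers this past a grace period; the toy calls it directly. */
static void call_rcu_stub(struct rcu_head_stub *head,
                          void (*func)(struct rcu_head_stub *))
{
        head->func = func;
        head->func(head);
}

int main(void)
{
        struct cacheline *cl = calloc(1, sizeof(*cl));

        call_rcu_stub(&cl->rcu, cacheline_free);
        return 0;
}
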
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index 5176ad1a3976..adddc5c93b48 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -837,7 +837,7 @@ wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal)
intel_uncore_read(&i915->uncore, GEN10_MIRROR_FUSE3) &
GEN10_L3BANK_MASK;
- DRM_DEBUG_DRIVER("L3 fuse = %x\n", l3_fuse);
+ drm_dbg(&i915->drm, "L3 fuse = %x\n", l3_fuse);
l3_en = ~(l3_fuse << GEN10_L3BANK_PAIR_COUNT | l3_fuse);
} else {
l3_en = ~0;
@@ -846,7 +846,8 @@ wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal)
slice = fls(sseu->slice_mask) - 1;
subslice = fls(l3_en & intel_sseu_get_subslices(sseu, slice));
if (!subslice) {
- DRM_WARN("No common index found between subslice mask %x and L3 bank mask %x!\n",
+ drm_warn(&i915->drm,
+ "No common index found between subslice mask %x and L3 bank mask %x!\n",
intel_sseu_get_subslices(sseu, slice), l3_en);
subslice = fls(l3_en);
drm_WARN_ON(&i915->drm, !subslice);
@@ -861,7 +862,7 @@ wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal)
mcr_mask = GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK;
}
- DRM_DEBUG_DRIVER("MCR slice/subslice = %x\n", mcr);
+ drm_dbg(&i915->drm, "MCR slice/subslice = %x\n", mcr);
wa_write_masked_or(wal, GEN8_MCR_SELECTOR, mcr_mask, mcr);
}
@@ -942,6 +943,8 @@ icl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
static void
tgl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
+ wa_init_mcr(i915, wal);
+
/* Wa_1409420604:tgl */
if (IS_TGL_REVID(i915, TGL_REVID_A0, TGL_REVID_A0))
wa_write_or(wal,
@@ -1379,12 +1382,6 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
GEN7_FF_THREAD_MODE,
GEN12_FF_TESSELATION_DOP_GATE_DISABLE);
- /*
- * Wa_1409085225:tgl
- * Wa_14010229206:tgl
- */
- wa_masked_en(wal, GEN9_ROW_CHICKEN4, GEN12_DISABLE_TDL_PUSH);
-
/* Wa_1408615072:tgl */
wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2,
VSUNIT_CLKGATE_DIS_TGL);
@@ -1402,6 +1399,12 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
wa_masked_en(wal,
GEN9_CS_DEBUG_MODE1,
FF_DOP_CLOCK_GATE_DISABLE);
+
+ /*
+ * Wa_1409085225:tgl
+ * Wa_14010229206:tgl
+ */
+ wa_masked_en(wal, GEN9_ROW_CHICKEN4, GEN12_DISABLE_TDL_PUSH);
}
if (IS_GEN(i915, 11)) {
diff --git a/drivers/gpu/drm/i915/gt/selftest_gt_pm.c b/drivers/gpu/drm/i915/gt/selftest_gt_pm.c
index 09ff8e4f88af..c50bb502fe03 100644
--- a/drivers/gpu/drm/i915/gt/selftest_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/selftest_gt_pm.c
@@ -7,6 +7,7 @@
#include "selftest_llc.h"
#include "selftest_rc6.h"
+#include "selftest_rps.h"
static int live_gt_resume(void *arg)
{
@@ -52,6 +53,7 @@ int intel_gt_pm_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
SUBTEST(live_rc6_manual),
+ SUBTEST(live_rps_interrupt),
SUBTEST(live_gt_resume),
};
diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c
index 6f06ba750a0a..6f5e35afe1b2 100644
--- a/drivers/gpu/drm/i915/gt/selftest_lrc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c
@@ -68,26 +68,41 @@ static void engine_heartbeat_enable(struct intel_engine_cs *engine,
engine->props.heartbeat_interval_ms = saved;
}
+static bool is_active(struct i915_request *rq)
+{
+ if (i915_request_is_active(rq))
+ return true;
+
+ if (i915_request_on_hold(rq))
+ return true;
+
+ if (i915_request_started(rq))
+ return true;
+
+ return false;
+}
+
static int wait_for_submit(struct intel_engine_cs *engine,
struct i915_request *rq,
unsigned long timeout)
{
timeout += jiffies;
do {
- cond_resched();
- intel_engine_flush_submission(engine);
-
- if (READ_ONCE(engine->execlists.pending[0]))
- continue;
+ bool done = time_after(jiffies, timeout);
- if (i915_request_is_active(rq))
+ if (i915_request_completed(rq)) /* that was quick! */
return 0;
- if (i915_request_started(rq)) /* that was quick! */
+ /* Wait until the HW has acknowledged the submission (or err) */
+ intel_engine_flush_submission(engine);
+ if (!READ_ONCE(engine->execlists.pending[0]) && is_active(rq))
return 0;
- } while (time_before(jiffies, timeout));
- return -ETIME;
+ if (done)
+ return -ETIME;
+
+ cond_resched();
+ } while (1);
}
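
The reworked wait_for_submit() samples the deadline before testing the request state, which guarantees the state is evaluated one final time after the timeout expires rather than racing the clock against the condition. A standalone sketch of that loop shape:

#include <stdbool.h>
#include <time.h>

static bool cond_ready;  /* whatever we are polling for */

static bool poll_until(time_t deadline)
{
        do {
                bool done = time(NULL) > deadline;

                if (cond_ready)
                        return true;    /* checked once even past deadline */
                if (done)
                        return false;
                /* cond_resched()/flush would go here in the kernel loop */
        } while (1);
}

int main(void)
{
        cond_ready = true;
        return poll_until(time(NULL) + 1) ? 0 : 1;
}
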
static int wait_for_reset(struct intel_engine_cs *engine,
@@ -634,9 +649,9 @@ static int live_error_interrupt(void *arg)
error_repr(p->error[i]));
if (!i915_request_started(client[i])) {
- pr_debug("%s: %s request not stated!\n",
- engine->name,
- error_repr(p->error[i]));
+ pr_err("%s: %s request not started!\n",
+ engine->name,
+ error_repr(p->error[i]));
err = -ETIME;
goto out;
}
@@ -644,9 +659,10 @@ static int live_error_interrupt(void *arg)
/* Kick the tasklet to process the error */
intel_engine_flush_submission(engine);
if (client[i]->fence.error != p->error[i]) {
- pr_err("%s: %s request completed with wrong error code: %d\n",
+ pr_err("%s: %s request (%s) with wrong error code: %d\n",
engine->name,
error_repr(p->error[i]),
+ i915_request_completed(client[i]) ? "completed" : "running",
client[i]->fence.error);
err = -EINVAL;
goto out;
@@ -929,7 +945,7 @@ create_rewinder(struct intel_context *ce,
goto err;
}
- cs = intel_ring_begin(rq, 10);
+ cs = intel_ring_begin(rq, 14);
if (IS_ERR(cs)) {
err = PTR_ERR(cs);
goto err;
@@ -941,8 +957,8 @@ create_rewinder(struct intel_context *ce,
*cs++ = MI_SEMAPHORE_WAIT |
MI_SEMAPHORE_GLOBAL_GTT |
MI_SEMAPHORE_POLL |
- MI_SEMAPHORE_SAD_NEQ_SDD;
- *cs++ = 0;
+ MI_SEMAPHORE_SAD_GTE_SDD;
+ *cs++ = idx;
*cs++ = offset;
*cs++ = 0;
@@ -951,6 +967,11 @@ create_rewinder(struct intel_context *ce,
*cs++ = offset + idx * sizeof(u32);
*cs++ = 0;
+ *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+ *cs++ = offset;
+ *cs++ = 0;
+ *cs++ = idx + 1;
+
intel_ring_advance(rq, cs);
rq->sched.attr.priority = I915_PRIORITY_MASK;
@@ -984,7 +1005,7 @@ static int live_timeslice_rewind(void *arg)
for_each_engine(engine, gt, id) {
enum { A1, A2, B1 };
- enum { X = 1, Y, Z };
+ enum { X = 1, Z, Y };
struct i915_request *rq[3] = {};
struct intel_context *ce;
unsigned long heartbeat;
@@ -1017,13 +1038,13 @@ static int live_timeslice_rewind(void *arg)
goto err;
}
- rq[0] = create_rewinder(ce, NULL, slot, 1);
+ rq[0] = create_rewinder(ce, NULL, slot, X);
if (IS_ERR(rq[0])) {
intel_context_put(ce);
goto err;
}
- rq[1] = create_rewinder(ce, NULL, slot, 2);
+ rq[1] = create_rewinder(ce, NULL, slot, Y);
intel_context_put(ce);
if (IS_ERR(rq[1]))
goto err;
@@ -1041,7 +1062,7 @@ static int live_timeslice_rewind(void *arg)
goto err;
}
- rq[2] = create_rewinder(ce, rq[0], slot, 3);
+ rq[2] = create_rewinder(ce, rq[0], slot, Z);
intel_context_put(ce);
if (IS_ERR(rq[2]))
goto err;
@@ -1052,18 +1073,14 @@ static int live_timeslice_rewind(void *arg)
engine->name);
goto err;
}
- GEM_BUG_ON(!timer_pending(&engine->execlists.timer));
/* ELSP[] = { { A:rq1, A:rq2 }, { B:rq1 } } */
- GEM_BUG_ON(!i915_request_is_active(rq[A1]));
- GEM_BUG_ON(!i915_request_is_active(rq[A2]));
- GEM_BUG_ON(!i915_request_is_active(rq[B1]));
-
- /* Wait for the timeslice to kick in */
- del_timer(&engine->execlists.timer);
- tasklet_hi_schedule(&engine->execlists.tasklet);
- intel_engine_flush_submission(engine);
-
+ if (i915_request_is_active(rq[A2])) { /* semaphore yielded! */
+ /* Wait for the timeslice to kick in */
+ del_timer(&engine->execlists.timer);
+ tasklet_hi_schedule(&engine->execlists.tasklet);
+ intel_engine_flush_submission(engine);
+ }
/* -> ELSP[] = { { A:rq1 }, { B:rq1 } } */
GEM_BUG_ON(!i915_request_is_active(rq[A1]));
GEM_BUG_ON(!i915_request_is_active(rq[B1]));
@@ -1228,8 +1245,14 @@ static int live_timeslice_queue(void *arg)
if (err)
goto err_rq;
- intel_engine_flush_submission(engine);
+ /* Wait until we ack the release_queue and start timeslicing */
+ do {
+ cond_resched();
+ intel_engine_flush_submission(engine);
+ } while (READ_ONCE(engine->execlists.pending[0]));
+
if (!READ_ONCE(engine->execlists.timer.expires) &&
+ execlists_active(&engine->execlists) == rq &&
!i915_request_completed(rq)) {
struct drm_printer p =
drm_info_printer(gt->i915->drm.dev);
@@ -2030,6 +2053,9 @@ static int __cancel_hostile(struct live_preempt_cancel *arg)
if (!IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT))
return 0;
+ if (!intel_has_reset_engine(arg->engine->gt))
+ return 0;
+
GEM_TRACE("%s(%s)\n", __func__, arg->engine->name);
rq = spinner_create_request(&arg->a.spin,
arg->a.ctx, arg->engine,
@@ -2630,7 +2656,7 @@ static int create_gang(struct intel_engine_cs *engine,
if (IS_ERR(rq))
goto err_obj;
- rq->batch = vma;
+ rq->batch = i915_vma_get(vma);
i915_request_get(rq);
i915_vma_lock(vma);
@@ -2654,6 +2680,7 @@ static int create_gang(struct intel_engine_cs *engine,
return 0;
err_rq:
+ i915_vma_put(rq->batch);
i915_request_put(rq);
err_obj:
i915_gem_object_put(obj);
@@ -2750,6 +2777,7 @@ static int live_preempt_gang(void *arg)
err = -ETIME;
}
+ i915_vma_put(rq->batch);
i915_request_put(rq);
rq = n;
}
@@ -5155,7 +5183,6 @@ static int compare_isolation(struct intel_engine_cs *engine,
A[0][x], B[0][x], B[1][x],
poison, lrc[dw + 1]);
err = -EINVAL;
- break;
}
}
dw += 2;
@@ -5294,6 +5321,7 @@ static int live_lrc_isolation(void *arg)
0xffffffff,
0xffff0000,
};
+ int err = 0;
/*
* Our goal is try and verify that per-context state cannot be
@@ -5304,7 +5332,6 @@ static int live_lrc_isolation(void *arg)
*/
for_each_engine(engine, gt, id) {
- int err = 0;
int i;
/* Just don't even ask */
@@ -5315,23 +5342,25 @@ static int live_lrc_isolation(void *arg)
intel_engine_pm_get(engine);
if (engine->pinned_default_state) {
for (i = 0; i < ARRAY_SIZE(poison); i++) {
- err = __lrc_isolation(engine, poison[i]);
- if (err)
- break;
+ int result;
- err = __lrc_isolation(engine, ~poison[i]);
- if (err)
- break;
+ result = __lrc_isolation(engine, poison[i]);
+ if (result && !err)
+ err = result;
+
+ result = __lrc_isolation(engine, ~poison[i]);
+ if (result && !err)
+ err = result;
}
}
intel_engine_pm_put(engine);
- if (igt_flush_test(gt->i915))
+ if (igt_flush_test(gt->i915)) {
err = -EIO;
- if (err)
- return err;
+ break;
+ }
}
- return 0;
+ return err;
}
static void garbage_reset(struct intel_engine_cs *engine,
diff --git a/drivers/gpu/drm/i915/gt/selftest_rc6.c b/drivers/gpu/drm/i915/gt/selftest_rc6.c
index 5f7e2dcf5686..08c3dbd41b12 100644
--- a/drivers/gpu/drm/i915/gt/selftest_rc6.c
+++ b/drivers/gpu/drm/i915/gt/selftest_rc6.c
@@ -12,11 +12,44 @@
#include "selftests/i915_random.h"
+static u64 energy_uJ(struct intel_rc6 *rc6)
+{
+ unsigned long long power;
+ u32 units;
+
+ if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &power))
+ return 0;
+
+ units = (power & 0x1f00) >> 8;
+
+ if (rdmsrl_safe(MSR_PP1_ENERGY_STATUS, &power))
+ return 0;
+
+ return (1000000 * power) >> units; /* convert to uJ */
+}
+
+static u64 rc6_residency(struct intel_rc6 *rc6)
+{
+ u64 result;
+
+ /* XXX VLV_GT_MEDIA_RC6? */
+
+ result = intel_rc6_residency_ns(rc6, GEN6_GT_GFX_RC6);
+ if (HAS_RC6p(rc6_to_i915(rc6)))
+ result += intel_rc6_residency_ns(rc6, GEN6_GT_GFX_RC6p);
+ if (HAS_RC6pp(rc6_to_i915(rc6)))
+ result += intel_rc6_residency_ns(rc6, GEN6_GT_GFX_RC6pp);
+
+ return result;
+}
+
int live_rc6_manual(void *arg)
{
struct intel_gt *gt = arg;
struct intel_rc6 *rc6 = &gt->rc6;
+ u64 rc0_power, rc6_power;
intel_wakeref_t wakeref;
+ ktime_t dt;
u64 res[2];
int err = 0;
@@ -38,9 +71,14 @@ int live_rc6_manual(void *arg)
__intel_rc6_disable(rc6);
msleep(1); /* wakeup is not immediate, takes about 100us on icl */
- res[0] = intel_rc6_residency_ns(rc6, GEN6_GT_GFX_RC6);
+ res[0] = rc6_residency(rc6);
+
+ dt = ktime_get();
+ rc0_power = energy_uJ(rc6);
msleep(250);
- res[1] = intel_rc6_residency_ns(rc6, GEN6_GT_GFX_RC6);
+ rc0_power = energy_uJ(rc6) - rc0_power;
+ dt = ktime_sub(ktime_get(), dt);
+ res[1] = rc6_residency(rc6);
if ((res[1] - res[0]) >> 10) {
pr_err("RC6 residency increased by %lldus while disabled for 250ms!\n",
(res[1] - res[0]) >> 10);
@@ -48,13 +86,24 @@ int live_rc6_manual(void *arg)
goto out_unlock;
}
+ rc0_power = div64_u64(NSEC_PER_SEC * rc0_power, ktime_to_ns(dt));
+ if (!rc0_power) {
+ pr_err("No power measured while in RC0\n");
+ err = -EINVAL;
+ goto out_unlock;
+ }
+
/* Manually enter RC6 */
intel_rc6_park(rc6);
- res[0] = intel_rc6_residency_ns(rc6, GEN6_GT_GFX_RC6);
+ res[0] = rc6_residency(rc6);
+ intel_uncore_forcewake_flush(rc6_to_uncore(rc6), FORCEWAKE_ALL);
+ dt = ktime_get();
+ rc6_power = energy_uJ(rc6);
msleep(100);
- res[1] = intel_rc6_residency_ns(rc6, GEN6_GT_GFX_RC6);
-
+ rc6_power = energy_uJ(rc6) - rc6_power;
+ dt = ktime_sub(ktime_get(), dt);
+ res[1] = rc6_residency(rc6);
if (res[1] == res[0]) {
pr_err("Did not enter RC6! RC6_STATE=%08x, RC6_CONTROL=%08x, residency=%lld\n",
intel_uncore_read_fw(gt->uncore, GEN6_RC_STATE),
@@ -63,6 +112,15 @@ int live_rc6_manual(void *arg)
err = -EINVAL;
}
+ rc6_power = div64_u64(NSEC_PER_SEC * rc6_power, ktime_to_ns(dt));
+ pr_info("GPU consumed %llduW in RC0 and %llduW in RC6\n",
+ rc0_power, rc6_power);
+ if (2 * rc6_power > rc0_power) {
+ pr_err("GPU leaked energy while in RC6!\n");
+ err = -EINVAL;
+ goto out_unlock;
+ }
+
/* Restore what should have been the original state! */
intel_rc6_unpark(rc6);
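
For reference, the conversion energy_uJ() performs above: bits 12:8 of MSR_RAPL_POWER_UNIT hold an exponent N, each energy-status count is 1/2^N joules, so microjoules are (10^6 * counts) >> N; the uW figures printed later are simply delta-energy over elapsed time. A self-contained sketch of the unit math:

#include <stdint.h>
#include <stdio.h>

/* Convert raw RAPL energy-status counts to microjoules, mirroring
 * energy_uJ() above: the unit field is an exponent, one count = 1/2^N J. */
static uint64_t counts_to_uJ(uint64_t counts, uint64_t unit_msr)
{
        uint32_t units = (unit_msr & 0x1f00) >> 8;

        return (1000000ull * counts) >> units;
}

int main(void)
{
        /* With the common N = 14, one count is ~61 uJ. */
        printf("%llu uJ\n",
               (unsigned long long)counts_to_uJ(1, 14ull << 8));
        return 0;
}
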
diff --git a/drivers/gpu/drm/i915/gt/selftest_rps.c b/drivers/gpu/drm/i915/gt/selftest_rps.c
new file mode 100644
index 000000000000..26aadc2ae3be
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/selftest_rps.c
@@ -0,0 +1,223 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include "intel_engine_pm.h"
+#include "intel_gt_pm.h"
+#include "intel_rc6.h"
+#include "selftest_rps.h"
+#include "selftests/igt_flush_test.h"
+#include "selftests/igt_spinner.h"
+
+static void dummy_rps_work(struct work_struct *wrk)
+{
+}
+
+static int __rps_up_interrupt(struct intel_rps *rps,
+ struct intel_engine_cs *engine,
+ struct igt_spinner *spin)
+{
+ struct intel_uncore *uncore = engine->uncore;
+ struct i915_request *rq;
+ u32 timeout;
+
+ if (!intel_engine_can_store_dword(engine))
+ return 0;
+
+ intel_gt_pm_wait_for_idle(engine->gt);
+ GEM_BUG_ON(rps->active);
+
+ rps->pm_iir = 0;
+ rps->cur_freq = rps->min_freq;
+
+ rq = igt_spinner_create_request(spin, engine->kernel_context, MI_NOOP);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ if (!igt_wait_for_spinner(spin, rq)) {
+ pr_err("%s: RPS spinner did not start\n",
+ engine->name);
+ i915_request_put(rq);
+ intel_gt_set_wedged(engine->gt);
+ return -EIO;
+ }
+
+ if (!rps->active) {
+ pr_err("%s: RPS not enabled on starting spinner\n",
+ engine->name);
+ igt_spinner_end(spin);
+ i915_request_put(rq);
+ return -EINVAL;
+ }
+
+ if (!(rps->pm_events & GEN6_PM_RP_UP_THRESHOLD)) {
+ pr_err("%s: RPS did not register UP interrupt\n",
+ engine->name);
+ i915_request_put(rq);
+ return -EINVAL;
+ }
+
+ if (rps->last_freq != rps->min_freq) {
+ pr_err("%s: RPS did not program min frequency\n",
+ engine->name);
+ i915_request_put(rq);
+ return -EINVAL;
+ }
+
+ timeout = intel_uncore_read(uncore, GEN6_RP_UP_EI);
+ timeout = GT_PM_INTERVAL_TO_US(engine->i915, timeout);
+
+ usleep_range(2 * timeout, 3 * timeout);
+ GEM_BUG_ON(i915_request_completed(rq));
+
+ igt_spinner_end(spin);
+ i915_request_put(rq);
+
+ if (rps->cur_freq != rps->min_freq) {
+ pr_err("%s: Frequency unexpectedly changed [up], now %d!\n",
+ engine->name, intel_rps_read_actual_frequency(rps));
+ return -EINVAL;
+ }
+
+ if (!(rps->pm_iir & GEN6_PM_RP_UP_THRESHOLD)) {
+ pr_err("%s: UP interrupt not recorded for spinner, pm_iir:%x, prev_up:%x, up_threshold:%x, up_ei:%x\n",
+ engine->name, rps->pm_iir,
+ intel_uncore_read(uncore, GEN6_RP_PREV_UP),
+ intel_uncore_read(uncore, GEN6_RP_UP_THRESHOLD),
+ intel_uncore_read(uncore, GEN6_RP_UP_EI));
+ return -EINVAL;
+ }
+
+ intel_gt_pm_wait_for_idle(engine->gt);
+ return 0;
+}
+
+static int __rps_down_interrupt(struct intel_rps *rps,
+ struct intel_engine_cs *engine)
+{
+ struct intel_uncore *uncore = engine->uncore;
+ u32 timeout;
+
+ mutex_lock(&rps->lock);
+ GEM_BUG_ON(!rps->active);
+ intel_rps_set(rps, rps->max_freq);
+ mutex_unlock(&rps->lock);
+
+ if (!(rps->pm_events & GEN6_PM_RP_DOWN_THRESHOLD)) {
+ pr_err("%s: RPS did not register DOWN interrupt\n",
+ engine->name);
+ return -EINVAL;
+ }
+
+ if (rps->last_freq != rps->max_freq) {
+ pr_err("%s: RPS did not program max frequency\n",
+ engine->name);
+ return -EINVAL;
+ }
+
+ timeout = intel_uncore_read(uncore, GEN6_RP_DOWN_EI);
+ timeout = GT_PM_INTERVAL_TO_US(engine->i915, timeout);
+
+ /* Flush any previous EI */
+ usleep_range(timeout, 2 * timeout);
+
+ /* Reset the interrupt status */
+ rps_disable_interrupts(rps);
+ GEM_BUG_ON(rps->pm_iir);
+ rps_enable_interrupts(rps);
+
+ /* And then wait for the timeout, for real this time */
+ usleep_range(2 * timeout, 3 * timeout);
+
+ if (rps->cur_freq != rps->max_freq) {
+ pr_err("%s: Frequency unexpectedly changed [down], now %d!\n",
+ engine->name,
+ intel_rps_read_actual_frequency(rps));
+ return -EINVAL;
+ }
+
+ if (!(rps->pm_iir & (GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT))) {
+ pr_err("%s: DOWN interrupt not recorded for idle, pm_iir:%x, prev_down:%x, down_threshold:%x, down_ei:%x [prev_up:%x, up_threshold:%x, up_ei:%x]\n",
+ engine->name, rps->pm_iir,
+ intel_uncore_read(uncore, GEN6_RP_PREV_DOWN),
+ intel_uncore_read(uncore, GEN6_RP_DOWN_THRESHOLD),
+ intel_uncore_read(uncore, GEN6_RP_DOWN_EI),
+ intel_uncore_read(uncore, GEN6_RP_PREV_UP),
+ intel_uncore_read(uncore, GEN6_RP_UP_THRESHOLD),
+ intel_uncore_read(uncore, GEN6_RP_UP_EI));
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int live_rps_interrupt(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_rps *rps = &gt->rps;
+ void (*saved_work)(struct work_struct *wrk);
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ struct igt_spinner spin;
+ u32 pm_events;
+ int err = 0;
+
+ /*
+ * First, let's check whether or not we are receiving interrupts.
+ */
+
+ if (!rps->enabled || rps->max_freq <= rps->min_freq)
+ return 0;
+
+ intel_gt_pm_get(gt);
+ pm_events = rps->pm_events;
+ intel_gt_pm_put(gt);
+ if (!pm_events) {
+ pr_err("No RPS PM events registered, but RPS is enabled?\n");
+ return -ENODEV;
+ }
+
+ if (igt_spinner_init(&spin, gt))
+ return -ENOMEM;
+
+ intel_gt_pm_wait_for_idle(gt);
+ saved_work = rps->work.func;
+ rps->work.func = dummy_rps_work;
+
+ for_each_engine(engine, gt, id) {
+ /* Keep the engine busy with a spinner; expect an UP! */
+ if (pm_events & GEN6_PM_RP_UP_THRESHOLD) {
+ err = __rps_up_interrupt(rps, engine, &spin);
+ if (err)
+ goto out;
+ }
+
+ /* Keep the engine awake but idle and check for DOWN */
+ if (pm_events & GEN6_PM_RP_DOWN_THRESHOLD) {
+ intel_engine_pm_get(engine);
+ intel_rc6_disable(&gt->rc6);
+
+ err = __rps_down_interrupt(rps, engine);
+
+ intel_rc6_enable(&gt->rc6);
+ intel_engine_pm_put(engine);
+ if (err)
+ goto out;
+ }
+ }
+
+out:
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+
+ igt_spinner_fini(&spin);
+
+ intel_gt_pm_wait_for_idle(gt);
+ rps->work.func = saved_work;
+
+ return err;
+}
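
One detail worth noting in the new selftest: rps->work.func is swapped for dummy_rps_work() so interrupts still latch into rps->pm_iir but nothing reacts to them, then the saved handler is restored. A minimal sketch of that swap-and-restore pattern (names illustrative):

#include <stdio.h>

struct worker {
        void (*func)(void *data);
        void *data;
};

static void real_handler(void *data)  { (void)data; puts("reacting"); }
static void dummy_handler(void *data) { (void)data; /* latch only */ }

int main(void)
{
        struct worker w = { real_handler, NULL };
        void (*saved)(void *) = w.func;

        w.func = dummy_handler;         /* events accumulate, no reactions */
        w.func(w.data);
        w.func = saved;                 /* restore the production handler */
        w.func(w.data);
        return 0;
}
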
diff --git a/drivers/gpu/drm/i915/gt/selftest_rps.h b/drivers/gpu/drm/i915/gt/selftest_rps.h
new file mode 100644
index 000000000000..abba66420996
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/selftest_rps.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef SELFTEST_RPS_H
+#define SELFTEST_RPS_H
+
+int live_rps_interrupt(void *arg);
+
+#endif /* SELFTEST_RPS_H */
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
index 819f09ef51fc..861657897c0f 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
@@ -169,7 +169,7 @@ void intel_guc_init_early(struct intel_guc *guc)
{
struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
- intel_guc_fw_init_early(guc);
+ intel_uc_fw_init_early(&guc->fw, INTEL_UC_FW_TYPE_GUC);
intel_guc_ct_init_early(&guc->ct);
intel_guc_log_init_early(&guc->log);
intel_guc_submission_init_early(guc);
@@ -723,3 +723,47 @@ int intel_guc_allocate_and_map_vma(struct intel_guc *guc, u32 size,
return 0;
}
+
+/**
+ * intel_guc_load_status - dump information about GuC load status
+ * @guc: the GuC
+ * @p: the &drm_printer
+ *
+ * Pretty printer for GuC load status.
+ */
+void intel_guc_load_status(struct intel_guc *guc, struct drm_printer *p)
+{
+ struct intel_gt *gt = guc_to_gt(guc);
+ struct intel_uncore *uncore = gt->uncore;
+ intel_wakeref_t wakeref;
+
+ if (!intel_guc_is_supported(guc)) {
+ drm_printf(p, "GuC not supported\n");
+ return;
+ }
+
+ if (!intel_guc_is_wanted(guc)) {
+ drm_printf(p, "GuC disabled\n");
+ return;
+ }
+
+ intel_uc_fw_dump(&guc->fw, p);
+
+ with_intel_runtime_pm(uncore->rpm, wakeref) {
+ u32 status = intel_uncore_read(uncore, GUC_STATUS);
+ u32 i;
+
+ drm_printf(p, "\nGuC status 0x%08x:\n", status);
+ drm_printf(p, "\tBootrom status = 0x%x\n",
+ (status & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
+ drm_printf(p, "\tuKernel status = 0x%x\n",
+ (status & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
+ drm_printf(p, "\tMIA Core status = 0x%x\n",
+ (status & GS_MIA_MASK) >> GS_MIA_SHIFT);
+ drm_puts(p, "\nScratch registers:\n");
+ for (i = 0; i < 16; i++) {
+ drm_printf(p, "\t%2d: \t0x%x\n",
+ i, intel_uncore_read(uncore, SOFT_SCRATCH(i)));
+ }
+ }
+}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
index 4594ccbeaa34..e84ab67b317d 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
@@ -74,6 +74,11 @@ struct intel_guc {
struct mutex send_mutex;
};
+static inline struct intel_guc *log_to_guc(struct intel_guc_log *log)
+{
+ return container_of(log, struct intel_guc, log);
+}
+
static
inline int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len)
{
@@ -190,4 +195,6 @@ static inline void intel_guc_disable_msg(struct intel_guc *guc, u32 mask)
int intel_guc_reset_engine(struct intel_guc *guc,
struct intel_engine_cs *engine);
+void intel_guc_load_status(struct intel_guc *guc, struct drm_printer *p);
+
#endif
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_debugfs.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_debugfs.c
new file mode 100644
index 000000000000..fe7cb7b29a1e
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_debugfs.c
@@ -0,0 +1,42 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include <drm/drm_print.h>
+
+#include "gt/debugfs_gt.h"
+#include "intel_guc.h"
+#include "intel_guc_debugfs.h"
+#include "intel_guc_log_debugfs.h"
+
+static int guc_info_show(struct seq_file *m, void *data)
+{
+ struct intel_guc *guc = m->private;
+ struct drm_printer p = drm_seq_file_printer(m);
+
+ if (!intel_guc_is_supported(guc))
+ return -ENODEV;
+
+ intel_guc_load_status(guc, &p);
+ drm_puts(&p, "\n");
+ intel_guc_log_info(&guc->log, &p);
+
+ /* Add more as required ... */
+
+ return 0;
+}
+DEFINE_GT_DEBUGFS_ATTRIBUTE(guc_info);
+
+void intel_guc_debugfs_register(struct intel_guc *guc, struct dentry *root)
+{
+ static const struct debugfs_gt_file files[] = {
+ { "guc_info", &guc_info_fops, NULL },
+ };
+
+ if (!intel_guc_is_supported(guc))
+ return;
+
+ intel_gt_debugfs_register_files(root, files, ARRAY_SIZE(files), guc);
+ intel_guc_log_debugfs_register(&guc->log, root);
+}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_debugfs.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_debugfs.h
new file mode 100644
index 000000000000..424c26665cf1
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_debugfs.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef DEBUGFS_GUC_H
+#define DEBUGFS_GUC_H
+
+struct intel_guc;
+struct dentry;
+
+void intel_guc_debugfs_register(struct intel_guc *guc, struct dentry *root);
+
+#endif /* DEBUGFS_GUC_H */
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
index 3a1c47d600ea..d4a87f4c9421 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
@@ -13,20 +13,6 @@
#include "intel_guc_fw.h"
#include "i915_drv.h"
-/**
- * intel_guc_fw_init_early() - initializes GuC firmware struct
- * @guc: intel_guc struct
- *
- * On platforms with GuC selects firmware for uploading
- */
-void intel_guc_fw_init_early(struct intel_guc *guc)
-{
- struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
-
- intel_uc_fw_init_early(&guc->fw, INTEL_UC_FW_TYPE_GUC, HAS_GT_UC(i915),
- INTEL_INFO(i915)->platform, INTEL_REVID(i915));
-}
-
static void guc_prepare_xfer(struct intel_uncore *uncore)
{
u32 shim_flags = GUC_DISABLE_SRAM_INIT_TO_ZEROES |
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.h
index b5ab639d7259..0b4d2a9c9435 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.h
@@ -8,7 +8,6 @@
struct intel_guc;
-void intel_guc_fw_init_early(struct intel_guc *guc);
int intel_guc_fw_upload(struct intel_guc *guc);
#endif
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
index caed0d57e704..fb10f3597ea5 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
@@ -55,11 +55,6 @@ static int guc_action_control_log(struct intel_guc *guc, bool enable,
return intel_guc_send(guc, action, ARRAY_SIZE(action));
}
-static inline struct intel_guc *log_to_guc(struct intel_guc_log *log)
-{
- return container_of(log, struct intel_guc, log);
-}
-
static void guc_log_enable_flush_events(struct intel_guc_log *log)
{
intel_guc_enable_msg(log_to_guc(log),
@@ -672,3 +667,95 @@ void intel_guc_log_handle_flush_event(struct intel_guc_log *log)
{
queue_work(system_highpri_wq, &log->relay.flush_work);
}
+
+static const char *
+stringify_guc_log_type(enum guc_log_buffer_type type)
+{
+ switch (type) {
+ case GUC_ISR_LOG_BUFFER:
+ return "ISR";
+ case GUC_DPC_LOG_BUFFER:
+ return "DPC";
+ case GUC_CRASH_DUMP_LOG_BUFFER:
+ return "CRASH";
+ default:
+ MISSING_CASE(type);
+ }
+
+ return "";
+}
+
+/**
+ * intel_guc_log_info - dump information about GuC log relay
+ * @log: the GuC log
+ * @p: the &drm_printer
+ *
+ * Pretty printer for GuC log info.
+ */
+void intel_guc_log_info(struct intel_guc_log *log, struct drm_printer *p)
+{
+ enum guc_log_buffer_type type;
+
+ if (!intel_guc_log_relay_created(log)) {
+ drm_puts(p, "GuC log relay not created\n");
+ return;
+ }
+
+ drm_puts(p, "GuC logging stats:\n");
+
+ drm_printf(p, "\tRelay full count: %u\n", log->relay.full_count);
+
+ for (type = GUC_ISR_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) {
+ drm_printf(p, "\t%s:\tflush count %10u, overflow count %10u\n",
+ stringify_guc_log_type(type),
+ log->stats[type].flush,
+ log->stats[type].sampled_overflow);
+ }
+}
+
+/**
+ * intel_guc_log_dump - dump the contents of the GuC log
+ * @log: the GuC log
+ * @p: the &drm_printer
+ * @dump_load_err: dump the log saved on GuC load error
+ *
+ * Pretty printer for the GuC log
+ */
+int intel_guc_log_dump(struct intel_guc_log *log, struct drm_printer *p,
+ bool dump_load_err)
+{
+ struct intel_guc *guc = log_to_guc(log);
+ struct intel_uc *uc = container_of(guc, struct intel_uc, guc);
+ struct drm_i915_gem_object *obj = NULL;
+ u32 *map;
+ int i = 0;
+
+ if (!intel_guc_is_supported(guc))
+ return -ENODEV;
+
+ if (dump_load_err)
+ obj = uc->load_err_log;
+ else if (guc->log.vma)
+ obj = guc->log.vma->obj;
+
+ if (!obj)
+ return 0;
+
+ map = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ if (IS_ERR(map)) {
+ DRM_DEBUG("Failed to pin object\n");
+ drm_puts(p, "(log data unaccessible)\n");
+ return PTR_ERR(map);
+ }
+
+ for (i = 0; i < obj->base.size / sizeof(u32); i += 4)
+ drm_printf(p, "0x%08x 0x%08x 0x%08x 0x%08x\n",
+ *(map + i), *(map + i + 1),
+ *(map + i + 2), *(map + i + 3));
+
+ drm_puts(p, "\n");
+
+ i915_gem_object_unpin_map(obj);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h
index c252c022c5fc..11fccd0b2294 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h
@@ -79,4 +79,8 @@ static inline u32 intel_guc_log_get_level(struct intel_guc_log *log)
return log->level;
}
+void intel_guc_log_info(struct intel_guc_log *log, struct drm_printer *p);
+int intel_guc_log_dump(struct intel_guc_log *log, struct drm_printer *p,
+ bool dump_load_err);
+
#endif
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log_debugfs.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_log_debugfs.c
new file mode 100644
index 000000000000..129e0cf7dfe2
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log_debugfs.c
@@ -0,0 +1,124 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include <linux/fs.h>
+#include <drm/drm_print.h>
+
+#include "gt/debugfs_gt.h"
+#include "intel_guc.h"
+#include "intel_guc_log.h"
+#include "intel_guc_log_debugfs.h"
+
+static int guc_log_dump_show(struct seq_file *m, void *data)
+{
+ struct drm_printer p = drm_seq_file_printer(m);
+
+ return intel_guc_log_dump(m->private, &p, false);
+}
+DEFINE_GT_DEBUGFS_ATTRIBUTE(guc_log_dump);
+
+static int guc_load_err_log_dump_show(struct seq_file *m, void *data)
+{
+ struct drm_printer p = drm_seq_file_printer(m);
+
+ return intel_guc_log_dump(m->private, &p, true);
+}
+DEFINE_GT_DEBUGFS_ATTRIBUTE(guc_load_err_log_dump);
+
+static int guc_log_level_get(void *data, u64 *val)
+{
+ struct intel_guc_log *log = data;
+
+ if (!intel_guc_is_used(log_to_guc(log)))
+ return -ENODEV;
+
+ *val = intel_guc_log_get_level(log);
+
+ return 0;
+}
+
+static int guc_log_level_set(void *data, u64 val)
+{
+ struct intel_guc_log *log = data;
+
+ if (!intel_guc_is_used(log_to_guc(log)))
+ return -ENODEV;
+
+ return intel_guc_log_set_level(log, val);
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(guc_log_level_fops,
+ guc_log_level_get, guc_log_level_set,
+ "%lld\n");
+
+static int guc_log_relay_open(struct inode *inode, struct file *file)
+{
+ struct intel_guc_log *log = inode->i_private;
+
+ if (!intel_guc_is_ready(log_to_guc(log)))
+ return -ENODEV;
+
+ file->private_data = log;
+
+ return intel_guc_log_relay_open(log);
+}
+
+static ssize_t
+guc_log_relay_write(struct file *filp,
+ const char __user *ubuf,
+ size_t cnt,
+ loff_t *ppos)
+{
+ struct intel_guc_log *log = filp->private_data;
+ int val;
+ int ret;
+
+ ret = kstrtoint_from_user(ubuf, cnt, 0, &val);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Enable and start the GuC log relay on a value of 1.
+ * Flush the log relay for any other value.
+ */
+ if (val == 1)
+ ret = intel_guc_log_relay_start(log);
+ else
+ intel_guc_log_relay_flush(log);
+
+ return ret ?: cnt;
+}
+
+static int guc_log_relay_release(struct inode *inode, struct file *file)
+{
+ struct intel_guc_log *log = inode->i_private;
+
+ intel_guc_log_relay_close(log);
+ return 0;
+}
+
+static const struct file_operations guc_log_relay_fops = {
+ .owner = THIS_MODULE,
+ .open = guc_log_relay_open,
+ .write = guc_log_relay_write,
+ .release = guc_log_relay_release,
+};
+
+void intel_guc_log_debugfs_register(struct intel_guc_log *log,
+ struct dentry *root)
+{
+ static const struct debugfs_gt_file files[] = {
+ { "guc_log_dump", &guc_log_dump_fops, NULL },
+ { "guc_load_err_log_dump", &guc_load_err_log_dump_fops, NULL },
+ { "guc_log_level", &guc_log_level_fops, NULL },
+ { "guc_log_relay", &guc_log_relay_fops, NULL },
+ };
+
+ if (!intel_guc_is_supported(log_to_guc(log)))
+ return;
+
+ intel_gt_debugfs_register_files(root, files, ARRAY_SIZE(files), log);
+}
+
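Hypothetical userspace usage of the relay control file registered above, matching guc_log_relay_write()'s semantics; the debugfs path is an assumption (standard debugfs mount, card minor 0):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        /* Path assumed: adjust "dri/0" to match the target device. */
        int fd = open("/sys/kernel/debug/dri/0/gt/uc/guc_log_relay", O_WRONLY);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        if (write(fd, "1", 1) < 0)      /* "1" enables and starts the relay */
                perror("write");
        if (write(fd, "0", 1) < 0)      /* any other value flushes the relay */
                perror("write");
        close(fd);
        return 0;
}
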
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log_debugfs.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_log_debugfs.h
new file mode 100644
index 000000000000..e8900e3d74ea
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log_debugfs.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef DEBUGFS_GUC_LOG_H
+#define DEBUGFS_GUC_LOG_H
+
+struct intel_guc_log;
+struct dentry;
+
+void intel_guc_log_debugfs_register(struct intel_guc_log *log,
+ struct dentry *root);
+
+#endif /* DEBUGFS_GUC_LOG_H */
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.c b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
index a74b65694512..65eeb44b397d 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_huc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
@@ -41,7 +41,7 @@ void intel_huc_init_early(struct intel_huc *huc)
{
struct drm_i915_private *i915 = huc_to_gt(huc)->i915;
- intel_huc_fw_init_early(huc);
+ intel_uc_fw_init_early(&huc->fw, INTEL_UC_FW_TYPE_HUC);
if (INTEL_GEN(i915) >= 11) {
huc->status.reg = GEN11_HUC_KERNEL_LOAD_INFO;
@@ -200,9 +200,13 @@ fail:
* This function reads status register to verify if HuC
* firmware was successfully loaded.
*
- * Returns: 1 if HuC firmware is loaded and verified,
- * 0 if HuC firmware is not loaded and -ENODEV if HuC
- * is not present on this platform.
+ * Returns:
+ * * -ENODEV if HuC is not present on this platform,
+ * * -EOPNOTSUPP if HuC firmware is disabled,
+ * * -ENOPKG if HuC firmware was not installed,
+ * * -ENOEXEC if HuC firmware is invalid or mismatched,
+ * * 0 if HuC firmware is not running,
+ * * 1 if HuC firmware is authenticated and running.
*/
int intel_huc_check_status(struct intel_huc *huc)
{
@@ -210,11 +214,50 @@ int intel_huc_check_status(struct intel_huc *huc)
intel_wakeref_t wakeref;
u32 status = 0;
- if (!intel_huc_is_supported(huc))
+ switch (__intel_uc_fw_status(&huc->fw)) {
+ case INTEL_UC_FIRMWARE_NOT_SUPPORTED:
return -ENODEV;
+ case INTEL_UC_FIRMWARE_DISABLED:
+ return -EOPNOTSUPP;
+ case INTEL_UC_FIRMWARE_MISSING:
+ return -ENOPKG;
+ case INTEL_UC_FIRMWARE_ERROR:
+ return -ENOEXEC;
+ default:
+ break;
+ }
with_intel_runtime_pm(gt->uncore->rpm, wakeref)
status = intel_uncore_read(gt->uncore, huc->status.reg);
return (status & huc->status.mask) == huc->status.value;
}
+
+/**
+ * intel_huc_load_status - dump information about HuC load status
+ * @huc: the HuC
+ * @p: the &drm_printer
+ *
+ * Pretty printer for HuC load status.
+ */
+void intel_huc_load_status(struct intel_huc *huc, struct drm_printer *p)
+{
+ struct intel_gt *gt = huc_to_gt(huc);
+ intel_wakeref_t wakeref;
+
+ if (!intel_huc_is_supported(huc)) {
+ drm_printf(p, "HuC not supported\n");
+ return;
+ }
+
+ if (!intel_huc_is_wanted(huc)) {
+ drm_printf(p, "HuC disabled\n");
+ return;
+ }
+
+ intel_uc_fw_dump(&huc->fw, p);
+
+ with_intel_runtime_pm(gt->uncore->rpm, wakeref)
+ drm_printf(p, "HuC status: 0x%08x\n",
+ intel_uncore_read(gt->uncore, huc->status.reg));
+}
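
Since intel_huc_check_status() now distinguishes firmware states by errno, a caller can report a precise reason for HuC being unusable. A hypothetical decoder, grounded only in the kernel-doc above:

#include <errno.h>
#include <stdio.h>

static const char *huc_status_str(int ret)
{
        switch (ret) {
        case -ENODEV:     return "HuC not present";
        case -EOPNOTSUPP: return "HuC firmware disabled";
        case -ENOPKG:     return "HuC firmware not installed";
        case -ENOEXEC:    return "HuC firmware invalid or mismatched";
        case 0:           return "HuC firmware not running";
        case 1:           return "HuC firmware authenticated and running";
        default:          return "unexpected";
        }
}

int main(void)
{
        printf("%s\n", huc_status_str(1));
        return 0;
}
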
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.h b/drivers/gpu/drm/i915/gt/uc/intel_huc.h
index a40b9cfc6c22..daee43b661d4 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_huc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.h
@@ -57,4 +57,6 @@ static inline bool intel_huc_is_authenticated(struct intel_huc *huc)
return intel_uc_fw_is_running(&huc->fw);
}
+void intel_huc_load_status(struct intel_huc *huc, struct drm_printer *p);
+
#endif
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc_debugfs.c b/drivers/gpu/drm/i915/gt/uc/intel_huc_debugfs.c
new file mode 100644
index 000000000000..5733c15fd123
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc_debugfs.c
@@ -0,0 +1,36 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include <drm/drm_print.h>
+
+#include "gt/debugfs_gt.h"
+#include "intel_huc.h"
+#include "intel_huc_debugfs.h"
+
+static int huc_info_show(struct seq_file *m, void *data)
+{
+ struct intel_huc *huc = m->private;
+ struct drm_printer p = drm_seq_file_printer(m);
+
+ if (!intel_huc_is_supported(huc))
+ return -ENODEV;
+
+ intel_huc_load_status(huc, &p);
+
+ return 0;
+}
+DEFINE_GT_DEBUGFS_ATTRIBUTE(huc_info);
+
+void intel_huc_debugfs_register(struct intel_huc *huc, struct dentry *root)
+{
+ static const struct debugfs_gt_file files[] = {
+ { "huc_info", &huc_info_fops, NULL },
+ };
+
+ if (!intel_huc_is_supported(huc))
+ return;
+
+ intel_gt_debugfs_register_files(root, files, ARRAY_SIZE(files), huc);
+}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc_debugfs.h b/drivers/gpu/drm/i915/gt/uc/intel_huc_debugfs.h
new file mode 100644
index 000000000000..be79e992f976
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc_debugfs.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef DEBUGFS_HUC_H
+#define DEBUGFS_HUC_H
+
+struct intel_huc;
+struct dentry;
+
+void intel_huc_debugfs_register(struct intel_huc *huc, struct dentry *root);
+
+#endif /* DEBUGFS_HUC_H */
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c
index 9cdf4cbe691c..e5ef509c70e8 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c
@@ -8,23 +8,6 @@
#include "i915_drv.h"
/**
- * intel_huc_fw_init_early() - initializes HuC firmware struct
- * @huc: intel_huc struct
- *
- * On platforms with HuC selects firmware for uploading
- */
-void intel_huc_fw_init_early(struct intel_huc *huc)
-{
- struct intel_gt *gt = huc_to_gt(huc);
- struct intel_uc *uc = &gt->uc;
- struct drm_i915_private *i915 = gt->i915;
-
- intel_uc_fw_init_early(&huc->fw, INTEL_UC_FW_TYPE_HUC,
- intel_uc_wants_guc(uc),
- INTEL_INFO(i915)->platform, INTEL_REVID(i915));
-}
-
-/**
* intel_huc_fw_upload() - load HuC uCode to device
* @huc: intel_huc structure
*
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.h b/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.h
index b791269ce923..12f264ee3e0b 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.h
@@ -8,7 +8,6 @@
struct intel_huc;
-void intel_huc_fw_init_early(struct intel_huc *huc);
int intel_huc_fw_upload(struct intel_huc *huc);
#endif
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
index a4cbe06e06bd..f518fe05c6f9 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
@@ -45,12 +45,12 @@ static void __confirm_options(struct intel_uc *uc)
{
struct drm_i915_private *i915 = uc_to_gt(uc)->i915;
- DRM_DEV_DEBUG_DRIVER(i915->drm.dev,
- "enable_guc=%d (guc:%s submission:%s huc:%s)\n",
- i915_modparams.enable_guc,
- yesno(intel_uc_wants_guc(uc)),
- yesno(intel_uc_wants_guc_submission(uc)),
- yesno(intel_uc_wants_huc(uc)));
+ drm_dbg(&i915->drm,
+ "enable_guc=%d (guc:%s submission:%s huc:%s)\n",
+ i915_modparams.enable_guc,
+ yesno(intel_uc_wants_guc(uc)),
+ yesno(intel_uc_wants_guc_submission(uc)),
+ yesno(intel_uc_wants_huc(uc)));
if (i915_modparams.enable_guc == -1)
return;
@@ -63,25 +63,25 @@ static void __confirm_options(struct intel_uc *uc)
}
if (!intel_uc_supports_guc(uc))
- dev_info(i915->drm.dev,
+ drm_info(&i915->drm,
"Incompatible option enable_guc=%d - %s\n",
i915_modparams.enable_guc, "GuC is not supported!");
if (i915_modparams.enable_guc & ENABLE_GUC_LOAD_HUC &&
!intel_uc_supports_huc(uc))
- dev_info(i915->drm.dev,
+ drm_info(&i915->drm,
"Incompatible option enable_guc=%d - %s\n",
i915_modparams.enable_guc, "HuC is not supported!");
if (i915_modparams.enable_guc & ENABLE_GUC_SUBMISSION &&
!intel_uc_supports_guc_submission(uc))
- dev_info(i915->drm.dev,
+ drm_info(&i915->drm,
"Incompatible option enable_guc=%d - %s\n",
i915_modparams.enable_guc, "GuC submission is N/A");
if (i915_modparams.enable_guc & ~(ENABLE_GUC_SUBMISSION |
ENABLE_GUC_LOAD_HUC))
- dev_info(i915->drm.dev,
+ drm_info(&i915->drm,
"Incompatible option enable_guc=%d - %s\n",
i915_modparams.enable_guc, "undocumented flag");
}
@@ -131,6 +131,13 @@ static void __uc_free_load_err_log(struct intel_uc *uc)
i915_gem_object_put(log);
}
+void intel_uc_driver_remove(struct intel_uc *uc)
+{
+ intel_uc_fini_hw(uc);
+ intel_uc_fini(uc);
+ __uc_free_load_err_log(uc);
+}
+
static inline bool guc_communication_enabled(struct intel_guc *guc)
{
return intel_guc_ct_enabled(&guc->ct);
@@ -311,8 +318,6 @@ static void __uc_fini(struct intel_uc *uc)
{
intel_huc_fini(&uc->huc);
intel_guc_fini(&uc->guc);
-
- __uc_free_load_err_log(uc);
}
static int __uc_sanitize(struct intel_uc *uc)
@@ -475,14 +480,14 @@ static int __uc_init_hw(struct intel_uc *uc)
if (intel_uc_uses_guc_submission(uc))
intel_guc_submission_enable(guc);
- dev_info(i915->drm.dev, "%s firmware %s version %u.%u %s:%s\n",
+ drm_info(&i915->drm, "%s firmware %s version %u.%u %s:%s\n",
intel_uc_fw_type_repr(INTEL_UC_FW_TYPE_GUC), guc->fw.path,
guc->fw.major_ver_found, guc->fw.minor_ver_found,
"submission",
enableddisabled(intel_uc_uses_guc_submission(uc)));
if (intel_uc_uses_huc(uc)) {
- dev_info(i915->drm.dev, "%s firmware %s version %u.%u %s:%s\n",
+ drm_info(&i915->drm, "%s firmware %s version %u.%u %s:%s\n",
intel_uc_fw_type_repr(INTEL_UC_FW_TYPE_HUC),
huc->fw.path,
huc->fw.major_ver_found, huc->fw.minor_ver_found,
@@ -503,7 +508,7 @@ err_out:
__uc_sanitize(uc);
if (!ret) {
- dev_notice(i915->drm.dev, "GuC is uninitialized\n");
+ drm_notice(&i915->drm, "GuC is uninitialized\n");
/* We want to run without GuC submission */
return 0;
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.h b/drivers/gpu/drm/i915/gt/uc/intel_uc.h
index 5ae7b50b7dc1..9c954c589edf 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.h
@@ -34,6 +34,7 @@ struct intel_uc {
void intel_uc_init_early(struct intel_uc *uc);
void intel_uc_driver_late_release(struct intel_uc *uc);
+void intel_uc_driver_remove(struct intel_uc *uc);
void intel_uc_init_mmio(struct intel_uc *uc);
void intel_uc_reset_prepare(struct intel_uc *uc);
void intel_uc_suspend(struct intel_uc *uc);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_debugfs.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_debugfs.c
new file mode 100644
index 000000000000..9d16b784aa0d
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_debugfs.c
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include <linux/debugfs.h>
+
+#include "intel_guc_debugfs.h"
+#include "intel_huc_debugfs.h"
+#include "intel_uc.h"
+#include "intel_uc_debugfs.h"
+
+void intel_uc_debugfs_register(struct intel_uc *uc, struct dentry *gt_root)
+{
+ struct dentry *root;
+
+ if (!gt_root)
+ return;
+
+ /* GuC and HuC always go in pairs, no need to check both */
+ if (!intel_uc_supports_guc(uc))
+ return;
+
+ root = debugfs_create_dir("uc", gt_root);
+ if (IS_ERR(root))
+ return;
+
+ intel_guc_debugfs_register(&uc->guc, root);
+ intel_huc_debugfs_register(&uc->huc, root);
+}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_debugfs.h b/drivers/gpu/drm/i915/gt/uc/intel_uc_debugfs.h
new file mode 100644
index 000000000000..010ce250d223
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_debugfs.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef DEBUGFS_UC_H
+#define DEBUGFS_UC_H
+
+struct intel_uc;
+struct dentry;
+
+void intel_uc_debugfs_register(struct intel_uc *uc, struct dentry *gt_root);
+
+#endif /* DEBUGFS_UC_H */
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
index 18c755203688..e1caae93996d 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
@@ -11,26 +11,32 @@
#include "intel_uc_fw_abi.h"
#include "i915_drv.h"
-static inline struct intel_gt *__uc_fw_to_gt(struct intel_uc_fw *uc_fw)
+static inline struct intel_gt *
+____uc_fw_to_gt(struct intel_uc_fw *uc_fw, enum intel_uc_fw_type type)
{
- GEM_BUG_ON(uc_fw->status == INTEL_UC_FIRMWARE_UNINITIALIZED);
- if (uc_fw->type == INTEL_UC_FW_TYPE_GUC)
+ if (type == INTEL_UC_FW_TYPE_GUC)
return container_of(uc_fw, struct intel_gt, uc.guc.fw);
- GEM_BUG_ON(uc_fw->type != INTEL_UC_FW_TYPE_HUC);
+ GEM_BUG_ON(type != INTEL_UC_FW_TYPE_HUC);
return container_of(uc_fw, struct intel_gt, uc.huc.fw);
}
+static inline struct intel_gt *__uc_fw_to_gt(struct intel_uc_fw *uc_fw)
+{
+ GEM_BUG_ON(uc_fw->status == INTEL_UC_FIRMWARE_UNINITIALIZED);
+ return ____uc_fw_to_gt(uc_fw, uc_fw->type);
+}
+
#ifdef CONFIG_DRM_I915_DEBUG_GUC
void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
enum intel_uc_fw_status status)
{
uc_fw->__status = status;
- DRM_DEV_DEBUG_DRIVER(__uc_fw_to_gt(uc_fw)->i915->drm.dev,
- "%s firmware -> %s\n",
- intel_uc_fw_type_repr(uc_fw->type),
- status == INTEL_UC_FIRMWARE_SELECTED ?
- uc_fw->path : intel_uc_fw_status_repr(status));
+ drm_dbg(&__uc_fw_to_gt(uc_fw)->i915->drm,
+ "%s firmware -> %s\n",
+ intel_uc_fw_type_repr(uc_fw->type),
+ status == INTEL_UC_FIRMWARE_SELECTED ?
+ uc_fw->path : intel_uc_fw_status_repr(status));
}
#endif
@@ -187,17 +193,15 @@ static void __uc_fw_user_override(struct intel_uc_fw *uc_fw)
* intel_uc_fw_init_early - initialize the uC object and select the firmware
* @uc_fw: uC firmware
* @type: type of uC
- * @supported: is uC support possible
- * @platform: platform identifier
- * @rev: hardware revision
*
* Initialize the state of our uC object and relevant tracking and select the
* firmware to fetch and load.
*/
void intel_uc_fw_init_early(struct intel_uc_fw *uc_fw,
- enum intel_uc_fw_type type, bool supported,
- enum intel_platform platform, u8 rev)
+ enum intel_uc_fw_type type)
{
+ struct drm_i915_private *i915 = ____uc_fw_to_gt(uc_fw, type)->i915;
+
/*
* we use FIRMWARE_UNINITIALIZED to detect checks against uc_fw->status
* before we're looked at the HW caps to see if we have uc support
@@ -208,8 +212,10 @@ void intel_uc_fw_init_early(struct intel_uc_fw *uc_fw,
uc_fw->type = type;
- if (supported) {
- __uc_fw_auto_select(uc_fw, platform, rev);
+ if (HAS_GT_UC(i915)) {
+ __uc_fw_auto_select(uc_fw,
+ INTEL_INFO(i915)->platform,
+ INTEL_REVID(i915));
__uc_fw_user_override(uc_fw);
}
@@ -290,7 +296,7 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
/* Check the size of the blob before examining buffer contents */
if (unlikely(fw->size < sizeof(struct uc_css_header))) {
- dev_warn(dev, "%s firmware %s: invalid size: %zu < %zu\n",
+ drm_warn(&i915->drm, "%s firmware %s: invalid size: %zu < %zu\n",
intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
fw->size, sizeof(struct uc_css_header));
err = -ENODATA;
@@ -303,7 +309,7 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
size = (css->header_size_dw - css->key_size_dw - css->modulus_size_dw -
css->exponent_size_dw) * sizeof(u32);
if (unlikely(size != sizeof(struct uc_css_header))) {
- dev_warn(dev,
+ drm_warn(&i915->drm,
"%s firmware %s: unexpected header size: %zu != %zu\n",
intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
fw->size, sizeof(struct uc_css_header));
@@ -316,7 +322,7 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
/* now RSA */
if (unlikely(css->key_size_dw != UOS_RSA_SCRATCH_COUNT)) {
- dev_warn(dev, "%s firmware %s: unexpected key size: %u != %u\n",
+ drm_warn(&i915->drm, "%s firmware %s: unexpected key size: %u != %u\n",
intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
css->key_size_dw, UOS_RSA_SCRATCH_COUNT);
err = -EPROTO;
@@ -327,7 +333,7 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
/* At least, it should have header, uCode and RSA. Size of all three. */
size = sizeof(struct uc_css_header) + uc_fw->ucode_size + uc_fw->rsa_size;
if (unlikely(fw->size < size)) {
- dev_warn(dev, "%s firmware %s: invalid size: %zu < %zu\n",
+ drm_warn(&i915->drm, "%s firmware %s: invalid size: %zu < %zu\n",
intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
fw->size, size);
err = -ENOEXEC;
@@ -337,7 +343,7 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
/* Sanity check whether this fw is not larger than whole WOPCM memory */
size = __intel_uc_fw_get_upload_size(uc_fw);
if (unlikely(size >= i915->wopcm.size)) {
- dev_warn(dev, "%s firmware %s: invalid size: %zu > %zu\n",
+ drm_warn(&i915->drm, "%s firmware %s: invalid size: %zu > %zu\n",
intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
size, (size_t)i915->wopcm.size);
err = -E2BIG;
@@ -352,7 +358,7 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
if (uc_fw->major_ver_found != uc_fw->major_ver_wanted ||
uc_fw->minor_ver_found < uc_fw->minor_ver_wanted) {
- dev_notice(dev, "%s firmware %s: unexpected version: %u.%u != %u.%u\n",
+ drm_notice(&i915->drm, "%s firmware %s: unexpected version: %u.%u != %u.%u\n",
intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
uc_fw->major_ver_found, uc_fw->minor_ver_found,
uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted);
@@ -380,9 +386,9 @@ fail:
INTEL_UC_FIRMWARE_MISSING :
INTEL_UC_FIRMWARE_ERROR);
- dev_notice(dev, "%s firmware %s: fetch failed with error %d\n",
+ drm_notice(&i915->drm, "%s firmware %s: fetch failed with error %d\n",
intel_uc_fw_type_repr(uc_fw->type), uc_fw->path, err);
- dev_info(dev, "%s firmware(s) can be downloaded from %s\n",
+ drm_info(&i915->drm, "%s firmware(s) can be downloaded from %s\n",
intel_uc_fw_type_repr(uc_fw->type), INTEL_UC_FIRMWARE_URL);
release_firmware(fw); /* OK even if fw is NULL */
@@ -467,7 +473,7 @@ static int uc_fw_xfer(struct intel_uc_fw *uc_fw, u32 dst_offset, u32 dma_flags)
/* Wait for DMA to finish */
ret = intel_wait_for_register_fw(uncore, DMA_CTRL, START_DMA, 0, 100);
if (ret)
- dev_err(gt->i915->drm.dev, "DMA for %s fw failed, DMA_CTRL=%u\n",
+ drm_err(&gt->i915->drm, "DMA for %s fw failed, DMA_CTRL=%u\n",
intel_uc_fw_type_repr(uc_fw->type),
intel_uncore_read_fw(uncore, DMA_CTRL));
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
index 888ff0de0244..23d3a423ac0f 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
@@ -239,8 +239,7 @@ static inline u32 intel_uc_fw_get_upload_size(struct intel_uc_fw *uc_fw)
}
void intel_uc_fw_init_early(struct intel_uc_fw *uc_fw,
- enum intel_uc_fw_type type, bool supported,
- enum intel_platform platform, u8 rev);
+ enum intel_uc_fw_type type);
int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw);
void intel_uc_fw_cleanup_fetch(struct intel_uc_fw *uc_fw);
int intel_uc_fw_upload(struct intel_uc_fw *uc_fw, u32 offset, u32 dma_flags);
diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c
index 8b13f091cee2..0d6d59871308 100644
--- a/drivers/gpu/drm/i915/gvt/aperture_gm.c
+++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c
@@ -35,7 +35,7 @@
*/
#include "i915_drv.h"
-#include "i915_gem_fence_reg.h"
+#include "gt/intel_ggtt_fencing.h"
#include "gvt.h"
static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 9e065ad0658f..a3cc080a46c6 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -164,6 +164,7 @@ struct decode_info {
#define OP_STATE_BASE_ADDRESS OP_3D_MEDIA(0x0, 0x1, 0x01)
#define OP_STATE_SIP OP_3D_MEDIA(0x0, 0x1, 0x02)
#define OP_3D_MEDIA_0_1_4 OP_3D_MEDIA(0x0, 0x1, 0x04)
+#define OP_SWTESS_BASE_ADDRESS OP_3D_MEDIA(0x0, 0x1, 0x03)
#define OP_3DSTATE_VF_STATISTICS_GM45 OP_3D_MEDIA(0x1, 0x0, 0x0B)
@@ -967,18 +968,6 @@ static int cmd_handler_lri(struct parser_exec_state *s)
{
int i, ret = 0;
int cmd_len = cmd_length(s);
- u32 valid_len = CMD_LEN(1);
-
- /*
- * Official intel docs are somewhat sloppy , check the definition of
- * MI_LOAD_REGISTER_IMM.
- */
- #define MAX_VALID_LEN 127
- if ((cmd_len < valid_len) || (cmd_len > MAX_VALID_LEN)) {
- gvt_err("len is not valid: len=%u valid_len=%u\n",
- cmd_len, valid_len);
- return -EFAULT;
- }
for (i = 1; i < cmd_len; i += 2) {
if (IS_BROADWELL(s->engine->i915) && s->engine->id != RCS0) {
@@ -2485,6 +2474,9 @@ static const struct cmd_info cmd_info[] = {
{"OP_3D_MEDIA_0_1_4", OP_3D_MEDIA_0_1_4, F_LEN_VAR, R_RCS, D_ALL,
ADDR_FIX_1(1), 8, NULL},
+ {"OP_SWTESS_BASE_ADDRESS", OP_SWTESS_BASE_ADDRESS,
+ F_LEN_VAR, R_RCS, D_ALL, ADDR_FIX_2(1, 2), 3, NULL},
+
{"3DSTATE_VS", OP_3DSTATE_VS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
{"3DSTATE_SF", OP_3DSTATE_SF, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
index 6e5c9885d9fe..a83df2f84eb9 100644
--- a/drivers/gpu/drm/i915/gvt/display.c
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -221,7 +221,7 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
TRANS_DDI_PORT_MASK);
vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |=
- (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DVI |
+ (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
(PORT_B << TRANS_DDI_PORT_SHIFT) |
TRANS_DDI_FUNC_ENABLE);
if (IS_BROADWELL(dev_priv)) {
@@ -241,7 +241,7 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
TRANS_DDI_PORT_MASK);
vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |=
- (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DVI |
+ (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
(PORT_C << TRANS_DDI_PORT_SHIFT) |
TRANS_DDI_FUNC_ENABLE);
if (IS_BROADWELL(dev_priv)) {
@@ -261,7 +261,7 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
TRANS_DDI_PORT_MASK);
vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |=
- (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DVI |
+ (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
(PORT_D << TRANS_DDI_PORT_SHIFT) |
TRANS_DDI_FUNC_ENABLE);
if (IS_BROADWELL(dev_priv)) {
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 0182e2a5acff..2faf50e1b051 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -462,11 +462,14 @@ static int pipeconf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
return 0;
}
-/* ascendingly sorted */
+/* sorted in ascending order */
static i915_reg_t force_nonpriv_white_list[] = {
+ _MMIO(0xd80),
GEN9_CS_DEBUG_MODE1, //_MMIO(0x20ec)
GEN9_CTX_PREEMPT_REG,//_MMIO(0x2248)
- PS_INVOCATION_COUNT,//_MMIO(0x2348)
+ CL_PRIMITIVES_COUNT, //_MMIO(0x2340)
+ PS_INVOCATION_COUNT, //_MMIO(0x2348)
+ PS_DEPTH_COUNT, //_MMIO(0x2350)
GEN8_CS_CHICKEN1,//_MMIO(0x2580)
_MMIO(0x2690),
_MMIO(0x2694),
@@ -491,6 +494,7 @@ static i915_reg_t force_nonpriv_white_list[] = {
_MMIO(0xe18c),
_MMIO(0xe48c),
_MMIO(0xe5f4),
+ _MMIO(0x64844),
};
/* a simple bsearch */
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index 074c4efb58eb..eee530453aa6 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -131,6 +131,7 @@ struct kvmgt_vdev {
struct work_struct release_work;
atomic_t released;
struct vfio_device *vfio_device;
+ struct vfio_group *vfio_group;
};
static inline struct kvmgt_vdev *kvmgt_vdev(struct intel_vgpu *vgpu)
@@ -151,6 +152,7 @@ static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
unsigned long size)
{
struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
+ struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
int total_pages;
int npage;
int ret;
@@ -160,7 +162,7 @@ static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
for (npage = 0; npage < total_pages; npage++) {
unsigned long cur_gfn = gfn + npage;
- ret = vfio_unpin_pages(mdev_dev(kvmgt_vdev(vgpu)->mdev), &cur_gfn, 1);
+ ret = vfio_group_unpin_pages(vdev->vfio_group, &cur_gfn, 1);
drm_WARN_ON(&i915->drm, ret != 1);
}
}
@@ -169,6 +171,7 @@ static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
unsigned long size, struct page **page)
{
+ struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
unsigned long base_pfn = 0;
int total_pages;
int npage;
@@ -183,8 +186,8 @@ static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
unsigned long cur_gfn = gfn + npage;
unsigned long pfn;
- ret = vfio_pin_pages(mdev_dev(kvmgt_vdev(vgpu)->mdev), &cur_gfn, 1,
- IOMMU_READ | IOMMU_WRITE, &pfn);
+ ret = vfio_group_pin_pages(vdev->vfio_group, &cur_gfn, 1,
+ IOMMU_READ | IOMMU_WRITE, &pfn);
if (ret != 1) {
gvt_vgpu_err("vfio_pin_pages failed for gfn 0x%lx, ret %d\n",
cur_gfn, ret);
@@ -792,6 +795,7 @@ static int intel_vgpu_open(struct mdev_device *mdev)
struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
unsigned long events;
int ret;
+ struct vfio_group *vfio_group;
vdev->iommu_notifier.notifier_call = intel_vgpu_iommu_notifier;
vdev->group_notifier.notifier_call = intel_vgpu_group_notifier;
@@ -814,6 +818,14 @@ static int intel_vgpu_open(struct mdev_device *mdev)
goto undo_iommu;
}
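+	/* Hold an external-user reference on the VFIO group; the pin/unpin
+	 * and vfio_dma_rw() paths operate on it.
+	 */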
+ vfio_group = vfio_group_get_external_user_from_dev(mdev_dev(mdev));
+ if (IS_ERR_OR_NULL(vfio_group)) {
+ ret = !vfio_group ? -EFAULT : PTR_ERR(vfio_group);
+ gvt_vgpu_err("vfio_group_get_external_user_from_dev failed\n");
+ goto undo_register;
+ }
+ vdev->vfio_group = vfio_group;
+
/* Take a module reference as mdev core doesn't take
* a reference for vendor driver.
*/
@@ -830,6 +842,10 @@ static int intel_vgpu_open(struct mdev_device *mdev)
return ret;
undo_group:
+ vfio_group_put_external_user(vdev->vfio_group);
+ vdev->vfio_group = NULL;
+
+undo_register:
vfio_unregister_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY,
&vdev->group_notifier);
@@ -884,6 +900,7 @@ static void __intel_vgpu_release(struct intel_vgpu *vgpu)
kvmgt_guest_exit(info);
intel_vgpu_release_msi_eventfd_ctx(vgpu);
+ vfio_group_put_external_user(vdev->vfio_group);
vdev->kvm = NULL;
vgpu->handle = 0;
@@ -2035,33 +2052,14 @@ static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa,
void *buf, unsigned long len, bool write)
{
struct kvmgt_guest_info *info;
- struct kvm *kvm;
- int idx, ret;
- bool kthread = current->mm == NULL;
if (!handle_valid(handle))
return -ESRCH;
info = (struct kvmgt_guest_info *)handle;
- kvm = info->kvm;
-
- if (kthread) {
- if (!mmget_not_zero(kvm->mm))
- return -EFAULT;
- use_mm(kvm->mm);
- }
-
- idx = srcu_read_lock(&kvm->srcu);
- ret = write ? kvm_write_guest(kvm, gpa, buf, len) :
- kvm_read_guest(kvm, gpa, buf, len);
- srcu_read_unlock(&kvm->srcu, idx);
-
- if (kthread) {
- unuse_mm(kvm->mm);
- mmput(kvm->mm);
- }
- return ret;
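+	/* vfio_dma_rw() handles guest memory access (incl. kthread callers) */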
+ return vfio_dma_rw(kvmgt_vdev(info->vgpu)->vfio_group,
+ gpa, buf, len, write);
}
static int kvmgt_read_gpa(unsigned long handle, unsigned long gpa,
diff --git a/drivers/gpu/drm/i915/gvt/opregion.c b/drivers/gpu/drm/i915/gvt/opregion.c
index 867e7629025b..33569b910ed5 100644
--- a/drivers/gpu/drm/i915/gvt/opregion.c
+++ b/drivers/gpu/drm/i915/gvt/opregion.c
@@ -147,15 +147,14 @@ static void virt_vbt_generation(struct vbt *v)
/* there's features depending on version! */
v->header.version = 155;
v->header.header_size = sizeof(v->header);
- v->header.vbt_size = sizeof(struct vbt) - sizeof(v->header);
+ v->header.vbt_size = sizeof(struct vbt);
v->header.bdb_offset = offsetof(struct vbt, bdb_header);
strcpy(&v->bdb_header.signature[0], "BIOS_DATA_BLOCK");
v->bdb_header.version = 186; /* child_dev_size = 33 */
v->bdb_header.header_size = sizeof(v->bdb_header);
- v->bdb_header.bdb_size = sizeof(struct vbt) - sizeof(struct vbt_header)
- - sizeof(struct bdb_header);
+ v->bdb_header.bdb_size = sizeof(struct vbt) - sizeof(struct vbt_header);
/* general features */
v->general_features_header.id = BDB_GENERAL_FEATURES;
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 1c95bf8cbed0..cb11c3184085 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -296,8 +296,8 @@ shadow_context_descriptor_update(struct intel_context *ce,
* Update bits 0-11 of the context descriptor which includes flags
* like GEN8_CTX_* cached in desc_template
*/
- desc &= ~(0x3 << GEN8_CTX_ADDRESSING_MODE_SHIFT);
- desc |= workload->ctx_desc.addressing_mode <<
+ desc &= ~(0x3ull << GEN8_CTX_ADDRESSING_MODE_SHIFT);
+ desc |= (u64)workload->ctx_desc.addressing_mode <<
GEN8_CTX_ADDRESSING_MODE_SHIFT;
ce->lrc_desc = desc;
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index 78f14f04d2ea..1d5ff88078bd 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -274,10 +274,17 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
struct intel_gvt *gvt = vgpu->gvt;
struct drm_i915_private *i915 = gvt->gt->i915;
- mutex_lock(&vgpu->vgpu_lock);
-
drm_WARN(&i915->drm, vgpu->active, "vGPU is still active!\n");
+	/*
+	 * Remove from the idr first so the later cleanup can tell whether
+	 * the vGPU service needs stopping once no vGPU remains active.
+	 */
+ mutex_lock(&gvt->lock);
+ idr_remove(&gvt->vgpu_idr, vgpu->id);
+ mutex_unlock(&gvt->lock);
+
+ mutex_lock(&vgpu->vgpu_lock);
intel_gvt_debugfs_remove_vgpu(vgpu);
intel_vgpu_clean_sched_policy(vgpu);
intel_vgpu_clean_submission(vgpu);
@@ -292,7 +299,6 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
mutex_unlock(&vgpu->vgpu_lock);
mutex_lock(&gvt->lock);
- idr_remove(&gvt->vgpu_idr, vgpu->id);
if (idr_is_empty(&gvt->vgpu_idr))
intel_gvt_clean_irq(gvt);
intel_gvt_update_vgpu_types(gvt);
diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
index c4048628188a..d960d0be5bd2 100644
--- a/drivers/gpu/drm/i915/i915_active.c
+++ b/drivers/gpu/drm/i915/i915_active.c
@@ -496,7 +496,7 @@ static int flush_lazy_signals(struct i915_active *ref)
return err;
}
-int i915_active_wait(struct i915_active *ref)
+int __i915_active_wait(struct i915_active *ref, int state)
{
int err;
@@ -511,7 +511,9 @@ int i915_active_wait(struct i915_active *ref)
if (err)
return err;
- if (wait_var_event_interruptible(ref, i915_active_is_idle(ref)))
+ if (!i915_active_is_idle(ref) &&
+ ___wait_var_event(ref, i915_active_is_idle(ref),
+ state, 0, 0, schedule()))
return -EINTR;
flush_work(&ref->work);
@@ -540,34 +542,88 @@ static int __await_active(struct i915_active_fence *active,
return 0;
}
+struct wait_barrier {
+ struct wait_queue_entry base;
+ struct i915_active *ref;
+};
+
+static int
+barrier_wake(wait_queue_entry_t *wq, unsigned int mode, int flags, void *key)
+{
+ struct wait_barrier *wb = container_of(wq, typeof(*wb), base);
+
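+	/* Wait-queue callback: detach and signal the fence once the ref idles */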
+ if (i915_active_is_idle(wb->ref)) {
+ list_del(&wq->entry);
+ i915_sw_fence_complete(wq->private);
+ kfree(wq);
+ }
+
+ return 0;
+}
+
+static int __await_barrier(struct i915_active *ref, struct i915_sw_fence *fence)
+{
+ struct wait_barrier *wb;
+
+ wb = kmalloc(sizeof(*wb), GFP_KERNEL);
+ if (unlikely(!wb))
+ return -ENOMEM;
+
+ GEM_BUG_ON(i915_active_is_idle(ref));
+ if (!i915_sw_fence_await(fence)) {
+ kfree(wb);
+ return -EINVAL;
+ }
+
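+	/* Park on the active's waitqueue; barrier_wake() completes the fence */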
+ wb->base.flags = 0;
+ wb->base.func = barrier_wake;
+ wb->base.private = fence;
+ wb->ref = ref;
+
+ add_wait_queue(__var_waitqueue(ref), &wb->base);
+ return 0;
+}
+
static int await_active(struct i915_active *ref,
unsigned int flags,
int (*fn)(void *arg, struct dma_fence *fence),
- void *arg)
+ void *arg, struct i915_sw_fence *barrier)
{
int err = 0;
- /* We must always wait for the exclusive fence! */
- if (rcu_access_pointer(ref->excl.fence)) {
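+	/* The i915_active is already idle, there is nothing to await */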
+ if (!i915_active_acquire_if_busy(ref))
+ return 0;
+
+ if (flags & I915_ACTIVE_AWAIT_EXCL &&
+ rcu_access_pointer(ref->excl.fence)) {
err = __await_active(&ref->excl, fn, arg);
if (err)
- return err;
+ goto out;
}
- if (flags & I915_ACTIVE_AWAIT_ALL && i915_active_acquire_if_busy(ref)) {
+ if (flags & I915_ACTIVE_AWAIT_ACTIVE) {
struct active_node *it, *n;
rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
err = __await_active(&it->base, fn, arg);
if (err)
- break;
+ goto out;
}
- i915_active_release(ref);
+ }
+
+ if (flags & I915_ACTIVE_AWAIT_BARRIER) {
+ err = flush_lazy_signals(ref);
if (err)
- return err;
+ goto out;
+
+ err = __await_barrier(ref, barrier);
+ if (err)
+ goto out;
}
- return 0;
+out:
+ i915_active_release(ref);
+ return err;
}
static int rq_await_fence(void *arg, struct dma_fence *fence)
@@ -579,7 +635,7 @@ int i915_request_await_active(struct i915_request *rq,
struct i915_active *ref,
unsigned int flags)
{
- return await_active(ref, flags, rq_await_fence, rq);
+ return await_active(ref, flags, rq_await_fence, rq, &rq->submit);
}
static int sw_await_fence(void *arg, struct dma_fence *fence)
@@ -592,7 +648,7 @@ int i915_sw_fence_await_active(struct i915_sw_fence *fence,
struct i915_active *ref,
unsigned int flags)
{
- return await_active(ref, flags, sw_await_fence, fence);
+ return await_active(ref, flags, sw_await_fence, fence, fence);
}
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
@@ -818,7 +874,7 @@ void i915_active_acquire_barrier(struct i915_active *ref)
GEM_BUG_ON(!intel_engine_pm_is_awake(engine));
llist_add(barrier_to_ll(node), &engine->barrier_tasks);
- intel_engine_pm_put(engine);
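+	/* Defer the put briefly to avoid ping-ponging the engine park state */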
+ intel_engine_pm_put_delay(engine, 1);
}
}
@@ -937,6 +993,59 @@ void i915_active_noop(struct dma_fence *fence, struct dma_fence_cb *cb)
active_fence_cb(fence, cb);
}
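+/*
+ * auto_active wraps an i915_active in a kref so that standalone,
+ * reference-counted trackers can be created via i915_active_create().
+ */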
+struct auto_active {
+ struct i915_active base;
+ struct kref ref;
+};
+
+struct i915_active *i915_active_get(struct i915_active *ref)
+{
+ struct auto_active *aa = container_of(ref, typeof(*aa), base);
+
+ kref_get(&aa->ref);
+ return &aa->base;
+}
+
+static void auto_release(struct kref *ref)
+{
+ struct auto_active *aa = container_of(ref, typeof(*aa), ref);
+
+ i915_active_fini(&aa->base);
+ kfree(aa);
+}
+
+void i915_active_put(struct i915_active *ref)
+{
+ struct auto_active *aa = container_of(ref, typeof(*aa), base);
+
+ kref_put(&aa->ref, auto_release);
+}
+
+static int auto_active(struct i915_active *ref)
+{
+ i915_active_get(ref);
+ return 0;
+}
+
+static void auto_retire(struct i915_active *ref)
+{
+ i915_active_put(ref);
+}
+
+struct i915_active *i915_active_create(void)
+{
+ struct auto_active *aa;
+
+ aa = kmalloc(sizeof(*aa), GFP_KERNEL);
+ if (!aa)
+ return NULL;
+
+ kref_init(&aa->ref);
+ i915_active_init(&aa->base, auto_active, auto_retire);
+
+ return &aa->base;
+}
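+/*
+ * A minimal usage sketch (hypothetical caller): the reference returned by
+ * i915_active_create() is dropped with i915_active_put(); any additional
+ * user takes its own reference with i915_active_get() first.
+ */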
+
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_active.c"
#endif
diff --git a/drivers/gpu/drm/i915/i915_active.h b/drivers/gpu/drm/i915/i915_active.h
index b3282ae7913c..cf4058150966 100644
--- a/drivers/gpu/drm/i915/i915_active.h
+++ b/drivers/gpu/drm/i915/i915_active.h
@@ -181,7 +181,11 @@ static inline bool i915_active_has_exclusive(struct i915_active *ref)
return rcu_access_pointer(ref->excl.fence);
}
-int i915_active_wait(struct i915_active *ref);
+int __i915_active_wait(struct i915_active *ref, int state);
+static inline int i915_active_wait(struct i915_active *ref)
+{
+ return __i915_active_wait(ref, TASK_INTERRUPTIBLE);
+}
int i915_sw_fence_await_active(struct i915_sw_fence *fence,
struct i915_active *ref,
@@ -189,7 +193,9 @@ int i915_sw_fence_await_active(struct i915_sw_fence *fence,
int i915_request_await_active(struct i915_request *rq,
struct i915_active *ref,
unsigned int flags);
-#define I915_ACTIVE_AWAIT_ALL BIT(0)
+#define I915_ACTIVE_AWAIT_EXCL BIT(0)
+#define I915_ACTIVE_AWAIT_ACTIVE BIT(1)
+#define I915_ACTIVE_AWAIT_BARRIER BIT(2)
int i915_active_acquire(struct i915_active *ref);
bool i915_active_acquire_if_busy(struct i915_active *ref);
@@ -221,4 +227,8 @@ void i915_request_add_active_barriers(struct i915_request *rq);
void i915_active_print(struct i915_active *ref, struct drm_printer *m);
void i915_active_unlock_wait(struct i915_active *ref);
+struct i915_active *i915_active_create(void);
+struct i915_active *i915_active_get(struct i915_active *ref);
+void i915_active_put(struct i915_active *ref);
+
#endif /* _I915_ACTIVE_H_ */
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 25bf997e2dd1..aa35a59f1c7d 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -37,7 +37,6 @@
#include "gt/intel_reset.h"
#include "gt/intel_rc6.h"
#include "gt/intel_rps.h"
-#include "gt/uc/intel_guc_submission.h"
#include "i915_debugfs.h"
#include "i915_debugfs_params.h"
@@ -218,7 +217,7 @@ i915_debugfs_describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
struct file_stats {
struct i915_address_space *vm;
unsigned long count;
- u64 total, unbound;
+ u64 total;
u64 active, inactive;
u64 closed;
};
@@ -234,8 +233,6 @@ static int per_file_stats(int id, void *ptr, void *data)
stats->count++;
stats->total += obj->base.size;
- if (!atomic_read(&obj->bind_count))
- stats->unbound += obj->base.size;
spin_lock(&obj->vma.lock);
if (!stats->vm) {
@@ -285,13 +282,12 @@ static int per_file_stats(int id, void *ptr, void *data)
#define print_file_stats(m, name, stats) do { \
if (stats.count) \
- seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu unbound, %llu closed)\n", \
+ seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu closed)\n", \
name, \
stats.count, \
stats.total, \
stats.active, \
stats.inactive, \
- stats.unbound, \
stats.closed); \
} while (0)
@@ -745,7 +741,7 @@ i915_error_state_write(struct file *filp,
if (!error)
return 0;
- DRM_DEBUG_DRIVER("Resetting error state\n");
+ drm_dbg(&error->i915->drm, "Resetting error state\n");
i915_reset_error_state(error->i915);
return cnt;
@@ -1251,286 +1247,6 @@ static int i915_llc(struct seq_file *m, void *data)
return 0;
}
-static int i915_huc_load_status_info(struct seq_file *m, void *data)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- intel_wakeref_t wakeref;
- struct drm_printer p;
-
- if (!HAS_GT_UC(dev_priv))
- return -ENODEV;
-
- p = drm_seq_file_printer(m);
- intel_uc_fw_dump(&dev_priv->gt.uc.huc.fw, &p);
-
- with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
- seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2));
-
- return 0;
-}
-
-static int i915_guc_load_status_info(struct seq_file *m, void *data)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- intel_wakeref_t wakeref;
- struct drm_printer p;
-
- if (!HAS_GT_UC(dev_priv))
- return -ENODEV;
-
- p = drm_seq_file_printer(m);
- intel_uc_fw_dump(&dev_priv->gt.uc.guc.fw, &p);
-
- with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
- u32 tmp = I915_READ(GUC_STATUS);
- u32 i;
-
- seq_printf(m, "\nGuC status 0x%08x:\n", tmp);
- seq_printf(m, "\tBootrom status = 0x%x\n",
- (tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
- seq_printf(m, "\tuKernel status = 0x%x\n",
- (tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
- seq_printf(m, "\tMIA Core status = 0x%x\n",
- (tmp & GS_MIA_MASK) >> GS_MIA_SHIFT);
- seq_puts(m, "\nScratch registers:\n");
- for (i = 0; i < 16; i++) {
- seq_printf(m, "\t%2d: \t0x%x\n",
- i, I915_READ(SOFT_SCRATCH(i)));
- }
- }
-
- return 0;
-}
-
-static const char *
-stringify_guc_log_type(enum guc_log_buffer_type type)
-{
- switch (type) {
- case GUC_ISR_LOG_BUFFER:
- return "ISR";
- case GUC_DPC_LOG_BUFFER:
- return "DPC";
- case GUC_CRASH_DUMP_LOG_BUFFER:
- return "CRASH";
- default:
- MISSING_CASE(type);
- }
-
- return "";
-}
-
-static void i915_guc_log_info(struct seq_file *m, struct intel_guc_log *log)
-{
- enum guc_log_buffer_type type;
-
- if (!intel_guc_log_relay_created(log)) {
- seq_puts(m, "GuC log relay not created\n");
- return;
- }
-
- seq_puts(m, "GuC logging stats:\n");
-
- seq_printf(m, "\tRelay full count: %u\n",
- log->relay.full_count);
-
- for (type = GUC_ISR_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) {
- seq_printf(m, "\t%s:\tflush count %10u, overflow count %10u\n",
- stringify_guc_log_type(type),
- log->stats[type].flush,
- log->stats[type].sampled_overflow);
- }
-}
-
-static int i915_guc_info(struct seq_file *m, void *data)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct intel_uc *uc = &dev_priv->gt.uc;
-
- if (!intel_uc_uses_guc(uc))
- return -ENODEV;
-
- i915_guc_log_info(m, &uc->guc.log);
-
- /* Add more as required ... */
-
- return 0;
-}
-
-static int i915_guc_stage_pool(struct seq_file *m, void *data)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct intel_uc *uc = &dev_priv->gt.uc;
- struct guc_stage_desc *desc = uc->guc.stage_desc_pool_vaddr;
- int index;
-
- if (!intel_uc_uses_guc_submission(uc))
- return -ENODEV;
-
- for (index = 0; index < GUC_MAX_STAGE_DESCRIPTORS; index++, desc++) {
- struct intel_engine_cs *engine;
-
- if (!(desc->attribute & GUC_STAGE_DESC_ATTR_ACTIVE))
- continue;
-
- seq_printf(m, "GuC stage descriptor %u:\n", index);
- seq_printf(m, "\tIndex: %u\n", desc->stage_id);
- seq_printf(m, "\tAttribute: 0x%x\n", desc->attribute);
- seq_printf(m, "\tPriority: %d\n", desc->priority);
- seq_printf(m, "\tDoorbell id: %d\n", desc->db_id);
- seq_printf(m, "\tEngines used: 0x%x\n",
- desc->engines_used);
- seq_printf(m, "\tDoorbell trigger phy: 0x%llx, cpu: 0x%llx, uK: 0x%x\n",
- desc->db_trigger_phy,
- desc->db_trigger_cpu,
- desc->db_trigger_uk);
- seq_printf(m, "\tProcess descriptor: 0x%x\n",
- desc->process_desc);
- seq_printf(m, "\tWorkqueue address: 0x%x, size: 0x%x\n",
- desc->wq_addr, desc->wq_size);
- seq_putc(m, '\n');
-
- for_each_uabi_engine(engine, dev_priv) {
- u32 guc_engine_id = engine->guc_id;
- struct guc_execlist_context *lrc =
- &desc->lrc[guc_engine_id];
-
- seq_printf(m, "\t%s LRC:\n", engine->name);
- seq_printf(m, "\t\tContext desc: 0x%x\n",
- lrc->context_desc);
- seq_printf(m, "\t\tContext id: 0x%x\n", lrc->context_id);
- seq_printf(m, "\t\tLRCA: 0x%x\n", lrc->ring_lrca);
- seq_printf(m, "\t\tRing begin: 0x%x\n", lrc->ring_begin);
- seq_printf(m, "\t\tRing end: 0x%x\n", lrc->ring_end);
- seq_putc(m, '\n');
- }
- }
-
- return 0;
-}
-
-static int i915_guc_log_dump(struct seq_file *m, void *data)
-{
- struct drm_info_node *node = m->private;
- struct drm_i915_private *dev_priv = node_to_i915(node);
- bool dump_load_err = !!node->info_ent->data;
- struct drm_i915_gem_object *obj = NULL;
- u32 *log;
- int i = 0;
-
- if (!HAS_GT_UC(dev_priv))
- return -ENODEV;
-
- if (dump_load_err)
- obj = dev_priv->gt.uc.load_err_log;
- else if (dev_priv->gt.uc.guc.log.vma)
- obj = dev_priv->gt.uc.guc.log.vma->obj;
-
- if (!obj)
- return 0;
-
- log = i915_gem_object_pin_map(obj, I915_MAP_WC);
- if (IS_ERR(log)) {
- DRM_DEBUG("Failed to pin object\n");
- seq_puts(m, "(log data unaccessible)\n");
- return PTR_ERR(log);
- }
-
- for (i = 0; i < obj->base.size / sizeof(u32); i += 4)
- seq_printf(m, "0x%08x 0x%08x 0x%08x 0x%08x\n",
- *(log + i), *(log + i + 1),
- *(log + i + 2), *(log + i + 3));
-
- seq_putc(m, '\n');
-
- i915_gem_object_unpin_map(obj);
-
- return 0;
-}
-
-static int i915_guc_log_level_get(void *data, u64 *val)
-{
- struct drm_i915_private *dev_priv = data;
- struct intel_uc *uc = &dev_priv->gt.uc;
-
- if (!intel_uc_uses_guc(uc))
- return -ENODEV;
-
- *val = intel_guc_log_get_level(&uc->guc.log);
-
- return 0;
-}
-
-static int i915_guc_log_level_set(void *data, u64 val)
-{
- struct drm_i915_private *dev_priv = data;
- struct intel_uc *uc = &dev_priv->gt.uc;
-
- if (!intel_uc_uses_guc(uc))
- return -ENODEV;
-
- return intel_guc_log_set_level(&uc->guc.log, val);
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_level_fops,
- i915_guc_log_level_get, i915_guc_log_level_set,
- "%lld\n");
-
-static int i915_guc_log_relay_open(struct inode *inode, struct file *file)
-{
- struct drm_i915_private *i915 = inode->i_private;
- struct intel_guc *guc = &i915->gt.uc.guc;
- struct intel_guc_log *log = &guc->log;
-
- if (!intel_guc_is_ready(guc))
- return -ENODEV;
-
- file->private_data = log;
-
- return intel_guc_log_relay_open(log);
-}
-
-static ssize_t
-i915_guc_log_relay_write(struct file *filp,
- const char __user *ubuf,
- size_t cnt,
- loff_t *ppos)
-{
- struct intel_guc_log *log = filp->private_data;
- int val;
- int ret;
-
- ret = kstrtoint_from_user(ubuf, cnt, 0, &val);
- if (ret < 0)
- return ret;
-
- /*
- * Enable and start the guc log relay on value of 1.
- * Flush log relay for any other value.
- */
- if (val == 1)
- ret = intel_guc_log_relay_start(log);
- else
- intel_guc_log_relay_flush(log);
-
- return ret ?: cnt;
-}
-
-static int i915_guc_log_relay_release(struct inode *inode, struct file *file)
-{
- struct drm_i915_private *i915 = inode->i_private;
- struct intel_guc *guc = &i915->gt.uc.guc;
-
- intel_guc_log_relay_close(&guc->log);
- return 0;
-}
-
-static const struct file_operations i915_guc_log_relay_fops = {
- .owner = THIS_MODULE,
- .open = i915_guc_log_relay_open,
- .write = i915_guc_log_relay_write,
- .release = i915_guc_log_relay_release,
-};
-
static int i915_runtime_pm_status(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
@@ -2139,12 +1855,6 @@ static const struct drm_info_list i915_debugfs_list[] = {
{"i915_gem_objects", i915_gem_object_info, 0},
{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
{"i915_gem_interrupt", i915_interrupt_info, 0},
- {"i915_guc_info", i915_guc_info, 0},
- {"i915_guc_load_status", i915_guc_load_status_info, 0},
- {"i915_guc_log_dump", i915_guc_log_dump, 0},
- {"i915_guc_load_err_log_dump", i915_guc_log_dump, 0, (void *)1},
- {"i915_guc_stage_pool", i915_guc_stage_pool, 0},
- {"i915_huc_load_status", i915_huc_load_status_info, 0},
{"i915_frequency_info", i915_frequency_info, 0},
{"i915_ring_freq_table", i915_ring_freq_table, 0},
{"i915_context_status", i915_context_status, 0},
@@ -2172,8 +1882,6 @@ static const struct i915_debugfs_files {
{"i915_error_state", &i915_error_state_fops},
{"i915_gpu_info", &i915_gpu_info_fops},
#endif
- {"i915_guc_log_level", &i915_guc_log_level_fops},
- {"i915_guc_log_relay", &i915_guc_log_relay_fops},
};
void i915_debugfs_register(struct drm_i915_private *dev_priv)
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 6116dab3d059..641f5e03b661 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -432,8 +432,7 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
mutex_init(&dev_priv->backlight_lock);
mutex_init(&dev_priv->sb_lock);
- pm_qos_add_request(&dev_priv->sb_qos,
- PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
+ cpu_latency_qos_add_request(&dev_priv->sb_qos, PM_QOS_DEFAULT_VALUE);
mutex_init(&dev_priv->av_mutex);
mutex_init(&dev_priv->wm.wm_mutex);
@@ -497,7 +496,7 @@ static void i915_driver_late_release(struct drm_i915_private *dev_priv)
vlv_suspend_cleanup(dev_priv);
i915_workqueues_cleanup(dev_priv);
- pm_qos_remove_request(&dev_priv->sb_qos);
+ cpu_latency_qos_remove_request(&dev_priv->sb_qos);
mutex_destroy(&dev_priv->sb_lock);
}
@@ -674,8 +673,7 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
}
}
- pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY,
- PM_QOS_DEFAULT_VALUE);
+ cpu_latency_qos_add_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
intel_gt_init_workarounds(dev_priv);
@@ -721,7 +719,7 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
err_msi:
if (pdev->msi_enabled)
pci_disable_msi(pdev);
- pm_qos_remove_request(&dev_priv->pm_qos);
+ cpu_latency_qos_remove_request(&dev_priv->pm_qos);
err_mem_regions:
intel_memory_regions_driver_release(dev_priv);
err_ggtt:
@@ -744,7 +742,7 @@ static void i915_driver_hw_remove(struct drm_i915_private *dev_priv)
if (pdev->msi_enabled)
pci_disable_msi(pdev);
- pm_qos_remove_request(&dev_priv->pm_qos);
+ cpu_latency_qos_remove_request(&dev_priv->pm_qos);
}
/**
@@ -1284,7 +1282,6 @@ static int i915_drm_resume(struct drm_device *dev)
drm_err(&dev_priv->drm, "failed to re-enable GGTT\n");
i915_ggtt_resume(&dev_priv->ggtt);
- i915_gem_restore_fences(&dev_priv->ggtt);
intel_csr_ucode_resume(dev_priv);
@@ -1602,8 +1599,6 @@ static int intel_runtime_suspend(struct device *kdev)
intel_gt_runtime_resume(&dev_priv->gt);
- i915_gem_restore_fences(&dev_priv->ggtt);
-
enable_rpm_wakeref_asserts(rpm);
return ret;
@@ -1683,7 +1678,6 @@ static int intel_runtime_resume(struct device *kdev)
* we can do is to hope that things will still work (and disable RPM).
*/
intel_gt_runtime_resume(&dev_priv->gt);
- i915_gem_restore_fences(&dev_priv->ggtt);
/*
* On VLV/CHV display interrupts are part of the display
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index e0a83b8e34dc..b00f0845cbc3 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -92,7 +92,6 @@
#include "intel_wopcm.h"
#include "i915_gem.h"
-#include "i915_gem_fence_reg.h"
#include "i915_gem_gtt.h"
#include "i915_gpu_error.h"
#include "i915_perf_types.h"
@@ -109,8 +108,8 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20200313"
-#define DRIVER_TIMESTAMP 1584144591
+#define DRIVER_DATE "20200417"
+#define DRIVER_TIMESTAMP 1587105300
struct drm_i915_gem_object;
@@ -417,6 +416,7 @@ struct intel_fbc {
struct {
const struct drm_format_info *format;
unsigned int stride;
+ u64 modifier;
} fb;
u16 gen9_wa_cfb_stride;
s8 fence_id;
@@ -540,7 +540,6 @@ struct i915_suspend_saved_registers {
u32 saveSWF0[16];
u32 saveSWF1[16];
u32 saveSWF3[3];
- u64 saveFENCE[I915_MAX_NUM_FENCES];
u32 savePCH_PORT_HOTPLUG;
u16 saveGCDGMBUS;
};
@@ -888,7 +887,6 @@ struct drm_i915_private {
struct pci_dev *bridge_dev;
- struct intel_engine_cs *engine[I915_NUM_ENGINES];
struct rb_root uabi_engines;
struct resource mch_res;
@@ -1510,6 +1508,8 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
(IS_ICELAKE(p) && IS_REVID(p, since, until))
#define TGL_REVID_A0 0x0
+#define TGL_REVID_B0 0x1
+#define TGL_REVID_C0 0x2
#define IS_TGL_REVID(p, since, until) \
(IS_TIGERLAKE(p) && IS_REVID(p, since, until))
@@ -1607,7 +1607,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define HAS_DDI(dev_priv) (INTEL_INFO(dev_priv)->display.has_ddi)
#define HAS_FPGA_DBG_UNCLAIMED(dev_priv) (INTEL_INFO(dev_priv)->has_fpga_dbg)
#define HAS_PSR(dev_priv) (INTEL_INFO(dev_priv)->display.has_psr)
-#define HAS_TRANSCODER_EDP(dev_priv) (INTEL_INFO(dev_priv)->trans_offsets[TRANSCODER_EDP] != 0)
+#define HAS_TRANSCODER(dev_priv, trans) ((INTEL_INFO(dev_priv)->cpu_transcoder_mask & BIT(trans)) != 0)
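+/* e.g. HAS_TRANSCODER(dev_priv, TRANSCODER_EDP) replaces HAS_TRANSCODER_EDP() */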
#define HAS_RC6(dev_priv) (INTEL_INFO(dev_priv)->has_rc6)
#define HAS_RC6p(dev_priv) (INTEL_INFO(dev_priv)->has_rc6p)
@@ -1741,6 +1741,7 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
unsigned long flags);
#define I915_GEM_OBJECT_UNBIND_ACTIVE BIT(0)
#define I915_GEM_OBJECT_UNBIND_BARRIER BIT(1)
+#define I915_GEM_OBJECT_UNBIND_TEST BIT(2)
void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index ca5420012a22..0cbcb9f54e7d 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -118,7 +118,7 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
struct i915_vma *vma;
int ret;
- if (!atomic_read(&obj->bind_count))
+ if (list_empty(&obj->vma.list))
return 0;
/*
@@ -141,6 +141,11 @@ try_again:
if (!i915_vma_is_bound(vma, I915_VMA_BIND_MASK))
continue;
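+		/* UNBIND_TEST: report -EBUSY instead of actually unbinding */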
+ if (flags & I915_GEM_OBJECT_UNBIND_TEST) {
+ ret = -EBUSY;
+ break;
+ }
+
ret = -EAGAIN;
if (!i915_vm_tryopen(vm))
break;
@@ -993,18 +998,16 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
return ERR_PTR(ret);
}
+ ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL);
+ if (ret)
+ return ERR_PTR(ret);
+
if (vma->fence && !i915_gem_object_is_tiled(obj)) {
mutex_lock(&ggtt->vm.mutex);
- ret = i915_vma_revoke_fence(vma);
+ i915_vma_revoke_fence(vma);
mutex_unlock(&ggtt->vm.mutex);
- if (ret)
- return ERR_PTR(ret);
}
- ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL);
- if (ret)
- return ERR_PTR(ret);
-
ret = i915_vma_wait_for_bind(vma);
if (ret) {
i915_vma_unpin(vma);
@@ -1156,7 +1159,6 @@ err_unlock:
/* Minimal basic recovery for KMS */
ret = i915_ggtt_enable_hw(dev_priv);
i915_ggtt_resume(&dev_priv->ggtt);
- i915_gem_restore_fences(&dev_priv->ggtt);
intel_init_clock_gating(dev_priv);
}
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 4518b9b35c3d..0ba7b1e881c0 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -228,7 +228,12 @@ found:
while (ret == 0 && (node = drm_mm_scan_color_evict(&scan))) {
vma = container_of(node, struct i915_vma, node);
- ret = __i915_vma_unbind(vma);
+
+ /* If we find any non-objects (!vma), we cannot evict them */
+ if (vma->node.color != I915_COLOR_UNEVICTABLE)
+ ret = __i915_vma_unbind(vma);
+ else
+ ret = -ENOSPC; /* XXX search failed, try again? */
}
return ret;
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 2a4cd0ba5464..424ad975a360 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -1858,7 +1858,7 @@ void i915_error_state_store(struct i915_gpu_coredump *error)
return;
i915 = error->i915;
- dev_info(i915->drm.dev, "%s\n", error_msg(error));
+ drm_info(&i915->drm, "%s\n", error_msg(error));
if (error->simulated ||
cmpxchg(&i915->gpu_error.first_error, NULL, error))
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 9f0653cf0510..1502ab44f1a5 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -3658,7 +3658,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
intel_uncore_write16(&dev_priv->uncore, GEN2_IIR, iir);
if (iir & I915_USER_INTERRUPT)
- intel_engine_signal_breadcrumbs(dev_priv->engine[RCS0]);
+ intel_engine_signal_breadcrumbs(dev_priv->gt.engine[RCS0]);
if (iir & I915_MASTER_ERROR_INTERRUPT)
i8xx_error_irq_handler(dev_priv, eir, eir_stuck);
@@ -3763,7 +3763,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
I915_WRITE(GEN2_IIR, iir);
if (iir & I915_USER_INTERRUPT)
- intel_engine_signal_breadcrumbs(dev_priv->engine[RCS0]);
+ intel_engine_signal_breadcrumbs(dev_priv->gt.engine[RCS0]);
if (iir & I915_MASTER_ERROR_INTERRUPT)
i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
@@ -3905,10 +3905,10 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
I915_WRITE(GEN2_IIR, iir);
if (iir & I915_USER_INTERRUPT)
- intel_engine_signal_breadcrumbs(dev_priv->engine[RCS0]);
+ intel_engine_signal_breadcrumbs(dev_priv->gt.engine[RCS0]);
if (iir & I915_BSD_USER_INTERRUPT)
- intel_engine_signal_breadcrumbs(dev_priv->engine[VCS0]);
+ intel_engine_signal_breadcrumbs(dev_priv->gt.engine[VCS0]);
if (iir & I915_MASTER_ERROR_INTERRUPT)
i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
diff --git a/drivers/gpu/drm/i915/i915_memcpy.c b/drivers/gpu/drm/i915/i915_memcpy.c
index fdd550405fd3..7b3b83bd5ab8 100644
--- a/drivers/gpu/drm/i915/i915_memcpy.c
+++ b/drivers/gpu/drm/i915/i915_memcpy.c
@@ -35,7 +35,6 @@
static DEFINE_STATIC_KEY_FALSE(has_movntdqa);
-#ifdef CONFIG_AS_MOVNTDQA
static void __memcpy_ntdqa(void *dst, const void *src, unsigned long len)
{
kernel_fpu_begin();
@@ -93,10 +92,6 @@ static void __memcpy_ntdqu(void *dst, const void *src, unsigned long len)
kernel_fpu_end();
}
-#else
-static void __memcpy_ntdqa(void *dst, const void *src, unsigned long len) {}
-static void __memcpy_ntdqu(void *dst, const void *src, unsigned long len) {}
-#endif
/**
* i915_memcpy_from_wc: perform an accelerated *aligned* read from WC
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index 2c80a0194c80..66738f2c4f28 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -160,6 +160,7 @@
GEN(2), \
.is_mobile = 1, \
.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \
+ .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \
.display.has_overlay = 1, \
.display.cursor_needs_physical = 1, \
.display.overlay_needs_physical = 1, \
@@ -179,6 +180,7 @@
#define I845_FEATURES \
GEN(2), \
.pipe_mask = BIT(PIPE_A), \
+ .cpu_transcoder_mask = BIT(TRANSCODER_A), \
.display.has_overlay = 1, \
.display.overlay_needs_physical = 1, \
.display.has_gmch = 1, \
@@ -218,6 +220,7 @@ static const struct intel_device_info i865g_info = {
#define GEN3_FEATURES \
GEN(3), \
.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \
+ .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \
.display.has_gmch = 1, \
.gpu_reset_clobbers_display = true, \
.engine_mask = BIT(RCS0), \
@@ -303,6 +306,7 @@ static const struct intel_device_info pnv_m_info = {
#define GEN4_FEATURES \
GEN(4), \
.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \
+ .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \
.display.has_hotplug = 1, \
.display.has_gmch = 1, \
.gpu_reset_clobbers_display = true, \
@@ -354,6 +358,7 @@ static const struct intel_device_info gm45_info = {
#define GEN5_FEATURES \
GEN(5), \
.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \
+ .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \
.display.has_hotplug = 1, \
.engine_mask = BIT(RCS0) | BIT(VCS0), \
.has_snoop = true, \
@@ -381,6 +386,7 @@ static const struct intel_device_info ilk_m_info = {
#define GEN6_FEATURES \
GEN(6), \
.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \
+ .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \
.display.has_hotplug = 1, \
.display.has_fbc = 1, \
.engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \
@@ -430,6 +436,7 @@ static const struct intel_device_info snb_m_gt2_info = {
#define GEN7_FEATURES \
GEN(7), \
.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), \
+ .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C), \
.display.has_hotplug = 1, \
.display.has_fbc = 1, \
.engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \
@@ -482,6 +489,7 @@ static const struct intel_device_info ivb_q_info = {
PLATFORM(INTEL_IVYBRIDGE),
.gt = 2,
.pipe_mask = 0, /* legal, last one wins */
+ .cpu_transcoder_mask = 0,
.has_l3_dpf = 1,
};
@@ -490,6 +498,7 @@ static const struct intel_device_info vlv_info = {
GEN(7),
.is_lp = 1,
.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B),
+ .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B),
.has_runtime_pm = 1,
.has_rc6 = 1,
.has_rps = true,
@@ -511,6 +520,8 @@ static const struct intel_device_info vlv_info = {
#define G75_FEATURES \
GEN7_FEATURES, \
.engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \
+ .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \
+ BIT(TRANSCODER_C) | BIT(TRANSCODER_EDP), \
.display.has_ddi = 1, \
.has_fpga_dbg = 1, \
.display.has_psr = 1, \
@@ -581,6 +592,7 @@ static const struct intel_device_info chv_info = {
PLATFORM(INTEL_CHERRYVIEW),
GEN(8),
.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
+ .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C),
.display.has_hotplug = 1,
.is_lp = 1,
.engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0),
@@ -656,6 +668,9 @@ static const struct intel_device_info skl_gt4_info = {
.display.has_hotplug = 1, \
.engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \
.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), \
+ .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \
+ BIT(TRANSCODER_C) | BIT(TRANSCODER_EDP) | \
+ BIT(TRANSCODER_DSI_A) | BIT(TRANSCODER_DSI_C), \
.has_64bit_reloc = 1, \
.display.has_ddi = 1, \
.has_fpga_dbg = 1, \
@@ -759,6 +774,9 @@ static const struct intel_device_info cnl_info = {
#define GEN11_FEATURES \
GEN10_FEATURES, \
GEN11_DEFAULT_PAGE_SIZES, \
+ .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \
+ BIT(TRANSCODER_C) | BIT(TRANSCODER_EDP) | \
+ BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1), \
.pipe_offsets = { \
[TRANSCODER_A] = PIPE_A_OFFSET, \
[TRANSCODER_B] = PIPE_B_OFFSET, \
@@ -799,6 +817,10 @@ static const struct intel_device_info ehl_info = {
#define GEN12_FEATURES \
GEN11_FEATURES, \
GEN(12), \
+ .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), \
+ .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \
+ BIT(TRANSCODER_C) | BIT(TRANSCODER_D) | \
+ BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1), \
.pipe_offsets = { \
[TRANSCODER_A] = PIPE_A_OFFSET, \
[TRANSCODER_B] = PIPE_B_OFFSET, \
@@ -822,7 +844,6 @@ static const struct intel_device_info ehl_info = {
static const struct intel_device_info tgl_info = {
GEN12_FEATURES,
PLATFORM(INTEL_TIGERLAKE),
- .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
.display.has_modular_fia = 1,
.engine_mask =
BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2),
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 1b074bb4a7fe..5cde3e4e7be6 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -204,21 +204,6 @@
#include "i915_drv.h"
#include "i915_perf.h"
-#include "oa/i915_oa_hsw.h"
-#include "oa/i915_oa_bdw.h"
-#include "oa/i915_oa_chv.h"
-#include "oa/i915_oa_sklgt2.h"
-#include "oa/i915_oa_sklgt3.h"
-#include "oa/i915_oa_sklgt4.h"
-#include "oa/i915_oa_bxt.h"
-#include "oa/i915_oa_kblgt2.h"
-#include "oa/i915_oa_kblgt3.h"
-#include "oa/i915_oa_glk.h"
-#include "oa/i915_oa_cflgt2.h"
-#include "oa/i915_oa_cflgt3.h"
-#include "oa/i915_oa_cnl.h"
-#include "oa/i915_oa_icl.h"
-#include "oa/i915_oa_tgl.h"
/* HW requires this to be a power of two, between 128k and 16M, though driver
* is currently generally designed assuming the largest 16M size is used such
@@ -238,26 +223,17 @@
*
* Although this can be observed explicitly while copying reports to userspace
* by checking for a zeroed report-id field in tail reports, we want to account
- * for this earlier, as part of the oa_buffer_check to avoid lots of redundant
- * read() attempts.
- *
- * In effect we define a tail pointer for reading that lags the real tail
- * pointer by at least %OA_TAIL_MARGIN_NSEC nanoseconds, which gives enough
- * time for the corresponding reports to become visible to the CPU.
- *
- * To manage this we actually track two tail pointers:
- * 1) An 'aging' tail with an associated timestamp that is tracked until we
- * can trust the corresponding data is visible to the CPU; at which point
- * it is considered 'aged'.
- * 2) An 'aged' tail that can be used for read()ing.
- *
- * The two separate pointers let us decouple read()s from tail pointer aging.
- *
- * The tail pointers are checked and updated at a limited rate within a hrtimer
- * callback (the same callback that is used for delivering EPOLLIN events)
- *
- * Initially the tails are marked invalid with %INVALID_TAIL_PTR which
- * indicates that an updated tail pointer is needed.
+ * for this earlier, as part of oa_buffer_check_unlocked(), to avoid lots of
+ * redundant read() attempts.
+ *
+ * We work around this issue in oa_buffer_check_unlocked() by reading the
+ * reports in the OA buffer, starting from the tail reported by the HW, until
+ * we find a report whose first 2 dwords are not 0, meaning the previous
+ * report has fully landed in memory and is ready to be read. Those dwords are
+ * also set to 0 once read, and the whole buffer is cleared upon OA buffer
+ * initialization. The first dword is the reason for the report and the second
+ * is the timestamp, making it fairly unlikely for both fields to be 0. A more
+ * detailed explanation is available in oa_buffer_check_unlocked().
*
* Most of the implementation details for this workaround are in
* oa_buffer_check_unlocked() and _append_oa_reports()
@@ -272,11 +248,11 @@
#define OA_TAIL_MARGIN_NSEC 100000ULL
#define INVALID_TAIL_PTR 0xffffffff
-/* frequency for checking whether the OA unit has written new reports to the
- * circular OA buffer...
+/* The default frequency for checking whether the OA unit has written new
+ * reports to the circular OA buffer...
*/
-#define POLL_FREQUENCY 200
-#define POLL_PERIOD (NSEC_PER_SEC / POLL_FREQUENCY)
+#define DEFAULT_POLL_FREQUENCY_HZ 200
+#define DEFAULT_POLL_PERIOD_NS (NSEC_PER_SEC / DEFAULT_POLL_FREQUENCY_HZ)
/* for sysctl proc_dointvec_minmax of dev.i915.perf_stream_paranoid */
static u32 i915_perf_stream_paranoid = true;
@@ -359,6 +335,12 @@ static const struct i915_oa_format gen12_oa_formats[I915_OA_FORMAT_MAX] = {
* @oa_periodic: Whether to enable periodic OA unit sampling
* @oa_period_exponent: The OA unit sampling period is derived from this
* @engine: The engine (typically rcs0) being monitored by the OA unit
+ * @has_sseu: Whether @sseu was specified by userspace
+ * @sseu: internal SSEU configuration computed either from the userspace
+ * specified configuration in the opening parameters or a default value
+ * (see get_default_sseu_config())
+ * @poll_oa_period: The period in nanoseconds at which the CPU will check for OA
+ * data availability
*
* As read_properties_unlocked() enumerates and validates the properties given
* to open a stream of metrics the configuration is built up in the structure
@@ -378,6 +360,11 @@ struct perf_open_properties {
int oa_period_exponent;
struct intel_engine_cs *engine;
+
+ bool has_sseu;
+ struct intel_sseu sseu;
+
+ u64 poll_oa_period;
};
struct i915_oa_config_bo {
@@ -409,10 +396,7 @@ i915_perf_get_oa_config(struct i915_perf *perf, int metrics_set)
struct i915_oa_config *oa_config;
rcu_read_lock();
- if (metrics_set == 1)
- oa_config = &perf->test_config;
- else
- oa_config = idr_find(&perf->metrics_idr, metrics_set);
+ oa_config = idr_find(&perf->metrics_idr, metrics_set);
if (oa_config)
oa_config = i915_oa_config_get(oa_config);
rcu_read_unlock();
@@ -465,8 +449,8 @@ static u32 gen7_oa_hw_tail_read(struct i915_perf_stream *stream)
* (See description of OA_TAIL_MARGIN_NSEC above for further details.)
*
* Besides returning true when there is data available to read() this function
- * also has the side effect of updating the oa_buffer.tails[], .aging_timestamp
- * and .aged_tail_idx state used for reading.
+ * also updates the tail, aging_tail and aging_timestamp in the oa_buffer
+ * object.
*
* Note: It's safe to read OA config state here unlocked, assuming that this is
* only called while the stream is enabled, while the global OA configuration
@@ -476,28 +460,19 @@ static u32 gen7_oa_hw_tail_read(struct i915_perf_stream *stream)
*/
static bool oa_buffer_check_unlocked(struct i915_perf_stream *stream)
{
+ u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
int report_size = stream->oa_buffer.format_size;
unsigned long flags;
- unsigned int aged_idx;
- u32 head, hw_tail, aged_tail, aging_tail;
+ bool pollin;
+ u32 hw_tail;
u64 now;
/* We have to consider the (unlikely) possibility that read() errors
- * could result in an OA buffer reset which might reset the head,
- * tails[] and aged_tail state.
+ * could result in an OA buffer reset which might reset the head and
+ * tail state.
*/
spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
- /* NB: The head we observe here might effectively be a little out of
- * date (between head and tails[aged_idx].offset if there is currently
- * a read() in progress.
- */
- head = stream->oa_buffer.head;
-
- aged_idx = stream->oa_buffer.aged_tail_idx;
- aged_tail = stream->oa_buffer.tails[aged_idx].offset;
- aging_tail = stream->oa_buffer.tails[!aged_idx].offset;
-
hw_tail = stream->perf->ops.oa_hw_tail_read(stream);
/* The tail pointer increases in 64 byte increments,
@@ -507,64 +482,63 @@ static bool oa_buffer_check_unlocked(struct i915_perf_stream *stream)
now = ktime_get_mono_fast_ns();
- /* Update the aged tail
- *
- * Flip the tail pointer available for read()s once the aging tail is
- * old enough to trust that the corresponding data will be visible to
- * the CPU...
- *
- * Do this before updating the aging pointer in case we may be able to
- * immediately start aging a new pointer too (if new data has become
- * available) without needing to wait for a later hrtimer callback.
- */
- if (aging_tail != INVALID_TAIL_PTR &&
- ((now - stream->oa_buffer.aging_timestamp) >
- OA_TAIL_MARGIN_NSEC)) {
-
- aged_idx ^= 1;
- stream->oa_buffer.aged_tail_idx = aged_idx;
+ if (hw_tail == stream->oa_buffer.aging_tail &&
+ (now - stream->oa_buffer.aging_timestamp) > OA_TAIL_MARGIN_NSEC) {
+		/* If the HW tail hasn't moved since the last check and the HW
+ * tail has been aging for long enough, declare it the new
+ * tail.
+ */
+ stream->oa_buffer.tail = stream->oa_buffer.aging_tail;
+ } else {
+ u32 head, tail, aged_tail;
- aged_tail = aging_tail;
+ /* NB: The head we observe here might effectively be a little
+ * out of date. If a read() is in progress, the head could be
+ * anywhere between this head and stream->oa_buffer.tail.
+ */
+ head = stream->oa_buffer.head - gtt_offset;
+ aged_tail = stream->oa_buffer.tail - gtt_offset;
+
+ hw_tail -= gtt_offset;
+ tail = hw_tail;
+
+ /* Walk the stream backward until we find a report with dword 0
+ * & 1 not at 0. Since the circular buffer pointers progress by
+		 * increments of 64 bytes and reports can be up to 256
+ * bytes long, we can't tell whether a report has fully landed
+ * in memory before the first 2 dwords of the following report
+ * have effectively landed.
+ *
+ * This is assuming that the writes of the OA unit land in
+ * memory in the order they were written to.
+ * If not : (╯°□°)╯︵ ┻━┻
+ */
+ while (OA_TAKEN(tail, aged_tail) >= report_size) {
+ u32 *report32 = (void *)(stream->oa_buffer.vaddr + tail);
- /* Mark that we need a new pointer to start aging... */
- stream->oa_buffer.tails[!aged_idx].offset = INVALID_TAIL_PTR;
- aging_tail = INVALID_TAIL_PTR;
- }
+ if (report32[0] != 0 || report32[1] != 0)
+ break;
- /* Update the aging tail
- *
- * We throttle aging tail updates until we have a new tail that
- * represents >= one report more data than is already available for
- * reading. This ensures there will be enough data for a successful
- * read once this new pointer has aged and ensures we will give the new
- * pointer time to age.
- */
- if (aging_tail == INVALID_TAIL_PTR &&
- (aged_tail == INVALID_TAIL_PTR ||
- OA_TAKEN(hw_tail, aged_tail) >= report_size)) {
- struct i915_vma *vma = stream->oa_buffer.vma;
- u32 gtt_offset = i915_ggtt_offset(vma);
-
- /* Be paranoid and do a bounds check on the pointer read back
- * from hardware, just in case some spurious hardware condition
- * could put the tail out of bounds...
- */
- if (hw_tail >= gtt_offset &&
- hw_tail < (gtt_offset + OA_BUFFER_SIZE)) {
- stream->oa_buffer.tails[!aged_idx].offset =
- aging_tail = hw_tail;
- stream->oa_buffer.aging_timestamp = now;
- } else {
- drm_err(&stream->perf->i915->drm,
- "Ignoring spurious out of range OA buffer tail pointer = %x\n",
- hw_tail);
+ tail = (tail - report_size) & (OA_BUFFER_SIZE - 1);
}
+
+ if (OA_TAKEN(hw_tail, tail) > report_size &&
+ __ratelimit(&stream->perf->tail_pointer_race))
+ DRM_NOTE("unlanded report(s) head=0x%x "
+ "tail=0x%x hw_tail=0x%x\n",
+ head, tail, hw_tail);
+
+ stream->oa_buffer.tail = gtt_offset + tail;
+ stream->oa_buffer.aging_tail = gtt_offset + hw_tail;
+ stream->oa_buffer.aging_timestamp = now;
}
+ pollin = OA_TAKEN(stream->oa_buffer.tail - gtt_offset,
+ stream->oa_buffer.head - gtt_offset) >= report_size;
+
spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
- return aged_tail == INVALID_TAIL_PTR ?
- false : OA_TAKEN(aged_tail, head) >= report_size;
+ return pollin;
}
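For readers unfamiliar with the pointer arithmetic above: OA_TAKEN() is the usual power-of-two circular-buffer distance, defined earlier in this file. A minimal sketch of the math the tail walk relies on (the OA_BUFFER_SIZE value here is for illustration only):

/* Circular-buffer distance as used by oa_buffer_check_unlocked().
 * Requires OA_BUFFER_SIZE to be a power of two.
 */
#define OA_BUFFER_SIZE (16 * 1024 * 1024)
#define OA_TAKEN(tail, head) (((tail) - (head)) & (OA_BUFFER_SIZE - 1))

/* e.g. head = OA_BUFFER_SIZE - 64 and tail = 192: the tail has
 * wrapped, and OA_TAKEN(tail, head) still yields the 256 bytes
 * in flight.
 */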
/**
@@ -682,7 +656,6 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
u32 mask = (OA_BUFFER_SIZE - 1);
size_t start_offset = *offset;
unsigned long flags;
- unsigned int aged_tail_idx;
u32 head, tail;
u32 taken;
int ret = 0;
@@ -693,19 +666,11 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
head = stream->oa_buffer.head;
- aged_tail_idx = stream->oa_buffer.aged_tail_idx;
- tail = stream->oa_buffer.tails[aged_tail_idx].offset;
+ tail = stream->oa_buffer.tail;
spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
/*
- * An invalid tail pointer here means we're still waiting for the poll
- * hrtimer callback to give us a pointer
- */
- if (tail == INVALID_TAIL_PTR)
- return -EAGAIN;
-
- /*
* NB: oa_buffer.head/tail include the gtt_offset which we don't want
* while indexing relative to oa_buf_base.
*/
@@ -838,13 +803,11 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
}
/*
- * The above reason field sanity check is based on
- * the assumption that the OA buffer is initially
- * zeroed and we reset the field after copying so the
- * check is still meaningful once old reports start
- * being overwritten.
+ * Clear out the first 2 dwords as a means of detecting
+ * unlanded reports.
*/
report32[0] = 0;
+ report32[1] = 0;
}
if (start_offset != *offset) {
@@ -985,7 +948,6 @@ static int gen7_append_oa_reports(struct i915_perf_stream *stream,
u32 mask = (OA_BUFFER_SIZE - 1);
size_t start_offset = *offset;
unsigned long flags;
- unsigned int aged_tail_idx;
u32 head, tail;
u32 taken;
int ret = 0;
@@ -996,17 +958,10 @@ static int gen7_append_oa_reports(struct i915_perf_stream *stream,
spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
head = stream->oa_buffer.head;
- aged_tail_idx = stream->oa_buffer.aged_tail_idx;
- tail = stream->oa_buffer.tails[aged_tail_idx].offset;
+ tail = stream->oa_buffer.tail;
spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
- /* An invalid tail pointer here means we're still waiting for the poll
- * hrtimer callback to give us a pointer
- */
- if (tail == INVALID_TAIL_PTR)
- return -EAGAIN;
-
/* NB: oa_buffer.head/tail include the gtt_offset which we don't want
* while indexing relative to oa_buf_base.
*/
@@ -1064,13 +1019,11 @@ static int gen7_append_oa_reports(struct i915_perf_stream *stream,
if (ret)
break;
- /* The above report-id field sanity check is based on
- * the assumption that the OA buffer is initially
- * zeroed and we reset the field after copying so the
- * check is still meaningful once old reports start
- * being overwritten.
+ /* Clear out the first 2 dwords as a means of detecting unlanded
+ * reports.
*/
report32[0] = 0;
+ report32[1] = 0;
}
if (start_offset != *offset) {
@@ -1449,8 +1402,8 @@ static void gen7_init_oa_buffer(struct i915_perf_stream *stream)
gtt_offset | OABUFFER_SIZE_16M);
/* Mark that we need updated tail pointers to read from... */
- stream->oa_buffer.tails[0].offset = INVALID_TAIL_PTR;
- stream->oa_buffer.tails[1].offset = INVALID_TAIL_PTR;
+ stream->oa_buffer.aging_tail = INVALID_TAIL_PTR;
+ stream->oa_buffer.tail = gtt_offset;
spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
@@ -1472,8 +1425,6 @@ static void gen7_init_oa_buffer(struct i915_perf_stream *stream)
* memory...
*/
memset(stream->oa_buffer.vaddr, 0, OA_BUFFER_SIZE);
-
- stream->pollin = false;
}
static void gen8_init_oa_buffer(struct i915_perf_stream *stream)
@@ -1503,8 +1454,8 @@ static void gen8_init_oa_buffer(struct i915_perf_stream *stream)
intel_uncore_write(uncore, GEN8_OATAILPTR, gtt_offset & GEN8_OATAILPTR_MASK);
/* Mark that we need updated tail pointers to read from... */
- stream->oa_buffer.tails[0].offset = INVALID_TAIL_PTR;
- stream->oa_buffer.tails[1].offset = INVALID_TAIL_PTR;
+ stream->oa_buffer.aging_tail = INVALID_TAIL_PTR;
+ stream->oa_buffer.tail = gtt_offset;
/*
* Reset state used to recognise context switches, affecting which
@@ -1528,8 +1479,6 @@ static void gen8_init_oa_buffer(struct i915_perf_stream *stream)
* memory...
*/
memset(stream->oa_buffer.vaddr, 0, OA_BUFFER_SIZE);
-
- stream->pollin = false;
}
static void gen12_init_oa_buffer(struct i915_perf_stream *stream)
@@ -1559,8 +1508,8 @@ static void gen12_init_oa_buffer(struct i915_perf_stream *stream)
gtt_offset & GEN12_OAG_OATAILPTR_MASK);
/* Mark that we need updated tail pointers to read from... */
- stream->oa_buffer.tails[0].offset = INVALID_TAIL_PTR;
- stream->oa_buffer.tails[1].offset = INVALID_TAIL_PTR;
+ stream->oa_buffer.aging_tail = INVALID_TAIL_PTR;
+ stream->oa_buffer.tail = gtt_offset;
/*
* Reset state used to recognise context switches, affecting which
@@ -1585,8 +1534,6 @@ static void gen12_init_oa_buffer(struct i915_perf_stream *stream)
*/
memset(stream->oa_buffer.vaddr, 0,
stream->oa_buffer.vma->size);
-
- stream->pollin = false;
}
static int alloc_oa_buffer(struct i915_perf_stream *stream)
@@ -1972,10 +1919,11 @@ out:
return i915_vma_get(oa_bo->vma);
}
-static struct i915_request *
+static int
emit_oa_config(struct i915_perf_stream *stream,
struct i915_oa_config *oa_config,
- struct intel_context *ce)
+ struct intel_context *ce,
+ struct i915_active *active)
{
struct i915_request *rq;
struct i915_vma *vma;
@@ -1983,7 +1931,7 @@ emit_oa_config(struct i915_perf_stream *stream,
vma = get_oa_vma(stream, oa_config);
if (IS_ERR(vma))
- return ERR_CAST(vma);
+ return PTR_ERR(vma);
err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
if (err)
@@ -1997,6 +1945,18 @@ emit_oa_config(struct i915_perf_stream *stream,
goto err_vma_unpin;
}
+ if (!IS_ERR_OR_NULL(active)) {
+ /* After all individual context modifications */
+ err = i915_request_await_active(rq, active,
+ I915_ACTIVE_AWAIT_ACTIVE);
+ if (err)
+ goto err_add_request;
+
+ err = i915_active_add_request(active, rq);
+ if (err)
+ goto err_add_request;
+ }
+
i915_vma_lock(vma);
err = i915_request_await_object(rq, vma->obj, 0);
if (!err)
@@ -2011,14 +1971,13 @@ emit_oa_config(struct i915_perf_stream *stream,
if (err)
goto err_add_request;
- i915_request_get(rq);
err_add_request:
i915_request_add(rq);
err_vma_unpin:
i915_vma_unpin(vma);
err_vma_put:
i915_vma_put(vma);
- return err ? ERR_PTR(err) : rq;
+ return err;
}
static struct intel_context *oa_context(struct i915_perf_stream *stream)
@@ -2026,8 +1985,9 @@ static struct intel_context *oa_context(struct i915_perf_stream *stream)
return stream->pinned_ctx ?: stream->engine->kernel_context;
}
-static struct i915_request *
-hsw_enable_metric_set(struct i915_perf_stream *stream)
+static int
+hsw_enable_metric_set(struct i915_perf_stream *stream,
+ struct i915_active *active)
{
struct intel_uncore *uncore = stream->uncore;
@@ -2046,7 +2006,9 @@ hsw_enable_metric_set(struct i915_perf_stream *stream)
intel_uncore_rmw(uncore, GEN6_UCGCTL1,
0, GEN6_CSUNIT_CLOCK_GATE_DISABLE);
- return emit_oa_config(stream, stream->oa_config, oa_context(stream));
+ return emit_oa_config(stream,
+ stream->oa_config, oa_context(stream),
+ active);
}
static void hsw_disable_metric_set(struct i915_perf_stream *stream)
@@ -2116,9 +2078,6 @@ gen8_update_reg_state_unlocked(const struct intel_context *ce,
for (i = 0; i < ARRAY_SIZE(flex_regs); i++)
reg_state[ctx_flexeu0 + i * 2 + 1] =
oa_config_flex_reg(stream->oa_config, flex_regs[i]);
-
- reg_state[CTX_R_PWR_CLK_STATE] =
- intel_sseu_make_rpcs(ce->engine->i915, &ce->sseu);
}
struct flex {
@@ -2196,8 +2155,10 @@ static int gen8_modify_context(struct intel_context *ce,
return err;
}
-static int gen8_modify_self(struct intel_context *ce,
- const struct flex *flex, unsigned int count)
+static int
+gen8_modify_self(struct intel_context *ce,
+ const struct flex *flex, unsigned int count,
+ struct i915_active *active)
{
struct i915_request *rq;
int err;
@@ -2208,8 +2169,17 @@ static int gen8_modify_self(struct intel_context *ce,
if (IS_ERR(rq))
return PTR_ERR(rq);
+ if (!IS_ERR_OR_NULL(active)) {
+ err = i915_active_add_request(active, rq);
+ if (err)
+ goto err_add_request;
+ }
+
err = gen8_load_flex(rq, ce, flex, count);
+ if (err)
+ goto err_add_request;
+err_add_request:
i915_request_add(rq);
return err;
}
@@ -2243,7 +2213,8 @@ static int gen8_configure_context(struct i915_gem_context *ctx,
return err;
}
-static int gen12_configure_oar_context(struct i915_perf_stream *stream, bool enable)
+static int gen12_configure_oar_context(struct i915_perf_stream *stream,
+ struct i915_active *active)
{
int err;
struct intel_context *ce = stream->pinned_ctx;
@@ -2252,7 +2223,7 @@ static int gen12_configure_oar_context(struct i915_perf_stream *stream, bool ena
{
GEN8_OACTXCONTROL,
stream->perf->ctx_oactxctrl_offset + 1,
- enable ? GEN8_OA_COUNTER_RESUME : 0,
+ active ? GEN8_OA_COUNTER_RESUME : 0,
},
};
/* Offsets in regs_lri are not used since this configuration is only
@@ -2264,13 +2235,13 @@ static int gen12_configure_oar_context(struct i915_perf_stream *stream, bool ena
GEN12_OAR_OACONTROL,
GEN12_OAR_OACONTROL_OFFSET + 1,
(format << GEN12_OAR_OACONTROL_COUNTER_FORMAT_SHIFT) |
- (enable ? GEN12_OAR_OACONTROL_COUNTER_ENABLE : 0)
+ (active ? GEN12_OAR_OACONTROL_COUNTER_ENABLE : 0)
},
{
RING_CONTEXT_CONTROL(ce->engine->mmio_base),
CTX_CONTEXT_CONTROL,
_MASKED_FIELD(GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE,
- enable ?
+ active ?
GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE :
0)
},
@@ -2287,7 +2258,7 @@ static int gen12_configure_oar_context(struct i915_perf_stream *stream, bool ena
return err;
/* Apply regs_lri using LRI with pinned context */
- return gen8_modify_self(ce, regs_lri, ARRAY_SIZE(regs_lri));
+ return gen8_modify_self(ce, regs_lri, ARRAY_SIZE(regs_lri), active);
}
/*
@@ -2315,9 +2286,11 @@ static int gen12_configure_oar_context(struct i915_perf_stream *stream, bool ena
* Note: it's only the RCS/Render context that has any OA state.
* Note: the first flex register passed must always be R_PWR_CLK_STATE
*/
-static int oa_configure_all_contexts(struct i915_perf_stream *stream,
- struct flex *regs,
- size_t num_regs)
+static int
+oa_configure_all_contexts(struct i915_perf_stream *stream,
+ struct flex *regs,
+ size_t num_regs,
+ struct i915_active *active)
{
struct drm_i915_private *i915 = stream->perf->i915;
struct intel_engine_cs *engine;
@@ -2374,7 +2347,7 @@ static int oa_configure_all_contexts(struct i915_perf_stream *stream,
regs[0].value = intel_sseu_make_rpcs(i915, &ce->sseu);
- err = gen8_modify_self(ce, regs, num_regs);
+ err = gen8_modify_self(ce, regs, num_regs, active);
if (err)
return err;
}
@@ -2382,8 +2355,10 @@ static int oa_configure_all_contexts(struct i915_perf_stream *stream,
return 0;
}
-static int gen12_configure_all_contexts(struct i915_perf_stream *stream,
- const struct i915_oa_config *oa_config)
+static int
+gen12_configure_all_contexts(struct i915_perf_stream *stream,
+ const struct i915_oa_config *oa_config,
+ struct i915_active *active)
{
struct flex regs[] = {
{
@@ -2392,11 +2367,15 @@ static int gen12_configure_all_contexts(struct i915_perf_stream *stream,
},
};
- return oa_configure_all_contexts(stream, regs, ARRAY_SIZE(regs));
+ return oa_configure_all_contexts(stream,
+ regs, ARRAY_SIZE(regs),
+ active);
}
-static int lrc_configure_all_contexts(struct i915_perf_stream *stream,
- const struct i915_oa_config *oa_config)
+static int
+lrc_configure_all_contexts(struct i915_perf_stream *stream,
+ const struct i915_oa_config *oa_config,
+ struct i915_active *active)
{
/* The MMIO offsets for Flex EU registers aren't contiguous */
const u32 ctx_flexeu0 = stream->perf->ctx_flexeu0_offset;
@@ -2429,11 +2408,14 @@ static int lrc_configure_all_contexts(struct i915_perf_stream *stream,
for (i = 2; i < ARRAY_SIZE(regs); i++)
regs[i].value = oa_config_flex_reg(oa_config, regs[i].reg);
- return oa_configure_all_contexts(stream, regs, ARRAY_SIZE(regs));
+ return oa_configure_all_contexts(stream,
+ regs, ARRAY_SIZE(regs),
+ active);
}
-static struct i915_request *
-gen8_enable_metric_set(struct i915_perf_stream *stream)
+static int
+gen8_enable_metric_set(struct i915_perf_stream *stream,
+ struct i915_active *active)
{
struct intel_uncore *uncore = stream->uncore;
struct i915_oa_config *oa_config = stream->oa_config;
@@ -2473,11 +2455,13 @@ gen8_enable_metric_set(struct i915_perf_stream *stream)
* to make sure all slices/subslices are ON before writing to NOA
* registers.
*/
- ret = lrc_configure_all_contexts(stream, oa_config);
+ ret = lrc_configure_all_contexts(stream, oa_config, active);
if (ret)
- return ERR_PTR(ret);
+ return ret;
- return emit_oa_config(stream, oa_config, oa_context(stream));
+ return emit_oa_config(stream,
+ stream->oa_config, oa_context(stream),
+ active);
}
static u32 oag_report_ctx_switches(const struct i915_perf_stream *stream)
@@ -2487,8 +2471,9 @@ static u32 oag_report_ctx_switches(const struct i915_perf_stream *stream)
0 : GEN12_OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS);
}
-static struct i915_request *
-gen12_enable_metric_set(struct i915_perf_stream *stream)
+static int
+gen12_enable_metric_set(struct i915_perf_stream *stream,
+ struct i915_active *active)
{
struct intel_uncore *uncore = stream->uncore;
struct i915_oa_config *oa_config = stream->oa_config;
@@ -2517,9 +2502,9 @@ gen12_enable_metric_set(struct i915_perf_stream *stream)
* to make sure all slices/subslices are ON before writing to NOA
* registers.
*/
- ret = gen12_configure_all_contexts(stream, oa_config);
+ ret = gen12_configure_all_contexts(stream, oa_config, active);
if (ret)
- return ERR_PTR(ret);
+ return ret;
/*
* For Gen12, performance counters are context
@@ -2527,12 +2512,14 @@ gen12_enable_metric_set(struct i915_perf_stream *stream)
* requested this.
*/
if (stream->ctx) {
- ret = gen12_configure_oar_context(stream, true);
+ ret = gen12_configure_oar_context(stream, active);
if (ret)
- return ERR_PTR(ret);
+ return ret;
}
- return emit_oa_config(stream, oa_config, oa_context(stream));
+ return emit_oa_config(stream,
+ stream->oa_config, oa_context(stream),
+ active);
}
static void gen8_disable_metric_set(struct i915_perf_stream *stream)
@@ -2540,7 +2527,7 @@ static void gen8_disable_metric_set(struct i915_perf_stream *stream)
struct intel_uncore *uncore = stream->uncore;
/* Reset all contexts' slices/subslices configurations. */
- lrc_configure_all_contexts(stream, NULL);
+ lrc_configure_all_contexts(stream, NULL, NULL);
intel_uncore_rmw(uncore, GDT_CHICKEN_BITS, GT_NOA_ENABLE, 0);
}
@@ -2550,7 +2537,7 @@ static void gen10_disable_metric_set(struct i915_perf_stream *stream)
struct intel_uncore *uncore = stream->uncore;
/* Reset all contexts' slices/subslices configurations. */
- lrc_configure_all_contexts(stream, NULL);
+ lrc_configure_all_contexts(stream, NULL, NULL);
/* Make sure we disable noa to save power. */
intel_uncore_rmw(uncore, RPM_CONFIG1, GEN10_GT_NOA_ENABLE, 0);
@@ -2561,11 +2548,11 @@ static void gen12_disable_metric_set(struct i915_perf_stream *stream)
struct intel_uncore *uncore = stream->uncore;
/* Reset all contexts' slices/subslices configurations. */
- gen12_configure_all_contexts(stream, NULL);
+ gen12_configure_all_contexts(stream, NULL, NULL);
/* disable the context save/restore or OAR counters */
if (stream->ctx)
- gen12_configure_oar_context(stream, false);
+ gen12_configure_oar_context(stream, NULL);
/* Make sure we disable noa to save power. */
intel_uncore_rmw(uncore, RPM_CONFIG1, GEN10_GT_NOA_ENABLE, 0);
@@ -2657,11 +2644,13 @@ static void gen12_oa_enable(struct i915_perf_stream *stream)
*/
static void i915_oa_stream_enable(struct i915_perf_stream *stream)
{
+ stream->pollin = false;
+
stream->perf->ops.oa_enable(stream);
if (stream->periodic)
hrtimer_start(&stream->poll_check_timer,
- ns_to_ktime(POLL_PERIOD),
+ ns_to_ktime(stream->poll_oa_period),
HRTIMER_MODE_REL_PINNED);
}
@@ -2700,6 +2689,14 @@ static void gen12_oa_disable(struct i915_perf_stream *stream)
50))
drm_err(&stream->perf->i915->drm,
"wait for OA to be disabled timed out\n");
+
+ intel_uncore_write(uncore, GEN12_OA_TLB_INV_CR, 1);
+ if (intel_wait_for_register(uncore,
+ GEN12_OA_TLB_INV_CR,
+ 1, 0,
+ 50))
+ drm_err(&stream->perf->i915->drm,
+ "wait for OA tlb invalidate timed out\n");
}
/**
@@ -2729,16 +2726,52 @@ static const struct i915_perf_stream_ops i915_oa_stream_ops = {
static int i915_perf_stream_enable_sync(struct i915_perf_stream *stream)
{
- struct i915_request *rq;
+ struct i915_active *active;
+ int err;
- rq = stream->perf->ops.enable_metric_set(stream);
- if (IS_ERR(rq))
- return PTR_ERR(rq);
+ active = i915_active_create();
+ if (!active)
+ return -ENOMEM;
- i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT);
- i915_request_put(rq);
+ err = stream->perf->ops.enable_metric_set(stream, active);
+ if (err == 0)
+ __i915_active_wait(active, TASK_UNINTERRUPTIBLE);
- return 0;
+ i915_active_put(active);
+ return err;
+}
+
+static void
+get_default_sseu_config(struct intel_sseu *out_sseu,
+ struct intel_engine_cs *engine)
+{
+ const struct sseu_dev_info *devinfo_sseu =
+ &RUNTIME_INFO(engine->i915)->sseu;
+
+ *out_sseu = intel_sseu_from_device_info(devinfo_sseu);
+
+ if (IS_GEN(engine->i915, 11)) {
+ /*
+ * We only need the subslice count, so it doesn't matter which
+ * subslices we select - just pick the low half of the available
+ * subslices per slice.
+ */
+ out_sseu->subslice_mask =
+ ~(~0 << (hweight8(out_sseu->subslice_mask) / 2));
+ out_sseu->slice_mask = 0x1;
+ }
+}
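A worked example of the halving trick above may help; this is a hedged sketch (the helper name and the 0xff mask are illustrative, not from the driver):

#include <linux/bitops.h>	/* hweight8() */

/* With subslice_mask = 0xff: hweight8() = 8, half = 4, and
 * ~(~0u << 4) = 0x0f, i.e. the four lowest subslices are kept.
 */
static u8 halve_subslices(u8 subslice_mask)
{
	return ~(~0u << (hweight8(subslice_mask) / 2));
}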
+
+static int
+get_sseu_config(struct intel_sseu *out_sseu,
+ struct intel_engine_cs *engine,
+ const struct drm_i915_gem_context_param_sseu *drm_sseu)
+{
+ if (drm_sseu->engine.engine_class != engine->uabi_class ||
+ drm_sseu->engine.engine_instance != engine->uabi_instance)
+ return -EINVAL;
+
+ return i915_gem_user_to_context_sseu(engine->i915, drm_sseu, out_sseu);
}
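From the uAPI side, the two new properties plumbed in below are passed as (id, value) u64 pairs to DRM_IOCTL_I915_PERF_OPEN. A hedged userspace sketch, with error handling and the mandatory OA format/exponent/metrics properties elided:

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int open_stream(int drm_fd, struct drm_i915_gem_context_param_sseu *sseu)
{
	uint64_t props[] = {
		DRM_I915_PERF_PROP_GLOBAL_SSEU, (uintptr_t)sseu,
		DRM_I915_PERF_PROP_POLL_OA_PERIOD, 500 * 1000, /* >= 100us floor */
		/* ... remaining OA properties ... */
	};
	struct drm_i915_perf_open_param param = {
		.flags = I915_PERF_FLAG_FD_CLOEXEC,
		.num_properties = sizeof(props) / (2 * sizeof(uint64_t)),
		.properties_ptr = (uintptr_t)props,
	};

	return ioctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
}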
/**
@@ -2873,6 +2906,8 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
goto err_oa_buf_alloc;
stream->ops = &i915_oa_stream_ops;
+
+ perf->sseu = props->sseu;
WRITE_ONCE(perf->exclusive_stream, stream);
ret = i915_perf_stream_enable_sync(stream);
@@ -2924,58 +2959,11 @@ void i915_oa_init_reg_state(const struct intel_context *ce,
/* perf.exclusive_stream serialised by lrc_configure_all_contexts() */
stream = READ_ONCE(engine->i915->perf.exclusive_stream);
- /*
- * For gen12, only CTX_R_PWR_CLK_STATE needs update, but the caller
- * is already doing that, so nothing to be done for gen12 here.
- */
if (stream && INTEL_GEN(stream->perf->i915) < 12)
gen8_update_reg_state_unlocked(ce, stream);
}
/**
- * i915_perf_read_locked - &i915_perf_stream_ops->read with error normalisation
- * @stream: An i915 perf stream
- * @file: An i915 perf stream file
- * @buf: destination buffer given by userspace
- * @count: the number of bytes userspace wants to read
- * @ppos: (inout) file seek position (unused)
- *
- * Besides wrapping &i915_perf_stream_ops->read this provides a common place to
- * ensure that if we've successfully copied any data then reporting that takes
- * precedence over any internal error status, so the data isn't lost.
- *
- * For example ret will be -ENOSPC whenever there is more buffered data than
- * can be copied to userspace, but that's only interesting if we weren't able
- * to copy some data because it implies the userspace buffer is too small to
- * receive a single record (and we never split records).
- *
- * Another case with ret == -EFAULT is more of a grey area since it would seem
- * like bad form for userspace to ask us to overrun its buffer, but the user
- * knows best:
- *
- * http://yarchive.net/comp/linux/partial_reads_writes.html
- *
- * Returns: The number of bytes copied or a negative error code on failure.
- */
-static ssize_t i915_perf_read_locked(struct i915_perf_stream *stream,
- struct file *file,
- char __user *buf,
- size_t count,
- loff_t *ppos)
-{
- /* Note we keep the offset (aka bytes read) separate from any
- * error status so that the final check for whether we return
- * the bytes read with a higher precedence than any error (see
- * comment below) doesn't need to be handled/duplicated in
- * stream->ops->read() implementations.
- */
- size_t offset = 0;
- int ret = stream->ops->read(stream, buf, count, &offset);
-
- return offset ?: (ret ?: -EAGAIN);
-}
-
-/**
* i915_perf_read - handles read() FOP for i915 perf stream FDs
* @file: An i915 perf stream file
* @buf: destination buffer given by userspace
@@ -3000,7 +2988,8 @@ static ssize_t i915_perf_read(struct file *file,
{
struct i915_perf_stream *stream = file->private_data;
struct i915_perf *perf = stream->perf;
- ssize_t ret;
+ size_t offset = 0;
+ int ret;
/* To ensure it's handled consistently we simply treat all reads of a
* disabled stream as an error. In particular it might otherwise lead
@@ -3023,13 +3012,12 @@ static ssize_t i915_perf_read(struct file *file,
return ret;
mutex_lock(&perf->lock);
- ret = i915_perf_read_locked(stream, file,
- buf, count, ppos);
+ ret = stream->ops->read(stream, buf, count, &offset);
mutex_unlock(&perf->lock);
- } while (ret == -EAGAIN);
+ } while (!offset && !ret);
} else {
mutex_lock(&perf->lock);
- ret = i915_perf_read_locked(stream, file, buf, count, ppos);
+ ret = stream->ops->read(stream, buf, count, &offset);
mutex_unlock(&perf->lock);
}
@@ -3040,15 +3028,15 @@ static ssize_t i915_perf_read(struct file *file,
* and read() returning -EAGAIN. Clearing the oa.pollin state here
* effectively ensures we back off until the next hrtimer callback
* before reporting another EPOLLIN event.
+ * The exception to this is if ops->read() returned -ENOSPC, which
+ * means more OA data is available than could fit in the user-provided
+ * buffer. In this case we don't want the next poll() call to block.
*/
- if (ret >= 0 || ret == -EAGAIN) {
- /* Maybe make ->pollin per-stream state if we support multiple
- * concurrent streams in the future.
- */
+ if (ret != -ENOSPC)
stream->pollin = false;
- }
- return ret;
+ /* Possible values for ret are 0, -EFAULT, -ENOSPC, -EIO, ... */
+ return offset ?: (ret ?: -EAGAIN);
}
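The resulting read()/poll() contract can be exercised from userspace roughly as follows; a sketch assuming the stream was opened with I915_PERF_FLAG_FD_NONBLOCK:

#include <errno.h>
#include <poll.h>
#include <unistd.h>

static void drain(int stream_fd, void *buf, size_t len)
{
	struct pollfd pfd = { .fd = stream_fd, .events = POLLIN };

	while (poll(&pfd, 1, -1) >= 0) {
		ssize_t n = read(stream_fd, buf, len);

		/* EAGAIN: no data yet, wait for the next EPOLLIN.
		 * ENOSPC: buf was too small for the pending reports;
		 * poll() will not block, so simply read() again.
		 */
		if (n < 0 && errno != EAGAIN && errno != ENOSPC)
			break;
		/* ... consume n bytes of OA records ... */
	}
}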
static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer)
@@ -3061,7 +3049,8 @@ static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer)
wake_up(&stream->poll_wq);
}
- hrtimer_forward_now(hrtimer, ns_to_ktime(POLL_PERIOD));
+ hrtimer_forward_now(hrtimer,
+ ns_to_ktime(stream->poll_oa_period));
return HRTIMER_RESTART;
}
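The callback above follows the canonical self-rearming hrtimer shape; a stripped-down sketch (the period constant is an assumption):

static enum hrtimer_restart tick(struct hrtimer *t)
{
	/* ... check for data and wake any poll() waiters ... */
	hrtimer_forward_now(t, ns_to_ktime(100 * 1000 /* assumed 100us */));
	return HRTIMER_RESTART;
}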
@@ -3192,7 +3181,7 @@ static long i915_perf_config_locked(struct i915_perf_stream *stream,
return -EINVAL;
if (config != stream->oa_config) {
- struct i915_request *rq;
+ int err;
/*
* If OA is bound to a specific context, emit the
@@ -3203,13 +3192,11 @@ static long i915_perf_config_locked(struct i915_perf_stream *stream,
* When set globally, we use a low priority kernel context,
* so it will effectively take effect when idle.
*/
- rq = emit_oa_config(stream, config, oa_context(stream));
- if (!IS_ERR(rq)) {
+ err = emit_oa_config(stream, config, oa_context(stream), NULL);
+ if (!err)
config = xchg(&stream->oa_config, config);
- i915_request_put(rq);
- } else {
- ret = PTR_ERR(rq);
- }
+ else
+ ret = err;
}
i915_oa_config_put(config);
@@ -3422,6 +3409,14 @@ i915_perf_open_ioctl_locked(struct i915_perf *perf,
privileged_op = true;
}
+ /*
+ * Asking for SSEU configuration is a privileged operation.
+ */
+ if (props->has_sseu)
+ privileged_op = true;
+ else
+ get_default_sseu_config(&props->sseu, props->engine);
+
/* Similar to perf's kernel.perf_paranoid_cpu sysctl option
* we check a dev.i915.perf_stream_paranoid sysctl option
* to determine if it's ok to access system wide OA counters
@@ -3442,6 +3437,7 @@ i915_perf_open_ioctl_locked(struct i915_perf *perf,
stream->perf = perf;
stream->ctx = specific_ctx;
+ stream->poll_oa_period = props->poll_oa_period;
ret = i915_oa_stream_init(stream, param, props);
if (ret)
@@ -3517,8 +3513,10 @@ static int read_properties_unlocked(struct i915_perf *perf,
{
u64 __user *uprop = uprops;
u32 i;
+ int ret;
memset(props, 0, sizeof(struct perf_open_properties));
+ props->poll_oa_period = DEFAULT_POLL_PERIOD_NS;
if (!n_props) {
DRM_DEBUG("No i915 perf properties given\n");
@@ -3548,7 +3546,6 @@ static int read_properties_unlocked(struct i915_perf *perf,
for (i = 0; i < n_props; i++) {
u64 oa_period, oa_freq_hz;
u64 id, value;
- int ret;
ret = get_user(id, uprop);
if (ret)
@@ -3634,6 +3631,32 @@ static int read_properties_unlocked(struct i915_perf *perf,
case DRM_I915_PERF_PROP_HOLD_PREEMPTION:
props->hold_preemption = !!value;
break;
+ case DRM_I915_PERF_PROP_GLOBAL_SSEU: {
+ struct drm_i915_gem_context_param_sseu user_sseu;
+
+ if (copy_from_user(&user_sseu,
+ u64_to_user_ptr(value),
+ sizeof(user_sseu))) {
+ DRM_DEBUG("Unable to copy global sseu parameter\n");
+ return -EFAULT;
+ }
+
+ ret = get_sseu_config(&props->sseu, props->engine, &user_sseu);
+ if (ret) {
+ DRM_DEBUG("Invalid SSEU configuration\n");
+ return ret;
+ }
+ props->has_sseu = true;
+ break;
+ }
+ case DRM_I915_PERF_PROP_POLL_OA_PERIOD:
+ if (value < 100000 /* 100us */) {
+ DRM_DEBUG("OA availability timer too small (%lluns < 100us)\n",
+ value);
+ return -EINVAL;
+ }
+ props->poll_oa_period = value;
+ break;
case DRM_I915_PERF_PROP_MAX:
MISSING_CASE(id);
return -EINVAL;
@@ -3716,7 +3739,6 @@ int i915_perf_open_ioctl(struct drm_device *dev, void *data,
void i915_perf_register(struct drm_i915_private *i915)
{
struct i915_perf *perf = &i915->perf;
- int ret;
if (!perf->i915)
return;
@@ -3730,64 +3752,7 @@ void i915_perf_register(struct drm_i915_private *i915)
perf->metrics_kobj =
kobject_create_and_add("metrics",
&i915->drm.primary->kdev->kobj);
- if (!perf->metrics_kobj)
- goto exit;
-
- sysfs_attr_init(&perf->test_config.sysfs_metric_id.attr);
-
- if (IS_TIGERLAKE(i915)) {
- i915_perf_load_test_config_tgl(i915);
- } else if (INTEL_GEN(i915) >= 11) {
- i915_perf_load_test_config_icl(i915);
- } else if (IS_CANNONLAKE(i915)) {
- i915_perf_load_test_config_cnl(i915);
- } else if (IS_COFFEELAKE(i915)) {
- if (IS_CFL_GT2(i915))
- i915_perf_load_test_config_cflgt2(i915);
- if (IS_CFL_GT3(i915))
- i915_perf_load_test_config_cflgt3(i915);
- } else if (IS_GEMINILAKE(i915)) {
- i915_perf_load_test_config_glk(i915);
- } else if (IS_KABYLAKE(i915)) {
- if (IS_KBL_GT2(i915))
- i915_perf_load_test_config_kblgt2(i915);
- else if (IS_KBL_GT3(i915))
- i915_perf_load_test_config_kblgt3(i915);
- } else if (IS_BROXTON(i915)) {
- i915_perf_load_test_config_bxt(i915);
- } else if (IS_SKYLAKE(i915)) {
- if (IS_SKL_GT2(i915))
- i915_perf_load_test_config_sklgt2(i915);
- else if (IS_SKL_GT3(i915))
- i915_perf_load_test_config_sklgt3(i915);
- else if (IS_SKL_GT4(i915))
- i915_perf_load_test_config_sklgt4(i915);
- } else if (IS_CHERRYVIEW(i915)) {
- i915_perf_load_test_config_chv(i915);
- } else if (IS_BROADWELL(i915)) {
- i915_perf_load_test_config_bdw(i915);
- } else if (IS_HASWELL(i915)) {
- i915_perf_load_test_config_hsw(i915);
- }
-
- if (perf->test_config.id == 0)
- goto sysfs_error;
-
- ret = sysfs_create_group(perf->metrics_kobj,
- &perf->test_config.sysfs_metric);
- if (ret)
- goto sysfs_error;
-
- perf->test_config.perf = perf;
- kref_init(&perf->test_config.ref);
- goto exit;
-
-sysfs_error:
- kobject_put(perf->metrics_kobj);
- perf->metrics_kobj = NULL;
-
-exit:
mutex_unlock(&perf->lock);
}
@@ -3807,9 +3772,6 @@ void i915_perf_unregister(struct drm_i915_private *i915)
if (!perf->metrics_kobj)
return;
- sysfs_remove_group(perf->metrics_kobj,
- &perf->test_config.sysfs_metric);
-
kobject_put(perf->metrics_kobj);
perf->metrics_kobj = NULL;
}
@@ -4408,6 +4370,11 @@ void i915_perf_init(struct drm_i915_private *i915)
ratelimit_set_flags(&perf->spurious_report_rs,
RATELIMIT_MSG_ON_RELEASE);
+ ratelimit_state_init(&perf->tail_pointer_race,
+ 5 * HZ, 10);
+ ratelimit_set_flags(&perf->tail_pointer_race,
+ RATELIMIT_MSG_ON_RELEASE);
+
atomic64_set(&perf->noa_programming_delay,
500 * 1000 /* 500us */);
@@ -4468,8 +4435,15 @@ int i915_perf_ioctl_version(void)
* preemption on a particular context so that performance data is
* accessible from a delta of MI_RPC reports without looking at the
* OA buffer.
+ *
+ * 4: Add DRM_I915_PERF_PROP_GLOBAL_SSEU to specify the SSEU
+ * configuration that all contexts run with for the duration of the
+ * performance recording.
+ *
+ * 5: Add DRM_I915_PERF_PROP_POLL_OA_PERIOD parameter that controls the
+ * interval for the hrtimer used to check for OA data.
*/
- return 3;
+ return 5;
}
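Userspace can gate its use of the new properties on the perf revision, exposed through GETPARAM; a minimal sketch:

#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int perf_revision(int drm_fd)
{
	int value = 0;
	struct drm_i915_getparam gp = {
		.param = I915_PARAM_PERF_REVISION,
		.value = &value,
	};

	if (ioctl(drm_fd, DRM_IOCTL_I915_GETPARAM, &gp))
		return -1;
	return value; /* >= 4: GLOBAL_SSEU, >= 5: POLL_OA_PERIOD */
}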
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
diff --git a/drivers/gpu/drm/i915/i915_perf_types.h b/drivers/gpu/drm/i915/i915_perf_types.h
index a0e22f00f6cf..a36a455ae336 100644
--- a/drivers/gpu/drm/i915/i915_perf_types.h
+++ b/drivers/gpu/drm/i915/i915_perf_types.h
@@ -16,11 +16,13 @@
#include <linux/uuid.h>
#include <linux/wait.h>
+#include "gt/intel_sseu.h"
#include "i915_reg.h"
#include "intel_wakeref.h"
struct drm_i915_private;
struct file;
+struct i915_active;
struct i915_gem_context;
struct i915_perf;
struct i915_vma;
@@ -272,21 +274,10 @@ struct i915_perf_stream {
spinlock_t ptr_lock;
/**
- * @tails: One 'aging' tail pointer and one 'aged' tail pointer ready to
- * used for reading.
- *
- * Initial values of 0xffffffff are invalid and imply that an
- * update is required (and should be ignored by an attempted
- * read)
- */
- struct {
- u32 offset;
- } tails[2];
-
- /**
- * @aged_tail_idx: Index for the aged tail ready to read() data up to.
+ * @aging_tail: The last tail pointer reported by the HW. The
+ * data might not have landed in memory yet though.
*/
- unsigned int aged_tail_idx;
+ u32 aging_tail;
/**
* @aging_timestamp: A monotonic timestamp for when the current aging tail pointer
@@ -302,6 +293,11 @@ struct i915_perf_stream {
* OA buffer data to userspace.
*/
u32 head;
+
+ /**
+ * @tail: The last verified tail that can be read by userspace.
+ */
+ u32 tail;
} oa_buffer;
/**
@@ -309,6 +305,12 @@ struct i915_perf_stream {
* reprogrammed.
*/
struct i915_vma *noa_wait;
+
+ /**
+ * @poll_oa_period: The period in nanoseconds at which the OA
+ * buffer should be checked for available data.
+ */
+ u64 poll_oa_period;
};
/**
@@ -339,8 +341,8 @@ struct i915_oa_ops {
* counter reports being sampled. May apply system constraints such as
* disabling EU clock gating as required.
*/
- struct i915_request *
- (*enable_metric_set)(struct i915_perf_stream *stream);
+ int (*enable_metric_set)(struct i915_perf_stream *stream,
+ struct i915_active *active);
/**
* @disable_metric_set: Remove system constraints associated with using
@@ -408,12 +410,22 @@ struct i915_perf {
struct i915_perf_stream *exclusive_stream;
/**
+ * @sseu: SSEU configuration selected to run while perf is active;
+ * it applies to all contexts.
+ */
+ struct intel_sseu sseu;
+
+ /**
* For rate limiting any notifications of spurious
* invalid OA reports
*/
struct ratelimit_state spurious_report_rs;
- struct i915_oa_config test_config;
+ /**
+ * For rate limiting any notifications of tail pointer
+ * race.
+ */
+ struct ratelimit_state tail_pointer_race;
u32 gen7_latched_oastatus1;
u32 ctx_oactxctrl_offset;
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index 2c062534eac1..230e9256ab30 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -1115,7 +1115,7 @@ void i915_pmu_register(struct drm_i915_private *i915)
int ret = -ENOMEM;
if (INTEL_GEN(i915) <= 2) {
- dev_info(i915->drm.dev, "PMU not supported for this GPU.");
+ drm_info(&i915->drm, "PMU not supported for this GPU.");
return;
}
@@ -1178,7 +1178,7 @@ err_name:
if (!is_igp(i915))
kfree(pmu->name);
err:
- dev_notice(i915->drm.dev, "Failed to register PMU!\n");
+ drm_notice(&i915->drm, "Failed to register PMU!\n");
}
void i915_pmu_unregister(struct drm_i915_private *i915)
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 309cb7d96b35..edda3f29c8aa 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -693,6 +693,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define OABUFFER_SIZE_8M (6 << 3)
#define OABUFFER_SIZE_16M (7 << 3)
+#define GEN12_OA_TLB_INV_CR _MMIO(0xceec)
+
/* Gen12 OAR unit */
#define GEN12_OAR_OACONTROL _MMIO(0x2960)
#define GEN12_OAR_OACONTROL_COUNTER_FORMAT_SHIFT 1
@@ -3092,6 +3094,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define GT_BSD_CS_ERROR_INTERRUPT (1 << 15)
#define GT_BSD_USER_INTERRUPT (1 << 12)
#define GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1 (1 << 11) /* hsw+; rsvd on snb, ivb, vlv */
+#define GT_WAIT_SEMAPHORE_INTERRUPT REG_BIT(11) /* bdw+ */
#define GT_CONTEXT_SWITCH_INTERRUPT (1 << 8)
#define GT_RENDER_L3_PARITY_ERROR_INTERRUPT (1 << 5) /* !snb */
#define GT_RENDER_PIPECTL_NOTIFY_INTERRUPT (1 << 4)
@@ -4322,6 +4325,96 @@ enum {
#define EXITLINE_MASK REG_GENMASK(12, 0)
#define EXITLINE_SHIFT 0
+/* VRR registers */
+#define _TRANS_VRR_CTL_A 0x60420
+#define _TRANS_VRR_CTL_B 0x61420
+#define _TRANS_VRR_CTL_C 0x62420
+#define _TRANS_VRR_CTL_D 0x63420
+#define TRANS_VRR_CTL(trans) _MMIO_TRANS2(trans, _TRANS_VRR_CTL_A)
+#define VRR_CTL_VRR_ENABLE REG_BIT(31)
+#define VRR_CTL_IGN_MAX_SHIFT REG_BIT(30)
+#define VRR_CTL_FLIP_LINE_EN REG_BIT(29)
+#define VRR_CTL_LINE_COUNT_MASK REG_GENMASK(10, 3)
+#define VRR_CTL_SW_FULLLINE_COUNT REG_BIT(0)
+
+#define _TRANS_VRR_VMAX_A 0x60424
+#define _TRANS_VRR_VMAX_B 0x61424
+#define _TRANS_VRR_VMAX_C 0x62424
+#define _TRANS_VRR_VMAX_D 0x63424
+#define TRANS_VRR_VMAX(trans) _MMIO_TRANS2(trans, _TRANS_VRR_VMAX_A)
+#define VRR_VMAX_MASK REG_GENMASK(19, 0)
+
+#define _TRANS_VRR_VMIN_A 0x60434
+#define _TRANS_VRR_VMIN_B 0x61434
+#define _TRANS_VRR_VMIN_C 0x62434
+#define _TRANS_VRR_VMIN_D 0x63434
+#define TRANS_VRR_VMIN(trans) _MMIO_TRANS2(trans, _TRANS_VRR_VMIN_A)
+#define VRR_VMIN_MASK REG_GENMASK(15, 0)
+
+#define _TRANS_VRR_VMAXSHIFT_A 0x60428
+#define _TRANS_VRR_VMAXSHIFT_B 0x61428
+#define _TRANS_VRR_VMAXSHIFT_C 0x62428
+#define _TRANS_VRR_VMAXSHIFT_D 0x63428
+#define TRANS_VRR_VMAXSHIFT(trans) _MMIO_TRANS2(trans, \
+ _TRANS_VRR_VMAXSHIFT_A)
+#define VRR_VMAXSHIFT_DEC_MASK REG_GENMASK(29, 16)
+#define VRR_VMAXSHIFT_DEC REG_BIT(16)
+#define VRR_VMAXSHIFT_INC_MASK REG_GENMASK(12, 0)
+
+#define _TRANS_VRR_STATUS_A 0x6042C
+#define _TRANS_VRR_STATUS_B 0x6142C
+#define _TRANS_VRR_STATUS_C 0x6242C
+#define _TRANS_VRR_STATUS_D 0x6342C
+#define TRANS_VRR_STATUS(trans) _MMIO_TRANS2(trans, _TRANS_VRR_STATUS_A)
+#define VRR_STATUS_VMAX_REACHED REG_BIT(31)
+#define VRR_STATUS_NOFLIP_TILL_BNDR REG_BIT(30)
+#define VRR_STATUS_FLIP_BEF_BNDR REG_BIT(29)
+#define VRR_STATUS_NO_FLIP_FRAME REG_BIT(28)
+#define VRR_STATUS_VRR_EN_LIVE REG_BIT(27)
+#define VRR_STATUS_FLIPS_SERVICED REG_BIT(26)
+#define VRR_STATUS_VBLANK_MASK REG_GENMASK(22, 20)
+#define STATUS_FSM_IDLE REG_FIELD_PREP(VRR_STATUS_VBLANK_MASK, 0)
+#define STATUS_FSM_WAIT_TILL_FDB REG_FIELD_PREP(VRR_STATUS_VBLANK_MASK, 1)
+#define STATUS_FSM_WAIT_TILL_FS REG_FIELD_PREP(VRR_STATUS_VBLANK_MASK, 2)
+#define STATUS_FSM_WAIT_TILL_FLIP REG_FIELD_PREP(VRR_STATUS_VBLANK_MASK, 3)
+#define STATUS_FSM_PIPELINE_FILL REG_FIELD_PREP(VRR_STATUS_VBLANK_MASK, 4)
+#define STATUS_FSM_ACTIVE REG_FIELD_PREP(VRR_STATUS_VBLANK_MASK, 5)
+#define STATUS_FSM_LEGACY_VBLANK REG_FIELD_PREP(VRR_STATUS_VBLANK_MASK, 6)
+
+#define _TRANS_VRR_VTOTAL_PREV_A 0x60480
+#define _TRANS_VRR_VTOTAL_PREV_B 0x61480
+#define _TRANS_VRR_VTOTAL_PREV_C 0x62480
+#define _TRANS_VRR_VTOTAL_PREV_D 0x63480
+#define TRANS_VRR_VTOTAL_PREV(trans) _MMIO_TRANS2(trans, \
+ _TRANS_VRR_VTOTAL_PREV_A)
+#define VRR_VTOTAL_FLIP_BEFR_BNDR REG_BIT(31)
+#define VRR_VTOTAL_FLIP_AFTER_BNDR REG_BIT(30)
+#define VRR_VTOTAL_FLIP_AFTER_DBLBUF REG_BIT(29)
+#define VRR_VTOTAL_PREV_FRAME_MASK REG_GENMASK(19, 0)
+
+#define _TRANS_VRR_FLIPLINE_A 0x60438
+#define _TRANS_VRR_FLIPLINE_B 0x61438
+#define _TRANS_VRR_FLIPLINE_C 0x62438
+#define _TRANS_VRR_FLIPLINE_D 0x63438
+#define TRANS_VRR_FLIPLINE(trans) _MMIO_TRANS2(trans, \
+ _TRANS_VRR_FLIPLINE_A)
+#define VRR_FLIPLINE_MASK REG_GENMASK(19, 0)
+
+#define _TRANS_VRR_STATUS2_A 0x6043C
+#define _TRANS_VRR_STATUS2_B 0x6143C
+#define _TRANS_VRR_STATUS2_C 0x6243C
+#define _TRANS_VRR_STATUS2_D 0x6343C
+#define TRANS_VRR_STATUS2(trans) _MMIO_TRANS2(trans, _TRANS_VRR_STATUS2_A)
+#define VRR_STATUS2_VERT_LN_CNT_MASK REG_GENMASK(19, 0)
+
+#define _TRANS_PUSH_A 0x60A70
+#define _TRANS_PUSH_B 0x61A70
+#define _TRANS_PUSH_C 0x62A70
+#define _TRANS_PUSH_D 0x63A70
+#define TRANS_PUSH(trans) _MMIO_TRANS2(trans, _TRANS_PUSH_A)
+#define TRANS_PUSH_EN REG_BIT(31)
+#define TRANS_PUSH_SEND REG_BIT(30)
+
/*
* HSW+ eDP PSR registers
*
@@ -6762,7 +6855,7 @@ enum {
#define PLANE_CTL_FORMAT_P012 (5 << 24)
#define PLANE_CTL_FORMAT_XRGB_16161616F (6 << 24)
#define PLANE_CTL_FORMAT_P016 (7 << 24)
-#define PLANE_CTL_FORMAT_AYUV (8 << 24)
+#define PLANE_CTL_FORMAT_XYUV (8 << 24)
#define PLANE_CTL_FORMAT_INDEXED (12 << 24)
#define PLANE_CTL_FORMAT_RGB_565 (14 << 24)
#define ICL_PLANE_CTL_FORMAT_MASK (0x1f << 23)
@@ -9698,8 +9791,11 @@ enum skl_power_gate {
#define TRANS_DDI_BPC_10 (1 << 20)
#define TRANS_DDI_BPC_6 (2 << 20)
#define TRANS_DDI_BPC_12 (3 << 20)
+#define TRANS_DDI_PORT_SYNC_MASTER_SELECT_MASK REG_GENMASK(19, 18) /* bdw-cnl */
+#define TRANS_DDI_PORT_SYNC_MASTER_SELECT(x) REG_FIELD_PREP(TRANS_DDI_PORT_SYNC_MASTER_SELECT_MASK, (x))
#define TRANS_DDI_PVSYNC (1 << 17)
#define TRANS_DDI_PHSYNC (1 << 16)
+#define TRANS_DDI_PORT_SYNC_ENABLE REG_BIT(15) /* bdw-cnl */
#define TRANS_DDI_EDP_INPUT_MASK (7 << 12)
#define TRANS_DDI_EDP_INPUT_A_ON (0 << 12)
#define TRANS_DDI_EDP_INPUT_A_ONOFF (4 << 12)
@@ -9726,12 +9822,10 @@ enum skl_power_gate {
#define _TRANS_DDI_FUNC_CTL2_EDP 0x6f404
#define _TRANS_DDI_FUNC_CTL2_DSI0 0x6b404
#define _TRANS_DDI_FUNC_CTL2_DSI1 0x6bc04
-#define TRANS_DDI_FUNC_CTL2(tran) _MMIO_TRANS2(tran, \
- _TRANS_DDI_FUNC_CTL2_A)
-#define PORT_SYNC_MODE_ENABLE (1 << 4)
-#define PORT_SYNC_MODE_MASTER_SELECT(x) ((x) << 0)
-#define PORT_SYNC_MODE_MASTER_SELECT_MASK (0x7 << 0)
-#define PORT_SYNC_MODE_MASTER_SELECT_SHIFT 0
+#define TRANS_DDI_FUNC_CTL2(tran) _MMIO_TRANS2(tran, _TRANS_DDI_FUNC_CTL2_A)
+#define PORT_SYNC_MODE_ENABLE REG_BIT(4)
+#define PORT_SYNC_MODE_MASTER_SELECT_MASK REG_GENMASK(2, 0)
+#define PORT_SYNC_MODE_MASTER_SELECT(x) REG_FIELD_PREP(PORT_SYNC_MODE_MASTER_SELECT_MASK, (x))
/* DisplayPort Transport Control */
#define _DP_TP_CTL_A 0x64040
@@ -9792,6 +9886,24 @@ enum skl_power_gate {
#define DDI_BUF_BALANCE_LEG_ENABLE (1 << 31)
#define DDI_BUF_TRANS_HI(port, i) _MMIO(_PORT(port, _DDI_BUF_TRANS_A, _DDI_BUF_TRANS_B) + (i) * 8 + 4)
+/* DDI DP Compliance Control */
+#define _DDI_DP_COMP_CTL_A 0x605F0
+#define _DDI_DP_COMP_CTL_B 0x615F0
+#define DDI_DP_COMP_CTL(pipe) _MMIO_PIPE(pipe, _DDI_DP_COMP_CTL_A, _DDI_DP_COMP_CTL_B)
+#define DDI_DP_COMP_CTL_ENABLE (1 << 31)
+#define DDI_DP_COMP_CTL_D10_2 (0 << 28)
+#define DDI_DP_COMP_CTL_SCRAMBLED_0 (1 << 28)
+#define DDI_DP_COMP_CTL_PRBS7 (2 << 28)
+#define DDI_DP_COMP_CTL_CUSTOM80 (3 << 28)
+#define DDI_DP_COMP_CTL_HBR2 (4 << 28)
+#define DDI_DP_COMP_CTL_SCRAMBLED_1 (5 << 28)
+#define DDI_DP_COMP_CTL_HBR2_RESET (0xFC << 0)
+
+/* DDI DP Compliance Pattern */
+#define _DDI_DP_COMP_PAT_A 0x605F4
+#define _DDI_DP_COMP_PAT_B 0x615F4
+#define DDI_DP_COMP_PAT(pipe, i) _MMIO(_PIPE(pipe, _DDI_DP_COMP_PAT_A, _DDI_DP_COMP_PAT_B) + (i) * 4)
+
/* Sideband Interface (SBI) is programmed indirectly, via
* SBI_ADDR, which contains the register offset; and SBI_DATA,
* which contains the payload */
@@ -10739,6 +10851,12 @@ enum skl_power_gate {
#define _PAL_PREC_MULTI_SEG_DATA_A 0x4A40C
#define _PAL_PREC_MULTI_SEG_DATA_B 0x4AC0C
+#define PAL_PREC_MULTI_SEG_RED_LDW_MASK REG_GENMASK(29, 24)
+#define PAL_PREC_MULTI_SEG_RED_UDW_MASK REG_GENMASK(29, 20)
+#define PAL_PREC_MULTI_SEG_GREEN_LDW_MASK REG_GENMASK(19, 14)
+#define PAL_PREC_MULTI_SEG_GREEN_UDW_MASK REG_GENMASK(19, 10)
+#define PAL_PREC_MULTI_SEG_BLUE_LDW_MASK REG_GENMASK(9, 4)
+#define PAL_PREC_MULTI_SEG_BLUE_UDW_MASK REG_GENMASK(9, 0)
#define PREC_PAL_MULTI_SEG_INDEX(pipe) _MMIO_PIPE(pipe, \
_PAL_PREC_MULTI_SEG_INDEX_A, \
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index c0df71d7d0ff..22635bbabf06 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -101,6 +101,11 @@ static signed long i915_fence_wait(struct dma_fence *fence,
timeout);
}
+struct kmem_cache *i915_request_slab_cache(void)
+{
+ return global.slab_requests;
+}
+
static void i915_fence_release(struct dma_fence *fence)
{
struct i915_request *rq = to_request(fence);
@@ -115,6 +120,10 @@ static void i915_fence_release(struct dma_fence *fence)
i915_sw_fence_fini(&rq->submit);
i915_sw_fence_fini(&rq->semaphore);
+ /* Keep one request on each engine for reserved use under memory pressure */
+ if (!cmpxchg(&rq->engine->request_pool, NULL, rq))
+ return;
+
kmem_cache_free(global.slab_requests, rq);
}
@@ -629,14 +638,22 @@ static void retire_requests(struct intel_timeline *tl)
}
static noinline struct i915_request *
-request_alloc_slow(struct intel_timeline *tl, gfp_t gfp)
+request_alloc_slow(struct intel_timeline *tl,
+ struct i915_request **rsvd,
+ gfp_t gfp)
{
struct i915_request *rq;
- if (list_empty(&tl->requests))
- goto out;
+ /* If we cannot wait, dip into our reserves */
+ if (!gfpflags_allow_blocking(gfp)) {
+ rq = xchg(rsvd, NULL);
+ if (!rq) /* Use the normal failure path for one final WARN */
+ goto out;
- if (!gfpflags_allow_blocking(gfp))
+ return rq;
+ }
+
+ if (list_empty(&tl->requests))
goto out;
/* Move our oldest request to the slab-cache (if not in use!) */
@@ -721,7 +738,7 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
rq = kmem_cache_alloc(global.slab_requests,
gfp | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
if (unlikely(!rq)) {
- rq = request_alloc_slow(tl, gfp);
+ rq = request_alloc_slow(tl, &ce->engine->request_pool, gfp);
if (!rq) {
ret = -ENOMEM;
goto err_unreserve;
@@ -1444,9 +1461,7 @@ void i915_request_add(struct i915_request *rq)
if (list_empty(&rq->sched.signalers_list))
attr.priority |= I915_PRIORITY_WAIT;
- local_bh_disable();
__i915_request_queue(rq, &attr);
- local_bh_enable(); /* Kick the execlists tasklet if just scheduled */
mutex_unlock(&tl->mutex);
}
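The engine->request_pool used by request_alloc_slow() above is a lock-free single-slot reserve: the release path parks at most one object with cmpxchg(), and the atomic-context allocation path claims it with xchg(). A generic sketch of the pattern (the struct and helper names are hypothetical):

struct obj { int payload; };

/* Park one object for atomic-context emergencies; free the rest. */
static void pool_release(struct obj **slot, struct obj *o)
{
	if (cmpxchg(slot, NULL, o))	/* slot was already occupied */
		kfree(o);
}

/* Claim the reserved object, if any, leaving the slot empty. */
static struct obj *pool_take(struct obj **slot)
{
	return xchg(slot, NULL);
}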
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index 3c552bfea67a..d8ce908e1346 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -300,6 +300,8 @@ static inline bool dma_fence_is_i915(const struct dma_fence *fence)
return fence->ops == &i915_fence_ops;
}
+struct kmem_cache *i915_request_slab_cache(void);
+
struct i915_request * __must_check
__i915_request_create(struct intel_context *ce, gfp_t gfp);
struct i915_request * __must_check
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index 68b06a7ba667..37cfcf5b321b 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -209,6 +209,12 @@ static void kick_submission(struct intel_engine_cs *engine,
if (!inflight)
goto unlock;
+ ENGINE_TRACE(engine,
+ "bumping queue-priority-hint:%d for rq:%llx:%lld, inflight:%llx:%lld prio %d\n",
+ prio,
+ rq->fence.context, rq->fence.seqno,
+ inflight->fence.context, inflight->fence.seqno,
+ inflight->sched.attr.priority);
engine->execlists.queue_priority_hint = prio;
/*
@@ -464,11 +470,15 @@ int i915_sched_node_add_dependency(struct i915_sched_node *node,
if (!dep)
return -ENOMEM;
+ local_bh_disable();
+
if (!__i915_sched_node_add_dependency(node, signal, dep,
I915_DEPENDENCY_EXTERNAL |
I915_DEPENDENCY_ALLOC))
i915_dependency_free(dep);
+ local_bh_enable(); /* kick submission tasklet */
+
return 0;
}
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c
index a3d38e089b6e..7daf81f55c90 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.c
+++ b/drivers/gpu/drm/i915/i915_sw_fence.c
@@ -421,7 +421,7 @@ static void timer_i915_sw_fence_wake(struct timer_list *t)
if (!fence)
return;
- pr_notice("Asynchronous wait on fence %s:%s:%llx timed out (hint:%pS)\n",
+ pr_notice("Asynchronous wait on fence %s:%s:%llx timed out (hint:%ps)\n",
cb->dma->ops->get_driver_name(cb->dma),
cb->dma->ops->get_timeline_name(cb->dma),
cb->dma->seqno,
diff --git a/drivers/gpu/drm/i915/i915_sw_fence_work.c b/drivers/gpu/drm/i915/i915_sw_fence_work.c
index 997b2998f1f2..a3a81bb8f2c3 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence_work.c
+++ b/drivers/gpu/drm/i915/i915_sw_fence_work.c
@@ -38,7 +38,10 @@ fence_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
if (!f->dma.error) {
dma_fence_get(&f->dma);
- queue_work(system_unbound_wq, &f->work);
+ if (test_bit(DMA_FENCE_WORK_IMM, &f->dma.flags))
+ fence_work(&f->work);
+ else
+ queue_work(system_unbound_wq, &f->work);
} else {
fence_complete(f);
}
diff --git a/drivers/gpu/drm/i915/i915_sw_fence_work.h b/drivers/gpu/drm/i915/i915_sw_fence_work.h
index 3a22b287e201..2c409f11c5c5 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence_work.h
+++ b/drivers/gpu/drm/i915/i915_sw_fence_work.h
@@ -32,6 +32,10 @@ struct dma_fence_work {
const struct dma_fence_work_ops *ops;
};
+enum {
+ DMA_FENCE_WORK_IMM = DMA_FENCE_FLAG_USER_BITS,
+};
+
void dma_fence_work_init(struct dma_fence_work *f,
const struct dma_fence_work_ops *ops);
int dma_fence_work_chain(struct dma_fence_work *f, struct dma_fence *signal);
@@ -41,4 +45,23 @@ static inline void dma_fence_work_commit(struct dma_fence_work *f)
i915_sw_fence_commit(&f->chain);
}
+/**
+ * dma_fence_work_commit_imm - Commit the fence, and if possible execute locally.
+ * @f: the fenced worker
+ *
+ * Instead of always scheduling a worker to execute the callback (see
+ * dma_fence_work_commit()), we try to execute the callback immediately in
+ * the local context. It is required that the fence be committed before it
+ * is published, and that no other threads try to tamper with the number
+ * of asynchronous waits on the fence (or else the callback will be
+ * executed in the wrong context, i.e. not the caller's).
+ */
+static inline void dma_fence_work_commit_imm(struct dma_fence_work *f)
+{
+ if (atomic_read(&f->chain.pending) <= 1)
+ __set_bit(DMA_FENCE_WORK_IMM, &f->dma.flags);
+
+ dma_fence_work_commit(f);
+}
+
#endif /* I915_SW_FENCE_WORK_H */
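A hedged usage sketch for the helper above; the ops wiring follows the existing dma_fence_work API in this header, with illustrative callback names:

static void do_bind(struct dma_fence_work *work)
{
	/* ... the deferred (or now possibly inline) work ... */
}

static const struct dma_fence_work_ops bind_ops = {
	.name = "bind",
	.work = do_bind,
};

static void example(struct dma_fence_work *work, struct dma_fence *prereq)
{
	dma_fence_work_init(work, &bind_ops);
	dma_fence_work_chain(work, prereq);	/* optional dependency, error check elided */
	dma_fence_work_commit_imm(work);	/* runs do_bind() inline if nothing is pending */
}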
diff --git a/drivers/gpu/drm/i915/i915_switcheroo.c b/drivers/gpu/drm/i915/i915_switcheroo.c
index ed69b5d4a375..b3a24eac21f1 100644
--- a/drivers/gpu/drm/i915/i915_switcheroo.c
+++ b/drivers/gpu/drm/i915/i915_switcheroo.c
@@ -20,14 +20,14 @@ static void i915_switcheroo_set_state(struct pci_dev *pdev,
}
if (state == VGA_SWITCHEROO_ON) {
- pr_info("switched on\n");
+ drm_info(&i915->drm, "switched on\n");
i915->drm.switch_power_state = DRM_SWITCH_POWER_CHANGING;
/* i915 resume handler doesn't set to D0 */
pci_set_power_state(pdev, PCI_D0);
i915_resume_switcheroo(i915);
i915->drm.switch_power_state = DRM_SWITCH_POWER_ON;
} else {
- pr_info("switched off\n");
+ drm_info(&i915->drm, "switched off\n");
i915->drm.switch_power_state = DRM_SWITCH_POWER_CHANGING;
i915_suspend_switcheroo(i915, pmm);
i915->drm.switch_power_state = DRM_SWITCH_POWER_OFF;
diff --git a/drivers/gpu/drm/i915/i915_utils.c b/drivers/gpu/drm/i915/i915_utils.c
index 029854ae65fc..e28eae4a8f70 100644
--- a/drivers/gpu/drm/i915/i915_utils.c
+++ b/drivers/gpu/drm/i915/i915_utils.c
@@ -101,5 +101,6 @@ void set_timer_ms(struct timer_list *t, unsigned long timeout)
*/
barrier();
- mod_timer(t, jiffies + timeout);
+ /* Keep t->expires = 0 reserved to indicate a canceled timer. */
+ mod_timer(t, jiffies + timeout ?: 1);
}
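Note the GNU a ?: b form above binds after the addition: (jiffies + timeout) ?: 1 only substitutes 1 when the sum wraps to exactly 0, keeping expires == 0 free as the 'cancelled' sentinel. Equivalent expansion:

unsigned long expires = jiffies + timeout;

if (!expires)	/* wrapped onto the reserved 'cancelled' value */
	expires = 1;
mod_timer(t, expires);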
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 5b3efb43a8ef..f0383a68c981 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -608,18 +608,6 @@ bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long color)
return true;
}
-static void assert_bind_count(const struct drm_i915_gem_object *obj)
-{
- /*
- * Combine the assertion that the object is bound and that we have
- * pinned its pages. But we should never have bound the object
- * more than we have pinned its pages. (For complete accuracy, we
- * assume that no else is pinning the pages, but as a rough assertion
- * that we will not run into problems later, this will do!)
- */
- GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < atomic_read(&obj->bind_count));
-}
-
/**
* i915_vma_insert - finds a slot for the vma in its address space
* @vma: the vma
@@ -738,12 +726,6 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, color));
- if (vma->obj) {
- struct drm_i915_gem_object *obj = vma->obj;
-
- atomic_inc(&obj->bind_count);
- assert_bind_count(obj);
- }
list_add_tail(&vma->vm_link, &vma->vm->bound_list);
return 0;
@@ -761,12 +743,6 @@ i915_vma_detach(struct i915_vma *vma)
* it to be reaped by the shrinker.
*/
list_del(&vma->vm_link);
- if (vma->obj) {
- struct drm_i915_gem_object *obj = vma->obj;
-
- assert_bind_count(obj);
- atomic_dec(&obj->bind_count);
- }
}
static bool try_qad_pin(struct i915_vma *vma, unsigned int flags)
@@ -913,11 +889,30 @@ int i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
if (flags & PIN_GLOBAL)
wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm);
- /* No more allocations allowed once we hold vm->mutex */
- err = mutex_lock_interruptible(&vma->vm->mutex);
+ /*
+ * Differentiate between user/kernel vma inside the aliasing-ppgtt.
+ *
+ * We conflate the Global GTT with the user's vma when using the
+ * aliasing-ppgtt, but it is still vitally important to try and
+ * keep the use cases distinct. For example, userptr objects are
+ * not allowed inside the Global GTT as that will cause lock
+ * inversions when we have to evict them via the mmu_notifier
+ * callbacks - but they are allowed to be part of the user ppGTT,
+ * which can never be mapped. As such we try to give the distinct
+ * users of the same mutex distinct lockclasses [equivalent to how
+ * we keep i915_ggtt and i915_ppgtt separate].
+ *
+ * NB this may cause us to mask real lock inversions -- while the
+ * code is safe today, lockdep may not be able to spot future
+ * transgressions.
+ */
+ err = mutex_lock_interruptible_nested(&vma->vm->mutex,
+ !(flags & PIN_GLOBAL));
if (err)
goto err_fence;
+ /* No more allocations allowed now we hold vm->mutex */
+
if (unlikely(i915_vma_is_closed(vma))) {
err = -ENOENT;
goto err_unlock;
@@ -980,7 +975,7 @@ err_unlock:
mutex_unlock(&vma->vm->mutex);
err_fence:
if (work)
- dma_fence_work_commit(&work->base);
+ dma_fence_work_commit_imm(&work->base);
if (wakeref)
intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref);
err_pages:
@@ -1097,6 +1092,7 @@ void i915_vma_release(struct kref *ref)
void i915_vma_parked(struct intel_gt *gt)
{
struct i915_vma *vma, *next;
+ LIST_HEAD(closed);
spin_lock_irq(&gt->closed_lock);
list_for_each_entry_safe(vma, next, &gt->closed_vma, closed_link) {
@@ -1108,28 +1104,26 @@ void i915_vma_parked(struct intel_gt *gt)
if (!kref_get_unless_zero(&obj->base.refcount))
continue;
- if (i915_vm_tryopen(vm)) {
- list_del_init(&vma->closed_link);
- } else {
+ if (!i915_vm_tryopen(vm)) {
i915_gem_object_put(obj);
- obj = NULL;
+ continue;
}
- spin_unlock_irq(&gt->closed_lock);
+ list_move(&vma->closed_link, &closed);
+ }
+ spin_unlock_irq(&gt->closed_lock);
- if (obj) {
- __i915_vma_put(vma);
- i915_gem_object_put(obj);
- }
+ /* As the GT is held idle, no vmas can be reopened as we destroy them */
+ list_for_each_entry_safe(vma, next, &closed, closed_link) {
+ struct drm_i915_gem_object *obj = vma->obj;
+ struct i915_address_space *vm = vma->vm;
- i915_vm_close(vm);
+ INIT_LIST_HEAD(&vma->closed_link);
+ __i915_vma_put(vma);
- /* Restart after dropping lock */
- spin_lock_irq(&gt->closed_lock);
- next = list_first_entry(&gt->closed_vma,
- typeof(*next), closed_link);
+ i915_gem_object_put(obj);
+ i915_vm_close(vm);
}
- spin_unlock_irq(&gt->closed_lock);
}
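The rework above replaces the drop-lock/restart-scan dance with a common two-phase pattern: splice the claimable entries onto a private list under the spinlock, then destroy them with the lock released. A generic sketch (struct item, claim() and destroy() are hypothetical):

struct item { struct list_head link; };

static void drain_closed(spinlock_t *lock, struct list_head *shared)
{
	struct item *it, *next;
	LIST_HEAD(closed);

	spin_lock_irq(lock);
	list_for_each_entry_safe(it, next, shared, link)
		if (claim(it))			/* take a reference, etc. */
			list_move(&it->link, &closed);
	spin_unlock_irq(lock);

	list_for_each_entry_safe(it, next, &closed, link)
		destroy(it);			/* safe without the lock */
}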
static void __i915_vma_iounmap(struct i915_vma *vma)
@@ -1173,7 +1167,8 @@ int __i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq)
GEM_BUG_ON(!i915_vma_is_pinned(vma));
/* Wait for the vma to be bound before we start! */
- err = i915_request_await_active(rq, &vma->active, 0);
+ err = i915_request_await_active(rq, &vma->active,
+ I915_ACTIVE_AWAIT_EXCL);
if (err)
return err;
@@ -1214,6 +1209,10 @@ int i915_vma_move_to_active(struct i915_vma *vma,
dma_resv_add_shared_fence(vma->resv, &rq->fence);
obj->write_domain = 0;
}
+
+ if (flags & EXEC_OBJECT_NEEDS_FENCE && vma->fence)
+ i915_active_add_request(&vma->fence->active, rq);
+
obj->read_domains |= I915_GEM_GPU_DOMAINS;
obj->mm.dirty = true;
@@ -1227,18 +1226,6 @@ int __i915_vma_unbind(struct i915_vma *vma)
lockdep_assert_held(&vma->vm->mutex);
- /*
- * First wait upon any activity as retiring the request may
- * have side-effects such as unpinning or even unbinding this vma.
- *
- * XXX Actually waiting under the vm->mutex is a hinderance and
- * should be pipelined wherever possible. In cases where that is
- * unavoidable, we should lift the wait to before the mutex.
- */
- ret = i915_vma_sync(vma);
- if (ret)
- return ret;
-
if (i915_vma_is_pinned(vma)) {
vma_print_allocator(vma, "is pinned");
return -EAGAIN;
@@ -1260,6 +1247,9 @@ int __i915_vma_unbind(struct i915_vma *vma)
GEM_BUG_ON(i915_vma_is_active(vma));
if (i915_vma_is_map_and_fenceable(vma)) {
+ /* Force a pagefault for domain tracking on next user access */
+ i915_vma_revoke_mmap(vma);
+
/*
* Check that we have flushed all writes through the GGTT
* before the unbind; otherwise, due to the non-strict nature
* of those indirect writes, they may end up referencing the
* GGTT PTE after the unbind.
*/
@@ -1276,12 +1266,7 @@ int __i915_vma_unbind(struct i915_vma *vma)
i915_vma_flush_writes(vma);
/* release the fence reg _after_ flushing */
- ret = i915_vma_revoke_fence(vma);
- if (ret)
- return ret;
-
- /* Force a pagefault for domain tracking on next user access */
- i915_vma_revoke_mmap(vma);
+ i915_vma_revoke_fence(vma);
__i915_vma_iounmap(vma);
clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
@@ -1312,16 +1297,21 @@ int i915_vma_unbind(struct i915_vma *vma)
if (!drm_mm_node_allocated(&vma->node))
return 0;
- if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
- /* XXX not always required: nop_clear_range */
- wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm);
-
/* Optimistic wait before taking the mutex */
err = i915_vma_sync(vma);
if (err)
goto out_rpm;
- err = mutex_lock_interruptible(&vm->mutex);
+ if (i915_vma_is_pinned(vma)) {
+ vma_print_allocator(vma, "is pinned");
+ return -EAGAIN;
+ }
+
+ if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
+ /* XXX not always required: nop_clear_range */
+ wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm);
+
+ err = mutex_lock_interruptible_nested(&vma->vm->mutex, !wakeref);
if (err)
goto out_rpm;
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index e1ced1df13e1..8ad1daabcd58 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -30,10 +30,10 @@
#include <drm/drm_mm.h>
+#include "gt/intel_ggtt_fencing.h"
#include "gem/i915_gem_object.h"
#include "i915_gem_gtt.h"
-#include "i915_gem_fence_reg.h"
#include "i915_active.h"
#include "i915_request.h"
@@ -326,7 +326,7 @@ static inline struct page *i915_vma_first_page(struct i915_vma *vma)
* True if the vma has a fence, false otherwise.
*/
int __must_check i915_vma_pin_fence(struct i915_vma *vma);
-int __must_check i915_vma_revoke_fence(struct i915_vma *vma);
+void i915_vma_revoke_fence(struct i915_vma *vma);
int __i915_vma_pin_fence(struct i915_vma *vma);
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index d7fe12734db8..db8496b4c38d 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -980,35 +980,32 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
drm_info(&dev_priv->drm,
"Display fused off, disabling\n");
info->pipe_mask = 0;
+ info->cpu_transcoder_mask = 0;
} else if (fuse_strap & IVB_PIPE_C_DISABLE) {
drm_info(&dev_priv->drm, "PipeC fused off\n");
info->pipe_mask &= ~BIT(PIPE_C);
+ info->cpu_transcoder_mask &= ~BIT(TRANSCODER_C);
}
} else if (HAS_DISPLAY(dev_priv) && INTEL_GEN(dev_priv) >= 9) {
u32 dfsm = I915_READ(SKL_DFSM);
- u8 enabled_mask = info->pipe_mask;
-
- if (dfsm & SKL_DFSM_PIPE_A_DISABLE)
- enabled_mask &= ~BIT(PIPE_A);
- if (dfsm & SKL_DFSM_PIPE_B_DISABLE)
- enabled_mask &= ~BIT(PIPE_B);
- if (dfsm & SKL_DFSM_PIPE_C_DISABLE)
- enabled_mask &= ~BIT(PIPE_C);
- if (INTEL_GEN(dev_priv) >= 12 &&
- (dfsm & TGL_DFSM_PIPE_D_DISABLE))
- enabled_mask &= ~BIT(PIPE_D);
- /*
- * At least one pipe should be enabled and if there are
- * disabled pipes, they should be the last ones, with no holes
- * in the mask.
- */
- if (enabled_mask == 0 || !is_power_of_2(enabled_mask + 1))
- drm_err(&dev_priv->drm,
- "invalid pipe fuse configuration: enabled_mask=0x%x\n",
- enabled_mask);
- else
- info->pipe_mask = enabled_mask;
+ if (dfsm & SKL_DFSM_PIPE_A_DISABLE) {
+ info->pipe_mask &= ~BIT(PIPE_A);
+ info->cpu_transcoder_mask &= ~BIT(TRANSCODER_A);
+ }
+ if (dfsm & SKL_DFSM_PIPE_B_DISABLE) {
+ info->pipe_mask &= ~BIT(PIPE_B);
+ info->cpu_transcoder_mask &= ~BIT(TRANSCODER_B);
+ }
+ if (dfsm & SKL_DFSM_PIPE_C_DISABLE) {
+ info->pipe_mask &= ~BIT(PIPE_C);
+ info->cpu_transcoder_mask &= ~BIT(TRANSCODER_C);
+ }
+ if (INTEL_GEN(dev_priv) >= 12 &&
+ (dfsm & TGL_DFSM_PIPE_D_DISABLE)) {
+ info->pipe_mask &= ~BIT(PIPE_D);
+ info->cpu_transcoder_mask &= ~BIT(TRANSCODER_D);
+ }
if (dfsm & SKL_DFSM_DISPLAY_HDCP_DISABLE)
info->display.has_hdcp = 0;
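
Each fused-off pipe now clears both its pipe bit and the matching transcoder bit, and the old validation requiring the enabled pipes to form a contiguous mask is dropped. A minimal userspace sketch of the masking pattern; the FUSE_* names and enums below are illustrative stand-ins for the SKL_DFSM_* bits, not the driver's definitions:

#include <stdint.h>
#include <stdio.h>

enum { PIPE_A, PIPE_B, PIPE_C };
enum { TRANSCODER_A, TRANSCODER_B, TRANSCODER_C };

#define BIT(n)			(1u << (n))
#define FUSE_PIPE_A_DISABLE	BIT(0)
#define FUSE_PIPE_B_DISABLE	BIT(1)
#define FUSE_PIPE_C_DISABLE	BIT(2)

int main(void)
{
	uint8_t pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C);
	uint8_t cpu_transcoder_mask =
		BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C);
	uint32_t dfsm = FUSE_PIPE_B_DISABLE;	/* pretend pipe B is fused off */

	if (dfsm & FUSE_PIPE_B_DISABLE) {
		/* a fused-off pipe takes its transcoder with it */
		pipe_mask &= ~BIT(PIPE_B);
		cpu_transcoder_mask &= ~BIT(TRANSCODER_B);
	}

	printf("pipes 0x%02x, transcoders 0x%02x\n",
	       pipe_mask, cpu_transcoder_mask);
	return 0;
}
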
diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h
index 1ecb9df2de91..cce6a72c5ebc 100644
--- a/drivers/gpu/drm/i915/intel_device_info.h
+++ b/drivers/gpu/drm/i915/intel_device_info.h
@@ -168,6 +168,7 @@ struct intel_device_info {
u32 display_mmio_offset;
u8 pipe_mask;
+ u8 cpu_transcoder_mask;
#define DEFINE_FLAG(name) u8 name:1
DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG);
diff --git a/drivers/gpu/drm/i915/intel_dram.c b/drivers/gpu/drm/i915/intel_dram.c
index 6b922efb1d7c..8aa12cad93ce 100644
--- a/drivers/gpu/drm/i915/intel_dram.c
+++ b/drivers/gpu/drm/i915/intel_dram.c
@@ -495,6 +495,5 @@ void intel_dram_edram_detect(struct drm_i915_private *i915)
else
i915->edram_size_mb = gen9_edram_size_mb(i915, edram_cap);
- dev_info(i915->drm.dev,
- "Found %uMB of eDRAM\n", i915->edram_size_mb);
+ drm_info(&i915->drm, "Found %uMB of eDRAM\n", i915->edram_size_mb);
}
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 8375054ba27d..b632b6bb9c3e 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -4016,6 +4016,7 @@ static int skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
int color_plane);
static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
int level,
+ unsigned int latency,
const struct skl_wm_params *wp,
const struct skl_wm_level *result_prev,
struct skl_wm_level *result /* out */);
@@ -4038,7 +4039,9 @@ skl_cursor_allocation(const struct intel_crtc_state *crtc_state,
drm_WARN_ON(&dev_priv->drm, ret);
for (level = 0; level <= max_level; level++) {
- skl_compute_plane_wm(crtc_state, level, &wp, &wm, &wm);
+ unsigned int latency = dev_priv->wm.skl_latency[level];
+
+ skl_compute_plane_wm(crtc_state, level, latency, &wp, &wm, &wm);
if (wm.min_ddb_alloc == U16_MAX)
break;
@@ -4972,12 +4975,12 @@ static bool skl_wm_has_lines(struct drm_i915_private *dev_priv, int level)
static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
int level,
+ unsigned int latency,
const struct skl_wm_params *wp,
const struct skl_wm_level *result_prev,
struct skl_wm_level *result /* out */)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
- u32 latency = dev_priv->wm.skl_latency[level];
uint_fixed_16_16_t method1, method2;
uint_fixed_16_16_t selected_result;
u32 res_blocks, res_lines, min_ddb_alloc = 0;
@@ -5106,9 +5109,10 @@ skl_compute_wm_levels(const struct intel_crtc_state *crtc_state,
for (level = 0; level <= max_level; level++) {
struct skl_wm_level *result = &levels[level];
+ unsigned int latency = dev_priv->wm.skl_latency[level];
- skl_compute_plane_wm(crtc_state, level, wm_params,
- result_prev, result);
+ skl_compute_plane_wm(crtc_state, level, latency,
+ wm_params, result_prev, result);
result_prev = result;
}
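
The watermark refactor hoists the per-level latency lookup out of skl_compute_plane_wm() and into its callers, so a caller can substitute an adjusted latency instead of the function always reading dev_priv->wm.skl_latency[level] itself. A small userspace sketch of the same hoisting, with made-up names and stand-in math:

#include <stdio.h>

static const unsigned int skl_latency[8] = { 2, 4, 8, 16, 32, 64, 128, 256 };

/* before: compute_wm(level) looked up skl_latency[level] internally */
static unsigned int compute_wm(int level, unsigned int latency)
{
	return latency * 10 + level;	/* stand-in for the real watermark math */
}

int main(void)
{
	for (int level = 0; level < 8; level++) {
		unsigned int latency = skl_latency[level]; /* lookup now in the caller */

		printf("level %d -> wm %u\n", level, compute_wm(level, latency));
	}
	return 0;
}
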
diff --git a/drivers/gpu/drm/i915/intel_sideband.c b/drivers/gpu/drm/i915/intel_sideband.c
index 1447e7516cb7..3f13baaef058 100644
--- a/drivers/gpu/drm/i915/intel_sideband.c
+++ b/drivers/gpu/drm/i915/intel_sideband.c
@@ -60,7 +60,7 @@ static void __vlv_punit_get(struct drm_i915_private *i915)
* to the Valleyview P-unit and not all sideband communications.
*/
if (IS_VALLEYVIEW(i915)) {
- pm_qos_update_request(&i915->sb_qos, 0);
+ cpu_latency_qos_update_request(&i915->sb_qos, 0);
on_each_cpu(ping, NULL, 1);
}
}
@@ -68,7 +68,8 @@ static void __vlv_punit_get(struct drm_i915_private *i915)
static void __vlv_punit_put(struct drm_i915_private *i915)
{
if (IS_VALLEYVIEW(i915))
- pm_qos_update_request(&i915->sb_qos, PM_QOS_DEFAULT_VALUE);
+ cpu_latency_qos_update_request(&i915->sb_qos,
+ PM_QOS_DEFAULT_VALUE);
iosf_mbi_punit_release();
}
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index abb18b90d7c3..fa86b7ab2d99 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -665,7 +665,7 @@ void intel_uncore_forcewake_user_put(struct intel_uncore *uncore)
mmio_debug_resume(uncore->debug);
if (check_for_unclaimed_mmio(uncore))
- dev_info(uncore->i915->drm.dev,
+ drm_info(&uncore->i915->drm,
"Invalid mmio detected during user access\n");
spin_unlock(&uncore->debug->lock);
@@ -735,6 +735,28 @@ void intel_uncore_forcewake_put(struct intel_uncore *uncore,
}
/**
+ * intel_uncore_forcewake_flush - flush the delayed release
+ * @uncore: the intel_uncore structure
+ * @fw_domains: forcewake domains to flush
+ */
+void intel_uncore_forcewake_flush(struct intel_uncore *uncore,
+ enum forcewake_domains fw_domains)
+{
+ struct intel_uncore_forcewake_domain *domain;
+ unsigned int tmp;
+
+ if (!uncore->funcs.force_wake_put)
+ return;
+
+ fw_domains &= uncore->fw_domains;
+ for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
+ WRITE_ONCE(domain->active, false);
+ if (hrtimer_cancel(&domain->timer))
+ intel_uncore_fw_release_timer(&domain->timer);
+ }
+}
+
+/**
 * intel_uncore_forcewake_put__locked - release forcewake domain references
 * @uncore: the intel_uncore structure
 * @fw_domains: forcewake domains to put reference on
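
intel_uncore_forcewake_flush() follows a common cancel-then-complete shape: mark the domain inactive, cancel its armed release timer, and if hrtimer_cancel() reports the timer was still pending, run the deferred release synchronously. A plain-C sketch of that shape with stand-in types (no hrtimers involved):

#include <stdbool.h>
#include <stdio.h>

struct fw_domain {
	bool active;
	bool timer_armed;	/* stand-in for a queued hrtimer */
	int refcount;
};

static bool timer_cancel(struct fw_domain *d)	/* ~ hrtimer_cancel() */
{
	bool was_armed = d->timer_armed;

	d->timer_armed = false;
	return was_armed;
}

static void fw_release(struct fw_domain *d)	/* ~ intel_uncore_fw_release_timer() */
{
	d->refcount--;
}

static void fw_flush(struct fw_domain *d)
{
	d->active = false;	/* ~ WRITE_ONCE(domain->active, false) */
	if (timer_cancel(d))	/* timer was pending: do its work now */
		fw_release(d);
}

int main(void)
{
	struct fw_domain d = { .active = true, .timer_armed = true, .refcount = 1 };

	fw_flush(&d);
	printf("refcount %d, armed %d\n", d.refcount, d.timer_armed);
	return 0;
}
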
diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h
index dcfa243892c6..8d3aa8b9acf9 100644
--- a/drivers/gpu/drm/i915/intel_uncore.h
+++ b/drivers/gpu/drm/i915/intel_uncore.h
@@ -209,7 +209,11 @@ void intel_uncore_forcewake_get(struct intel_uncore *uncore,
enum forcewake_domains domains);
void intel_uncore_forcewake_put(struct intel_uncore *uncore,
enum forcewake_domains domains);
-/* Like above but the caller must manage the uncore.lock itself.
+void intel_uncore_forcewake_flush(struct intel_uncore *uncore,
+ enum forcewake_domains fw_domains);
+
+/*
+ * Like above but the caller must manage the uncore.lock itself.
* Must be used with I915_READ_FW and friends.
*/
void intel_uncore_forcewake_get__locked(struct intel_uncore *uncore,
diff --git a/drivers/gpu/drm/i915/intel_wakeref.c b/drivers/gpu/drm/i915/intel_wakeref.c
index 8fbf6f4d3f26..dfd87d082218 100644
--- a/drivers/gpu/drm/i915/intel_wakeref.c
+++ b/drivers/gpu/drm/i915/intel_wakeref.c
@@ -70,11 +70,12 @@ unlock:
void __intel_wakeref_put_last(struct intel_wakeref *wf, unsigned long flags)
{
- INTEL_WAKEREF_BUG_ON(work_pending(&wf->work));
+ INTEL_WAKEREF_BUG_ON(delayed_work_pending(&wf->work));
/* Assume we are not in process context and so cannot sleep. */
if (flags & INTEL_WAKEREF_PUT_ASYNC || !mutex_trylock(&wf->mutex)) {
- schedule_work(&wf->work);
+ mod_delayed_work(system_wq, &wf->work,
+ FIELD_GET(INTEL_WAKEREF_PUT_DELAY, flags));
return;
}
@@ -83,7 +84,7 @@ void __intel_wakeref_put_last(struct intel_wakeref *wf, unsigned long flags)
static void __intel_wakeref_put_work(struct work_struct *wrk)
{
- struct intel_wakeref *wf = container_of(wrk, typeof(*wf), work);
+ struct intel_wakeref *wf = container_of(wrk, typeof(*wf), work.work);
if (atomic_add_unless(&wf->count, -1, 1))
return;
@@ -104,8 +105,9 @@ void __intel_wakeref_init(struct intel_wakeref *wf,
atomic_set(&wf->count, 0);
wf->wakeref = 0;
- INIT_WORK(&wf->work, __intel_wakeref_put_work);
- lockdep_init_map(&wf->work.lockdep_map, "wakeref.work", &key->work, 0);
+ INIT_DELAYED_WORK(&wf->work, __intel_wakeref_put_work);
+ lockdep_init_map(&wf->work.work.lockdep_map,
+ "wakeref.work", &key->work, 0);
}
int intel_wakeref_wait_for_idle(struct intel_wakeref *wf)
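
Once the work_struct becomes a delayed_work, the callback receives a pointer to the work_struct embedded inside it, which is why the container_of() above now walks back through "work.work". A self-contained userspace model of that double-member walk (container_of is re-derived here from offsetof; the struct names are stand-ins):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work_struct { int pending; };
struct delayed_work { struct work_struct work; long timer; };

struct wakeref {
	int count;
	struct delayed_work work;
};

static void put_work(struct work_struct *wrk)
{
	/* wrk points at wf->work.work, so walk back through both members */
	struct wakeref *wf = container_of(wrk, struct wakeref, work.work);

	wf->count--;
}

int main(void)
{
	struct wakeref wf = { .count = 1 };

	put_work(&wf.work.work);
	printf("count %d\n", wf.count);
	return 0;
}
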
diff --git a/drivers/gpu/drm/i915/intel_wakeref.h b/drivers/gpu/drm/i915/intel_wakeref.h
index 7d1e676b71ef..545c8f277c46 100644
--- a/drivers/gpu/drm/i915/intel_wakeref.h
+++ b/drivers/gpu/drm/i915/intel_wakeref.h
@@ -8,6 +8,7 @@
#define INTEL_WAKEREF_H
#include <linux/atomic.h>
+#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/lockdep.h>
#include <linux/mutex.h>
@@ -41,7 +42,7 @@ struct intel_wakeref {
struct intel_runtime_pm *rpm;
const struct intel_wakeref_ops *ops;
- struct work_struct work;
+ struct delayed_work work;
};
struct intel_wakeref_lockclass {
@@ -117,6 +118,11 @@ intel_wakeref_get_if_active(struct intel_wakeref *wf)
return atomic_inc_not_zero(&wf->count);
}
+enum {
+ INTEL_WAKEREF_PUT_ASYNC_BIT = 0,
+ __INTEL_WAKEREF_PUT_LAST_BIT__
+};
+
/**
* intel_wakeref_put_flags: Release the wakeref
* @wf: the wakeref
@@ -134,7 +140,9 @@ intel_wakeref_get_if_active(struct intel_wakeref *wf)
*/
static inline void
__intel_wakeref_put(struct intel_wakeref *wf, unsigned long flags)
-#define INTEL_WAKEREF_PUT_ASYNC BIT(0)
+#define INTEL_WAKEREF_PUT_ASYNC BIT(INTEL_WAKEREF_PUT_ASYNC_BIT)
+#define INTEL_WAKEREF_PUT_DELAY \
+ GENMASK(BITS_PER_LONG - 1, __INTEL_WAKEREF_PUT_LAST_BIT__)
{
INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0);
if (unlikely(!atomic_add_unless(&wf->count, -1, 1)))
@@ -154,6 +162,14 @@ intel_wakeref_put_async(struct intel_wakeref *wf)
__intel_wakeref_put(wf, INTEL_WAKEREF_PUT_ASYNC);
}
+static inline void
+intel_wakeref_put_delay(struct intel_wakeref *wf, unsigned long delay)
+{
+ __intel_wakeref_put(wf,
+ INTEL_WAKEREF_PUT_ASYNC |
+ FIELD_PREP(INTEL_WAKEREF_PUT_DELAY, delay));
+}
+
/**
* intel_wakeref_lock: Lock the wakeref (mutex)
* @wf: the wakeref
@@ -194,7 +210,7 @@ intel_wakeref_unlock_wait(struct intel_wakeref *wf)
{
mutex_lock(&wf->mutex);
mutex_unlock(&wf->mutex);
- flush_work(&wf->work);
+ flush_delayed_work(&wf->work);
}
/**
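
The flags word handed to __intel_wakeref_put() now carries two things: bit 0 is the ASYNC flag and every bit above it holds the delay, packed and unpacked with FIELD_PREP()/FIELD_GET() over the GENMASK() range. A userspace re-implementation of that arithmetic with plain shifts (stand-in names; the kernel helpers live in <linux/bits.h> and <linux/bitfield.h>):

#include <stdio.h>

#define PUT_ASYNC	(1ul << 0)
#define DELAY_SHIFT	1
#define DELAY_MASK	(~0ul << DELAY_SHIFT)	/* ~ GENMASK(BITS_PER_LONG - 1, 1) */

static unsigned long pack_flags(unsigned long delay)	/* ~ FIELD_PREP(DELAY_MASK, delay) */
{
	return PUT_ASYNC | (delay << DELAY_SHIFT);
}

static unsigned long unpack_delay(unsigned long flags)	/* ~ FIELD_GET(DELAY_MASK, flags) */
{
	return (flags & DELAY_MASK) >> DELAY_SHIFT;
}

int main(void)
{
	unsigned long flags = pack_flags(250);	/* e.g. 250 jiffies of delay */

	printf("async=%lu delay=%lu\n", flags & PUT_ASYNC, unpack_delay(flags));
	return 0;
}
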
diff --git a/drivers/gpu/drm/i915/intel_wopcm.c b/drivers/gpu/drm/i915/intel_wopcm.c
index 2bb9f9f9a50a..2186386a45c8 100644
--- a/drivers/gpu/drm/i915/intel_wopcm.c
+++ b/drivers/gpu/drm/i915/intel_wopcm.c
@@ -86,7 +86,7 @@ void intel_wopcm_init_early(struct intel_wopcm *wopcm)
else
wopcm->size = GEN9_WOPCM_SIZE;
- DRM_DEV_DEBUG_DRIVER(i915->drm.dev, "WOPCM: %uK\n", wopcm->size / 1024);
+ drm_dbg(&i915->drm, "WOPCM: %uK\n", wopcm->size / 1024);
}
static inline u32 context_reserved_size(struct drm_i915_private *i915)
@@ -112,7 +112,7 @@ static inline bool gen9_check_dword_gap(struct drm_i915_private *i915,
offset = guc_wopcm_base + GEN9_GUC_WOPCM_OFFSET;
if (offset > guc_wopcm_size ||
(guc_wopcm_size - offset) < sizeof(u32)) {
- dev_err(i915->drm.dev,
+ drm_err(&i915->drm,
"WOPCM: invalid GuC region size: %uK < %uK\n",
guc_wopcm_size / SZ_1K,
(u32)(offset + sizeof(u32)) / SZ_1K);
@@ -131,7 +131,7 @@ static inline bool gen9_check_huc_fw_fits(struct drm_i915_private *i915,
* firmware uploading would fail.
*/
if (huc_fw_size > guc_wopcm_size - GUC_WOPCM_RESERVED) {
- dev_err(i915->drm.dev, "WOPCM: no space for %s: %uK < %uK\n",
+ drm_err(&i915->drm, "WOPCM: no space for %s: %uK < %uK\n",
intel_uc_fw_type_repr(INTEL_UC_FW_TYPE_HUC),
(guc_wopcm_size - GUC_WOPCM_RESERVED) / SZ_1K,
huc_fw_size / 1024);
@@ -166,7 +166,7 @@ static inline bool __check_layout(struct drm_i915_private *i915, u32 wopcm_size,
size = wopcm_size - ctx_rsvd;
if (unlikely(range_overflows(guc_wopcm_base, guc_wopcm_size, size))) {
- dev_err(i915->drm.dev,
+ drm_err(&i915->drm,
"WOPCM: invalid GuC region layout: %uK + %uK > %uK\n",
guc_wopcm_base / SZ_1K, guc_wopcm_size / SZ_1K,
size / SZ_1K);
@@ -175,7 +175,7 @@ static inline bool __check_layout(struct drm_i915_private *i915, u32 wopcm_size,
size = guc_fw_size + GUC_WOPCM_RESERVED + GUC_WOPCM_STACK_RESERVED;
if (unlikely(guc_wopcm_size < size)) {
- dev_err(i915->drm.dev, "WOPCM: no space for %s: %uK < %uK\n",
+ drm_err(&i915->drm, "WOPCM: no space for %s: %uK < %uK\n",
intel_uc_fw_type_repr(INTEL_UC_FW_TYPE_GUC),
guc_wopcm_size / SZ_1K, size / SZ_1K);
return false;
@@ -183,7 +183,7 @@ static inline bool __check_layout(struct drm_i915_private *i915, u32 wopcm_size,
size = huc_fw_size + WOPCM_RESERVED_SIZE;
if (unlikely(guc_wopcm_base < size)) {
- dev_err(i915->drm.dev, "WOPCM: no space for %s: %uK < %uK\n",
+ drm_err(&i915->drm, "WOPCM: no space for %s: %uK < %uK\n",
intel_uc_fw_type_repr(INTEL_UC_FW_TYPE_HUC),
guc_wopcm_base / SZ_1K, size / SZ_1K);
return false;
@@ -242,10 +242,8 @@ void intel_wopcm_init(struct intel_wopcm *wopcm)
return;
if (__wopcm_regs_locked(gt->uncore, &guc_wopcm_base, &guc_wopcm_size)) {
- DRM_DEV_DEBUG_DRIVER(i915->drm.dev,
- "GuC WOPCM is already locked [%uK, %uK)\n",
- guc_wopcm_base / SZ_1K,
- guc_wopcm_size / SZ_1K);
+ drm_dbg(&i915->drm, "GuC WOPCM is already locked [%uK, %uK)\n",
+ guc_wopcm_base / SZ_1K, guc_wopcm_size / SZ_1K);
goto check;
}
@@ -266,8 +264,8 @@ void intel_wopcm_init(struct intel_wopcm *wopcm)
guc_wopcm_size = wopcm->size - ctx_rsvd - guc_wopcm_base;
guc_wopcm_size &= GUC_WOPCM_SIZE_MASK;
- DRM_DEV_DEBUG_DRIVER(i915->drm.dev, "Calculated GuC WOPCM [%uK, %uK)\n",
- guc_wopcm_base / SZ_1K, guc_wopcm_size / SZ_1K);
+ drm_dbg(&i915->drm, "Calculated GuC WOPCM [%uK, %uK)\n",
+ guc_wopcm_base / SZ_1K, guc_wopcm_size / SZ_1K);
check:
if (__check_layout(i915, wopcm->size, guc_wopcm_base, guc_wopcm_size,
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_bdw.c b/drivers/gpu/drm/i915/oa/i915_oa_bdw.c
deleted file mode 100644
index 14da5c3b569d..000000000000
--- a/drivers/gpu/drm/i915/oa/i915_oa_bdw.c
+++ /dev/null
@@ -1,90 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright © 2018-2019 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#include <linux/sysfs.h>
-
-#include "i915_drv.h"
-#include "i915_oa_bdw.h"
-
-static const struct i915_oa_reg b_counter_config_test_oa[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2744), 0x00800000 },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2724), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2770), 0x00000004 },
- { _MMIO(0x2774), 0x00000000 },
- { _MMIO(0x2778), 0x00000003 },
- { _MMIO(0x277c), 0x00000000 },
- { _MMIO(0x2780), 0x00000007 },
- { _MMIO(0x2784), 0x00000000 },
- { _MMIO(0x2788), 0x00100002 },
- { _MMIO(0x278c), 0x0000fff7 },
- { _MMIO(0x2790), 0x00100002 },
- { _MMIO(0x2794), 0x0000ffcf },
- { _MMIO(0x2798), 0x00100082 },
- { _MMIO(0x279c), 0x0000ffef },
- { _MMIO(0x27a0), 0x001000c2 },
- { _MMIO(0x27a4), 0x0000ffe7 },
- { _MMIO(0x27a8), 0x00100001 },
- { _MMIO(0x27ac), 0x0000ffe7 },
-};
-
-static const struct i915_oa_reg flex_eu_config_test_oa[] = {
-};
-
-static const struct i915_oa_reg mux_config_test_oa[] = {
- { _MMIO(0x9840), 0x000000a0 },
- { _MMIO(0x9888), 0x198b0000 },
- { _MMIO(0x9888), 0x078b0066 },
- { _MMIO(0x9888), 0x118b0000 },
- { _MMIO(0x9888), 0x258b0000 },
- { _MMIO(0x9888), 0x21850008 },
- { _MMIO(0x9888), 0x0d834000 },
- { _MMIO(0x9888), 0x07844000 },
- { _MMIO(0x9888), 0x17804000 },
- { _MMIO(0x9888), 0x21800000 },
- { _MMIO(0x9888), 0x4f800000 },
- { _MMIO(0x9888), 0x41800000 },
- { _MMIO(0x9888), 0x31800000 },
- { _MMIO(0x9840), 0x00000080 },
-};
-
-static ssize_t
-show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "1\n");
-}
-
-void
-i915_perf_load_test_config_bdw(struct drm_i915_private *dev_priv)
-{
- strlcpy(dev_priv->perf.test_config.uuid,
- "d6de6f55-e526-4f79-a6a6-d7315c09044e",
- sizeof(dev_priv->perf.test_config.uuid));
- dev_priv->perf.test_config.id = 1;
-
- dev_priv->perf.test_config.mux_regs = mux_config_test_oa;
- dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
-
- dev_priv->perf.test_config.b_counter_regs = b_counter_config_test_oa;
- dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
-
- dev_priv->perf.test_config.flex_regs = flex_eu_config_test_oa;
- dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
-
- dev_priv->perf.test_config.sysfs_metric.name = "d6de6f55-e526-4f79-a6a6-d7315c09044e";
- dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs;
-
- dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr;
-
- dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id";
- dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444;
- dev_priv->perf.test_config.sysfs_metric_id.show = show_test_oa_id;
-}
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_bdw.h b/drivers/gpu/drm/i915/oa/i915_oa_bdw.h
deleted file mode 100644
index 0cee3334f0a6..000000000000
--- a/drivers/gpu/drm/i915/oa/i915_oa_bdw.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2018-2019 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#ifndef __I915_OA_BDW_H__
-#define __I915_OA_BDW_H__
-
-struct drm_i915_private;
-
-void i915_perf_load_test_config_bdw(struct drm_i915_private *dev_priv);
-
-#endif
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_bxt.c b/drivers/gpu/drm/i915/oa/i915_oa_bxt.c
deleted file mode 100644
index 3e785bafcf99..000000000000
--- a/drivers/gpu/drm/i915/oa/i915_oa_bxt.c
+++ /dev/null
@@ -1,88 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright © 2018-2019 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#include <linux/sysfs.h>
-
-#include "i915_drv.h"
-#include "i915_oa_bxt.h"
-
-static const struct i915_oa_reg b_counter_config_test_oa[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2744), 0x00800000 },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2724), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2770), 0x00000004 },
- { _MMIO(0x2774), 0x00000000 },
- { _MMIO(0x2778), 0x00000003 },
- { _MMIO(0x277c), 0x00000000 },
- { _MMIO(0x2780), 0x00000007 },
- { _MMIO(0x2784), 0x00000000 },
- { _MMIO(0x2788), 0x00100002 },
- { _MMIO(0x278c), 0x0000fff7 },
- { _MMIO(0x2790), 0x00100002 },
- { _MMIO(0x2794), 0x0000ffcf },
- { _MMIO(0x2798), 0x00100082 },
- { _MMIO(0x279c), 0x0000ffef },
- { _MMIO(0x27a0), 0x001000c2 },
- { _MMIO(0x27a4), 0x0000ffe7 },
- { _MMIO(0x27a8), 0x00100001 },
- { _MMIO(0x27ac), 0x0000ffe7 },
-};
-
-static const struct i915_oa_reg flex_eu_config_test_oa[] = {
-};
-
-static const struct i915_oa_reg mux_config_test_oa[] = {
- { _MMIO(0x9840), 0x00000080 },
- { _MMIO(0x9888), 0x19800000 },
- { _MMIO(0x9888), 0x07800063 },
- { _MMIO(0x9888), 0x11800000 },
- { _MMIO(0x9888), 0x23810008 },
- { _MMIO(0x9888), 0x1d950400 },
- { _MMIO(0x9888), 0x0f922000 },
- { _MMIO(0x9888), 0x1f908000 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x55900000 },
- { _MMIO(0x9888), 0x47900000 },
- { _MMIO(0x9888), 0x33900000 },
-};
-
-static ssize_t
-show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "1\n");
-}
-
-void
-i915_perf_load_test_config_bxt(struct drm_i915_private *dev_priv)
-{
- strlcpy(dev_priv->perf.test_config.uuid,
- "5ee72f5c-092f-421e-8b70-225f7c3e9612",
- sizeof(dev_priv->perf.test_config.uuid));
- dev_priv->perf.test_config.id = 1;
-
- dev_priv->perf.test_config.mux_regs = mux_config_test_oa;
- dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
-
- dev_priv->perf.test_config.b_counter_regs = b_counter_config_test_oa;
- dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
-
- dev_priv->perf.test_config.flex_regs = flex_eu_config_test_oa;
- dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
-
- dev_priv->perf.test_config.sysfs_metric.name = "5ee72f5c-092f-421e-8b70-225f7c3e9612";
- dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs;
-
- dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr;
-
- dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id";
- dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444;
- dev_priv->perf.test_config.sysfs_metric_id.show = show_test_oa_id;
-}
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_bxt.h b/drivers/gpu/drm/i915/oa/i915_oa_bxt.h
deleted file mode 100644
index 0bdf391323ec..000000000000
--- a/drivers/gpu/drm/i915/oa/i915_oa_bxt.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2018-2019 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#ifndef __I915_OA_BXT_H__
-#define __I915_OA_BXT_H__
-
-struct drm_i915_private;
-
-void i915_perf_load_test_config_bxt(struct drm_i915_private *dev_priv);
-
-#endif
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_cflgt2.c b/drivers/gpu/drm/i915/oa/i915_oa_cflgt2.c
deleted file mode 100644
index 0ea86f70a06c..000000000000
--- a/drivers/gpu/drm/i915/oa/i915_oa_cflgt2.c
+++ /dev/null
@@ -1,89 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright © 2018-2019 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#include <linux/sysfs.h>
-
-#include "i915_drv.h"
-#include "i915_oa_cflgt2.h"
-
-static const struct i915_oa_reg b_counter_config_test_oa[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2744), 0x00800000 },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2724), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2770), 0x00000004 },
- { _MMIO(0x2774), 0x00000000 },
- { _MMIO(0x2778), 0x00000003 },
- { _MMIO(0x277c), 0x00000000 },
- { _MMIO(0x2780), 0x00000007 },
- { _MMIO(0x2784), 0x00000000 },
- { _MMIO(0x2788), 0x00100002 },
- { _MMIO(0x278c), 0x0000fff7 },
- { _MMIO(0x2790), 0x00100002 },
- { _MMIO(0x2794), 0x0000ffcf },
- { _MMIO(0x2798), 0x00100082 },
- { _MMIO(0x279c), 0x0000ffef },
- { _MMIO(0x27a0), 0x001000c2 },
- { _MMIO(0x27a4), 0x0000ffe7 },
- { _MMIO(0x27a8), 0x00100001 },
- { _MMIO(0x27ac), 0x0000ffe7 },
-};
-
-static const struct i915_oa_reg flex_eu_config_test_oa[] = {
-};
-
-static const struct i915_oa_reg mux_config_test_oa[] = {
- { _MMIO(0x9840), 0x00000080 },
- { _MMIO(0x9888), 0x11810000 },
- { _MMIO(0x9888), 0x07810013 },
- { _MMIO(0x9888), 0x1f810000 },
- { _MMIO(0x9888), 0x1d810000 },
- { _MMIO(0x9888), 0x1b930040 },
- { _MMIO(0x9888), 0x07e54000 },
- { _MMIO(0x9888), 0x1f908000 },
- { _MMIO(0x9888), 0x11900000 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x53900000 },
- { _MMIO(0x9888), 0x45900000 },
- { _MMIO(0x9888), 0x33900000 },
-};
-
-static ssize_t
-show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "1\n");
-}
-
-void
-i915_perf_load_test_config_cflgt2(struct drm_i915_private *dev_priv)
-{
- strlcpy(dev_priv->perf.test_config.uuid,
- "74fb4902-d3d3-4237-9e90-cbdc68d0a446",
- sizeof(dev_priv->perf.test_config.uuid));
- dev_priv->perf.test_config.id = 1;
-
- dev_priv->perf.test_config.mux_regs = mux_config_test_oa;
- dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
-
- dev_priv->perf.test_config.b_counter_regs = b_counter_config_test_oa;
- dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
-
- dev_priv->perf.test_config.flex_regs = flex_eu_config_test_oa;
- dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
-
- dev_priv->perf.test_config.sysfs_metric.name = "74fb4902-d3d3-4237-9e90-cbdc68d0a446";
- dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs;
-
- dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr;
-
- dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id";
- dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444;
- dev_priv->perf.test_config.sysfs_metric_id.show = show_test_oa_id;
-}
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_cflgt2.h b/drivers/gpu/drm/i915/oa/i915_oa_cflgt2.h
deleted file mode 100644
index 6b862280ab78..000000000000
--- a/drivers/gpu/drm/i915/oa/i915_oa_cflgt2.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2018-2019 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#ifndef __I915_OA_CFLGT2_H__
-#define __I915_OA_CFLGT2_H__
-
-struct drm_i915_private;
-
-void i915_perf_load_test_config_cflgt2(struct drm_i915_private *dev_priv);
-
-#endif
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_cflgt3.c b/drivers/gpu/drm/i915/oa/i915_oa_cflgt3.c
deleted file mode 100644
index fc632dd890bf..000000000000
--- a/drivers/gpu/drm/i915/oa/i915_oa_cflgt3.c
+++ /dev/null
@@ -1,89 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright © 2018-2019 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#include <linux/sysfs.h>
-
-#include "i915_drv.h"
-#include "i915_oa_cflgt3.h"
-
-static const struct i915_oa_reg b_counter_config_test_oa[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2744), 0x00800000 },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2724), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2770), 0x00000004 },
- { _MMIO(0x2774), 0x00000000 },
- { _MMIO(0x2778), 0x00000003 },
- { _MMIO(0x277c), 0x00000000 },
- { _MMIO(0x2780), 0x00000007 },
- { _MMIO(0x2784), 0x00000000 },
- { _MMIO(0x2788), 0x00100002 },
- { _MMIO(0x278c), 0x0000fff7 },
- { _MMIO(0x2790), 0x00100002 },
- { _MMIO(0x2794), 0x0000ffcf },
- { _MMIO(0x2798), 0x00100082 },
- { _MMIO(0x279c), 0x0000ffef },
- { _MMIO(0x27a0), 0x001000c2 },
- { _MMIO(0x27a4), 0x0000ffe7 },
- { _MMIO(0x27a8), 0x00100001 },
- { _MMIO(0x27ac), 0x0000ffe7 },
-};
-
-static const struct i915_oa_reg flex_eu_config_test_oa[] = {
-};
-
-static const struct i915_oa_reg mux_config_test_oa[] = {
- { _MMIO(0x9840), 0x00000080 },
- { _MMIO(0x9888), 0x11810000 },
- { _MMIO(0x9888), 0x07810013 },
- { _MMIO(0x9888), 0x1f810000 },
- { _MMIO(0x9888), 0x1d810000 },
- { _MMIO(0x9888), 0x1b930040 },
- { _MMIO(0x9888), 0x07e54000 },
- { _MMIO(0x9888), 0x1f908000 },
- { _MMIO(0x9888), 0x11900000 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x53900000 },
- { _MMIO(0x9888), 0x45900000 },
- { _MMIO(0x9888), 0x33900000 },
-};
-
-static ssize_t
-show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "1\n");
-}
-
-void
-i915_perf_load_test_config_cflgt3(struct drm_i915_private *dev_priv)
-{
- strlcpy(dev_priv->perf.test_config.uuid,
- "577e8e2c-3fa0-4875-8743-3538d585e3b0",
- sizeof(dev_priv->perf.test_config.uuid));
- dev_priv->perf.test_config.id = 1;
-
- dev_priv->perf.test_config.mux_regs = mux_config_test_oa;
- dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
-
- dev_priv->perf.test_config.b_counter_regs = b_counter_config_test_oa;
- dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
-
- dev_priv->perf.test_config.flex_regs = flex_eu_config_test_oa;
- dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
-
- dev_priv->perf.test_config.sysfs_metric.name = "577e8e2c-3fa0-4875-8743-3538d585e3b0";
- dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs;
-
- dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr;
-
- dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id";
- dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444;
- dev_priv->perf.test_config.sysfs_metric_id.show = show_test_oa_id;
-}
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_cflgt3.h b/drivers/gpu/drm/i915/oa/i915_oa_cflgt3.h
deleted file mode 100644
index 4ca9d8f89b2f..000000000000
--- a/drivers/gpu/drm/i915/oa/i915_oa_cflgt3.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2018-2019 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#ifndef __I915_OA_CFLGT3_H__
-#define __I915_OA_CFLGT3_H__
-
-struct drm_i915_private;
-
-void i915_perf_load_test_config_cflgt3(struct drm_i915_private *dev_priv);
-
-#endif
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_chv.c b/drivers/gpu/drm/i915/oa/i915_oa_chv.c
deleted file mode 100644
index 6cd4e9921a8a..000000000000
--- a/drivers/gpu/drm/i915/oa/i915_oa_chv.c
+++ /dev/null
@@ -1,89 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright © 2018-2019 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#include <linux/sysfs.h>
-
-#include "i915_drv.h"
-#include "i915_oa_chv.h"
-
-static const struct i915_oa_reg b_counter_config_test_oa[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2744), 0x00800000 },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2724), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2770), 0x00000004 },
- { _MMIO(0x2774), 0x00000000 },
- { _MMIO(0x2778), 0x00000003 },
- { _MMIO(0x277c), 0x00000000 },
- { _MMIO(0x2780), 0x00000007 },
- { _MMIO(0x2784), 0x00000000 },
- { _MMIO(0x2788), 0x00100002 },
- { _MMIO(0x278c), 0x0000fff7 },
- { _MMIO(0x2790), 0x00100002 },
- { _MMIO(0x2794), 0x0000ffcf },
- { _MMIO(0x2798), 0x00100082 },
- { _MMIO(0x279c), 0x0000ffef },
- { _MMIO(0x27a0), 0x001000c2 },
- { _MMIO(0x27a4), 0x0000ffe7 },
- { _MMIO(0x27a8), 0x00100001 },
- { _MMIO(0x27ac), 0x0000ffe7 },
-};
-
-static const struct i915_oa_reg flex_eu_config_test_oa[] = {
-};
-
-static const struct i915_oa_reg mux_config_test_oa[] = {
- { _MMIO(0x9840), 0x000000a0 },
- { _MMIO(0x9888), 0x59800000 },
- { _MMIO(0x9888), 0x59800001 },
- { _MMIO(0x9888), 0x338b0000 },
- { _MMIO(0x9888), 0x258b0066 },
- { _MMIO(0x9888), 0x058b0000 },
- { _MMIO(0x9888), 0x038b0000 },
- { _MMIO(0x9888), 0x03844000 },
- { _MMIO(0x9888), 0x47800080 },
- { _MMIO(0x9888), 0x57800000 },
- { _MMIO(0x1823a4), 0x00000000 },
- { _MMIO(0x9888), 0x59800000 },
- { _MMIO(0x9840), 0x00000080 },
-};
-
-static ssize_t
-show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "1\n");
-}
-
-void
-i915_perf_load_test_config_chv(struct drm_i915_private *dev_priv)
-{
- strlcpy(dev_priv->perf.test_config.uuid,
- "4a534b07-cba3-414d-8d60-874830e883aa",
- sizeof(dev_priv->perf.test_config.uuid));
- dev_priv->perf.test_config.id = 1;
-
- dev_priv->perf.test_config.mux_regs = mux_config_test_oa;
- dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
-
- dev_priv->perf.test_config.b_counter_regs = b_counter_config_test_oa;
- dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
-
- dev_priv->perf.test_config.flex_regs = flex_eu_config_test_oa;
- dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
-
- dev_priv->perf.test_config.sysfs_metric.name = "4a534b07-cba3-414d-8d60-874830e883aa";
- dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs;
-
- dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr;
-
- dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id";
- dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444;
- dev_priv->perf.test_config.sysfs_metric_id.show = show_test_oa_id;
-}
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_chv.h b/drivers/gpu/drm/i915/oa/i915_oa_chv.h
deleted file mode 100644
index 3cac7bbc9c71..000000000000
--- a/drivers/gpu/drm/i915/oa/i915_oa_chv.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2018-2019 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#ifndef __I915_OA_CHV_H__
-#define __I915_OA_CHV_H__
-
-struct drm_i915_private;
-
-void i915_perf_load_test_config_chv(struct drm_i915_private *dev_priv);
-
-#endif
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_cnl.c b/drivers/gpu/drm/i915/oa/i915_oa_cnl.c
deleted file mode 100644
index 1041e8914993..000000000000
--- a/drivers/gpu/drm/i915/oa/i915_oa_cnl.c
+++ /dev/null
@@ -1,101 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright © 2018-2019 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#include <linux/sysfs.h>
-
-#include "i915_drv.h"
-#include "i915_oa_cnl.h"
-
-static const struct i915_oa_reg b_counter_config_test_oa[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0xf0800000 },
- { _MMIO(0x2770), 0x00000004 },
- { _MMIO(0x2774), 0x0000ffff },
- { _MMIO(0x2778), 0x00000003 },
- { _MMIO(0x277c), 0x0000ffff },
- { _MMIO(0x2780), 0x00000007 },
- { _MMIO(0x2784), 0x0000ffff },
- { _MMIO(0x2788), 0x00100002 },
- { _MMIO(0x278c), 0x0000fff7 },
- { _MMIO(0x2790), 0x00100002 },
- { _MMIO(0x2794), 0x0000ffcf },
- { _MMIO(0x2798), 0x00100082 },
- { _MMIO(0x279c), 0x0000ffef },
- { _MMIO(0x27a0), 0x001000c2 },
- { _MMIO(0x27a4), 0x0000ffe7 },
- { _MMIO(0x27a8), 0x00100001 },
- { _MMIO(0x27ac), 0x0000ffe7 },
-};
-
-static const struct i915_oa_reg flex_eu_config_test_oa[] = {
-};
-
-static const struct i915_oa_reg mux_config_test_oa[] = {
- { _MMIO(0xd04), 0x00000200 },
- { _MMIO(0x9884), 0x00000007 },
- { _MMIO(0x9888), 0x17060000 },
- { _MMIO(0x9840), 0x00000000 },
- { _MMIO(0x9884), 0x00000007 },
- { _MMIO(0x9888), 0x13034000 },
- { _MMIO(0x9884), 0x00000007 },
- { _MMIO(0x9888), 0x07060066 },
- { _MMIO(0x9884), 0x00000007 },
- { _MMIO(0x9888), 0x05060000 },
- { _MMIO(0x9884), 0x00000007 },
- { _MMIO(0x9888), 0x0f080040 },
- { _MMIO(0x9884), 0x00000007 },
- { _MMIO(0x9888), 0x07091000 },
- { _MMIO(0x9884), 0x00000007 },
- { _MMIO(0x9888), 0x0f041000 },
- { _MMIO(0x9884), 0x00000007 },
- { _MMIO(0x9888), 0x1d004000 },
- { _MMIO(0x9884), 0x00000007 },
- { _MMIO(0x9888), 0x35000000 },
- { _MMIO(0x9884), 0x00000007 },
- { _MMIO(0x9888), 0x49000000 },
- { _MMIO(0x9884), 0x00000007 },
- { _MMIO(0x9888), 0x3d000000 },
- { _MMIO(0x9884), 0x00000007 },
- { _MMIO(0x9888), 0x31000000 },
-};
-
-static ssize_t
-show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "1\n");
-}
-
-void
-i915_perf_load_test_config_cnl(struct drm_i915_private *dev_priv)
-{
- strlcpy(dev_priv->perf.test_config.uuid,
- "db41edd4-d8e7-4730-ad11-b9a2d6833503",
- sizeof(dev_priv->perf.test_config.uuid));
- dev_priv->perf.test_config.id = 1;
-
- dev_priv->perf.test_config.mux_regs = mux_config_test_oa;
- dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
-
- dev_priv->perf.test_config.b_counter_regs = b_counter_config_test_oa;
- dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
-
- dev_priv->perf.test_config.flex_regs = flex_eu_config_test_oa;
- dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
-
- dev_priv->perf.test_config.sysfs_metric.name = "db41edd4-d8e7-4730-ad11-b9a2d6833503";
- dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs;
-
- dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr;
-
- dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id";
- dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444;
- dev_priv->perf.test_config.sysfs_metric_id.show = show_test_oa_id;
-}
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_cnl.h b/drivers/gpu/drm/i915/oa/i915_oa_cnl.h
deleted file mode 100644
index db379f5fcbb9..000000000000
--- a/drivers/gpu/drm/i915/oa/i915_oa_cnl.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2018-2019 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#ifndef __I915_OA_CNL_H__
-#define __I915_OA_CNL_H__
-
-struct drm_i915_private;
-
-void i915_perf_load_test_config_cnl(struct drm_i915_private *dev_priv);
-
-#endif
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_glk.c b/drivers/gpu/drm/i915/oa/i915_oa_glk.c
deleted file mode 100644
index bd15ebe9aeeb..000000000000
--- a/drivers/gpu/drm/i915/oa/i915_oa_glk.c
+++ /dev/null
@@ -1,88 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright © 2018-2019 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#include <linux/sysfs.h>
-
-#include "i915_drv.h"
-#include "i915_oa_glk.h"
-
-static const struct i915_oa_reg b_counter_config_test_oa[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2744), 0x00800000 },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2724), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2770), 0x00000004 },
- { _MMIO(0x2774), 0x00000000 },
- { _MMIO(0x2778), 0x00000003 },
- { _MMIO(0x277c), 0x00000000 },
- { _MMIO(0x2780), 0x00000007 },
- { _MMIO(0x2784), 0x00000000 },
- { _MMIO(0x2788), 0x00100002 },
- { _MMIO(0x278c), 0x0000fff7 },
- { _MMIO(0x2790), 0x00100002 },
- { _MMIO(0x2794), 0x0000ffcf },
- { _MMIO(0x2798), 0x00100082 },
- { _MMIO(0x279c), 0x0000ffef },
- { _MMIO(0x27a0), 0x001000c2 },
- { _MMIO(0x27a4), 0x0000ffe7 },
- { _MMIO(0x27a8), 0x00100001 },
- { _MMIO(0x27ac), 0x0000ffe7 },
-};
-
-static const struct i915_oa_reg flex_eu_config_test_oa[] = {
-};
-
-static const struct i915_oa_reg mux_config_test_oa[] = {
- { _MMIO(0x9840), 0x00000080 },
- { _MMIO(0x9888), 0x19800000 },
- { _MMIO(0x9888), 0x07800063 },
- { _MMIO(0x9888), 0x11800000 },
- { _MMIO(0x9888), 0x23810008 },
- { _MMIO(0x9888), 0x1d950400 },
- { _MMIO(0x9888), 0x0f922000 },
- { _MMIO(0x9888), 0x1f908000 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x55900000 },
- { _MMIO(0x9888), 0x47900000 },
- { _MMIO(0x9888), 0x33900000 },
-};
-
-static ssize_t
-show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "1\n");
-}
-
-void
-i915_perf_load_test_config_glk(struct drm_i915_private *dev_priv)
-{
- strlcpy(dev_priv->perf.test_config.uuid,
- "dd3fd789-e783-4204-8cd0-b671bbccb0cf",
- sizeof(dev_priv->perf.test_config.uuid));
- dev_priv->perf.test_config.id = 1;
-
- dev_priv->perf.test_config.mux_regs = mux_config_test_oa;
- dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
-
- dev_priv->perf.test_config.b_counter_regs = b_counter_config_test_oa;
- dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
-
- dev_priv->perf.test_config.flex_regs = flex_eu_config_test_oa;
- dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
-
- dev_priv->perf.test_config.sysfs_metric.name = "dd3fd789-e783-4204-8cd0-b671bbccb0cf";
- dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs;
-
- dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr;
-
- dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id";
- dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444;
- dev_priv->perf.test_config.sysfs_metric_id.show = show_test_oa_id;
-}
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_glk.h b/drivers/gpu/drm/i915/oa/i915_oa_glk.h
deleted file mode 100644
index 779f343efd11..000000000000
--- a/drivers/gpu/drm/i915/oa/i915_oa_glk.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2018-2019 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#ifndef __I915_OA_GLK_H__
-#define __I915_OA_GLK_H__
-
-struct drm_i915_private;
-
-void i915_perf_load_test_config_glk(struct drm_i915_private *dev_priv);
-
-#endif
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_hsw.c b/drivers/gpu/drm/i915/oa/i915_oa_hsw.c
deleted file mode 100644
index 133721a8619f..000000000000
--- a/drivers/gpu/drm/i915/oa/i915_oa_hsw.c
+++ /dev/null
@@ -1,118 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright © 2018-2019 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#include <linux/sysfs.h>
-
-#include "i915_drv.h"
-#include "i915_oa_hsw.h"
-
-static const struct i915_oa_reg b_counter_config_render_basic[] = {
- { _MMIO(0x2724), 0x00800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2714), 0x00800000 },
- { _MMIO(0x2710), 0x00000000 },
-};
-
-static const struct i915_oa_reg flex_eu_config_render_basic[] = {
-};
-
-static const struct i915_oa_reg mux_config_render_basic[] = {
- { _MMIO(0x9840), 0x00000080 },
- { _MMIO(0x253a4), 0x01600000 },
- { _MMIO(0x25440), 0x00100000 },
- { _MMIO(0x25128), 0x00000000 },
- { _MMIO(0x2691c), 0x00000800 },
- { _MMIO(0x26aa0), 0x01500000 },
- { _MMIO(0x26b9c), 0x00006000 },
- { _MMIO(0x2791c), 0x00000800 },
- { _MMIO(0x27aa0), 0x01500000 },
- { _MMIO(0x27b9c), 0x00006000 },
- { _MMIO(0x2641c), 0x00000400 },
- { _MMIO(0x25380), 0x00000010 },
- { _MMIO(0x2538c), 0x00000000 },
- { _MMIO(0x25384), 0x0800aaaa },
- { _MMIO(0x25400), 0x00000004 },
- { _MMIO(0x2540c), 0x06029000 },
- { _MMIO(0x25410), 0x00000002 },
- { _MMIO(0x25404), 0x5c30ffff },
- { _MMIO(0x25100), 0x00000016 },
- { _MMIO(0x25110), 0x00000400 },
- { _MMIO(0x25104), 0x00000000 },
- { _MMIO(0x26804), 0x00001211 },
- { _MMIO(0x26884), 0x00000100 },
- { _MMIO(0x26900), 0x00000002 },
- { _MMIO(0x26908), 0x00700000 },
- { _MMIO(0x26904), 0x00000000 },
- { _MMIO(0x26984), 0x00001022 },
- { _MMIO(0x26a04), 0x00000011 },
- { _MMIO(0x26a80), 0x00000006 },
- { _MMIO(0x26a88), 0x00000c02 },
- { _MMIO(0x26a84), 0x00000000 },
- { _MMIO(0x26b04), 0x00001000 },
- { _MMIO(0x26b80), 0x00000002 },
- { _MMIO(0x26b8c), 0x00000007 },
- { _MMIO(0x26b84), 0x00000000 },
- { _MMIO(0x27804), 0x00004844 },
- { _MMIO(0x27884), 0x00000400 },
- { _MMIO(0x27900), 0x00000002 },
- { _MMIO(0x27908), 0x0e000000 },
- { _MMIO(0x27904), 0x00000000 },
- { _MMIO(0x27984), 0x00004088 },
- { _MMIO(0x27a04), 0x00000044 },
- { _MMIO(0x27a80), 0x00000006 },
- { _MMIO(0x27a88), 0x00018040 },
- { _MMIO(0x27a84), 0x00000000 },
- { _MMIO(0x27b04), 0x00004000 },
- { _MMIO(0x27b80), 0x00000002 },
- { _MMIO(0x27b8c), 0x000000e0 },
- { _MMIO(0x27b84), 0x00000000 },
- { _MMIO(0x26104), 0x00002222 },
- { _MMIO(0x26184), 0x0c006666 },
- { _MMIO(0x26284), 0x04000000 },
- { _MMIO(0x26304), 0x04000000 },
- { _MMIO(0x26400), 0x00000002 },
- { _MMIO(0x26410), 0x000000a0 },
- { _MMIO(0x26404), 0x00000000 },
- { _MMIO(0x25420), 0x04108020 },
- { _MMIO(0x25424), 0x1284a420 },
- { _MMIO(0x2541c), 0x00000000 },
- { _MMIO(0x25428), 0x00042049 },
-};
-
-static ssize_t
-show_render_basic_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "1\n");
-}
-
-void
-i915_perf_load_test_config_hsw(struct drm_i915_private *dev_priv)
-{
- strlcpy(dev_priv->perf.test_config.uuid,
- "403d8832-1a27-4aa6-a64e-f5389ce7b212",
- sizeof(dev_priv->perf.test_config.uuid));
- dev_priv->perf.test_config.id = 1;
-
- dev_priv->perf.test_config.mux_regs = mux_config_render_basic;
- dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_render_basic);
-
- dev_priv->perf.test_config.b_counter_regs = b_counter_config_render_basic;
- dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_render_basic);
-
- dev_priv->perf.test_config.flex_regs = flex_eu_config_render_basic;
- dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_render_basic);
-
- dev_priv->perf.test_config.sysfs_metric.name = "403d8832-1a27-4aa6-a64e-f5389ce7b212";
- dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs;
-
- dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr;
-
- dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id";
- dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444;
- dev_priv->perf.test_config.sysfs_metric_id.show = show_render_basic_id;
-}
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_hsw.h b/drivers/gpu/drm/i915/oa/i915_oa_hsw.h
deleted file mode 100644
index ba97f732f136..000000000000
--- a/drivers/gpu/drm/i915/oa/i915_oa_hsw.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2018-2019 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#ifndef __I915_OA_HSW_H__
-#define __I915_OA_HSW_H__
-
-struct drm_i915_private;
-
-void i915_perf_load_test_config_hsw(struct drm_i915_private *dev_priv);
-
-#endif
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_icl.c b/drivers/gpu/drm/i915/oa/i915_oa_icl.c
deleted file mode 100644
index 2d92041b754f..000000000000
--- a/drivers/gpu/drm/i915/oa/i915_oa_icl.c
+++ /dev/null
@@ -1,98 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright © 2018-2019 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#include <linux/sysfs.h>
-
-#include "i915_drv.h"
-#include "i915_oa_icl.h"
-
-static const struct i915_oa_reg b_counter_config_test_oa[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0xf0800000 },
- { _MMIO(0x2770), 0x00000004 },
- { _MMIO(0x2774), 0x0000ffff },
- { _MMIO(0x2778), 0x00000003 },
- { _MMIO(0x277c), 0x0000ffff },
- { _MMIO(0x2780), 0x00000007 },
- { _MMIO(0x2784), 0x0000ffff },
- { _MMIO(0x2788), 0x00100002 },
- { _MMIO(0x278c), 0x0000fff7 },
- { _MMIO(0x2790), 0x00100002 },
- { _MMIO(0x2794), 0x0000ffcf },
- { _MMIO(0x2798), 0x00100082 },
- { _MMIO(0x279c), 0x0000ffef },
- { _MMIO(0x27a0), 0x001000c2 },
- { _MMIO(0x27a4), 0x0000ffe7 },
- { _MMIO(0x27a8), 0x00100001 },
- { _MMIO(0x27ac), 0x0000ffe7 },
-};
-
-static const struct i915_oa_reg flex_eu_config_test_oa[] = {
-};
-
-static const struct i915_oa_reg mux_config_test_oa[] = {
- { _MMIO(0xd04), 0x00000200 },
- { _MMIO(0x9840), 0x00000000 },
- { _MMIO(0x9884), 0x00000000 },
- { _MMIO(0x9888), 0x10060000 },
- { _MMIO(0x9888), 0x22060000 },
- { _MMIO(0x9888), 0x16060000 },
- { _MMIO(0x9888), 0x24060000 },
- { _MMIO(0x9888), 0x18060000 },
- { _MMIO(0x9888), 0x1a060000 },
- { _MMIO(0x9888), 0x12060000 },
- { _MMIO(0x9888), 0x14060000 },
- { _MMIO(0x9888), 0x10060000 },
- { _MMIO(0x9888), 0x22060000 },
- { _MMIO(0x9884), 0x00000003 },
- { _MMIO(0x9888), 0x16130000 },
- { _MMIO(0x9888), 0x24000001 },
- { _MMIO(0x9888), 0x0e130056 },
- { _MMIO(0x9888), 0x10130000 },
- { _MMIO(0x9888), 0x1a130000 },
- { _MMIO(0x9888), 0x541f0001 },
- { _MMIO(0x9888), 0x181f0000 },
- { _MMIO(0x9888), 0x4c1f0000 },
- { _MMIO(0x9888), 0x301f0000 },
-};
-
-static ssize_t
-show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "1\n");
-}
-
-void
-i915_perf_load_test_config_icl(struct drm_i915_private *dev_priv)
-{
- strlcpy(dev_priv->perf.test_config.uuid,
- "a291665e-244b-4b76-9b9a-01de9d3c8068",
- sizeof(dev_priv->perf.test_config.uuid));
- dev_priv->perf.test_config.id = 1;
-
- dev_priv->perf.test_config.mux_regs = mux_config_test_oa;
- dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
-
- dev_priv->perf.test_config.b_counter_regs = b_counter_config_test_oa;
- dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
-
- dev_priv->perf.test_config.flex_regs = flex_eu_config_test_oa;
- dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
-
- dev_priv->perf.test_config.sysfs_metric.name = "a291665e-244b-4b76-9b9a-01de9d3c8068";
- dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs;
-
- dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr;
-
- dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id";
- dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444;
- dev_priv->perf.test_config.sysfs_metric_id.show = show_test_oa_id;
-}
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_icl.h b/drivers/gpu/drm/i915/oa/i915_oa_icl.h
deleted file mode 100644
index 5c64112d720e..000000000000
--- a/drivers/gpu/drm/i915/oa/i915_oa_icl.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2018-2019 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#ifndef __I915_OA_ICL_H__
-#define __I915_OA_ICL_H__
-
-struct drm_i915_private;
-
-void i915_perf_load_test_config_icl(struct drm_i915_private *dev_priv);
-
-#endif
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_kblgt2.c b/drivers/gpu/drm/i915/oa/i915_oa_kblgt2.c
deleted file mode 100644
index 1c3a67c9cfe0..000000000000
--- a/drivers/gpu/drm/i915/oa/i915_oa_kblgt2.c
+++ /dev/null
@@ -1,89 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright © 2018-2019 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#include <linux/sysfs.h>
-
-#include "i915_drv.h"
-#include "i915_oa_kblgt2.h"
-
-static const struct i915_oa_reg b_counter_config_test_oa[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2744), 0x00800000 },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2724), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2770), 0x00000004 },
- { _MMIO(0x2774), 0x00000000 },
- { _MMIO(0x2778), 0x00000003 },
- { _MMIO(0x277c), 0x00000000 },
- { _MMIO(0x2780), 0x00000007 },
- { _MMIO(0x2784), 0x00000000 },
- { _MMIO(0x2788), 0x00100002 },
- { _MMIO(0x278c), 0x0000fff7 },
- { _MMIO(0x2790), 0x00100002 },
- { _MMIO(0x2794), 0x0000ffcf },
- { _MMIO(0x2798), 0x00100082 },
- { _MMIO(0x279c), 0x0000ffef },
- { _MMIO(0x27a0), 0x001000c2 },
- { _MMIO(0x27a4), 0x0000ffe7 },
- { _MMIO(0x27a8), 0x00100001 },
- { _MMIO(0x27ac), 0x0000ffe7 },
-};
-
-static const struct i915_oa_reg flex_eu_config_test_oa[] = {
-};
-
-static const struct i915_oa_reg mux_config_test_oa[] = {
- { _MMIO(0x9840), 0x00000080 },
- { _MMIO(0x9888), 0x11810000 },
- { _MMIO(0x9888), 0x07810013 },
- { _MMIO(0x9888), 0x1f810000 },
- { _MMIO(0x9888), 0x1d810000 },
- { _MMIO(0x9888), 0x1b930040 },
- { _MMIO(0x9888), 0x07e54000 },
- { _MMIO(0x9888), 0x1f908000 },
- { _MMIO(0x9888), 0x11900000 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x53900000 },
- { _MMIO(0x9888), 0x45900000 },
- { _MMIO(0x9888), 0x33900000 },
-};
-
-static ssize_t
-show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "1\n");
-}
-
-void
-i915_perf_load_test_config_kblgt2(struct drm_i915_private *dev_priv)
-{
- strlcpy(dev_priv->perf.test_config.uuid,
- "baa3c7e4-52b6-4b85-801e-465a94b746dd",
- sizeof(dev_priv->perf.test_config.uuid));
- dev_priv->perf.test_config.id = 1;
-
- dev_priv->perf.test_config.mux_regs = mux_config_test_oa;
- dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
-
- dev_priv->perf.test_config.b_counter_regs = b_counter_config_test_oa;
- dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
-
- dev_priv->perf.test_config.flex_regs = flex_eu_config_test_oa;
- dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
-
- dev_priv->perf.test_config.sysfs_metric.name = "baa3c7e4-52b6-4b85-801e-465a94b746dd";
- dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs;
-
- dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr;
-
- dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id";
- dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444;
- dev_priv->perf.test_config.sysfs_metric_id.show = show_test_oa_id;
-}
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_kblgt2.h b/drivers/gpu/drm/i915/oa/i915_oa_kblgt2.h
deleted file mode 100644
index 810532fa6b63..000000000000
--- a/drivers/gpu/drm/i915/oa/i915_oa_kblgt2.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2018-2019 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#ifndef __I915_OA_KBLGT2_H__
-#define __I915_OA_KBLGT2_H__
-
-struct drm_i915_private;
-
-void i915_perf_load_test_config_kblgt2(struct drm_i915_private *dev_priv);
-
-#endif
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_kblgt3.c b/drivers/gpu/drm/i915/oa/i915_oa_kblgt3.c
deleted file mode 100644
index ebbe5a9c9fdc..000000000000
--- a/drivers/gpu/drm/i915/oa/i915_oa_kblgt3.c
+++ /dev/null
@@ -1,89 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright © 2018-2019 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#include <linux/sysfs.h>
-
-#include "i915_drv.h"
-#include "i915_oa_kblgt3.h"
-
-static const struct i915_oa_reg b_counter_config_test_oa[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2744), 0x00800000 },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2724), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2770), 0x00000004 },
- { _MMIO(0x2774), 0x00000000 },
- { _MMIO(0x2778), 0x00000003 },
- { _MMIO(0x277c), 0x00000000 },
- { _MMIO(0x2780), 0x00000007 },
- { _MMIO(0x2784), 0x00000000 },
- { _MMIO(0x2788), 0x00100002 },
- { _MMIO(0x278c), 0x0000fff7 },
- { _MMIO(0x2790), 0x00100002 },
- { _MMIO(0x2794), 0x0000ffcf },
- { _MMIO(0x2798), 0x00100082 },
- { _MMIO(0x279c), 0x0000ffef },
- { _MMIO(0x27a0), 0x001000c2 },
- { _MMIO(0x27a4), 0x0000ffe7 },
- { _MMIO(0x27a8), 0x00100001 },
- { _MMIO(0x27ac), 0x0000ffe7 },
-};
-
-static const struct i915_oa_reg flex_eu_config_test_oa[] = {
-};
-
-static const struct i915_oa_reg mux_config_test_oa[] = {
- { _MMIO(0x9840), 0x00000080 },
- { _MMIO(0x9888), 0x11810000 },
- { _MMIO(0x9888), 0x07810013 },
- { _MMIO(0x9888), 0x1f810000 },
- { _MMIO(0x9888), 0x1d810000 },
- { _MMIO(0x9888), 0x1b930040 },
- { _MMIO(0x9888), 0x07e54000 },
- { _MMIO(0x9888), 0x1f908000 },
- { _MMIO(0x9888), 0x11900000 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x53900000 },
- { _MMIO(0x9888), 0x45900000 },
- { _MMIO(0x9888), 0x33900000 },
-};
-
-static ssize_t
-show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "1\n");
-}
-
-void
-i915_perf_load_test_config_kblgt3(struct drm_i915_private *dev_priv)
-{
- strlcpy(dev_priv->perf.test_config.uuid,
- "f1792f32-6db2-4b50-b4b2-557128f1688d",
- sizeof(dev_priv->perf.test_config.uuid));
- dev_priv->perf.test_config.id = 1;
-
- dev_priv->perf.test_config.mux_regs = mux_config_test_oa;
- dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
-
- dev_priv->perf.test_config.b_counter_regs = b_counter_config_test_oa;
- dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
-
- dev_priv->perf.test_config.flex_regs = flex_eu_config_test_oa;
- dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
-
- dev_priv->perf.test_config.sysfs_metric.name = "f1792f32-6db2-4b50-b4b2-557128f1688d";
- dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs;
-
- dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr;
-
- dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id";
- dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444;
- dev_priv->perf.test_config.sysfs_metric_id.show = show_test_oa_id;
-}
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_kblgt3.h b/drivers/gpu/drm/i915/oa/i915_oa_kblgt3.h
deleted file mode 100644
index 13d70456fabd..000000000000
--- a/drivers/gpu/drm/i915/oa/i915_oa_kblgt3.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2018-2019 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#ifndef __I915_OA_KBLGT3_H__
-#define __I915_OA_KBLGT3_H__
-
-struct drm_i915_private;
-
-void i915_perf_load_test_config_kblgt3(struct drm_i915_private *dev_priv);
-
-#endif
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_sklgt2.c b/drivers/gpu/drm/i915/oa/i915_oa_sklgt2.c
deleted file mode 100644
index 1bc359ed34e8..000000000000
--- a/drivers/gpu/drm/i915/oa/i915_oa_sklgt2.c
+++ /dev/null
@@ -1,88 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright © 2018-2019 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#include <linux/sysfs.h>
-
-#include "i915_drv.h"
-#include "i915_oa_sklgt2.h"
-
-static const struct i915_oa_reg b_counter_config_test_oa[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2724), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2770), 0x00000004 },
- { _MMIO(0x2774), 0x00000000 },
- { _MMIO(0x2778), 0x00000003 },
- { _MMIO(0x277c), 0x00000000 },
- { _MMIO(0x2780), 0x00000007 },
- { _MMIO(0x2784), 0x00000000 },
- { _MMIO(0x2788), 0x00100002 },
- { _MMIO(0x278c), 0x0000fff7 },
- { _MMIO(0x2790), 0x00100002 },
- { _MMIO(0x2794), 0x0000ffcf },
- { _MMIO(0x2798), 0x00100082 },
- { _MMIO(0x279c), 0x0000ffef },
- { _MMIO(0x27a0), 0x001000c2 },
- { _MMIO(0x27a4), 0x0000ffe7 },
- { _MMIO(0x27a8), 0x00100001 },
- { _MMIO(0x27ac), 0x0000ffe7 },
-};
-
-static const struct i915_oa_reg flex_eu_config_test_oa[] = {
-};
-
-static const struct i915_oa_reg mux_config_test_oa[] = {
- { _MMIO(0x9840), 0x00000080 },
- { _MMIO(0x9888), 0x11810000 },
- { _MMIO(0x9888), 0x07810016 },
- { _MMIO(0x9888), 0x1f810000 },
- { _MMIO(0x9888), 0x1d810000 },
- { _MMIO(0x9888), 0x1b930040 },
- { _MMIO(0x9888), 0x07e54000 },
- { _MMIO(0x9888), 0x1f908000 },
- { _MMIO(0x9888), 0x11900000 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x53900000 },
- { _MMIO(0x9888), 0x45900000 },
- { _MMIO(0x9888), 0x33900000 },
-};
-
-static ssize_t
-show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "1\n");
-}
-
-void
-i915_perf_load_test_config_sklgt2(struct drm_i915_private *dev_priv)
-{
- strlcpy(dev_priv->perf.test_config.uuid,
- "1651949f-0ac0-4cb1-a06f-dafd74a407d1",
- sizeof(dev_priv->perf.test_config.uuid));
- dev_priv->perf.test_config.id = 1;
-
- dev_priv->perf.test_config.mux_regs = mux_config_test_oa;
- dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
-
- dev_priv->perf.test_config.b_counter_regs = b_counter_config_test_oa;
- dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
-
- dev_priv->perf.test_config.flex_regs = flex_eu_config_test_oa;
- dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
-
- dev_priv->perf.test_config.sysfs_metric.name = "1651949f-0ac0-4cb1-a06f-dafd74a407d1";
- dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs;
-
- dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr;
-
- dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id";
- dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444;
- dev_priv->perf.test_config.sysfs_metric_id.show = show_test_oa_id;
-}
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_sklgt2.h b/drivers/gpu/drm/i915/oa/i915_oa_sklgt2.h
deleted file mode 100644
index fda70c51a6ec..000000000000
--- a/drivers/gpu/drm/i915/oa/i915_oa_sklgt2.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2018-2019 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#ifndef __I915_OA_SKLGT2_H__
-#define __I915_OA_SKLGT2_H__
-
-struct drm_i915_private;
-
-void i915_perf_load_test_config_sklgt2(struct drm_i915_private *dev_priv);
-
-#endif
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_sklgt3.c b/drivers/gpu/drm/i915/oa/i915_oa_sklgt3.c
deleted file mode 100644
index 6e352f881310..000000000000
--- a/drivers/gpu/drm/i915/oa/i915_oa_sklgt3.c
+++ /dev/null
@@ -1,89 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright © 2018-2019 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#include <linux/sysfs.h>
-
-#include "i915_drv.h"
-#include "i915_oa_sklgt3.h"
-
-static const struct i915_oa_reg b_counter_config_test_oa[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2744), 0x00800000 },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2724), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2770), 0x00000004 },
- { _MMIO(0x2774), 0x00000000 },
- { _MMIO(0x2778), 0x00000003 },
- { _MMIO(0x277c), 0x00000000 },
- { _MMIO(0x2780), 0x00000007 },
- { _MMIO(0x2784), 0x00000000 },
- { _MMIO(0x2788), 0x00100002 },
- { _MMIO(0x278c), 0x0000fff7 },
- { _MMIO(0x2790), 0x00100002 },
- { _MMIO(0x2794), 0x0000ffcf },
- { _MMIO(0x2798), 0x00100082 },
- { _MMIO(0x279c), 0x0000ffef },
- { _MMIO(0x27a0), 0x001000c2 },
- { _MMIO(0x27a4), 0x0000ffe7 },
- { _MMIO(0x27a8), 0x00100001 },
- { _MMIO(0x27ac), 0x0000ffe7 },
-};
-
-static const struct i915_oa_reg flex_eu_config_test_oa[] = {
-};
-
-static const struct i915_oa_reg mux_config_test_oa[] = {
- { _MMIO(0x9840), 0x00000080 },
- { _MMIO(0x9888), 0x11810000 },
- { _MMIO(0x9888), 0x07810013 },
- { _MMIO(0x9888), 0x1f810000 },
- { _MMIO(0x9888), 0x1d810000 },
- { _MMIO(0x9888), 0x1b930040 },
- { _MMIO(0x9888), 0x07e54000 },
- { _MMIO(0x9888), 0x1f908000 },
- { _MMIO(0x9888), 0x11900000 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x53900000 },
- { _MMIO(0x9888), 0x45900000 },
- { _MMIO(0x9888), 0x33900000 },
-};
-
-static ssize_t
-show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "1\n");
-}
-
-void
-i915_perf_load_test_config_sklgt3(struct drm_i915_private *dev_priv)
-{
- strlcpy(dev_priv->perf.test_config.uuid,
- "2b985803-d3c9-4629-8a4f-634bfecba0e8",
- sizeof(dev_priv->perf.test_config.uuid));
- dev_priv->perf.test_config.id = 1;
-
- dev_priv->perf.test_config.mux_regs = mux_config_test_oa;
- dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
-
- dev_priv->perf.test_config.b_counter_regs = b_counter_config_test_oa;
- dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
-
- dev_priv->perf.test_config.flex_regs = flex_eu_config_test_oa;
- dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
-
- dev_priv->perf.test_config.sysfs_metric.name = "2b985803-d3c9-4629-8a4f-634bfecba0e8";
- dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs;
-
- dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr;
-
- dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id";
- dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444;
- dev_priv->perf.test_config.sysfs_metric_id.show = show_test_oa_id;
-}
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_sklgt3.h b/drivers/gpu/drm/i915/oa/i915_oa_sklgt3.h
deleted file mode 100644
index df74eba5799e..000000000000
--- a/drivers/gpu/drm/i915/oa/i915_oa_sklgt3.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2018-2019 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#ifndef __I915_OA_SKLGT3_H__
-#define __I915_OA_SKLGT3_H__
-
-struct drm_i915_private;
-
-void i915_perf_load_test_config_sklgt3(struct drm_i915_private *dev_priv);
-
-#endif
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_sklgt4.c b/drivers/gpu/drm/i915/oa/i915_oa_sklgt4.c
deleted file mode 100644
index 8f345115a306..000000000000
--- a/drivers/gpu/drm/i915/oa/i915_oa_sklgt4.c
+++ /dev/null
@@ -1,89 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright © 2018-2019 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#include <linux/sysfs.h>
-
-#include "i915_drv.h"
-#include "i915_oa_sklgt4.h"
-
-static const struct i915_oa_reg b_counter_config_test_oa[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2744), 0x00800000 },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2724), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2770), 0x00000004 },
- { _MMIO(0x2774), 0x00000000 },
- { _MMIO(0x2778), 0x00000003 },
- { _MMIO(0x277c), 0x00000000 },
- { _MMIO(0x2780), 0x00000007 },
- { _MMIO(0x2784), 0x00000000 },
- { _MMIO(0x2788), 0x00100002 },
- { _MMIO(0x278c), 0x0000fff7 },
- { _MMIO(0x2790), 0x00100002 },
- { _MMIO(0x2794), 0x0000ffcf },
- { _MMIO(0x2798), 0x00100082 },
- { _MMIO(0x279c), 0x0000ffef },
- { _MMIO(0x27a0), 0x001000c2 },
- { _MMIO(0x27a4), 0x0000ffe7 },
- { _MMIO(0x27a8), 0x00100001 },
- { _MMIO(0x27ac), 0x0000ffe7 },
-};
-
-static const struct i915_oa_reg flex_eu_config_test_oa[] = {
-};
-
-static const struct i915_oa_reg mux_config_test_oa[] = {
- { _MMIO(0x9840), 0x00000080 },
- { _MMIO(0x9888), 0x11810000 },
- { _MMIO(0x9888), 0x07810013 },
- { _MMIO(0x9888), 0x1f810000 },
- { _MMIO(0x9888), 0x1d810000 },
- { _MMIO(0x9888), 0x1b930040 },
- { _MMIO(0x9888), 0x07e54000 },
- { _MMIO(0x9888), 0x1f908000 },
- { _MMIO(0x9888), 0x11900000 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x53900000 },
- { _MMIO(0x9888), 0x45900000 },
- { _MMIO(0x9888), 0x33900000 },
-};
-
-static ssize_t
-show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "1\n");
-}
-
-void
-i915_perf_load_test_config_sklgt4(struct drm_i915_private *dev_priv)
-{
- strlcpy(dev_priv->perf.test_config.uuid,
- "882fa433-1f4a-4a67-a962-c741888fe5f5",
- sizeof(dev_priv->perf.test_config.uuid));
- dev_priv->perf.test_config.id = 1;
-
- dev_priv->perf.test_config.mux_regs = mux_config_test_oa;
- dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
-
- dev_priv->perf.test_config.b_counter_regs = b_counter_config_test_oa;
- dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
-
- dev_priv->perf.test_config.flex_regs = flex_eu_config_test_oa;
- dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
-
- dev_priv->perf.test_config.sysfs_metric.name = "882fa433-1f4a-4a67-a962-c741888fe5f5";
- dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs;
-
- dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr;
-
- dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id";
- dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444;
- dev_priv->perf.test_config.sysfs_metric_id.show = show_test_oa_id;
-}
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_sklgt4.h b/drivers/gpu/drm/i915/oa/i915_oa_sklgt4.h
deleted file mode 100644
index 378ab7ab78d5..000000000000
--- a/drivers/gpu/drm/i915/oa/i915_oa_sklgt4.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2018-2019 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#ifndef __I915_OA_SKLGT4_H__
-#define __I915_OA_SKLGT4_H__
-
-struct drm_i915_private;
-
-void i915_perf_load_test_config_sklgt4(struct drm_i915_private *dev_priv);
-
-#endif
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_tgl.c b/drivers/gpu/drm/i915/oa/i915_oa_tgl.c
deleted file mode 100644
index a29d93707345..000000000000
--- a/drivers/gpu/drm/i915/oa/i915_oa_tgl.c
+++ /dev/null
@@ -1,121 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright © 2018 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#include <linux/sysfs.h>
-
-#include "i915_drv.h"
-#include "i915_oa_tgl.h"
-
-static const struct i915_oa_reg b_counter_config_test_oa[] = {
- { _MMIO(0xD920), 0x00000000 },
- { _MMIO(0xD900), 0x00000000 },
- { _MMIO(0xD904), 0xF0800000 },
- { _MMIO(0xD910), 0x00000000 },
- { _MMIO(0xD914), 0xF0800000 },
- { _MMIO(0xDC40), 0x00FF0000 },
- { _MMIO(0xD940), 0x00000004 },
- { _MMIO(0xD944), 0x0000FFFF },
- { _MMIO(0xDC00), 0x00000004 },
- { _MMIO(0xDC04), 0x0000FFFF },
- { _MMIO(0xD948), 0x00000003 },
- { _MMIO(0xD94C), 0x0000FFFF },
- { _MMIO(0xDC08), 0x00000003 },
- { _MMIO(0xDC0C), 0x0000FFFF },
- { _MMIO(0xD950), 0x00000007 },
- { _MMIO(0xD954), 0x0000FFFF },
- { _MMIO(0xDC10), 0x00000007 },
- { _MMIO(0xDC14), 0x0000FFFF },
- { _MMIO(0xD958), 0x00100002 },
- { _MMIO(0xD95C), 0x0000FFF7 },
- { _MMIO(0xDC18), 0x00100002 },
- { _MMIO(0xDC1C), 0x0000FFF7 },
- { _MMIO(0xD960), 0x00100002 },
- { _MMIO(0xD964), 0x0000FFCF },
- { _MMIO(0xDC20), 0x00100002 },
- { _MMIO(0xDC24), 0x0000FFCF },
- { _MMIO(0xD968), 0x00100082 },
- { _MMIO(0xD96C), 0x0000FFEF },
- { _MMIO(0xDC28), 0x00100082 },
- { _MMIO(0xDC2C), 0x0000FFEF },
- { _MMIO(0xD970), 0x001000C2 },
- { _MMIO(0xD974), 0x0000FFE7 },
- { _MMIO(0xDC30), 0x001000C2 },
- { _MMIO(0xDC34), 0x0000FFE7 },
- { _MMIO(0xD978), 0x00100001 },
- { _MMIO(0xD97C), 0x0000FFE7 },
- { _MMIO(0xDC38), 0x00100001 },
- { _MMIO(0xDC3C), 0x0000FFE7 },
-};
-
-static const struct i915_oa_reg flex_eu_config_test_oa[] = {
-};
-
-static const struct i915_oa_reg mux_config_test_oa[] = {
- { _MMIO(0x0D04), 0x00000200 },
- { _MMIO(0x9840), 0x00000000 },
- { _MMIO(0x9884), 0x00000000 },
- { _MMIO(0x9888), 0x280E0000 },
- { _MMIO(0x9888), 0x1E0E0147 },
- { _MMIO(0x9888), 0x180E0000 },
- { _MMIO(0x9888), 0x160E0000 },
- { _MMIO(0x9888), 0x1E0F1000 },
- { _MMIO(0x9888), 0x1E104000 },
- { _MMIO(0x9888), 0x2E020100 },
- { _MMIO(0x9888), 0x2C030004 },
- { _MMIO(0x9888), 0x38003000 },
- { _MMIO(0x9888), 0x1E0A8000 },
- { _MMIO(0x9884), 0x00000003 },
- { _MMIO(0x9888), 0x49110000 },
- { _MMIO(0x9888), 0x5D101400 },
- { _MMIO(0x9888), 0x1D140020 },
- { _MMIO(0x9888), 0x1D1103A3 },
- { _MMIO(0x9888), 0x01110000 },
- { _MMIO(0x9888), 0x61111000 },
- { _MMIO(0x9888), 0x1F128000 },
- { _MMIO(0x9888), 0x17100000 },
- { _MMIO(0x9888), 0x55100630 },
- { _MMIO(0x9888), 0x57100000 },
- { _MMIO(0x9888), 0x31100000 },
- { _MMIO(0x9884), 0x00000003 },
- { _MMIO(0x9888), 0x65100002 },
- { _MMIO(0x9884), 0x00000000 },
- { _MMIO(0x9888), 0x42000001 },
-};
-
-static ssize_t
-show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "1\n");
-}
-
-void
-i915_perf_load_test_config_tgl(struct drm_i915_private *dev_priv)
-{
- strlcpy(dev_priv->perf.test_config.uuid,
- "80a833f0-2504-4321-8894-e9277844ce7b",
- sizeof(dev_priv->perf.test_config.uuid));
- dev_priv->perf.test_config.id = 1;
-
- dev_priv->perf.test_config.mux_regs = mux_config_test_oa;
- dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
-
- dev_priv->perf.test_config.b_counter_regs = b_counter_config_test_oa;
- dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
-
- dev_priv->perf.test_config.flex_regs = flex_eu_config_test_oa;
- dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
-
- dev_priv->perf.test_config.sysfs_metric.name = "80a833f0-2504-4321-8894-e9277844ce7b";
- dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs;
-
- dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr;
-
- dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id";
- dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444;
- dev_priv->perf.test_config.sysfs_metric_id.show = show_test_oa_id;
-}
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_tgl.h b/drivers/gpu/drm/i915/oa/i915_oa_tgl.h
deleted file mode 100644
index 4c25f0be825c..000000000000
--- a/drivers/gpu/drm/i915/oa/i915_oa_tgl.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2018 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#ifndef __I915_OA_TGL_H__
-#define __I915_OA_TGL_H__
-
-struct drm_i915_private;
-
-void i915_perf_load_test_config_tgl(struct drm_i915_private *dev_priv);
-
-#endif
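All of the deleted i915_oa_* pairs are instances of one autogenerated template: three register tables (b-counter, flex-EU, mux) plus a loader that publishes the test config under its UUID with a read-only sysfs "id" attribute. A minimal sketch of that shared sysfs shape (identifiers here are illustrative, not i915 symbols):

#include <linux/device.h>
#include <linux/sysfs.h>

/* Read-only "id" attribute; every deleted loader hard-coded id 1. */
static ssize_t show_config_id(struct device *kdev,
			      struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "1\n");
}

static struct device_attribute config_id_attr = {
	.attr = { .name = "id", .mode = 0444 },	/* world-readable */
	.show = show_config_id,
};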
diff --git a/drivers/gpu/drm/i915/selftests/i915_active.c b/drivers/gpu/drm/i915/selftests/i915_active.c
index 68bbb1580162..4002c984c2e0 100644
--- a/drivers/gpu/drm/i915/selftests/i915_active.c
+++ b/drivers/gpu/drm/i915/selftests/i915_active.c
@@ -153,7 +153,7 @@ static int live_active_wait(void *arg)
if (IS_ERR(active))
return PTR_ERR(active);
- i915_active_wait(&active->base);
+ __i915_active_wait(&active->base, TASK_UNINTERRUPTIBLE);
if (!READ_ONCE(active->retired)) {
struct drm_printer p = drm_err_printer(__func__);
@@ -228,11 +228,11 @@ static int live_active_barrier(void *arg)
}
i915_active_release(&active->base);
+ if (err)
+ goto out;
- if (err == 0)
- err = i915_active_wait(&active->base);
-
- if (err == 0 && !READ_ONCE(active->retired)) {
+ __i915_active_wait(&active->base, TASK_UNINTERRUPTIBLE);
+ if (!READ_ONCE(active->retired)) {
pr_err("i915_active not retired after flushing barriers!\n");
err = -EINVAL;
}
@@ -277,7 +277,7 @@ static struct intel_engine_cs *node_to_barrier(struct active_node *it)
void i915_active_print(struct i915_active *ref, struct drm_printer *m)
{
- drm_printf(m, "active %pS:%pS\n", ref->active, ref->retire);
+ drm_printf(m, "active %ps:%ps\n", ref->active, ref->retire);
drm_printf(m, "\tcount: %d\n", atomic_read(&ref->count));
drm_printf(m, "\tpreallocated barriers? %s\n",
yesno(!llist_empty(&ref->preallocated_barriers)));
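The %pS -> %ps switch above is deliberate: for a plain function pointer, %ps prints the bare symbol name, while %pS appends offset/size (the form meant for backtrace addresses). A tiny illustration, assuming a hypothetical callback:

#include <linux/printk.h>

static void my_retire_cb(void)
{
}

static void report(void)
{
	/* Prints "retire: my_retire_cb"; %pS would add "+0x0/0x8". */
	pr_info("retire: %ps\n", my_retire_cb);
}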
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem.c b/drivers/gpu/drm/i915/selftests/i915_gem.c
index 623759b73bb4..88d400b9df88 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem.c
@@ -125,8 +125,6 @@ static void pm_resume(struct drm_i915_private *i915)
*/
with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
i915_ggtt_resume(&i915->ggtt);
- i915_gem_restore_fences(&i915->ggtt);
-
i915_gem_resume(i915);
}
}
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
index 06ef88510209..028baae9631f 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
@@ -45,8 +45,8 @@ static void quirk_add(struct drm_i915_gem_object *obj,
static int populate_ggtt(struct i915_ggtt *ggtt, struct list_head *objects)
{
- unsigned long unbound, bound, count;
struct drm_i915_gem_object *obj;
+ unsigned long count;
count = 0;
do {
@@ -72,30 +72,6 @@ static int populate_ggtt(struct i915_ggtt *ggtt, struct list_head *objects)
pr_debug("Filled GGTT with %lu pages [%llu total]\n",
count, ggtt->vm.total / PAGE_SIZE);
- bound = 0;
- unbound = 0;
- list_for_each_entry(obj, objects, st_link) {
- GEM_BUG_ON(!obj->mm.quirked);
-
- if (atomic_read(&obj->bind_count))
- bound++;
- else
- unbound++;
- }
- GEM_BUG_ON(bound + unbound != count);
-
- if (unbound) {
- pr_err("%s: Found %lu objects unbound, expected %u!\n",
- __func__, unbound, 0);
- return -EINVAL;
- }
-
- if (bound != count) {
- pr_err("%s: Found %lu objects bound, expected %lu!\n",
- __func__, bound, count);
- return -EINVAL;
- }
-
if (list_empty(&ggtt->vm.bound_list)) {
pr_err("No objects on the GGTT inactive list!\n");
return -EINVAL;
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index b342bef5e7c9..5d2a02fcf595 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -1229,7 +1229,6 @@ static void track_vma_bind(struct i915_vma *vma)
{
struct drm_i915_gem_object *obj = vma->obj;
- atomic_inc(&obj->bind_count); /* track for eviction later */
__i915_gem_object_pin_pages(obj);
GEM_BUG_ON(vma->pages);
diff --git a/drivers/gpu/drm/i915/selftests/i915_perf.c b/drivers/gpu/drm/i915/selftests/i915_perf.c
index d1a1568c47ba..5608fab98d5d 100644
--- a/drivers/gpu/drm/i915/selftests/i915_perf.c
+++ b/drivers/gpu/drm/i915/selftests/i915_perf.c
@@ -14,10 +14,85 @@
#include "igt_flush_test.h"
#include "lib_sw_fence.h"
+#define TEST_OA_CONFIG_UUID "12345678-1234-1234-1234-1234567890ab"
+
+static int
+alloc_empty_config(struct i915_perf *perf)
+{
+ struct i915_oa_config *oa_config;
+
+ oa_config = kzalloc(sizeof(*oa_config), GFP_KERNEL);
+ if (!oa_config)
+ return -ENOMEM;
+
+ oa_config->perf = perf;
+ kref_init(&oa_config->ref);
+
+ strlcpy(oa_config->uuid, TEST_OA_CONFIG_UUID, sizeof(oa_config->uuid));
+
+ mutex_lock(&perf->metrics_lock);
+
+ oa_config->id = idr_alloc(&perf->metrics_idr, oa_config, 2, 0, GFP_KERNEL);
+ if (oa_config->id < 0) {
+ mutex_unlock(&perf->metrics_lock);
+ i915_oa_config_put(oa_config);
+ return -ENOMEM;
+ }
+
+ mutex_unlock(&perf->metrics_lock);
+
+ return 0;
+}
+
+static void
+destroy_empty_config(struct i915_perf *perf)
+{
+ struct i915_oa_config *oa_config = NULL, *tmp;
+ int id;
+
+ mutex_lock(&perf->metrics_lock);
+
+ idr_for_each_entry(&perf->metrics_idr, tmp, id) {
+ if (!strcmp(tmp->uuid, TEST_OA_CONFIG_UUID)) {
+ oa_config = tmp;
+ break;
+ }
+ }
+
+ if (oa_config)
+ idr_remove(&perf->metrics_idr, oa_config->id);
+
+ mutex_unlock(&perf->metrics_lock);
+
+ if (oa_config)
+ i915_oa_config_put(oa_config);
+}
+
+static struct i915_oa_config *
+get_empty_config(struct i915_perf *perf)
+{
+ struct i915_oa_config *oa_config = NULL, *tmp;
+ int id;
+
+ mutex_lock(&perf->metrics_lock);
+
+ idr_for_each_entry(&perf->metrics_idr, tmp, id) {
+ if (!strcmp(tmp->uuid, TEST_OA_CONFIG_UUID)) {
+ oa_config = i915_oa_config_get(tmp);
+ break;
+ }
+ }
+
+ mutex_unlock(&perf->metrics_lock);
+
+ return oa_config;
+}
+
static struct i915_perf_stream *
test_stream(struct i915_perf *perf)
{
struct drm_i915_perf_open_param param = {};
+ struct i915_oa_config *oa_config = get_empty_config(perf);
struct perf_open_properties props = {
.engine = intel_engine_lookup_user(perf->i915,
I915_ENGINE_CLASS_RENDER,
@@ -25,13 +100,19 @@ test_stream(struct i915_perf *perf)
.sample_flags = SAMPLE_OA_REPORT,
.oa_format = IS_GEN(perf->i915, 12) ?
I915_OA_FORMAT_A32u40_A4u32_B8_C8 : I915_OA_FORMAT_C4_B8,
- .metrics_set = 1,
};
struct i915_perf_stream *stream;
+ if (!oa_config)
+ return NULL;
+
+ props.metrics_set = oa_config->id;
+
stream = kzalloc(sizeof(*stream), GFP_KERNEL);
- if (!stream)
+ if (!stream) {
+ i915_oa_config_put(oa_config);
return NULL;
+ }
stream->perf = perf;
@@ -42,6 +123,8 @@ test_stream(struct i915_perf *perf)
}
mutex_unlock(&perf->lock);
+ i915_oa_config_put(oa_config);
+
return stream;
}
@@ -206,6 +289,7 @@ int i915_perf_live_selftests(struct drm_i915_private *i915)
SUBTEST(live_noa_delay),
};
struct i915_perf *perf = &i915->perf;
+ int err;
if (!perf->metrics_kobj || !perf->ops.enable_metric_set)
return 0;
@@ -213,5 +297,13 @@ int i915_perf_live_selftests(struct drm_i915_private *i915)
if (intel_gt_is_wedged(&i915->gt))
return 0;
- return i915_subtests(tests, i915);
+ err = alloc_empty_config(&i915->perf);
+ if (err)
+ return err;
+
+ err = i915_subtests(tests, i915);
+
+ destroy_empty_config(&i915->perf);
+
+ return err;
}
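alloc_empty_config()/get_empty_config() above follow the stock idr pattern: allocate an id under the metrics lock, then walk entries back with idr_for_each_entry() and match on UUID. The same pattern reduced to a standalone sketch (demo struct, not the real i915_oa_config):

#include <linux/idr.h>
#include <linux/mutex.h>
#include <linux/string.h>

struct demo_config {
	int id;
	char uuid[40];
};

static DEFINE_IDR(demo_idr);
static DEFINE_MUTEX(demo_lock);

static int demo_register(struct demo_config *cfg)
{
	int id;

	mutex_lock(&demo_lock);
	/* ids below 2 are reserved, mirroring the perf metrics idr */
	id = idr_alloc(&demo_idr, cfg, 2, 0, GFP_KERNEL);
	if (id >= 0)
		cfg->id = id;
	mutex_unlock(&demo_lock);

	return id < 0 ? id : 0;
}

static struct demo_config *demo_find(const char *uuid)
{
	struct demo_config *cfg;
	int id;

	mutex_lock(&demo_lock);
	idr_for_each_entry(&demo_idr, cfg, id)
		if (!strcmp(cfg->uuid, uuid))
			break;
	mutex_unlock(&demo_lock);

	return cfg;	/* NULL when the walk finishes without a match */
}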
diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
index f89d9c42f1fa..1dab0360f76a 100644
--- a/drivers/gpu/drm/i915/selftests/i915_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_request.c
@@ -28,6 +28,7 @@
#include "gem/selftests/mock_context.h"
#include "gt/intel_engine_pm.h"
+#include "gt/intel_engine_user.h"
#include "gt/intel_gt.h"
#include "i915_random.h"
@@ -51,6 +52,11 @@ static unsigned int num_uabi_engines(struct drm_i915_private *i915)
return count;
}
+static struct intel_engine_cs *rcs0(struct drm_i915_private *i915)
+{
+ return intel_engine_lookup_user(i915, I915_ENGINE_CLASS_RENDER, 0);
+}
+
static int igt_add_request(void *arg)
{
struct drm_i915_private *i915 = arg;
@@ -58,7 +64,7 @@ static int igt_add_request(void *arg)
/* Basic preliminary test to create a request and let it loose! */
- request = mock_request(i915->engine[RCS0]->kernel_context, HZ / 10);
+ request = mock_request(rcs0(i915)->kernel_context, HZ / 10);
if (!request)
return -ENOMEM;
@@ -76,7 +82,7 @@ static int igt_wait_request(void *arg)
/* Submit a request, then wait upon it */
- request = mock_request(i915->engine[RCS0]->kernel_context, T);
+ request = mock_request(rcs0(i915)->kernel_context, T);
if (!request)
return -ENOMEM;
@@ -145,7 +151,7 @@ static int igt_fence_wait(void *arg)
/* Submit a request, treat it as a fence and wait upon it */
- request = mock_request(i915->engine[RCS0]->kernel_context, T);
+ request = mock_request(rcs0(i915)->kernel_context, T);
if (!request)
return -ENOMEM;
@@ -420,7 +426,7 @@ static int mock_breadcrumbs_smoketest(void *arg)
{
struct drm_i915_private *i915 = arg;
struct smoketest t = {
- .engine = i915->engine[RCS0],
+ .engine = rcs0(i915),
.ncontexts = 1024,
.max_batch = 1024,
.request_alloc = __mock_request_alloc
@@ -1233,7 +1239,7 @@ static int live_parallel_engines(void *arg)
struct igt_live_test t;
unsigned int idx;
- snprintf(name, sizeof(name), "%pS", fn);
+ snprintf(name, sizeof(name), "%ps", fn);
err = igt_live_test_begin(&t, i915, __func__, name);
if (err)
break;
diff --git a/drivers/gpu/drm/i915/selftests/intel_memory_region.c b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
index 2a1d4ba1f9f3..6e80d99048e4 100644
--- a/drivers/gpu/drm/i915/selftests/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
@@ -594,8 +594,11 @@ create_region_for_mapping(struct intel_memory_region *mr, u64 size, u32 type,
void *addr;
obj = i915_gem_object_create_region(mr, size, 0);
- if (IS_ERR(obj))
+ if (IS_ERR(obj)) {
+ if (PTR_ERR(obj) == -ENOSPC) /* Stolen memory */
+ return ERR_PTR(-ENODEV);
return obj;
+ }
addr = i915_gem_object_pin_map(obj, type);
if (IS_ERR(addr)) {
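The -ENOSPC hunk narrows an error rather than propagating it: stolen memory legitimately reports -ENOSPC, which the caller should treat as "skip this region", not a failure. The ERR_PTR/PTR_ERR idiom keeps that remap a two-liner; a generic sketch (demo_alloc() is hypothetical):

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/types.h>

struct demo_obj;
struct demo_obj *demo_alloc(size_t size);	/* may return ERR_PTR(-ENOSPC) */

static struct demo_obj *demo_alloc_or_skip(size_t size)
{
	struct demo_obj *obj = demo_alloc(size);

	/* Remap "region exhausted" to "not supported" so callers skip it. */
	if (IS_ERR(obj) && PTR_ERR(obj) == -ENOSPC)
		return ERR_PTR(-ENODEV);

	return obj;
}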
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index 2b4407ac26de..9b105b811f1f 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -192,11 +192,11 @@ struct drm_i915_private *mock_gem_device(void)
mkwrite_device_info(i915)->engine_mask = BIT(0);
- i915->engine[RCS0] = mock_engine(i915, "mock", RCS0);
- if (!i915->engine[RCS0])
+ i915->gt.engine[RCS0] = mock_engine(i915, "mock", RCS0);
+ if (!i915->gt.engine[RCS0])
goto err_unlock;
- if (mock_engine_init(i915->engine[RCS0]))
+ if (mock_engine_init(i915->gt.engine[RCS0]))
goto err_context;
__clear_bit(I915_WEDGED, &i915->gt.reset.flags);
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c
index a8b20557539b..ff43a3d80410 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi.c
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c
@@ -12,6 +12,7 @@
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
+#include <linux/mutex.h>
#include <linux/of_platform.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
@@ -169,6 +170,9 @@ struct mtk_hdmi {
bool audio_enable;
bool powered;
bool enabled;
+ hdmi_codec_plugged_cb plugged_cb;
+ struct device *codec_dev;
+ struct mutex update_plugged_status_lock;
};
static inline struct mtk_hdmi *hdmi_ctx_from_bridge(struct drm_bridge *b)
@@ -1194,13 +1198,26 @@ static void mtk_hdmi_clk_disable_audio(struct mtk_hdmi *hdmi)
clk_disable_unprepare(hdmi->clk[MTK_HDMI_CLK_AUD_SPDIF]);
}
+static enum drm_connector_status
+mtk_hdmi_update_plugged_status(struct mtk_hdmi *hdmi)
+{
+ bool connected;
+
+ mutex_lock(&hdmi->update_plugged_status_lock);
+ connected = mtk_cec_hpd_high(hdmi->cec_dev);
+ if (hdmi->plugged_cb && hdmi->codec_dev)
+ hdmi->plugged_cb(hdmi->codec_dev, connected);
+ mutex_unlock(&hdmi->update_plugged_status_lock);
+
+ return connected ?
+ connector_status_connected : connector_status_disconnected;
+}
+
static enum drm_connector_status hdmi_conn_detect(struct drm_connector *conn,
bool force)
{
struct mtk_hdmi *hdmi = hdmi_ctx_from_conn(conn);
-
- return mtk_cec_hpd_high(hdmi->cec_dev) ?
- connector_status_connected : connector_status_disconnected;
+ return mtk_hdmi_update_plugged_status(hdmi);
}
static void hdmi_conn_destroy(struct drm_connector *conn)
@@ -1657,20 +1674,39 @@ static int mtk_hdmi_audio_get_eld(struct device *dev, void *data, uint8_t *buf,
return 0;
}
+static int mtk_hdmi_audio_hook_plugged_cb(struct device *dev, void *data,
+ hdmi_codec_plugged_cb fn,
+ struct device *codec_dev)
+{
+ struct mtk_hdmi *hdmi = data;
+
+ mutex_lock(&hdmi->update_plugged_status_lock);
+ hdmi->plugged_cb = fn;
+ hdmi->codec_dev = codec_dev;
+ mutex_unlock(&hdmi->update_plugged_status_lock);
+
+ mtk_hdmi_update_plugged_status(hdmi);
+
+ return 0;
+}
+
static const struct hdmi_codec_ops mtk_hdmi_audio_codec_ops = {
.hw_params = mtk_hdmi_audio_hw_params,
.audio_startup = mtk_hdmi_audio_startup,
.audio_shutdown = mtk_hdmi_audio_shutdown,
.digital_mute = mtk_hdmi_audio_digital_mute,
.get_eld = mtk_hdmi_audio_get_eld,
+ .hook_plugged_cb = mtk_hdmi_audio_hook_plugged_cb,
};
-static void mtk_hdmi_register_audio_driver(struct device *dev)
+static int mtk_hdmi_register_audio_driver(struct device *dev)
{
+ struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
struct hdmi_codec_pdata codec_data = {
.ops = &mtk_hdmi_audio_codec_ops,
.max_i2s_channels = 2,
.i2s = 1,
+ .data = hdmi,
};
struct platform_device *pdev;
@@ -1678,9 +1714,10 @@ static void mtk_hdmi_register_audio_driver(struct device *dev)
PLATFORM_DEVID_AUTO, &codec_data,
sizeof(codec_data));
if (IS_ERR(pdev))
- return;
+ return PTR_ERR(pdev);
DRM_INFO("%s driver bound to HDMI\n", HDMI_CODEC_DRV_NAME);
+ return 0;
}
static int mtk_drm_hdmi_probe(struct platform_device *pdev)
@@ -1706,6 +1743,7 @@ static int mtk_drm_hdmi_probe(struct platform_device *pdev)
return ret;
}
+ mutex_init(&hdmi->update_plugged_status_lock);
platform_set_drvdata(pdev, hdmi);
ret = mtk_hdmi_output_init(hdmi);
@@ -1714,7 +1752,11 @@ static int mtk_drm_hdmi_probe(struct platform_device *pdev)
return ret;
}
- mtk_hdmi_register_audio_driver(dev);
+ ret = mtk_hdmi_register_audio_driver(dev);
+ if (ret) {
+ dev_err(dev, "Failed to register audio driver: %d\n", ret);
+ return ret;
+ }
hdmi->bridge.funcs = &mtk_hdmi_bridge_funcs;
hdmi->bridge.of_node = pdev->dev.of_node;
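The hook_plugged_cb plumbing above follows the hdmi-codec contract: the video driver stores the callback/device pair under a mutex and fires the callback both from detect() and immediately at registration, so a codec that binds after a hotplug still learns the current state. The discipline, condensed (demo types, not the mtk_hdmi ones):

#include <linux/mutex.h>
#include <sound/hdmi-codec.h>

struct demo_hdmi {
	struct mutex lock;
	hdmi_codec_plugged_cb plugged_cb;
	struct device *codec_dev;
};

/* Called from detect() on hotplug and from the registration hook. */
static void demo_update_plugged(struct demo_hdmi *h, bool connected)
{
	mutex_lock(&h->lock);
	if (h->plugged_cb && h->codec_dev)
		h->plugged_cb(h->codec_dev, connected);
	mutex_unlock(&h->lock);
}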
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index 7d9e63e20ded..724024a2243a 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -1446,18 +1446,31 @@ static const struct adreno_gpu_funcs funcs = {
static void check_speed_bin(struct device *dev)
{
struct nvmem_cell *cell;
- u32 bin, val;
+ u32 val;
+
+ /*
+	 * If the OPP table specifies an opp-supported-hw property, then we
+	 * have to set something with dev_pm_opp_set_supported_hw() or the
+	 * table doesn't get populated. So pick an arbitrary value that
+	 * should ensure the default frequencies are selected but not
+	 * conflict with any actual bins.
+ */
+ val = 0x80;
cell = nvmem_cell_get(dev, "speed_bin");
- /* If a nvmem cell isn't defined, nothing to do */
- if (IS_ERR(cell))
- return;
+ if (!IS_ERR(cell)) {
+ void *buf = nvmem_cell_read(cell, NULL);
+
+ if (!IS_ERR(buf)) {
+ u8 bin = *((u8 *) buf);
- bin = *((u32 *) nvmem_cell_read(cell, NULL));
- nvmem_cell_put(cell);
+ val = (1 << bin);
+ kfree(buf);
+ }
- val = (1 << bin);
+ nvmem_cell_put(cell);
+ }
dev_pm_opp_set_supported_hw(dev, &val, 1);
}
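Two fixes ride along in the rewritten check_speed_bin(): the fuse is now read as a u8 from the buffer that nvmem_cell_read() kmallocs (the old code dereferenced it as a u32 and leaked the buffer), and the supported-hw mask defaults to an out-of-band bit so the OPP table is populated even without a speed_bin cell. The resulting shape, isolated as a sketch (cell name assumed):

#include <linux/bits.h>
#include <linux/nvmem-consumer.h>
#include <linux/pm_opp.h>
#include <linux/slab.h>

static void demo_set_speed_bin(struct device *dev)
{
	struct nvmem_cell *cell;
	u32 mask = 0x80;	/* arbitrary bit: valid table, no real bin */

	cell = nvmem_cell_get(dev, "speed_bin");
	if (!IS_ERR(cell)) {
		void *buf = nvmem_cell_read(cell, NULL);

		if (!IS_ERR(buf)) {
			mask = BIT(*(u8 *)buf);
			kfree(buf);	/* nvmem_cell_read() buffers are kmalloc'd */
		}
		nvmem_cell_put(cell);
	}

	dev_pm_opp_set_supported_hw(dev, &mask, 1);
}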
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index 748cd379065f..c4e71abbdd53 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -2,6 +2,7 @@
/* Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. */
#include <linux/clk.h>
+#include <linux/dma-mapping.h>
#include <linux/interconnect.h>
#include <linux/pm_domain.h>
#include <linux/pm_opp.h>
@@ -920,21 +921,10 @@ int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu)
static void a6xx_gmu_memory_free(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo)
{
- int count, i;
- u64 iova;
-
if (IS_ERR_OR_NULL(bo))
return;
- count = bo->size >> PAGE_SHIFT;
- iova = bo->iova;
-
- for (i = 0; i < count; i++, iova += PAGE_SIZE) {
- iommu_unmap(gmu->domain, iova, PAGE_SIZE);
- __free_pages(bo->pages[i], 0);
- }
-
- kfree(bo->pages);
+ dma_free_wc(gmu->dev, bo->size, bo->virt, bo->iova);
kfree(bo);
}
@@ -942,7 +932,6 @@ static struct a6xx_gmu_bo *a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu,
size_t size)
{
struct a6xx_gmu_bo *bo;
- int ret, count, i;
bo = kzalloc(sizeof(*bo), GFP_KERNEL);
if (!bo)
@@ -950,86 +939,14 @@ static struct a6xx_gmu_bo *a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu,
bo->size = PAGE_ALIGN(size);
- count = bo->size >> PAGE_SHIFT;
+ bo->virt = dma_alloc_wc(gmu->dev, bo->size, &bo->iova, GFP_KERNEL);
- bo->pages = kcalloc(count, sizeof(struct page *), GFP_KERNEL);
- if (!bo->pages) {
+ if (!bo->virt) {
kfree(bo);
return ERR_PTR(-ENOMEM);
}
- for (i = 0; i < count; i++) {
- bo->pages[i] = alloc_page(GFP_KERNEL);
- if (!bo->pages[i])
- goto err;
- }
-
- bo->iova = gmu->uncached_iova_base;
-
- for (i = 0; i < count; i++) {
- ret = iommu_map(gmu->domain,
- bo->iova + (PAGE_SIZE * i),
- page_to_phys(bo->pages[i]), PAGE_SIZE,
- IOMMU_READ | IOMMU_WRITE);
-
- if (ret) {
- DRM_DEV_ERROR(gmu->dev, "Unable to map GMU buffer object\n");
-
- for (i = i - 1 ; i >= 0; i--)
- iommu_unmap(gmu->domain,
- bo->iova + (PAGE_SIZE * i),
- PAGE_SIZE);
-
- goto err;
- }
- }
-
- bo->virt = vmap(bo->pages, count, VM_IOREMAP,
- pgprot_writecombine(PAGE_KERNEL));
- if (!bo->virt)
- goto err;
-
- /* Align future IOVA addresses on 1MB boundaries */
- gmu->uncached_iova_base += ALIGN(size, SZ_1M);
-
return bo;
-
-err:
- for (i = 0; i < count; i++) {
- if (bo->pages[i])
- __free_pages(bo->pages[i], 0);
- }
-
- kfree(bo->pages);
- kfree(bo);
-
- return ERR_PTR(-ENOMEM);
-}
-
-static int a6xx_gmu_memory_probe(struct a6xx_gmu *gmu)
-{
- int ret;
-
- /*
- * The GMU address space is hardcoded to treat the range
- * 0x60000000 - 0x80000000 as un-cached memory. All buffers shared
- * between the GMU and the CPU will live in this space
- */
- gmu->uncached_iova_base = 0x60000000;
-
-
- gmu->domain = iommu_domain_alloc(&platform_bus_type);
- if (!gmu->domain)
- return -ENODEV;
-
- ret = iommu_attach_device(gmu->domain, gmu->dev);
-
- if (ret) {
- iommu_domain_free(gmu->domain);
- gmu->domain = NULL;
- }
-
- return ret;
}
/* Return the 'arc-level' for the given frequency */
@@ -1289,10 +1206,6 @@ void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu)
a6xx_gmu_memory_free(gmu, gmu->hfi);
- iommu_detach_device(gmu->domain, gmu->dev);
-
- iommu_domain_free(gmu->domain);
-
free_irq(gmu->gmu_irq, gmu);
free_irq(gmu->hfi_irq, gmu);
@@ -1313,7 +1226,15 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
gmu->dev = &pdev->dev;
- of_dma_configure(gmu->dev, node, true);
+ /* Pass force_dma false to require the DT to set the dma region */
+ ret = of_dma_configure(gmu->dev, node, false);
+ if (ret)
+ return ret;
+
+ /* Set the mask after the of_dma_configure() */
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(31));
+ if (ret)
+ return ret;
	/* For now, don't do anything fancy until we get our feet under us */
gmu->idle_level = GMU_IDLE_STATE_ACTIVE;
@@ -1325,11 +1246,6 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
if (ret)
goto err_put_device;
- /* Set up the IOMMU context bank */
- ret = a6xx_gmu_memory_probe(gmu);
- if (ret)
- goto err_put_device;
-
	/* Allocate memory for the HFI queues */
gmu->hfi = a6xx_gmu_memory_alloc(gmu, SZ_16K);
if (IS_ERR(gmu->hfi))
@@ -1375,11 +1291,6 @@ err_mmio:
err_memory:
a6xx_gmu_memory_free(gmu, gmu->hfi);
- if (gmu->domain) {
- iommu_detach_device(gmu->domain, gmu->dev);
-
- iommu_domain_free(gmu->domain);
- }
ret = -ENODEV;
err_put_device:
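The GMU allocator above replaces ~80 lines of alloc_page()/iommu_map()/vmap() bookkeeping with the DMA API: once of_dma_configure() has attached the IOMMU and the mask is set, dma_alloc_wc() returns a CPU mapping and a device address in one call, and teardown is symmetric. A minimal sketch of that lifecycle:

#include <linux/dma-mapping.h>
#include <linux/mm.h>

struct demo_bo {
	void *virt;
	dma_addr_t iova;
	size_t size;
};

static int demo_bo_alloc(struct device *dev, struct demo_bo *bo, size_t size)
{
	bo->size = PAGE_ALIGN(size);
	/* Write-combined: safe for a device mailbox, cheap for CPU writes. */
	bo->virt = dma_alloc_wc(dev, bo->size, &bo->iova, GFP_KERNEL);
	return bo->virt ? 0 : -ENOMEM;
}

static void demo_bo_free(struct device *dev, struct demo_bo *bo)
{
	dma_free_wc(dev, bo->size, bo->virt, bo->iova);
}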
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
index 2af91ed7ed0c..4af65a36d5ca 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
@@ -12,8 +12,7 @@
struct a6xx_gmu_bo {
void *virt;
size_t size;
- u64 iova;
- struct page **pages;
+ dma_addr_t iova;
};
/*
@@ -49,9 +48,6 @@ struct a6xx_gmu {
int hfi_irq;
int gmu_irq;
- struct iommu_domain *domain;
- u64 uncached_iova_base;
-
struct device *gxpd;
int idle_level;
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h
index e67c20c415af..24c974c293e5 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h
@@ -379,7 +379,7 @@ static const struct a6xx_indexed_registers {
};
static const struct a6xx_indexed_registers a6xx_cp_mempool_indexed = {
- "CP_MEMPOOOL", REG_A6XX_CP_MEM_POOL_DBG_ADDR,
+ "CP_MEMPOOL", REG_A6XX_CP_MEM_POOL_DBG_ADDR,
REG_A6XX_CP_MEM_POOL_DBG_DATA, 0x2060,
};
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 7fd29829b2fa..1d5c43c22269 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -673,7 +673,7 @@ static char *adreno_gpu_ascii85_encode(u32 *src, size_t len)
return NULL;
for (i = 0; i < l; i++)
- buf_itr += snprintf(buf + buf_itr, buffer_size - buf_itr, "%s",
+ buf_itr += scnprintf(buf + buf_itr, buffer_size - buf_itr, "%s",
ascii85_encode(src[i], out));
return buf;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index 58d3400668f5..a1b79ee2bd9d 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -164,7 +164,6 @@ enum dpu_enc_rc_states {
* clks and resources after IDLE_TIMEOUT time.
* @vsync_event_work: worker to handle vsync event for autorefresh
* @topology: topology of the display
- * @mode_set_complete: flag to indicate modeset completion
* @idle_timeout: idle timeout duration in milliseconds
*/
struct dpu_encoder_virt {
@@ -202,7 +201,6 @@ struct dpu_encoder_virt {
struct delayed_work delayed_off_work;
struct kthread_work vsync_event_work;
struct msm_display_topology topology;
- bool mode_set_complete;
u32 idle_timeout;
};
@@ -461,7 +459,7 @@ void dpu_encoder_helper_split_config(
struct msm_display_info *disp_info;
if (!phys_enc->hw_mdptop || !phys_enc->parent) {
- DPU_ERROR("invalid arg(s), encoder %d\n", phys_enc != 0);
+ DPU_ERROR("invalid arg(s), encoder %d\n", phys_enc != NULL);
return;
}
@@ -562,12 +560,13 @@ static int dpu_encoder_virt_atomic_check(
const struct drm_display_mode *mode;
struct drm_display_mode *adj_mode;
struct msm_display_topology topology;
+ struct dpu_global_state *global_state;
int i = 0;
int ret = 0;
if (!drm_enc || !crtc_state || !conn_state) {
DPU_ERROR("invalid arg(s), drm_enc %d, crtc/conn state %d/%d\n",
- drm_enc != 0, crtc_state != 0, conn_state != 0);
+ drm_enc != NULL, crtc_state != NULL, conn_state != NULL);
return -EINVAL;
}
@@ -578,6 +577,7 @@ static int dpu_encoder_virt_atomic_check(
dpu_kms = to_dpu_kms(priv->kms);
mode = &crtc_state->mode;
adj_mode = &crtc_state->adjusted_mode;
+ global_state = dpu_kms_get_existing_global_state(dpu_kms);
trace_dpu_enc_atomic_check(DRMID(drm_enc));
/*
@@ -609,17 +609,15 @@ static int dpu_encoder_virt_atomic_check(
topology = dpu_encoder_get_topology(dpu_enc, dpu_kms, adj_mode);
- /* Reserve dynamic resources now. Indicating AtomicTest phase */
+ /* Reserve dynamic resources now. */
if (!ret) {
/*
* Avoid reserving resources when mode set is pending. Topology
* info may not be available to complete reservation.
*/
- if (drm_atomic_crtc_needs_modeset(crtc_state)
- && dpu_enc->mode_set_complete) {
- ret = dpu_rm_reserve(&dpu_kms->rm, drm_enc, crtc_state,
- topology, true);
- dpu_enc->mode_set_complete = false;
+ if (drm_atomic_crtc_needs_modeset(crtc_state)) {
+ ret = dpu_rm_reserve(&dpu_kms->rm, global_state,
+ drm_enc, crtc_state, topology);
}
}
@@ -956,12 +954,13 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc,
struct drm_connector *conn = NULL, *conn_iter;
struct drm_crtc *drm_crtc;
struct dpu_crtc_state *cstate;
- struct dpu_rm_hw_iter hw_iter;
+ struct dpu_global_state *global_state;
struct msm_display_topology topology;
- struct dpu_hw_ctl *hw_ctl[MAX_CHANNELS_PER_ENC] = { NULL };
- struct dpu_hw_mixer *hw_lm[MAX_CHANNELS_PER_ENC] = { NULL };
- int num_lm = 0, num_ctl = 0;
- int i, j, ret;
+ struct dpu_hw_blk *hw_pp[MAX_CHANNELS_PER_ENC];
+ struct dpu_hw_blk *hw_ctl[MAX_CHANNELS_PER_ENC];
+ struct dpu_hw_blk *hw_lm[MAX_CHANNELS_PER_ENC];
+ int num_lm, num_ctl, num_pp;
+ int i, j;
if (!drm_enc) {
DPU_ERROR("invalid encoder\n");
@@ -975,6 +974,12 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc,
dpu_kms = to_dpu_kms(priv->kms);
connector_list = &dpu_kms->dev->mode_config.connector_list;
+ global_state = dpu_kms_get_existing_global_state(dpu_kms);
+ if (IS_ERR_OR_NULL(global_state)) {
+		DPU_ERROR("Failed to get global state\n");
+ return;
+ }
+
trace_dpu_enc_mode_set(DRMID(drm_enc));
list_for_each_entry(conn_iter, connector_list, head)
@@ -995,77 +1000,57 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc,
topology = dpu_encoder_get_topology(dpu_enc, dpu_kms, adj_mode);
- /* Reserve dynamic resources now. Indicating non-AtomicTest phase */
- ret = dpu_rm_reserve(&dpu_kms->rm, drm_enc, drm_crtc->state,
- topology, false);
- if (ret) {
- DPU_ERROR_ENC(dpu_enc,
- "failed to reserve hw resources, %d\n", ret);
- return;
- }
-
- dpu_rm_init_hw_iter(&hw_iter, drm_enc->base.id, DPU_HW_BLK_PINGPONG);
- for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
- dpu_enc->hw_pp[i] = NULL;
- if (!dpu_rm_get_hw(&dpu_kms->rm, &hw_iter))
- break;
- dpu_enc->hw_pp[i] = (struct dpu_hw_pingpong *) hw_iter.hw;
- }
-
- dpu_rm_init_hw_iter(&hw_iter, drm_enc->base.id, DPU_HW_BLK_CTL);
- for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
- if (!dpu_rm_get_hw(&dpu_kms->rm, &hw_iter))
- break;
- hw_ctl[i] = (struct dpu_hw_ctl *)hw_iter.hw;
- num_ctl++;
- }
+	/* Query resources that have been reserved in the atomic check step. */
+ num_pp = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
+ drm_enc->base.id, DPU_HW_BLK_PINGPONG, hw_pp,
+ ARRAY_SIZE(hw_pp));
+ num_ctl = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
+ drm_enc->base.id, DPU_HW_BLK_CTL, hw_ctl, ARRAY_SIZE(hw_ctl));
+ num_lm = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
+ drm_enc->base.id, DPU_HW_BLK_LM, hw_lm, ARRAY_SIZE(hw_lm));
- dpu_rm_init_hw_iter(&hw_iter, drm_enc->base.id, DPU_HW_BLK_LM);
- for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
- if (!dpu_rm_get_hw(&dpu_kms->rm, &hw_iter))
- break;
- hw_lm[i] = (struct dpu_hw_mixer *)hw_iter.hw;
- num_lm++;
- }
+ for (i = 0; i < MAX_CHANNELS_PER_ENC; i++)
+ dpu_enc->hw_pp[i] = i < num_pp ? to_dpu_hw_pingpong(hw_pp[i])
+ : NULL;
cstate = to_dpu_crtc_state(drm_crtc->state);
for (i = 0; i < num_lm; i++) {
int ctl_idx = (i < num_ctl) ? i : (num_ctl-1);
- cstate->mixers[i].hw_lm = hw_lm[i];
- cstate->mixers[i].lm_ctl = hw_ctl[ctl_idx];
+ cstate->mixers[i].hw_lm = to_dpu_hw_mixer(hw_lm[i]);
+ cstate->mixers[i].lm_ctl = to_dpu_hw_ctl(hw_ctl[ctl_idx]);
}
cstate->num_mixers = num_lm;
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ int num_blk;
+ struct dpu_hw_blk *hw_blk[MAX_CHANNELS_PER_ENC];
struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
if (!dpu_enc->hw_pp[i]) {
DPU_ERROR_ENC(dpu_enc,
"no pp block assigned at idx: %d\n", i);
- goto error;
+ return;
}
if (!hw_ctl[i]) {
DPU_ERROR_ENC(dpu_enc,
"no ctl block assigned at idx: %d\n", i);
- goto error;
+ return;
}
phys->hw_pp = dpu_enc->hw_pp[i];
- phys->hw_ctl = hw_ctl[i];
+ phys->hw_ctl = to_dpu_hw_ctl(hw_ctl[i]);
- dpu_rm_init_hw_iter(&hw_iter, drm_enc->base.id,
- DPU_HW_BLK_INTF);
- for (j = 0; j < MAX_CHANNELS_PER_ENC; j++) {
+ num_blk = dpu_rm_get_assigned_resources(&dpu_kms->rm,
+ global_state, drm_enc->base.id, DPU_HW_BLK_INTF,
+ hw_blk, ARRAY_SIZE(hw_blk));
+ for (j = 0; j < num_blk; j++) {
struct dpu_hw_intf *hw_intf;
- if (!dpu_rm_get_hw(&dpu_kms->rm, &hw_iter))
- break;
-
- hw_intf = (struct dpu_hw_intf *)hw_iter.hw;
+			hw_intf = to_dpu_hw_intf(hw_blk[j]);
if (hw_intf->idx == phys->intf_idx)
phys->hw_intf = hw_intf;
}
@@ -1073,18 +1058,13 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc,
if (!phys->hw_intf) {
DPU_ERROR_ENC(dpu_enc,
"no intf block assigned at idx: %d\n", i);
- goto error;
+ return;
}
phys->connector = conn->state->connector;
if (phys->ops.mode_set)
phys->ops.mode_set(phys, mode, adj_mode);
}
-
- dpu_enc->mode_set_complete = true;
-
-error:
- dpu_rm_release(&dpu_kms->rm, drm_enc);
}
static void _dpu_encoder_virt_enable_helper(struct drm_encoder *drm_enc)
@@ -1181,6 +1161,7 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
struct dpu_encoder_virt *dpu_enc = NULL;
struct msm_drm_private *priv;
struct dpu_kms *dpu_kms;
+ struct dpu_global_state *global_state;
int i = 0;
if (!drm_enc) {
@@ -1199,6 +1180,7 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
priv = drm_enc->dev->dev_private;
dpu_kms = to_dpu_kms(priv->kms);
+ global_state = dpu_kms_get_existing_global_state(dpu_kms);
trace_dpu_enc_disable(DRMID(drm_enc));
@@ -1228,7 +1210,7 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
DPU_DEBUG_ENC(dpu_enc, "encoder disabled\n");
- dpu_rm_release(&dpu_kms->rm, drm_enc);
+ dpu_rm_release(global_state, drm_enc);
mutex_unlock(&dpu_enc->enc_lock);
}
@@ -1964,7 +1946,7 @@ static int dpu_encoder_virt_add_phys_encs(
if (IS_ERR_OR_NULL(enc)) {
DPU_ERROR_ENC(dpu_enc, "failed to init vid enc: %ld\n",
PTR_ERR(enc));
- return enc == 0 ? -EINVAL : PTR_ERR(enc);
+ return enc == NULL ? -EINVAL : PTR_ERR(enc);
}
dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc;
@@ -1977,7 +1959,7 @@ static int dpu_encoder_virt_add_phys_encs(
if (IS_ERR_OR_NULL(enc)) {
DPU_ERROR_ENC(dpu_enc, "failed to init cmd enc: %ld\n",
PTR_ERR(enc));
- return enc == 0 ? -EINVAL : PTR_ERR(enc);
+ return enc == NULL ? -EINVAL : PTR_ERR(enc);
}
dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc;
@@ -2008,7 +1990,7 @@ static int dpu_encoder_setup_display(struct dpu_encoder_virt *dpu_enc,
struct dpu_enc_phys_init_params phys_params;
if (!dpu_enc) {
- DPU_ERROR("invalid arg(s), enc %d\n", dpu_enc != 0);
+ DPU_ERROR("invalid arg(s), enc %d\n", dpu_enc != NULL);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
index 39e1e280ba44..8493d68ad841 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
@@ -411,7 +411,7 @@ static void _dpu_encoder_phys_cmd_pingpong_config(
to_dpu_encoder_phys_cmd(phys_enc);
if (!phys_enc->hw_pp || !phys_enc->hw_ctl->ops.setup_intf_cfg) {
- DPU_ERROR("invalid arg(s), enc %d\n", phys_enc != 0);
+ DPU_ERROR("invalid arg(s), enc %d\n", phys_enc != NULL);
return;
}
@@ -440,7 +440,7 @@ static void dpu_encoder_phys_cmd_enable_helper(
u32 flush_mask = 0;
if (!phys_enc->hw_pp) {
- DPU_ERROR("invalid arg(s), encoder %d\n", phys_enc != 0);
+ DPU_ERROR("invalid arg(s), encoder %d\n", phys_enc != NULL);
return;
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
index c71c18de5966..b5a49050d131 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
@@ -239,7 +239,7 @@ static void dpu_encoder_phys_vid_setup_timing_engine(
struct dpu_hw_intf_cfg intf_cfg = { 0 };
if (!phys_enc->hw_ctl->ops.setup_intf_cfg) {
- DPU_ERROR("invalid encoder %d\n", phys_enc != 0);
+ DPU_ERROR("invalid encoder %d\n", phys_enc != NULL);
return;
}
@@ -559,7 +559,7 @@ static void dpu_encoder_phys_vid_disable(struct dpu_encoder_phys *phys_enc)
if (!phys_enc->hw_intf) {
DPU_ERROR("invalid hw_intf %d hw_ctl %d\n",
- phys_enc->hw_intf != 0, phys_enc->hw_ctl != 0);
+ phys_enc->hw_intf != NULL, phys_enc->hw_ctl != NULL);
return;
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
index 85468981632d..0ead64d3f63d 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
@@ -90,6 +90,16 @@ struct dpu_hw_intf {
};
/**
+ * to_dpu_hw_intf - convert base object dpu_hw_blk to container
+ * @hw: Pointer to base hardware block
+ * return: Pointer to hardware block container
+ */
+static inline struct dpu_hw_intf *to_dpu_hw_intf(struct dpu_hw_blk *hw)
+{
+ return container_of(hw, struct dpu_hw_intf, base);
+}
+
+/**
* dpu_hw_intf_init(): Initializes the intf driver for the passed
* interface idx.
* @idx: interface index for which driver object is required
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h
index 3d6f46b1db30..d73cb73e938b 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h
@@ -97,6 +97,16 @@ struct dpu_hw_pingpong {
};
/**
+ * to_dpu_hw_pingpong - convert base object dpu_hw_blk to container
+ * @hw: Pointer to base hardware block
+ * return: Pointer to hardware block container
+ */
+static inline struct dpu_hw_pingpong *to_dpu_hw_pingpong(struct dpu_hw_blk *hw)
+{
+ return container_of(hw, struct dpu_hw_pingpong, base);
+}
+
+/**
* dpu_hw_pingpong_init - initializes the pingpong driver for the passed
* pingpong idx.
* @idx: Pingpong index for which driver object is required
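Both new headers add the same downcast helper: resources now travel as generic struct dpu_hw_blk pointers, and container_of() recovers the concrete type because the base is embedded in each block struct. The idiom in isolation (demo names):

#include <linux/kernel.h>	/* container_of() */

struct demo_hw_blk { int id; };

struct demo_hw_intf {
	struct demo_hw_blk base;	/* must be embedded, not pointed to */
	int intf_idx;
};

static inline struct demo_hw_intf *to_demo_hw_intf(struct demo_hw_blk *hw)
{
	/* Valid only for blocks actually allocated as demo_hw_intf. */
	return container_of(hw, struct demo_hw_intf, base);
}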
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
index cb08fafb1dc1..ce19f1d39367 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
@@ -138,16 +138,12 @@ static int _dpu_debugfs_show_regset32(struct seq_file *s, void *data)
{
struct dpu_debugfs_regset32 *regset = s->private;
struct dpu_kms *dpu_kms = regset->dpu_kms;
- struct drm_device *dev;
- struct msm_drm_private *priv;
void __iomem *base;
uint32_t i, addr;
if (!dpu_kms->mmio)
return 0;
- dev = dpu_kms->dev;
- priv = dev->dev_private;
base = dpu_kms->mmio + regset->offset;
/* insert padding spaces, if needed */
@@ -228,6 +224,85 @@ static int dpu_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor)
}
#endif
+/* Global/shared object state funcs */
+
+/*
+ * This is a helper that returns the private state currently in operation.
+ * Note that this would return the "old_state" if called in the atomic check
+ * path, and the "new_state" after the atomic swap has been done.
+ */
+struct dpu_global_state *
+dpu_kms_get_existing_global_state(struct dpu_kms *dpu_kms)
+{
+ return to_dpu_global_state(dpu_kms->global_state.state);
+}
+
+/*
+ * This acquires the modeset lock set aside for global state and creates
+ * a new duplicated private object state.
+ */
+struct dpu_global_state *dpu_kms_get_global_state(struct drm_atomic_state *s)
+{
+ struct msm_drm_private *priv = s->dev->dev_private;
+ struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
+ struct drm_private_state *priv_state;
+ int ret;
+
+ ret = drm_modeset_lock(&dpu_kms->global_state_lock, s->acquire_ctx);
+ if (ret)
+ return ERR_PTR(ret);
+
+ priv_state = drm_atomic_get_private_obj_state(s,
+ &dpu_kms->global_state);
+ if (IS_ERR(priv_state))
+ return ERR_CAST(priv_state);
+
+ return to_dpu_global_state(priv_state);
+}
+
+static struct drm_private_state *
+dpu_kms_global_duplicate_state(struct drm_private_obj *obj)
+{
+ struct dpu_global_state *state;
+
+ state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return NULL;
+
+ __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
+
+ return &state->base;
+}
+
+static void dpu_kms_global_destroy_state(struct drm_private_obj *obj,
+ struct drm_private_state *state)
+{
+ struct dpu_global_state *dpu_state = to_dpu_global_state(state);
+
+ kfree(dpu_state);
+}
+
+static const struct drm_private_state_funcs dpu_kms_global_state_funcs = {
+ .atomic_duplicate_state = dpu_kms_global_duplicate_state,
+ .atomic_destroy_state = dpu_kms_global_destroy_state,
+};
+
+static int dpu_kms_global_obj_init(struct dpu_kms *dpu_kms)
+{
+ struct dpu_global_state *state;
+
+ drm_modeset_lock_init(&dpu_kms->global_state_lock);
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return -ENOMEM;
+
+ drm_atomic_private_obj_init(dpu_kms->dev, &dpu_kms->global_state,
+ &state->base,
+ &dpu_kms_global_state_funcs);
+ return 0;
+}
+
static int dpu_kms_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
{
return dpu_crtc_vblank(crtc, true);
@@ -267,8 +342,6 @@ static ktime_t dpu_kms_vsync_time(struct msm_kms *kms, struct drm_crtc *crtc)
static void dpu_kms_prepare_commit(struct msm_kms *kms,
struct drm_atomic_state *state)
{
- struct dpu_kms *dpu_kms;
- struct drm_device *dev;
struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
struct drm_encoder *encoder;
@@ -276,8 +349,6 @@ static void dpu_kms_prepare_commit(struct msm_kms *kms,
if (!kms)
return;
- dpu_kms = to_dpu_kms(kms);
- dev = dpu_kms->dev;
/* Call prepare_commit for all affected encoders */
for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
@@ -552,11 +623,8 @@ static long dpu_kms_round_pixclk(struct msm_kms *kms, unsigned long rate,
static void _dpu_kms_hw_destroy(struct dpu_kms *dpu_kms)
{
- struct drm_device *dev;
int i;
- dev = dpu_kms->dev;
-
if (dpu_kms->hw_intr)
dpu_hw_intr_destroy(dpu_kms->hw_intr);
dpu_kms->hw_intr = NULL;
@@ -760,7 +828,6 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
{
struct dpu_kms *dpu_kms;
struct drm_device *dev;
- struct msm_drm_private *priv;
int i, rc = -EINVAL;
if (!kms) {
@@ -770,7 +837,10 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
dpu_kms = to_dpu_kms(kms);
dev = dpu_kms->dev;
- priv = dev->dev_private;
+
+ rc = dpu_kms_global_obj_init(dpu_kms);
+ if (rc)
+ return rc;
atomic_set(&dpu_kms->bandwidth_ref, 0);
@@ -1018,10 +1088,8 @@ static int __maybe_unused dpu_runtime_suspend(struct device *dev)
int rc = -1;
struct platform_device *pdev = to_platform_device(dev);
struct dpu_kms *dpu_kms = platform_get_drvdata(pdev);
- struct drm_device *ddev;
struct dss_module_power *mp = &dpu_kms->mp;
- ddev = dpu_kms->dev;
rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, false);
if (rc)
DPU_ERROR("clock disable failed rc:%d\n", rc);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
index c6169e7df19d..211f5de99a44 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
@@ -111,6 +111,13 @@ struct dpu_kms {
struct dpu_core_perf perf;
+ /*
+	 * Global private object state; do not access directly, use
+	 * dpu_kms_get_global_state()
+ */
+ struct drm_modeset_lock global_state_lock;
+ struct drm_private_obj global_state;
+
struct dpu_rm rm;
bool rm_init;
@@ -139,6 +146,25 @@ struct vsync_info {
#define to_dpu_kms(x) container_of(x, struct dpu_kms, base)
+#define to_dpu_global_state(x) container_of(x, struct dpu_global_state, base)
+
+/* Global private object state for tracking resources that are shared across
+ * multiple kms objects (planes/crtcs/etc).
+ */
+struct dpu_global_state {
+ struct drm_private_state base;
+
+ uint32_t pingpong_to_enc_id[PINGPONG_MAX - PINGPONG_0];
+ uint32_t mixer_to_enc_id[LM_MAX - LM_0];
+ uint32_t ctl_to_enc_id[CTL_MAX - CTL_0];
+ uint32_t intf_to_enc_id[INTF_MAX - INTF_0];
+};
+
+struct dpu_global_state
+ *dpu_kms_get_existing_global_state(struct dpu_kms *dpu_kms);
+struct dpu_global_state
+ *__must_check dpu_kms_get_global_state(struct drm_atomic_state *s);
+
/**
* Debugfs functions - extra helper functions for debugfs support
*
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
index 23f5b1433b35..9b62451b01ee 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
@@ -12,8 +12,12 @@
#include "dpu_encoder.h"
#include "dpu_trace.h"
-#define RESERVED_BY_OTHER(h, r) \
- ((h)->enc_id && (h)->enc_id != r)
+
+static inline bool reserved_by_other(uint32_t *res_map, int idx,
+ uint32_t enc_id)
+{
+ return res_map[idx] && res_map[idx] != enc_id;
+}
/**
* struct dpu_rm_requirements - Reservation requirements parameter bundle
@@ -25,171 +29,43 @@ struct dpu_rm_requirements {
struct dpu_encoder_hw_resources hw_res;
};
-
-/**
- * struct dpu_rm_hw_blk - hardware block tracking list member
- * @list: List head for list of all hardware blocks tracking items
- * @id: Hardware ID number, within it's own space, ie. LM_X
- * @enc_id: Encoder id to which this blk is binded
- * @hw: Pointer to the hardware register access object for this block
- */
-struct dpu_rm_hw_blk {
- struct list_head list;
- uint32_t id;
- uint32_t enc_id;
- struct dpu_hw_blk *hw;
-};
-
-void dpu_rm_init_hw_iter(
- struct dpu_rm_hw_iter *iter,
- uint32_t enc_id,
- enum dpu_hw_blk_type type)
-{
- memset(iter, 0, sizeof(*iter));
- iter->enc_id = enc_id;
- iter->type = type;
-}
-
-static bool _dpu_rm_get_hw_locked(struct dpu_rm *rm, struct dpu_rm_hw_iter *i)
+int dpu_rm_destroy(struct dpu_rm *rm)
{
- struct list_head *blk_list;
-
- if (!rm || !i || i->type >= DPU_HW_BLK_MAX) {
- DPU_ERROR("invalid rm\n");
- return false;
- }
+ int i;
- i->hw = NULL;
- blk_list = &rm->hw_blks[i->type];
+ for (i = 0; i < ARRAY_SIZE(rm->pingpong_blks); i++) {
+ struct dpu_hw_pingpong *hw;
- if (i->blk && (&i->blk->list == blk_list)) {
- DPU_DEBUG("attempt resume iteration past last\n");
- return false;
- }
-
- i->blk = list_prepare_entry(i->blk, blk_list, list);
-
- list_for_each_entry_continue(i->blk, blk_list, list) {
- if (i->enc_id == i->blk->enc_id) {
- i->hw = i->blk->hw;
- DPU_DEBUG("found type %d id %d for enc %d\n",
- i->type, i->blk->id, i->enc_id);
- return true;
+ if (rm->pingpong_blks[i]) {
+ hw = to_dpu_hw_pingpong(rm->pingpong_blks[i]);
+ dpu_hw_pingpong_destroy(hw);
}
}
+ for (i = 0; i < ARRAY_SIZE(rm->mixer_blks); i++) {
+ struct dpu_hw_mixer *hw;
- DPU_DEBUG("no match, type %d for enc %d\n", i->type, i->enc_id);
-
- return false;
-}
-
-bool dpu_rm_get_hw(struct dpu_rm *rm, struct dpu_rm_hw_iter *i)
-{
- bool ret;
-
- mutex_lock(&rm->rm_lock);
- ret = _dpu_rm_get_hw_locked(rm, i);
- mutex_unlock(&rm->rm_lock);
-
- return ret;
-}
-
-static void _dpu_rm_hw_destroy(enum dpu_hw_blk_type type, void *hw)
-{
- switch (type) {
- case DPU_HW_BLK_LM:
- dpu_hw_lm_destroy(hw);
- break;
- case DPU_HW_BLK_CTL:
- dpu_hw_ctl_destroy(hw);
- break;
- case DPU_HW_BLK_PINGPONG:
- dpu_hw_pingpong_destroy(hw);
- break;
- case DPU_HW_BLK_INTF:
- dpu_hw_intf_destroy(hw);
- break;
- case DPU_HW_BLK_SSPP:
- /* SSPPs are not managed by the resource manager */
- case DPU_HW_BLK_TOP:
- /* Top is a singleton, not managed in hw_blks list */
- case DPU_HW_BLK_MAX:
- default:
- DPU_ERROR("unsupported block type %d\n", type);
- break;
- }
-}
-
-int dpu_rm_destroy(struct dpu_rm *rm)
-{
- struct dpu_rm_hw_blk *hw_cur, *hw_nxt;
- enum dpu_hw_blk_type type;
-
- for (type = 0; type < DPU_HW_BLK_MAX; type++) {
- list_for_each_entry_safe(hw_cur, hw_nxt, &rm->hw_blks[type],
- list) {
- list_del(&hw_cur->list);
- _dpu_rm_hw_destroy(type, hw_cur->hw);
- kfree(hw_cur);
+ if (rm->mixer_blks[i]) {
+ hw = to_dpu_hw_mixer(rm->mixer_blks[i]);
+ dpu_hw_lm_destroy(hw);
}
}
+ for (i = 0; i < ARRAY_SIZE(rm->ctl_blks); i++) {
+ struct dpu_hw_ctl *hw;
- mutex_destroy(&rm->rm_lock);
-
- return 0;
-}
-
-static int _dpu_rm_hw_blk_create(
- struct dpu_rm *rm,
- const struct dpu_mdss_cfg *cat,
- void __iomem *mmio,
- enum dpu_hw_blk_type type,
- uint32_t id,
- const void *hw_catalog_info)
-{
- struct dpu_rm_hw_blk *blk;
- void *hw;
-
- switch (type) {
- case DPU_HW_BLK_LM:
- hw = dpu_hw_lm_init(id, mmio, cat);
- break;
- case DPU_HW_BLK_CTL:
- hw = dpu_hw_ctl_init(id, mmio, cat);
- break;
- case DPU_HW_BLK_PINGPONG:
- hw = dpu_hw_pingpong_init(id, mmio, cat);
- break;
- case DPU_HW_BLK_INTF:
- hw = dpu_hw_intf_init(id, mmio, cat);
- break;
- case DPU_HW_BLK_SSPP:
- /* SSPPs are not managed by the resource manager */
- case DPU_HW_BLK_TOP:
- /* Top is a singleton, not managed in hw_blks list */
- case DPU_HW_BLK_MAX:
- default:
- DPU_ERROR("unsupported block type %d\n", type);
- return -EINVAL;
- }
-
- if (IS_ERR_OR_NULL(hw)) {
- DPU_ERROR("failed hw object creation: type %d, err %ld\n",
- type, PTR_ERR(hw));
- return -EFAULT;
+ if (rm->ctl_blks[i]) {
+ hw = to_dpu_hw_ctl(rm->ctl_blks[i]);
+ dpu_hw_ctl_destroy(hw);
+ }
}
+ for (i = 0; i < ARRAY_SIZE(rm->intf_blks); i++) {
+ struct dpu_hw_intf *hw;
- blk = kzalloc(sizeof(*blk), GFP_KERNEL);
- if (!blk) {
- _dpu_rm_hw_destroy(type, hw);
- return -ENOMEM;
+ if (rm->intf_blks[i]) {
+ hw = to_dpu_hw_intf(rm->intf_blks[i]);
+ dpu_hw_intf_destroy(hw);
+ }
}
- blk->id = id;
- blk->hw = hw;
- blk->enc_id = 0;
- list_add_tail(&blk->list, &rm->hw_blks[type]);
-
return 0;
}
@@ -198,7 +74,6 @@ int dpu_rm_init(struct dpu_rm *rm,
void __iomem *mmio)
{
int rc, i;
- enum dpu_hw_blk_type type;
if (!rm || !cat || !mmio) {
DPU_ERROR("invalid kms\n");
@@ -208,13 +83,9 @@ int dpu_rm_init(struct dpu_rm *rm,
/* Clear, setup lists */
memset(rm, 0, sizeof(*rm));
- mutex_init(&rm->rm_lock);
-
- for (type = 0; type < DPU_HW_BLK_MAX; type++)
- INIT_LIST_HEAD(&rm->hw_blks[type]);
-
/* Interrogate HW catalog and create tracking items for hw blocks */
for (i = 0; i < cat->mixer_count; i++) {
+ struct dpu_hw_mixer *hw;
const struct dpu_lm_cfg *lm = &cat->mixer[i];
if (lm->pingpong == PINGPONG_MAX) {
@@ -222,12 +93,17 @@ int dpu_rm_init(struct dpu_rm *rm,
continue;
}
- rc = _dpu_rm_hw_blk_create(rm, cat, mmio, DPU_HW_BLK_LM,
- cat->mixer[i].id, &cat->mixer[i]);
- if (rc) {
- DPU_ERROR("failed: lm hw not available\n");
+ if (lm->id < LM_0 || lm->id >= LM_MAX) {
+ DPU_ERROR("skip mixer %d with invalid id\n", lm->id);
+ continue;
+ }
+ hw = dpu_hw_lm_init(lm->id, mmio, cat);
+ if (IS_ERR_OR_NULL(hw)) {
+ rc = PTR_ERR(hw);
+ DPU_ERROR("failed lm object creation: err %d\n", rc);
goto fail;
}
+ rm->mixer_blks[lm->id - LM_0] = &hw->base;
if (!rm->lm_max_width) {
rm->lm_max_width = lm->sblk->maxwidth;
@@ -243,35 +119,59 @@ int dpu_rm_init(struct dpu_rm *rm,
}
for (i = 0; i < cat->pingpong_count; i++) {
- rc = _dpu_rm_hw_blk_create(rm, cat, mmio, DPU_HW_BLK_PINGPONG,
- cat->pingpong[i].id, &cat->pingpong[i]);
- if (rc) {
- DPU_ERROR("failed: pp hw not available\n");
+ struct dpu_hw_pingpong *hw;
+ const struct dpu_pingpong_cfg *pp = &cat->pingpong[i];
+
+ if (pp->id < PINGPONG_0 || pp->id >= PINGPONG_MAX) {
+ DPU_ERROR("skip pingpong %d with invalid id\n", pp->id);
+ continue;
+ }
+ hw = dpu_hw_pingpong_init(pp->id, mmio, cat);
+ if (IS_ERR_OR_NULL(hw)) {
+ rc = PTR_ERR(hw);
+ DPU_ERROR("failed pingpong object creation: err %d\n",
+ rc);
goto fail;
}
+ rm->pingpong_blks[pp->id - PINGPONG_0] = &hw->base;
}
for (i = 0; i < cat->intf_count; i++) {
- if (cat->intf[i].type == INTF_NONE) {
+ struct dpu_hw_intf *hw;
+ const struct dpu_intf_cfg *intf = &cat->intf[i];
+
+ if (intf->type == INTF_NONE) {
DPU_DEBUG("skip intf %d with type none\n", i);
continue;
}
-
- rc = _dpu_rm_hw_blk_create(rm, cat, mmio, DPU_HW_BLK_INTF,
- cat->intf[i].id, &cat->intf[i]);
- if (rc) {
- DPU_ERROR("failed: intf hw not available\n");
+ if (intf->id < INTF_0 || intf->id >= INTF_MAX) {
+ DPU_ERROR("skip intf %d with invalid id\n", intf->id);
+ continue;
+ }
+ hw = dpu_hw_intf_init(intf->id, mmio, cat);
+ if (IS_ERR_OR_NULL(hw)) {
+ rc = PTR_ERR(hw);
+ DPU_ERROR("failed intf object creation: err %d\n", rc);
goto fail;
}
+ rm->intf_blks[intf->id - INTF_0] = &hw->base;
}
for (i = 0; i < cat->ctl_count; i++) {
- rc = _dpu_rm_hw_blk_create(rm, cat, mmio, DPU_HW_BLK_CTL,
- cat->ctl[i].id, &cat->ctl[i]);
- if (rc) {
- DPU_ERROR("failed: ctl hw not available\n");
+ struct dpu_hw_ctl *hw;
+ const struct dpu_ctl_cfg *ctl = &cat->ctl[i];
+
+ if (ctl->id < CTL_0 || ctl->id >= CTL_MAX) {
+ DPU_ERROR("skip ctl %d with invalid id\n", ctl->id);
+ continue;
+ }
+ hw = dpu_hw_ctl_init(ctl->id, mmio, cat);
+ if (IS_ERR_OR_NULL(hw)) {
+ rc = PTR_ERR(hw);
+ DPU_ERROR("failed ctl object creation: err %d\n", rc);
goto fail;
}
+ rm->ctl_blks[ctl->id - CTL_0] = &hw->base;
}
return 0;
@@ -279,7 +179,7 @@ int dpu_rm_init(struct dpu_rm *rm,
fail:
dpu_rm_destroy(rm);
- return rc;
+ return rc ? rc : -EFAULT;
}
static bool _dpu_rm_needs_split_display(const struct msm_display_topology *top)
@@ -288,85 +188,81 @@ static bool _dpu_rm_needs_split_display(const struct msm_display_topology *top)
}
/**
+ * _dpu_rm_check_lm_peer - check if a mixer is a peer of the primary
+ * @rm: dpu resource manager handle
+ * @primary_idx: index of primary mixer in rm->mixer_blks[]
+ * @peer_idx: index of other mixer in rm->mixer_blks[]
+ * @Return: true if rm->mixer_blks[peer_idx] is a peer of
+ * rm->mixer_blks[primary_idx]
+ */
+static bool _dpu_rm_check_lm_peer(struct dpu_rm *rm, int primary_idx,
+ int peer_idx)
+{
+ const struct dpu_lm_cfg *prim_lm_cfg;
+ const struct dpu_lm_cfg *peer_cfg;
+
+ prim_lm_cfg = to_dpu_hw_mixer(rm->mixer_blks[primary_idx])->cap;
+ peer_cfg = to_dpu_hw_mixer(rm->mixer_blks[peer_idx])->cap;
+
+ if (!test_bit(peer_cfg->id, &prim_lm_cfg->lm_pair_mask)) {
+ DPU_DEBUG("lm %d not peer of lm %d\n", peer_cfg->id,
+ peer_cfg->id);
+ return false;
+ }
+ return true;
+}
+
+/**
* _dpu_rm_check_lm_and_get_connected_blks - check if proposed layer mixer meets
* proposed use case requirements, incl. hardwired dependent blocks like
* pingpong
* @rm: dpu resource manager handle
* @enc_id: encoder id requesting for allocation
- * @reqs: proposed use case requirements
- * @lm: proposed layer mixer, function checks if lm, and all other hardwired
- * blocks connected to the lm (pp) is available and appropriate
- * @pp: output parameter, pingpong block attached to the layer mixer.
- * NULL if pp was not available, or not matching requirements.
- * @primary_lm: if non-null, this function check if lm is compatible primary_lm
- * as well as satisfying all other requirements
+ * @lm_idx: index of proposed layer mixer in rm->mixer_blks[], function checks
+ * if lm, and all other hardwired blocks connected to the lm (pp) is
+ * available and appropriate
+ * @pp_idx: output parameter, index of pingpong block attached to the layer
+ * mixer in rm->pingpong_blks[].
* @Return: true if lm matches all requirements, false otherwise
*/
-static bool _dpu_rm_check_lm_and_get_connected_blks(
- struct dpu_rm *rm,
- uint32_t enc_id,
- struct dpu_rm_requirements *reqs,
- struct dpu_rm_hw_blk *lm,
- struct dpu_rm_hw_blk **pp,
- struct dpu_rm_hw_blk *primary_lm)
+static bool _dpu_rm_check_lm_and_get_connected_blks(struct dpu_rm *rm,
+ struct dpu_global_state *global_state,
+ uint32_t enc_id, int lm_idx, int *pp_idx)
{
- const struct dpu_lm_cfg *lm_cfg = to_dpu_hw_mixer(lm->hw)->cap;
- struct dpu_rm_hw_iter iter;
-
- *pp = NULL;
-
- DPU_DEBUG("check lm %d pp %d\n",
- lm_cfg->id, lm_cfg->pingpong);
-
- /* Check if this layer mixer is a peer of the proposed primary LM */
- if (primary_lm) {
- const struct dpu_lm_cfg *prim_lm_cfg =
- to_dpu_hw_mixer(primary_lm->hw)->cap;
-
- if (!test_bit(lm_cfg->id, &prim_lm_cfg->lm_pair_mask)) {
- DPU_DEBUG("lm %d not peer of lm %d\n", lm_cfg->id,
- prim_lm_cfg->id);
- return false;
- }
- }
+ const struct dpu_lm_cfg *lm_cfg;
+ int idx;
/* Already reserved? */
- if (RESERVED_BY_OTHER(lm, enc_id)) {
- DPU_DEBUG("lm %d already reserved\n", lm_cfg->id);
+ if (reserved_by_other(global_state->mixer_to_enc_id, lm_idx, enc_id)) {
+ DPU_DEBUG("lm %d already reserved\n", lm_idx + LM_0);
return false;
}
- dpu_rm_init_hw_iter(&iter, 0, DPU_HW_BLK_PINGPONG);
- while (_dpu_rm_get_hw_locked(rm, &iter)) {
- if (iter.blk->id == lm_cfg->pingpong) {
- *pp = iter.blk;
- break;
- }
- }
-
- if (!*pp) {
+ lm_cfg = to_dpu_hw_mixer(rm->mixer_blks[lm_idx])->cap;
+ idx = lm_cfg->pingpong - PINGPONG_0;
+ if (idx < 0 || idx >= ARRAY_SIZE(rm->pingpong_blks)) {
DPU_ERROR("failed to get pp on lm %d\n", lm_cfg->pingpong);
return false;
}
- if (RESERVED_BY_OTHER(*pp, enc_id)) {
- DPU_DEBUG("lm %d pp %d already reserved\n", lm->id,
- (*pp)->id);
+ if (reserved_by_other(global_state->pingpong_to_enc_id, idx, enc_id)) {
+ DPU_DEBUG("lm %d pp %d already reserved\n", lm_cfg->id,
+ lm_cfg->pingpong);
return false;
}
-
+ *pp_idx = idx;
return true;
}
-static int _dpu_rm_reserve_lms(struct dpu_rm *rm, uint32_t enc_id,
+static int _dpu_rm_reserve_lms(struct dpu_rm *rm,
+ struct dpu_global_state *global_state,
+ uint32_t enc_id,
struct dpu_rm_requirements *reqs)
{
- struct dpu_rm_hw_blk *lm[MAX_BLOCKS];
- struct dpu_rm_hw_blk *pp[MAX_BLOCKS];
- struct dpu_rm_hw_iter iter_i, iter_j;
- int lm_count = 0;
- int i, rc = 0;
+ int lm_idx[MAX_BLOCKS];
+ int pp_idx[MAX_BLOCKS];
+ int i, j, lm_count = 0;
if (!reqs->topology.num_lm) {
DPU_ERROR("invalid number of lm: %d\n", reqs->topology.num_lm);
@@ -374,36 +270,40 @@ static int _dpu_rm_reserve_lms(struct dpu_rm *rm, uint32_t enc_id,
}
/* Find a primary mixer */
- dpu_rm_init_hw_iter(&iter_i, 0, DPU_HW_BLK_LM);
- while (lm_count != reqs->topology.num_lm &&
- _dpu_rm_get_hw_locked(rm, &iter_i)) {
- memset(&lm, 0, sizeof(lm));
- memset(&pp, 0, sizeof(pp));
+ for (i = 0; i < ARRAY_SIZE(rm->mixer_blks) &&
+ lm_count < reqs->topology.num_lm; i++) {
+ if (!rm->mixer_blks[i])
+ continue;
lm_count = 0;
- lm[lm_count] = iter_i.blk;
+ lm_idx[lm_count] = i;
- if (!_dpu_rm_check_lm_and_get_connected_blks(
- rm, enc_id, reqs, lm[lm_count],
- &pp[lm_count], NULL))
+ if (!_dpu_rm_check_lm_and_get_connected_blks(rm, global_state,
+ enc_id, i, &pp_idx[lm_count])) {
continue;
+ }
++lm_count;
/* Valid primary mixer found, find matching peers */
- dpu_rm_init_hw_iter(&iter_j, 0, DPU_HW_BLK_LM);
+ for (j = i + 1; j < ARRAY_SIZE(rm->mixer_blks) &&
+ lm_count < reqs->topology.num_lm; j++) {
+ if (!rm->mixer_blks[j])
+ continue;
- while (lm_count != reqs->topology.num_lm &&
- _dpu_rm_get_hw_locked(rm, &iter_j)) {
- if (iter_i.blk == iter_j.blk)
+ if (!_dpu_rm_check_lm_peer(rm, i, j)) {
+ DPU_DEBUG("lm %d not peer of lm %d\n", LM_0 + j,
+ LM_0 + i);
continue;
+ }
- if (!_dpu_rm_check_lm_and_get_connected_blks(
- rm, enc_id, reqs, iter_j.blk,
- &pp[lm_count], iter_i.blk))
+ if (!_dpu_rm_check_lm_and_get_connected_blks(rm,
+ global_state, enc_id, j,
+ &pp_idx[lm_count])) {
continue;
+ }
- lm[lm_count] = iter_j.blk;
+ lm_idx[lm_count] = j;
++lm_count;
}
}
@@ -413,65 +313,65 @@ static int _dpu_rm_reserve_lms(struct dpu_rm *rm, uint32_t enc_id,
return -ENAVAIL;
}
- for (i = 0; i < ARRAY_SIZE(lm); i++) {
- if (!lm[i])
- break;
+ for (i = 0; i < lm_count; i++) {
+ global_state->mixer_to_enc_id[lm_idx[i]] = enc_id;
+ global_state->pingpong_to_enc_id[pp_idx[i]] = enc_id;
- lm[i]->enc_id = enc_id;
- pp[i]->enc_id = enc_id;
-
- trace_dpu_rm_reserve_lms(lm[i]->id, enc_id, pp[i]->id);
+ trace_dpu_rm_reserve_lms(lm_idx[i] + LM_0, enc_id,
+ pp_idx[i] + PINGPONG_0);
}
- return rc;
+ return 0;
}
static int _dpu_rm_reserve_ctls(
struct dpu_rm *rm,
+ struct dpu_global_state *global_state,
uint32_t enc_id,
const struct msm_display_topology *top)
{
- struct dpu_rm_hw_blk *ctls[MAX_BLOCKS];
- struct dpu_rm_hw_iter iter;
- int i = 0, num_ctls = 0;
- bool needs_split_display = false;
-
- memset(&ctls, 0, sizeof(ctls));
+ int ctl_idx[MAX_BLOCKS];
+ int i = 0, j, num_ctls;
+ bool needs_split_display;
/* each hw_intf needs its own hw_ctrl to program its control path */
num_ctls = top->num_intf;
needs_split_display = _dpu_rm_needs_split_display(top);
- dpu_rm_init_hw_iter(&iter, 0, DPU_HW_BLK_CTL);
- while (_dpu_rm_get_hw_locked(rm, &iter)) {
- const struct dpu_hw_ctl *ctl = to_dpu_hw_ctl(iter.blk->hw);
- unsigned long features = ctl->caps->features;
+ for (j = 0; j < ARRAY_SIZE(rm->ctl_blks); j++) {
+ const struct dpu_hw_ctl *ctl;
+ unsigned long features;
bool has_split_display;
- if (RESERVED_BY_OTHER(iter.blk, enc_id))
+ if (!rm->ctl_blks[j])
+ continue;
+ if (reserved_by_other(global_state->ctl_to_enc_id, j, enc_id))
continue;
+ ctl = to_dpu_hw_ctl(rm->ctl_blks[j]);
+ features = ctl->caps->features;
has_split_display = BIT(DPU_CTL_SPLIT_DISPLAY) & features;
- DPU_DEBUG("ctl %d caps 0x%lX\n", iter.blk->id, features);
+ DPU_DEBUG("ctl %d caps 0x%lX\n", rm->ctl_blks[j]->id, features);
if (needs_split_display != has_split_display)
continue;
- ctls[i] = iter.blk;
- DPU_DEBUG("ctl %d match\n", iter.blk->id);
+ ctl_idx[i] = j;
+ DPU_DEBUG("ctl %d match\n", j + CTL_0);
if (++i == num_ctls)
break;
}
if (i != num_ctls)
return -ENAVAIL;
- for (i = 0; i < ARRAY_SIZE(ctls) && i < num_ctls; i++) {
- ctls[i]->enc_id = enc_id;
- trace_dpu_rm_reserve_ctls(ctls[i]->id, enc_id);
+ for (i = 0; i < ARRAY_SIZE(ctl_idx) && i < num_ctls; i++) {
+ global_state->ctl_to_enc_id[ctl_idx[i]] = enc_id;
+ trace_dpu_rm_reserve_ctls(ctl_idx[i] + CTL_0, enc_id);
}
return 0;
@@ -479,40 +379,34 @@ static int _dpu_rm_reserve_ctls(
static int _dpu_rm_reserve_intf(
struct dpu_rm *rm,
+ struct dpu_global_state *global_state,
uint32_t enc_id,
- uint32_t id,
- enum dpu_hw_blk_type type)
+ uint32_t id)
{
- struct dpu_rm_hw_iter iter;
- int ret = 0;
-
- /* Find the block entry in the rm, and note the reservation */
- dpu_rm_init_hw_iter(&iter, 0, type);
- while (_dpu_rm_get_hw_locked(rm, &iter)) {
- if (iter.blk->id != id)
- continue;
+ int idx = id - INTF_0;
- if (RESERVED_BY_OTHER(iter.blk, enc_id)) {
- DPU_ERROR("type %d id %d already reserved\n", type, id);
- return -ENAVAIL;
- }
-
- iter.blk->enc_id = enc_id;
- trace_dpu_rm_reserve_intf(iter.blk->id, enc_id);
- break;
+ if (idx < 0 || idx >= ARRAY_SIZE(rm->intf_blks)) {
+ DPU_ERROR("invalid intf id: %d", id);
+ return -EINVAL;
}
- /* Shouldn't happen since intfs are fixed at probe */
- if (!iter.hw) {
- DPU_ERROR("couldn't find type %d id %d\n", type, id);
+ if (!rm->intf_blks[idx]) {
+ DPU_ERROR("couldn't find intf id %d\n", id);
return -EINVAL;
}
- return ret;
+ if (reserved_by_other(global_state->intf_to_enc_id, idx, enc_id)) {
+ DPU_ERROR("intf id %d already reserved\n", id);
+ return -ENAVAIL;
+ }
+
+ global_state->intf_to_enc_id[idx] = enc_id;
+ return 0;
}
static int _dpu_rm_reserve_intf_related_hw(
struct dpu_rm *rm,
+ struct dpu_global_state *global_state,
uint32_t enc_id,
struct dpu_encoder_hw_resources *hw_res)
{
@@ -523,8 +417,7 @@ static int _dpu_rm_reserve_intf_related_hw(
if (hw_res->intfs[i] == INTF_MODE_NONE)
continue;
id = i + INTF_0;
- ret = _dpu_rm_reserve_intf(rm, enc_id, id,
- DPU_HW_BLK_INTF);
+ ret = _dpu_rm_reserve_intf(rm, global_state, enc_id, id);
if (ret)
return ret;
}
@@ -534,25 +427,27 @@ static int _dpu_rm_reserve_intf_related_hw(
static int _dpu_rm_make_reservation(
struct dpu_rm *rm,
+ struct dpu_global_state *global_state,
struct drm_encoder *enc,
- struct drm_crtc_state *crtc_state,
struct dpu_rm_requirements *reqs)
{
int ret;
- ret = _dpu_rm_reserve_lms(rm, enc->base.id, reqs);
+ ret = _dpu_rm_reserve_lms(rm, global_state, enc->base.id, reqs);
if (ret) {
DPU_ERROR("unable to find appropriate mixers\n");
return ret;
}
- ret = _dpu_rm_reserve_ctls(rm, enc->base.id, &reqs->topology);
+ ret = _dpu_rm_reserve_ctls(rm, global_state, enc->base.id,
+ &reqs->topology);
if (ret) {
DPU_ERROR("unable to find appropriate CTL\n");
return ret;
}
- ret = _dpu_rm_reserve_intf_related_hw(rm, enc->base.id, &reqs->hw_res);
+ ret = _dpu_rm_reserve_intf_related_hw(rm, global_state, enc->base.id,
+ &reqs->hw_res);
if (ret)
return ret;
@@ -560,9 +455,7 @@ static int _dpu_rm_make_reservation(
}
static int _dpu_rm_populate_requirements(
- struct dpu_rm *rm,
struct drm_encoder *enc,
- struct drm_crtc_state *crtc_state,
struct dpu_rm_requirements *reqs,
struct msm_display_topology req_topology)
{
@@ -577,37 +470,36 @@ static int _dpu_rm_populate_requirements(
return 0;
}
-static void _dpu_rm_release_reservation(struct dpu_rm *rm, uint32_t enc_id)
+static void _dpu_rm_clear_mapping(uint32_t *res_mapping, int cnt,
+ uint32_t enc_id)
{
- struct dpu_rm_hw_blk *blk;
- enum dpu_hw_blk_type type;
-
- for (type = 0; type < DPU_HW_BLK_MAX; type++) {
- list_for_each_entry(blk, &rm->hw_blks[type], list) {
- if (blk->enc_id == enc_id) {
- blk->enc_id = 0;
- DPU_DEBUG("rel enc %d %d %d\n", enc_id,
- type, blk->id);
- }
- }
+ int i;
+
+ for (i = 0; i < cnt; i++) {
+ if (res_mapping[i] == enc_id)
+ res_mapping[i] = 0;
}
}
-void dpu_rm_release(struct dpu_rm *rm, struct drm_encoder *enc)
+void dpu_rm_release(struct dpu_global_state *global_state,
+ struct drm_encoder *enc)
{
- mutex_lock(&rm->rm_lock);
-
- _dpu_rm_release_reservation(rm, enc->base.id);
-
- mutex_unlock(&rm->rm_lock);
+ _dpu_rm_clear_mapping(global_state->pingpong_to_enc_id,
+ ARRAY_SIZE(global_state->pingpong_to_enc_id), enc->base.id);
+ _dpu_rm_clear_mapping(global_state->mixer_to_enc_id,
+ ARRAY_SIZE(global_state->mixer_to_enc_id), enc->base.id);
+ _dpu_rm_clear_mapping(global_state->ctl_to_enc_id,
+ ARRAY_SIZE(global_state->ctl_to_enc_id), enc->base.id);
+ _dpu_rm_clear_mapping(global_state->intf_to_enc_id,
+ ARRAY_SIZE(global_state->intf_to_enc_id), enc->base.id);
}
int dpu_rm_reserve(
struct dpu_rm *rm,
+ struct dpu_global_state *global_state,
struct drm_encoder *enc,
struct drm_crtc_state *crtc_state,
- struct msm_display_topology topology,
- bool test_only)
+ struct msm_display_topology topology)
{
struct dpu_rm_requirements reqs;
int ret;
@@ -616,31 +508,75 @@ int dpu_rm_reserve(
if (!drm_atomic_crtc_needs_modeset(crtc_state))
return 0;
- DRM_DEBUG_KMS("reserving hw for enc %d crtc %d test_only %d\n",
- enc->base.id, crtc_state->crtc->base.id, test_only);
+ if (IS_ERR(global_state)) {
+ DPU_ERROR("failed to global state\n");
+ return PTR_ERR(global_state);
+ }
- mutex_lock(&rm->rm_lock);
+ DRM_DEBUG_KMS("reserving hw for enc %d crtc %d\n",
+ enc->base.id, crtc_state->crtc->base.id);
- ret = _dpu_rm_populate_requirements(rm, enc, crtc_state, &reqs,
- topology);
+ ret = _dpu_rm_populate_requirements(enc, &reqs, topology);
if (ret) {
DPU_ERROR("failed to populate hw requirements\n");
- goto end;
+ return ret;
}
- ret = _dpu_rm_make_reservation(rm, enc, crtc_state, &reqs);
- if (ret) {
+ ret = _dpu_rm_make_reservation(rm, global_state, enc, &reqs);
+ if (ret)
DPU_ERROR("failed to reserve hw resources: %d\n", ret);
- _dpu_rm_release_reservation(rm, enc->base.id);
- } else if (test_only) {
- /* test_only: test the reservation and then undo */
- DPU_DEBUG("test_only: discard test [enc: %d]\n",
- enc->base.id);
- _dpu_rm_release_reservation(rm, enc->base.id);
- }
-end:
- mutex_unlock(&rm->rm_lock);
+
return ret;
}
+
+int dpu_rm_get_assigned_resources(struct dpu_rm *rm,
+ struct dpu_global_state *global_state, uint32_t enc_id,
+ enum dpu_hw_blk_type type, struct dpu_hw_blk **blks, int blks_size)
+{
+ struct dpu_hw_blk **hw_blks;
+ uint32_t *hw_to_enc_id;
+ int i, num_blks, max_blks;
+
+ switch (type) {
+ case DPU_HW_BLK_PINGPONG:
+ hw_blks = rm->pingpong_blks;
+ hw_to_enc_id = global_state->pingpong_to_enc_id;
+ max_blks = ARRAY_SIZE(rm->pingpong_blks);
+ break;
+ case DPU_HW_BLK_LM:
+ hw_blks = rm->mixer_blks;
+ hw_to_enc_id = global_state->mixer_to_enc_id;
+ max_blks = ARRAY_SIZE(rm->mixer_blks);
+ break;
+ case DPU_HW_BLK_CTL:
+ hw_blks = rm->ctl_blks;
+ hw_to_enc_id = global_state->ctl_to_enc_id;
+ max_blks = ARRAY_SIZE(rm->ctl_blks);
+ break;
+ case DPU_HW_BLK_INTF:
+ hw_blks = rm->intf_blks;
+ hw_to_enc_id = global_state->intf_to_enc_id;
+ max_blks = ARRAY_SIZE(rm->intf_blks);
+ break;
+ default:
+ DPU_ERROR("blk type %d not managed by rm\n", type);
+ return 0;
+ }
+
+ num_blks = 0;
+ for (i = 0; i < max_blks; i++) {
+ if (hw_to_enc_id[i] != enc_id)
+ continue;
+
+ if (num_blks == blks_size) {
+ DPU_ERROR("More than %d resources assigned to enc %d\n",
+ blks_size, enc_id);
+ break;
+ }
+ blks[num_blks++] = hw_blks[i];
+ }
+
+ return num_blks;
+}
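
A hedged usage sketch for dpu_rm_get_assigned_resources(): an encoder can
pull the blocks reserved under its id back out of the global state. Here
hw_ctls, num_ctl, dpu_enc and MAX_CHANNELS_PER_ENC are illustrative
caller-side names, not part of this patch:

	struct dpu_hw_blk *hw_ctls[MAX_CHANNELS_PER_ENC];
	int i, num_ctl;

	num_ctl = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
			drm_enc->base.id, DPU_HW_BLK_CTL, hw_ctls,
			ARRAY_SIZE(hw_ctls));
	for (i = 0; i < num_ctl; i++)
		dpu_enc->hw_ctls[i] = to_dpu_hw_ctl(hw_ctls[i]);
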
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
index 9c580a017094..6d2b04f306f0 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
@@ -11,37 +11,24 @@
#include "msm_kms.h"
#include "dpu_hw_top.h"
+struct dpu_global_state;
+
/**
* struct dpu_rm - DPU dynamic hardware resource manager
- * @hw_blks: array of lists of hardware resources present in the system, one
- * list per type of hardware block
+ * @pingpong_blks: array of pingpong hardware resources
+ * @mixer_blks: array of layer mixer hardware resources
+ * @ctl_blks: array of ctl hardware resources
+ * @intf_blks: array of intf hardware resources
* @lm_max_width: cached layer mixer maximum width
* @rm_lock: resource manager mutex
*/
struct dpu_rm {
- struct list_head hw_blks[DPU_HW_BLK_MAX];
- uint32_t lm_max_width;
- struct mutex rm_lock;
-};
+ struct dpu_hw_blk *pingpong_blks[PINGPONG_MAX - PINGPONG_0];
+ struct dpu_hw_blk *mixer_blks[LM_MAX - LM_0];
+ struct dpu_hw_blk *ctl_blks[CTL_MAX - CTL_0];
+ struct dpu_hw_blk *intf_blks[INTF_MAX - INTF_0];
-/**
- * struct dpu_rm_hw_blk - resource manager internal structure
- * forward declaration for single iterator definition without void pointer
- */
-struct dpu_rm_hw_blk;
-
-/**
- * struct dpu_rm_hw_iter - iterator for use with dpu_rm
- * @hw: dpu_hw object requested, or NULL on failure
- * @blk: dpu_rm internal block representation. Clients ignore. Used as iterator.
- * @enc_id: DRM ID of Encoder client wishes to search for, or 0 for Any Encoder
- * @type: Hardware Block Type client wishes to search for.
- */
-struct dpu_rm_hw_iter {
- void *hw;
- struct dpu_rm_hw_blk *blk;
- uint32_t enc_id;
- enum dpu_hw_blk_type type;
+ uint32_t lm_max_width;
};
/**
@@ -74,14 +61,13 @@ int dpu_rm_destroy(struct dpu_rm *rm);
* @drm_enc: DRM Encoder handle
* @crtc_state: Proposed Atomic DRM CRTC State handle
* @topology: Pointer to topology info for the display
- * @test_only: Atomic-Test phase, discard results (unless property overrides)
* @Return: 0 on Success otherwise -ERROR
*/
int dpu_rm_reserve(struct dpu_rm *rm,
+ struct dpu_global_state *global_state,
struct drm_encoder *drm_enc,
struct drm_crtc_state *crtc_state,
- struct msm_display_topology topology,
- bool test_only);
+ struct msm_display_topology topology);
/**
* dpu_rm_reserve - Given the encoder for the display chain, release any
@@ -90,31 +76,14 @@ int dpu_rm_reserve(struct dpu_rm *rm,
* @enc: DRM Encoder handle
* @Return: 0 on Success otherwise -ERROR
*/
-void dpu_rm_release(struct dpu_rm *rm, struct drm_encoder *enc);
+void dpu_rm_release(struct dpu_global_state *global_state,
+ struct drm_encoder *enc);
/**
- * dpu_rm_init_hw_iter - setup given iterator for new iteration over hw list
- * using dpu_rm_get_hw
- * @iter: iter object to initialize
- * @enc_id: DRM ID of Encoder client wishes to search for, or 0 for Any Encoder
- * @type: Hardware Block Type client wishes to search for.
- */
-void dpu_rm_init_hw_iter(
- struct dpu_rm_hw_iter *iter,
- uint32_t enc_id,
- enum dpu_hw_blk_type type);
-/**
- * dpu_rm_get_hw - retrieve reserved hw object given encoder and hw type
- * Meant to do a single pass through the hardware list to iteratively
- * retrieve hardware blocks of a given type for a given encoder.
- * Initialize an iterator object.
- * Set hw block type of interest. Set encoder id of interest, 0 for any.
- * Function returns first hw of type for that encoder.
- * Subsequent calls will return the next reserved hw of that type in-order.
- * Iterator HW pointer will be null on failure to find hw.
- * @rm: DPU Resource Manager handle
- * @iter: iterator object
- * @Return: true on match found, false on no match found
+ * Get hw resources of the given type that are assigned to this encoder.
*/
-bool dpu_rm_get_hw(struct dpu_rm *rm, struct dpu_rm_hw_iter *iter);
+int dpu_rm_get_assigned_resources(struct dpu_rm *rm,
+ struct dpu_global_state *global_state, uint32_t enc_id,
+ enum dpu_hw_blk_type type, struct dpu_hw_blk **blks, int blks_size);
#endif /* __DPU_RM_H__ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c
index 93ab36bd8df3..5e8c3f3e6625 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c
@@ -24,7 +24,7 @@ static int _dpu_vbif_wait_for_xin_halt(struct dpu_hw_vbif *vbif, u32 xin_id)
int rc;
if (!vbif || !vbif->cap || !vbif->ops.get_halt_ctrl) {
- DPU_ERROR("invalid arguments vbif %d\n", vbif != 0);
+ DPU_ERROR("invalid arguments vbif %d\n", vbif != NULL);
return -EINVAL;
}
@@ -106,7 +106,7 @@ static u32 _dpu_vbif_get_ot_limit(struct dpu_hw_vbif *vbif,
u32 val;
if (!vbif || !vbif->cap) {
- DPU_ERROR("invalid arguments vbif %d\n", vbif != 0);
+ DPU_ERROR("invalid arguments vbif %d\n", vbif != NULL);
return -EINVAL;
}
@@ -164,7 +164,7 @@ void dpu_vbif_set_ot_limit(struct dpu_kms *dpu_kms,
if (!vbif || !mdp) {
DPU_DEBUG("invalid arguments vbif %d mdp %d\n",
- vbif != 0, mdp != 0);
+ vbif != NULL, mdp != NULL);
return;
}
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
index 41b461128bbc..c902c6503675 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
@@ -625,7 +625,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
if (config->platform.iommu) {
iommu_dev = &pdev->dev;
- if (!iommu_dev->iommu_fwspec)
+ if (!dev_iommu_fwspec_get(iommu_dev))
iommu_dev = iommu_dev->parent;
aspace = msm_gem_address_space_create(iommu_dev,
diff --git a/drivers/gpu/drm/msm/edp/edp.c b/drivers/gpu/drm/msm/edp/edp.c
index a78d6077802b..106a67473af5 100644
--- a/drivers/gpu/drm/msm/edp/edp.c
+++ b/drivers/gpu/drm/msm/edp/edp.c
@@ -178,10 +178,6 @@ int msm_edp_modeset_init(struct msm_edp *edp, struct drm_device *dev,
goto fail;
}
- ret = drm_bridge_attach(encoder, edp->bridge, NULL, 0);
- if (ret)
- goto fail;
-
priv->bridges[priv->num_bridges++] = edp->bridge;
priv->connectors[priv->num_connectors++] = edp->connector;
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
index 3a8646535c14..737453b6e596 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -327,10 +327,6 @@ int msm_hdmi_modeset_init(struct hdmi *hdmi,
goto fail;
}
- ret = drm_bridge_attach(encoder, hdmi->bridge, NULL, 0);
- if (ret)
- goto fail;
-
priv->bridges[priv->num_bridges++] = hdmi->bridge;
priv->connectors[priv->num_connectors++] = hdmi->connector;
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 2a82c23a6e4d..29295dee2a2e 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -444,8 +444,10 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
if (!dev->dma_parms) {
dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms),
GFP_KERNEL);
- if (!dev->dma_parms)
- return -ENOMEM;
+ if (!dev->dma_parms) {
+ ret = -ENOMEM;
+ goto err_msm_uninit;
+ }
}
dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index 9e0953c2b7ce..30584eaf8cc8 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -157,7 +157,17 @@ struct msm_gem_submit {
uint32_t handle;
};
uint64_t iova;
- } bos[0];
+ } bos[];
};
+/* helper to determine if a buffer in a submit should be dumped, used for both
+ * devcoredump and debugfs cmdstream dumping:
+ */
+static inline bool
+should_dump(struct msm_gem_submit *submit, int idx)
+{
+ extern bool rd_full;
+ return rd_full || (submit->bos[idx].flags & MSM_SUBMIT_BO_DUMP);
+}
+
#endif /* __MSM_GEM_H__ */
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 18f3a5c53ffb..615c5cda5389 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -355,16 +355,34 @@ static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
state->cmd = kstrdup(cmd, GFP_KERNEL);
if (submit) {
- int i;
-
- state->bos = kcalloc(submit->nr_cmds,
+ int i, nr = 0;
+
+ /* count # of buffers to dump: */
+ for (i = 0; i < submit->nr_bos; i++)
+ if (should_dump(submit, i))
+ nr++;
+ /* always dump cmd bo's, but don't double count them: */
+ for (i = 0; i < submit->nr_cmds; i++)
+ if (!should_dump(submit, submit->cmd[i].idx))
+ nr++;
+
+ state->bos = kcalloc(nr,
sizeof(struct msm_gpu_state_bo), GFP_KERNEL);
+ for (i = 0; i < submit->nr_bos; i++) {
+ if (should_dump(submit, i)) {
+ msm_gpu_crashstate_get_bo(state, submit->bos[i].obj,
+ submit->bos[i].iova, submit->bos[i].flags);
+ }
+ }
+
for (i = 0; state->bos && i < submit->nr_cmds; i++) {
int idx = submit->cmd[i].idx;
- msm_gpu_crashstate_get_bo(state, submit->bos[idx].obj,
- submit->bos[idx].iova, submit->bos[idx].flags);
+ if (!should_dump(submit, submit->cmd[i].idx)) {
+ msm_gpu_crashstate_get_bo(state, submit->bos[idx].obj,
+ submit->bos[idx].iova, submit->bos[idx].flags);
+ }
}
}
diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c
index af7ceb246c7c..732f65df5c4f 100644
--- a/drivers/gpu/drm/msm/msm_rd.c
+++ b/drivers/gpu/drm/msm/msm_rd.c
@@ -43,7 +43,7 @@
#include "msm_gpu.h"
#include "msm_gem.h"
-static bool rd_full = false;
+bool rd_full = false;
MODULE_PARM_DESC(rd_full, "If true, $debugfs/.../rd will snapshot all buffer contents");
module_param_named(rd_full, rd_full, bool, 0600);
@@ -336,12 +336,6 @@ static void snapshot_buf(struct msm_rd_state *rd,
msm_gem_put_vaddr(&obj->base);
}
-static bool
-should_dump(struct msm_gem_submit *submit, int idx)
-{
- return rd_full || (submit->bos[idx].flags & MSM_SUBMIT_BO_DUMP);
-}
-
/* called under struct_mutex */
void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit,
const char *fmt, ...)
diff --git a/drivers/gpu/drm/nouveau/dispnv04/dac.c b/drivers/gpu/drm/nouveau/dispnv04/dac.c
index e8eef88a8382..ffdd447d8706 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/dac.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/dac.c
@@ -35,7 +35,8 @@
#include <subdev/bios/gpio.h>
#include <subdev/gpio.h>
-#include <subdev/timer.h>
+
+#include <nvif/timer.h>
int nv04_dac_output_offset(struct drm_encoder *encoder)
{
diff --git a/drivers/gpu/drm/nouveau/dispnv04/hw.c b/drivers/gpu/drm/nouveau/dispnv04/hw.c
index 3fdfafa8b0ad..b674d68ef28a 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/hw.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/hw.c
@@ -26,6 +26,7 @@
#include "hw.h"
#include <subdev/bios/pll.h>
+#include <nvif/timer.h>
#define CHIPSET_NFORCE 0x01a0
#define CHIPSET_NFORCE2 0x01f0
diff --git a/drivers/gpu/drm/nouveau/dispnv50/base507c.c b/drivers/gpu/drm/nouveau/dispnv50/base507c.c
index 00a85f1e1a4a..ee782151d332 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/base507c.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/base507c.c
@@ -23,6 +23,7 @@
#include <nvif/cl507c.h>
#include <nvif/event.h>
+#include <nvif/timer.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fourcc.h>
diff --git a/drivers/gpu/drm/nouveau/dispnv50/core507d.c b/drivers/gpu/drm/nouveau/dispnv50/core507d.c
index e7fcfa6e6467..c5152c39c684 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/core507d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/core507d.c
@@ -23,6 +23,7 @@
#include "head.h"
#include <nvif/cl507d.h>
+#include <nvif/timer.h>
#include "nouveau_bo.h"
diff --git a/drivers/gpu/drm/nouveau/dispnv50/corec37d.c b/drivers/gpu/drm/nouveau/dispnv50/corec37d.c
index 3b36dc8d36b2..c03cb987856b 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/corec37d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/corec37d.c
@@ -24,6 +24,8 @@
#include <nouveau_bo.h>
+#include <nvif/timer.h>
+
void
corec37d_wndw_owner(struct nv50_core *core)
{
diff --git a/drivers/gpu/drm/nouveau/dispnv50/curs507a.c b/drivers/gpu/drm/nouveau/dispnv50/curs507a.c
index 397143b639c6..8c5cf096f69b 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/curs507a.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/curs507a.c
@@ -24,21 +24,36 @@
#include "head.h"
#include <nvif/cl507a.h>
+#include <nvif/timer.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_plane_helper.h>
+bool
+curs507a_space(struct nv50_wndw *wndw)
+{
+ nvif_msec(&nouveau_drm(wndw->plane.dev)->client.device, 2,
+ if (nvif_rd32(&wndw->wimm.base.user, 0x0008) >= 4)
+ return true;
+ );
+ WARN_ON(1);
+ return false;
+}
+
static void
curs507a_update(struct nv50_wndw *wndw, u32 *interlock)
{
- nvif_wr32(&wndw->wimm.base.user, 0x0080, 0x00000000);
+ if (curs507a_space(wndw))
+ nvif_wr32(&wndw->wimm.base.user, 0x0080, 0x00000000);
}
static void
curs507a_point(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
- nvif_wr32(&wndw->wimm.base.user, 0x0084, asyw->point.y << 16 |
- asyw->point.x);
+ if (curs507a_space(wndw)) {
+ nvif_wr32(&wndw->wimm.base.user, 0x0084, asyw->point.y << 16 |
+ asyw->point.x);
+ }
}
const struct nv50_wimm_func
diff --git a/drivers/gpu/drm/nouveau/dispnv50/cursc37a.c b/drivers/gpu/drm/nouveau/dispnv50/cursc37a.c
index 23fb29d41efe..96dff4f09f57 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/cursc37a.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/cursc37a.c
@@ -25,14 +25,17 @@
static void
cursc37a_update(struct nv50_wndw *wndw, u32 *interlock)
{
- nvif_wr32(&wndw->wimm.base.user, 0x0200, 0x00000001);
+ if (curs507a_space(wndw))
+ nvif_wr32(&wndw->wimm.base.user, 0x0200, 0x00000001);
}
static void
cursc37a_point(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
- nvif_wr32(&wndw->wimm.base.user, 0x0208, asyw->point.y << 16 |
- asyw->point.x);
+ if (curs507a_space(wndw)) {
+ nvif_wr32(&wndw->wimm.base.user, 0x0208, asyw->point.y << 16 |
+ asyw->point.x);
+ }
}
static const struct nv50_wimm_func
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index 4d1c58468dbc..6be9df1820c5 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -45,6 +45,7 @@
#include <nvif/cl5070.h>
#include <nvif/cl507d.h>
#include <nvif/event.h>
+#include <nvif/timer.h>
#include "nouveau_drv.h"
#include "nouveau_dma.h"
diff --git a/drivers/gpu/drm/nouveau/dispnv50/ovly827e.c b/drivers/gpu/drm/nouveau/dispnv50/ovly827e.c
index 2e68fc736fe1..4f7ce57f2036 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/ovly827e.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/ovly827e.c
@@ -24,6 +24,8 @@
#include <nouveau_bo.h>
+#include <nvif/timer.h>
+
static void
ovly827e_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.h b/drivers/gpu/drm/nouveau/dispnv50/wndw.h
index caf397475918..a7412b9d3a98 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.h
@@ -97,6 +97,7 @@ struct nv50_wimm_func {
};
extern const struct nv50_wimm_func curs507a;
+bool curs507a_space(struct nv50_wndw *);
int wndwc37e_new(struct nouveau_drm *, enum drm_plane_type, int, s32,
struct nv50_wndw **);
diff --git a/drivers/gpu/drm/nouveau/include/nvif/device.h b/drivers/gpu/drm/nouveau/include/nvif/device.h
index 25d969dcf67d..c2a572c67a76 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/device.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/device.h
@@ -23,27 +23,6 @@ int nvif_device_init(struct nvif_object *, u32 handle, s32 oclass, void *, u32,
void nvif_device_fini(struct nvif_device *);
u64 nvif_device_time(struct nvif_device *);
-/* Delay based on GPU time (ie. PTIMER).
- *
- * Will return -ETIMEDOUT unless the loop was terminated with 'break',
- * where it will return the number of nanoseconds taken instead.
- */
-#define nvif_nsec(d,n,cond...) ({ \
- struct nvif_device *_device = (d); \
- u64 _nsecs = (n), _time0 = nvif_device_time(_device); \
- s64 _taken = 0; \
- \
- do { \
- cond \
- } while (_taken = nvif_device_time(_device) - _time0, _taken < _nsecs);\
- \
- if (_taken >= _nsecs) \
- _taken = -ETIMEDOUT; \
- _taken; \
-})
-#define nvif_usec(d,u,cond...) nvif_nsec((d), (u) * 1000, ##cond)
-#define nvif_msec(d,m,cond...) nvif_usec((d), (m) * 1000, ##cond)
-
/*XXX*/
#include <subdev/bios.h>
#include <subdev/fb.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvif/timer.h b/drivers/gpu/drm/nouveau/include/nvif/timer.h
new file mode 100644
index 000000000000..57587a985c4b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvif/timer.h
@@ -0,0 +1,35 @@
+#ifndef __NVIF_TIMER_H__
+#define __NVIF_TIMER_H__
+#include <nvif/os.h>
+
+struct nvif_timer_wait {
+ struct nvif_device *device;
+ u64 limit;
+ u64 time0;
+ u64 time1;
+ int reads;
+};
+
+void nvif_timer_wait_init(struct nvif_device *, u64 nsec,
+ struct nvif_timer_wait *);
+s64 nvif_timer_wait_test(struct nvif_timer_wait *);
+
+/* Delay based on GPU time (ie. PTIMER).
+ *
+ * Will return -ETIMEDOUT unless the loop was terminated with 'break',
+ * where it will return the number of nanoseconds taken instead.
+ */
+#define nvif_nsec(d,n,cond...) ({ \
+ struct nvif_timer_wait _wait; \
+ s64 _taken = 0; \
+ \
+ nvif_timer_wait_init((d), (n), &_wait); \
+ do { \
+ cond \
+ } while ((_taken = nvif_timer_wait_test(&_wait)) >= 0); \
+ \
+ _taken; \
+})
+#define nvif_usec(d,u,cond...) nvif_nsec((d), (u) * 1000, ##cond)
+#define nvif_msec(d,m,cond...) nvif_usec((d), (m) * 1000, ##cond)
+#endif
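
A brief usage sketch for the macros above (device, wndw and the register
offset are placeholders taken from the cursor code later in this series):
terminating the condition body with 'break' makes nvif_msec() evaluate to
the elapsed nanoseconds, otherwise it returns -ETIMEDOUT once the
GPU-time budget is spent:

	s64 taken = nvif_msec(device, 2,
		if (nvif_rd32(&wndw->wimm.base.user, 0x0008) >= 4)
			break;
	);
	if (taken < 0)
		return false; /* timed out */
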
diff --git a/drivers/gpu/drm/nouveau/include/nvif/user.h b/drivers/gpu/drm/nouveau/include/nvif/user.h
index 03c11826b693..6825574d93c2 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/user.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/user.h
@@ -10,6 +10,7 @@ struct nvif_user {
struct nvif_user_func {
void (*doorbell)(struct nvif_user *, u32 token);
+ u64 (*time)(struct nvif_user *);
};
int nvif_user_init(struct nvif_device *);
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 2b4b21b02e40..c40f127de3d0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -1494,8 +1494,13 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg)
ret = nvif_object_map_handle(&mem->mem.object,
&args, argc,
&handle, &length);
- if (ret != 1)
- return ret ? ret : -EINVAL;
+ if (ret != 1) {
+ if (WARN_ON(ret == 0))
+ return -EINVAL;
+ if (ret == -ENOSPC)
+ return -EAGAIN;
+ return ret;
+ }
reg->bus.base = 0;
reg->bus.offset = handle;
diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c
index 0ad5d87b5a8e..ad89e09a0be3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
@@ -28,6 +28,7 @@
#include <nvif/class.h>
#include <nvif/object.h>
+#include <nvif/if000c.h>
#include <nvif/if500b.h>
#include <nvif/if900b.h>
@@ -176,6 +177,7 @@ static vm_fault_t nouveau_dmem_migrate_to_ram(struct vm_fault *vmf)
.end = vmf->address + PAGE_SIZE,
.src = &src,
.dst = &dst,
+ .src_owner = drm->dev,
};
/*
@@ -526,6 +528,7 @@ nouveau_dmem_init(struct nouveau_drm *drm)
drm->dmem->pagemap.type = MEMORY_DEVICE_PRIVATE;
drm->dmem->pagemap.res = *res;
drm->dmem->pagemap.ops = &nouveau_dmem_pagemap_ops;
+ drm->dmem->pagemap.owner = drm->dev;
if (IS_ERR(devm_memremap_pages(device, &drm->dmem->pagemap)))
goto out_free;
@@ -669,12 +672,6 @@ out:
return ret;
}
-static inline bool
-nouveau_dmem_page(struct nouveau_drm *drm, struct page *page)
-{
- return is_device_private_page(page) && drm->dmem == page_to_dmem(page);
-}
-
void
nouveau_dmem_convert_pfn(struct nouveau_drm *drm,
struct hmm_range *range)
@@ -690,18 +687,12 @@ nouveau_dmem_convert_pfn(struct nouveau_drm *drm,
if (page == NULL)
continue;
- if (!(range->pfns[i] & range->flags[HMM_PFN_DEVICE_PRIVATE])) {
+ if (!is_device_private_page(page))
continue;
- }
-
- if (!nouveau_dmem_page(drm, page)) {
- WARN(1, "Some unknown device memory !\n");
- range->pfns[i] = 0;
- continue;
- }
addr = nouveau_dmem_page_addr(page);
range->pfns[i] &= ((1UL << range->pfn_shift) - 1);
range->pfns[i] |= (addr >> PAGE_SHIFT) << range->pfn_shift;
+ range->pfns[i] |= NVIF_VMM_PFNMAP_V0_VRAM;
}
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 6b1629c14dd7..ca4087f5a15b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -618,6 +618,64 @@ nouveau_drm_device_fini(struct drm_device *dev)
kfree(drm);
}
+/*
+ * On some Intel PCIe bridge controllers, doing a
+ * D0 -> D3hot -> D3cold -> D0 sequence causes Nvidia GPUs to not reappear.
+ * Skipping the intermediate D3hot step seems to make it work again. This is
+ * probably caused by not meeting the expectation the involved AML code has
+ * when the GPU is put into D3hot state before invoking it.
+ *
+ * This leads to various manifestations of this issue:
+ * - AML code execution to power on the GPU hits an infinite loop (as the
+ * code waits on device memory to change).
+ * - kernel crashes, as all PCI reads return -1, which most code isn't able
+ * to handle well enough.
+ *
+ * In all cases dmesg will contain at least one line like this:
+ * 'nouveau 0000:01:00.0: Refused to change power state, currently in D3'
+ * followed by a lot of nouveau timeouts.
+ *
+ * Deeper down, the \_SB.PCI0.PEG0.PG00._OFF code writes bit 0x80 to the
+ * undocumented PCI config space register 0x248 of the Intel PCIe bridge
+ * controller (0x1901) in order to change the state of the PCIe link between
+ * the PCIe port and the GPU. There are alternative code paths using other
+ * registers, which seem to work fine (executed pre Windows 8):
+ * - 0xbc bit 0x20 (publicly available documentation claims 'reserved')
+ * - 0xb0 bit 0x10 (link disable)
+ * Changing the conditions inside the firmware by poking into the relevant
+ * addresses does resolve the issue, but those addresses appear to be ACPI
+ * private memory rather than device-accessible memory, so there is no
+ * portable way of changing the conditions.
+ * On an XPS 9560 that means bits [0,3] on \CPEX need to be cleared.
+ *
+ * The only systems where this behavior can be seen are hybrid graphics laptops
+ * with a secondary Nvidia Maxwell, Pascal or Turing GPU. It's unclear whether
+ * this issue only occurs in combination with listed Intel PCIe bridge
+ * controllers and the mentioned GPUs or other devices as well.
+ *
+ * Documentation on the PCIe bridge controller can be found in the
+ * "7th Generation Intel® Processor Families for H Platforms Datasheet Volume 2"
+ * Section "12 PCI Express* Controller (x16) Registers"
+ */
+
+static void quirk_broken_nv_runpm(struct pci_dev *pdev)
+{
+ struct drm_device *dev = pci_get_drvdata(pdev);
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct pci_dev *bridge = pci_upstream_bridge(pdev);
+
+ if (!bridge || bridge->vendor != PCI_VENDOR_ID_INTEL)
+ return;
+
+ switch (bridge->device) {
+ case 0x1901:
+ drm->old_pm_cap = pdev->pm_cap;
+ pdev->pm_cap = 0;
+ NV_INFO(drm, "Disabling PCI power management to avoid bug\n");
+ break;
+ }
+}
+
static int nouveau_drm_probe(struct pci_dev *pdev,
const struct pci_device_id *pent)
{
@@ -699,6 +757,7 @@ static int nouveau_drm_probe(struct pci_dev *pdev,
if (ret)
goto fail_drm_dev_init;
+ quirk_broken_nv_runpm(pdev);
return 0;
fail_drm_dev_init:
@@ -734,7 +793,11 @@ static void
nouveau_drm_remove(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ /* revert our workaround */
+ if (drm->old_pm_cap)
+ pdev->pm_cap = drm->old_pm_cap;
nouveau_drm_device_remove(dev);
pci_disable_device(pdev);
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index c2c332fbde97..2a6519737800 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -140,6 +140,8 @@ struct nouveau_drm {
struct list_head clients;
+ u8 old_pm_cap;
+
struct {
struct agp_bridge_data *bridge;
u32 base;
diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c
index df9bf1fd1bc0..645fedd77e21 100644
--- a/drivers/gpu/drm/nouveau/nouveau_svm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_svm.c
@@ -171,6 +171,11 @@ nouveau_svmm_bind(struct drm_device *dev, void *data,
mm = get_task_mm(current);
down_read(&mm->mmap_sem);
+ if (!cli->svm.svmm) {
+ up_read(&mm->mmap_sem);
+ return -EINVAL;
+ }
+
for (addr = args->va_start, end = args->va_start + size; addr < end;) {
struct vm_area_struct *vma;
unsigned long next;
@@ -179,6 +184,7 @@ nouveau_svmm_bind(struct drm_device *dev, void *data,
if (!vma)
break;
+ addr = max(addr, vma->vm_start);
next = min(vma->vm_end, end);
/* This is a best effort so we ignore errors */
nouveau_dmem_migrate_vma(cli->drm, vma, addr, next);
@@ -367,7 +373,6 @@ static const u64
nouveau_svm_pfn_flags[HMM_PFN_FLAG_MAX] = {
[HMM_PFN_VALID ] = NVIF_VMM_PFNMAP_V0_V,
[HMM_PFN_WRITE ] = NVIF_VMM_PFNMAP_V0_W,
- [HMM_PFN_DEVICE_PRIVATE] = NVIF_VMM_PFNMAP_V0_VRAM,
};
static const u64
@@ -541,7 +546,7 @@ static int nouveau_range_fault(struct nouveau_svmm *svmm,
range.default_flags = 0;
range.pfn_flags_mask = -1UL;
down_read(&mm->mmap_sem);
- ret = hmm_range_fault(&range, 0);
+ ret = hmm_range_fault(&range);
up_read(&mm->mmap_sem);
if (ret <= 0) {
if (ret == 0 || ret == -EBUSY)
@@ -657,9 +662,6 @@ nouveau_svm_fault(struct nvif_notify *notify)
limit = start + (ARRAY_SIZE(args.phys) << PAGE_SHIFT);
if (start < svmm->unmanaged.limit)
limit = min_t(u64, limit, svmm->unmanaged.start);
- else
- if (limit > svmm->unmanaged.start)
- start = max_t(u64, start, svmm->unmanaged.limit);
SVMM_DBG(svmm, "wndw %016llx-%016llx", start, limit);
mm = svmm->notifier.mm;
diff --git a/drivers/gpu/drm/nouveau/nvif/Kbuild b/drivers/gpu/drm/nouveau/nvif/Kbuild
index 50d583d63807..f194d354c1f5 100644
--- a/drivers/gpu/drm/nouveau/nvif/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvif/Kbuild
@@ -8,6 +8,7 @@ nvif-y += nvif/fifo.o
nvif-y += nvif/mem.o
nvif-y += nvif/mmu.o
nvif-y += nvif/notify.o
+nvif-y += nvif/timer.o
nvif-y += nvif/vmm.o
# Usermode classes
diff --git a/drivers/gpu/drm/nouveau/nvif/device.c b/drivers/gpu/drm/nouveau/nvif/device.c
index 1ec101ba3b42..0e92db44bbc8 100644
--- a/drivers/gpu/drm/nouveau/nvif/device.c
+++ b/drivers/gpu/drm/nouveau/nvif/device.c
@@ -27,11 +27,15 @@
u64
nvif_device_time(struct nvif_device *device)
{
- struct nv_device_time_v0 args = {};
- int ret = nvif_object_mthd(&device->object, NV_DEVICE_V0_TIME,
- &args, sizeof(args));
- WARN_ON_ONCE(ret != 0);
- return args.time;
+ if (!device->user.func) {
+ struct nv_device_time_v0 args = {};
+ int ret = nvif_object_mthd(&device->object, NV_DEVICE_V0_TIME,
+ &args, sizeof(args));
+ WARN_ON_ONCE(ret != 0);
+ return args.time;
+ }
+
+ return device->user.func->time(&device->user);
}
void
diff --git a/drivers/gpu/drm/nouveau/nvif/timer.c b/drivers/gpu/drm/nouveau/nvif/timer.c
new file mode 100644
index 000000000000..602c1a258d10
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvif/timer.c
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2020 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <nvif/timer.h>
+#include <nvif/device.h>
+
+s64
+nvif_timer_wait_test(struct nvif_timer_wait *wait)
+{
+ u64 time = nvif_device_time(wait->device);
+
+ if (wait->reads == 0) {
+ wait->time0 = time;
+ wait->time1 = time;
+ }
+
+ if (wait->time1 == time) {
+ if (WARN_ON(wait->reads++ == 16))
+ return -ETIMEDOUT;
+ } else {
+ wait->time1 = time;
+ wait->reads = 1;
+ }
+
+ if (wait->time1 - wait->time0 > wait->limit)
+ return -ETIMEDOUT;
+
+ return wait->time1 - wait->time0;
+}
+
+void
+nvif_timer_wait_init(struct nvif_device *device, u64 nsec,
+ struct nvif_timer_wait *wait)
+{
+ wait->device = device;
+ wait->limit = nsec;
+ wait->reads = 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvif/userc361.c b/drivers/gpu/drm/nouveau/nvif/userc361.c
index 19f9958e7e01..1116f871b272 100644
--- a/drivers/gpu/drm/nouveau/nvif/userc361.c
+++ b/drivers/gpu/drm/nouveau/nvif/userc361.c
@@ -21,6 +21,19 @@
*/
#include <nvif/user.h>
+static u64
+nvif_userc361_time(struct nvif_user *user)
+{
+ u32 hi, lo;
+
+ do {
+ hi = nvif_rd32(&user->object, 0x084);
+ lo = nvif_rd32(&user->object, 0x080);
+ } while (hi != nvif_rd32(&user->object, 0x084));
+
+ return ((u64)hi << 32 | lo);
+}
+
static void
nvif_userc361_doorbell(struct nvif_user *user, u32 token)
{
@@ -30,4 +43,5 @@ nvif_userc361_doorbell(struct nvif_user *user, u32 token)
const struct nvif_user_func
nvif_userc361 = {
.doorbell = nvif_userc361_doorbell,
+ .time = nvif_userc361_time,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index dd8f85b8b3a7..f2f5636efac4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -1981,8 +1981,34 @@ gf100_gr_init_(struct nvkm_gr *base)
{
struct gf100_gr *gr = gf100_gr(base);
struct nvkm_subdev *subdev = &base->engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ bool reset = device->chipset == 0x137 || device->chipset == 0x138;
u32 ret;
+ /* On certain GP107/GP108 boards, we trigger a weird issue where
+ * GR will stop responding to PRI accesses after we've asked the
+ * SEC2 RTOS to boot the GR falcons. This happens with far more
+ * frequency when cold-booting a board (ie. returning from D3).
+ *
+ * The root cause for this is not known and has proven difficult
+ * to isolate, with many avenues being dead-ends.
+ *
+ * A workaround was discovered by Karol, whereby putting GR into
+ * reset for an extended period right before initialisation
+ * prevents the problem from occurring.
+ *
+ * XXX: As RM does not require any such workaround, this is more
+ * of a hack than a true fix.
+ */
+ reset = nvkm_boolopt(device->cfgopt, "NvGrResetWar", reset);
+ if (reset) {
+ nvkm_mask(device, 0x000200, 0x00001000, 0x00000000);
+ nvkm_rd32(device, 0x000200);
+ msleep(50);
+ nvkm_mask(device, 0x000200, 0x00001000, 0x00001000);
+ nvkm_rd32(device, 0x000200);
+ }
+
nvkm_pmu_pgob(gr->base.engine.subdev.device->pmu, false);
ret = nvkm_falcon_get(&gr->fecs.falcon, subdev);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp108.c b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp108.c
index 232a9d7c51e5..e770c9497871 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp108.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp108.c
@@ -25,6 +25,9 @@
MODULE_FIRMWARE("nvidia/gp108/sec2/desc.bin");
MODULE_FIRMWARE("nvidia/gp108/sec2/image.bin");
MODULE_FIRMWARE("nvidia/gp108/sec2/sig.bin");
+MODULE_FIRMWARE("nvidia/gv100/sec2/desc.bin");
+MODULE_FIRMWARE("nvidia/gv100/sec2/image.bin");
+MODULE_FIRMWARE("nvidia/gv100/sec2/sig.bin");
static const struct nvkm_sec2_fwif
gp108_sec2_fwif[] = {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/tu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/tu102.c
index b6ebd95c9ba1..a8295653ceab 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/tu102.c
@@ -56,6 +56,22 @@ tu102_sec2_nofw(struct nvkm_sec2 *sec2, int ver,
return 0;
}
+MODULE_FIRMWARE("nvidia/tu102/sec2/desc.bin");
+MODULE_FIRMWARE("nvidia/tu102/sec2/image.bin");
+MODULE_FIRMWARE("nvidia/tu102/sec2/sig.bin");
+MODULE_FIRMWARE("nvidia/tu104/sec2/desc.bin");
+MODULE_FIRMWARE("nvidia/tu104/sec2/image.bin");
+MODULE_FIRMWARE("nvidia/tu104/sec2/sig.bin");
+MODULE_FIRMWARE("nvidia/tu106/sec2/desc.bin");
+MODULE_FIRMWARE("nvidia/tu106/sec2/image.bin");
+MODULE_FIRMWARE("nvidia/tu106/sec2/sig.bin");
+MODULE_FIRMWARE("nvidia/tu116/sec2/desc.bin");
+MODULE_FIRMWARE("nvidia/tu116/sec2/image.bin");
+MODULE_FIRMWARE("nvidia/tu116/sec2/sig.bin");
+MODULE_FIRMWARE("nvidia/tu117/sec2/desc.bin");
+MODULE_FIRMWARE("nvidia/tu117/sec2/image.bin");
+MODULE_FIRMWARE("nvidia/tu117/sec2/sig.bin");
+
static const struct nvkm_sec2_fwif
tu102_sec2_fwif[] = {
{ 0, gp102_sec2_load, &tu102_sec2, &gp102_sec2_acr_1 },
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowpci.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowpci.c
index 9b91da09dc5f..8d9812a51ef6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowpci.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowpci.c
@@ -101,9 +101,13 @@ platform_init(struct nvkm_bios *bios, const char *name)
else
return ERR_PTR(-ENODEV);
+ if (!pdev->rom || pdev->romlen == 0)
+ return ERR_PTR(-ENODEV);
+
if ((priv = kmalloc(sizeof(*priv), GFP_KERNEL))) {
+ priv->size = pdev->romlen;
if (ret = -ENODEV,
- (priv->rom = pci_platform_rom(pdev, &priv->size)))
+ (priv->rom = ioremap(pdev->rom, pdev->romlen)))
return priv;
kfree(priv);
}
@@ -111,11 +115,20 @@ platform_init(struct nvkm_bios *bios, const char *name)
return ERR_PTR(ret);
}
+static void
+platform_fini(void *data)
+{
+ struct priv *priv = data;
+
+ iounmap(priv->rom);
+ kfree(priv);
+}
+
const struct nvbios_source
nvbios_platform = {
.name = "PLATFORM",
.init = platform_init,
- .fini = (void(*)(void *))kfree,
+ .fini = platform_fini,
.read = pcirom_read,
.rw = true,
};
diff --git a/drivers/gpu/drm/omapdrm/dss/dss.c b/drivers/gpu/drm/omapdrm/dss/dss.c
index b76fc2b56227..4d5739fa4a5d 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss.c
+++ b/drivers/gpu/drm/omapdrm/dss/dss.c
@@ -1348,9 +1348,15 @@ static int dss_component_compare(struct device *dev, void *data)
return dev == child;
}
+struct dss_component_match_data {
+ struct device *dev;
+ struct component_match **match;
+};
+
static int dss_add_child_component(struct device *dev, void *data)
{
- struct component_match **match = data;
+ struct dss_component_match_data *cmatch = data;
+ struct component_match **match = cmatch->match;
/*
* HACK
@@ -1361,7 +1367,17 @@ static int dss_add_child_component(struct device *dev, void *data)
if (strstr(dev_name(dev), "rfbi"))
return 0;
- component_match_add(dev->parent, match, dss_component_compare, dev);
+ /*
+ * Handle possible interconnect target modules defined within the DSS.
+ * The DSS components can be children of an interconnect target module
+ * after the device tree has been updated for the module data.
+ * See also omapdss_boot_init() for compatible fixup.
+ */
+ if (strstr(dev_name(dev), "target-module"))
+ return device_for_each_child(dev, cmatch,
+ dss_add_child_component);
+
+ component_match_add(cmatch->dev, match, dss_component_compare, dev);
return 0;
}
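/*
 * Illustration (hypothetical device-tree shape, not part of the patch):
 * with ti-sysc interconnect target modules, the DSS components sit one
 * level deeper than before, e.g.
 *
 *	dss: dss@58000000 {
 *		target-module@58001000 {
 *			dispc@0 { ... };
 *		};
 *	};
 *
 * so the match walk above recurses through "target-module" nodes rather
 * than adding the module itself as a component.
 */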
@@ -1404,6 +1420,7 @@ static int dss_probe_hardware(struct dss_device *dss)
static int dss_probe(struct platform_device *pdev)
{
const struct soc_device_attribute *soc;
+ struct dss_component_match_data cmatch;
struct component_match *match = NULL;
struct resource *dss_mem;
struct dss_device *dss;
@@ -1481,7 +1498,9 @@ static int dss_probe(struct platform_device *pdev)
omapdss_gather_components(&pdev->dev);
- device_for_each_child(&pdev->dev, &match, dss_add_child_component);
+ cmatch.dev = &pdev->dev;
+ cmatch.match = &match;
+ device_for_each_child(&pdev->dev, &cmatch, dss_add_child_component);
r = component_master_add_with_match(&pdev->dev, &dss_component_ops, match);
if (r)
diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c b/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c
index 00372f4ce711..72a7da7bfff1 100644
--- a/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c
+++ b/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c
@@ -178,9 +178,24 @@ static const struct of_device_id omapdss_of_fixups_whitelist[] __initconst = {
{},
};
+static void __init omapdss_find_children(struct device_node *np)
+{
+ struct device_node *child;
+
+ for_each_available_child_of_node(np, child) {
+ if (!of_find_property(child, "compatible", NULL))
+ continue;
+
+ omapdss_walk_device(child, true);
+
+ if (of_device_is_compatible(child, "ti,sysc"))
+ omapdss_find_children(child);
+ }
+}
+
static int __init omapdss_boot_init(void)
{
- struct device_node *dss, *child;
+ struct device_node *dss;
INIT_LIST_HEAD(&dss_conv_list);
@@ -190,13 +205,7 @@ static int __init omapdss_boot_init(void)
goto put_node;
omapdss_walk_device(dss, true);
-
- for_each_available_child_of_node(dss, child) {
- if (!of_find_property(child, "compatible", NULL))
- continue;
-
- omapdss_walk_device(child, true);
- }
+ omapdss_find_children(dss);
while (!list_empty(&dss_conv_list)) {
struct dss_conv_node *n;
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 44a1f5dfb571..003b54ea90d5 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -361,7 +361,6 @@ static int panel_dpi_probe(struct device *dev,
struct panel_desc *desc;
unsigned int bus_flags;
struct videomode vm;
- const char *mapping;
int ret;
np = dev->of_node;
@@ -386,16 +385,6 @@ static int panel_dpi_probe(struct device *dev,
of_property_read_u32(np, "width-mm", &desc->size.width);
of_property_read_u32(np, "height-mm", &desc->size.height);
- of_property_read_string(np, "data-mapping", &mapping);
- if (!strcmp(mapping, "rgb24"))
- desc->bus_format = MEDIA_BUS_FMT_RGB888_1X24;
- else if (!strcmp(mapping, "rgb565"))
- desc->bus_format = MEDIA_BUS_FMT_RGB565_1X16;
- else if (!strcmp(mapping, "bgr666"))
- desc->bus_format = MEDIA_BUS_FMT_RGB666_1X18;
- else if (!strcmp(mapping, "lvds666"))
- desc->bus_format = MEDIA_BUS_FMT_RGB666_1X24_CPADHI;
-
/* Extract bus_flags from display_timing */
bus_flags = 0;
vm.flags = timing->flags;
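/*
 * Context for the block removed above: it also read "data-mapping" without
 * checking of_property_read_string()'s return value, so a missing property
 * left `mapping` uninitialized before the strcmp() calls.
 */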
diff --git a/drivers/gpu/drm/radeon/.gitignore b/drivers/gpu/drm/radeon/.gitignore
index 403eb3a5891f..9c1a94153983 100644
--- a/drivers/gpu/drm/radeon/.gitignore
+++ b/drivers/gpu/drm/radeon/.gitignore
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
mkregtable
*_reg_safe.h
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
index c42f73fad3e3..bb29cf02974d 100644
--- a/drivers/gpu/drm/radeon/radeon_bios.c
+++ b/drivers/gpu/drm/radeon/radeon_bios.c
@@ -108,25 +108,33 @@ static bool radeon_read_bios(struct radeon_device *rdev)
static bool radeon_read_platform_bios(struct radeon_device *rdev)
{
- uint8_t __iomem *bios;
- size_t size;
+ phys_addr_t rom = rdev->pdev->rom;
+ size_t romlen = rdev->pdev->romlen;
+ void __iomem *bios;
rdev->bios = NULL;
- bios = pci_platform_rom(rdev->pdev, &size);
- if (!bios) {
+ if (!rom || romlen == 0)
return false;
- }
- if (size == 0 || bios[0] != 0x55 || bios[1] != 0xaa) {
+ rdev->bios = kzalloc(romlen, GFP_KERNEL);
+ if (!rdev->bios)
return false;
- }
- rdev->bios = kmemdup(bios, size, GFP_KERNEL);
- if (rdev->bios == NULL) {
- return false;
- }
+
+ bios = ioremap(rom, romlen);
+ if (!bios)
+ goto free_bios;
+
+ memcpy_fromio(rdev->bios, bios, romlen);
+ iounmap(bios);
+
+ if (rdev->bios[0] != 0x55 || rdev->bios[1] != 0xaa)
+ goto free_bios;
return true;
+free_bios:
+ kfree(rdev->bios);
+ return false;
}
#ifdef CONFIG_ACPI
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index badf1b6d1549..5d50c9edbe80 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -523,7 +523,7 @@ static int radeon_ttm_tt_pin_userptr(struct ttm_tt *ttm)
r = -ENOMEM;
nents = dma_map_sg(rdev->dev, ttm->sg->sgl, ttm->sg->nents, direction);
- if (nents != ttm->sg->nents)
+ if (nents == 0)
goto release_sg;
drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
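/*
 * Context for the check above: dma_map_sg() may legitimately return fewer
 * entries than it was handed when an IOMMU coalesces contiguous segments,
 * so comparing the result against ttm->sg->nents rejected valid mappings;
 * only a return value of 0 indicates failure.
 */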
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 05e8b4d0af3f..2cb85dbe728f 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -2979,7 +2979,6 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
if (rdev->family == CHIP_HAINAN) {
if ((rdev->pdev->revision == 0x81) ||
- (rdev->pdev->revision == 0x83) ||
(rdev->pdev->revision == 0xC3) ||
(rdev->pdev->device == 0x6664) ||
(rdev->pdev->device == 0x6665) ||
diff --git a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
index 848522797314..ade2327a10e2 100644
--- a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
+++ b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
@@ -322,15 +322,9 @@ static int rockchip_dp_bind(struct device *dev, struct device *master,
void *data)
{
struct rockchip_dp_device *dp = dev_get_drvdata(dev);
- const struct rockchip_dp_chip_data *dp_data;
struct drm_device *drm_dev = data;
int ret;
- dp_data = of_device_get_match_data(dev);
- if (!dp_data)
- return -ENODEV;
-
- dp->data = dp_data;
dp->drm_dev = drm_dev;
ret = rockchip_dp_drm_create_encoder(dp);
@@ -341,16 +335,9 @@ static int rockchip_dp_bind(struct device *dev, struct device *master,
dp->plat_data.encoder = &dp->encoder;
- dp->plat_data.dev_type = dp->data->chip_type;
- dp->plat_data.power_on_start = rockchip_dp_poweron_start;
- dp->plat_data.power_off = rockchip_dp_powerdown;
- dp->plat_data.get_modes = rockchip_dp_get_modes;
-
- dp->adp = analogix_dp_bind(dev, dp->drm_dev, &dp->plat_data);
- if (IS_ERR(dp->adp)) {
- ret = PTR_ERR(dp->adp);
+ ret = analogix_dp_bind(dp->adp, drm_dev);
+ if (ret)
goto err_cleanup_encoder;
- }
return 0;
err_cleanup_encoder:
@@ -365,8 +352,6 @@ static void rockchip_dp_unbind(struct device *dev, struct device *master,
analogix_dp_unbind(dp->adp);
dp->encoder.funcs->destroy(&dp->encoder);
-
- dp->adp = ERR_PTR(-ENODEV);
}
static const struct component_ops rockchip_dp_component_ops = {
@@ -377,10 +362,15 @@ static const struct component_ops rockchip_dp_component_ops = {
static int rockchip_dp_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ const struct rockchip_dp_chip_data *dp_data;
struct drm_panel *panel = NULL;
struct rockchip_dp_device *dp;
int ret;
+ dp_data = of_device_get_match_data(dev);
+ if (!dp_data)
+ return -ENODEV;
+
ret = drm_of_find_panel_or_bridge(dev->of_node, 1, 0, &panel, NULL);
if (ret < 0)
return ret;
@@ -391,7 +381,12 @@ static int rockchip_dp_probe(struct platform_device *pdev)
dp->dev = dev;
dp->adp = ERR_PTR(-ENODEV);
+ dp->data = dp_data;
dp->plat_data.panel = panel;
+ dp->plat_data.dev_type = dp->data->chip_type;
+ dp->plat_data.power_on_start = rockchip_dp_poweron_start;
+ dp->plat_data.power_off = rockchip_dp_powerdown;
+ dp->plat_data.get_modes = rockchip_dp_get_modes;
ret = rockchip_dp_of_probe(dp);
if (ret < 0)
@@ -399,12 +394,19 @@ static int rockchip_dp_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, dp);
+ dp->adp = analogix_dp_probe(dev, &dp->plat_data);
+ if (IS_ERR(dp->adp))
+ return PTR_ERR(dp->adp);
+
return component_add(dev, &rockchip_dp_component_ops);
}
static int rockchip_dp_remove(struct platform_device *pdev)
{
+ struct rockchip_dp_device *dp = platform_get_drvdata(pdev);
+
component_del(&pdev->dev, &rockchip_dp_component_ops);
+ analogix_dp_remove(dp->adp);
return 0;
}
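/*
 * Design note on the split above: everything that does not need a struct
 * drm_device (match data, plat_data callbacks, analogix_dp_probe()) now
 * happens once at platform-probe time, leaving the component bind/unbind
 * callbacks to only attach and detach the encoder from the DRM device.
 */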
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index a18eabf692e4..8e731ed0d9d9 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -651,7 +651,9 @@ static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
trace_drm_sched_process_job(s_fence);
+ dma_fence_get(&s_fence->finished);
drm_sched_fence_finished(s_fence);
+ dma_fence_put(&s_fence->finished);
wake_up_interruptible(&sched->wake_up_worker);
}
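/*
 * Context for the get/put pair above: signalling the finished fence can
 * run callbacks that drop the last reference to it, so a temporary
 * reference pins s_fence across the call. The general dma_fence pattern:
 *
 *	dma_fence_get(f);   // pin the fence
 *	...signal or otherwise use f...
 *	dma_fence_put(f);   // may free it here instead
 */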
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 389128b8c4dd..0ad30b112982 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -59,9 +59,10 @@ static vm_fault_t ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
/*
* If possible, avoid waiting for GPU with mmap_sem
- * held.
+ * held. We only do this if the fault allows retry and this
+ * is the first attempt.
*/
- if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
+ if (fault_flag_allow_retry_first(vmf->flags)) {
ret = VM_FAULT_RETRY;
if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
goto out_unlock;
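/*
 * A minimal sketch of the helper used above, as introduced by the mm
 * series this patch depends on (the in-tree definition in linux/mm.h may
 * differ in detail): retry is only offered on the first attempt, i.e.
 * while FAULT_FLAG_TRIED is not yet set.
 */
static inline bool fault_flag_allow_retry_first(unsigned int flags)
{
	return (flags & FAULT_FLAG_ALLOW_RETRY) &&
	       !(flags & FAULT_FLAG_TRIED);
}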
@@ -135,7 +136,12 @@ vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo,
* for the buffer to become unreserved.
*/
if (unlikely(!dma_resv_trylock(bo->base.resv))) {
- if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
+ /*
+ * If the fault allows retry and this is the first
+ * fault attempt, we try to release the mmap_sem
+	 * before waiting.
+ */
+ if (fault_flag_allow_retry_first(vmf->flags)) {
if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
ttm_bo_get(bo);
up_read(&vmf->vma->vm_mm->mmap_sem);
@@ -156,6 +162,89 @@ vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo,
}
EXPORT_SYMBOL(ttm_bo_vm_reserve);
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+/**
+ * ttm_bo_vm_insert_huge - Insert a pfn for PUD or PMD faults
+ * @vmf: Fault data
+ * @bo: The buffer object
+ * @page_offset: Page offset from bo start
+ * @fault_page_size: The size of the fault in pages.
+ * @pgprot: The page protections.
+ *
+ * Does additional checking whether it's possible to insert a PUD or PMD
+ * pfn and performs the insertion.
+ *
+ * Return: VM_FAULT_NOPAGE on successful insertion, VM_FAULT_FALLBACK if
+ * a huge fault was not possible, or on insertion error.
+ */
+static vm_fault_t ttm_bo_vm_insert_huge(struct vm_fault *vmf,
+ struct ttm_buffer_object *bo,
+ pgoff_t page_offset,
+ pgoff_t fault_page_size,
+ pgprot_t pgprot)
+{
+ pgoff_t i;
+ vm_fault_t ret;
+ unsigned long pfn;
+ pfn_t pfnt;
+ struct ttm_tt *ttm = bo->ttm;
+ bool write = vmf->flags & FAULT_FLAG_WRITE;
+
+ /* Fault should not cross bo boundary. */
+ page_offset &= ~(fault_page_size - 1);
+ if (page_offset + fault_page_size > bo->num_pages)
+ goto out_fallback;
+
+ if (bo->mem.bus.is_iomem)
+ pfn = ttm_bo_io_mem_pfn(bo, page_offset);
+ else
+ pfn = page_to_pfn(ttm->pages[page_offset]);
+
+ /* pfn must be fault_page_size aligned. */
+ if ((pfn & (fault_page_size - 1)) != 0)
+ goto out_fallback;
+
+ /* Check that memory is contiguous. */
+ if (!bo->mem.bus.is_iomem) {
+ for (i = 1; i < fault_page_size; ++i) {
+ if (page_to_pfn(ttm->pages[page_offset + i]) != pfn + i)
+ goto out_fallback;
+ }
+ } else if (bo->bdev->driver->io_mem_pfn) {
+ for (i = 1; i < fault_page_size; ++i) {
+ if (ttm_bo_io_mem_pfn(bo, page_offset + i) != pfn + i)
+ goto out_fallback;
+ }
+ }
+
+ pfnt = __pfn_to_pfn_t(pfn, PFN_DEV);
+ if (fault_page_size == (HPAGE_PMD_SIZE >> PAGE_SHIFT))
+ ret = vmf_insert_pfn_pmd_prot(vmf, pfnt, pgprot, write);
+#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
+ else if (fault_page_size == (HPAGE_PUD_SIZE >> PAGE_SHIFT))
+ ret = vmf_insert_pfn_pud_prot(vmf, pfnt, pgprot, write);
+#endif
+ else
+ WARN_ON_ONCE(ret = VM_FAULT_FALLBACK);
+
+ if (ret != VM_FAULT_NOPAGE)
+ goto out_fallback;
+
+ return VM_FAULT_NOPAGE;
+out_fallback:
+ count_vm_event(THP_FAULT_FALLBACK);
+ return VM_FAULT_FALLBACK;
+}
+#else
+static vm_fault_t ttm_bo_vm_insert_huge(struct vm_fault *vmf,
+ struct ttm_buffer_object *bo,
+ pgoff_t page_offset,
+ pgoff_t fault_page_size,
+ pgprot_t pgprot)
+{
+ return VM_FAULT_FALLBACK;
+}
+#endif
+
/**
* ttm_bo_vm_fault_reserved - TTM fault helper
* @vmf: The struct vm_fault given as argument to the fault callback
@@ -163,6 +252,7 @@ EXPORT_SYMBOL(ttm_bo_vm_reserve);
* @num_prefault: Maximum number of prefault pages. The caller may want to
* specify this based on madvise settings and the size of the GPU object
* backed by the memory.
+ * @fault_page_size: The size of the fault in pages.
*
* This function inserts one or more page table entries pointing to the
* memory backing the buffer object, and then returns a return code
@@ -176,7 +266,8 @@ EXPORT_SYMBOL(ttm_bo_vm_reserve);
*/
vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
pgprot_t prot,
- pgoff_t num_prefault)
+ pgoff_t num_prefault,
+ pgoff_t fault_page_size)
{
struct vm_area_struct *vma = vmf->vma;
struct ttm_buffer_object *bo = vma->vm_private_data;
@@ -268,6 +359,13 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
prot = pgprot_decrypted(prot);
}
+ /* We don't prefault on huge faults. Yet. */
+ if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && fault_page_size != 1) {
+ ret = ttm_bo_vm_insert_huge(vmf, bo, page_offset,
+ fault_page_size, prot);
+ goto out_io_unlock;
+ }
+
/*
* Speculatively prefault a number of pages. Only error on
* first page.
@@ -334,7 +432,7 @@ vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
return ret;
prot = vma->vm_page_prot;
- ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT);
+ ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT, 1);
if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
return ret;
@@ -445,7 +543,7 @@ static const struct vm_operations_struct ttm_bo_vm_ops = {
.fault = ttm_bo_vm_fault,
.open = ttm_bo_vm_open,
.close = ttm_bo_vm_close,
- .access = ttm_bo_vm_access
+ .access = ttm_bo_vm_access,
};
static struct ttm_buffer_object *ttm_bo_vm_lookup(struct ttm_bo_device *bdev,
diff --git a/drivers/gpu/drm/vboxvideo/vbox_drv.c b/drivers/gpu/drm/vboxvideo/vbox_drv.c
index d2112b43395a..282348e071fe 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_drv.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_drv.c
@@ -42,6 +42,10 @@ static int vbox_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (!vbox_check_supported(VBE_DISPI_ID_HGSMI))
return -ENODEV;
+ ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, "vboxvideodrmfb");
+ if (ret)
+ return ret;
+
vbox = kzalloc(sizeof(*vbox), GFP_KERNEL);
if (!vbox)
return -ENOMEM;
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index 8f956156eb8e..625bfcf52dc4 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -673,11 +673,23 @@ static enum drm_mode_status
vc4_hdmi_encoder_mode_valid(struct drm_encoder *crtc,
const struct drm_display_mode *mode)
{
- /* HSM clock must be 108% of the pixel clock. Additionally,
- * the AXI clock needs to be at least 25% of pixel clock, but
- * HSM ends up being the limiting factor.
+ /*
+ * As stated in RPi's vc4 firmware "HDMI state machine (HSM) clock must
+ * be faster than pixel clock, infinitesimally faster, tested in
+ * simulation. Otherwise, exact value is unimportant for HDMI
+ * operation." This conflicts with bcm2835's vc4 documentation, which
+ * states HSM's clock has to be at least 108% of the pixel clock.
+ *
+	 * Real-life tests reveal that vc4's firmware statement holds up, and
+	 * users are able to use pixel clocks closer to HSM's, namely for
+	 * 1920x1200@60Hz. So it was decided to leave a 1% margin between
+	 * both clocks, which, for RPi0-3, implies a maximum pixel clock of
+	 * 162MHz.
+ *
+ * Additionally, the AXI clock needs to be at least 25% of
+ * pixel clock, but HSM ends up being the limiting factor.
*/
- if (mode->clock > HSM_CLOCK_FREQ / (1000 * 108 / 100))
+ if (mode->clock > HSM_CLOCK_FREQ / (1000 * 101 / 100))
return MODE_CLOCK_HIGH;
return MODE_OK;
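/*
 * Worked numbers (assuming HSM_CLOCK_FREQ == 163682864 Hz, the value used
 * on RPi0-3, and mode->clock in kHz): the new limit is
 * 163682864 / 1010 ~= 162061 kHz, i.e. the ~162MHz ceiling quoted above,
 * so 1920x1200@60Hz (~154MHz) now passes, while the old 108% rule capped
 * pixel clocks at 163682864 / 1080 ~= 151558 kHz and rejected it.
 */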
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile
index 5c3515e8cce1..31f85f09f1fc 100644
--- a/drivers/gpu/drm/vmwgfx/Makefile
+++ b/drivers/gpu/drm/vmwgfx/Makefile
@@ -11,4 +11,5 @@ vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
vmwgfx_validation.o vmwgfx_page_dirty.o vmwgfx_streamoutput.o \
ttm_object.o ttm_lock.o
+vmwgfx-$(CONFIG_TRANSPARENT_HUGEPAGE) += vmwgfx_thp.o
obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 71e45b568511..c2247a893ed4 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -1247,6 +1247,18 @@ static void vmw_remove(struct pci_dev *pdev)
pci_disable_device(pdev);
}
+static unsigned long
+vmw_get_unmapped_area(struct file *file, unsigned long uaddr,
+ unsigned long len, unsigned long pgoff,
+ unsigned long flags)
+{
+ struct drm_file *file_priv = file->private_data;
+ struct vmw_private *dev_priv = vmw_priv(file_priv->minor->dev);
+
+ return drm_get_unmapped_area(file, uaddr, len, pgoff, flags,
+ &dev_priv->vma_manager);
+}
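/*
 * Context for the hook above: drm_get_unmapped_area(), added earlier in
 * the same huge-page series, tries to pick mmap addresses whose offsets
 * into the buffer object are PMD/PUD aligned, which is what lets the
 * huge_fault paths wired up elsewhere in this patch actually trigger.
 */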
+
static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
void *ptr)
{
@@ -1418,6 +1430,7 @@ static const struct file_operations vmwgfx_driver_fops = {
.compat_ioctl = vmw_compat_ioctl,
#endif
.llseek = noop_llseek,
+ .get_unmapped_area = vmw_get_unmapped_area,
};
static struct drm_driver driver = {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 5ddbcb9f6df4..8cdcd6e5f9e1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -1000,6 +1000,7 @@ extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma);
extern void vmw_validation_mem_init_ttm(struct vmw_private *dev_priv,
size_t gran);
+
/**
* TTM buffer object driver - vmwgfx_ttm_buffer.c
*/
@@ -1510,6 +1511,17 @@ void vmw_bo_dirty_unmap(struct vmw_buffer_object *vbo,
pgoff_t start, pgoff_t end);
vm_fault_t vmw_bo_vm_fault(struct vm_fault *vmf);
vm_fault_t vmw_bo_vm_mkwrite(struct vm_fault *vmf);
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+vm_fault_t vmw_bo_vm_huge_fault(struct vm_fault *vmf,
+ enum page_entry_size pe_size);
+#endif
+
+/* Transparent hugepage support - vmwgfx_thp.c */
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+extern const struct ttm_mem_type_manager_func vmw_thp_func;
+#else
+#define vmw_thp_func ttm_bo_manager_func
+#endif
/**
* VMW_DEBUG_KMS - Debug output for kernel mode-setting
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
index 60cfbfadd3f2..d4d66532f9c9 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
@@ -473,11 +473,11 @@ vm_fault_t vmw_bo_vm_fault(struct vm_fault *vmf)
* a lot of unnecessary write faults.
*/
if (vbo->dirty && vbo->dirty->method == VMW_BO_DIRTY_MKWRITE)
- prot = vma->vm_page_prot;
+ prot = vm_get_page_prot(vma->vm_flags & ~VM_SHARED);
else
prot = vm_get_page_prot(vma->vm_flags);
- ret = ttm_bo_vm_fault_reserved(vmf, prot, num_prefault);
+ ret = ttm_bo_vm_fault_reserved(vmf, prot, num_prefault, 1);
if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
return ret;
@@ -486,3 +486,75 @@ out_unlock:
return ret;
}
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+vm_fault_t vmw_bo_vm_huge_fault(struct vm_fault *vmf,
+ enum page_entry_size pe_size)
+{
+ struct vm_area_struct *vma = vmf->vma;
+ struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
+ vma->vm_private_data;
+ struct vmw_buffer_object *vbo =
+ container_of(bo, struct vmw_buffer_object, base);
+ pgprot_t prot;
+ vm_fault_t ret;
+ pgoff_t fault_page_size;
+ bool write = vmf->flags & FAULT_FLAG_WRITE;
+ bool is_cow_mapping =
+ (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
+
+ switch (pe_size) {
+ case PE_SIZE_PMD:
+ fault_page_size = HPAGE_PMD_SIZE >> PAGE_SHIFT;
+ break;
+#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
+ case PE_SIZE_PUD:
+ fault_page_size = HPAGE_PUD_SIZE >> PAGE_SHIFT;
+ break;
+#endif
+ default:
+ WARN_ON_ONCE(1);
+ return VM_FAULT_FALLBACK;
+ }
+
+ /* Always do write dirty-tracking and COW on PTE level. */
+ if (write && (READ_ONCE(vbo->dirty) || is_cow_mapping))
+ return VM_FAULT_FALLBACK;
+
+ ret = ttm_bo_vm_reserve(bo, vmf);
+ if (ret)
+ return ret;
+
+ if (vbo->dirty) {
+ pgoff_t allowed_prefault;
+ unsigned long page_offset;
+
+ page_offset = vmf->pgoff -
+ drm_vma_node_start(&bo->base.vma_node);
+ if (page_offset >= bo->num_pages ||
+ vmw_resources_clean(vbo, page_offset,
+ page_offset + PAGE_SIZE,
+ &allowed_prefault)) {
+ ret = VM_FAULT_SIGBUS;
+ goto out_unlock;
+ }
+
+ /*
+ * Write protect, so we get a new fault on write, and can
+ * split.
+ */
+ prot = vm_get_page_prot(vma->vm_flags & ~VM_SHARED);
+ } else {
+ prot = vm_get_page_prot(vma->vm_flags);
+ }
+
+ ret = ttm_bo_vm_fault_reserved(vmf, prot, 1, fault_page_size);
+ if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
+ return ret;
+
+out_unlock:
+ dma_resv_unlock(bo->base.resv);
+
+ return ret;
+}
+#endif
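/*
 * Worked example for the switch above (x86-64 assumptions: 4K base pages,
 * 2M PMD, 1G PUD): PE_SIZE_PMD yields fault_page_size = 2M >> 12 = 512
 * pages and PE_SIZE_PUD yields 1G >> 12 = 262144 pages, which
 * ttm_bo_vm_fault_reserved() then turns into a single huge insertion
 * instead of a prefault loop.
 */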
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c b/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c
new file mode 100644
index 000000000000..b7c816ba7166
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c
@@ -0,0 +1,166 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * Huge page-table-entry support for IO memory.
+ *
+ * Copyright (C) 2007-2019 Vmware, Inc. All rights reserved.
+ */
+#include "vmwgfx_drv.h"
+#include <drm/ttm/ttm_module.h>
+#include <drm/ttm/ttm_bo_driver.h>
+#include <drm/ttm/ttm_placement.h>
+
+/**
+ * struct vmw_thp_manager - Range manager implementing huge page alignment
+ *
+ * @mm: The underlying range manager. Protected by @lock.
+ * @lock: Manager lock.
+ */
+struct vmw_thp_manager {
+ struct drm_mm mm;
+ spinlock_t lock;
+};
+
+static int vmw_thp_insert_aligned(struct drm_mm *mm, struct drm_mm_node *node,
+ unsigned long align_pages,
+ const struct ttm_place *place,
+ struct ttm_mem_reg *mem,
+ unsigned long lpfn,
+ enum drm_mm_insert_mode mode)
+{
+ if (align_pages >= mem->page_alignment &&
+ (!mem->page_alignment || align_pages % mem->page_alignment == 0)) {
+ return drm_mm_insert_node_in_range(mm, node,
+ mem->num_pages,
+ align_pages, 0,
+ place->fpfn, lpfn, mode);
+ }
+
+ return -ENOSPC;
+}
+
+static int vmw_thp_get_node(struct ttm_mem_type_manager *man,
+ struct ttm_buffer_object *bo,
+ const struct ttm_place *place,
+ struct ttm_mem_reg *mem)
+{
+ struct vmw_thp_manager *rman = (struct vmw_thp_manager *) man->priv;
+ struct drm_mm *mm = &rman->mm;
+ struct drm_mm_node *node;
+ unsigned long align_pages;
+ unsigned long lpfn;
+ enum drm_mm_insert_mode mode = DRM_MM_INSERT_BEST;
+ int ret;
+
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (!node)
+ return -ENOMEM;
+
+ lpfn = place->lpfn;
+ if (!lpfn)
+ lpfn = man->size;
+
+ if (place->flags & TTM_PL_FLAG_TOPDOWN)
+ mode = DRM_MM_INSERT_HIGH;
+
+ spin_lock(&rman->lock);
+ if (IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)) {
+ align_pages = (HPAGE_PUD_SIZE >> PAGE_SHIFT);
+ if (mem->num_pages >= align_pages) {
+ ret = vmw_thp_insert_aligned(mm, node, align_pages,
+ place, mem, lpfn, mode);
+ if (!ret)
+ goto found_unlock;
+ }
+ }
+
+ align_pages = (HPAGE_PMD_SIZE >> PAGE_SHIFT);
+ if (mem->num_pages >= align_pages) {
+ ret = vmw_thp_insert_aligned(mm, node, align_pages, place, mem,
+ lpfn, mode);
+ if (!ret)
+ goto found_unlock;
+ }
+
+ ret = drm_mm_insert_node_in_range(mm, node, mem->num_pages,
+ mem->page_alignment, 0,
+ place->fpfn, lpfn, mode);
+found_unlock:
+ spin_unlock(&rman->lock);
+
+ if (unlikely(ret)) {
+ kfree(node);
+ } else {
+ mem->mm_node = node;
+ mem->start = node->start;
+ }
+
+ return 0;
+}
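/*
 * Worked example (x86-64 assumptions as above, with a compatible
 * page_alignment): a 768M request is 196608 pages, which fails the 1G
 * size check (196608 < 262144) and skips PUD alignment, then succeeds
 * with align_pages = 512, so the allocation starts on a 2M-aligned pfn
 * and PMD faults on it can insert huge entries. Note that on failure the
 * function still returns 0 with mem->mm_node left NULL, which TTM
 * interprets as "no space".
 */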
+
+static void vmw_thp_put_node(struct ttm_mem_type_manager *man,
+ struct ttm_mem_reg *mem)
+{
+ struct vmw_thp_manager *rman = (struct vmw_thp_manager *) man->priv;
+
+ if (mem->mm_node) {
+ spin_lock(&rman->lock);
+ drm_mm_remove_node(mem->mm_node);
+ spin_unlock(&rman->lock);
+
+ kfree(mem->mm_node);
+ mem->mm_node = NULL;
+ }
+}
+
+static int vmw_thp_init(struct ttm_mem_type_manager *man,
+ unsigned long p_size)
+{
+ struct vmw_thp_manager *rman;
+
+ rman = kzalloc(sizeof(*rman), GFP_KERNEL);
+ if (!rman)
+ return -ENOMEM;
+
+ drm_mm_init(&rman->mm, 0, p_size);
+ spin_lock_init(&rman->lock);
+ man->priv = rman;
+ return 0;
+}
+
+static int vmw_thp_takedown(struct ttm_mem_type_manager *man)
+{
+ struct vmw_thp_manager *rman = (struct vmw_thp_manager *) man->priv;
+ struct drm_mm *mm = &rman->mm;
+
+ spin_lock(&rman->lock);
+ if (drm_mm_clean(mm)) {
+ drm_mm_takedown(mm);
+ spin_unlock(&rman->lock);
+ kfree(rman);
+ man->priv = NULL;
+ return 0;
+ }
+ spin_unlock(&rman->lock);
+ return -EBUSY;
+}
+
+static void vmw_thp_debug(struct ttm_mem_type_manager *man,
+ struct drm_printer *printer)
+{
+ struct vmw_thp_manager *rman = (struct vmw_thp_manager *) man->priv;
+
+ spin_lock(&rman->lock);
+ drm_mm_print(&rman->mm, printer);
+ spin_unlock(&rman->lock);
+}
+
+const struct ttm_mem_type_manager_func vmw_thp_func = {
+ .init = vmw_thp_init,
+ .takedown = vmw_thp_takedown,
+ .get_node = vmw_thp_get_node,
+ .put_node = vmw_thp_put_node,
+	.debug = vmw_thp_debug,
+};
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
index 3f3b2c7a208a..bf0bc4697959 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
@@ -749,7 +749,7 @@ static int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
break;
case TTM_PL_VRAM:
/* "On-card" video ram */
- man->func = &ttm_bo_manager_func;
+ man->func = &vmw_thp_func;
man->gpu_offset = 0;
man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
man->available_caching = TTM_PL_FLAG_CACHED;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
index aa7e50f63b94..3c03b1746661 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
@@ -34,7 +34,10 @@ int vmw_mmap(struct file *filp, struct vm_area_struct *vma)
.page_mkwrite = vmw_bo_vm_mkwrite,
.fault = vmw_bo_vm_fault,
.open = ttm_bo_vm_open,
- .close = ttm_bo_vm_close
+ .close = ttm_bo_vm_close,
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ .huge_fault = vmw_bo_vm_huge_fault,
+#endif
};
struct drm_file *file_priv = filp->private_data;
struct vmw_private *dev_priv = vmw_priv(file_priv->minor->dev);
diff --git a/drivers/gpu/drm/xen/xen_drm_front.c b/drivers/gpu/drm/xen/xen_drm_front.c
index b91d23b5f3ae..1fd458e877ca 100644
--- a/drivers/gpu/drm/xen/xen_drm_front.c
+++ b/drivers/gpu/drm/xen/xen_drm_front.c
@@ -401,7 +401,7 @@ static int xen_drm_drv_dumb_create(struct drm_file *filp,
obj = xen_drm_front_gem_create(dev, args->size);
if (IS_ERR_OR_NULL(obj)) {
- ret = PTR_ERR(obj);
+ ret = PTR_ERR_OR_ZERO(obj);
goto fail;
}
diff --git a/drivers/gpu/trace/Kconfig b/drivers/gpu/trace/Kconfig
new file mode 100644
index 000000000000..c24e9edd022e
--- /dev/null
+++ b/drivers/gpu/trace/Kconfig
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config TRACE_GPU_MEM
+ bool
diff --git a/drivers/gpu/trace/Makefile b/drivers/gpu/trace/Makefile
new file mode 100644
index 000000000000..b70fbdc5847f
--- /dev/null
+++ b/drivers/gpu/trace/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_TRACE_GPU_MEM) += trace_gpu_mem.o
diff --git a/drivers/gpu/trace/trace_gpu_mem.c b/drivers/gpu/trace/trace_gpu_mem.c
new file mode 100644
index 000000000000..01e855897b6d
--- /dev/null
+++ b/drivers/gpu/trace/trace_gpu_mem.c
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * GPU memory trace points
+ *
+ * Copyright (C) 2020 Google, Inc.
+ */
+
+#include <linux/module.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/gpu_mem.h>
+
+EXPORT_TRACEPOINT_SYMBOL(gpu_mem_total);
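/*
 * A minimal sketch of a (hypothetical) caller: per
 * include/trace/events/gpu_mem.h, the gpu_mem_total event takes a GPU id,
 * a pid (0 for the per-GPU global total), and the new total in bytes.
 */
#include <linux/types.h>
#include <trace/events/gpu_mem.h>

static void example_report_gpu_mem(u32 gpu_id, u32 pid, u64 total_bytes)
{
	/* Emits the gpu_mem_total tracepoint exported above. */
	trace_gpu_mem_total(gpu_id, pid, total_bytes);
}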