Diffstat (limited to 'drivers/gpu/drm/amd/powerplay/hwmgr')
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c         |  29
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c  |  33
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c            |   1
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c           |   4
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c         | 182
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.h         |  15
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c       |  35
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c     |  48
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h     |   1
9 files changed, 193 insertions, 155 deletions
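
Reading note: this patch drops the per-ASIC uvd/vce/gfx_arbiter plumbing and instead caches the user's request in hwmgr->request_dpm_level, re-applies it on every dynamic power-state adjustment, and restores the active power profile through the new phm_reset_power_profile_state() split out of phm_force_dpm_levels(). The sketch below is only an illustrative user-space model of that ordering, not the kernel code; the struct, enum, and stub functions are simplified stand-ins.

/*
 * Minimal model of the flow added to psm_adjust_power_state_dynamic():
 * force the cached requested level, record it on success, then re-apply
 * the current power profile. All names below are illustrative stubs.
 */
#include <stdio.h>

enum dpm_level { DPM_AUTO, DPM_LOW, DPM_HIGH };

struct hwmgr {
	enum dpm_level dpm_level;          /* level currently in effect */
	enum dpm_level request_dpm_level;  /* level the user asked for  */
};

static int force_dpm_level(struct hwmgr *h, enum dpm_level level)
{
	(void)h;
	printf("forcing dpm level %d\n", level);
	return 0;                          /* 0 == success, as in the driver */
}

static void reset_power_profile_state(struct hwmgr *h)
{
	(void)h;
	printf("re-applying current power profile\n");
}

/* Mirrors the ordering this patch adds to the dynamic adjustment path. */
static void adjust_power_state_dynamic(struct hwmgr *h)
{
	if (!force_dpm_level(h, h->request_dpm_level))
		h->dpm_level = h->request_dpm_level;
	reset_power_profile_state(h);
}

int main(void)
{
	struct hwmgr h = { .dpm_level = DPM_AUTO, .request_dpm_level = DPM_HIGH };

	adjust_power_state_dynamic(&h);
	return 0;
}
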
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
index ad1f6b57884b..b314d09d41af 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
@@ -728,9 +728,6 @@ static int cz_update_sclk_limit(struct pp_hwmgr *hwmgr)
 
 		if (clock < stable_pstate_sclk)
 			clock = stable_pstate_sclk;
-	} else {
-		if (clock < hwmgr->gfx_arbiter.sclk)
-			clock = hwmgr->gfx_arbiter.sclk;
 	}
 
 	if (cz_hwmgr->sclk_dpm.soft_min_clk != clock) {
@@ -1085,14 +1082,8 @@ static int cz_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
 	uint32_t num_of_active_displays = 0;
 	struct cgs_display_info info = {0};
 
-	cz_ps->evclk = hwmgr->vce_arbiter.evclk;
-	cz_ps->ecclk = hwmgr->vce_arbiter.ecclk;
-
 	cz_ps->need_dfs_bypass = true;
 
-	cz_hwmgr->video_start = (hwmgr->uvd_arbiter.vclk != 0 || hwmgr->uvd_arbiter.dclk != 0 ||
-				hwmgr->vce_arbiter.evclk != 0 || hwmgr->vce_arbiter.ecclk != 0);
-
 	cz_hwmgr->battery_state = (PP_StateUILabel_Battery == prequest_ps->classification.ui_label);
 
 	clocks.memoryClock = hwmgr->display_config.min_mem_set_clock != 0 ?
@@ -1105,9 +1096,6 @@ static int cz_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
 	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
 			PHM_PlatformCaps_StablePState))
 		clocks.memoryClock = hwmgr->dyn_state.max_clock_voltage_on_ac.mclk;
 
-	if (clocks.memoryClock < hwmgr->gfx_arbiter.mclk)
-		clocks.memoryClock = hwmgr->gfx_arbiter.mclk;
-
 	force_high = (clocks.memoryClock > cz_hwmgr->sys_info.nbp_memory_clock[CZ_NUM_NBPMEMORYCLOCK - 1])
 			|| (num_of_active_displays >= 3);
@@ -1339,22 +1327,13 @@ int cz_dpm_update_vce_dpm(struct pp_hwmgr *hwmgr)
 					cz_hwmgr->vce_dpm.hard_min_clk,
 					PPSMC_MSG_SetEclkHardMin));
 	} else {
-		/*Program HardMin based on the vce_arbiter.ecclk */
-		if (hwmgr->vce_arbiter.ecclk == 0) {
-			smum_send_msg_to_smc_with_parameter(hwmgr,
-					PPSMC_MSG_SetEclkHardMin, 0);
+
+		smum_send_msg_to_smc_with_parameter(hwmgr,
+					PPSMC_MSG_SetEclkHardMin, 0);
 		/* disable ECLK DPM 0. Otherwise VCE could hang if
 		 * switching SCLK from DPM 0 to 6/7 */
-			smum_send_msg_to_smc_with_parameter(hwmgr,
+		smum_send_msg_to_smc_with_parameter(hwmgr,
 					PPSMC_MSG_SetEclkSoftMin, 1);
-		} else {
-			cz_hwmgr->vce_dpm.hard_min_clk = hwmgr->vce_arbiter.ecclk;
-			smum_send_msg_to_smc_with_parameter(hwmgr,
-					PPSMC_MSG_SetEclkHardMin,
-					cz_get_eclk_level(hwmgr,
-					cz_hwmgr->vce_dpm.hard_min_clk,
-					PPSMC_MSG_SetEclkHardMin));
-		}
 	}
 	return 0;
 }
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
index 623cff90233d..2b0c53fe4c8d 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
@@ -112,26 +112,29 @@ int phm_force_dpm_levels(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level
 
 	PHM_FUNC_CHECK(hwmgr);
 
-	if (hwmgr->hwmgr_func->force_dpm_level != NULL) {
+	if (hwmgr->hwmgr_func->force_dpm_level != NULL)
 		ret = hwmgr->hwmgr_func->force_dpm_level(hwmgr, level);
-		if (ret)
-			return ret;
-
-		if (hwmgr->hwmgr_func->set_power_profile_state) {
-			if (hwmgr->current_power_profile == AMD_PP_GFX_PROFILE)
-				ret = hwmgr->hwmgr_func->set_power_profile_state(
-						hwmgr,
-						&hwmgr->gfx_power_profile);
-			else if (hwmgr->current_power_profile == AMD_PP_COMPUTE_PROFILE)
-				ret = hwmgr->hwmgr_func->set_power_profile_state(
-						hwmgr,
-						&hwmgr->compute_power_profile);
-		}
-	}
 
 	return ret;
 }
 
+int phm_reset_power_profile_state(struct pp_hwmgr *hwmgr)
+{
+	int ret = 0;
+
+	if (hwmgr->hwmgr_func->set_power_profile_state) {
+		if (hwmgr->current_power_profile == AMD_PP_GFX_PROFILE)
+			ret = hwmgr->hwmgr_func->set_power_profile_state(
+					hwmgr,
+					&hwmgr->gfx_power_profile);
+		else if (hwmgr->current_power_profile == AMD_PP_COMPUTE_PROFILE)
+			ret = hwmgr->hwmgr_func->set_power_profile_state(
+					hwmgr,
+					&hwmgr->compute_power_profile);
+	}
+	return ret;
+}
+
 int phm_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
 				struct pp_power_state *adjusted_ps,
 				const struct pp_power_state *current_ps)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
index ce59e0e67cb2..0229f774f7a9 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
@@ -149,6 +149,7 @@ int hwmgr_early_init(struct pp_instance *handle)
 	hwmgr->power_source = PP_PowerSource_AC;
 	hwmgr->pp_table_version = PP_TABLE_V1;
 	hwmgr->dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
+	hwmgr->request_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
 	hwmgr_init_default_caps(hwmgr);
 	hwmgr_set_user_specify_caps(hwmgr);
 	hwmgr->fan_ctrl_is_in_default_mode = true;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c
index ffa44bbb218e..95ab772e0c3e 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c
@@ -244,6 +244,10 @@ int psm_adjust_power_state_dynamic(struct pp_hwmgr *hwmgr, bool skip,
 	}
 
 	phm_notify_smc_display_config_after_ps_adjustment(hwmgr);
+	if (!phm_force_dpm_levels(hwmgr, hwmgr->request_dpm_level))
+		hwmgr->dpm_level = hwmgr->request_dpm_level;
+
+	phm_reset_power_profile_state(hwmgr);
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c
index 3e0b267c74a8..569073e3a5a1 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c
@@ -159,7 +159,6 @@ static int rv_construct_boot_state(struct pp_hwmgr *hwmgr)
 
 static int rv_set_clock_limit(struct pp_hwmgr *hwmgr, const void *input)
 {
-	struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend);
 	struct PP_Clocks clocks = {0};
 	struct pp_display_clock_request clock_req;
 
@@ -170,39 +169,6 @@ static int rv_set_clock_limit(struct pp_hwmgr *hwmgr, const void *input)
 	PP_ASSERT_WITH_CODE(!rv_display_clock_voltage_request(hwmgr, &clock_req),
 				"Attempt to set DCF Clock Failed!", return -EINVAL);
 
-	if (((hwmgr->uvd_arbiter.vclk_soft_min / 100) != rv_data->vclk_soft_min) ||
-		((hwmgr->uvd_arbiter.dclk_soft_min / 100) != rv_data->dclk_soft_min)) {
-		rv_data->vclk_soft_min = hwmgr->uvd_arbiter.vclk_soft_min / 100;
-		rv_data->dclk_soft_min = hwmgr->uvd_arbiter.dclk_soft_min / 100;
-		smum_send_msg_to_smc_with_parameter(hwmgr,
-			PPSMC_MSG_SetSoftMinVcn,
-			(rv_data->vclk_soft_min << 16) | rv_data->vclk_soft_min);
-	}
-
-	if((hwmgr->gfx_arbiter.sclk_hard_min != 0) &&
-		((hwmgr->gfx_arbiter.sclk_hard_min / 100) != rv_data->soc_actual_hard_min_freq)) {
-		smum_send_msg_to_smc_with_parameter(hwmgr,
-				PPSMC_MSG_SetHardMinSocclkByFreq,
-				hwmgr->gfx_arbiter.sclk_hard_min / 100);
-		rv_read_arg_from_smc(hwmgr, &rv_data->soc_actual_hard_min_freq);
-	}
-
-	if ((hwmgr->gfx_arbiter.gfxclk != 0) &&
-		(rv_data->gfx_actual_soft_min_freq != (hwmgr->gfx_arbiter.gfxclk))) {
-		smum_send_msg_to_smc_with_parameter(hwmgr,
-				PPSMC_MSG_SetMinVideoGfxclkFreq,
-				hwmgr->gfx_arbiter.gfxclk / 100);
-		rv_read_arg_from_smc(hwmgr, &rv_data->gfx_actual_soft_min_freq);
-	}
-
-	if ((hwmgr->gfx_arbiter.fclk != 0) &&
-		(rv_data->fabric_actual_soft_min_freq != (hwmgr->gfx_arbiter.fclk / 100))) {
-		smum_send_msg_to_smc_with_parameter(hwmgr,
-				PPSMC_MSG_SetMinVideoFclkFreq,
-				hwmgr->gfx_arbiter.fclk / 100);
-		rv_read_arg_from_smc(hwmgr, &rv_data->fabric_actual_soft_min_freq);
-	}
-
 	return 0;
 }
 
@@ -518,17 +484,161 @@ static int rv_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
 static int rv_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
 				enum amd_dpm_forced_level level)
 {
+	if (hwmgr->smu_version < 0x1E3700) {
+		pr_info("smu firmware version too old, can not set dpm level\n");
+		return 0;
+	}
+
+	switch (level) {
+	case AMD_DPM_FORCED_LEVEL_HIGH:
+	case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
+		smum_send_msg_to_smc_with_parameter(hwmgr,
+				PPSMC_MSG_SetHardMinGfxClk,
+				RAVEN_UMD_PSTATE_PEAK_GFXCLK);
+		smum_send_msg_to_smc_with_parameter(hwmgr,
+				PPSMC_MSG_SetHardMinFclkByFreq,
+				RAVEN_UMD_PSTATE_PEAK_FCLK);
+		smum_send_msg_to_smc_with_parameter(hwmgr,
+				PPSMC_MSG_SetHardMinSocclkByFreq,
+				RAVEN_UMD_PSTATE_PEAK_SOCCLK);
+		smum_send_msg_to_smc_with_parameter(hwmgr,
+				PPSMC_MSG_SetHardMinVcn,
+				RAVEN_UMD_PSTATE_VCE);
+
+		smum_send_msg_to_smc_with_parameter(hwmgr,
+				PPSMC_MSG_SetSoftMaxGfxClk,
+				RAVEN_UMD_PSTATE_PEAK_GFXCLK);
+		smum_send_msg_to_smc_with_parameter(hwmgr,
+				PPSMC_MSG_SetSoftMaxFclkByFreq,
+				RAVEN_UMD_PSTATE_PEAK_FCLK);
+		smum_send_msg_to_smc_with_parameter(hwmgr,
+				PPSMC_MSG_SetSoftMaxSocclkByFreq,
+				RAVEN_UMD_PSTATE_PEAK_SOCCLK);
+		smum_send_msg_to_smc_with_parameter(hwmgr,
+				PPSMC_MSG_SetSoftMaxVcn,
+				RAVEN_UMD_PSTATE_VCE);
+		break;
+	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
+		smum_send_msg_to_smc_with_parameter(hwmgr,
+				PPSMC_MSG_SetHardMinGfxClk,
+				RAVEN_UMD_PSTATE_MIN_GFXCLK);
+		smum_send_msg_to_smc_with_parameter(hwmgr,
+				PPSMC_MSG_SetSoftMaxGfxClk,
+				RAVEN_UMD_PSTATE_MIN_GFXCLK);
+		break;
+	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
+		smum_send_msg_to_smc_with_parameter(hwmgr,
+				PPSMC_MSG_SetHardMinFclkByFreq,
+				RAVEN_UMD_PSTATE_MIN_FCLK);
+		smum_send_msg_to_smc_with_parameter(hwmgr,
+				PPSMC_MSG_SetSoftMaxFclkByFreq,
+				RAVEN_UMD_PSTATE_MIN_FCLK);
+		break;
+	case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
+		smum_send_msg_to_smc_with_parameter(hwmgr,
+				PPSMC_MSG_SetHardMinGfxClk,
+				RAVEN_UMD_PSTATE_GFXCLK);
+		smum_send_msg_to_smc_with_parameter(hwmgr,
+				PPSMC_MSG_SetHardMinFclkByFreq,
+				RAVEN_UMD_PSTATE_FCLK);
+		smum_send_msg_to_smc_with_parameter(hwmgr,
+				PPSMC_MSG_SetHardMinSocclkByFreq,
+				RAVEN_UMD_PSTATE_SOCCLK);
+		smum_send_msg_to_smc_with_parameter(hwmgr,
+				PPSMC_MSG_SetHardMinVcn,
+				RAVEN_UMD_PSTATE_VCE);
+
+		smum_send_msg_to_smc_with_parameter(hwmgr,
+				PPSMC_MSG_SetSoftMaxGfxClk,
+				RAVEN_UMD_PSTATE_GFXCLK);
+		smum_send_msg_to_smc_with_parameter(hwmgr,
+				PPSMC_MSG_SetSoftMaxFclkByFreq,
+				RAVEN_UMD_PSTATE_FCLK);
+		smum_send_msg_to_smc_with_parameter(hwmgr,
+				PPSMC_MSG_SetSoftMaxSocclkByFreq,
+				RAVEN_UMD_PSTATE_SOCCLK);
+		smum_send_msg_to_smc_with_parameter(hwmgr,
+				PPSMC_MSG_SetSoftMaxVcn,
+				RAVEN_UMD_PSTATE_VCE);
+		break;
+	case AMD_DPM_FORCED_LEVEL_AUTO:
+		smum_send_msg_to_smc_with_parameter(hwmgr,
+				PPSMC_MSG_SetHardMinGfxClk,
+				RAVEN_UMD_PSTATE_MIN_GFXCLK);
+		smum_send_msg_to_smc_with_parameter(hwmgr,
+				PPSMC_MSG_SetHardMinFclkByFreq,
+				RAVEN_UMD_PSTATE_MIN_FCLK);
+		smum_send_msg_to_smc_with_parameter(hwmgr,
+				PPSMC_MSG_SetHardMinSocclkByFreq,
+				RAVEN_UMD_PSTATE_MIN_SOCCLK);
+		smum_send_msg_to_smc_with_parameter(hwmgr,
+				PPSMC_MSG_SetHardMinVcn,
+				RAVEN_UMD_PSTATE_MIN_VCE);
+
+		smum_send_msg_to_smc_with_parameter(hwmgr,
+				PPSMC_MSG_SetSoftMaxGfxClk,
+				RAVEN_UMD_PSTATE_PEAK_GFXCLK);
+		smum_send_msg_to_smc_with_parameter(hwmgr,
+				PPSMC_MSG_SetSoftMaxFclkByFreq,
+				RAVEN_UMD_PSTATE_PEAK_FCLK);
+		smum_send_msg_to_smc_with_parameter(hwmgr,
+				PPSMC_MSG_SetSoftMaxSocclkByFreq,
+				RAVEN_UMD_PSTATE_PEAK_SOCCLK);
+		smum_send_msg_to_smc_with_parameter(hwmgr,
+				PPSMC_MSG_SetSoftMaxVcn,
+				RAVEN_UMD_PSTATE_VCE);
+		break;
+	case AMD_DPM_FORCED_LEVEL_LOW:
+		smum_send_msg_to_smc_with_parameter(hwmgr,
+				PPSMC_MSG_SetHardMinGfxClk,
+				RAVEN_UMD_PSTATE_MIN_GFXCLK);
+		smum_send_msg_to_smc_with_parameter(hwmgr,
+				PPSMC_MSG_SetSoftMaxGfxClk,
+				RAVEN_UMD_PSTATE_MIN_GFXCLK);
+		smum_send_msg_to_smc_with_parameter(hwmgr,
+				PPSMC_MSG_SetHardMinFclkByFreq,
+				RAVEN_UMD_PSTATE_MIN_FCLK);
+		smum_send_msg_to_smc_with_parameter(hwmgr,
+				PPSMC_MSG_SetSoftMaxFclkByFreq,
+				RAVEN_UMD_PSTATE_MIN_FCLK);
+		break;
+	case AMD_DPM_FORCED_LEVEL_MANUAL:
+	case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
+	default:
+		break;
+	}
 	return 0;
 }
 
 static uint32_t rv_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
 {
-	return 0;
+	struct rv_hwmgr *data;
+
+	if (hwmgr == NULL)
+		return -EINVAL;
+
+	data = (struct rv_hwmgr *)(hwmgr->backend);
+
+	if (low)
+		return data->clock_vol_info.vdd_dep_on_fclk->entries[0].clk;
+	else
+		return data->clock_vol_info.vdd_dep_on_fclk->entries[
+			data->clock_vol_info.vdd_dep_on_fclk->count - 1].clk;
 }
 
 static uint32_t rv_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
 {
-	return 0;
+	struct rv_hwmgr *data;
+
+	if (hwmgr == NULL)
+		return -EINVAL;
+
+	data = (struct rv_hwmgr *)(hwmgr->backend);
+
+	if (low)
+		return data->gfx_min_freq_limit;
+	else
+		return data->gfx_max_freq_limit;
 }
 
 static int rv_dpm_patch_boot_state(struct pp_hwmgr *hwmgr,
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.h
index 9dc503055394..c3bc311dc59f 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.h
@@ -304,4 +304,19 @@ struct pp_hwmgr;
 
 int rv_init_function_pointers(struct pp_hwmgr *hwmgr);
 
+/* UMD PState Raven Msg Parameters in MHz */
+#define RAVEN_UMD_PSTATE_GFXCLK         700
+#define RAVEN_UMD_PSTATE_SOCCLK         626
+#define RAVEN_UMD_PSTATE_FCLK           933
+#define RAVEN_UMD_PSTATE_VCE            0x03C00320
+
+#define RAVEN_UMD_PSTATE_PEAK_GFXCLK    1100
+#define RAVEN_UMD_PSTATE_PEAK_SOCCLK    757
+#define RAVEN_UMD_PSTATE_PEAK_FCLK      1200
+
+#define RAVEN_UMD_PSTATE_MIN_GFXCLK     200
+#define RAVEN_UMD_PSTATE_MIN_FCLK       400
+#define RAVEN_UMD_PSTATE_MIN_SOCCLK     200
+#define RAVEN_UMD_PSTATE_MIN_VCE        0x0190012C
+
 #endif
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index 8edb0c4c3876..40adc855c416 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -2722,9 +2722,6 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
 		}
 	}
 
-	smu7_ps->vce_clks.evclk = hwmgr->vce_arbiter.evclk;
-	smu7_ps->vce_clks.ecclk = hwmgr->vce_arbiter.ecclk;
-
 	cgs_get_active_displays_info(hwmgr->device, &info);
 
 	minimum_clocks.engineClock = hwmgr->display_config.min_core_set_clock;
@@ -2754,38 +2751,6 @@
 		minimum_clocks.memoryClock = stable_pstate_mclk;
 	}
 
-	if (minimum_clocks.engineClock < hwmgr->gfx_arbiter.sclk)
-		minimum_clocks.engineClock = hwmgr->gfx_arbiter.sclk;
-
-	if (minimum_clocks.memoryClock < hwmgr->gfx_arbiter.mclk)
-		minimum_clocks.memoryClock = hwmgr->gfx_arbiter.mclk;
-
-	smu7_ps->sclk_threshold = hwmgr->gfx_arbiter.sclk_threshold;
-
-	if (0 != hwmgr->gfx_arbiter.sclk_over_drive) {
-		PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.sclk_over_drive <=
-				hwmgr->platform_descriptor.overdriveLimit.engineClock),
-				"Overdrive sclk exceeds limit",
-				hwmgr->gfx_arbiter.sclk_over_drive =
-					hwmgr->platform_descriptor.overdriveLimit.engineClock);
-
-		if (hwmgr->gfx_arbiter.sclk_over_drive >= hwmgr->gfx_arbiter.sclk)
-			smu7_ps->performance_levels[1].engine_clock =
-					hwmgr->gfx_arbiter.sclk_over_drive;
-	}
-
-	if (0 != hwmgr->gfx_arbiter.mclk_over_drive) {
-		PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.mclk_over_drive <=
-				hwmgr->platform_descriptor.overdriveLimit.memoryClock),
-				"Overdrive mclk exceeds limit",
-				hwmgr->gfx_arbiter.mclk_over_drive =
-					hwmgr->platform_descriptor.overdriveLimit.memoryClock);
-
-		if (hwmgr->gfx_arbiter.mclk_over_drive >= hwmgr->gfx_arbiter.mclk)
-			smu7_ps->performance_levels[1].memory_clock =
-					hwmgr->gfx_arbiter.mclk_over_drive;
-	}
-
 	disable_mclk_switching_for_frame_lock = phm_cap_enabled(
 			hwmgr->platform_descriptor.platformCaps,
 			PHM_PlatformCaps_DisableMclkSwitchingForFrameLock);
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
index 07d256d136ad..2d55dabc77d4 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
@@ -426,9 +426,9 @@ static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr)
 		data->smu_features[GNLD_VR0HOT].supported = true;
 
 	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetSmuVersion);
-	vega10_read_arg_from_smc(hwmgr, &(data->smu_version));
+	vega10_read_arg_from_smc(hwmgr, &(hwmgr->smu_version));
 	/* ACG firmware has major version 5 */
-	if ((data->smu_version & 0xff000000) == 0x5000000)
+	if ((hwmgr->smu_version & 0xff000000) == 0x5000000)
 		data->smu_features[GNLD_ACG].supported = true;
 
 	if (data->registry_data.didt_support)
@@ -2879,8 +2879,8 @@
 			"DPM is already running right , skipping re-enablement!",
 			return 0);
 
-	if ((data->smu_version == 0x001c2c00) ||
-			(data->smu_version == 0x001c2d00)) {
+	if ((hwmgr->smu_version == 0x001c2c00) ||
+			(hwmgr->smu_version == 0x001c2d00)) {
 		tmp_result = smum_send_msg_to_smc_with_parameter(hwmgr,
 				PPSMC_MSG_UpdatePkgPwrPidAlpha, 1);
 		PP_ASSERT_WITH_CODE(!tmp_result,
@@ -3124,9 +3124,6 @@ static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
 		}
 	}
 
-	vega10_ps->vce_clks.evclk = hwmgr->vce_arbiter.evclk;
-	vega10_ps->vce_clks.ecclk = hwmgr->vce_arbiter.ecclk;
-
 	cgs_get_active_displays_info(hwmgr->device, &info);
 
 	/* result = PHM_CheckVBlankTime(hwmgr, &vblankTooShort);*/
@@ -3165,38 +3162,6 @@
 		minimum_clocks.memoryClock = stable_pstate_mclk;
 	}
 
-	if (minimum_clocks.engineClock < hwmgr->gfx_arbiter.sclk)
-		minimum_clocks.engineClock = hwmgr->gfx_arbiter.sclk;
-
-	if (minimum_clocks.memoryClock < hwmgr->gfx_arbiter.mclk)
-		minimum_clocks.memoryClock = hwmgr->gfx_arbiter.mclk;
-
-	vega10_ps->sclk_threshold = hwmgr->gfx_arbiter.sclk_threshold;
-
-	if (hwmgr->gfx_arbiter.sclk_over_drive) {
-		PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.sclk_over_drive <=
-				hwmgr->platform_descriptor.overdriveLimit.engineClock),
-				"Overdrive sclk exceeds limit",
-				hwmgr->gfx_arbiter.sclk_over_drive =
-					hwmgr->platform_descriptor.overdriveLimit.engineClock);
-
-		if (hwmgr->gfx_arbiter.sclk_over_drive >= hwmgr->gfx_arbiter.sclk)
-			vega10_ps->performance_levels[1].gfx_clock =
-					hwmgr->gfx_arbiter.sclk_over_drive;
-	}
-
-	if (hwmgr->gfx_arbiter.mclk_over_drive) {
-		PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.mclk_over_drive <=
-				hwmgr->platform_descriptor.overdriveLimit.memoryClock),
-				"Overdrive mclk exceeds limit",
-				hwmgr->gfx_arbiter.mclk_over_drive =
-					hwmgr->platform_descriptor.overdriveLimit.memoryClock);
-
-		if (hwmgr->gfx_arbiter.mclk_over_drive >= hwmgr->gfx_arbiter.mclk)
-			vega10_ps->performance_levels[1].mem_clock =
-					hwmgr->gfx_arbiter.mclk_over_drive;
-	}
-
 	disable_mclk_switching_for_frame_lock = phm_cap_enabled(
 			hwmgr->platform_descriptor.platformCaps,
 			PHM_PlatformCaps_DisableMclkSwitchingForFrameLock);
@@ -3819,10 +3784,7 @@ static int vega10_update_sclk_threshold(struct pp_hwmgr *hwmgr)
 	uint32_t low_sclk_interrupt_threshold = 0;
 
 	if (PP_CAP(PHM_PlatformCaps_SclkThrottleLowNotification) &&
-		(hwmgr->gfx_arbiter.sclk_threshold !=
-				data->low_sclk_interrupt_threshold)) {
-		data->low_sclk_interrupt_threshold =
-				hwmgr->gfx_arbiter.sclk_threshold;
+		(data->low_sclk_interrupt_threshold != 0)) {
 		low_sclk_interrupt_threshold =
 			data->low_sclk_interrupt_threshold;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h
index 8f7358cc3327..e8507ff8dbb3 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h
@@ -387,7 +387,6 @@ struct vega10_hwmgr {
 	struct vega10_smc_state_table  smc_state_table;
 
 	uint32_t                       config_telemetry;
-	uint32_t                       smu_version;
 	uint32_t                       acg_loop_state;
 	uint32_t                       mem_channels;
 };
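
For context on the new rv_dpm_force_dpm_level() above: each forced level is expressed to the SMU as a hard-minimum plus soft-maximum pair per clock domain, using the RAVEN_UMD_PSTATE_* values added in rv_hwmgr.h (for GFXCLK: min 200 MHz, standard 700 MHz, peak 1100 MHz). The following is a minimal stand-alone sketch of that clock-window pattern, not driver code; the helper and enum names are invented for illustration.

/*
 * Sketch of the [hard min, soft max] windowing used per forced level.
 * The MHz values mirror the RAVEN_UMD_PSTATE_* GFXCLK defines; everything
 * else is a simplified user-space model.
 */
#include <stdio.h>

struct clk_window { unsigned int hard_min_mhz, soft_max_mhz; };

enum forced_level { LEVEL_AUTO, LEVEL_LOW, LEVEL_HIGH };

/* GFXCLK window per level: min 200, peak 1100 (MHz). */
static struct clk_window gfxclk_window(enum forced_level level)
{
	switch (level) {
	case LEVEL_HIGH: return (struct clk_window){ 1100, 1100 }; /* pin to peak */
	case LEVEL_LOW:  return (struct clk_window){  200,  200 }; /* pin to min  */
	case LEVEL_AUTO:
	default:         return (struct clk_window){  200, 1100 }; /* full range  */
	}
}

int main(void)
{
	for (enum forced_level l = LEVEL_AUTO; l <= LEVEL_HIGH; l++) {
		struct clk_window w = gfxclk_window(l);

		printf("level %d: gfxclk hard min %u MHz, soft max %u MHz\n",
		       (int)l, w.hard_min_mhz, w.soft_max_mhz);
	}
	return 0;
}
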