author    Dave Airlie <airlied@redhat.com>    2020-06-08 11:55:33 +1000
committer Dave Airlie <airlied@redhat.com>    2020-06-08 11:55:33 +1000
commit    fa3fa2228c886435b3eea22cea4767f3fb07671f (patch)
tree      52f94ac0a1e39579edcdce2b7cbdfc1c3f62e68a /drivers
parent    3f29eacc3ea828cde4d1f06f74fa7f2ca479c216 (diff)
parent    a24eaa5c51255b344d5a321f1eeb3205f2775498 (diff)
Merge tag 'amd-drm-fixes-5.8-2020-06-04' of git://people.freedesktop.org/~agd5f/linux into drm-next
amd-drm-fixes-5.8-2020-06-04

amdgpu:
- Prevent hwmon accesses while GPU is in reset
- CTF interrupt fix
- Backlight fix for renoir
- Fix for display sync groups
- Display bandwidth validation workaround

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexdeucher@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200604181900.4609-1-alexander.deucher@amd.com
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c             | 171
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c  |  11
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc.c           |  30
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smu_v11_0.c          |   6
4 files changed, 207 insertions, 11 deletions
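The bulk of the amdgpu_pm.c diff below repeats one change: an early-exit guard at the top of every pm sysfs, hwmon, and debugfs handler so userspace cannot touch the hardware while a GPU reset is in flight. A minimal sketch of that pattern in a representative show handler follows; the handler name and its body are illustrative placeholders, while the in_gpu_reset check, the -EPERM return, and the runtime-PM calls mirror the hunks below.

/*
 * Sketch of the guard pattern: refuse pm sysfs/hwmon access with -EPERM
 * while the GPU is being reset, before taking a runtime-PM reference.
 * The handler shown here is a placeholder, not a function from the patch.
 */
static ssize_t amdgpu_get_example_pm_attr(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	int ret;

	if (adev->in_gpu_reset)
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;

	/* ... query the SMU or read registers and format them into buf ... */
	ret = snprintf(buf, PAGE_SIZE, "example\n");

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return ret;
}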
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index d7646cbce346..775e389c9a13 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -163,6 +163,9 @@ static ssize_t amdgpu_get_power_dpm_state(struct device *dev,
enum amd_pm_state_type pm;
int ret;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0)
return ret;
@@ -196,6 +199,9 @@ static ssize_t amdgpu_set_power_dpm_state(struct device *dev,
enum amd_pm_state_type state;
int ret;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
if (strncmp("battery", buf, strlen("battery")) == 0)
state = POWER_STATE_TYPE_BATTERY;
else if (strncmp("balanced", buf, strlen("balanced")) == 0)
@@ -297,6 +303,9 @@ static ssize_t amdgpu_get_power_dpm_force_performance_level(struct device *dev,
enum amd_dpm_forced_level level = 0xff;
int ret;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0)
return ret;
@@ -334,6 +343,9 @@ static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,
enum amd_dpm_forced_level current_level = 0xff;
int ret = 0;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
if (strncmp("low", buf, strlen("low")) == 0) {
level = AMD_DPM_FORCED_LEVEL_LOW;
} else if (strncmp("high", buf, strlen("high")) == 0) {
@@ -433,6 +445,9 @@ static ssize_t amdgpu_get_pp_num_states(struct device *dev,
struct pp_states_info data;
int i, buf_len, ret;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0)
return ret;
@@ -472,6 +487,9 @@ static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
enum amd_pm_state_type pm = 0;
int i = 0, ret = 0;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0)
return ret;
@@ -508,6 +526,9 @@ static ssize_t amdgpu_get_pp_force_state(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
if (adev->pp_force_state_enabled)
return amdgpu_get_pp_cur_state(dev, attr, buf);
else
@@ -525,6 +546,9 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev,
unsigned long idx;
int ret;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
if (strlen(buf) == 1)
adev->pp_force_state_enabled = false;
else if (is_support_sw_smu(adev))
@@ -580,6 +604,9 @@ static ssize_t amdgpu_get_pp_table(struct device *dev,
char *table = NULL;
int size, ret;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0)
return ret;
@@ -619,6 +646,9 @@ static ssize_t amdgpu_set_pp_table(struct device *dev,
struct amdgpu_device *adev = ddev->dev_private;
int ret = 0;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0)
return ret;
@@ -721,6 +751,9 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
const char delimiter[3] = {' ', '\n', '\0'};
uint32_t type;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
if (count > 127)
return -EINVAL;
@@ -810,6 +843,9 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
ssize_t size;
int ret;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0)
return ret;
@@ -859,6 +895,9 @@ static ssize_t amdgpu_set_pp_features(struct device *dev,
uint64_t featuremask;
int ret;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
ret = kstrtou64(buf, 0, &featuremask);
if (ret)
return -EINVAL;
@@ -899,6 +938,9 @@ static ssize_t amdgpu_get_pp_features(struct device *dev,
ssize_t size;
int ret;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0)
return ret;
@@ -955,6 +997,9 @@ static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
ssize_t size;
int ret;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0)
return ret;
@@ -1018,6 +1063,9 @@ static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
int ret;
uint32_t mask = 0;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
ret = amdgpu_read_mask(buf, count, &mask);
if (ret)
return ret;
@@ -1049,6 +1097,9 @@ static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev,
ssize_t size;
int ret;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0)
return ret;
@@ -1076,6 +1127,9 @@ static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
uint32_t mask = 0;
int ret;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
ret = amdgpu_read_mask(buf, count, &mask);
if (ret)
return ret;
@@ -1107,6 +1161,9 @@ static ssize_t amdgpu_get_pp_dpm_socclk(struct device *dev,
ssize_t size;
int ret;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0)
return ret;
@@ -1134,6 +1191,9 @@ static ssize_t amdgpu_set_pp_dpm_socclk(struct device *dev,
int ret;
uint32_t mask = 0;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
ret = amdgpu_read_mask(buf, count, &mask);
if (ret)
return ret;
@@ -1167,6 +1227,9 @@ static ssize_t amdgpu_get_pp_dpm_fclk(struct device *dev,
ssize_t size;
int ret;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0)
return ret;
@@ -1194,6 +1257,9 @@ static ssize_t amdgpu_set_pp_dpm_fclk(struct device *dev,
int ret;
uint32_t mask = 0;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
ret = amdgpu_read_mask(buf, count, &mask);
if (ret)
return ret;
@@ -1227,6 +1293,9 @@ static ssize_t amdgpu_get_pp_dpm_dcefclk(struct device *dev,
ssize_t size;
int ret;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0)
return ret;
@@ -1254,6 +1323,9 @@ static ssize_t amdgpu_set_pp_dpm_dcefclk(struct device *dev,
int ret;
uint32_t mask = 0;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
ret = amdgpu_read_mask(buf, count, &mask);
if (ret)
return ret;
@@ -1287,6 +1359,9 @@ static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev,
ssize_t size;
int ret;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0)
return ret;
@@ -1314,6 +1389,9 @@ static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
int ret;
uint32_t mask = 0;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
ret = amdgpu_read_mask(buf, count, &mask);
if (ret)
return ret;
@@ -1347,6 +1425,9 @@ static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
uint32_t value = 0;
int ret;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0)
return ret;
@@ -1372,6 +1453,9 @@ static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
int ret;
long int value;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
ret = kstrtol(buf, 0, &value);
if (ret)
@@ -1410,6 +1494,9 @@ static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
uint32_t value = 0;
int ret;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0)
return ret;
@@ -1435,6 +1522,9 @@ static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
int ret;
long int value;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
ret = kstrtol(buf, 0, &value);
if (ret)
@@ -1493,6 +1583,9 @@ static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev,
ssize_t size;
int ret;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0)
return ret;
@@ -1528,6 +1621,9 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
long int profile_mode = 0;
const char delimiter[3] = {' ', '\n', '\0'};
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
tmp[0] = *(buf);
tmp[1] = '\0';
ret = kstrtol(tmp, 0, &profile_mode);
@@ -1587,6 +1683,9 @@ static ssize_t amdgpu_get_gpu_busy_percent(struct device *dev,
struct amdgpu_device *adev = ddev->dev_private;
int r, value, size = sizeof(value);
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
r = pm_runtime_get_sync(ddev->dev);
if (r < 0)
return r;
@@ -1620,6 +1719,9 @@ static ssize_t amdgpu_get_mem_busy_percent(struct device *dev,
struct amdgpu_device *adev = ddev->dev_private;
int r, value, size = sizeof(value);
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
r = pm_runtime_get_sync(ddev->dev);
if (r < 0)
return r;
@@ -1658,6 +1760,9 @@ static ssize_t amdgpu_get_pcie_bw(struct device *dev,
uint64_t count0 = 0, count1 = 0;
int ret;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
if (adev->flags & AMD_IS_APU)
return -ENODATA;
@@ -1694,6 +1799,9 @@ static ssize_t amdgpu_get_unique_id(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
if (adev->unique_id)
return snprintf(buf, PAGE_SIZE, "%016llx\n", adev->unique_id);
@@ -1888,6 +1996,9 @@ static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
int channel = to_sensor_dev_attr(attr)->index;
int r, temp = 0, size = sizeof(temp);
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
if (channel >= PP_TEMP_MAX)
return -EINVAL;
@@ -2019,6 +2130,9 @@ static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
u32 pwm_mode = 0;
int ret;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
ret = pm_runtime_get_sync(adev->ddev->dev);
if (ret < 0)
return ret;
@@ -2050,6 +2164,9 @@ static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
int err, ret;
int value;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
err = kstrtoint(buf, 10, &value);
if (err)
return err;
@@ -2099,6 +2216,9 @@ static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
u32 value;
u32 pwm_mode;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
err = pm_runtime_get_sync(adev->ddev->dev);
if (err < 0)
return err;
@@ -2148,6 +2268,9 @@ static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
int err;
u32 speed = 0;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
err = pm_runtime_get_sync(adev->ddev->dev);
if (err < 0)
return err;
@@ -2178,6 +2301,9 @@ static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
int err;
u32 speed = 0;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
err = pm_runtime_get_sync(adev->ddev->dev);
if (err < 0)
return err;
@@ -2207,6 +2333,9 @@ static ssize_t amdgpu_hwmon_get_fan1_min(struct device *dev,
u32 size = sizeof(min_rpm);
int r;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
r = pm_runtime_get_sync(adev->ddev->dev);
if (r < 0)
return r;
@@ -2232,6 +2361,9 @@ static ssize_t amdgpu_hwmon_get_fan1_max(struct device *dev,
u32 size = sizeof(max_rpm);
int r;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
r = pm_runtime_get_sync(adev->ddev->dev);
if (r < 0)
return r;
@@ -2256,6 +2388,9 @@ static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev,
int err;
u32 rpm = 0;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
err = pm_runtime_get_sync(adev->ddev->dev);
if (err < 0)
return err;
@@ -2285,6 +2420,9 @@ static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev,
u32 value;
u32 pwm_mode;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
err = pm_runtime_get_sync(adev->ddev->dev);
if (err < 0)
return err;
@@ -2331,6 +2469,9 @@ static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev,
u32 pwm_mode = 0;
int ret;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
ret = pm_runtime_get_sync(adev->ddev->dev);
if (ret < 0)
return ret;
@@ -2363,6 +2504,9 @@ static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev,
int value;
u32 pwm_mode;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
err = kstrtoint(buf, 10, &value);
if (err)
return err;
@@ -2403,6 +2547,9 @@ static ssize_t amdgpu_hwmon_show_vddgfx(struct device *dev,
u32 vddgfx;
int r, size = sizeof(vddgfx);
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
r = pm_runtime_get_sync(adev->ddev->dev);
if (r < 0)
return r;
@@ -2435,6 +2582,9 @@ static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev,
u32 vddnb;
int r, size = sizeof(vddnb);
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
/* only APUs have vddnb */
if (!(adev->flags & AMD_IS_APU))
return -EINVAL;
@@ -2472,6 +2622,9 @@ static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev,
int r, size = sizeof(u32);
unsigned uw;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
r = pm_runtime_get_sync(adev->ddev->dev);
if (r < 0)
return r;
@@ -2508,6 +2661,9 @@ static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev,
ssize_t size;
int r;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
r = pm_runtime_get_sync(adev->ddev->dev);
if (r < 0)
return r;
@@ -2537,6 +2693,9 @@ static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev,
ssize_t size;
int r;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
r = pm_runtime_get_sync(adev->ddev->dev);
if (r < 0)
return r;
@@ -2567,6 +2726,9 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
int err;
u32 value;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
if (amdgpu_sriov_vf(adev))
return -EINVAL;
@@ -2605,6 +2767,9 @@ static ssize_t amdgpu_hwmon_show_sclk(struct device *dev,
uint32_t sclk;
int r, size = sizeof(sclk);
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
r = pm_runtime_get_sync(adev->ddev->dev);
if (r < 0)
return r;
@@ -2637,6 +2802,9 @@ static ssize_t amdgpu_hwmon_show_mclk(struct device *dev,
uint32_t mclk;
int r, size = sizeof(mclk);
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
r = pm_runtime_get_sync(adev->ddev->dev);
if (r < 0)
return r;
@@ -3497,6 +3665,9 @@ static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data)
u32 flags = 0;
int r;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
r = pm_runtime_get_sync(dev->dev);
if (r < 0)
return r;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index d53c60b37cc6..f42e7e67ddba 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -1356,7 +1356,7 @@ static int dm_late_init(void *handle)
unsigned int linear_lut[16];
int i;
struct dmcu *dmcu = NULL;
- bool ret = false;
+ bool ret;
if (!adev->dm.fw_dmcu)
return detect_mst_link_for_all_connectors(adev->ddev);
@@ -1377,13 +1377,10 @@ static int dm_late_init(void *handle)
*/
params.min_abm_backlight = 0x28F;
- /* todo will enable for navi10 */
- if (adev->asic_type <= CHIP_RAVEN) {
- ret = dmcu_load_iram(dmcu, params);
+ ret = dmcu_load_iram(dmcu, params);
- if (!ret)
- return -EINVAL;
- }
+ if (!ret)
+ return -EINVAL;
return detect_mst_link_for_all_connectors(adev->ddev);
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 45cfb7c45566..6f93a6ca4cf0 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -1016,9 +1016,17 @@ static void program_timing_sync(
}
}
- /* set first pipe with plane as master */
+ /* set first unblanked pipe as master */
for (j = 0; j < group_size; j++) {
- if (pipe_set[j]->plane_state) {
+ bool is_blanked;
+
+ if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked)
+ is_blanked =
+ pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp);
+ else
+ is_blanked =
+ pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg);
+ if (!is_blanked) {
if (j == 0)
break;
@@ -1039,9 +1047,17 @@ static void program_timing_sync(
status->timing_sync_info.master = false;
}
- /* remove any other pipes with plane as they have already been synced */
+ /* remove any other unblanked pipes as they have already been synced */
for (j = j + 1; j < group_size; j++) {
- if (pipe_set[j]->plane_state) {
+ bool is_blanked;
+
+ if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked)
+ is_blanked =
+ pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp);
+ else
+ is_blanked =
+ pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg);
+ if (!is_blanked) {
group_size--;
pipe_set[j] = pipe_set[group_size];
j--;
@@ -2522,6 +2538,12 @@ void dc_commit_updates_for_stream(struct dc *dc,
copy_stream_update_to_stream(dc, context, stream, stream_update);
+ if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) {
+ DC_ERROR("Mode validation failed for stream update!\n");
+ dc_release_state(context);
+ return;
+ }
+
commit_planes_for_stream(
dc,
srf_updates,
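The two program_timing_sync() hunks above open-code the same check twice: a pipe counts as blanked if its OPP reports DPG blanked (when that hook exists) or, failing that, if its timing generator reports blanked. A hedged sketch of that check factored into a helper, assuming the pipe_ctx/opp/tg types used elsewhere in dc.c; the helper name is hypothetical:

/*
 * Hypothetical helper equivalent to the open-coded check in the hunks
 * above: prefer the OPP's DPG blank status when the hook is implemented,
 * otherwise fall back to the timing generator's blank status.
 */
static bool pipe_ctx_is_blanked(struct pipe_ctx *pipe)
{
	struct output_pixel_processor *opp = pipe->stream_res.opp;
	struct timing_generator *tg = pipe->stream_res.tg;

	if (opp->funcs->dpg_is_blanked)
		return opp->funcs->dpg_is_blanked(opp);

	return tg->funcs->is_blanked(tg);
}

The last dc.c hunk is independent of this: dc_commit_updates_for_stream() now re-runs validate_bandwidth() against the updated state and bails out, releasing the state, when validation fails — the bandwidth validation workaround named in the merge tag.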
diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
index ae0361e225bb..aa76c2cea747 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
@@ -1561,6 +1561,7 @@ static int smu_v11_0_irq_process(struct amdgpu_device *adev,
* events for SMCToHost interrupt.
*/
uint32_t ctxid = entry->src_data[0];
+ uint32_t data;
if (client_id == SOC15_IH_CLIENTID_THM) {
switch (src_id) {
@@ -1590,6 +1591,11 @@ static int smu_v11_0_irq_process(struct amdgpu_device *adev,
orderly_poweroff(true);
} else if (client_id == SOC15_IH_CLIENTID_MP1) {
if (src_id == 0xfe) {
+ /* ACK SMUToHost interrupt */
+ data = RREG32_SOC15(MP1, 0, mmMP1_SMN_IH_SW_INT_CTRL);
+ data = REG_SET_FIELD(data, MP1_SMN_IH_SW_INT_CTRL, INT_ACK, 1);
+ WREG32_SOC15(MP1, 0, mmMP1_SMN_IH_SW_INT_CTRL, data);
+
switch (ctxid) {
case 0x3:
dev_dbg(adev->dev, "Switched to AC mode!\n");
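For the SMU path, the smu_v11_0_irq_process() hunk above acknowledges the SMUToHost interrupt (MP1 client, src_id 0xfe) with a read-modify-write of MP1_SMN_IH_SW_INT_CTRL before dispatching on ctxid. A sketch of that acknowledge step pulled into a helper; the helper name is hypothetical, while the register, field, and accessor macros are the ones used in the hunk:

/*
 * Hypothetical helper wrapping the ACK added above: set INT_ACK in the
 * SW interrupt control register so the SMU knows the host has consumed
 * the SMUToHost interrupt before the ctxid-specific handling runs.
 */
static void smu_v11_0_ack_smu_to_host_irq(struct amdgpu_device *adev)
{
	uint32_t data;

	data = RREG32_SOC15(MP1, 0, mmMP1_SMN_IH_SW_INT_CTRL);
	data = REG_SET_FIELD(data, MP1_SMN_IH_SW_INT_CTRL, INT_ACK, 1);
	WREG32_SOC15(MP1, 0, mmMP1_SMN_IH_SW_INT_CTRL, data);
}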