path: root/drivers/gpu/drm/amd/amdgpu
author    Maxime Ripard <maxime.ripard@bootlin.com>  2019-01-11 16:32:10 +0100
committer Maxime Ripard <maxime.ripard@bootlin.com>  2019-01-11 16:32:10 +0100
commit    23d19ba06b9c5614d6457f5fed349ec8f6d4dac9 (patch)
tree      39f0b657e5b1b5b958780cae4ae6360f69548d50 /drivers/gpu/drm/amd/amdgpu
parent    7d0250ed8e69fb6a66caecf60b8753a21224cc1a (diff)
parent    e3d093070eb0b5e3df668d3eb04100ea79343c65 (diff)
Merge drm/drm-next into drm-misc-next
drm-next has been forwarded to 5.0-rc1, and we need it to apply the damage
helper for dirtyfb series from Noralf Trønnes.

Signed-off-by: Maxime Ripard <maxime.ripard@bootlin.com>
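For context, the dirtyfb series referenced above adds drm_atomic_helper_dirtyfb()
(declared in drm/drm_damage_helper.h), which a driver can plug straight into its
framebuffer funcs so DIRTYFB ioctls become atomic commits carrying damage clips.
A minimal sketch of that wiring, assuming the GEM framebuffer helpers, is shown
below; "my_fb_funcs" is an illustrative name, not code from this merge:

    #include <drm/drm_damage_helper.h>
    #include <drm/drm_gem_framebuffer_helper.h>

    /* Illustrative sketch: route the DIRTYFB ioctl through the new damage
     * helper, which builds an atomic commit from the supplied clip rects. */
    static const struct drm_framebuffer_funcs my_fb_funcs = {
            .destroy        = drm_gem_fb_destroy,
            .create_handle  = drm_gem_fb_create_handle,
            .dirty          = drm_atomic_helper_dirtyfb,
    };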
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h | 154
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c | 117
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 140
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 20
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 313
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 28
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 439
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h | 243
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 24
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c | 42
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c | 47
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 51
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 26
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h | 16
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 22
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h | 24
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 66
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c | 107
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h | 40
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik_ih.c | 29
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cz_ih.c | 29
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 22
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 31
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 44
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 126
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v9_0.h | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/iceland_ih.c | 29
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/psp_v10_0.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/psp_v11_0.c | 166
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/psp_v3_1.c | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 82
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si_ih.c | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/soc15.c | 26
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/soc15.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/soc15_common.h | 9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/tonga_ih.c | 31
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c | 16
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vce_v3_0.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vce_v4_0.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 33
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vega10_ih.c | 87
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c | 28
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c | 33
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vi.c | 37
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vi.h | 1
81 files changed, 1877 insertions, 1097 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 42f882c633ee..bcef6ea4bcf9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -81,6 +81,8 @@
#include "amdgpu_job.h"
#include "amdgpu_bo_list.h"
#include "amdgpu_gem.h"
+#include "amdgpu_doorbell.h"
+#include "amdgpu_amdkfd.h"
#define MAX_GPU_INSTANCE 16
@@ -162,6 +164,7 @@ extern int amdgpu_si_support;
extern int amdgpu_cik_support;
#endif
+#define AMDGPU_VM_MAX_NUM_CTX 4096
#define AMDGPU_SG_THRESHOLD (256*1024*1024)
#define AMDGPU_DEFAULT_GTT_SIZE_MB 3072ULL /* 3GB by default */
#define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000
@@ -234,7 +237,7 @@ enum amdgpu_kiq_irq {
#define MAX_KIQ_REG_WAIT 5000 /* in usecs, 5ms */
#define MAX_KIQ_REG_BAILOUT_INTERVAL 5 /* in msecs, 5ms */
-#define MAX_KIQ_REG_TRY 20
+#define MAX_KIQ_REG_TRY 80 /* 20 -> 80 */
int amdgpu_device_ip_set_clockgating_state(void *dev,
enum amd_ip_block_type block_type,
@@ -361,123 +364,6 @@ int amdgpu_fence_slab_init(void);
void amdgpu_fence_slab_fini(void);
/*
- * GPU doorbell structures, functions & helpers
- */
-typedef enum _AMDGPU_DOORBELL_ASSIGNMENT
-{
- AMDGPU_DOORBELL_KIQ = 0x000,
- AMDGPU_DOORBELL_HIQ = 0x001,
- AMDGPU_DOORBELL_DIQ = 0x002,
- AMDGPU_DOORBELL_MEC_RING0 = 0x010,
- AMDGPU_DOORBELL_MEC_RING1 = 0x011,
- AMDGPU_DOORBELL_MEC_RING2 = 0x012,
- AMDGPU_DOORBELL_MEC_RING3 = 0x013,
- AMDGPU_DOORBELL_MEC_RING4 = 0x014,
- AMDGPU_DOORBELL_MEC_RING5 = 0x015,
- AMDGPU_DOORBELL_MEC_RING6 = 0x016,
- AMDGPU_DOORBELL_MEC_RING7 = 0x017,
- AMDGPU_DOORBELL_GFX_RING0 = 0x020,
- AMDGPU_DOORBELL_sDMA_ENGINE0 = 0x1E0,
- AMDGPU_DOORBELL_sDMA_ENGINE1 = 0x1E1,
- AMDGPU_DOORBELL_IH = 0x1E8,
- AMDGPU_DOORBELL_MAX_ASSIGNMENT = 0x3FF,
- AMDGPU_DOORBELL_INVALID = 0xFFFF
-} AMDGPU_DOORBELL_ASSIGNMENT;
-
-struct amdgpu_doorbell {
- /* doorbell mmio */
- resource_size_t base;
- resource_size_t size;
- u32 __iomem *ptr;
- u32 num_doorbells; /* Number of doorbells actually reserved for amdgpu. */
-};
-
-/*
- * 64bit doorbell, offset are in QWORD, occupy 2KB doorbell space
- */
-typedef enum _AMDGPU_DOORBELL64_ASSIGNMENT
-{
- /*
- * All compute related doorbells: kiq, hiq, diq, traditional compute queue, user queue, should locate in
- * a continues range so that programming CP_MEC_DOORBELL_RANGE_LOWER/UPPER can cover this range.
- * Compute related doorbells are allocated from 0x00 to 0x8a
- */
-
-
- /* kernel scheduling */
- AMDGPU_DOORBELL64_KIQ = 0x00,
-
- /* HSA interface queue and debug queue */
- AMDGPU_DOORBELL64_HIQ = 0x01,
- AMDGPU_DOORBELL64_DIQ = 0x02,
-
- /* Compute engines */
- AMDGPU_DOORBELL64_MEC_RING0 = 0x03,
- AMDGPU_DOORBELL64_MEC_RING1 = 0x04,
- AMDGPU_DOORBELL64_MEC_RING2 = 0x05,
- AMDGPU_DOORBELL64_MEC_RING3 = 0x06,
- AMDGPU_DOORBELL64_MEC_RING4 = 0x07,
- AMDGPU_DOORBELL64_MEC_RING5 = 0x08,
- AMDGPU_DOORBELL64_MEC_RING6 = 0x09,
- AMDGPU_DOORBELL64_MEC_RING7 = 0x0a,
-
- /* User queue doorbell range (128 doorbells) */
- AMDGPU_DOORBELL64_USERQUEUE_START = 0x0b,
- AMDGPU_DOORBELL64_USERQUEUE_END = 0x8a,
-
- /* Graphics engine */
- AMDGPU_DOORBELL64_GFX_RING0 = 0x8b,
-
- /*
- * Other graphics doorbells can be allocated here: from 0x8c to 0xdf
- * Graphics voltage island aperture 1
- * default non-graphics QWORD index is 0xe0 - 0xFF inclusive
- */
-
- /* sDMA engines reserved from 0xe0 -0xef */
- AMDGPU_DOORBELL64_sDMA_ENGINE0 = 0xE0,
- AMDGPU_DOORBELL64_sDMA_HI_PRI_ENGINE0 = 0xE1,
- AMDGPU_DOORBELL64_sDMA_ENGINE1 = 0xE8,
- AMDGPU_DOORBELL64_sDMA_HI_PRI_ENGINE1 = 0xE9,
-
- /* For vega10 sriov, the sdma doorbell must be fixed as follow
- * to keep the same setting with host driver, or it will
- * happen conflicts
- */
- AMDGPU_VEGA10_DOORBELL64_sDMA_ENGINE0 = 0xF0,
- AMDGPU_VEGA10_DOORBELL64_sDMA_HI_PRI_ENGINE0 = 0xF1,
- AMDGPU_VEGA10_DOORBELL64_sDMA_ENGINE1 = 0xF2,
- AMDGPU_VEGA10_DOORBELL64_sDMA_HI_PRI_ENGINE1 = 0xF3,
-
- /* Interrupt handler */
- AMDGPU_DOORBELL64_IH = 0xF4, /* For legacy interrupt ring buffer */
- AMDGPU_DOORBELL64_IH_RING1 = 0xF5, /* For page migration request log */
- AMDGPU_DOORBELL64_IH_RING2 = 0xF6, /* For page migration translation/invalidation log */
-
- /* VCN engine use 32 bits doorbell */
- AMDGPU_DOORBELL64_VCN0_1 = 0xF8, /* lower 32 bits for VNC0 and upper 32 bits for VNC1 */
- AMDGPU_DOORBELL64_VCN2_3 = 0xF9,
- AMDGPU_DOORBELL64_VCN4_5 = 0xFA,
- AMDGPU_DOORBELL64_VCN6_7 = 0xFB,
-
- /* overlap the doorbell assignment with VCN as they are mutually exclusive
- * VCE engine's doorbell is 32 bit and two VCE ring share one QWORD
- */
- AMDGPU_DOORBELL64_UVD_RING0_1 = 0xF8,
- AMDGPU_DOORBELL64_UVD_RING2_3 = 0xF9,
- AMDGPU_DOORBELL64_UVD_RING4_5 = 0xFA,
- AMDGPU_DOORBELL64_UVD_RING6_7 = 0xFB,
-
- AMDGPU_DOORBELL64_VCE_RING0_1 = 0xFC,
- AMDGPU_DOORBELL64_VCE_RING2_3 = 0xFD,
- AMDGPU_DOORBELL64_VCE_RING4_5 = 0xFE,
- AMDGPU_DOORBELL64_VCE_RING6_7 = 0xFF,
-
- AMDGPU_DOORBELL64_MAX_ASSIGNMENT = 0xFF,
- AMDGPU_DOORBELL64_INVALID = 0xFFFF
-} AMDGPU_DOORBELL64_ASSIGNMENT;
-
-/*
* IRQS.
*/
@@ -654,6 +540,8 @@ struct amdgpu_asic_funcs {
struct amdgpu_ring *ring);
/* check if the asic needs a full reset of if soft reset will work */
bool (*need_full_reset)(struct amdgpu_device *adev);
+ /* initialize doorbell layout for specific asic*/
+ void (*init_doorbell_index)(struct amdgpu_device *adev);
};
/*
@@ -976,6 +864,9 @@ struct amdgpu_device {
/* GDS */
struct amdgpu_gds gds;
+ /* KFD */
+ struct amdgpu_kfd_dev kfd;
+
/* display related functionality */
struct amdgpu_display_manager dm;
@@ -989,9 +880,6 @@ struct amdgpu_device {
atomic64_t visible_pin_size;
atomic64_t gart_pin_size;
- /* amdkfd interface */
- struct kfd_dev *kfd;
-
/* soc15 register offset based on ip, instance and segment */
uint32_t *reg_offset[MAX_HWIP][HWIP_MAX_INSTANCE];
@@ -1023,6 +911,10 @@ struct amdgpu_device {
unsigned long last_mm_index;
bool in_gpu_reset;
struct mutex lock_reset;
+ struct amdgpu_doorbell_index doorbell_index;
+
+ int asic_reset_res;
+ struct work_struct xgmi_reset_work;
};
static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev)
@@ -1047,11 +939,6 @@ uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset);
u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg);
void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v);
-u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index);
-void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v);
-u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index);
-void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v);
-
bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type);
bool amdgpu_device_has_dc_support(struct amdgpu_device *adev);
@@ -1113,11 +1000,6 @@ int emu_soc_asic_init(struct amdgpu_device *adev);
#define RREG32_IO(reg) amdgpu_io_rreg(adev, (reg))
#define WREG32_IO(reg, v) amdgpu_io_wreg(adev, (reg), (v))
-#define RDOORBELL32(index) amdgpu_mm_rdoorbell(adev, (index))
-#define WDOORBELL32(index, v) amdgpu_mm_wdoorbell(adev, (index), (v))
-#define RDOORBELL64(index) amdgpu_mm_rdoorbell64(adev, (index))
-#define WDOORBELL64(index, v) amdgpu_mm_wdoorbell64(adev, (index), (v))
-
#define REG_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT
#define REG_FIELD_MASK(reg, field) reg##__##field##_MASK
@@ -1159,6 +1041,7 @@ int emu_soc_asic_init(struct amdgpu_device *adev);
#define amdgpu_asic_flush_hdp(adev, r) (adev)->asic_funcs->flush_hdp((adev), (r))
#define amdgpu_asic_invalidate_hdp(adev, r) (adev)->asic_funcs->invalidate_hdp((adev), (r))
#define amdgpu_asic_need_full_reset(adev) (adev)->asic_funcs->need_full_reset((adev))
+#define amdgpu_asic_init_doorbell_index(adev) (adev)->asic_funcs->init_doorbell_index((adev))
/* Common functions */
bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev);
@@ -1219,12 +1102,6 @@ void amdgpu_disable_vblank_kms(struct drm_device *dev, unsigned int pipe);
long amdgpu_kms_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg);
-
-/*
- * functions used by amdgpu_xgmi.c
- */
-int amdgpu_xgmi_add_device(struct amdgpu_device *adev);
-
/*
* functions used by amdgpu_encoder.c
*/
@@ -1252,6 +1129,9 @@ bool amdgpu_acpi_is_pcie_performance_request_supported(struct amdgpu_device *ade
int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev,
u8 perf_req, bool advertise);
int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev);
+
+void amdgpu_acpi_get_backlight_caps(struct amdgpu_device *adev,
+ struct amdgpu_dm_backlight_caps *caps);
#else
static inline int amdgpu_acpi_init(struct amdgpu_device *adev) { return 0; }
static inline void amdgpu_acpi_fini(struct amdgpu_device *adev) { }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index 7f0afc526419..4376b17ca594 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -41,28 +41,21 @@ struct amdgpu_atif_notification_cfg {
};
struct amdgpu_atif_notifications {
- bool display_switch;
- bool expansion_mode_change;
bool thermal_state;
bool forced_power_state;
bool system_power_state;
- bool display_conf_change;
- bool px_gfx_switch;
bool brightness_change;
bool dgpu_display_event;
+ bool gpu_package_power_limit;
};
struct amdgpu_atif_functions {
bool system_params;
bool sbios_requests;
- bool select_active_disp;
- bool lid_state;
- bool get_tv_standard;
- bool set_tv_standard;
- bool get_panel_expansion_mode;
- bool set_panel_expansion_mode;
bool temperature_change;
- bool graphics_device_types;
+ bool query_backlight_transfer_characteristics;
+ bool ready_to_undock;
+ bool external_gpu_information;
};
struct amdgpu_atif {
@@ -72,6 +65,7 @@ struct amdgpu_atif {
struct amdgpu_atif_functions functions;
struct amdgpu_atif_notification_cfg notification_cfg;
struct amdgpu_encoder *encoder_for_bl;
+ struct amdgpu_dm_backlight_caps backlight_caps;
};
/* Call the ATIF method
@@ -137,15 +131,12 @@ static union acpi_object *amdgpu_atif_call(struct amdgpu_atif *atif,
*/
static void amdgpu_atif_parse_notification(struct amdgpu_atif_notifications *n, u32 mask)
{
- n->display_switch = mask & ATIF_DISPLAY_SWITCH_REQUEST_SUPPORTED;
- n->expansion_mode_change = mask & ATIF_EXPANSION_MODE_CHANGE_REQUEST_SUPPORTED;
n->thermal_state = mask & ATIF_THERMAL_STATE_CHANGE_REQUEST_SUPPORTED;
n->forced_power_state = mask & ATIF_FORCED_POWER_STATE_CHANGE_REQUEST_SUPPORTED;
n->system_power_state = mask & ATIF_SYSTEM_POWER_SOURCE_CHANGE_REQUEST_SUPPORTED;
- n->display_conf_change = mask & ATIF_DISPLAY_CONF_CHANGE_REQUEST_SUPPORTED;
- n->px_gfx_switch = mask & ATIF_PX_GFX_SWITCH_REQUEST_SUPPORTED;
n->brightness_change = mask & ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST_SUPPORTED;
n->dgpu_display_event = mask & ATIF_DGPU_DISPLAY_EVENT_SUPPORTED;
+ n->gpu_package_power_limit = mask & ATIF_GPU_PACKAGE_POWER_LIMIT_REQUEST_SUPPORTED;
}
/**
@@ -162,14 +153,11 @@ static void amdgpu_atif_parse_functions(struct amdgpu_atif_functions *f, u32 mas
{
f->system_params = mask & ATIF_GET_SYSTEM_PARAMETERS_SUPPORTED;
f->sbios_requests = mask & ATIF_GET_SYSTEM_BIOS_REQUESTS_SUPPORTED;
- f->select_active_disp = mask & ATIF_SELECT_ACTIVE_DISPLAYS_SUPPORTED;
- f->lid_state = mask & ATIF_GET_LID_STATE_SUPPORTED;
- f->get_tv_standard = mask & ATIF_GET_TV_STANDARD_FROM_CMOS_SUPPORTED;
- f->set_tv_standard = mask & ATIF_SET_TV_STANDARD_IN_CMOS_SUPPORTED;
- f->get_panel_expansion_mode = mask & ATIF_GET_PANEL_EXPANSION_MODE_FROM_CMOS_SUPPORTED;
- f->set_panel_expansion_mode = mask & ATIF_SET_PANEL_EXPANSION_MODE_IN_CMOS_SUPPORTED;
f->temperature_change = mask & ATIF_TEMPERATURE_CHANGE_NOTIFICATION_SUPPORTED;
- f->graphics_device_types = mask & ATIF_GET_GRAPHICS_DEVICE_TYPES_SUPPORTED;
+ f->query_backlight_transfer_characteristics =
+ mask & ATIF_QUERY_BACKLIGHT_TRANSFER_CHARACTERISTICS_SUPPORTED;
+ f->ready_to_undock = mask & ATIF_READY_TO_UNDOCK_NOTIFICATION_SUPPORTED;
+ f->external_gpu_information = mask & ATIF_GET_EXTERNAL_GPU_INFORMATION_SUPPORTED;
}
/**
@@ -311,6 +299,65 @@ out:
}
/**
+ * amdgpu_atif_query_backlight_caps - get min and max backlight input signal
+ *
+ * @handle: acpi handle
+ *
+ * Execute the QUERY_BRIGHTNESS_TRANSFER_CHARACTERISTICS ATIF function
+ * to determine the acceptable range of backlight values
+ *
+ * Backlight_caps.caps_valid will be set to true if the query is successful
+ *
+ * The input signals are in range 0-255
+ *
+ * This function assumes the display with backlight is the first LCD
+ *
+ * Returns 0 on success, error on failure.
+ */
+static int amdgpu_atif_query_backlight_caps(struct amdgpu_atif *atif)
+{
+ union acpi_object *info;
+ struct atif_qbtc_output characteristics;
+ struct atif_qbtc_arguments arguments;
+ struct acpi_buffer params;
+ size_t size;
+ int err = 0;
+
+ arguments.size = sizeof(arguments);
+ arguments.requested_display = ATIF_QBTC_REQUEST_LCD1;
+
+ params.length = sizeof(arguments);
+ params.pointer = (void *)&arguments;
+
+ info = amdgpu_atif_call(atif,
+ ATIF_FUNCTION_QUERY_BRIGHTNESS_TRANSFER_CHARACTERISTICS,
+ &params);
+ if (!info) {
+ err = -EIO;
+ goto out;
+ }
+
+ size = *(u16 *) info->buffer.pointer;
+ if (size < 10) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ memset(&characteristics, 0, sizeof(characteristics));
+ size = min(sizeof(characteristics), size);
+ memcpy(&characteristics, info->buffer.pointer, size);
+
+ atif->backlight_caps.caps_valid = true;
+ atif->backlight_caps.min_input_signal =
+ characteristics.min_input_signal;
+ atif->backlight_caps.max_input_signal =
+ characteristics.max_input_signal;
+out:
+ kfree(info);
+ return err;
+}
+
+/**
* amdgpu_atif_get_sbios_requests - get requested sbios event
*
* @handle: acpi handle
@@ -799,6 +846,17 @@ int amdgpu_acpi_init(struct amdgpu_device *adev)
}
}
+ if (atif->functions.query_backlight_transfer_characteristics) {
+ ret = amdgpu_atif_query_backlight_caps(atif);
+ if (ret) {
+ DRM_DEBUG_DRIVER("Call to QUERY_BACKLIGHT_TRANSFER_CHARACTERISTICS failed: %d\n",
+ ret);
+ atif->backlight_caps.caps_valid = false;
+ }
+ } else {
+ atif->backlight_caps.caps_valid = false;
+ }
+
out:
adev->acpi_nb.notifier_call = amdgpu_acpi_event;
register_acpi_notifier(&adev->acpi_nb);
@@ -806,6 +864,18 @@ out:
return ret;
}
+void amdgpu_acpi_get_backlight_caps(struct amdgpu_device *adev,
+ struct amdgpu_dm_backlight_caps *caps)
+{
+ if (!adev->atif) {
+ caps->caps_valid = false;
+ return;
+ }
+ caps->caps_valid = adev->atif->backlight_caps.caps_valid;
+ caps->min_input_signal = adev->atif->backlight_caps.min_input_signal;
+ caps->max_input_signal = adev->atif->backlight_caps.max_input_signal;
+}
+
/**
* amdgpu_acpi_fini - tear down driver acpi support
*
@@ -816,6 +886,5 @@ out:
void amdgpu_acpi_fini(struct amdgpu_device *adev)
{
unregister_acpi_notifier(&adev->acpi_nb);
- if (adev->atif)
- kfree(adev->atif);
+ kfree(adev->atif);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index bcf1666fb31d..2dfaf158ef07 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -26,15 +26,26 @@
#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include <linux/module.h>
+#include <linux/dma-buf.h>
const struct kgd2kfd_calls *kgd2kfd;
static const unsigned int compute_vmid_bitmap = 0xFF00;
+/* Total memory size in system memory and all GPU VRAM. Used to
+ * estimate worst case amount of memory to reserve for page tables
+ */
+uint64_t amdgpu_amdkfd_total_mem_size;
+
int amdgpu_amdkfd_init(void)
{
+ struct sysinfo si;
int ret;
+ si_meminfo(&si);
+ amdgpu_amdkfd_total_mem_size = si.totalram - si.totalhigh;
+ amdgpu_amdkfd_total_mem_size *= si.mem_unit;
+
#ifdef CONFIG_HSA_AMD
ret = kgd2kfd_init(KFD_INTERFACE_VERSION, &kgd2kfd);
if (ret)
@@ -73,9 +84,11 @@ void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev)
case CHIP_FIJI:
case CHIP_POLARIS10:
case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
kfd2kgd = amdgpu_amdkfd_gfx_8_0_get_functions();
break;
case CHIP_VEGA10:
+ case CHIP_VEGA12:
case CHIP_VEGA20:
case CHIP_RAVEN:
kfd2kgd = amdgpu_amdkfd_gfx_9_0_get_functions();
@@ -85,8 +98,11 @@ void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev)
return;
}
- adev->kfd = kgd2kfd->probe((struct kgd_dev *)adev,
- adev->pdev, kfd2kgd);
+ adev->kfd.dev = kgd2kfd->probe((struct kgd_dev *)adev,
+ adev->pdev, kfd2kgd);
+
+ if (adev->kfd.dev)
+ amdgpu_amdkfd_total_mem_size += adev->gmc.real_vram_size;
}
/**
@@ -126,7 +142,8 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
{
int i, n;
int last_valid_bit;
- if (adev->kfd) {
+
+ if (adev->kfd.dev) {
struct kgd2kfd_shared_resources gpu_resources = {
.compute_vmid_bitmap = compute_vmid_bitmap,
.num_pipe_per_mec = adev->gfx.mec.num_pipe_per_mec,
@@ -165,7 +182,7 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
&gpu_resources.doorbell_start_offset);
if (adev->asic_type < CHIP_VEGA10) {
- kgd2kfd->device_init(adev->kfd, &gpu_resources);
+ kgd2kfd->device_init(adev->kfd.dev, &gpu_resources);
return;
}
@@ -179,25 +196,14 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
* process in case of 64-bit doorbells so we
* can use each doorbell assignment twice.
*/
- if (adev->asic_type == CHIP_VEGA10) {
- gpu_resources.sdma_doorbell[0][i] =
- AMDGPU_VEGA10_DOORBELL64_sDMA_ENGINE0 + (i >> 1);
- gpu_resources.sdma_doorbell[0][i+1] =
- AMDGPU_VEGA10_DOORBELL64_sDMA_ENGINE0 + 0x200 + (i >> 1);
- gpu_resources.sdma_doorbell[1][i] =
- AMDGPU_VEGA10_DOORBELL64_sDMA_ENGINE1 + (i >> 1);
- gpu_resources.sdma_doorbell[1][i+1] =
- AMDGPU_VEGA10_DOORBELL64_sDMA_ENGINE1 + 0x200 + (i >> 1);
- } else {
- gpu_resources.sdma_doorbell[0][i] =
- AMDGPU_DOORBELL64_sDMA_ENGINE0 + (i >> 1);
- gpu_resources.sdma_doorbell[0][i+1] =
- AMDGPU_DOORBELL64_sDMA_ENGINE0 + 0x200 + (i >> 1);
- gpu_resources.sdma_doorbell[1][i] =
- AMDGPU_DOORBELL64_sDMA_ENGINE1 + (i >> 1);
- gpu_resources.sdma_doorbell[1][i+1] =
- AMDGPU_DOORBELL64_sDMA_ENGINE1 + 0x200 + (i >> 1);
- }
+ gpu_resources.sdma_doorbell[0][i] =
+ adev->doorbell_index.sdma_engine0 + (i >> 1);
+ gpu_resources.sdma_doorbell[0][i+1] =
+ adev->doorbell_index.sdma_engine0 + 0x200 + (i >> 1);
+ gpu_resources.sdma_doorbell[1][i] =
+ adev->doorbell_index.sdma_engine1 + (i >> 1);
+ gpu_resources.sdma_doorbell[1][i+1] =
+ adev->doorbell_index.sdma_engine1 + 0x200 + (i >> 1);
}
/* Doorbells 0x0e0-0ff and 0x2e0-2ff are reserved for
* SDMA, IH and VCN. So don't use them for the CP.
@@ -205,37 +211,37 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
gpu_resources.reserved_doorbell_mask = 0x1e0;
gpu_resources.reserved_doorbell_val = 0x0e0;
- kgd2kfd->device_init(adev->kfd, &gpu_resources);
+ kgd2kfd->device_init(adev->kfd.dev, &gpu_resources);
}
}
void amdgpu_amdkfd_device_fini(struct amdgpu_device *adev)
{
- if (adev->kfd) {
- kgd2kfd->device_exit(adev->kfd);
- adev->kfd = NULL;
+ if (adev->kfd.dev) {
+ kgd2kfd->device_exit(adev->kfd.dev);
+ adev->kfd.dev = NULL;
}
}
void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
const void *ih_ring_entry)
{
- if (adev->kfd)
- kgd2kfd->interrupt(adev->kfd, ih_ring_entry);
+ if (adev->kfd.dev)
+ kgd2kfd->interrupt(adev->kfd.dev, ih_ring_entry);
}
void amdgpu_amdkfd_suspend(struct amdgpu_device *adev)
{
- if (adev->kfd)
- kgd2kfd->suspend(adev->kfd);
+ if (adev->kfd.dev)
+ kgd2kfd->suspend(adev->kfd.dev);
}
int amdgpu_amdkfd_resume(struct amdgpu_device *adev)
{
int r = 0;
- if (adev->kfd)
- r = kgd2kfd->resume(adev->kfd);
+ if (adev->kfd.dev)
+ r = kgd2kfd->resume(adev->kfd.dev);
return r;
}
@@ -244,8 +250,8 @@ int amdgpu_amdkfd_pre_reset(struct amdgpu_device *adev)
{
int r = 0;
- if (adev->kfd)
- r = kgd2kfd->pre_reset(adev->kfd);
+ if (adev->kfd.dev)
+ r = kgd2kfd->pre_reset(adev->kfd.dev);
return r;
}
@@ -254,8 +260,8 @@ int amdgpu_amdkfd_post_reset(struct amdgpu_device *adev)
{
int r = 0;
- if (adev->kfd)
- r = kgd2kfd->post_reset(adev->kfd);
+ if (adev->kfd.dev)
+ r = kgd2kfd->post_reset(adev->kfd.dev);
return r;
}
@@ -428,6 +434,62 @@ void amdgpu_amdkfd_get_cu_info(struct kgd_dev *kgd, struct kfd_cu_info *cu_info)
cu_info->lds_size = acu_info.lds_size;
}
+int amdgpu_amdkfd_get_dmabuf_info(struct kgd_dev *kgd, int dma_buf_fd,
+ struct kgd_dev **dma_buf_kgd,
+ uint64_t *bo_size, void *metadata_buffer,
+ size_t buffer_size, uint32_t *metadata_size,
+ uint32_t *flags)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
+ struct dma_buf *dma_buf;
+ struct drm_gem_object *obj;
+ struct amdgpu_bo *bo;
+ uint64_t metadata_flags;
+ int r = -EINVAL;
+
+ dma_buf = dma_buf_get(dma_buf_fd);
+ if (IS_ERR(dma_buf))
+ return PTR_ERR(dma_buf);
+
+ if (dma_buf->ops != &amdgpu_dmabuf_ops)
+ /* Can't handle non-graphics buffers */
+ goto out_put;
+
+ obj = dma_buf->priv;
+ if (obj->dev->driver != adev->ddev->driver)
+ /* Can't handle buffers from different drivers */
+ goto out_put;
+
+ adev = obj->dev->dev_private;
+ bo = gem_to_amdgpu_bo(obj);
+ if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT)))
+ /* Only VRAM and GTT BOs are supported */
+ goto out_put;
+
+ r = 0;
+ if (dma_buf_kgd)
+ *dma_buf_kgd = (struct kgd_dev *)adev;
+ if (bo_size)
+ *bo_size = amdgpu_bo_size(bo);
+ if (metadata_size)
+ *metadata_size = bo->metadata_size;
+ if (metadata_buffer)
+ r = amdgpu_bo_get_metadata(bo, metadata_buffer, buffer_size,
+ metadata_size, &metadata_flags);
+ if (flags) {
+ *flags = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
+ ALLOC_MEM_FLAGS_VRAM : ALLOC_MEM_FLAGS_GTT;
+
+ if (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
+ *flags |= ALLOC_MEM_FLAGS_PUBLIC;
+ }
+
+out_put:
+ dma_buf_put(dma_buf);
+ return r;
+}
+
uint64_t amdgpu_amdkfd_get_vram_usage(struct kgd_dev *kgd)
{
struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
@@ -510,7 +572,7 @@ void amdgpu_amdkfd_set_compute_idle(struct kgd_dev *kgd, bool idle)
bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid)
{
- if (adev->kfd) {
+ if (adev->kfd.dev) {
if ((1 << vmid) & compute_vmid_bitmap)
return true;
}
@@ -524,7 +586,7 @@ bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm)
return false;
}
-void amdgpu_amdkfd_unreserve_system_memory_limit(struct amdgpu_bo *bo)
+void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)
{
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index bcf587b4ba98..70429f7aa9a8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -27,7 +27,6 @@
#include <linux/types.h>
#include <linux/mm.h>
-#include <linux/mmu_context.h>
#include <linux/workqueue.h>
#include <kgd_kfd_interface.h>
#include <drm/ttm/ttm_execbuf_util.h>
@@ -35,6 +34,7 @@
#include "amdgpu_vm.h"
extern const struct kgd2kfd_calls *kgd2kfd;
+extern uint64_t amdgpu_amdkfd_total_mem_size;
struct amdgpu_device;
@@ -77,6 +77,11 @@ struct amdgpu_amdkfd_fence {
char timeline_name[TASK_COMM_LEN];
};
+struct amdgpu_kfd_dev {
+ struct kfd_dev *dev;
+ uint64_t vram_used;
+};
+
struct amdgpu_amdkfd_fence *amdgpu_amdkfd_fence_create(u64 context,
struct mm_struct *mm);
bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm);
@@ -144,6 +149,11 @@ uint64_t amdgpu_amdkfd_get_gpu_clock_counter(struct kgd_dev *kgd);
uint32_t amdgpu_amdkfd_get_max_engine_clock_in_mhz(struct kgd_dev *kgd);
void amdgpu_amdkfd_get_cu_info(struct kgd_dev *kgd, struct kfd_cu_info *cu_info);
+int amdgpu_amdkfd_get_dmabuf_info(struct kgd_dev *kgd, int dma_buf_fd,
+ struct kgd_dev **dmabuf_kgd,
+ uint64_t *bo_size, void *metadata_buffer,
+ size_t buffer_size, uint32_t *metadata_size,
+ uint32_t *flags);
uint64_t amdgpu_amdkfd_get_vram_usage(struct kgd_dev *kgd);
uint64_t amdgpu_amdkfd_get_hive_id(struct kgd_dev *kgd);
@@ -195,7 +205,13 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *process_info,
int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd,
struct kfd_vm_fault_info *info);
+int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
+ struct dma_buf *dmabuf,
+ uint64_t va, void *vm,
+ struct kgd_mem **mem, uint64_t *size,
+ uint64_t *mmap_offset);
+
void amdgpu_amdkfd_gpuvm_init_mem_limits(void);
-void amdgpu_amdkfd_unreserve_system_memory_limit(struct amdgpu_bo *bo);
+void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo);
#endif /* AMDGPU_AMDKFD_H_INCLUDED */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
index 72a357dae070..ff7fac7df34b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
@@ -23,6 +23,7 @@
#include <linux/fdtable.h>
#include <linux/uaccess.h>
#include <linux/firmware.h>
+#include <linux/mmu_context.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
index 0e2a56b6a9b6..56ea929f524b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
@@ -24,6 +24,7 @@
#include <linux/fdtable.h>
#include <linux/uaccess.h>
#include <linux/firmware.h>
+#include <linux/mmu_context.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
index 03b604c96d94..5c51d4910650 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
@@ -26,6 +26,7 @@
#include <linux/fdtable.h>
#include <linux/uaccess.h>
#include <linux/firmware.h>
+#include <linux/mmu_context.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index df0a059565f9..be1ab43473c6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -25,6 +25,7 @@
#include <linux/list.h>
#include <linux/pagemap.h>
#include <linux/sched/mm.h>
+#include <linux/dma-buf.h>
#include <drm/drmP.h>
#include "amdgpu_object.h"
#include "amdgpu_vm.h"
@@ -46,9 +47,9 @@
/* Impose limit on how much memory KFD can use */
static struct {
uint64_t max_system_mem_limit;
- uint64_t max_userptr_mem_limit;
+ uint64_t max_ttm_mem_limit;
int64_t system_mem_used;
- int64_t userptr_mem_used;
+ int64_t ttm_mem_used;
spinlock_t mem_limit_lock;
} kfd_mem_limit;
@@ -90,8 +91,8 @@ static bool check_if_add_bo_to_vm(struct amdgpu_vm *avm,
}
/* Set memory usage limits. Current, limits are
- * System (kernel) memory - 3/8th System RAM
- * Userptr memory - 3/4th System RAM
+ * System (TTM + userptr) memory - 3/4th System RAM
+ * TTM memory - 3/8th System RAM
*/
void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
{
@@ -103,48 +104,61 @@ void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
mem *= si.mem_unit;
spin_lock_init(&kfd_mem_limit.mem_limit_lock);
- kfd_mem_limit.max_system_mem_limit = (mem >> 1) - (mem >> 3);
- kfd_mem_limit.max_userptr_mem_limit = mem - (mem >> 2);
- pr_debug("Kernel memory limit %lluM, userptr limit %lluM\n",
+ kfd_mem_limit.max_system_mem_limit = (mem >> 1) + (mem >> 2);
+ kfd_mem_limit.max_ttm_mem_limit = (mem >> 1) - (mem >> 3);
+ pr_debug("Kernel memory limit %lluM, TTM limit %lluM\n",
(kfd_mem_limit.max_system_mem_limit >> 20),
- (kfd_mem_limit.max_userptr_mem_limit >> 20));
+ (kfd_mem_limit.max_ttm_mem_limit >> 20));
}
-static int amdgpu_amdkfd_reserve_system_mem_limit(struct amdgpu_device *adev,
- uint64_t size, u32 domain)
+static int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
+ uint64_t size, u32 domain, bool sg)
{
- size_t acc_size;
+ size_t acc_size, system_mem_needed, ttm_mem_needed, vram_needed;
+ uint64_t reserved_for_pt = amdgpu_amdkfd_total_mem_size >> 9;
int ret = 0;
acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
sizeof(struct amdgpu_bo));
- spin_lock(&kfd_mem_limit.mem_limit_lock);
+ vram_needed = 0;
if (domain == AMDGPU_GEM_DOMAIN_GTT) {
- if (kfd_mem_limit.system_mem_used + (acc_size + size) >
- kfd_mem_limit.max_system_mem_limit) {
- ret = -ENOMEM;
- goto err_no_mem;
- }
- kfd_mem_limit.system_mem_used += (acc_size + size);
- } else if (domain == AMDGPU_GEM_DOMAIN_CPU) {
- if ((kfd_mem_limit.system_mem_used + acc_size >
- kfd_mem_limit.max_system_mem_limit) ||
- (kfd_mem_limit.userptr_mem_used + (size + acc_size) >
- kfd_mem_limit.max_userptr_mem_limit)) {
- ret = -ENOMEM;
- goto err_no_mem;
- }
- kfd_mem_limit.system_mem_used += acc_size;
- kfd_mem_limit.userptr_mem_used += size;
+ /* TTM GTT memory */
+ system_mem_needed = acc_size + size;
+ ttm_mem_needed = acc_size + size;
+ } else if (domain == AMDGPU_GEM_DOMAIN_CPU && !sg) {
+ /* Userptr */
+ system_mem_needed = acc_size + size;
+ ttm_mem_needed = acc_size;
+ } else {
+ /* VRAM and SG */
+ system_mem_needed = acc_size;
+ ttm_mem_needed = acc_size;
+ if (domain == AMDGPU_GEM_DOMAIN_VRAM)
+ vram_needed = size;
+ }
+
+ spin_lock(&kfd_mem_limit.mem_limit_lock);
+
+ if ((kfd_mem_limit.system_mem_used + system_mem_needed >
+ kfd_mem_limit.max_system_mem_limit) ||
+ (kfd_mem_limit.ttm_mem_used + ttm_mem_needed >
+ kfd_mem_limit.max_ttm_mem_limit) ||
+ (adev->kfd.vram_used + vram_needed >
+ adev->gmc.real_vram_size - reserved_for_pt)) {
+ ret = -ENOMEM;
+ } else {
+ kfd_mem_limit.system_mem_used += system_mem_needed;
+ kfd_mem_limit.ttm_mem_used += ttm_mem_needed;
+ adev->kfd.vram_used += vram_needed;
}
-err_no_mem:
+
spin_unlock(&kfd_mem_limit.mem_limit_lock);
return ret;
}
-static void unreserve_system_mem_limit(struct amdgpu_device *adev,
- uint64_t size, u32 domain)
+static void unreserve_mem_limit(struct amdgpu_device *adev,
+ uint64_t size, u32 domain, bool sg)
{
size_t acc_size;
@@ -154,35 +168,39 @@ static void unreserve_system_mem_limit(struct amdgpu_device *adev,
spin_lock(&kfd_mem_limit.mem_limit_lock);
if (domain == AMDGPU_GEM_DOMAIN_GTT) {
kfd_mem_limit.system_mem_used -= (acc_size + size);
- } else if (domain == AMDGPU_GEM_DOMAIN_CPU) {
+ kfd_mem_limit.ttm_mem_used -= (acc_size + size);
+ } else if (domain == AMDGPU_GEM_DOMAIN_CPU && !sg) {
+ kfd_mem_limit.system_mem_used -= (acc_size + size);
+ kfd_mem_limit.ttm_mem_used -= acc_size;
+ } else {
kfd_mem_limit.system_mem_used -= acc_size;
- kfd_mem_limit.userptr_mem_used -= size;
+ kfd_mem_limit.ttm_mem_used -= acc_size;
+ if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
+ adev->kfd.vram_used -= size;
+ WARN_ONCE(adev->kfd.vram_used < 0,
+ "kfd VRAM memory accounting unbalanced");
+ }
}
WARN_ONCE(kfd_mem_limit.system_mem_used < 0,
"kfd system memory accounting unbalanced");
- WARN_ONCE(kfd_mem_limit.userptr_mem_used < 0,
- "kfd userptr memory accounting unbalanced");
+ WARN_ONCE(kfd_mem_limit.ttm_mem_used < 0,
+ "kfd TTM memory accounting unbalanced");
spin_unlock(&kfd_mem_limit.mem_limit_lock);
}
-void amdgpu_amdkfd_unreserve_system_memory_limit(struct amdgpu_bo *bo)
+void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)
{
- spin_lock(&kfd_mem_limit.mem_limit_lock);
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+ u32 domain = bo->preferred_domains;
+ bool sg = (bo->preferred_domains == AMDGPU_GEM_DOMAIN_CPU);
if (bo->flags & AMDGPU_AMDKFD_USERPTR_BO) {
- kfd_mem_limit.system_mem_used -= bo->tbo.acc_size;
- kfd_mem_limit.userptr_mem_used -= amdgpu_bo_size(bo);
- } else if (bo->preferred_domains == AMDGPU_GEM_DOMAIN_GTT) {
- kfd_mem_limit.system_mem_used -=
- (bo->tbo.acc_size + amdgpu_bo_size(bo));
+ domain = AMDGPU_GEM_DOMAIN_CPU;
+ sg = false;
}
- WARN_ONCE(kfd_mem_limit.system_mem_used < 0,
- "kfd system memory accounting unbalanced");
- WARN_ONCE(kfd_mem_limit.userptr_mem_used < 0,
- "kfd userptr memory accounting unbalanced");
- spin_unlock(&kfd_mem_limit.mem_limit_lock);
+ unreserve_mem_limit(adev, amdgpu_bo_size(bo), domain, sg);
}
@@ -395,23 +413,6 @@ static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm)
return 0;
}
-static int sync_vm_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
- struct dma_fence *f)
-{
- int ret = amdgpu_sync_fence(adev, sync, f, false);
-
- /* Sync objects can't handle multiple GPUs (contexts) updating
- * sync->last_vm_update. Fortunately we don't need it for
- * KFD's purposes, so we can just drop that fence.
- */
- if (sync->last_vm_update) {
- dma_fence_put(sync->last_vm_update);
- sync->last_vm_update = NULL;
- }
-
- return ret;
-}
-
static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync)
{
struct amdgpu_bo *pd = vm->root.base.bo;
@@ -422,7 +423,7 @@ static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync)
if (ret)
return ret;
- return sync_vm_fence(adev, sync, vm->last_update);
+ return amdgpu_sync_fence(NULL, sync, vm->last_update, false);
}
/* add_bo_to_vm - Add a BO to a VM
@@ -536,7 +537,7 @@ static void add_kgd_mem_to_kfd_bo_list(struct kgd_mem *mem,
struct amdgpu_bo *bo = mem->bo;
INIT_LIST_HEAD(&entry->head);
- entry->shared = true;
+ entry->num_shared = 1;
entry->bo = &bo->tbo;
mutex_lock(&process_info->lock);
if (userptr)
@@ -677,7 +678,7 @@ static int reserve_bo_and_vm(struct kgd_mem *mem,
ctx->kfd_bo.priority = 0;
ctx->kfd_bo.tv.bo = &bo->tbo;
- ctx->kfd_bo.tv.shared = true;
+ ctx->kfd_bo.tv.num_shared = 1;
ctx->kfd_bo.user_pages = NULL;
list_add(&ctx->kfd_bo.tv.head, &ctx->list);
@@ -741,7 +742,7 @@ static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
ctx->kfd_bo.priority = 0;
ctx->kfd_bo.tv.bo = &bo->tbo;
- ctx->kfd_bo.tv.shared = true;
+ ctx->kfd_bo.tv.num_shared = 1;
ctx->kfd_bo.user_pages = NULL;
list_add(&ctx->kfd_bo.tv.head, &ctx->list);
@@ -826,7 +827,7 @@ static int unmap_bo_from_gpuvm(struct amdgpu_device *adev,
/* Add the eviction fence back */
amdgpu_bo_fence(pd, &vm->process_info->eviction_fence->base, true);
- sync_vm_fence(adev, sync, bo_va->last_pt_update);
+ amdgpu_sync_fence(NULL, sync, bo_va->last_pt_update, false);
return 0;
}
@@ -851,7 +852,7 @@ static int update_gpuvm_pte(struct amdgpu_device *adev,
return ret;
}
- return sync_vm_fence(adev, sync, bo_va->last_pt_update);
+ return amdgpu_sync_fence(NULL, sync, bo_va->last_pt_update, false);
}
static int map_bo_to_gpuvm(struct amdgpu_device *adev,
@@ -886,6 +887,24 @@ update_gpuvm_pte_failed:
return ret;
}
+static struct sg_table *create_doorbell_sg(uint64_t addr, uint32_t size)
+{
+ struct sg_table *sg = kmalloc(sizeof(*sg), GFP_KERNEL);
+
+ if (!sg)
+ return NULL;
+ if (sg_alloc_table(sg, 1, GFP_KERNEL)) {
+ kfree(sg);
+ return NULL;
+ }
+ sg->sgl->dma_address = addr;
+ sg->sgl->length = size;
+#ifdef CONFIG_NEED_SG_DMA_LENGTH
+ sg->sgl->dma_length = size;
+#endif
+ return sg;
+}
+
static int process_validate_vms(struct amdkfd_process_info *process_info)
{
struct amdgpu_vm *peer_vm;
@@ -901,6 +920,26 @@ static int process_validate_vms(struct amdkfd_process_info *process_info)
return 0;
}
+static int process_sync_pds_resv(struct amdkfd_process_info *process_info,
+ struct amdgpu_sync *sync)
+{
+ struct amdgpu_vm *peer_vm;
+ int ret;
+
+ list_for_each_entry(peer_vm, &process_info->vm_list_head,
+ vm_list_node) {
+ struct amdgpu_bo *pd = peer_vm->root.base.bo;
+
+ ret = amdgpu_sync_resv(NULL,
+ sync, pd->tbo.resv,
+ AMDGPU_FENCE_OWNER_UNDEFINED, false);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
static int process_update_pds(struct amdkfd_process_info *process_info,
struct amdgpu_sync *sync)
{
@@ -1149,6 +1188,8 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
+ enum ttm_bo_type bo_type = ttm_bo_type_device;
+ struct sg_table *sg = NULL;
uint64_t user_addr = 0;
struct amdgpu_bo *bo;
struct amdgpu_bo_param bp;
@@ -1177,13 +1218,25 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
if (!offset || !*offset)
return -EINVAL;
user_addr = *offset;
+ } else if (flags & ALLOC_MEM_FLAGS_DOORBELL) {
+ domain = AMDGPU_GEM_DOMAIN_GTT;
+ alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
+ bo_type = ttm_bo_type_sg;
+ alloc_flags = 0;
+ if (size > UINT_MAX)
+ return -EINVAL;
+ sg = create_doorbell_sg(*offset, size);
+ if (!sg)
+ return -ENOMEM;
} else {
return -EINVAL;
}
*mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
- if (!*mem)
- return -ENOMEM;
+ if (!*mem) {
+ ret = -ENOMEM;
+ goto err;
+ }
INIT_LIST_HEAD(&(*mem)->bo_va_list);
mutex_init(&(*mem)->lock);
(*mem)->aql_queue = !!(flags & ALLOC_MEM_FLAGS_AQL_QUEUE_MEM);
@@ -1199,7 +1252,8 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
byte_align = (adev->family == AMDGPU_FAMILY_VI &&
adev->asic_type != CHIP_FIJI &&
adev->asic_type != CHIP_POLARIS10 &&
- adev->asic_type != CHIP_POLARIS11) ?
+ adev->asic_type != CHIP_POLARIS11 &&
+ adev->asic_type != CHIP_POLARIS12) ?
VI_BO_SIZE_ALIGN : 1;
mapping_flags = AMDGPU_VM_PAGE_READABLE;
@@ -1215,10 +1269,10 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
amdgpu_sync_create(&(*mem)->sync);
- ret = amdgpu_amdkfd_reserve_system_mem_limit(adev, size, alloc_domain);
+ ret = amdgpu_amdkfd_reserve_mem_limit(adev, size, alloc_domain, !!sg);
if (ret) {
pr_debug("Insufficient system memory\n");
- goto err_reserve_system_mem;
+ goto err_reserve_limit;
}
pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s\n",
@@ -1229,7 +1283,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
bp.byte_align = byte_align;
bp.domain = alloc_domain;
bp.flags = alloc_flags;
- bp.type = ttm_bo_type_device;
+ bp.type = bo_type;
bp.resv = NULL;
ret = amdgpu_bo_create(adev, &bp, &bo);
if (ret) {
@@ -1237,6 +1291,10 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
domain_string(alloc_domain), ret);
goto err_bo_create;
}
+ if (bo_type == ttm_bo_type_sg) {
+ bo->tbo.sg = sg;
+ bo->tbo.ttm->sg = sg;
+ }
bo->kfd_bo = *mem;
(*mem)->bo = bo;
if (user_addr)
@@ -1266,12 +1324,17 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
allocate_init_user_pages_failed:
amdgpu_bo_unref(&bo);
/* Don't unreserve system mem limit twice */
- goto err_reserve_system_mem;
+ goto err_reserve_limit;
err_bo_create:
- unreserve_system_mem_limit(adev, size, alloc_domain);
-err_reserve_system_mem:
+ unreserve_mem_limit(adev, size, alloc_domain, !!sg);
+err_reserve_limit:
mutex_destroy(&(*mem)->lock);
kfree(*mem);
+err:
+ if (sg) {
+ sg_free_table(sg);
+ kfree(sg);
+ }
return ret;
}
@@ -1341,6 +1404,14 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
/* Free the sync object */
amdgpu_sync_free(&mem->sync);
+ /* If the SG is not NULL, it's one we created for a doorbell
+ * BO. We need to free it.
+ */
+ if (mem->bo->tbo.sg) {
+ sg_free_table(mem->bo->tbo.sg);
+ kfree(mem->bo->tbo.sg);
+ }
+
/* Free the BO*/
amdgpu_bo_unref(&mem->bo);
mutex_destroy(&mem->lock);
@@ -1405,7 +1476,8 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
* the queues are still stopped and we can leave mapping for
* the next restore worker
*/
- if (bo->tbo.mem.mem_type == TTM_PL_SYSTEM)
+ if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) &&
+ bo->tbo.mem.mem_type == TTM_PL_SYSTEM)
is_invalid_userptr = true;
if (check_if_add_bo_to_vm(avm, mem)) {
@@ -1642,6 +1714,60 @@ int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd,
return 0;
}
+int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
+ struct dma_buf *dma_buf,
+ uint64_t va, void *vm,
+ struct kgd_mem **mem, uint64_t *size,
+ uint64_t *mmap_offset)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
+ struct drm_gem_object *obj;
+ struct amdgpu_bo *bo;
+ struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
+
+ if (dma_buf->ops != &amdgpu_dmabuf_ops)
+ /* Can't handle non-graphics buffers */
+ return -EINVAL;
+
+ obj = dma_buf->priv;
+ if (obj->dev->dev_private != adev)
+ /* Can't handle buffers from other devices */
+ return -EINVAL;
+
+ bo = gem_to_amdgpu_bo(obj);
+ if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT)))
+ /* Only VRAM and GTT BOs are supported */
+ return -EINVAL;
+
+ *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
+ if (!*mem)
+ return -ENOMEM;
+
+ if (size)
+ *size = amdgpu_bo_size(bo);
+
+ if (mmap_offset)
+ *mmap_offset = amdgpu_bo_mmap_offset(bo);
+
+ INIT_LIST_HEAD(&(*mem)->bo_va_list);
+ mutex_init(&(*mem)->lock);
+ (*mem)->mapping_flags =
+ AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |
+ AMDGPU_VM_PAGE_EXECUTABLE | AMDGPU_VM_MTYPE_NC;
+
+ (*mem)->bo = amdgpu_bo_ref(bo);
+ (*mem)->va = va;
+ (*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
+ AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT;
+ (*mem)->mapped_to_gpu_memory = 0;
+ (*mem)->process_info = avm->process_info;
+ add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, false);
+ amdgpu_sync_create(&(*mem)->sync);
+
+ return 0;
+}
+
/* Evict a userptr BO by stopping the queues if necessary
*
* Runs in MMU notifier, may be in RECLAIM_FS context. This means it
@@ -1808,7 +1934,7 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
validate_list.head) {
list_add_tail(&mem->resv_list.head, &resv_list);
mem->resv_list.bo = mem->validate_list.bo;
- mem->resv_list.shared = mem->validate_list.shared;
+ mem->resv_list.num_shared = mem->validate_list.num_shared;
}
/* Reserve all BOs and page tables for validation */
@@ -2027,7 +2153,7 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
list_add_tail(&mem->resv_list.head, &ctx.list);
mem->resv_list.bo = mem->validate_list.bo;
- mem->resv_list.shared = mem->validate_list.shared;
+ mem->resv_list.num_shared = mem->validate_list.num_shared;
}
ret = ttm_eu_reserve_buffers(&ctx.ticket, &ctx.list,
@@ -2044,13 +2170,10 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
if (ret)
goto validate_map_fail;
- /* Wait for PD/PTs validate to finish */
- /* FIXME: I think this isn't needed */
- list_for_each_entry(peer_vm, &process_info->vm_list_head,
- vm_list_node) {
- struct amdgpu_bo *bo = peer_vm->root.base.bo;
-
- ttm_bo_wait(&bo->tbo, false, false);
+ ret = process_sync_pds_resv(process_info, &sync_obj);
+ if (ret) {
+ pr_debug("Memory eviction: Failed to sync to PD BO moving fence. Try again\n");
+ goto validate_map_fail;
}
/* Validate BOs and map them to GPUVM (update VM page tables). */
@@ -2066,7 +2189,11 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
pr_debug("Memory eviction: Validate BOs failed. Try again\n");
goto validate_map_fail;
}
-
+ ret = amdgpu_sync_fence(NULL, &sync_obj, bo->tbo.moving, false);
+ if (ret) {
+ pr_debug("Memory eviction: Sync BO fence failed. Try again\n");
+ goto validate_map_fail;
+ }
list_for_each_entry(bo_va_entry, &mem->bo_va_list,
bo_list) {
ret = update_gpuvm_pte((struct amdgpu_device *)
@@ -2087,6 +2214,7 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
goto validate_map_fail;
}
+ /* Wait for validate and PT updates to finish */
amdgpu_sync_wait(&sync_obj, false);
/* Release old eviction fence and create new one, because fence only
@@ -2105,10 +2233,7 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
process_info->eviction_fence = new_fence;
*ef = dma_fence_get(&new_fence->base);
- /* Wait for validate to finish and attach new eviction fence */
- list_for_each_entry(mem, &process_info->kfd_bo_list,
- validate_list.head)
- ttm_bo_wait(&mem->bo->tbo, false, false);
+ /* Attach new eviction fence to all BOs */
list_for_each_entry(mem, &process_info->kfd_bo_list,
validate_list.head)
amdgpu_bo_fence(mem->bo,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
index 14d2982a47cc..5c79da8e1150 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
@@ -118,7 +118,6 @@ int amdgpu_bo_list_create(struct amdgpu_device *adev, struct drm_file *filp,
entry->priority = min(info[i].bo_priority,
AMDGPU_BO_LIST_MAX_PRIORITY);
entry->tv.bo = &bo->tbo;
- entry->tv.shared = !bo->prime_shared_count;
if (bo->preferred_domains == AMDGPU_GEM_DOMAIN_GDS)
list->gds_obj = bo;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index ceadeeadfa56..387f1cf1dc20 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -381,7 +381,8 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
(adev->pdev->revision == 0xe7) ||
(adev->pdev->revision == 0xef))) ||
((adev->pdev->device == 0x6fdf) &&
- (adev->pdev->revision == 0xef))) {
+ ((adev->pdev->revision == 0xef) ||
+ (adev->pdev->revision == 0xff)))) {
info->is_kicker = true;
strcpy(fw_name, "amdgpu/polaris10_k_smc.bin");
} else if ((adev->pdev->device == 0x67df) &&
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index dc54e9efd910..1c49b8266d69 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -50,7 +50,8 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
p->uf_entry.priority = 0;
p->uf_entry.tv.bo = &bo->tbo;
- p->uf_entry.tv.shared = true;
+ /* One for TTM and one for the CS job */
+ p->uf_entry.tv.num_shared = 2;
p->uf_entry.user_pages = NULL;
drm_gem_object_put_unlocked(gobj);
@@ -124,14 +125,14 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs
goto free_chunk;
}
+ mutex_lock(&p->ctx->lock);
+
/* skip guilty context job */
if (atomic_read(&p->ctx->guilty) == 1) {
ret = -ECANCELED;
goto free_chunk;
}
- mutex_lock(&p->ctx->lock);
-
/* get chunks */
chunk_array_user = u64_to_user_ptr(cs->in.chunks);
if (copy_from_user(chunk_array, chunk_array_user,
@@ -598,6 +599,10 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
return r;
}
+ /* One for TTM and one for the CS job */
+ amdgpu_bo_list_for_each_entry(e, p->bo_list)
+ e->tv.num_shared = 2;
+
amdgpu_bo_list_get_list(p->bo_list, &p->validated);
if (p->bo_list->first_userptr != p->bo_list->num_entries)
p->mn = amdgpu_mn_get(p->adev, AMDGPU_MN_TYPE_GFX);
@@ -717,8 +722,14 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
gws = p->bo_list->gws_obj;
oa = p->bo_list->oa_obj;
- amdgpu_bo_list_for_each_entry(e, p->bo_list)
- e->bo_va = amdgpu_vm_bo_find(vm, ttm_to_amdgpu_bo(e->tv.bo));
+ amdgpu_bo_list_for_each_entry(e, p->bo_list) {
+ struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
+
+ /* Make sure we use the exclusive slot for shared BOs */
+ if (bo->prime_shared_count)
+ e->tv.num_shared = 0;
+ e->bo_va = amdgpu_vm_bo_find(vm, bo);
+ }
if (gds) {
p->job->gds_base = amdgpu_bo_gpu_offset(gds) >> PAGE_SHIFT;
@@ -955,10 +966,6 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
if (r)
return r;
- r = reservation_object_reserve_shared(vm->root.base.bo->tbo.resv, 1);
- if (r)
- return r;
-
p->job->vm_pd_addr = amdgpu_gmc_pd_addr(vm->root.base.bo);
if (amdgpu_vm_debug) {
@@ -1421,6 +1428,9 @@ int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data,
if (IS_ERR(fence))
return PTR_ERR(fence);
+ if (!fence)
+ fence = dma_fence_get_stub();
+
switch (info->in.what) {
case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ:
r = drm_syncobj_create(&syncobj, 0, fence);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
index 0c590ddf250a..7e22be7ca68a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
@@ -43,7 +43,7 @@ int amdgpu_allocate_static_csa(struct amdgpu_device *adev, struct amdgpu_bo **bo
r = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE,
domain, bo,
NULL, &ptr);
- if (!bo)
+ if (!*bo)
return -ENOMEM;
memset(ptr, 0, size);
@@ -74,7 +74,7 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
INIT_LIST_HEAD(&list);
INIT_LIST_HEAD(&csa_tv.head);
csa_tv.bo = &bo->tbo;
- csa_tv.shared = true;
+ csa_tv.num_shared = 1;
list_add(&csa_tv.head, &list);
amdgpu_vm_get_pd_bo(vm, &list, &pd);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index f9b54236102d..d85184b5b35c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -39,6 +39,7 @@ const unsigned int amdgpu_ctx_num_entities[AMDGPU_HW_IP_NUM] = {
[AMDGPU_HW_IP_UVD_ENC] = 1,
[AMDGPU_HW_IP_VCN_DEC] = 1,
[AMDGPU_HW_IP_VCN_ENC] = 1,
+ [AMDGPU_HW_IP_VCN_JPEG] = 1,
};
static int amdgput_ctx_total_num_entities(void)
@@ -247,7 +248,7 @@ static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
return -ENOMEM;
mutex_lock(&mgr->lock);
- r = idr_alloc(&mgr->ctx_handles, ctx, 1, 0, GFP_KERNEL);
+ r = idr_alloc(&mgr->ctx_handles, ctx, 1, AMDGPU_VM_MAX_NUM_CTX, GFP_KERNEL);
if (r < 0) {
mutex_unlock(&mgr->lock);
kfree(ctx);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 590588a82471..8a078f4ae73d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -59,6 +59,8 @@
#include "amdgpu_amdkfd.h"
#include "amdgpu_pm.h"
+#include "amdgpu_xgmi.h"
+
MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
@@ -513,6 +515,7 @@ void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
*/
static int amdgpu_device_doorbell_init(struct amdgpu_device *adev)
{
+
/* No doorbell on SI hardware generation */
if (adev->asic_type < CHIP_BONAIRE) {
adev->doorbell.base = 0;
@@ -525,15 +528,26 @@ static int amdgpu_device_doorbell_init(struct amdgpu_device *adev)
if (pci_resource_flags(adev->pdev, 2) & IORESOURCE_UNSET)
return -EINVAL;
+ amdgpu_asic_init_doorbell_index(adev);
+
/* doorbell bar mapping */
adev->doorbell.base = pci_resource_start(adev->pdev, 2);
adev->doorbell.size = pci_resource_len(adev->pdev, 2);
adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32),
- AMDGPU_DOORBELL_MAX_ASSIGNMENT+1);
+ adev->doorbell_index.max_assignment+1);
if (adev->doorbell.num_doorbells == 0)
return -EINVAL;
+ /* For Vega, reserve and map two pages on doorbell BAR since SDMA
+ * paging queue doorbell use the second page. The
+ * AMDGPU_DOORBELL64_MAX_ASSIGNMENT definition assumes all the
+ * doorbells are in the first page. So with paging queue enabled,
+ * the max num_doorbells should + 1 page (0x400 in dword)
+ */
+ if (adev->asic_type >= CHIP_VEGA10)
+ adev->doorbell.num_doorbells += 0x400;
+
adev->doorbell.ptr = ioremap(adev->doorbell.base,
adev->doorbell.num_doorbells *
sizeof(u32));
@@ -1851,6 +1865,9 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
{
int i, r;
+ if (adev->gmc.xgmi.num_physical_nodes > 1)
+ amdgpu_xgmi_remove_device(adev);
+
amdgpu_amdkfd_device_fini(adev);
amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
@@ -2340,6 +2357,19 @@ bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
return amdgpu_device_asic_has_dc_support(adev->asic_type);
}
+
+static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
+{
+ struct amdgpu_device *adev =
+ container_of(__work, struct amdgpu_device, xgmi_reset_work);
+
+ adev->asic_reset_res = amdgpu_asic_reset(adev);
+ if (adev->asic_reset_res)
+ DRM_WARN("ASIC reset failed with err r, %d for drm dev, %s",
+ adev->asic_reset_res, adev->ddev->unique);
+}
+
+
/**
* amdgpu_device_init - initialize the driver
*
@@ -2438,6 +2468,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
amdgpu_device_delay_enable_gfx_off);
+ INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func);
+
adev->gfx.gfx_off_req_count = 1;
adev->pm.ac_power = power_supply_is_system_supplied() > 0 ? true : false;
@@ -2458,9 +2490,6 @@ int amdgpu_device_init(struct amdgpu_device *adev,
DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
- /* doorbell bar mapping */
- amdgpu_device_doorbell_init(adev);
-
/* io port mapping */
for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
if (pci_resource_flags(adev->pdev, i) & IORESOURCE_IO) {
@@ -2479,6 +2508,9 @@ int amdgpu_device_init(struct amdgpu_device *adev,
if (r)
return r;
+ /* doorbell bar mapping and doorbell index init*/
+ amdgpu_device_doorbell_init(adev);
+
/* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
/* this will fail for cards that aren't VGA class devices, just
* ignore it */
@@ -3151,86 +3183,6 @@ static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
return 0;
}
-/**
- * amdgpu_device_reset - reset ASIC/GPU for bare-metal or passthrough
- *
- * @adev: amdgpu device pointer
- *
- * attempt to do soft-reset or full-reset and reinitialize Asic
- * return 0 means succeeded otherwise failed
- */
-static int amdgpu_device_reset(struct amdgpu_device *adev)
-{
- bool need_full_reset, vram_lost = 0;
- int r;
-
- need_full_reset = amdgpu_device_ip_need_full_reset(adev);
-
- if (!need_full_reset) {
- amdgpu_device_ip_pre_soft_reset(adev);
- r = amdgpu_device_ip_soft_reset(adev);
- amdgpu_device_ip_post_soft_reset(adev);
- if (r || amdgpu_device_ip_check_soft_reset(adev)) {
- DRM_INFO("soft reset failed, will fallback to full reset!\n");
- need_full_reset = true;
- }
- }
-
- if (need_full_reset) {
- r = amdgpu_device_ip_suspend(adev);
-
-retry:
- r = amdgpu_asic_reset(adev);
- /* post card */
- amdgpu_atom_asic_init(adev->mode_info.atom_context);
-
- if (!r) {
- dev_info(adev->dev, "GPU reset succeeded, trying to resume\n");
- r = amdgpu_device_ip_resume_phase1(adev);
- if (r)
- goto out;
-
- vram_lost = amdgpu_device_check_vram_lost(adev);
- if (vram_lost) {
- DRM_ERROR("VRAM is lost!\n");
- atomic_inc(&adev->vram_lost_counter);
- }
-
- r = amdgpu_gtt_mgr_recover(
- &adev->mman.bdev.man[TTM_PL_TT]);
- if (r)
- goto out;
-
- r = amdgpu_device_fw_loading(adev);
- if (r)
- return r;
-
- r = amdgpu_device_ip_resume_phase2(adev);
- if (r)
- goto out;
-
- if (vram_lost)
- amdgpu_device_fill_reset_magic(adev);
- }
- }
-
-out:
- if (!r) {
- amdgpu_irq_gpu_reset_resume_helper(adev);
- r = amdgpu_ib_ring_tests(adev);
- if (r) {
- dev_err(adev->dev, "ib ring test failed (%d).\n", r);
- r = amdgpu_device_ip_suspend(adev);
- need_full_reset = true;
- goto retry;
- }
- }
-
- if (!r)
- r = amdgpu_device_recover_vram(adev);
-
- return r;
-}
/**
* amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
@@ -3306,6 +3258,8 @@ bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
if (amdgpu_gpu_recovery == -1) {
switch (adev->asic_type) {
+ case CHIP_BONAIRE:
+ case CHIP_HAWAII:
case CHIP_TOPAZ:
case CHIP_TONGA:
case CHIP_FIJI:
@@ -3329,31 +3283,13 @@ disabled:
return false;
}
-/**
- * amdgpu_device_gpu_recover - reset the asic and recover scheduler
- *
- * @adev: amdgpu device pointer
- * @job: which job trigger hang
- *
- * Attempt to reset the GPU if it has hung (all asics).
- * Returns 0 for success or an error on failure.
- */
-int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
- struct amdgpu_job *job)
-{
- int i, r, resched;
-
- dev_info(adev->dev, "GPU reset begin!\n");
-
- mutex_lock(&adev->lock_reset);
- atomic_inc(&adev->gpu_reset_counter);
- adev->in_gpu_reset = 1;
-
- /* Block kfd */
- amdgpu_amdkfd_pre_reset(adev);
- /* block TTM */
- resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
+static int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
+ struct amdgpu_job *job,
+ bool *need_full_reset_arg)
+{
+ int i, r = 0;
+ bool need_full_reset = *need_full_reset_arg;
/* block all schedulers and reset given job's ring */
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
@@ -3373,10 +3309,144 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
amdgpu_fence_driver_force_completion(ring);
}
- if (amdgpu_sriov_vf(adev))
- r = amdgpu_device_reset_sriov(adev, job ? false : true);
- else
- r = amdgpu_device_reset(adev);
+
+
+ if (!amdgpu_sriov_vf(adev)) {
+
+ if (!need_full_reset)
+ need_full_reset = amdgpu_device_ip_need_full_reset(adev);
+
+ if (!need_full_reset) {
+ amdgpu_device_ip_pre_soft_reset(adev);
+ r = amdgpu_device_ip_soft_reset(adev);
+ amdgpu_device_ip_post_soft_reset(adev);
+ if (r || amdgpu_device_ip_check_soft_reset(adev)) {
+ DRM_INFO("soft reset failed, will fallback to full reset!\n");
+ need_full_reset = true;
+ }
+ }
+
+ if (need_full_reset)
+ r = amdgpu_device_ip_suspend(adev);
+
+ *need_full_reset_arg = need_full_reset;
+ }
+
+ return r;
+}
+
+static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
+ struct list_head *device_list_handle,
+ bool *need_full_reset_arg)
+{
+ struct amdgpu_device *tmp_adev = NULL;
+ bool need_full_reset = *need_full_reset_arg, vram_lost = false;
+ int r = 0;
+
+ /*
+ * ASIC reset has to be done on all XGMI hive nodes ASAP
+ * to allow proper links negotiation in FW (within 1 sec)
+ */
+ if (need_full_reset) {
+ list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
+ /* For XGMI run all resets in parallel to speed up the process */
+ if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
+ if (!queue_work(system_highpri_wq, &tmp_adev->xgmi_reset_work))
+ r = -EALREADY;
+ } else
+ r = amdgpu_asic_reset(tmp_adev);
+
+ if (r) {
+ DRM_ERROR("ASIC reset failed with err r, %d for drm dev, %s",
+ r, tmp_adev->ddev->unique);
+ break;
+ }
+ }
+
+ /* For XGMI wait for all PSP resets to complete before proceed */
+ if (!r) {
+ list_for_each_entry(tmp_adev, device_list_handle,
+ gmc.xgmi.head) {
+ if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
+ flush_work(&tmp_adev->xgmi_reset_work);
+ r = tmp_adev->asic_reset_res;
+ if (r)
+ break;
+ }
+ }
+ }
+ }
+
+
+ list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
+ if (need_full_reset) {
+ /* post card */
+ if (amdgpu_atom_asic_init(tmp_adev->mode_info.atom_context))
+ DRM_WARN("asic atom init failed!");
+
+ if (!r) {
+ dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n");
+ r = amdgpu_device_ip_resume_phase1(tmp_adev);
+ if (r)
+ goto out;
+
+ vram_lost = amdgpu_device_check_vram_lost(tmp_adev);
+ if (vram_lost) {
+ DRM_ERROR("VRAM is lost!\n");
+ atomic_inc(&tmp_adev->vram_lost_counter);
+ }
+
+ r = amdgpu_gtt_mgr_recover(
+ &tmp_adev->mman.bdev.man[TTM_PL_TT]);
+ if (r)
+ goto out;
+
+ r = amdgpu_device_fw_loading(tmp_adev);
+ if (r)
+ return r;
+
+ r = amdgpu_device_ip_resume_phase2(tmp_adev);
+ if (r)
+ goto out;
+
+ if (vram_lost)
+ amdgpu_device_fill_reset_magic(tmp_adev);
+
+ /* Update PSP FW topology after reset */
+ if (hive && tmp_adev->gmc.xgmi.num_physical_nodes > 1)
+ r = amdgpu_xgmi_update_topology(hive, tmp_adev);
+ }
+ }
+
+
+out:
+ if (!r) {
+ amdgpu_irq_gpu_reset_resume_helper(tmp_adev);
+ r = amdgpu_ib_ring_tests(tmp_adev);
+ if (r) {
+ dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r);
+ r = amdgpu_device_ip_suspend(tmp_adev);
+ need_full_reset = true;
+ r = -EAGAIN;
+ goto end;
+ }
+ }
+
+ if (!r)
+ r = amdgpu_device_recover_vram(tmp_adev);
+ else
+ tmp_adev->asic_reset_res = r;
+ }
+
+end:
+ *need_full_reset_arg = need_full_reset;
+ return r;
+}
+
+static void amdgpu_device_post_asic_reset(struct amdgpu_device *adev,
+ struct amdgpu_job *job)
+{
+ int i;
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = adev->rings[i];
@@ -3388,7 +3458,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
* or all rings (in the case @job is NULL)
* after above amdgpu_reset accomplished
*/
- if ((!job || job->base.sched == &ring->sched) && !r)
+ if ((!job || job->base.sched == &ring->sched) && !adev->asic_reset_res)
drm_sched_job_recovery(&ring->sched);
kthread_unpark(ring->sched.thread);
@@ -3398,21 +3468,144 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
drm_helper_resume_force_mode(adev->ddev);
}
- ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
+ adev->asic_reset_res = 0;
+}
- if (r) {
- /* bad news, how to tell it to userspace ? */
- dev_info(adev->dev, "GPU reset(%d) failed\n", atomic_read(&adev->gpu_reset_counter));
- amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
- } else {
- dev_info(adev->dev, "GPU reset(%d) succeeded!\n",atomic_read(&adev->gpu_reset_counter));
- }
+static void amdgpu_device_lock_adev(struct amdgpu_device *adev)
+{
+ mutex_lock(&adev->lock_reset);
+ atomic_inc(&adev->gpu_reset_counter);
+ adev->in_gpu_reset = 1;
+ /* Block kfd: SRIOV would do it separately */
+ if (!amdgpu_sriov_vf(adev))
+ amdgpu_amdkfd_pre_reset(adev);
+}
- /*unlock kfd */
- amdgpu_amdkfd_post_reset(adev);
+static void amdgpu_device_unlock_adev(struct amdgpu_device *adev)
+{
+ /* unlock kfd: SRIOV would do it separately */
+ if (!amdgpu_sriov_vf(adev))
+ amdgpu_amdkfd_post_reset(adev);
amdgpu_vf_error_trans_all(adev);
adev->in_gpu_reset = 0;
mutex_unlock(&adev->lock_reset);
+}
+
+
+/**
+ * amdgpu_device_gpu_recover - reset the asic and recover scheduler
+ *
+ * @adev: amdgpu device pointer
+ * @job: which job triggered the hang
+ *
+ * Attempt to reset the GPU if it has hung (all ASICs).
+ * Attempt to do a soft reset or full reset and reinitialize the ASIC.
+ * Returns 0 for success or an error on failure.
+ */
+
+int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
+ struct amdgpu_job *job)
+{
+ int r;
+ struct amdgpu_hive_info *hive = NULL;
+ bool need_full_reset = false;
+ struct amdgpu_device *tmp_adev = NULL;
+ struct list_head device_list, *device_list_handle = NULL;
+
+ INIT_LIST_HEAD(&device_list);
+
+ dev_info(adev->dev, "GPU reset begin!\n");
+
+ /*
+ * In case of an XGMI hive, disallow concurrent resets triggered
+ * by different nodes, since the one node already executing the
+ * reset will also reset all the other nodes in the hive.
+ */
+ hive = amdgpu_get_xgmi_hive(adev);
+ if (hive && adev->gmc.xgmi.num_physical_nodes > 1 &&
+ !mutex_trylock(&hive->hive_lock))
+ return 0;
+
+ /* Start with the adev pre asic reset for the soft reset check. */
+ amdgpu_device_lock_adev(adev);
+ r = amdgpu_device_pre_asic_reset(adev,
+ job,
+ &need_full_reset);
+ if (r) {
+ /* TODO Should we stop? */
+ DRM_ERROR("GPU pre asic reset failed with error %d for drm dev %s",
+ r, adev->ddev->unique);
+ adev->asic_reset_res = r;
+ }
+
+ /* Build list of devices to reset */
+ if (need_full_reset && adev->gmc.xgmi.num_physical_nodes > 1) {
+ if (!hive) {
+ amdgpu_device_unlock_adev(adev);
+ return -ENODEV;
+ }
+
+ /*
+ * In case we are in XGMI hive mode device reset is done for all the
+ * nodes in the hive to retrain all XGMI links and hence the reset
+ * sequence is executed in loop on all nodes.
+ */
+ device_list_handle = &hive->device_list;
+ } else {
+ list_add_tail(&adev->gmc.xgmi.head, &device_list);
+ device_list_handle = &device_list;
+ }
+
+retry: /* Rest of adevs pre asic reset from XGMI hive. */
+ list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
+
+ if (tmp_adev == adev)
+ continue;
+
+ amdgpu_device_lock_adev(tmp_adev);
+ r = amdgpu_device_pre_asic_reset(tmp_adev,
+ NULL,
+ &need_full_reset);
+ /* TODO Should we stop? */
+ if (r) {
+ DRM_ERROR("GPU pre asic reset failed with error %d for drm dev %s",
+ r, tmp_adev->ddev->unique);
+ tmp_adev->asic_reset_res = r;
+ }
+ }
+
+ /* Actual ASIC resets if needed. */
+ /* TODO Implement XGMI hive reset logic for SRIOV */
+ if (amdgpu_sriov_vf(adev)) {
+ r = amdgpu_device_reset_sriov(adev, job ? false : true);
+ if (r)
+ adev->asic_reset_res = r;
+ } else {
+ r = amdgpu_do_asic_reset(hive, device_list_handle, &need_full_reset);
+ if (r && r == -EAGAIN)
+ goto retry;
+ }
+
+ /* Post ASIC reset for all devs. */
+ list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
+ amdgpu_device_post_asic_reset(tmp_adev, tmp_adev == adev ? job : NULL);
+
+ if (r) {
+ /* bad news, how to tell it to userspace ? */
+ dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&adev->gpu_reset_counter));
+ amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
+ } else {
+ dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&adev->gpu_reset_counter));
+ }
+
+ amdgpu_device_unlock_adev(tmp_adev);
+ }
+
+ if (hive && adev->gmc.xgmi.num_physical_nodes > 1)
+ mutex_unlock(&hive->hive_lock);
+
+ if (r)
+ dev_info(adev->dev, "GPU reset end with ret = %d\n", r);
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 686a26de50f9..15ce7e681d67 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -631,6 +631,11 @@ int amdgpu_display_modeset_create_props(struct amdgpu_device *adev)
drm_property_create_range(adev->ddev, 0, "max bpc", 8, 16);
if (!adev->mode_info.max_bpc_property)
return -ENOMEM;
+ adev->mode_info.abm_level_property =
+ drm_property_create_range(adev->ddev, 0,
+ "abm level", 0, 4);
+ if (!adev->mode_info.abm_level_property)
+ return -ENOMEM;
}
return 0;
@@ -857,7 +862,12 @@ int amdgpu_display_get_crtc_scanoutpos(struct drm_device *dev,
/* Inside "upper part" of vblank area? Apply corrective offset if so: */
if (in_vbl && (*vpos >= vbl_start)) {
vtotal = mode->crtc_vtotal;
- *vpos = *vpos - vtotal;
+
+ /* With variable refresh rate displays the vpos can exceed
+ * the vtotal value. Clamp to 0 to return -vbl_end instead
+ * of guessing the remaining number of lines until scanout.
+ */
+ *vpos = (*vpos < vtotal) ? (*vpos - vtotal) : 0;
}
/* Correct for shifted end of vbl at vbl_end. */
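To make the clamp concrete: with a hypothetical mode whose crtc_vtotal is 1125 (a common 1080p timing), a VRR-stretched scanout position of 1300 inside the upper vblank region would previously yield *vpos = 1300 - 1125 = 175, a bogus positive offset; with the clamp it becomes 0, so the subsequent vbl_end correction returns -vbl_end as intended.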
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h
new file mode 100644
index 000000000000..be620b29f4aa
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h
@@ -0,0 +1,243 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+/*
+ * GPU doorbell structures, functions & helpers
+ */
+struct amdgpu_doorbell {
+ /* doorbell mmio */
+ resource_size_t base;
+ resource_size_t size;
+ u32 __iomem *ptr;
+ u32 num_doorbells; /* Number of doorbells actually reserved for amdgpu. */
+};
+
+/* Reserved doorbells for amdgpu (including multimedia).
+ * KFD can use all the rest in the 2M doorbell bar.
+ * For ASICs before vega10, the doorbell is 32-bit, so the
+ * index/offset is in dwords. For vega10 and later, the doorbell
+ * can be 64-bit, so the index is defined in qwords.
+ */
+struct amdgpu_doorbell_index {
+ uint32_t kiq;
+ uint32_t mec_ring0;
+ uint32_t mec_ring1;
+ uint32_t mec_ring2;
+ uint32_t mec_ring3;
+ uint32_t mec_ring4;
+ uint32_t mec_ring5;
+ uint32_t mec_ring6;
+ uint32_t mec_ring7;
+ uint32_t userqueue_start;
+ uint32_t userqueue_end;
+ uint32_t gfx_ring0;
+ uint32_t sdma_engine0;
+ uint32_t sdma_engine1;
+ uint32_t sdma_engine2;
+ uint32_t sdma_engine3;
+ uint32_t sdma_engine4;
+ uint32_t sdma_engine5;
+ uint32_t sdma_engine6;
+ uint32_t sdma_engine7;
+ uint32_t ih;
+ union {
+ struct {
+ uint32_t vcn_ring0_1;
+ uint32_t vcn_ring2_3;
+ uint32_t vcn_ring4_5;
+ uint32_t vcn_ring6_7;
+ } vcn;
+ struct {
+ uint32_t uvd_ring0_1;
+ uint32_t uvd_ring2_3;
+ uint32_t uvd_ring4_5;
+ uint32_t uvd_ring6_7;
+ uint32_t vce_ring0_1;
+ uint32_t vce_ring2_3;
+ uint32_t vce_ring4_5;
+ uint32_t vce_ring6_7;
+ } uvd_vce;
+ };
+ uint32_t max_assignment;
+};
+
+typedef enum _AMDGPU_DOORBELL_ASSIGNMENT
+{
+ AMDGPU_DOORBELL_KIQ = 0x000,
+ AMDGPU_DOORBELL_HIQ = 0x001,
+ AMDGPU_DOORBELL_DIQ = 0x002,
+ AMDGPU_DOORBELL_MEC_RING0 = 0x010,
+ AMDGPU_DOORBELL_MEC_RING1 = 0x011,
+ AMDGPU_DOORBELL_MEC_RING2 = 0x012,
+ AMDGPU_DOORBELL_MEC_RING3 = 0x013,
+ AMDGPU_DOORBELL_MEC_RING4 = 0x014,
+ AMDGPU_DOORBELL_MEC_RING5 = 0x015,
+ AMDGPU_DOORBELL_MEC_RING6 = 0x016,
+ AMDGPU_DOORBELL_MEC_RING7 = 0x017,
+ AMDGPU_DOORBELL_GFX_RING0 = 0x020,
+ AMDGPU_DOORBELL_sDMA_ENGINE0 = 0x1E0,
+ AMDGPU_DOORBELL_sDMA_ENGINE1 = 0x1E1,
+ AMDGPU_DOORBELL_IH = 0x1E8,
+ AMDGPU_DOORBELL_MAX_ASSIGNMENT = 0x3FF,
+ AMDGPU_DOORBELL_INVALID = 0xFFFF
+} AMDGPU_DOORBELL_ASSIGNMENT;
+
+typedef enum _AMDGPU_VEGA20_DOORBELL_ASSIGNMENT
+{
+ /* Compute + GFX: 0~255 */
+ AMDGPU_VEGA20_DOORBELL_KIQ = 0x000,
+ AMDGPU_VEGA20_DOORBELL_HIQ = 0x001,
+ AMDGPU_VEGA20_DOORBELL_DIQ = 0x002,
+ AMDGPU_VEGA20_DOORBELL_MEC_RING0 = 0x003,
+ AMDGPU_VEGA20_DOORBELL_MEC_RING1 = 0x004,
+ AMDGPU_VEGA20_DOORBELL_MEC_RING2 = 0x005,
+ AMDGPU_VEGA20_DOORBELL_MEC_RING3 = 0x006,
+ AMDGPU_VEGA20_DOORBELL_MEC_RING4 = 0x007,
+ AMDGPU_VEGA20_DOORBELL_MEC_RING5 = 0x008,
+ AMDGPU_VEGA20_DOORBELL_MEC_RING6 = 0x009,
+ AMDGPU_VEGA20_DOORBELL_MEC_RING7 = 0x00A,
+ AMDGPU_VEGA20_DOORBELL_USERQUEUE_START = 0x00B,
+ AMDGPU_VEGA20_DOORBELL_USERQUEUE_END = 0x08A,
+ AMDGPU_VEGA20_DOORBELL_GFX_RING0 = 0x08B,
+ /* SDMA:256~335*/
+ AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE0 = 0x100,
+ AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE1 = 0x10A,
+ AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE2 = 0x114,
+ AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE3 = 0x11E,
+ AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE4 = 0x128,
+ AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE5 = 0x132,
+ AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE6 = 0x13C,
+ AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE7 = 0x146,
+ /* IH: 376~391 */
+ AMDGPU_VEGA20_DOORBELL_IH = 0x178,
+ /* MMSCH: 392~407
+ * overlap the doorbell assignment with VCN as they are mutually exclusive
+ * VCE engine's doorbell is 32 bit and two VCE rings share one QWORD
+ */
+ AMDGPU_VEGA20_DOORBELL64_VCN0_1 = 0x188, /* lower 32 bits for VCN0 and upper 32 bits for VCN1 */
+ AMDGPU_VEGA20_DOORBELL64_VCN2_3 = 0x189,
+ AMDGPU_VEGA20_DOORBELL64_VCN4_5 = 0x18A,
+ AMDGPU_VEGA20_DOORBELL64_VCN6_7 = 0x18B,
+
+ AMDGPU_VEGA20_DOORBELL64_UVD_RING0_1 = 0x188,
+ AMDGPU_VEGA20_DOORBELL64_UVD_RING2_3 = 0x189,
+ AMDGPU_VEGA20_DOORBELL64_UVD_RING4_5 = 0x18A,
+ AMDGPU_VEGA20_DOORBELL64_UVD_RING6_7 = 0x18B,
+
+ AMDGPU_VEGA20_DOORBELL64_VCE_RING0_1 = 0x18C,
+ AMDGPU_VEGA20_DOORBELL64_VCE_RING2_3 = 0x18D,
+ AMDGPU_VEGA20_DOORBELL64_VCE_RING4_5 = 0x18E,
+ AMDGPU_VEGA20_DOORBELL64_VCE_RING6_7 = 0x18F,
+ AMDGPU_VEGA20_DOORBELL_MAX_ASSIGNMENT = 0x18F,
+ AMDGPU_VEGA20_DOORBELL_INVALID = 0xFFFF
+} AMDGPU_VEGA20_DOORBELL_ASSIGNMENT;
+
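Each ASIC is expected to translate its assignment enum into the generic struct amdgpu_doorbell_index via the amdgpu_asic_init_doorbell_index() hook used in the device init hunk earlier. A hedged sketch of what such a per-ASIC initializer could look like; the function name and the exact set of fields covered are illustrative, and the real code may convert qword indices into dword units for max_assignment:

	static void example_vega20_doorbell_index_init(struct amdgpu_device *adev)
	{
		adev->doorbell_index.kiq = AMDGPU_VEGA20_DOORBELL_KIQ;
		adev->doorbell_index.mec_ring0 = AMDGPU_VEGA20_DOORBELL_MEC_RING0;
		adev->doorbell_index.gfx_ring0 = AMDGPU_VEGA20_DOORBELL_GFX_RING0;
		adev->doorbell_index.sdma_engine0 = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE0;
		adev->doorbell_index.ih = AMDGPU_VEGA20_DOORBELL_IH;
		adev->doorbell_index.uvd_vce.uvd_ring0_1 =
			AMDGPU_VEGA20_DOORBELL64_UVD_RING0_1;
		adev->doorbell_index.max_assignment =
			AMDGPU_VEGA20_DOORBELL_MAX_ASSIGNMENT;
	}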
+/*
+ * 64-bit doorbells; offsets are in QWORDs and occupy 2KB of doorbell space
+ */
+typedef enum _AMDGPU_DOORBELL64_ASSIGNMENT
+{
+ /*
+ * All compute related doorbells (kiq, hiq, diq, traditional compute queues, user queues) should be located in
+ * a contiguous range so that programming CP_MEC_DOORBELL_RANGE_LOWER/UPPER can cover this range.
+ * Compute related doorbells are allocated from 0x00 to 0x8a
+ */
+
+
+ /* kernel scheduling */
+ AMDGPU_DOORBELL64_KIQ = 0x00,
+
+ /* HSA interface queue and debug queue */
+ AMDGPU_DOORBELL64_HIQ = 0x01,
+ AMDGPU_DOORBELL64_DIQ = 0x02,
+
+ /* Compute engines */
+ AMDGPU_DOORBELL64_MEC_RING0 = 0x03,
+ AMDGPU_DOORBELL64_MEC_RING1 = 0x04,
+ AMDGPU_DOORBELL64_MEC_RING2 = 0x05,
+ AMDGPU_DOORBELL64_MEC_RING3 = 0x06,
+ AMDGPU_DOORBELL64_MEC_RING4 = 0x07,
+ AMDGPU_DOORBELL64_MEC_RING5 = 0x08,
+ AMDGPU_DOORBELL64_MEC_RING6 = 0x09,
+ AMDGPU_DOORBELL64_MEC_RING7 = 0x0a,
+
+ /* User queue doorbell range (128 doorbells) */
+ AMDGPU_DOORBELL64_USERQUEUE_START = 0x0b,
+ AMDGPU_DOORBELL64_USERQUEUE_END = 0x8a,
+
+ /* Graphics engine */
+ AMDGPU_DOORBELL64_GFX_RING0 = 0x8b,
+
+ /*
+ * Other graphics doorbells can be allocated here: from 0x8c to 0xdf
+ * Graphics voltage island aperture 1
+ * default non-graphics QWORD index is 0xe0 - 0xFF inclusive
+ */
+
+ /* For vega10 sriov, the sdma doorbell must be fixed as follows
+ * to keep the same setting as the host driver, or conflicts
+ * will occur
+ */
+ AMDGPU_DOORBELL64_sDMA_ENGINE0 = 0xF0,
+ AMDGPU_DOORBELL64_sDMA_HI_PRI_ENGINE0 = 0xF1,
+ AMDGPU_DOORBELL64_sDMA_ENGINE1 = 0xF2,
+ AMDGPU_DOORBELL64_sDMA_HI_PRI_ENGINE1 = 0xF3,
+
+ /* Interrupt handler */
+ AMDGPU_DOORBELL64_IH = 0xF4, /* For legacy interrupt ring buffer */
+ AMDGPU_DOORBELL64_IH_RING1 = 0xF5, /* For page migration request log */
+ AMDGPU_DOORBELL64_IH_RING2 = 0xF6, /* For page migration translation/invalidation log */
+
+ /* VCN engine use 32 bits doorbell */
+ AMDGPU_DOORBELL64_VCN0_1 = 0xF8, /* lower 32 bits for VCN0 and upper 32 bits for VCN1 */
+ AMDGPU_DOORBELL64_VCN2_3 = 0xF9,
+ AMDGPU_DOORBELL64_VCN4_5 = 0xFA,
+ AMDGPU_DOORBELL64_VCN6_7 = 0xFB,
+
+ /* overlap the doorbell assignment with VCN as they are mutually exclusive
+ * VCE engine's doorbell is 32 bit and two VCE rings share one QWORD
+ */
+ AMDGPU_DOORBELL64_UVD_RING0_1 = 0xF8,
+ AMDGPU_DOORBELL64_UVD_RING2_3 = 0xF9,
+ AMDGPU_DOORBELL64_UVD_RING4_5 = 0xFA,
+ AMDGPU_DOORBELL64_UVD_RING6_7 = 0xFB,
+
+ AMDGPU_DOORBELL64_VCE_RING0_1 = 0xFC,
+ AMDGPU_DOORBELL64_VCE_RING2_3 = 0xFD,
+ AMDGPU_DOORBELL64_VCE_RING4_5 = 0xFE,
+ AMDGPU_DOORBELL64_VCE_RING6_7 = 0xFF,
+
+ AMDGPU_DOORBELL64_MAX_ASSIGNMENT = 0xFF,
+ AMDGPU_DOORBELL64_INVALID = 0xFFFF
+} AMDGPU_DOORBELL64_ASSIGNMENT;
+
+u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index);
+void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v);
+u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index);
+void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v);
+
+#define RDOORBELL32(index) amdgpu_mm_rdoorbell(adev, (index))
+#define WDOORBELL32(index, v) amdgpu_mm_wdoorbell(adev, (index), (v))
+#define RDOORBELL64(index) amdgpu_mm_rdoorbell64(adev, (index))
+#define WDOORBELL64(index, v) amdgpu_mm_wdoorbell64(adev, (index), (v))
+
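As a usage illustration, a ring that owns a doorbell commits its write pointer through WDOORBELL64() instead of an MMIO register write. This hedged sketch is not part of the header: mmEXAMPLE_RING_WPTR is a made-up fallback register, and struct amdgpu_ring comes from amdgpu_ring.h.

	static void example_ring_commit_wptr(struct amdgpu_ring *ring)
	{
		struct amdgpu_device *adev = ring->adev;

		if (ring->use_doorbell)
			WDOORBELL64(ring->doorbell_index, ring->wptr);
		else
			WREG32(mmEXAMPLE_RING_WPTR, lower_32_bits(ring->wptr));
	}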
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 8de55f7f1a3a..c806f984bcc5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -454,9 +454,10 @@ module_param_named(cntl_sb_buf_per_se, amdgpu_cntl_sb_buf_per_se, int, 0444);
/**
* DOC: param_buf_per_se (int)
- * Override the size of Off-Chip Pramater Cache per Shader Engine in Byte. The default is 0 (depending on gfx).
+ * Override the size of Off-Chip Parameter Cache per Shader Engine in Byte.
+ * The default is 0 (depending on gfx).
*/
-MODULE_PARM_DESC(param_buf_per_se, "the size of Off-Chip Pramater Cache per Shader Engine (default depending on gfx)");
+MODULE_PARM_DESC(param_buf_per_se, "the size of Off-Chip Parameter Cache per Shader Engine (default depending on gfx)");
module_param_named(param_buf_per_se, amdgpu_param_buf_per_se, int, 0444);
/**
@@ -864,6 +865,7 @@ static const struct pci_device_id pciidlist[] = {
/* VEGAM */
{0x1002, 0x694C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGAM},
{0x1002, 0x694E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGAM},
+ {0x1002, 0x694F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGAM},
/* Vega 10 */
{0x1002, 0x6860, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
{0x1002, 0x6861, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
@@ -872,7 +874,13 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x6864, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
{0x1002, 0x6867, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
{0x1002, 0x6868, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+ {0x1002, 0x6869, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+ {0x1002, 0x686a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+ {0x1002, 0x686b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
{0x1002, 0x686c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+ {0x1002, 0x686d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+ {0x1002, 0x686e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+ {0x1002, 0x686f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
{0x1002, 0x687f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
/* Vega 12 */
{0x1002, 0x69A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA12},
@@ -885,6 +893,7 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x66A1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
{0x1002, 0x66A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
{0x1002, 0x66A3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
+ {0x1002, 0x66A4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
{0x1002, 0x66A7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
{0x1002, 0x66AF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
/* Raven */
@@ -1220,9 +1229,6 @@ static struct drm_driver kms_driver = {
.patchlevel = KMS_DRIVER_PATCHLEVEL,
};
-static struct drm_driver *driver;
-static struct pci_driver *pdriver;
-
static struct pci_driver amdgpu_kms_pci_driver = {
.name = DRIVER_NAME,
.id_table = pciidlist,
@@ -1252,16 +1258,14 @@ static int __init amdgpu_init(void)
goto error_fence;
DRM_INFO("amdgpu kernel modesetting enabled.\n");
- driver = &kms_driver;
- pdriver = &amdgpu_kms_pci_driver;
- driver->num_ioctls = amdgpu_max_kms_ioctl;
+ kms_driver.num_ioctls = amdgpu_max_kms_ioctl;
amdgpu_register_atpx_handler();
/* Ignore KFD init failures. Normal when CONFIG_HSA_AMD is not set. */
amdgpu_amdkfd_init();
/* let modprobe override vga console setting */
- return pci_register_driver(pdriver);
+ return pci_register_driver(&amdgpu_kms_pci_driver);
error_fence:
amdgpu_sync_fini();
@@ -1273,7 +1277,7 @@ error_sync:
static void __exit amdgpu_exit(void)
{
amdgpu_amdkfd_fini();
- pci_unregister_driver(pdriver);
+ pci_unregister_driver(&amdgpu_kms_pci_driver);
amdgpu_unregister_atpx_handler();
amdgpu_sync_fini();
amdgpu_fence_slab_fini();
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 7b3d1ebda9df..f4f00217546e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -169,7 +169,7 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
INIT_LIST_HEAD(&duplicates);
tv.bo = &bo->tbo;
- tv.shared = true;
+ tv.num_shared = 1;
list_add(&tv.head, &list);
amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);
@@ -604,7 +604,10 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
return -ENOENT;
abo = gem_to_amdgpu_bo(gobj);
tv.bo = &abo->tbo;
- tv.shared = !!(abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID);
+ if (abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
+ tv.num_shared = 1;
+ else
+ tv.num_shared = 0;
list_add(&tv.head, &list);
} else {
gobj = NULL;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h
index d63daba9b17c..f1ddfc50bcc7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h
@@ -54,6 +54,8 @@ void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj);
void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
int amdgpu_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
+extern const struct dma_buf_ops amdgpu_dmabuf_ops;
+
/*
* GEM objects.
*/
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index 6a70c0b7105f..97a60da62004 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -250,7 +250,7 @@ int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev,
ring->adev = NULL;
ring->ring_obj = NULL;
ring->use_doorbell = true;
- ring->doorbell_index = AMDGPU_DOORBELL_KIQ;
+ ring->doorbell_index = adev->doorbell_index.kiq;
r = amdgpu_gfx_kiq_acquire(adev, ring);
if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
index 8c57924c075f..81e6070d255b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
@@ -99,6 +99,7 @@ struct amdgpu_xgmi {
unsigned num_physical_nodes;
/* gpu list in the same hive */
struct list_head head;
+ bool supported;
};
struct amdgpu_gmc {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
index 9ce8c93ec19b..f877bb78d10a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
@@ -51,14 +51,12 @@ struct amdgpu_ih_ring {
struct amdgpu_ih_funcs {
/* ring read/write ptr handling, called from interrupt context */
u32 (*get_wptr)(struct amdgpu_device *adev);
- bool (*prescreen_iv)(struct amdgpu_device *adev);
void (*decode_iv)(struct amdgpu_device *adev,
struct amdgpu_iv_entry *entry);
void (*set_rptr)(struct amdgpu_device *adev);
};
#define amdgpu_ih_get_wptr(adev) (adev)->irq.ih_funcs->get_wptr((adev))
-#define amdgpu_ih_prescreen_iv(adev) (adev)->irq.ih_funcs->prescreen_iv((adev))
#define amdgpu_ih_decode_iv(adev, iv) (adev)->irq.ih_funcs->decode_iv((adev), (iv))
#define amdgpu_ih_set_rptr(adev) (adev)->irq.ih_funcs->set_rptr((adev))
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index 6b6524f04ce0..b7968f426862 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -145,13 +145,6 @@ static void amdgpu_irq_callback(struct amdgpu_device *adev,
u32 ring_index = ih->rptr >> 2;
struct amdgpu_iv_entry entry;
- /* Prescreening of high-frequency interrupts */
- if (!amdgpu_ih_prescreen_iv(adev))
- return;
-
- /* Before dispatching irq to IP blocks, send it to amdkfd */
- amdgpu_amdkfd_interrupt(adev, (const void *) &ih->ring[ring_index]);
-
entry.iv_entry = (const uint32_t *)&ih->ring[ring_index];
amdgpu_ih_decode_iv(adev, &entry);
@@ -371,39 +364,38 @@ void amdgpu_irq_dispatch(struct amdgpu_device *adev,
unsigned client_id = entry->client_id;
unsigned src_id = entry->src_id;
struct amdgpu_irq_src *src;
+ bool handled = false;
int r;
trace_amdgpu_iv(entry);
if (client_id >= AMDGPU_IRQ_CLIENTID_MAX) {
DRM_DEBUG("Invalid client_id in IV: %d\n", client_id);
- return;
- }
- if (src_id >= AMDGPU_MAX_IRQ_SRC_ID) {
+ } else if (src_id >= AMDGPU_MAX_IRQ_SRC_ID) {
DRM_DEBUG("Invalid src_id in IV: %d\n", src_id);
- return;
- }
- if (adev->irq.virq[src_id]) {
+ } else if (adev->irq.virq[src_id]) {
generic_handle_irq(irq_find_mapping(adev->irq.domain, src_id));
- } else {
- if (!adev->irq.client[client_id].sources) {
- DRM_DEBUG("Unregistered interrupt client_id: %d src_id: %d\n",
- client_id, src_id);
- return;
- }
- src = adev->irq.client[client_id].sources[src_id];
- if (!src) {
- DRM_DEBUG("Unhandled interrupt src_id: %d\n", src_id);
- return;
- }
+ } else if (!adev->irq.client[client_id].sources) {
+ DRM_DEBUG("Unregistered interrupt client_id: %d src_id: %d\n",
+ client_id, src_id);
+ } else if ((src = adev->irq.client[client_id].sources[src_id])) {
r = src->funcs->process(adev, src, entry);
- if (r)
+ if (r < 0)
DRM_ERROR("error processing interrupt (%d)\n", r);
+ else if (r)
+ handled = true;
+
+ } else {
+ DRM_DEBUG("Unhandled interrupt src_id: %d\n", src_id);
}
+
+ /* Send it to amdkfd as well if it isn't already handled */
+ if (!handled)
+ amdgpu_amdkfd_interrupt(adev, entry->iv_entry);
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index e0af44fd6a0c..0a17fb1af204 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -32,6 +32,9 @@ static void amdgpu_job_timedout(struct drm_sched_job *s_job)
{
struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
struct amdgpu_job *job = to_amdgpu_job(s_job);
+ struct amdgpu_task_info ti;
+
+ memset(&ti, 0, sizeof(struct amdgpu_task_info));
if (amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) {
DRM_ERROR("ring %s timeout, but soft recovered\n",
@@ -39,9 +42,12 @@ static void amdgpu_job_timedout(struct drm_sched_job *s_job)
return;
}
+ amdgpu_vm_get_task_info(ring->adev, job->pasid, &ti);
DRM_ERROR("ring %s timeout, signaled seq=%u, emitted seq=%u\n",
job->base.sched->name, atomic_read(&ring->fence_drv.last_seq),
ring->fence_drv.sync_seq);
+ DRM_ERROR("Process information: process %s pid %d thread %s pid %d\n",
+ ti.process_name, ti.tgid, ti.task_name, ti.pid);
if (amdgpu_device_should_recover_gpu(ring->adev))
amdgpu_device_gpu_recover(ring->adev, job);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 9b3164c0f861..bc62bf41b7e9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -467,9 +467,6 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
if (!info->return_size || !info->return_pointer)
return -EINVAL;
- /* Ensure IB tests are run on ring */
- flush_delayed_work(&adev->late_init_work);
-
switch (info->query) {
case AMDGPU_INFO_ACCEL_WORKING:
ui32 = adev->accel_working;
@@ -950,6 +947,9 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
struct amdgpu_fpriv *fpriv;
int r, pasid;
+ /* Ensure IB tests are run on ring */
+ flush_delayed_work(&adev->late_init_work);
+
file_priv->driver_priv = NULL;
r = pm_runtime_get_sync(dev->dev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
index e55508b39496..3e6823fdd939 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
@@ -238,44 +238,40 @@ static void amdgpu_mn_invalidate_node(struct amdgpu_mn_node *node,
* amdgpu_mn_invalidate_range_start_gfx - callback to notify about mm change
*
* @mn: our notifier
- * @mm: the mm this callback is about
- * @start: start of updated range
- * @end: end of updated range
+ * @range: mmu notifier context
*
* Block for operations on BOs to finish and mark pages as accessed and
* potentially dirty.
*/
static int amdgpu_mn_invalidate_range_start_gfx(struct mmu_notifier *mn,
- struct mm_struct *mm,
- unsigned long start,
- unsigned long end,
- bool blockable)
+ const struct mmu_notifier_range *range)
{
struct amdgpu_mn *amn = container_of(mn, struct amdgpu_mn, mn);
struct interval_tree_node *it;
+ unsigned long end;
/* notification is exclusive, but interval is inclusive */
- end -= 1;
+ end = range->end - 1;
/* TODO we should be able to split locking for interval tree and
* amdgpu_mn_invalidate_node
*/
- if (amdgpu_mn_read_lock(amn, blockable))
+ if (amdgpu_mn_read_lock(amn, range->blockable))
return -EAGAIN;
- it = interval_tree_iter_first(&amn->objects, start, end);
+ it = interval_tree_iter_first(&amn->objects, range->start, end);
while (it) {
struct amdgpu_mn_node *node;
- if (!blockable) {
+ if (!range->blockable) {
amdgpu_mn_read_unlock(amn);
return -EAGAIN;
}
node = container_of(it, struct amdgpu_mn_node, it);
- it = interval_tree_iter_next(it, start, end);
+ it = interval_tree_iter_next(it, range->start, end);
- amdgpu_mn_invalidate_node(node, start, end);
+ amdgpu_mn_invalidate_node(node, range->start, end);
}
return 0;
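The interface change above replaces the separate (mm, start, end, blockable) arguments with a single const struct mmu_notifier_range pointer. A minimal hedged sketch of a callback written against the new signature; example_evict() is a hypothetical stand-in for the interval-tree walk:

	static void example_evict(struct mm_struct *mm, unsigned long start,
				  unsigned long end)
	{
		/* placeholder: walk BOs overlapping [start, end] and invalidate them */
	}

	static int example_invalidate_range_start(struct mmu_notifier *mn,
						  const struct mmu_notifier_range *range)
	{
		/* notification is exclusive, but the interval is inclusive */
		unsigned long end = range->end - 1;

		if (!range->blockable)
			return -EAGAIN;	/* caller cannot sleep; ask it to retry */

		example_evict(range->mm, range->start, end);
		return 0;
	}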
@@ -294,39 +290,38 @@ static int amdgpu_mn_invalidate_range_start_gfx(struct mmu_notifier *mn,
* are restored in amdgpu_mn_invalidate_range_end_hsa.
*/
static int amdgpu_mn_invalidate_range_start_hsa(struct mmu_notifier *mn,
- struct mm_struct *mm,
- unsigned long start,
- unsigned long end,
- bool blockable)
+ const struct mmu_notifier_range *range)
{
struct amdgpu_mn *amn = container_of(mn, struct amdgpu_mn, mn);
struct interval_tree_node *it;
+ unsigned long end;
/* notification is exclusive, but interval is inclusive */
- end -= 1;
+ end = range->end - 1;
- if (amdgpu_mn_read_lock(amn, blockable))
+ if (amdgpu_mn_read_lock(amn, range->blockable))
return -EAGAIN;
- it = interval_tree_iter_first(&amn->objects, start, end);
+ it = interval_tree_iter_first(&amn->objects, range->start, end);
while (it) {
struct amdgpu_mn_node *node;
struct amdgpu_bo *bo;
- if (!blockable) {
+ if (!range->blockable) {
amdgpu_mn_read_unlock(amn);
return -EAGAIN;
}
node = container_of(it, struct amdgpu_mn_node, it);
- it = interval_tree_iter_next(it, start, end);
+ it = interval_tree_iter_next(it, range->start, end);
list_for_each_entry(bo, &node->bos, mn_list) {
struct kgd_mem *mem = bo->kfd_bo;
if (amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm,
- start, end))
- amdgpu_amdkfd_evict_userptr(mem, mm);
+ range->start,
+ end))
+ amdgpu_amdkfd_evict_userptr(mem, range->mm);
}
}
@@ -344,9 +339,7 @@ static int amdgpu_mn_invalidate_range_start_hsa(struct mmu_notifier *mn,
* Release the lock again to allow new command submissions.
*/
static void amdgpu_mn_invalidate_range_end(struct mmu_notifier *mn,
- struct mm_struct *mm,
- unsigned long start,
- unsigned long end)
+ const struct mmu_notifier_range *range)
{
struct amdgpu_mn *amn = container_of(mn, struct amdgpu_mn, mn);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index 0dc2c5c57015..aadd0fa42e43 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -38,7 +38,6 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_plane_helper.h>
-#include <drm/drm_fb_helper.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <linux/hrtimer.h>
@@ -294,13 +293,6 @@ struct amdgpu_display_funcs {
uint16_t connector_object_id,
struct amdgpu_hpd *hpd,
struct amdgpu_router *router);
- /* it is used to enter or exit into free sync mode */
- int (*notify_freesync)(struct drm_device *dev, void *data,
- struct drm_file *filp);
- /* it is used to allow enablement of freesync mode */
- int (*set_freesync_property)(struct drm_connector *connector,
- struct drm_property *property,
- uint64_t val);
};
@@ -340,6 +332,8 @@ struct amdgpu_mode_info {
struct drm_property *dither_property;
/* maximum number of bits per channel for monitor color */
struct drm_property *max_bpc_property;
+ /* Adaptive Backlight Modulation (power feature) */
+ struct drm_property *abm_level_property;
/* hardcoded DFP edid from BIOS */
struct edid *bios_hardcoded_edid;
int bios_hardcoded_edid_size;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index cf768acb51dc..728e15e5d68a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -81,7 +81,7 @@ static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo)
amdgpu_bo_subtract_pin_size(bo);
if (bo->kfd_bo)
- amdgpu_amdkfd_unreserve_system_memory_limit(bo);
+ amdgpu_amdkfd_unreserve_memory_limit(bo);
amdgpu_bo_kunmap(bo);
@@ -608,53 +608,6 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
}
/**
- * amdgpu_bo_backup_to_shadow - Backs up an &amdgpu_bo buffer object
- * @adev: amdgpu device object
- * @ring: amdgpu_ring for the engine handling the buffer operations
- * @bo: &amdgpu_bo buffer to be backed up
- * @resv: reservation object with embedded fence
- * @fence: dma_fence associated with the operation
- * @direct: whether to submit the job directly
- *
- * Copies an &amdgpu_bo buffer object to its shadow object.
- * Not used for now.
- *
- * Returns:
- * 0 for success or a negative error code on failure.
- */
-int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev,
- struct amdgpu_ring *ring,
- struct amdgpu_bo *bo,
- struct reservation_object *resv,
- struct dma_fence **fence,
- bool direct)
-
-{
- struct amdgpu_bo *shadow = bo->shadow;
- uint64_t bo_addr, shadow_addr;
- int r;
-
- if (!shadow)
- return -EINVAL;
-
- bo_addr = amdgpu_bo_gpu_offset(bo);
- shadow_addr = amdgpu_bo_gpu_offset(bo->shadow);
-
- r = reservation_object_reserve_shared(bo->tbo.resv, 1);
- if (r)
- goto err;
-
- r = amdgpu_copy_buffer(ring, bo_addr, shadow_addr,
- amdgpu_bo_size(bo), resv, fence,
- direct, false);
- if (!r)
- amdgpu_bo_fence(bo, *fence, true);
-
-err:
- return r;
-}
-
-/**
* amdgpu_bo_validate - validate an &amdgpu_bo buffer object
* @bo: pointer to the buffer object
*
@@ -959,7 +912,7 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo)
struct ttm_operation_ctx ctx = { false, false };
int r, i;
- if (!bo->pin_count) {
+ if (WARN_ON_ONCE(!bo->pin_count)) {
dev_warn(adev->dev, "%p unpin not necessary\n", bo);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 7d3312d0da11..9291c2f837e9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -267,11 +267,6 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
bool shared);
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo);
-int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev,
- struct amdgpu_ring *ring,
- struct amdgpu_bo *bo,
- struct reservation_object *resv,
- struct dma_fence **fence, bool direct);
int amdgpu_bo_validate(struct amdgpu_bo *bo);
int amdgpu_bo_restore_shadow(struct amdgpu_bo *shadow,
struct dma_fence **fence);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 7235cd0b0fa9..1f61ed95727c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -33,6 +33,8 @@
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/nospec.h>
+#include "hwmgr.h"
+#define WIDTH_4K 3840
static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev);
@@ -1642,6 +1644,19 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
return 0;
+ /* Skip fan attributes on APU */
+ if ((adev->flags & AMD_IS_APU) &&
+ (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
+ attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
+ attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
+ attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
+ attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
+ attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
+ attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
+ attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
+ attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
+ return 0;
+
/* Skip limit attributes if DPM is not enabled */
if (!adev->pm.dpm_enabled &&
(attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
@@ -1956,6 +1971,17 @@ void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable);
mutex_unlock(&adev->pm.mutex);
}
+ /* enable/disable Low Memory PState for UVD (4k videos) */
+ if (adev->asic_type == CHIP_STONEY &&
+ adev->uvd.decode_image_width >= WIDTH_4K) {
+ struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
+
+ if (hwmgr && hwmgr->hwmgr_func &&
+ hwmgr->hwmgr_func->update_nbdpm_pstate)
+ hwmgr->hwmgr_func->update_nbdpm_pstate(hwmgr,
+ !enable,
+ true);
+ }
}
void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
index 3e44d889f7af..71913a18d142 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
@@ -39,8 +39,6 @@
#include <drm/amdgpu_drm.h>
#include <linux/dma-buf.h>
-static const struct dma_buf_ops amdgpu_dmabuf_ops;
-
/**
* amdgpu_gem_prime_get_sg_table - &drm_driver.gem_prime_get_sg_table
* implementation
@@ -332,7 +330,7 @@ static int amdgpu_gem_begin_cpu_access(struct dma_buf *dma_buf,
return ret;
}
-static const struct dma_buf_ops amdgpu_dmabuf_ops = {
+const struct dma_buf_ops amdgpu_dmabuf_ops = {
.attach = amdgpu_gem_map_attach,
.detach = amdgpu_gem_map_detach,
.map_dma_buf = drm_gem_map_dma_buf,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index e05dc66b1090..8fab0d637ee5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -155,10 +155,14 @@ psp_cmd_submit_buf(struct psp_context *psp,
return ret;
}
-static void psp_prep_tmr_cmd_buf(struct psp_gfx_cmd_resp *cmd,
+static void psp_prep_tmr_cmd_buf(struct psp_context *psp,
+ struct psp_gfx_cmd_resp *cmd,
uint64_t tmr_mc, uint32_t size)
{
- cmd->cmd_id = GFX_CMD_ID_SETUP_TMR;
+ if (psp_support_vmr_ring(psp))
+ cmd->cmd_id = GFX_CMD_ID_SETUP_VMR;
+ else
+ cmd->cmd_id = GFX_CMD_ID_SETUP_TMR;
cmd->cmd.cmd_setup_tmr.buf_phy_addr_lo = lower_32_bits(tmr_mc);
cmd->cmd.cmd_setup_tmr.buf_phy_addr_hi = upper_32_bits(tmr_mc);
cmd->cmd.cmd_setup_tmr.buf_size = size;
@@ -192,7 +196,7 @@ static int psp_tmr_load(struct psp_context *psp)
if (!cmd)
return -ENOMEM;
- psp_prep_tmr_cmd_buf(cmd, psp->tmr_mc_addr, PSP_TMR_SIZE);
+ psp_prep_tmr_cmd_buf(psp, cmd, psp->tmr_mc_addr, PSP_TMR_SIZE);
DRM_INFO("reserve 0x%x from 0x%llx for PSP TMR SIZE\n",
PSP_TMR_SIZE, psp->tmr_mc_addr);
@@ -536,8 +540,10 @@ static int psp_load_fw(struct amdgpu_device *adev)
int ret;
struct psp_context *psp = &adev->psp;
- if (amdgpu_sriov_vf(adev) && adev->in_gpu_reset != 0)
+ if (amdgpu_sriov_vf(adev) && adev->in_gpu_reset) {
+ psp_ring_destroy(psp, PSP_RING_TYPE__KM);
goto skip_memalloc;
+ }
psp->cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
if (!psp->cmd)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
index 9ec5d1a666a6..3ee573b4016e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
@@ -83,12 +83,13 @@ struct psp_funcs
enum AMDGPU_UCODE_ID ucode_type);
bool (*smu_reload_quirk)(struct psp_context *psp);
int (*mode1_reset)(struct psp_context *psp);
- uint64_t (*xgmi_get_node_id)(struct psp_context *psp);
- uint64_t (*xgmi_get_hive_id)(struct psp_context *psp);
+ int (*xgmi_get_node_id)(struct psp_context *psp, uint64_t *node_id);
+ int (*xgmi_get_hive_id)(struct psp_context *psp, uint64_t *hive_id);
int (*xgmi_get_topology_info)(struct psp_context *psp, int number_devices,
struct psp_xgmi_topology_info *topology);
int (*xgmi_set_topology_info)(struct psp_context *psp, int number_devices,
struct psp_xgmi_topology_info *topology);
+ bool (*support_vmr_ring)(struct psp_context *psp);
};
struct psp_xgmi_context {
@@ -192,12 +193,14 @@ struct psp_xgmi_topology_info {
((psp)->funcs->bootloader_load_sos ? (psp)->funcs->bootloader_load_sos((psp)) : 0)
#define psp_smu_reload_quirk(psp) \
((psp)->funcs->smu_reload_quirk ? (psp)->funcs->smu_reload_quirk((psp)) : false)
+#define psp_support_vmr_ring(psp) \
+ ((psp)->funcs->support_vmr_ring ? (psp)->funcs->support_vmr_ring((psp)) : false)
#define psp_mode1_reset(psp) \
((psp)->funcs->mode1_reset ? (psp)->funcs->mode1_reset((psp)) : false)
-#define psp_xgmi_get_node_id(psp) \
- ((psp)->funcs->xgmi_get_node_id ? (psp)->funcs->xgmi_get_node_id((psp)) : 0)
-#define psp_xgmi_get_hive_id(psp) \
- ((psp)->funcs->xgmi_get_hive_id ? (psp)->funcs->xgmi_get_hive_id((psp)) : 0)
+#define psp_xgmi_get_node_id(psp, node_id) \
+ ((psp)->funcs->xgmi_get_node_id ? (psp)->funcs->xgmi_get_node_id((psp), (node_id)) : -EINVAL)
+#define psp_xgmi_get_hive_id(psp, hive_id) \
+ ((psp)->funcs->xgmi_get_hive_id ? (psp)->funcs->xgmi_get_hive_id((psp), (hive_id)) : -EINVAL)
#define psp_xgmi_get_topology_info(psp, num_device, topology) \
((psp)->funcs->xgmi_get_topology_info ? \
(psp)->funcs->xgmi_get_topology_info((psp), (num_device), (topology)) : -EINVAL)
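With the node/hive ID queries now returning an error code and filling an out-parameter, callers check the return value instead of treating 0 as "no ID". A hedged usage sketch; the gmc.xgmi.hive_id/node_id destinations are assumed members of struct amdgpu_xgmi and the error handling is illustrative:

	static int example_read_xgmi_ids(struct amdgpu_device *adev)
	{
		uint64_t hive_id, node_id;
		int r;

		r = psp_xgmi_get_hive_id(&adev->psp, &hive_id);
		if (r)
			return r;	/* -EINVAL when the ASIC provides no callback */

		r = psp_xgmi_get_node_id(&adev->psp, &node_id);
		if (r)
			return r;

		adev->gmc.xgmi.hive_id = hive_id;
		adev->gmc.xgmi.node_id = node_id;
		return 0;
	}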
@@ -217,7 +220,6 @@ extern const struct amdgpu_ip_block_version psp_v10_0_ip_block;
int psp_gpu_reset(struct amdgpu_device *adev);
int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
-
extern const struct amdgpu_ip_block_version psp_v11_0_ip_block;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 5b75bdc8dc28..335a0edf114b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -397,7 +397,7 @@ bool amdgpu_ring_soft_recovery(struct amdgpu_ring *ring, unsigned int vmid,
{
ktime_t deadline = ktime_add_us(ktime_get(), 10000);
- if (!ring->funcs->soft_recovery)
+ if (!ring->funcs->soft_recovery || !fence)
return false;
atomic_inc(&ring->adev->gpu_reset_counter);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index 0beb01fef83f..d87e828a084b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -29,7 +29,7 @@
#include <drm/drm_print.h>
/* max number of rings */
-#define AMDGPU_MAX_RINGS 21
+#define AMDGPU_MAX_RINGS 23
#define AMDGPU_MAX_GFX_RINGS 1
#define AMDGPU_MAX_COMPUTE_RINGS 8
#define AMDGPU_MAX_VCE_RINGS 3
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 69896f451e8a..4e5d13e41f6a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -692,6 +692,8 @@ static int amdgpu_uvd_cs_msg_decode(struct amdgpu_device *adev, uint32_t *msg,
buf_sizes[0x1] = dpb_size;
buf_sizes[0x2] = image_size;
buf_sizes[0x4] = min_ctx_size;
+ /* store image width to adjust nb memory pstate */
+ adev->uvd.decode_image_width = width;
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
index a3ab1a41060f..5eb63288d157 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
@@ -65,6 +65,8 @@ struct amdgpu_uvd {
struct drm_sched_entity entity;
struct delayed_work idle_work;
unsigned harvest_config;
+ /* store image width to adjust nb memory pstate */
+ unsigned decode_image_width;
};
int amdgpu_uvd_sw_init(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index e2e42e3fbcf3..ecf6f96df2ad 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -262,7 +262,7 @@ static int amdgpu_vcn_pause_dpg_mode(struct amdgpu_device *adev,
ring = &adev->vcn.ring_dec;
WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
- RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2));
+ RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2) & 0x7FFFFFFF);
SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON,
UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
@@ -322,7 +322,7 @@ static int amdgpu_vcn_pause_dpg_mode(struct amdgpu_device *adev,
ring = &adev->vcn.ring_dec;
WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
- RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2));
+ RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2) & 0x7FFFFFFF);
SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON,
UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
@@ -396,16 +396,26 @@ void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
struct dpg_pause_state new_state;
+ unsigned int fences = 0;
+ unsigned int i;
- if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
+ for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
+ fences += amdgpu_fence_count_emitted(&adev->vcn.ring_enc[i]);
+ }
+ if (fences)
new_state.fw_based = VCN_DPG_STATE__PAUSE;
else
- new_state.fw_based = adev->vcn.pause_state.fw_based;
+ new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
- if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG)
+ if (amdgpu_fence_count_emitted(&adev->vcn.ring_jpeg))
new_state.jpeg = VCN_DPG_STATE__PAUSE;
else
- new_state.jpeg = adev->vcn.pause_state.jpeg;
+ new_state.jpeg = VCN_DPG_STATE__UNPAUSE;
+
+ if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
+ new_state.fw_based = VCN_DPG_STATE__PAUSE;
+ else if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG)
+ new_state.jpeg = VCN_DPG_STATE__PAUSE;
amdgpu_vcn_pause_dpg_mode(adev, &new_state);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index cfee74732edb..462a04e0f5e6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -334,7 +334,7 @@ void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
if (adev->fw_vram_usage.va != NULL) {
adev->virt.fw_reserve.p_pf2vf =
- (struct amdgim_pf2vf_info_header *)(
+ (struct amd_sriov_msg_pf2vf_info_header *)(
adev->fw_vram_usage.va + AMDGIM_DATAEXCHANGE_OFFSET);
AMDGPU_FW_VRAM_PF2VF_READ(adev, header.size, &pf2vf_size);
AMDGPU_FW_VRAM_PF2VF_READ(adev, checksum, &checksum);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
index 0728fbc9a692..722deefc0a7e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
@@ -63,8 +63,8 @@ struct amdgpu_virt_ops {
* Firmware Reserve Frame buffer
*/
struct amdgpu_virt_fw_reserve {
- struct amdgim_pf2vf_info_header *p_pf2vf;
- struct amdgim_vf2pf_info_header *p_vf2pf;
+ struct amd_sriov_msg_pf2vf_info_header *p_pf2vf;
+ struct amd_sriov_msg_vf2pf_info_header *p_vf2pf;
unsigned int checksum_key;
};
/*
@@ -85,15 +85,17 @@ enum AMDGIM_FEATURE_FLAG {
AMDGIM_FEATURE_GIM_FLR_VRAMLOST = 0x4,
};
-struct amdgim_pf2vf_info_header {
+struct amd_sriov_msg_pf2vf_info_header {
/* the total structure size in byte. */
uint32_t size;
/* version of this structure, written by the GIM */
uint32_t version;
+ /* reserved */
+ uint32_t reserved[2];
} __aligned(4);
struct amdgim_pf2vf_info_v1 {
/* header contains size and version */
- struct amdgim_pf2vf_info_header header;
+ struct amd_sriov_msg_pf2vf_info_header header;
/* max_width * max_height */
unsigned int uvd_enc_max_pixels_count;
/* 16x16 pixels/sec, codec independent */
@@ -112,7 +114,7 @@ struct amdgim_pf2vf_info_v1 {
struct amdgim_pf2vf_info_v2 {
/* header contains size and version */
- struct amdgim_pf2vf_info_header header;
+ struct amd_sriov_msg_pf2vf_info_header header;
/* use private key from mailbox 2 to create checksum */
uint32_t checksum;
/* The features flags of the GIM driver supports. */
@@ -137,20 +139,22 @@ struct amdgim_pf2vf_info_v2 {
uint64_t vcefw_kboffset;
/* VCE FW size in KB */
uint32_t vcefw_ksize;
- uint32_t reserved[AMDGIM_GET_STRUCTURE_RESERVED_SIZE(256, 0, 0, (9 + sizeof(struct amdgim_pf2vf_info_header)/sizeof(uint32_t)), 3)];
+ uint32_t reserved[AMDGIM_GET_STRUCTURE_RESERVED_SIZE(256, 0, 0, (9 + sizeof(struct amd_sriov_msg_pf2vf_info_header)/sizeof(uint32_t)), 3)];
} __aligned(4);
-struct amdgim_vf2pf_info_header {
+struct amd_sriov_msg_vf2pf_info_header {
/* the total structure size in byte. */
uint32_t size;
/*version of this structure, written by the guest */
uint32_t version;
+ /* reserved */
+ uint32_t reserved[2];
} __aligned(4);
struct amdgim_vf2pf_info_v1 {
/* header contains size and version */
- struct amdgim_vf2pf_info_header header;
+ struct amd_sriov_msg_vf2pf_info_header header;
/* driver version */
char driver_version[64];
/* driver certification, 1=WHQL, 0=None */
@@ -180,7 +184,7 @@ struct amdgim_vf2pf_info_v1 {
struct amdgim_vf2pf_info_v2 {
/* header contains size and version */
- struct amdgim_vf2pf_info_header header;
+ struct amd_sriov_msg_vf2pf_info_header header;
uint32_t checksum;
/* driver version */
uint8_t driver_version[64];
@@ -206,7 +210,7 @@ struct amdgim_vf2pf_info_v2 {
uint32_t uvd_enc_usage;
/* guest uvd engine usage percentage. 0xffff means N/A. */
uint32_t uvd_enc_health;
- uint32_t reserved[AMDGIM_GET_STRUCTURE_RESERVED_SIZE(256, 64, 0, (12 + sizeof(struct amdgim_vf2pf_info_header)/sizeof(uint32_t)), 0)];
+ uint32_t reserved[AMDGIM_GET_STRUCTURE_RESERVED_SIZE(256, 64, 0, (12 + sizeof(struct amd_sriov_msg_vf2pf_info_header)/sizeof(uint32_t)), 0)];
} __aligned(4);
#define AMDGPU_FW_VRAM_VF2PF_VER 2
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 58a2363040dd..e73d152659a2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -181,7 +181,7 @@ static unsigned amdgpu_vm_num_entries(struct amdgpu_device *adev,
if (level == adev->vm_manager.root_level)
/* For the root directory */
- return round_up(adev->vm_manager.max_pfn, 1 << shift) >> shift;
+ return round_up(adev->vm_manager.max_pfn, 1ULL << shift) >> shift;
else if (level != AMDGPU_VM_PTB)
/* Everything in between */
return 512;
@@ -617,7 +617,8 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
{
entry->priority = 0;
entry->tv.bo = &vm->root.base.bo->tbo;
- entry->tv.shared = true;
+ /* One for the VM updates, one for TTM and one for the CS job */
+ entry->tv.num_shared = 3;
entry->user_pages = NULL;
list_add(&entry->tv.head, validated);
}
@@ -773,10 +774,6 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
ring = container_of(vm->entity.rq->sched, struct amdgpu_ring, sched);
- r = reservation_object_reserve_shared(bo->tbo.resv, 1);
- if (r)
- return r;
-
r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (r)
goto error;
@@ -1656,9 +1653,11 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
if (!amdgpu_vm_pt_descendant(adev, &cursor))
return -ENOENT;
continue;
- } else if (frag >= parent_shift) {
+ } else if (frag >= parent_shift &&
+ cursor.level - 1 != adev->vm_manager.root_level) {
/* If the fragment size is even larger than the parent
- * shift we should go up one level and check it again.
+ * shift we should go up one level and check it again
+ * unless one level up is the root level.
*/
if (!amdgpu_vm_pt_ancestor(&cursor))
return -ENOENT;
@@ -1666,10 +1665,10 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
}
/* Looks good so far, calculate parameters for the update */
- incr = AMDGPU_GPU_PAGE_SIZE << shift;
+ incr = (uint64_t)AMDGPU_GPU_PAGE_SIZE << shift;
mask = amdgpu_vm_entries_mask(adev, cursor.level);
pe_start = ((cursor.pfn >> shift) & mask) * 8;
- entry_end = (mask + 1) << shift;
+ entry_end = (uint64_t)(mask + 1) << shift;
entry_end += cursor.pfn & ~(entry_end - 1);
entry_end = min(entry_end, end);
@@ -1682,7 +1681,7 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
flags | AMDGPU_PTE_FRAG(frag));
pe_start += nptes * 8;
- dst += nptes * AMDGPU_GPU_PAGE_SIZE << shift;
+ dst += (uint64_t)nptes * AMDGPU_GPU_PAGE_SIZE << shift;
frag_start = upd_end;
if (frag_start >= frag_end) {
@@ -1842,10 +1841,6 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
if (r)
goto error_free;
- r = reservation_object_reserve_shared(vm->root.base.bo->tbo.resv, 1);
- if (r)
- goto error_free;
-
r = amdgpu_vm_update_ptes(&params, start, last + 1, addr, flags);
if (r)
goto error_free;
@@ -3026,6 +3021,10 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
if (r)
goto error_free_root;
+ r = reservation_object_reserve_shared(root->tbo.resv, 1);
+ if (r)
+ goto error_unreserve;
+
r = amdgpu_vm_clear_bo(adev, vm, root,
adev->vm_manager.root_level,
vm->pte_support_ats);
@@ -3055,7 +3054,6 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
}
INIT_KFIFO(vm->faults);
- vm->fault_credit = 16;
return 0;
@@ -3268,42 +3266,6 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
}
/**
- * amdgpu_vm_pasid_fault_credit - Check fault credit for given PASID
- *
- * @adev: amdgpu_device pointer
- * @pasid: PASID do identify the VM
- *
- * This function is expected to be called in interrupt context.
- *
- * Returns:
- * True if there was fault credit, false otherwise
- */
-bool amdgpu_vm_pasid_fault_credit(struct amdgpu_device *adev,
- unsigned int pasid)
-{
- struct amdgpu_vm *vm;
-
- spin_lock(&adev->vm_manager.pasid_lock);
- vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
- if (!vm) {
- /* VM not found, can't track fault credit */
- spin_unlock(&adev->vm_manager.pasid_lock);
- return true;
- }
-
- /* No lock needed. only accessed by IRQ handler */
- if (!vm->fault_credit) {
- /* Too many faults in this VM */
- spin_unlock(&adev->vm_manager.pasid_lock);
- return false;
- }
-
- vm->fault_credit--;
- spin_unlock(&adev->vm_manager.pasid_lock);
- return true;
-}
-
-/**
* amdgpu_vm_manager_init - init the VM manager
*
* @adev: amdgpu_device pointer
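
Several hunks in this file promote shift arithmetic to 64 bits (1ULL << shift, (uint64_t)(mask + 1) << shift, (uint64_t)nptes * AMDGPU_GPU_PAGE_SIZE << shift). The shift counts of the upper page-directory levels push the result past 32 bits, where plain unsigned arithmetic silently wraps. A small stand-alone illustration of the wrap, using the 512-entry level size from amdgpu_vm_num_entries():

/* Illustrative sketch; not part of the patch. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int entries = 512;	/* mask + 1 for a 9-bit page-table level */
	unsigned int shift = 27;	/* per-entry address shift of that level */

	uint64_t wrapped = entries << shift;		/* 32-bit math: 2^36 mod 2^32 == 0 */
	uint64_t correct = (uint64_t)entries << shift;	/* 64-bit math: 0x1000000000 */

	printf("32-bit shift: %#llx\n", (unsigned long long)wrapped);
	printf("64-bit shift: %#llx\n", (unsigned long long)correct);
	return 0;
}

Promoting the left operand before the shift is the fix; casting the already-truncated result would not help.
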
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 2a8898d19c8b..e8dcfd59fc93 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -229,9 +229,6 @@ struct amdgpu_vm {
/* Up to 128 pending retry page faults */
DECLARE_KFIFO(faults, u64, 128);
- /* Limit non-retry fault storms */
- unsigned int fault_credit;
-
/* Points to the KFD process VM info */
struct amdkfd_process_info *process_info;
@@ -299,8 +296,6 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, unsigned int pasid);
void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm);
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
-bool amdgpu_vm_pasid_fault_credit(struct amdgpu_device *adev,
- unsigned int pasid);
void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
struct list_head *validated,
struct amdgpu_bo_list_entry *entry);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
index 909216a9b447..8a8bc60cb6b4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
@@ -23,7 +23,7 @@
*/
#include <linux/list.h>
#include "amdgpu.h"
-#include "amdgpu_psp.h"
+#include "amdgpu_xgmi.h"
static DEFINE_MUTEX(xgmi_mutex);
@@ -31,15 +31,16 @@ static DEFINE_MUTEX(xgmi_mutex);
#define AMDGPU_MAX_XGMI_HIVE 8
#define AMDGPU_MAX_XGMI_DEVICE_PER_HIVE 4
-struct amdgpu_hive_info {
- uint64_t hive_id;
- struct list_head device_list;
-};
-
static struct amdgpu_hive_info xgmi_hives[AMDGPU_MAX_XGMI_HIVE];
static unsigned hive_count = 0;
-static struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev)
+
+void *amdgpu_xgmi_hive_try_lock(struct amdgpu_hive_info *hive)
+{
+ return &hive->device_list;
+}
+
+struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev)
{
int i;
struct amdgpu_hive_info *tmp;
@@ -58,39 +59,73 @@ static struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev)
tmp = &xgmi_hives[hive_count++];
tmp->hive_id = adev->gmc.xgmi.hive_id;
INIT_LIST_HEAD(&tmp->device_list);
+ mutex_init(&tmp->hive_lock);
+
return tmp;
}
+int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_device *adev)
+{
+ int ret = -EINVAL;
+
+	/* Each PSP needs to set the latest topology */
+ ret = psp_xgmi_set_topology_info(&adev->psp,
+ hive->number_devices,
+ &hive->topology_info);
+ if (ret)
+ dev_err(adev->dev,
+ "XGMI: Set topology failure on device %llx, hive %llx, ret %d",
+ adev->gmc.xgmi.node_id,
+ adev->gmc.xgmi.hive_id, ret);
+ else
+ dev_info(adev->dev, "XGMI: Set topology for node %d, hive 0x%llx.\n",
+ adev->gmc.xgmi.physical_node_id,
+ adev->gmc.xgmi.hive_id);
+
+ return ret;
+}
+
int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
{
- struct psp_xgmi_topology_info *tmp_topology;
+ struct psp_xgmi_topology_info *hive_topology;
struct amdgpu_hive_info *hive;
struct amdgpu_xgmi *entry;
- struct amdgpu_device *tmp_adev;
+ struct amdgpu_device *tmp_adev = NULL;
int count = 0, ret = -EINVAL;
- if ((adev->asic_type < CHIP_VEGA20) ||
- (adev->flags & AMD_IS_APU) )
+ if (!adev->gmc.xgmi.supported)
return 0;
- adev->gmc.xgmi.node_id = psp_xgmi_get_node_id(&adev->psp);
- adev->gmc.xgmi.hive_id = psp_xgmi_get_hive_id(&adev->psp);
- tmp_topology = kzalloc(sizeof(struct psp_xgmi_topology_info), GFP_KERNEL);
- if (!tmp_topology)
- return -ENOMEM;
+ ret = psp_xgmi_get_node_id(&adev->psp, &adev->gmc.xgmi.node_id);
+ if (ret) {
+ dev_err(adev->dev,
+ "XGMI: Failed to get node id\n");
+ return ret;
+ }
+
+ ret = psp_xgmi_get_hive_id(&adev->psp, &adev->gmc.xgmi.hive_id);
+ if (ret) {
+ dev_err(adev->dev,
+ "XGMI: Failed to get hive id\n");
+ return ret;
+ }
+
mutex_lock(&xgmi_mutex);
hive = amdgpu_get_xgmi_hive(adev);
if (!hive)
goto exit;
+ hive_topology = &hive->topology_info;
+
list_add_tail(&adev->gmc.xgmi.head, &hive->device_list);
list_for_each_entry(entry, &hive->device_list, head)
- tmp_topology->nodes[count++].node_id = entry->node_id;
+ hive_topology->nodes[count++].node_id = entry->node_id;
+ hive->number_devices = count;
	/* Each PSP needs to get the latest topology */
list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
- ret = psp_xgmi_get_topology_info(&tmp_adev->psp, count, tmp_topology);
+ ret = psp_xgmi_get_topology_info(&tmp_adev->psp, count, hive_topology);
if (ret) {
dev_err(tmp_adev->dev,
"XGMI: Get topology failure on device %llx, hive %llx, ret %d",
@@ -101,25 +136,33 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
}
}
- /* Each psp need to set the latest topology */
list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
- ret = psp_xgmi_set_topology_info(&tmp_adev->psp, count, tmp_topology);
- if (ret) {
- dev_err(tmp_adev->dev,
- "XGMI: Set topology failure on device %llx, hive %llx, ret %d",
- tmp_adev->gmc.xgmi.node_id,
- tmp_adev->gmc.xgmi.hive_id, ret);
- /* To do : continue with some node failed or disable the whole hive */
+ ret = amdgpu_xgmi_update_topology(hive, tmp_adev);
+ if (ret)
break;
- }
}
- if (!ret)
- dev_info(adev->dev, "XGMI: Add node %d to hive 0x%llx.\n",
- adev->gmc.xgmi.physical_node_id,
- adev->gmc.xgmi.hive_id);
exit:
mutex_unlock(&xgmi_mutex);
- kfree(tmp_topology);
return ret;
}
+
+void amdgpu_xgmi_remove_device(struct amdgpu_device *adev)
+{
+ struct amdgpu_hive_info *hive;
+
+ if (!adev->gmc.xgmi.supported)
+ return;
+
+ mutex_lock(&xgmi_mutex);
+
+ hive = amdgpu_get_xgmi_hive(adev);
+ if (!hive)
+ goto exit;
+
+ if (!(hive->number_devices--))
+ mutex_destroy(&hive->hive_lock);
+
+exit:
+ mutex_unlock(&xgmi_mutex);
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
new file mode 100644
index 000000000000..6151eb9c8ad3
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef __AMDGPU_XGMI_H__
+#define __AMDGPU_XGMI_H__
+
+#include "amdgpu_psp.h"
+
+struct amdgpu_hive_info {
+ uint64_t hive_id;
+ struct list_head device_list;
+ struct psp_xgmi_topology_info topology_info;
+ int number_devices;
+ struct mutex hive_lock;
+};
+
+struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev);
+int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_device *adev);
+int amdgpu_xgmi_add_device(struct amdgpu_device *adev);
+void amdgpu_xgmi_remove_device(struct amdgpu_device *adev);
+
+#endif
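
A hedged sketch of how the helpers declared above fit together, based only on the signatures in this header; the xgmi_mutex locking and the full error handling of the real callers are omitted, and the function name is made up:

/* Illustrative sketch; not part of the patch. */
static int example_xgmi_bringup(struct amdgpu_device *adev)
{
	struct amdgpu_hive_info *hive;
	int r;

	/* Registers the device with its hive; a no-op on non-XGMI parts. */
	r = amdgpu_xgmi_add_device(adev);
	if (r)
		return r;

	/* Later (e.g. after a reset) the recorded topology can be pushed again. */
	hive = amdgpu_get_xgmi_hive(adev);
	if (!hive)
		return -ENODEV;

	return amdgpu_xgmi_update_topology(hive, adev);
}

amdgpu_xgmi_remove_device() is the teardown counterpart: it decrements the hive's device count and destroys hive_lock once the last device leaves.
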
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index f41f5f57e9f3..71c50d8900e3 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -1755,6 +1755,7 @@ static const struct amdgpu_asic_funcs cik_asic_funcs =
.flush_hdp = &cik_flush_hdp,
.invalidate_hdp = &cik_invalidate_hdp,
.need_full_reset = &cik_need_full_reset,
+ .init_doorbell_index = &legacy_doorbell_index_init,
};
static int cik_common_early_init(void *handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.h b/drivers/gpu/drm/amd/amdgpu/cik.h
index e49c6f15a0a0..54c625a2e570 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.h
+++ b/drivers/gpu/drm/amd/amdgpu/cik.h
@@ -30,4 +30,5 @@ void cik_srbm_select(struct amdgpu_device *adev,
u32 me, u32 pipe, u32 queue, u32 vmid);
int cik_set_ip_blocks(struct amdgpu_device *adev);
+void legacy_doorbell_index_init(struct amdgpu_device *adev);
#endif
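
legacy_doorbell_index_init() is only declared here; a hedged sketch of what such a callback is expected to do, using the adev->doorbell_index fields consumed by the gfx, sdma and ih hunks later in this patch. The numeric values are placeholders, not the actual assignments:

/* Illustrative sketch; field values are assumptions, not the driver's. */
static void example_legacy_doorbell_index_init(struct amdgpu_device *adev)
{
	/* Placeholder values; the real init keeps the pre-existing slots so
	 * rings retain the doorbells they used before this refactor. */
	adev->doorbell_index.kiq          = 0x000;
	adev->doorbell_index.mec_ring0    = 0x010;
	adev->doorbell_index.mec_ring7    = 0x017;
	adev->doorbell_index.gfx_ring0    = 0x020;
	adev->doorbell_index.sdma_engine0 = 0x1e0;
	adev->doorbell_index.sdma_engine1 = 0x1e1;
	adev->doorbell_index.ih           = 0x1e8;
}
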
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_ih.c b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
index b5775c6a857b..8a8b4967a101 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
@@ -228,34 +228,6 @@ static u32 cik_ih_get_wptr(struct amdgpu_device *adev)
* [127:96] - reserved
*/
-/**
- * cik_ih_prescreen_iv - prescreen an interrupt vector
- *
- * @adev: amdgpu_device pointer
- *
- * Returns true if the interrupt vector should be further processed.
- */
-static bool cik_ih_prescreen_iv(struct amdgpu_device *adev)
-{
- u32 ring_index = adev->irq.ih.rptr >> 2;
- u16 pasid;
-
- switch (le32_to_cpu(adev->irq.ih.ring[ring_index]) & 0xff) {
- case 146:
- case 147:
- pasid = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]) >> 16;
- if (!pasid || amdgpu_vm_pasid_fault_credit(adev, pasid))
- return true;
- break;
- default:
- /* Not a VM fault */
- return true;
- }
-
- adev->irq.ih.rptr += 16;
- return false;
-}
-
/**
* cik_ih_decode_iv - decode an interrupt vector
*
@@ -461,7 +433,6 @@ static const struct amd_ip_funcs cik_ih_ip_funcs = {
static const struct amdgpu_ih_funcs cik_ih_funcs = {
.get_wptr = cik_ih_get_wptr,
- .prescreen_iv = cik_ih_prescreen_iv,
.decode_iv = cik_ih_decode_iv,
.set_rptr = cik_ih_set_rptr
};
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_ih.c b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
index df5ac4d85a00..9d3ea298e116 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
@@ -208,34 +208,6 @@ static u32 cz_ih_get_wptr(struct amdgpu_device *adev)
}
/**
- * cz_ih_prescreen_iv - prescreen an interrupt vector
- *
- * @adev: amdgpu_device pointer
- *
- * Returns true if the interrupt vector should be further processed.
- */
-static bool cz_ih_prescreen_iv(struct amdgpu_device *adev)
-{
- u32 ring_index = adev->irq.ih.rptr >> 2;
- u16 pasid;
-
- switch (le32_to_cpu(adev->irq.ih.ring[ring_index]) & 0xff) {
- case 146:
- case 147:
- pasid = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]) >> 16;
- if (!pasid || amdgpu_vm_pasid_fault_credit(adev, pasid))
- return true;
- break;
- default:
- /* Not a VM fault */
- return true;
- }
-
- adev->irq.ih.rptr += 16;
- return false;
-}
-
-/**
* cz_ih_decode_iv - decode an interrupt vector
*
* @adev: amdgpu_device pointer
@@ -442,7 +414,6 @@ static const struct amd_ip_funcs cz_ih_ip_funcs = {
static const struct amdgpu_ih_funcs cz_ih_funcs = {
.get_wptr = cz_ih_get_wptr,
- .prescreen_iv = cz_ih_prescreen_iv,
.decode_iv = cz_ih_decode_iv,
.set_rptr = cz_ih_set_rptr
};
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index f467b9bd090d..3a9fb6018c16 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -4363,7 +4363,7 @@ static int gfx_v7_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
ring->ring_obj = NULL;
ring->use_doorbell = true;
- ring->doorbell_index = AMDGPU_DOORBELL_MEC_RING0 + ring_id;
+ ring->doorbell_index = adev->doorbell_index.mec_ring0 + ring_id;
sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index cb066a8dccd7..381f593b0cda 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -44,7 +44,6 @@
#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_enum.h"
#include "gca/gfx_8_0_sh_mask.h"
-#include "gca/gfx_8_0_enum.h"
#include "dce/dce_10_0_d.h"
#include "dce/dce_10_0_sh_mask.h"
@@ -1891,7 +1890,7 @@ static int gfx_v8_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
ring->ring_obj = NULL;
ring->use_doorbell = true;
- ring->doorbell_index = AMDGPU_DOORBELL_MEC_RING0 + ring_id;
+ ring->doorbell_index = adev->doorbell_index.mec_ring0 + ring_id;
ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr
+ (ring_id * GFX8_MEC_HPD_SIZE);
sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
@@ -2002,7 +2001,7 @@ static int gfx_v8_0_sw_init(void *handle)
/* no gfx doorbells on iceland */
if (adev->asic_type != CHIP_TOPAZ) {
ring->use_doorbell = true;
- ring->doorbell_index = AMDGPU_DOORBELL_GFX_RING0;
+ ring->doorbell_index = adev->doorbell_index.gfx_ring0;
}
r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq,
@@ -4069,6 +4068,11 @@ static void gfx_v8_0_rlc_start(struct amdgpu_device *adev)
static int gfx_v8_0_rlc_resume(struct amdgpu_device *adev)
{
+ if (amdgpu_sriov_vf(adev)) {
+ gfx_v8_0_init_csb(adev);
+ return 0;
+ }
+
adev->gfx.rlc.funcs->stop(adev);
adev->gfx.rlc.funcs->reset(adev);
gfx_v8_0_init_pg(adev);
@@ -4216,7 +4220,7 @@ static void gfx_v8_0_set_cpg_door_bell(struct amdgpu_device *adev, struct amdgpu
tmp = REG_SET_FIELD(0, CP_RB_DOORBELL_RANGE_LOWER,
DOORBELL_RANGE_LOWER,
- AMDGPU_DOORBELL_GFX_RING0);
+ adev->doorbell_index.gfx_ring0);
WREG32(mmCP_RB_DOORBELL_RANGE_LOWER, tmp);
WREG32(mmCP_RB_DOORBELL_RANGE_UPPER,
@@ -4645,8 +4649,8 @@ static int gfx_v8_0_kcq_init_queue(struct amdgpu_ring *ring)
static void gfx_v8_0_set_mec_doorbell_range(struct amdgpu_device *adev)
{
if (adev->asic_type > CHIP_TONGA) {
- WREG32(mmCP_MEC_DOORBELL_RANGE_LOWER, AMDGPU_DOORBELL_KIQ << 2);
- WREG32(mmCP_MEC_DOORBELL_RANGE_UPPER, AMDGPU_DOORBELL_MEC_RING7 << 2);
+ WREG32(mmCP_MEC_DOORBELL_RANGE_LOWER, adev->doorbell_index.kiq << 2);
+ WREG32(mmCP_MEC_DOORBELL_RANGE_UPPER, adev->doorbell_index.mec_ring7 << 2);
}
/* enable doorbells */
WREG32_FIELD(CP_PQ_STATUS, DOORBELL_ENABLE, 1);
@@ -4948,14 +4952,13 @@ static bool gfx_v8_0_check_soft_reset(void *handle)
static int gfx_v8_0_pre_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
+ u32 grbm_soft_reset = 0;
if ((!adev->gfx.grbm_soft_reset) &&
(!adev->gfx.srbm_soft_reset))
return 0;
grbm_soft_reset = adev->gfx.grbm_soft_reset;
- srbm_soft_reset = adev->gfx.srbm_soft_reset;
/* stop the rlc */
adev->gfx.rlc.funcs->stop(adev);
@@ -5052,14 +5055,13 @@ static int gfx_v8_0_soft_reset(void *handle)
static int gfx_v8_0_post_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
+ u32 grbm_soft_reset = 0;
if ((!adev->gfx.grbm_soft_reset) &&
(!adev->gfx.srbm_soft_reset))
return 0;
grbm_soft_reset = adev->gfx.grbm_soft_reset;
- srbm_soft_reset = adev->gfx.srbm_soft_reset;
if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) ||
REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPF) ||
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index c27caa144c57..7556716038d3 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -86,6 +86,7 @@ MODULE_FIRMWARE("amdgpu/picasso_me.bin");
MODULE_FIRMWARE("amdgpu/picasso_mec.bin");
MODULE_FIRMWARE("amdgpu/picasso_mec2.bin");
MODULE_FIRMWARE("amdgpu/picasso_rlc.bin");
+MODULE_FIRMWARE("amdgpu/picasso_rlc_am4.bin");
MODULE_FIRMWARE("amdgpu/raven2_ce.bin");
MODULE_FIRMWARE("amdgpu/raven2_pfp.bin");
@@ -645,7 +646,20 @@ static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
adev->gfx.ce_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
+	/*
+	 * For Picasso on an AM4 socket board, use picasso_rlc_am4.bin
+	 * instead of picasso_rlc.bin.
+	 * Detection: PCO AM4 has revision 0xC8..0xCF or 0xD8..0xDF;
+	 * otherwise the part is PCO FP5.
+	 */
+ if (!strcmp(chip_name, "picasso") &&
+ (((adev->pdev->revision >= 0xC8) && (adev->pdev->revision <= 0xCF)) ||
+ ((adev->pdev->revision >= 0xD8) && (adev->pdev->revision <= 0xDF))))
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc_am4.bin", chip_name);
+ else
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
if (err)
goto out;
@@ -1566,7 +1580,7 @@ static int gfx_v9_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
ring->ring_obj = NULL;
ring->use_doorbell = true;
- ring->doorbell_index = (AMDGPU_DOORBELL_MEC_RING0 + ring_id) << 1;
+ ring->doorbell_index = (adev->doorbell_index.mec_ring0 + ring_id) << 1;
ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr
+ (ring_id * GFX9_MEC_HPD_SIZE);
sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
@@ -1655,7 +1669,7 @@ static int gfx_v9_0_sw_init(void *handle)
else
sprintf(ring->name, "gfx_%d", i);
ring->use_doorbell = true;
- ring->doorbell_index = AMDGPU_DOORBELL64_GFX_RING0 << 1;
+ ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1;
r = amdgpu_ring_init(adev, ring, 1024,
&adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP);
if (r)
@@ -2326,12 +2340,13 @@ static void gfx_v9_0_rlc_start(struct amdgpu_device *adev)
#endif
WREG32_FIELD15(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 1);
+ udelay(50);
/* carrizo do enable cp interrupt after cp inited */
- if (!(adev->flags & AMD_IS_APU))
+ if (!(adev->flags & AMD_IS_APU)) {
gfx_v9_0_enable_gui_idle_interrupt(adev, true);
-
- udelay(50);
+ udelay(50);
+ }
#ifdef AMDGPU_RLC_DEBUG_RETRY
/* RLC_GPM_GENERAL_6 : RLC Ucode version */
@@ -2981,9 +2996,9 @@ static int gfx_v9_0_kiq_init_register(struct amdgpu_ring *ring)
/* enable the doorbell if requested */
if (ring->use_doorbell) {
WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_LOWER,
- (AMDGPU_DOORBELL64_KIQ *2) << 2);
+ (adev->doorbell_index.kiq * 2) << 2);
WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
- (AMDGPU_DOORBELL64_USERQUEUE_END * 2) << 2);
+ (adev->doorbell_index.userqueue_end * 2) << 2);
}
WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
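
The Picasso firmware selection added to gfx_v9_0_init_microcode() above keys off the PCI revision; the nested condition is easier to read as a predicate. The helper below is a restatement for illustration, not driver code:

/* Illustrative sketch; not part of the patch. */
#include <stdbool.h>
#include <stdint.h>

/* Revision windows quoted in the comment above: AM4 parts load
 * picasso_rlc_am4.bin, everything else is FP5 and keeps picasso_rlc.bin. */
static bool example_picasso_is_am4(uint8_t rev)
{
	return (rev >= 0xC8 && rev <= 0xCF) ||
	       (rev >= 0xD8 && rev <= 0xDF);
}
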
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 531aaf377592..1ad7e6b8ed1d 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -56,6 +56,9 @@ MODULE_FIRMWARE("amdgpu/tonga_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris11_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris10_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris12_mc.bin");
+MODULE_FIRMWARE("amdgpu/polaris11_k_mc.bin");
+MODULE_FIRMWARE("amdgpu/polaris10_k_mc.bin");
+MODULE_FIRMWARE("amdgpu/polaris12_k_mc.bin");
static const u32 golden_settings_tonga_a11[] =
{
@@ -224,13 +227,39 @@ static int gmc_v8_0_init_microcode(struct amdgpu_device *adev)
chip_name = "tonga";
break;
case CHIP_POLARIS11:
- chip_name = "polaris11";
+ if (((adev->pdev->device == 0x67ef) &&
+ ((adev->pdev->revision == 0xe0) ||
+ (adev->pdev->revision == 0xe5))) ||
+ ((adev->pdev->device == 0x67ff) &&
+ ((adev->pdev->revision == 0xcf) ||
+ (adev->pdev->revision == 0xef) ||
+ (adev->pdev->revision == 0xff))))
+ chip_name = "polaris11_k";
+ else if ((adev->pdev->device == 0x67ef) &&
+ (adev->pdev->revision == 0xe2))
+ chip_name = "polaris11_k";
+ else
+ chip_name = "polaris11";
break;
case CHIP_POLARIS10:
- chip_name = "polaris10";
+ if ((adev->pdev->device == 0x67df) &&
+ ((adev->pdev->revision == 0xe1) ||
+ (adev->pdev->revision == 0xf7)))
+ chip_name = "polaris10_k";
+ else
+ chip_name = "polaris10";
break;
case CHIP_POLARIS12:
- chip_name = "polaris12";
+ if (((adev->pdev->device == 0x6987) &&
+ ((adev->pdev->revision == 0xc0) ||
+ (adev->pdev->revision == 0xc3))) ||
+ ((adev->pdev->device == 0x6981) &&
+ ((adev->pdev->revision == 0x00) ||
+ (adev->pdev->revision == 0x01) ||
+ (adev->pdev->revision == 0x10))))
+ chip_name = "polaris12_k";
+ else
+ chip_name = "polaris12";
break;
case CHIP_FIJI:
case CHIP_CARRIZO:
@@ -337,7 +366,7 @@ static int gmc_v8_0_polaris_mc_load_microcode(struct amdgpu_device *adev)
const struct mc_firmware_header_v1_0 *hdr;
const __le32 *fw_data = NULL;
const __le32 *io_mc_regs = NULL;
- u32 data, vbios_version;
+ u32 data;
int i, ucode_size, regs_size;
/* Skip MC ucode loading on SR-IOV capable boards.
@@ -348,13 +377,6 @@ static int gmc_v8_0_polaris_mc_load_microcode(struct amdgpu_device *adev)
if (amdgpu_sriov_bios(adev))
return 0;
- WREG32(mmMC_SEQ_IO_DEBUG_INDEX, 0x9F);
- data = RREG32(mmMC_SEQ_IO_DEBUG_DATA);
- vbios_version = data & 0xf;
-
- if (vbios_version == 0)
- return 0;
-
if (!adev->gmc.fw)
return -EINVAL;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 811231e4ec53..bacdaef77b6c 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -244,6 +244,62 @@ static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
return 0;
}
+/**
+ * gmc_v9_0_prescreen_iv - prescreen an interrupt vector
+ *
+ * @adev: amdgpu_device pointer
+ * @entry: decoded interrupt vector entry
+ * @addr: faulting GPU virtual address
+ *
+ * Returns true if the interrupt vector should be further processed.
+ */
+static bool gmc_v9_0_prescreen_iv(struct amdgpu_device *adev,
+ struct amdgpu_iv_entry *entry,
+ uint64_t addr)
+{
+ struct amdgpu_vm *vm;
+ u64 key;
+ int r;
+
+ /* No PASID, can't identify faulting process */
+ if (!entry->pasid)
+ return true;
+
+ /* Not a retry fault */
+ if (!(entry->src_data[1] & 0x80))
+ return true;
+
+ /* Track retry faults in per-VM fault FIFO. */
+ spin_lock(&adev->vm_manager.pasid_lock);
+ vm = idr_find(&adev->vm_manager.pasid_idr, entry->pasid);
+ if (!vm) {
+ /* VM not found, process it normally */
+ spin_unlock(&adev->vm_manager.pasid_lock);
+ return true;
+ }
+
+ key = AMDGPU_VM_FAULT(entry->pasid, addr);
+ r = amdgpu_vm_add_fault(vm->fault_hash, key);
+
+ /* Hash table is full or the fault is already being processed,
+ * ignore further page faults
+ */
+ if (r != 0) {
+ spin_unlock(&adev->vm_manager.pasid_lock);
+ return false;
+ }
+ /* No locking required with single writer and single reader */
+ r = kfifo_put(&vm->faults, key);
+ if (!r) {
+ /* FIFO is full. Ignore it until there is space */
+ amdgpu_vm_clear_fault(vm->fault_hash, key);
+ spin_unlock(&adev->vm_manager.pasid_lock);
+ return false;
+ }
+
+ spin_unlock(&adev->vm_manager.pasid_lock);
+ /* It's the first fault for this address, process it normally */
+ return true;
+}
+
static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
@@ -255,6 +311,9 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
addr = (u64)entry->src_data[0] << 12;
addr |= ((u64)entry->src_data[1] & 0xf) << 44;
+ if (!gmc_v9_0_prescreen_iv(adev, entry, addr))
+ return 1; /* This also prevents sending it to KFD */
+
if (!amdgpu_sriov_vf(adev)) {
status = RREG32(hub->vm_l2_pro_fault_status);
WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
@@ -338,9 +397,12 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev,
struct amdgpu_vmhub *hub = &adev->vmhub[i];
u32 tmp = gmc_v9_0_get_invalidate_req(vmid, flush_type);
- if (i == AMDGPU_GFXHUB && !adev->in_gpu_reset &&
- adev->gfx.kiq.ring.sched.ready &&
- (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev))) {
+ /* This is necessary for a HW workaround under SRIOV as well
+ * as GFXOFF under bare metal
+ */
+ if (adev->gfx.kiq.ring.sched.ready &&
+ (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) &&
+ !adev->in_gpu_reset) {
uint32_t req = hub->vm_inv_eng0_req + eng;
uint32_t ack = hub->vm_inv_eng0_ack + eng;
@@ -656,37 +718,46 @@ static bool gmc_v9_0_keep_stolen_memory(struct amdgpu_device *adev)
}
}
-static int gmc_v9_0_late_init(void *handle)
+static int gmc_v9_0_allocate_vm_inv_eng(struct amdgpu_device *adev)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- /*
- * The latest engine allocation on gfx9 is:
- * Engine 0, 1: idle
- * Engine 2, 3: firmware
- * Engine 4~13: amdgpu ring, subject to change when ring number changes
- * Engine 14~15: idle
- * Engine 16: kfd tlb invalidation
- * Engine 17: Gart flushes
- */
- unsigned vm_inv_eng[AMDGPU_MAX_VMHUBS] = { 4, 4 };
+ struct amdgpu_ring *ring;
+ unsigned vm_inv_engs[AMDGPU_MAX_VMHUBS] =
+ {GFXHUB_FREE_VM_INV_ENGS_BITMAP, MMHUB_FREE_VM_INV_ENGS_BITMAP};
unsigned i;
- int r;
+ unsigned vmhub, inv_eng;
- if (!gmc_v9_0_keep_stolen_memory(adev))
- amdgpu_bo_late_init(adev);
+ for (i = 0; i < adev->num_rings; ++i) {
+ ring = adev->rings[i];
+ vmhub = ring->funcs->vmhub;
+
+ inv_eng = ffs(vm_inv_engs[vmhub]);
+ if (!inv_eng) {
+ dev_err(adev->dev, "no VM inv eng for ring %s\n",
+ ring->name);
+ return -EINVAL;
+ }
- for(i = 0; i < adev->num_rings; ++i) {
- struct amdgpu_ring *ring = adev->rings[i];
- unsigned vmhub = ring->funcs->vmhub;
+ ring->vm_inv_eng = inv_eng - 1;
+ change_bit(inv_eng - 1, (unsigned long *)(&vm_inv_engs[vmhub]));
- ring->vm_inv_eng = vm_inv_eng[vmhub]++;
dev_info(adev->dev, "ring %s uses VM inv eng %u on hub %u\n",
ring->name, ring->vm_inv_eng, ring->funcs->vmhub);
}
- /* Engine 16 is used for KFD and 17 for GART flushes */
- for(i = 0; i < AMDGPU_MAX_VMHUBS; ++i)
- BUG_ON(vm_inv_eng[i] > 16);
+ return 0;
+}
+
+static int gmc_v9_0_late_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int r;
+
+ if (!gmc_v9_0_keep_stolen_memory(adev))
+ amdgpu_bo_late_init(adev);
+
+ r = gmc_v9_0_allocate_vm_inv_eng(adev);
+ if (r)
+ return r;
if (adev->asic_type == CHIP_VEGA10 && !amdgpu_sriov_vf(adev)) {
r = gmc_v9_0_ecc_available(adev);
@@ -899,6 +970,9 @@ static int gmc_v9_0_sw_init(void *handle)
/* This interrupt is VMC page fault.*/
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC, VMC_1_0__SRCID__VM_FAULT,
&adev->gmc.vm_fault);
+ if (r)
+ return r;
+
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2, UTCL2_1_0__SRCID__FAULT,
&adev->gmc.vm_fault);
@@ -931,7 +1005,7 @@ static int gmc_v9_0_sw_init(void *handle)
}
adev->need_swiotlb = drm_get_max_iomem() > ((u64)1 << dma_bits);
- if (adev->asic_type == CHIP_VEGA20) {
+ if (adev->gmc.xgmi.supported) {
r = gfxhub_v1_1_get_xgmi_info(adev);
if (r)
return r;
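
gmc_v9_0_prescreen_iv() above replaces the removed per-IH fault-credit throttling with a per-VM filter: a hash of pending fault keys plus a bounded FIFO, so a storm of retry faults for one address is handled once. A simplified sketch of that filtering pattern, assuming made-up table sizes and a trivial collision policy; the real code uses vm->fault_hash and a kfifo:

/* Illustrative sketch; sizes, names and collision policy are assumptions. */
#include <stdbool.h>
#include <stdint.h>

#define FILTER_SLOTS 64U
#define FIFO_SLOTS   16U

static uint64_t pending[FILTER_SLOTS];	/* 0 = empty slot */
static uint64_t fifo[FIFO_SLOTS];
static unsigned int fifo_head, fifo_tail;

/* Accept a fault key only if it is not already pending and the FIFO has room;
 * duplicates and overflow are dropped until the worker drains the FIFO. */
static bool example_prescreen(uint64_t key)
{
	unsigned int slot = key % FILTER_SLOTS;

	if (pending[slot] == key)
		return false;			/* already queued, drop */
	if (pending[slot] != 0)
		return true;			/* hash collision, pass through */
	if (fifo_head - fifo_tail == FIFO_SLOTS)
		return false;			/* FIFO full, drop for now */

	pending[slot] = key;
	fifo[fifo_head++ % FIFO_SLOTS] = key;
	return true;
}

/* Worker side: consume one queued fault and allow that key again. */
static bool example_drain_one(uint64_t *key)
{
	if (fifo_head == fifo_tail)
		return false;
	*key = fifo[fifo_tail++ % FIFO_SLOTS];
	pending[*key % FILTER_SLOTS] = 0;
	return true;
}
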
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.h b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.h
index b030ca5ea107..5c8deac65580 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.h
@@ -24,6 +24,16 @@
#ifndef __GMC_V9_0_H__
#define __GMC_V9_0_H__
+ /*
+ * The latest engine allocation on gfx9 is:
+ * Engine 2, 3: firmware
+ * Engine 0, 1, 4~16: amdgpu ring,
+ * subject to change when ring number changes
+ * Engine 17: Gart flushes
+ */
+#define GFXHUB_FREE_VM_INV_ENGS_BITMAP 0x1FFF3
+#define MMHUB_FREE_VM_INV_ENGS_BITMAP 0x1FFF3
+
extern const struct amd_ip_funcs gmc_v9_0_ip_funcs;
extern const struct amdgpu_ip_block_version gmc_v9_0_ip_block;
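
GFXHUB_FREE_VM_INV_ENGS_BITMAP and MMHUB_FREE_VM_INV_ENGS_BITMAP (0x1FFF3) mark invalidation engines 0, 1 and 4..16 as available, leaving 2 and 3 to firmware and 17 to GART flushes as the comment says. gmc_v9_0_allocate_vm_inv_eng() hands them out with ffs(); a stand-alone sketch of that bitmap allocator with made-up ring names:

/* Illustrative sketch; ring names are made up. */
#include <stdio.h>
#include <strings.h>	/* ffs() */

int main(void)
{
	unsigned int free_engines = 0x1FFF3;	/* bits 0,1 and 4..16 available */
	const char *rings[] = { "gfx", "comp_0", "sdma0", "kiq" };

	for (unsigned int i = 0; i < sizeof(rings) / sizeof(rings[0]); i++) {
		int eng = ffs(free_engines);	/* 1-based index of lowest set bit */
		if (!eng) {
			fprintf(stderr, "no VM inv eng left for %s\n", rings[i]);
			return 1;
		}
		free_engines &= ~(1u << (eng - 1));	/* mark it used */
		printf("ring %-6s -> VM inv eng %d\n", rings[i], eng - 1);
	}
	return 0;
}
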
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
index cf0fc61aebe6..a3984d10b604 100644
--- a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
@@ -208,34 +208,6 @@ static u32 iceland_ih_get_wptr(struct amdgpu_device *adev)
}
/**
- * iceland_ih_prescreen_iv - prescreen an interrupt vector
- *
- * @adev: amdgpu_device pointer
- *
- * Returns true if the interrupt vector should be further processed.
- */
-static bool iceland_ih_prescreen_iv(struct amdgpu_device *adev)
-{
- u32 ring_index = adev->irq.ih.rptr >> 2;
- u16 pasid;
-
- switch (le32_to_cpu(adev->irq.ih.ring[ring_index]) & 0xff) {
- case 146:
- case 147:
- pasid = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]) >> 16;
- if (!pasid || amdgpu_vm_pasid_fault_credit(adev, pasid))
- return true;
- break;
- default:
- /* Not a VM fault */
- return true;
- }
-
- adev->irq.ih.rptr += 16;
- return false;
-}
-
-/**
* iceland_ih_decode_iv - decode an interrupt vector
*
* @adev: amdgpu_device pointer
@@ -440,7 +412,6 @@ static const struct amd_ip_funcs iceland_ih_ip_funcs = {
static const struct amdgpu_ih_funcs iceland_ih_funcs = {
.get_wptr = iceland_ih_get_wptr,
- .prescreen_iv = iceland_ih_prescreen_iv,
.decode_iv = iceland_ih_decode_iv,
.set_rptr = iceland_ih_set_rptr
};
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
index 64e875d528dd..6a0fcd67662a 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
@@ -37,7 +37,6 @@
#include "gmc/gmc_8_2_sh_mask.h"
#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"
-#include "gca/gfx_8_0_sh_mask.h"
#include "dce/dce_10_0_d.h"
#include "dce/dce_10_0_sh_mask.h"
#include "smu/smu_7_1_3_d.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
index 6f9c54978cc1..accdedd63c98 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
@@ -32,6 +32,7 @@
#define smnCPM_CONTROL 0x11180460
#define smnPCIE_CNTL2 0x11180070
#define smnPCIE_CONFIG_CNTL 0x11180044
+#define smnPCIE_CI_CNTL 0x11180080
static u32 nbio_v6_1_get_rev_id(struct amdgpu_device *adev)
{
@@ -270,6 +271,12 @@ static void nbio_v6_1_init_registers(struct amdgpu_device *adev)
if (def != data)
WREG32_PCIE(smnPCIE_CONFIG_CNTL, data);
+
+ def = data = RREG32_PCIE(smnPCIE_CI_CNTL);
+ data = REG_SET_FIELD(data, PCIE_CI_CNTL, CI_SLV_ORDERING_DIS, 1);
+
+ if (def != data)
+ WREG32_PCIE(smnPCIE_CI_CNTL, data);
}
const struct amdgpu_nbio_funcs nbio_v6_1_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
index f8cee95d61cc..4cd31a276dcd 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
@@ -31,6 +31,7 @@
#define smnCPM_CONTROL 0x11180460
#define smnPCIE_CNTL2 0x11180070
+#define smnPCIE_CI_CNTL 0x11180080
static u32 nbio_v7_4_get_rev_id(struct amdgpu_device *adev)
{
@@ -222,7 +223,13 @@ static void nbio_v7_4_detect_hw_virt(struct amdgpu_device *adev)
static void nbio_v7_4_init_registers(struct amdgpu_device *adev)
{
+ uint32_t def, data;
+
+ def = data = RREG32_PCIE(smnPCIE_CI_CNTL);
+ data = REG_SET_FIELD(data, PCIE_CI_CNTL, CI_SLV_ORDERING_DIS, 1);
+ if (def != data)
+ WREG32_PCIE(smnPCIE_CI_CNTL, data);
}
const struct amdgpu_nbio_funcs nbio_v7_4_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
index 882bd83a28c4..0de00fbe9233 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
+++ b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
@@ -43,6 +43,8 @@ enum psp_gfx_crtl_cmd_id
GFX_CTRL_CMD_ID_ENABLE_INT = 0x00050000, /* enable PSP-to-Gfx interrupt */
GFX_CTRL_CMD_ID_DISABLE_INT = 0x00060000, /* disable PSP-to-Gfx interrupt */
GFX_CTRL_CMD_ID_MODE1_RST = 0x00070000, /* trigger the Mode 1 reset */
+    GFX_CTRL_CMD_ID_CONSUME_CMD = 0x000A0000,   /* send an interrupt to the PSP to update the VF write pointer */
+ GFX_CTRL_CMD_ID_DESTROY_GPCOM_RING = 0x000C0000, /* destroy GPCOM ring */
GFX_CTRL_CMD_ID_MAX = 0x000F0000, /* max command ID */
};
@@ -89,7 +91,8 @@ enum psp_gfx_cmd_id
GFX_CMD_ID_LOAD_IP_FW = 0x00000006, /* load HW IP FW */
GFX_CMD_ID_DESTROY_TMR = 0x00000007, /* destroy TMR region */
GFX_CMD_ID_SAVE_RESTORE = 0x00000008, /* save/restore HW IP FW */
-
+ GFX_CMD_ID_SETUP_VMR = 0x00000009, /* setup VMR region */
+ GFX_CMD_ID_DESTROY_VMR = 0x0000000A, /* destroy VMR region */
};
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
index 295c2205485a..d78b4306a36f 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
@@ -240,12 +240,9 @@ static int psp_v10_0_ring_stop(struct psp_context *psp,
enum psp_ring_type ring_type)
{
int ret = 0;
- struct psp_ring *ring;
unsigned int psp_ring_reg = 0;
struct amdgpu_device *adev = psp->adev;
- ring = &psp->km_ring;
-
/* Write the ring destroy command to C2PMSG_64 */
psp_ring_reg = 3 << 16;
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, psp_ring_reg);
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
index e5dd052d9e06..0c6e7f9b143f 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
@@ -34,6 +34,7 @@
#include "nbio/nbio_7_4_offset.h"
MODULE_FIRMWARE("amdgpu/vega20_sos.bin");
+MODULE_FIRMWARE("amdgpu/vega20_asd.bin");
MODULE_FIRMWARE("amdgpu/vega20_ta.bin");
/* address block */
@@ -100,6 +101,7 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
char fw_name[30];
int err = 0;
const struct psp_firmware_header_v1_0 *sos_hdr;
+ const struct psp_firmware_header_v1_0 *asd_hdr;
const struct ta_firmware_header_v1_0 *ta_hdr;
DRM_DEBUG("\n");
@@ -132,14 +134,30 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
adev->psp.sos_start_addr = (uint8_t *)adev->psp.sys_start_addr +
le32_to_cpu(sos_hdr->sos_offset_bytes);
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_asd.bin", chip_name);
+ err = request_firmware(&adev->psp.asd_fw, fw_name, adev->dev);
+ if (err)
+ goto out1;
+
+ err = amdgpu_ucode_validate(adev->psp.asd_fw);
+ if (err)
+ goto out1;
+
+ asd_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data;
+ adev->psp.asd_fw_version = le32_to_cpu(asd_hdr->header.ucode_version);
+ adev->psp.asd_feature_version = le32_to_cpu(asd_hdr->ucode_feature_version);
+ adev->psp.asd_ucode_size = le32_to_cpu(asd_hdr->header.ucode_size_bytes);
+ adev->psp.asd_start_addr = (uint8_t *)asd_hdr +
+ le32_to_cpu(asd_hdr->header.ucode_array_offset_bytes);
+
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name);
err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev);
if (err)
- goto out;
+ goto out2;
err = amdgpu_ucode_validate(adev->psp.ta_fw);
if (err)
- goto out;
+ goto out2;
ta_hdr = (const struct ta_firmware_header_v1_0 *)adev->psp.ta_fw->data;
adev->psp.ta_xgmi_ucode_version = le32_to_cpu(ta_hdr->ta_xgmi_ucode_version);
@@ -148,14 +166,18 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
return 0;
+
+out2:
+ release_firmware(adev->psp.ta_fw);
+ adev->psp.ta_fw = NULL;
+out1:
+ release_firmware(adev->psp.asd_fw);
+ adev->psp.asd_fw = NULL;
out:
- if (err) {
- dev_err(adev->dev,
- "psp v11.0: Failed to load firmware \"%s\"\n",
- fw_name);
- release_firmware(adev->psp.sos_fw);
- adev->psp.sos_fw = NULL;
- }
+ dev_err(adev->dev,
+ "psp v11.0: Failed to load firmware \"%s\"\n", fw_name);
+ release_firmware(adev->psp.sos_fw);
+ adev->psp.sos_fw = NULL;
return err;
}
@@ -171,8 +193,11 @@ static int psp_v11_0_bootloader_load_sysdrv(struct psp_context *psp)
* are already been loaded.
*/
sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
- if (sol_reg)
+ if (sol_reg) {
+ psp->sos_fw_version = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_58);
+ printk("sos fw version = 0x%x.\n", psp->sos_fw_version);
return 0;
+ }
/* Wait for bootloader to signify that is ready having bit 31 of C2PMSG_35 set to 1 */
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
@@ -288,6 +313,13 @@ static int psp_v11_0_ring_init(struct psp_context *psp,
return 0;
}
+static bool psp_v11_0_support_vmr_ring(struct psp_context *psp)
+{
+ if (amdgpu_sriov_vf(psp->adev) && psp->sos_fw_version > 0x80045)
+ return true;
+ return false;
+}
+
static int psp_v11_0_ring_create(struct psp_context *psp,
enum psp_ring_type ring_type)
{
@@ -296,26 +328,47 @@ static int psp_v11_0_ring_create(struct psp_context *psp,
struct psp_ring *ring = &psp->km_ring;
struct amdgpu_device *adev = psp->adev;
- /* Write low address of the ring to C2PMSG_69 */
- psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr);
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_69, psp_ring_reg);
- /* Write high address of the ring to C2PMSG_70 */
- psp_ring_reg = upper_32_bits(ring->ring_mem_mc_addr);
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_70, psp_ring_reg);
- /* Write size of ring to C2PMSG_71 */
- psp_ring_reg = ring->ring_size;
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_71, psp_ring_reg);
- /* Write the ring initialization command to C2PMSG_64 */
- psp_ring_reg = ring_type;
- psp_ring_reg = psp_ring_reg << 16;
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, psp_ring_reg);
-
- /* there might be handshake issue with hardware which needs delay */
- mdelay(20);
-
- /* Wait for response flag (bit 31) in C2PMSG_64 */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
- 0x80000000, 0x8000FFFF, false);
+ if (psp_v11_0_support_vmr_ring(psp)) {
+ /* Write low address of the ring to C2PMSG_102 */
+ psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr);
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, psp_ring_reg);
+ /* Write high address of the ring to C2PMSG_103 */
+ psp_ring_reg = upper_32_bits(ring->ring_mem_mc_addr);
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_103, psp_ring_reg);
+
+ /* Write the ring initialization command to C2PMSG_101 */
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101,
+ GFX_CTRL_CMD_ID_INIT_GPCOM_RING);
+
+		/* there might be a handshake issue with hardware which needs a delay */
+ mdelay(20);
+
+ /* Wait for response flag (bit 31) in C2PMSG_101 */
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
+ 0x80000000, 0x8000FFFF, false);
+
+ } else {
+ /* Write low address of the ring to C2PMSG_69 */
+ psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr);
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_69, psp_ring_reg);
+ /* Write high address of the ring to C2PMSG_70 */
+ psp_ring_reg = upper_32_bits(ring->ring_mem_mc_addr);
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_70, psp_ring_reg);
+ /* Write size of ring to C2PMSG_71 */
+ psp_ring_reg = ring->ring_size;
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_71, psp_ring_reg);
+ /* Write the ring initialization command to C2PMSG_64 */
+ psp_ring_reg = ring_type;
+ psp_ring_reg = psp_ring_reg << 16;
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, psp_ring_reg);
+
+		/* there might be a handshake issue with hardware which needs a delay */
+ mdelay(20);
+
+ /* Wait for response flag (bit 31) in C2PMSG_64 */
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
+ 0x80000000, 0x8000FFFF, false);
+ }
return ret;
}
@@ -326,15 +379,24 @@ static int psp_v11_0_ring_stop(struct psp_context *psp,
int ret = 0;
struct amdgpu_device *adev = psp->adev;
- /* Write the ring destroy command to C2PMSG_64 */
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, GFX_CTRL_CMD_ID_DESTROY_RINGS);
+	/* Write the ring destroy command */
+ if (psp_v11_0_support_vmr_ring(psp))
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101,
+ GFX_CTRL_CMD_ID_DESTROY_GPCOM_RING);
+ else
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64,
+ GFX_CTRL_CMD_ID_DESTROY_RINGS);
	/* there might be a handshake issue with hardware which needs a delay */
mdelay(20);
- /* Wait for response flag (bit 31) in C2PMSG_64 */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
- 0x80000000, 0x80000000, false);
+ /* Wait for response flag (bit 31) */
+ if (psp_v11_0_support_vmr_ring(psp))
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
+ 0x80000000, 0x80000000, false);
+ else
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
+ 0x80000000, 0x80000000, false);
return ret;
}
@@ -373,7 +435,10 @@ static int psp_v11_0_cmd_submit(struct psp_context *psp,
uint32_t rb_frame_size_dw = sizeof(struct psp_gfx_rb_frame) / 4;
/* KM (GPCOM) prepare write pointer */
- psp_write_ptr_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67);
+ if (psp_v11_0_support_vmr_ring(psp))
+ psp_write_ptr_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102);
+ else
+ psp_write_ptr_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67);
/* Update KM RB frame pointer to new frame */
/* write_frame ptr increments by size of rb_frame in bytes */
@@ -402,7 +467,11 @@ static int psp_v11_0_cmd_submit(struct psp_context *psp,
/* Update the write Pointer in DWORDs */
psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw;
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, psp_write_ptr_reg);
+ if (psp_v11_0_support_vmr_ring(psp)) {
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, psp_write_ptr_reg);
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101, GFX_CTRL_CMD_ID_CONSUME_CMD);
+ } else
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, psp_write_ptr_reg);
return 0;
}
@@ -547,7 +616,7 @@ static int psp_v11_0_mode1_reset(struct psp_context *psp)
	/* send the mode 1 reset command */
WREG32(offset, GFX_CTRL_CMD_ID_MODE1_RST);
- mdelay(1000);
+ msleep(500);
offset = SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_33);
@@ -640,7 +709,7 @@ static int psp_v11_0_xgmi_set_topology_info(struct psp_context *psp,
return psp_xgmi_invoke(psp, TA_COMMAND_XGMI__SET_TOPOLOGY_INFO);
}
-static u64 psp_v11_0_xgmi_get_hive_id(struct psp_context *psp)
+static int psp_v11_0_xgmi_get_hive_id(struct psp_context *psp, uint64_t *hive_id)
{
struct ta_xgmi_shared_memory *xgmi_cmd;
int ret;
@@ -653,12 +722,14 @@ static u64 psp_v11_0_xgmi_get_hive_id(struct psp_context *psp)
/* Invoke xgmi ta to get hive id */
ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
if (ret)
- return 0;
- else
- return xgmi_cmd->xgmi_out_message.get_hive_id.hive_id;
+ return ret;
+
+ *hive_id = xgmi_cmd->xgmi_out_message.get_hive_id.hive_id;
+
+ return 0;
}
-static u64 psp_v11_0_xgmi_get_node_id(struct psp_context *psp)
+static int psp_v11_0_xgmi_get_node_id(struct psp_context *psp, uint64_t *node_id)
{
struct ta_xgmi_shared_memory *xgmi_cmd;
int ret;
@@ -671,9 +742,11 @@ static u64 psp_v11_0_xgmi_get_node_id(struct psp_context *psp)
/* Invoke xgmi ta to get the node id */
ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
if (ret)
- return 0;
- else
- return xgmi_cmd->xgmi_out_message.get_node_id.node_id;
+ return ret;
+
+ *node_id = xgmi_cmd->xgmi_out_message.get_node_id.node_id;
+
+ return 0;
}
static const struct psp_funcs psp_v11_0_funcs = {
@@ -692,6 +765,7 @@ static const struct psp_funcs psp_v11_0_funcs = {
.xgmi_set_topology_info = psp_v11_0_xgmi_set_topology_info,
.xgmi_get_hive_id = psp_v11_0_xgmi_get_hive_id,
.xgmi_get_node_id = psp_v11_0_xgmi_get_node_id,
+ .support_vmr_ring = psp_v11_0_support_vmr_ring,
};
void psp_v11_0_set_psp_funcs(struct psp_context *psp)
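
The ASD firmware hunk in psp_v11_0_init_microcode() above moves the function to the usual unwind idiom: each later failure jumps to a label that releases everything acquired so far, in reverse order. A generic sketch of that shape, with placeholder names and types rather than the driver's:

/* Illustrative sketch; all names and types are placeholders. */
struct example_blob { const void *data; };
struct example_ctx { struct example_blob sos, asd, ta; };

int example_load(struct example_blob *blob);	/* assumed helpers */
void example_release(struct example_blob *blob);

static int example_load_three_blobs(struct example_ctx *ctx)
{
	int err;

	err = example_load(&ctx->sos);
	if (err)
		goto out;		/* nothing else held yet */

	err = example_load(&ctx->asd);
	if (err)
		goto out1;

	err = example_load(&ctx->ta);
	if (err)
		goto out2;

	return 0;

out2:
	example_release(&ctx->asd);	/* undo in reverse order of acquisition */
out1:
	example_release(&ctx->sos);
out:
	return err;
}
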
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
index 9cea0bbe4525..79694ff16969 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
@@ -240,8 +240,11 @@ static int psp_v3_1_bootloader_load_sos(struct psp_context *psp)
* are already been loaded.
*/
sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
- if (sol_reg)
+ if (sol_reg) {
+ psp->sos_fw_version = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_58);
+ printk("sos fw version = 0x%x.\n", psp->sos_fw_version);
return 0;
+ }
/* Wait for bootloader to signify that is ready having bit 31 of C2PMSG_35 set to 1 */
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
@@ -356,12 +359,9 @@ static int psp_v3_1_ring_stop(struct psp_context *psp,
enum psp_ring_type ring_type)
{
int ret = 0;
- struct psp_ring *ring;
unsigned int psp_ring_reg = 0;
struct amdgpu_device *adev = psp->adev;
- ring = &psp->km_ring;
-
/* Write the ring destroy command to C2PMSG_64 */
psp_ring_reg = 3 << 16;
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, psp_ring_reg);
@@ -593,9 +593,9 @@ static int psp_v3_1_mode1_reset(struct psp_context *psp)
}
	/* send the mode 1 reset command */
- WREG32(offset, 0x70000);
+ WREG32(offset, GFX_CTRL_CMD_ID_MODE1_RST);
- mdelay(1000);
+ msleep(500);
offset = SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_33);
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index b6a25f92d566..1bccc5fe2d9d 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -1146,7 +1146,7 @@ static int sdma_v3_0_sw_init(void *handle)
if (!amdgpu_sriov_vf(adev)) {
ring->use_doorbell = true;
ring->doorbell_index = (i == 0) ?
- AMDGPU_DOORBELL_sDMA_ENGINE0 : AMDGPU_DOORBELL_sDMA_ENGINE1;
+ adev->doorbell_index.sdma_engine0 : adev->doorbell_index.sdma_engine1;
} else {
ring->use_pollmem = true;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index f4490cdd9804..fd0bfe140ee0 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -925,11 +925,9 @@ static void sdma_v4_0_page_resume(struct amdgpu_device *adev, unsigned int i)
OFFSET, ring->doorbell_index);
WREG32_SDMA(i, mmSDMA0_PAGE_DOORBELL, doorbell);
WREG32_SDMA(i, mmSDMA0_PAGE_DOORBELL_OFFSET, doorbell_offset);
- /* TODO: enable doorbell support */
- /*adev->nbio_funcs->sdma_doorbell_range(adev, i, ring->use_doorbell,
- ring->doorbell_index);*/
- sdma_v4_0_ring_set_wptr(ring);
+	/* the paging queue doorbell range is set up in sdma_v4_0_gfx_resume */
+ sdma_v4_0_page_ring_set_wptr(ring);
/* set minor_ptr_update to 0 after wptr programed */
WREG32_SDMA(i, mmSDMA0_PAGE_MINOR_PTR_UPDATE, 0);
@@ -1449,23 +1447,45 @@ static void sdma_v4_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
sdma_v4_0_wait_reg_mem(ring, 0, 0, reg, 0, val, mask, 10);
}
+static bool sdma_v4_0_fw_support_paging_queue(struct amdgpu_device *adev)
+{
+ uint fw_version = adev->sdma.instance[0].fw_version;
+
+ switch (adev->asic_type) {
+ case CHIP_VEGA10:
+ return fw_version >= 430;
+ case CHIP_VEGA12:
+ /*return fw_version >= 31;*/
+ return false;
+ case CHIP_VEGA20:
+ return fw_version >= 123;
+ default:
+ return false;
+ }
+}
+
static int sdma_v4_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int r;
- if (adev->asic_type == CHIP_RAVEN) {
+ if (adev->asic_type == CHIP_RAVEN)
adev->sdma.num_instances = 1;
- adev->sdma.has_page_queue = false;
- } else {
+ else
adev->sdma.num_instances = 2;
- /* TODO: Page queue breaks driver reload under SRIOV */
- if ((adev->asic_type == CHIP_VEGA10) && amdgpu_sriov_vf((adev)))
- adev->sdma.has_page_queue = false;
- else if (adev->asic_type != CHIP_VEGA20 &&
- adev->asic_type != CHIP_VEGA12)
- adev->sdma.has_page_queue = true;
+
+ r = sdma_v4_0_init_microcode(adev);
+ if (r) {
+ DRM_ERROR("Failed to load sdma firmware!\n");
+ return r;
}
+ /* TODO: Page queue breaks driver reload under SRIOV */
+ if ((adev->asic_type == CHIP_VEGA10) && amdgpu_sriov_vf((adev)))
+ adev->sdma.has_page_queue = false;
+ else if (sdma_v4_0_fw_support_paging_queue(adev))
+ adev->sdma.has_page_queue = true;
+
sdma_v4_0_set_ring_funcs(adev);
sdma_v4_0_set_buffer_funcs(adev);
sdma_v4_0_set_vm_pte_funcs(adev);
@@ -1474,7 +1494,6 @@ static int sdma_v4_0_early_init(void *handle)
return 0;
}
-
static int sdma_v4_0_sw_init(void *handle)
{
struct amdgpu_ring *ring;
@@ -1493,12 +1512,6 @@ static int sdma_v4_0_sw_init(void *handle)
if (r)
return r;
- r = sdma_v4_0_init_microcode(adev);
- if (r) {
- DRM_ERROR("Failed to load sdma firmware!\n");
- return r;
- }
-
for (i = 0; i < adev->sdma.num_instances; i++) {
ring = &adev->sdma.instance[i].ring;
ring->ring_obj = NULL;
@@ -1507,15 +1520,10 @@ static int sdma_v4_0_sw_init(void *handle)
DRM_INFO("use_doorbell being set to: [%s]\n",
ring->use_doorbell?"true":"false");
- if (adev->asic_type == CHIP_VEGA10)
- ring->doorbell_index = (i == 0) ?
- (AMDGPU_VEGA10_DOORBELL64_sDMA_ENGINE0 << 1) //get DWORD offset
- : (AMDGPU_VEGA10_DOORBELL64_sDMA_ENGINE1 << 1); // get DWORD offset
- else
- ring->doorbell_index = (i == 0) ?
- (AMDGPU_DOORBELL64_sDMA_ENGINE0 << 1) //get DWORD offset
- : (AMDGPU_DOORBELL64_sDMA_ENGINE1 << 1); // get DWORD offset
-
+ /* doorbell size is 2 dwords, get DWORD offset */
+ ring->doorbell_index = (i == 0) ?
+ (adev->doorbell_index.sdma_engine0 << 1)
+ : (adev->doorbell_index.sdma_engine1 << 1);
sprintf(ring->name, "sdma%d", i);
r = amdgpu_ring_init(adev, ring, 1024,
@@ -1529,7 +1537,15 @@ static int sdma_v4_0_sw_init(void *handle)
if (adev->sdma.has_page_queue) {
ring = &adev->sdma.instance[i].page;
ring->ring_obj = NULL;
- ring->use_doorbell = false;
+ ring->use_doorbell = true;
+
+			/* the paging queue uses the same doorbell index/routing
+			 * as the gfx queue, with a 0x400-dword (4096-byte) offset
+			 * onto the second doorbell page
+			 */
+ ring->doorbell_index = (i == 0) ?
+ (adev->doorbell_index.sdma_engine0 << 1)
+ : (adev->doorbell_index.sdma_engine1 << 1);
+ ring->doorbell_index += 0x400;
sprintf(ring->name, "page%d", i);
r = amdgpu_ring_init(adev, ring, 1024,
@@ -1689,13 +1705,15 @@ static int sdma_v4_0_process_trap_irq(struct amdgpu_device *adev,
amdgpu_fence_process(&adev->sdma.instance[instance].ring);
break;
case 1:
- /* XXX compute */
+ if (adev->asic_type == CHIP_VEGA20)
+ amdgpu_fence_process(&adev->sdma.instance[instance].page);
break;
case 2:
/* XXX compute */
break;
case 3:
- amdgpu_fence_process(&adev->sdma.instance[instance].page);
+ if (adev->asic_type != CHIP_VEGA20)
+ amdgpu_fence_process(&adev->sdma.instance[instance].page);
break;
}
return 0;
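
The paging-queue hunk above reuses the gfx queue's doorbell slot and bumps it by 0x400 dwords onto the next doorbell page. A small sketch of the index math, assuming a placeholder engine base value:

/* Illustrative sketch; the engine base value is an assumption. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t sdma_engine0 = 0xe0;		/* assumed per-ASIC doorbell slot */

	/* Each 64-bit doorbell occupies two dwords, hence the << 1. */
	uint32_t gfx_db  = sdma_engine0 << 1;
	uint32_t page_db = gfx_db + 0x400;	/* same routing, next doorbell page */

	printf("sdma0 gfx  queue doorbell dword index: %#x\n", gfx_db);
	printf("sdma0 page queue doorbell dword index: %#x\n", page_db);
	return 0;
}
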
diff --git a/drivers/gpu/drm/amd/amdgpu/si_ih.c b/drivers/gpu/drm/amd/amdgpu/si_ih.c
index b3d7d9f83202..2938fb9f17cc 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_ih.c
@@ -118,19 +118,6 @@ static u32 si_ih_get_wptr(struct amdgpu_device *adev)
return (wptr & adev->irq.ih.ptr_mask);
}
-/**
- * si_ih_prescreen_iv - prescreen an interrupt vector
- *
- * @adev: amdgpu_device pointer
- *
- * Returns true if the interrupt vector should be further processed.
- */
-static bool si_ih_prescreen_iv(struct amdgpu_device *adev)
-{
- /* Process all interrupts */
- return true;
-}
-
static void si_ih_decode_iv(struct amdgpu_device *adev,
struct amdgpu_iv_entry *entry)
{
@@ -301,7 +288,6 @@ static const struct amd_ip_funcs si_ih_ip_funcs = {
static const struct amdgpu_ih_funcs si_ih_funcs = {
.get_wptr = si_ih_get_wptr,
- .prescreen_iv = si_ih_prescreen_iv,
.decode_iv = si_ih_decode_iv,
.set_rptr = si_ih_set_rptr
};
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 4cc0dcb1a187..8849b74078d6 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -507,6 +507,9 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
return -EINVAL;
}
+ if (adev->asic_type == CHIP_VEGA20)
+ adev->gmc.xgmi.supported = true;
+
if (adev->flags & AMD_IS_APU)
adev->nbio_funcs = &nbio_v7_0_funcs;
else if (adev->asic_type == CHIP_VEGA20)
@@ -613,6 +616,24 @@ static const struct amdgpu_asic_funcs soc15_asic_funcs =
.flush_hdp = &soc15_flush_hdp,
.invalidate_hdp = &soc15_invalidate_hdp,
.need_full_reset = &soc15_need_full_reset,
+ .init_doorbell_index = &vega10_doorbell_index_init,
+};
+
+static const struct amdgpu_asic_funcs vega20_asic_funcs =
+{
+ .read_disabled_bios = &soc15_read_disabled_bios,
+ .read_bios_from_rom = &soc15_read_bios_from_rom,
+ .read_register = &soc15_read_register,
+ .reset = &soc15_asic_reset,
+ .set_vga_state = &soc15_vga_set_state,
+ .get_xclk = &soc15_get_xclk,
+ .set_uvd_clocks = &soc15_set_uvd_clocks,
+ .set_vce_clocks = &soc15_set_vce_clocks,
+ .get_config_memsize = &soc15_get_config_memsize,
+ .flush_hdp = &soc15_flush_hdp,
+ .invalidate_hdp = &soc15_invalidate_hdp,
+ .need_full_reset = &soc15_need_full_reset,
+ .init_doorbell_index = &vega20_doorbell_index_init,
};
static int soc15_common_early_init(void *handle)
@@ -632,11 +653,11 @@ static int soc15_common_early_init(void *handle)
adev->se_cac_rreg = &soc15_se_cac_rreg;
adev->se_cac_wreg = &soc15_se_cac_wreg;
- adev->asic_funcs = &soc15_asic_funcs;
adev->external_rev_id = 0xFF;
switch (adev->asic_type) {
case CHIP_VEGA10:
+ adev->asic_funcs = &soc15_asic_funcs;
adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_MGLS |
AMD_CG_SUPPORT_GFX_RLC_LS |
@@ -660,6 +681,7 @@ static int soc15_common_early_init(void *handle)
adev->external_rev_id = 0x1;
break;
case CHIP_VEGA12:
+ adev->asic_funcs = &soc15_asic_funcs;
adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_MGLS |
AMD_CG_SUPPORT_GFX_CGCG |
@@ -682,6 +704,7 @@ static int soc15_common_early_init(void *handle)
adev->external_rev_id = adev->rev_id + 0x14;
break;
case CHIP_VEGA20:
+ adev->asic_funcs = &vega20_asic_funcs;
adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_MGLS |
AMD_CG_SUPPORT_GFX_CGCG |
@@ -704,6 +727,7 @@ static int soc15_common_early_init(void *handle)
adev->external_rev_id = adev->rev_id + 0x28;
break;
case CHIP_RAVEN:
+ adev->asic_funcs = &soc15_asic_funcs;
if (adev->rev_id >= 0x8)
adev->external_rev_id = adev->rev_id + 0x81;
else if (adev->pdev->device == 0x15d8)
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.h b/drivers/gpu/drm/amd/amdgpu/soc15.h
index f8ad7804dc40..a66c8bfbbaa6 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.h
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.h
@@ -58,4 +58,6 @@ void soc15_program_register_sequence(struct amdgpu_device *adev,
int vega10_reg_base_init(struct amdgpu_device *adev);
int vega20_reg_base_init(struct amdgpu_device *adev);
+void vega10_doorbell_index_init(struct amdgpu_device *adev);
+void vega20_doorbell_index_init(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15_common.h b/drivers/gpu/drm/amd/amdgpu/soc15_common.h
index 958b10a57073..49c262540940 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15_common.h
+++ b/drivers/gpu/drm/amd/amdgpu/soc15_common.h
@@ -49,14 +49,19 @@
#define SOC15_WAIT_ON_RREG(ip, inst, reg, expected_value, mask, ret) \
do { \
+ uint32_t old_ = 0; \
uint32_t tmp_ = RREG32(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg); \
uint32_t loop = adev->usec_timeout; \
while ((tmp_ & (mask)) != (expected_value)) { \
- udelay(2); \
+ if (old_ != tmp_) { \
+ loop = adev->usec_timeout; \
+ old_ = tmp_; \
+ } else \
+ udelay(1); \
tmp_ = RREG32(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg); \
loop--; \
if (!loop) { \
- DRM_ERROR("Register(%d) [%s] failed to reach value 0x%08x != 0x%08x\n", \
+ DRM_WARN("Register(%d) [%s] failed to reach value 0x%08x != 0x%08x\n", \
inst, #reg, (unsigned)expected_value, (unsigned)(tmp_ & (mask))); \
ret = -ETIMEDOUT; \
break; \
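
The SOC15_WAIT_ON_RREG change above restarts the countdown whenever the polled value moves, so only a register that is truly stuck exhausts the budget, and it downgrades the timeout message from an error to a warning. A stand-alone sketch of that polling strategy, assuming a caller-supplied read callback; a real loop would also delay between reads:

/* Illustrative sketch; not part of the patch. */
#include <stdbool.h>
#include <stdint.h>

/* Wait until (read() & mask) == expected, resetting the countdown every time
 * the observed value changes so slowly progressing hardware is not cut off. */
static bool example_wait_on_reg(uint32_t (*read)(void), uint32_t mask,
				uint32_t expected, unsigned int budget)
{
	uint32_t old = 0;
	uint32_t val = read();
	unsigned int loop = budget;

	while ((val & mask) != expected) {
		if (old != val) {
			loop = budget;		/* value moved: reset the budget */
			old = val;
		} else if (--loop == 0) {
			return false;		/* genuinely stuck */
		}
		val = read();
	}
	return true;
}
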
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
index 3abffd06b5c7..15da06ddeb75 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
@@ -219,34 +219,6 @@ static u32 tonga_ih_get_wptr(struct amdgpu_device *adev)
}
/**
- * tonga_ih_prescreen_iv - prescreen an interrupt vector
- *
- * @adev: amdgpu_device pointer
- *
- * Returns true if the interrupt vector should be further processed.
- */
-static bool tonga_ih_prescreen_iv(struct amdgpu_device *adev)
-{
- u32 ring_index = adev->irq.ih.rptr >> 2;
- u16 pasid;
-
- switch (le32_to_cpu(adev->irq.ih.ring[ring_index]) & 0xff) {
- case 146:
- case 147:
- pasid = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]) >> 16;
- if (!pasid || amdgpu_vm_pasid_fault_credit(adev, pasid))
- return true;
- break;
- default:
- /* Not a VM fault */
- return true;
- }
-
- adev->irq.ih.rptr += 16;
- return false;
-}
-
-/**
* tonga_ih_decode_iv - decode an interrupt vector
*
* @adev: amdgpu_device pointer
@@ -322,7 +294,7 @@ static int tonga_ih_sw_init(void *handle)
return r;
adev->irq.ih.use_doorbell = true;
- adev->irq.ih.doorbell_index = AMDGPU_DOORBELL_IH;
+ adev->irq.ih.doorbell_index = adev->doorbell_index.ih;
r = amdgpu_irq_init(adev);
@@ -506,7 +478,6 @@ static const struct amd_ip_funcs tonga_ih_ip_funcs = {
static const struct amdgpu_ih_funcs tonga_ih_funcs = {
.get_wptr = tonga_ih_get_wptr,
- .prescreen_iv = tonga_ih_prescreen_iv,
.decode_iv = tonga_ih_decode_iv,
.set_rptr = tonga_ih_set_rptr
};
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
index 90bbcee00f28..d69c8f6daaf8 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
@@ -116,16 +116,16 @@ static int uvd_v4_2_sw_init(void *handle)
if (r)
return r;
- r = amdgpu_uvd_resume(adev);
- if (r)
- return r;
-
ring = &adev->uvd.inst->ring;
sprintf(ring->name, "uvd");
r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
if (r)
return r;
+ r = amdgpu_uvd_resume(adev);
+ if (r)
+ return r;
+
r = amdgpu_uvd_entity_init(adev);
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
index 1c5e12703103..ee8cd06ddc38 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
@@ -113,16 +113,16 @@ static int uvd_v5_0_sw_init(void *handle)
if (r)
return r;
- r = amdgpu_uvd_resume(adev);
- if (r)
- return r;
-
ring = &adev->uvd.inst->ring;
sprintf(ring->name, "uvd");
r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
if (r)
return r;
+ r = amdgpu_uvd_resume(adev);
+ if (r)
+ return r;
+
r = amdgpu_uvd_entity_init(adev);
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index f184842ef2a2..d4f4a66f8324 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -400,16 +400,16 @@ static int uvd_v6_0_sw_init(void *handle)
DRM_INFO("UVD ENC is disabled\n");
}
- r = amdgpu_uvd_resume(adev);
- if (r)
- return r;
-
ring = &adev->uvd.inst->ring;
sprintf(ring->name, "uvd");
r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
if (r)
return r;
+ r = amdgpu_uvd_resume(adev);
+ if (r)
+ return r;
+
if (uvd_v6_0_enc_support(adev)) {
for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
ring = &adev->uvd.inst->ring_enc[i];
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
index 8a4595968d98..aef924026a28 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -430,16 +430,12 @@ static int uvd_v7_0_sw_init(void *handle)
DRM_INFO("PSP loading UVD firmware\n");
}
- r = amdgpu_uvd_resume(adev);
- if (r)
- return r;
-
for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
if (adev->uvd.harvest_config & (1 << j))
continue;
if (!amdgpu_sriov_vf(adev)) {
ring = &adev->uvd.inst[j].ring;
- sprintf(ring->name, "uvd<%d>", j);
+ sprintf(ring->name, "uvd_%d", ring->me);
r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst[j].irq, 0);
if (r)
return r;
@@ -447,7 +443,7 @@ static int uvd_v7_0_sw_init(void *handle)
for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
ring = &adev->uvd.inst[j].ring_enc[i];
- sprintf(ring->name, "uvd_enc%d<%d>", i, j);
+ sprintf(ring->name, "uvd_enc_%d.%d", ring->me, i);
if (amdgpu_sriov_vf(adev)) {
ring->use_doorbell = true;
@@ -455,9 +451,9 @@ static int uvd_v7_0_sw_init(void *handle)
* sriov, so set unused location for other unused rings.
*/
if (i == 0)
- ring->doorbell_index = AMDGPU_DOORBELL64_UVD_RING0_1 * 2;
+ ring->doorbell_index = adev->doorbell_index.uvd_vce.uvd_ring0_1 * 2;
else
- ring->doorbell_index = AMDGPU_DOORBELL64_UVD_RING2_3 * 2 + 1;
+ ring->doorbell_index = adev->doorbell_index.uvd_vce.uvd_ring2_3 * 2 + 1;
}
r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst[j].irq, 0);
if (r)
@@ -465,6 +461,10 @@ static int uvd_v7_0_sw_init(void *handle)
}
}
+ r = amdgpu_uvd_resume(adev);
+ if (r)
+ return r;
+
r = amdgpu_uvd_entity_init(adev);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index 3e84840859a7..2668effadd27 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -37,7 +37,6 @@
#include "gca/gfx_8_0_d.h"
#include "smu/smu_7_1_2_d.h"
#include "smu/smu_7_1_2_sh_mask.h"
-#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_sh_mask.h"
#include "ivsrcid/ivsrcid_vislands30.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
index 0054ba1b9a68..9fb34b7d8e03 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
@@ -466,9 +466,9 @@ static int vce_v4_0_sw_init(void *handle)
* so set unused location for other unused rings.
*/
if (i == 0)
- ring->doorbell_index = AMDGPU_DOORBELL64_VCE_RING0_1 * 2;
+ ring->doorbell_index = adev->doorbell_index.uvd_vce.vce_ring0_1 * 2;
else
- ring->doorbell_index = AMDGPU_DOORBELL64_VCE_RING2_3 * 2 + 1;
+ ring->doorbell_index = adev->doorbell_index.uvd_vce.vce_ring2_3 * 2 + 1;
}
r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0);
if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index c1a03505f956..89bb2fef90eb 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -48,6 +48,7 @@ static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev);
static void vcn_v1_0_set_jpeg_ring_funcs(struct amdgpu_device *adev);
static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev);
static void vcn_v1_0_jpeg_ring_set_patch_ring(struct amdgpu_ring *ring, uint32_t ptr);
+static int vcn_v1_0_set_powergating_state(void *handle, enum amd_powergating_state state);
/**
* vcn_v1_0_early_init - set function pointers
@@ -213,8 +214,9 @@ static int vcn_v1_0_hw_fini(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct amdgpu_ring *ring = &adev->vcn.ring_dec;
- if (RREG32_SOC15(VCN, 0, mmUVD_STATUS))
- vcn_v1_0_stop(adev);
+ if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
+ RREG32_SOC15(VCN, 0, mmUVD_STATUS))
+ vcn_v1_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
ring->sched.ready = false;
@@ -1086,7 +1088,8 @@ static int vcn_v1_0_start_dpg_mode(struct amdgpu_device *adev)
WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), 0,
~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);
- /* initialize wptr */
+ /* initialize JPEG wptr */
+ ring = &adev->vcn.ring_jpeg;
ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR);
/* copy patch commands to the jpeg ring */
@@ -1158,21 +1161,29 @@ static int vcn_v1_0_stop_spg_mode(struct amdgpu_device *adev)
static int vcn_v1_0_stop_dpg_mode(struct amdgpu_device *adev)
{
int ret_code = 0;
+ uint32_t tmp;
/* Wait for power status to be UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF */
SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF,
UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
- if (!ret_code) {
- int tmp = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR) & 0x7FFFFFFF;
- /* wait for read ptr to be equal to write ptr */
- SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF, ret_code);
+ /* wait for read ptr to be equal to write ptr */
+ tmp = RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR);
+ SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RB_RPTR, tmp, 0xFFFFFFFF, ret_code);
- SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
- UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF,
- UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
- }
+ tmp = RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2);
+ SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RB_RPTR2, tmp, 0xFFFFFFFF, ret_code);
+
+ tmp = RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR);
+ SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_JRBC_RB_RPTR, tmp, 0xFFFFFFFF, ret_code);
+
+ tmp = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR) & 0x7FFFFFFF;
+ SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF, ret_code);
+
+ SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
+ UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF,
+ UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
/* disable dynamic power gating mode */
WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS), 0,
diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
index a0fda6f9252a..2c250b01a903 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
@@ -220,90 +220,6 @@ static u32 vega10_ih_get_wptr(struct amdgpu_device *adev)
}
/**
- * vega10_ih_prescreen_iv - prescreen an interrupt vector
- *
- * @adev: amdgpu_device pointer
- *
- * Returns true if the interrupt vector should be further processed.
- */
-static bool vega10_ih_prescreen_iv(struct amdgpu_device *adev)
-{
- u32 ring_index = adev->irq.ih.rptr >> 2;
- u32 dw0, dw3, dw4, dw5;
- u16 pasid;
- u64 addr, key;
- struct amdgpu_vm *vm;
- int r;
-
- dw0 = le32_to_cpu(adev->irq.ih.ring[ring_index + 0]);
- dw3 = le32_to_cpu(adev->irq.ih.ring[ring_index + 3]);
- dw4 = le32_to_cpu(adev->irq.ih.ring[ring_index + 4]);
- dw5 = le32_to_cpu(adev->irq.ih.ring[ring_index + 5]);
-
- /* Filter retry page faults, let only the first one pass. If
- * there are too many outstanding faults, ignore them until
- * some faults get cleared.
- */
- switch (dw0 & 0xff) {
- case SOC15_IH_CLIENTID_VMC:
- case SOC15_IH_CLIENTID_UTCL2:
- break;
- default:
- /* Not a VM fault */
- return true;
- }
-
- pasid = dw3 & 0xffff;
- /* No PASID, can't identify faulting process */
- if (!pasid)
- return true;
-
- /* Not a retry fault, check fault credit */
- if (!(dw5 & 0x80)) {
- if (!amdgpu_vm_pasid_fault_credit(adev, pasid))
- goto ignore_iv;
- return true;
- }
-
- /* Track retry faults in per-VM fault FIFO. */
- spin_lock(&adev->vm_manager.pasid_lock);
- vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
- addr = ((u64)(dw5 & 0xf) << 44) | ((u64)dw4 << 12);
- key = AMDGPU_VM_FAULT(pasid, addr);
- if (!vm) {
- /* VM not found, process it normally */
- spin_unlock(&adev->vm_manager.pasid_lock);
- return true;
- } else {
- r = amdgpu_vm_add_fault(vm->fault_hash, key);
-
- /* Hash table is full or the fault is already being processed,
- * ignore further page faults
- */
- if (r != 0) {
- spin_unlock(&adev->vm_manager.pasid_lock);
- goto ignore_iv;
- }
- }
- /* No locking required with single writer and single reader */
- r = kfifo_put(&vm->faults, key);
- if (!r) {
- /* FIFO is full. Ignore it until there is space */
- amdgpu_vm_clear_fault(vm->fault_hash, key);
- spin_unlock(&adev->vm_manager.pasid_lock);
- goto ignore_iv;
- }
-
- spin_unlock(&adev->vm_manager.pasid_lock);
- /* It's the first fault for this address, process it normally */
- return true;
-
-ignore_iv:
- adev->irq.ih.rptr += 32;
- return false;
-}
-
-/**
* vega10_ih_decode_iv - decode an interrupt vector
*
* @adev: amdgpu_device pointer
@@ -385,7 +301,7 @@ static int vega10_ih_sw_init(void *handle)
return r;
adev->irq.ih.use_doorbell = true;
- adev->irq.ih.doorbell_index = AMDGPU_DOORBELL64_IH << 1;
+ adev->irq.ih.doorbell_index = adev->doorbell_index.ih << 1;
r = amdgpu_irq_init(adev);
@@ -487,7 +403,6 @@ const struct amd_ip_funcs vega10_ih_ip_funcs = {
static const struct amdgpu_ih_funcs vega10_ih_funcs = {
.get_wptr = vega10_ih_get_wptr,
- .prescreen_iv = vega10_ih_prescreen_iv,
.decode_iv = vega10_ih_decode_iv,
.set_rptr = vega10_ih_set_rptr
};
diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c b/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c
index c5c9b2bc190d..422674bb3cdf 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c
@@ -56,4 +56,32 @@ int vega10_reg_base_init(struct amdgpu_device *adev)
return 0;
}
+void vega10_doorbell_index_init(struct amdgpu_device *adev)
+{
+ adev->doorbell_index.kiq = AMDGPU_DOORBELL64_KIQ;
+ adev->doorbell_index.mec_ring0 = AMDGPU_DOORBELL64_MEC_RING0;
+ adev->doorbell_index.mec_ring1 = AMDGPU_DOORBELL64_MEC_RING1;
+ adev->doorbell_index.mec_ring2 = AMDGPU_DOORBELL64_MEC_RING2;
+ adev->doorbell_index.mec_ring3 = AMDGPU_DOORBELL64_MEC_RING3;
+ adev->doorbell_index.mec_ring4 = AMDGPU_DOORBELL64_MEC_RING4;
+ adev->doorbell_index.mec_ring5 = AMDGPU_DOORBELL64_MEC_RING5;
+ adev->doorbell_index.mec_ring6 = AMDGPU_DOORBELL64_MEC_RING6;
+ adev->doorbell_index.mec_ring7 = AMDGPU_DOORBELL64_MEC_RING7;
+ adev->doorbell_index.userqueue_start = AMDGPU_DOORBELL64_USERQUEUE_START;
+ adev->doorbell_index.userqueue_end = AMDGPU_DOORBELL64_USERQUEUE_END;
+ adev->doorbell_index.gfx_ring0 = AMDGPU_DOORBELL64_GFX_RING0;
+ adev->doorbell_index.sdma_engine0 = AMDGPU_DOORBELL64_sDMA_ENGINE0;
+ adev->doorbell_index.sdma_engine1 = AMDGPU_DOORBELL64_sDMA_ENGINE1;
+ adev->doorbell_index.ih = AMDGPU_DOORBELL64_IH;
+ adev->doorbell_index.uvd_vce.uvd_ring0_1 = AMDGPU_DOORBELL64_UVD_RING0_1;
+ adev->doorbell_index.uvd_vce.uvd_ring2_3 = AMDGPU_DOORBELL64_UVD_RING2_3;
+ adev->doorbell_index.uvd_vce.uvd_ring4_5 = AMDGPU_DOORBELL64_UVD_RING4_5;
+ adev->doorbell_index.uvd_vce.uvd_ring6_7 = AMDGPU_DOORBELL64_UVD_RING6_7;
+ adev->doorbell_index.uvd_vce.vce_ring0_1 = AMDGPU_DOORBELL64_VCE_RING0_1;
+ adev->doorbell_index.uvd_vce.vce_ring2_3 = AMDGPU_DOORBELL64_VCE_RING2_3;
+ adev->doorbell_index.uvd_vce.vce_ring4_5 = AMDGPU_DOORBELL64_VCE_RING4_5;
+ adev->doorbell_index.uvd_vce.vce_ring6_7 = AMDGPU_DOORBELL64_VCE_RING6_7;
+ /* In unit of dword doorbell */
+ adev->doorbell_index.max_assignment = AMDGPU_DOORBELL64_MAX_ASSIGNMENT << 1;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c b/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c
index d13fc4fcb517..edce413fda9a 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c
@@ -54,4 +54,37 @@ int vega20_reg_base_init(struct amdgpu_device *adev)
return 0;
}
+void vega20_doorbell_index_init(struct amdgpu_device *adev)
+{
+ adev->doorbell_index.kiq = AMDGPU_VEGA20_DOORBELL_KIQ;
+ adev->doorbell_index.mec_ring0 = AMDGPU_VEGA20_DOORBELL_MEC_RING0;
+ adev->doorbell_index.mec_ring1 = AMDGPU_VEGA20_DOORBELL_MEC_RING1;
+ adev->doorbell_index.mec_ring2 = AMDGPU_VEGA20_DOORBELL_MEC_RING2;
+ adev->doorbell_index.mec_ring3 = AMDGPU_VEGA20_DOORBELL_MEC_RING3;
+ adev->doorbell_index.mec_ring4 = AMDGPU_VEGA20_DOORBELL_MEC_RING4;
+ adev->doorbell_index.mec_ring5 = AMDGPU_VEGA20_DOORBELL_MEC_RING5;
+ adev->doorbell_index.mec_ring6 = AMDGPU_VEGA20_DOORBELL_MEC_RING6;
+ adev->doorbell_index.mec_ring7 = AMDGPU_VEGA20_DOORBELL_MEC_RING7;
+ adev->doorbell_index.userqueue_start = AMDGPU_VEGA20_DOORBELL_USERQUEUE_START;
+ adev->doorbell_index.userqueue_end = AMDGPU_VEGA20_DOORBELL_USERQUEUE_END;
+ adev->doorbell_index.gfx_ring0 = AMDGPU_VEGA20_DOORBELL_GFX_RING0;
+ adev->doorbell_index.sdma_engine0 = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE0;
+ adev->doorbell_index.sdma_engine1 = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE1;
+ adev->doorbell_index.sdma_engine2 = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE2;
+ adev->doorbell_index.sdma_engine3 = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE3;
+ adev->doorbell_index.sdma_engine4 = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE4;
+ adev->doorbell_index.sdma_engine5 = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE5;
+ adev->doorbell_index.sdma_engine6 = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE6;
+ adev->doorbell_index.sdma_engine7 = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE7;
+ adev->doorbell_index.ih = AMDGPU_VEGA20_DOORBELL_IH;
+ adev->doorbell_index.uvd_vce.uvd_ring0_1 = AMDGPU_VEGA20_DOORBELL64_UVD_RING0_1;
+ adev->doorbell_index.uvd_vce.uvd_ring2_3 = AMDGPU_VEGA20_DOORBELL64_UVD_RING2_3;
+ adev->doorbell_index.uvd_vce.uvd_ring4_5 = AMDGPU_VEGA20_DOORBELL64_UVD_RING4_5;
+ adev->doorbell_index.uvd_vce.uvd_ring6_7 = AMDGPU_VEGA20_DOORBELL64_UVD_RING6_7;
+ adev->doorbell_index.uvd_vce.vce_ring0_1 = AMDGPU_VEGA20_DOORBELL64_VCE_RING0_1;
+ adev->doorbell_index.uvd_vce.vce_ring2_3 = AMDGPU_VEGA20_DOORBELL64_VCE_RING2_3;
+ adev->doorbell_index.uvd_vce.vce_ring4_5 = AMDGPU_VEGA20_DOORBELL64_VCE_RING4_5;
+ adev->doorbell_index.uvd_vce.vce_ring6_7 = AMDGPU_VEGA20_DOORBELL64_VCE_RING6_7;
+ adev->doorbell_index.max_assignment = AMDGPU_VEGA20_DOORBELL_MAX_ASSIGNMENT << 1;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 07880d35e9de..77e367459101 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -87,9 +87,9 @@ static u32 vi_pcie_rreg(struct amdgpu_device *adev, u32 reg)
u32 r;
spin_lock_irqsave(&adev->pcie_idx_lock, flags);
- WREG32(mmPCIE_INDEX, reg);
- (void)RREG32(mmPCIE_INDEX);
- r = RREG32(mmPCIE_DATA);
+ WREG32_NO_KIQ(mmPCIE_INDEX, reg);
+ (void)RREG32_NO_KIQ(mmPCIE_INDEX);
+ r = RREG32_NO_KIQ(mmPCIE_DATA);
spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
return r;
}
@@ -99,10 +99,10 @@ static void vi_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
unsigned long flags;
spin_lock_irqsave(&adev->pcie_idx_lock, flags);
- WREG32(mmPCIE_INDEX, reg);
- (void)RREG32(mmPCIE_INDEX);
- WREG32(mmPCIE_DATA, v);
- (void)RREG32(mmPCIE_DATA);
+ WREG32_NO_KIQ(mmPCIE_INDEX, reg);
+ (void)RREG32_NO_KIQ(mmPCIE_INDEX);
+ WREG32_NO_KIQ(mmPCIE_DATA, v);
+ (void)RREG32_NO_KIQ(mmPCIE_DATA);
spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}
@@ -123,8 +123,8 @@ static void vi_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
unsigned long flags;
spin_lock_irqsave(&adev->smc_idx_lock, flags);
- WREG32(mmSMC_IND_INDEX_11, (reg));
- WREG32(mmSMC_IND_DATA_11, (v));
+ WREG32_NO_KIQ(mmSMC_IND_INDEX_11, (reg));
+ WREG32_NO_KIQ(mmSMC_IND_DATA_11, (v));
spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}
@@ -955,6 +955,7 @@ static const struct amdgpu_asic_funcs vi_asic_funcs =
.flush_hdp = &vi_flush_hdp,
.invalidate_hdp = &vi_invalidate_hdp,
.need_full_reset = &vi_need_full_reset,
+ .init_doorbell_index = &legacy_doorbell_index_init,
};
#define CZ_REV_BRISTOL(rev) \
@@ -1712,3 +1713,21 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
return 0;
}
+
+void legacy_doorbell_index_init(struct amdgpu_device *adev)
+{
+ adev->doorbell_index.kiq = AMDGPU_DOORBELL_KIQ;
+ adev->doorbell_index.mec_ring0 = AMDGPU_DOORBELL_MEC_RING0;
+ adev->doorbell_index.mec_ring1 = AMDGPU_DOORBELL_MEC_RING1;
+ adev->doorbell_index.mec_ring2 = AMDGPU_DOORBELL_MEC_RING2;
+ adev->doorbell_index.mec_ring3 = AMDGPU_DOORBELL_MEC_RING3;
+ adev->doorbell_index.mec_ring4 = AMDGPU_DOORBELL_MEC_RING4;
+ adev->doorbell_index.mec_ring5 = AMDGPU_DOORBELL_MEC_RING5;
+ adev->doorbell_index.mec_ring6 = AMDGPU_DOORBELL_MEC_RING6;
+ adev->doorbell_index.mec_ring7 = AMDGPU_DOORBELL_MEC_RING7;
+ adev->doorbell_index.gfx_ring0 = AMDGPU_DOORBELL_GFX_RING0;
+ adev->doorbell_index.sdma_engine0 = AMDGPU_DOORBELL_sDMA_ENGINE0;
+ adev->doorbell_index.sdma_engine1 = AMDGPU_DOORBELL_sDMA_ENGINE1;
+ adev->doorbell_index.ih = AMDGPU_DOORBELL_IH;
+ adev->doorbell_index.max_assignment = AMDGPU_DOORBELL_MAX_ASSIGNMENT;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.h b/drivers/gpu/drm/amd/amdgpu/vi.h
index 0429fe332269..8de0772f986c 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.h
+++ b/drivers/gpu/drm/amd/amdgpu/vi.h
@@ -30,4 +30,5 @@ void vi_srbm_select(struct amdgpu_device *adev,
u32 me, u32 pipe, u32 queue, u32 vmid);
int vi_set_ip_blocks(struct amdgpu_device *adev);
+void legacy_doorbell_index_init(struct amdgpu_device *adev);
#endif