author    | Alex Deucher <alexdeucher@gmail.com>    | 2010-03-24 13:33:47 -0400
committer | Dave Airlie <airlied@redhat.com>        | 2010-04-09 10:16:00 +1000
commit    | 32fcdbf4084544c3d8fa413004d57e5dc6f2eefe (patch)
tree      | 179aaa9296f8ad63028d7274d0eed8039d428dce
parent    | 747943ea187e5acceb7ffc762ff2c84cb3449745 (diff)
drm/radeon/kms/evergreen: implement gfx init
This initializes the gfx engine so accel can
eventually be used.
Signed-off-by: Alex Deucher <alexdeucher@gmail.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
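
Editor's note: much of the new code in this patch computes the GB_BACKEND_MAP register. evergreen_get_tile_pipe_to_backend_map() walks the tile pipes in a (possibly swizzled) order and assigns each pipe a 4-bit render-backend index, cycling round-robin over the enabled backends. The sketch below is a minimal, userspace-only illustration of just that packing step; the swizzle order and backend enable mask are made-up example inputs, not the per-ASIC values the driver derives.

```c
/*
 * Simplified illustration of the packing done by
 * evergreen_get_tile_pipe_to_backend_map() in this patch: each tile
 * pipe gets a 4-bit render-backend index at bit position pipe * 4,
 * with enabled backends assigned round-robin in swizzled pipe order.
 * The swizzle table and enable mask below are example inputs only.
 */
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

#define MAX_BACKENDS 8

static uint32_t pack_backend_map(const uint32_t *swizzle_pipe,
				 uint32_t num_pipes,
				 uint32_t enabled_backends_mask)
{
	uint32_t backend_map = 0, cur_backend = 0, pipe;

	for (pipe = 0; pipe < num_pipes; ++pipe) {
		/* skip over disabled backends */
		while (((1 << cur_backend) & enabled_backends_mask) == 0)
			cur_backend = (cur_backend + 1) % MAX_BACKENDS;

		/* 4 bits per pipe, indexed by the swizzled pipe number */
		backend_map |= (cur_backend & 0xf) << (swizzle_pipe[pipe] * 4);

		cur_backend = (cur_backend + 1) % MAX_BACKENDS;
	}
	return backend_map;
}

int main(void)
{
	/* example: 4 pipes swizzled 0,2,1,3 with backends 0-3 enabled */
	uint32_t swizzle[4] = { 0, 2, 1, 3 };

	printf("GB_BACKEND_MAP = 0x%08" PRIx32 "\n",
	       pack_backend_map(swizzle, 4, 0xf));
	return 0;
}
```

With these example inputs the sketch prints GB_BACKEND_MAP = 0x00003120, i.e. pipe 0 → backend 0, pipe 1 → backend 2, pipe 2 → backend 1, pipe 3 → backend 3.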
-rw-r--r-- | drivers/gpu/drm/radeon/evergreen.c  | 556 |
-rw-r--r-- | drivers/gpu/drm/radeon/evergreend.h | 105 |
-rw-r--r-- | drivers/gpu/drm/radeon/radeon.h     |  25 |
3 files changed, 668 insertions, 18 deletions
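
Editor's note: evergreen_gpu_init() also assembles GB_ADDR_CONFIG from the bit-field macros this patch adds to evergreend.h (NUM_PIPES is the log2 of the tile pipe count, NUM_SHADER_ENGINES is num_ses - 1, and the interleave/row fields are derived from MC_ARB_RAMCFG at runtime). Below is a small sketch of that assembly with assumed Juniper-like inputs (4 tile pipes, 1 shader engine), not values read from hardware.

```c
/*
 * Minimal illustration of how GB_ADDR_CONFIG is built from the
 * bit-field macros added to evergreend.h.  The field values are
 * example inputs; the driver derives the interleave and row-size
 * fields from MC_ARB_RAMCFG.
 */
#include <stdio.h>

#define NUM_PIPES(x)			((x) << 0)
#define PIPE_INTERLEAVE_SIZE(x)		((x) << 4)
#define BANK_INTERLEAVE_SIZE(x)		((x) << 8)
#define NUM_SHADER_ENGINES(x)		((x) << 12)
#define SHADER_ENGINE_TILE_SIZE(x)	((x) << 16)
#define NUM_GPUS(x)			((x) << 20)
#define MULTI_GPU_TILE_SIZE(x)		((x) << 24)
#define ROW_SIZE(x)			((x) << 28)

int main(void)
{
	unsigned gb_addr_config = 0;

	gb_addr_config |= NUM_PIPES(2);			/* 4 tile pipes, log2 encoded */
	gb_addr_config |= PIPE_INTERLEAVE_SIZE(1);	/* example burst-length value */
	gb_addr_config |= BANK_INTERLEAVE_SIZE(0);
	gb_addr_config |= NUM_SHADER_ENGINES(0);	/* num_ses - 1 */
	gb_addr_config |= SHADER_ENGINE_TILE_SIZE(1);
	gb_addr_config |= NUM_GPUS(0);
	gb_addr_config |= MULTI_GPU_TILE_SIZE(2);
	gb_addr_config |= ROW_SIZE(2);			/* example, clamped to 2 in the patch */

	printf("GB_ADDR_CONFIG = 0x%08x\n", gb_addr_config);
	return 0;
}
```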
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index a6130a494c56..26b219bb1388 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -434,24 +434,572 @@ static int evergreen_cp_load_microcode(struct radeon_device *rdev) return 0; } - +#endif /* * Core functions */ -static u32 evergreen_get_tile_pipe_to_backend_map(u32 num_tile_pipes, +static u32 evergreen_get_tile_pipe_to_backend_map(struct radeon_device *rdev, + u32 num_tile_pipes, u32 num_backends, u32 backend_disable_mask) { u32 backend_map = 0; + u32 enabled_backends_mask = 0; + u32 enabled_backends_count = 0; + u32 cur_pipe; + u32 swizzle_pipe[EVERGREEN_MAX_PIPES]; + u32 cur_backend = 0; + u32 i; + bool force_no_swizzle; + + if (num_tile_pipes > EVERGREEN_MAX_PIPES) + num_tile_pipes = EVERGREEN_MAX_PIPES; + if (num_tile_pipes < 1) + num_tile_pipes = 1; + if (num_backends > EVERGREEN_MAX_BACKENDS) + num_backends = EVERGREEN_MAX_BACKENDS; + if (num_backends < 1) + num_backends = 1; + + for (i = 0; i < EVERGREEN_MAX_BACKENDS; ++i) { + if (((backend_disable_mask >> i) & 1) == 0) { + enabled_backends_mask |= (1 << i); + ++enabled_backends_count; + } + if (enabled_backends_count == num_backends) + break; + } + + if (enabled_backends_count == 0) { + enabled_backends_mask = 1; + enabled_backends_count = 1; + } + + if (enabled_backends_count != num_backends) + num_backends = enabled_backends_count; + + memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * EVERGREEN_MAX_PIPES); + switch (rdev->family) { + case CHIP_CEDAR: + case CHIP_REDWOOD: + force_no_swizzle = false; + break; + case CHIP_CYPRESS: + case CHIP_HEMLOCK: + case CHIP_JUNIPER: + default: + force_no_swizzle = true; + break; + } + if (force_no_swizzle) { + bool last_backend_enabled = false; + + force_no_swizzle = false; + for (i = 0; i < EVERGREEN_MAX_BACKENDS; ++i) { + if (((enabled_backends_mask >> i) & 1) == 1) { + if (last_backend_enabled) + force_no_swizzle = true; + last_backend_enabled = true; + } else + last_backend_enabled = false; + } + } + + switch (num_tile_pipes) { + case 1: + case 3: + case 5: + case 7: + DRM_ERROR("odd number of pipes!\n"); + break; + case 2: + swizzle_pipe[0] = 0; + swizzle_pipe[1] = 1; + break; + case 4: + if (force_no_swizzle) { + swizzle_pipe[0] = 0; + swizzle_pipe[1] = 1; + swizzle_pipe[2] = 2; + swizzle_pipe[3] = 3; + } else { + swizzle_pipe[0] = 0; + swizzle_pipe[1] = 2; + swizzle_pipe[2] = 1; + swizzle_pipe[3] = 3; + } + break; + case 6: + if (force_no_swizzle) { + swizzle_pipe[0] = 0; + swizzle_pipe[1] = 1; + swizzle_pipe[2] = 2; + swizzle_pipe[3] = 3; + swizzle_pipe[4] = 4; + swizzle_pipe[5] = 5; + } else { + swizzle_pipe[0] = 0; + swizzle_pipe[1] = 2; + swizzle_pipe[2] = 4; + swizzle_pipe[3] = 1; + swizzle_pipe[4] = 3; + swizzle_pipe[5] = 5; + } + break; + case 8: + if (force_no_swizzle) { + swizzle_pipe[0] = 0; + swizzle_pipe[1] = 1; + swizzle_pipe[2] = 2; + swizzle_pipe[3] = 3; + swizzle_pipe[4] = 4; + swizzle_pipe[5] = 5; + swizzle_pipe[6] = 6; + swizzle_pipe[7] = 7; + } else { + swizzle_pipe[0] = 0; + swizzle_pipe[1] = 2; + swizzle_pipe[2] = 4; + swizzle_pipe[3] = 6; + swizzle_pipe[4] = 1; + swizzle_pipe[5] = 3; + swizzle_pipe[6] = 5; + swizzle_pipe[7] = 7; + } + break; + } + + for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) { + while (((1 << cur_backend) & enabled_backends_mask) == 0) + cur_backend = (cur_backend + 1) % EVERGREEN_MAX_BACKENDS; + + backend_map |= (((cur_backend & 0xf) << (swizzle_pipe[cur_pipe] * 4))); + + cur_backend = (cur_backend + 
1) % EVERGREEN_MAX_BACKENDS; + } return backend_map; } -#endif static void evergreen_gpu_init(struct radeon_device *rdev) { - /* XXX */ + u32 cc_rb_backend_disable = 0; + u32 cc_gc_shader_pipe_config; + u32 gb_addr_config = 0; + u32 mc_shared_chmap, mc_arb_ramcfg; + u32 gb_backend_map; + u32 grbm_gfx_index; + u32 sx_debug_1; + u32 smx_dc_ctl0; + u32 sq_config; + u32 sq_lds_resource_mgmt; + u32 sq_gpr_resource_mgmt_1; + u32 sq_gpr_resource_mgmt_2; + u32 sq_gpr_resource_mgmt_3; + u32 sq_thread_resource_mgmt; + u32 sq_thread_resource_mgmt_2; + u32 sq_stack_resource_mgmt_1; + u32 sq_stack_resource_mgmt_2; + u32 sq_stack_resource_mgmt_3; + u32 vgt_cache_invalidation; + u32 hdp_host_path_cntl; + int i, j, num_shader_engines, ps_thread_count; + + switch (rdev->family) { + case CHIP_CYPRESS: + case CHIP_HEMLOCK: + rdev->config.evergreen.num_ses = 2; + rdev->config.evergreen.max_pipes = 4; + rdev->config.evergreen.max_tile_pipes = 8; + rdev->config.evergreen.max_simds = 10; + rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses; + rdev->config.evergreen.max_gprs = 256; + rdev->config.evergreen.max_threads = 248; + rdev->config.evergreen.max_gs_threads = 32; + rdev->config.evergreen.max_stack_entries = 512; + rdev->config.evergreen.sx_num_of_sets = 4; + rdev->config.evergreen.sx_max_export_size = 256; + rdev->config.evergreen.sx_max_export_pos_size = 64; + rdev->config.evergreen.sx_max_export_smx_size = 192; + rdev->config.evergreen.max_hw_contexts = 8; + rdev->config.evergreen.sq_num_cf_insts = 2; + + rdev->config.evergreen.sc_prim_fifo_size = 0x100; + rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; + rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; + break; + case CHIP_JUNIPER: + rdev->config.evergreen.num_ses = 1; + rdev->config.evergreen.max_pipes = 4; + rdev->config.evergreen.max_tile_pipes = 4; + rdev->config.evergreen.max_simds = 10; + rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses; + rdev->config.evergreen.max_gprs = 256; + rdev->config.evergreen.max_threads = 248; + rdev->config.evergreen.max_gs_threads = 32; + rdev->config.evergreen.max_stack_entries = 512; + rdev->config.evergreen.sx_num_of_sets = 4; + rdev->config.evergreen.sx_max_export_size = 256; + rdev->config.evergreen.sx_max_export_pos_size = 64; + rdev->config.evergreen.sx_max_export_smx_size = 192; + rdev->config.evergreen.max_hw_contexts = 8; + rdev->config.evergreen.sq_num_cf_insts = 2; + + rdev->config.evergreen.sc_prim_fifo_size = 0x100; + rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; + rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; + break; + case CHIP_REDWOOD: + rdev->config.evergreen.num_ses = 1; + rdev->config.evergreen.max_pipes = 4; + rdev->config.evergreen.max_tile_pipes = 4; + rdev->config.evergreen.max_simds = 5; + rdev->config.evergreen.max_backends = 2 * rdev->config.evergreen.num_ses; + rdev->config.evergreen.max_gprs = 256; + rdev->config.evergreen.max_threads = 248; + rdev->config.evergreen.max_gs_threads = 32; + rdev->config.evergreen.max_stack_entries = 256; + rdev->config.evergreen.sx_num_of_sets = 4; + rdev->config.evergreen.sx_max_export_size = 256; + rdev->config.evergreen.sx_max_export_pos_size = 64; + rdev->config.evergreen.sx_max_export_smx_size = 192; + rdev->config.evergreen.max_hw_contexts = 8; + rdev->config.evergreen.sq_num_cf_insts = 2; + + rdev->config.evergreen.sc_prim_fifo_size = 0x100; + rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; + rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; + break; + case 
CHIP_CEDAR: + default: + rdev->config.evergreen.num_ses = 1; + rdev->config.evergreen.max_pipes = 2; + rdev->config.evergreen.max_tile_pipes = 2; + rdev->config.evergreen.max_simds = 2; + rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses; + rdev->config.evergreen.max_gprs = 256; + rdev->config.evergreen.max_threads = 192; + rdev->config.evergreen.max_gs_threads = 16; + rdev->config.evergreen.max_stack_entries = 256; + rdev->config.evergreen.sx_num_of_sets = 4; + rdev->config.evergreen.sx_max_export_size = 128; + rdev->config.evergreen.sx_max_export_pos_size = 32; + rdev->config.evergreen.sx_max_export_smx_size = 96; + rdev->config.evergreen.max_hw_contexts = 4; + rdev->config.evergreen.sq_num_cf_insts = 1; + + rdev->config.evergreen.sc_prim_fifo_size = 0x40; + rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; + rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; + break; + } + + /* Initialize HDP */ + for (i = 0, j = 0; i < 32; i++, j += 0x18) { + WREG32((0x2c14 + j), 0x00000000); + WREG32((0x2c18 + j), 0x00000000); + WREG32((0x2c1c + j), 0x00000000); + WREG32((0x2c20 + j), 0x00000000); + WREG32((0x2c24 + j), 0x00000000); + } + + WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff)); + + cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & ~2; + + cc_gc_shader_pipe_config |= + INACTIVE_QD_PIPES((EVERGREEN_MAX_PIPES_MASK << rdev->config.evergreen.max_pipes) + & EVERGREEN_MAX_PIPES_MASK); + cc_gc_shader_pipe_config |= + INACTIVE_SIMDS((EVERGREEN_MAX_SIMDS_MASK << rdev->config.evergreen.max_simds) + & EVERGREEN_MAX_SIMDS_MASK); + + cc_rb_backend_disable = + BACKEND_DISABLE((EVERGREEN_MAX_BACKENDS_MASK << rdev->config.evergreen.max_backends) + & EVERGREEN_MAX_BACKENDS_MASK); + + + mc_shared_chmap = RREG32(MC_SHARED_CHMAP); + mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG); + + switch (rdev->config.evergreen.max_tile_pipes) { + case 1: + default: + gb_addr_config |= NUM_PIPES(0); + break; + case 2: + gb_addr_config |= NUM_PIPES(1); + break; + case 4: + gb_addr_config |= NUM_PIPES(2); + break; + case 8: + gb_addr_config |= NUM_PIPES(3); + break; + } + + gb_addr_config |= PIPE_INTERLEAVE_SIZE((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT); + gb_addr_config |= BANK_INTERLEAVE_SIZE(0); + gb_addr_config |= NUM_SHADER_ENGINES(rdev->config.evergreen.num_ses - 1); + gb_addr_config |= SHADER_ENGINE_TILE_SIZE(1); + gb_addr_config |= NUM_GPUS(0); /* Hemlock? 
*/ + gb_addr_config |= MULTI_GPU_TILE_SIZE(2); + + if (((mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT) > 2) + gb_addr_config |= ROW_SIZE(2); + else + gb_addr_config |= ROW_SIZE((mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT); + + if (rdev->ddev->pdev->device == 0x689e) { + u32 efuse_straps_4; + u32 efuse_straps_3; + u8 efuse_box_bit_131_124; + + WREG32(RCU_IND_INDEX, 0x204); + efuse_straps_4 = RREG32(RCU_IND_DATA); + WREG32(RCU_IND_INDEX, 0x203); + efuse_straps_3 = RREG32(RCU_IND_DATA); + efuse_box_bit_131_124 = (u8)(((efuse_straps_4 & 0xf) << 4) | ((efuse_straps_3 & 0xf0000000) >> 28)); + + switch(efuse_box_bit_131_124) { + case 0x00: + gb_backend_map = 0x76543210; + break; + case 0x55: + gb_backend_map = 0x77553311; + break; + case 0x56: + gb_backend_map = 0x77553300; + break; + case 0x59: + gb_backend_map = 0x77552211; + break; + case 0x66: + gb_backend_map = 0x77443300; + break; + case 0x99: + gb_backend_map = 0x66552211; + break; + case 0x5a: + gb_backend_map = 0x77552200; + break; + case 0xaa: + gb_backend_map = 0x66442200; + break; + case 0x95: + gb_backend_map = 0x66553311; + break; + default: + DRM_ERROR("bad backend map, using default\n"); + gb_backend_map = + evergreen_get_tile_pipe_to_backend_map(rdev, + rdev->config.evergreen.max_tile_pipes, + rdev->config.evergreen.max_backends, + ((EVERGREEN_MAX_BACKENDS_MASK << + rdev->config.evergreen.max_backends) & + EVERGREEN_MAX_BACKENDS_MASK)); + break; + } + } else if (rdev->ddev->pdev->device == 0x68b9) { + u32 efuse_straps_3; + u8 efuse_box_bit_127_124; + + WREG32(RCU_IND_INDEX, 0x203); + efuse_straps_3 = RREG32(RCU_IND_DATA); + efuse_box_bit_127_124 = (u8)(efuse_straps_3 & 0xF0000000) >> 28; + + switch(efuse_box_bit_127_124) { + case 0x0: + gb_backend_map = 0x00003210; + break; + case 0x5: + case 0x6: + case 0x9: + case 0xa: + gb_backend_map = 0x00003311; + break; + default: + DRM_ERROR("bad backend map, using default\n"); + gb_backend_map = + evergreen_get_tile_pipe_to_backend_map(rdev, + rdev->config.evergreen.max_tile_pipes, + rdev->config.evergreen.max_backends, + ((EVERGREEN_MAX_BACKENDS_MASK << + rdev->config.evergreen.max_backends) & + EVERGREEN_MAX_BACKENDS_MASK)); + break; + } + } else + gb_backend_map = + evergreen_get_tile_pipe_to_backend_map(rdev, + rdev->config.evergreen.max_tile_pipes, + rdev->config.evergreen.max_backends, + ((EVERGREEN_MAX_BACKENDS_MASK << + rdev->config.evergreen.max_backends) & + EVERGREEN_MAX_BACKENDS_MASK)); + + WREG32(GB_BACKEND_MAP, gb_backend_map); + WREG32(GB_ADDR_CONFIG, gb_addr_config); + WREG32(DMIF_ADDR_CONFIG, gb_addr_config); + WREG32(HDP_ADDR_CONFIG, gb_addr_config); + + num_shader_engines = ((RREG32(GB_ADDR_CONFIG) & NUM_SHADER_ENGINES(3)) >> 12) + 1; + grbm_gfx_index = INSTANCE_BROADCAST_WRITES; + + for (i = 0; i < rdev->config.evergreen.num_ses; i++) { + u32 rb = cc_rb_backend_disable | (0xf0 << 16); + u32 sp = cc_gc_shader_pipe_config; + u32 gfx = grbm_gfx_index | SE_INDEX(i); + + if (i == num_shader_engines) { + rb |= BACKEND_DISABLE(EVERGREEN_MAX_BACKENDS_MASK); + sp |= INACTIVE_SIMDS(EVERGREEN_MAX_SIMDS_MASK); + } + + WREG32(GRBM_GFX_INDEX, gfx); + WREG32(RLC_GFX_INDEX, gfx); + + WREG32(CC_RB_BACKEND_DISABLE, rb); + WREG32(CC_SYS_RB_BACKEND_DISABLE, rb); + WREG32(GC_USER_RB_BACKEND_DISABLE, rb); + WREG32(CC_GC_SHADER_PIPE_CONFIG, sp); + } + + grbm_gfx_index |= SE_BROADCAST_WRITES; + WREG32(GRBM_GFX_INDEX, grbm_gfx_index); + WREG32(RLC_GFX_INDEX, grbm_gfx_index); + + WREG32(CGTS_SYS_TCC_DISABLE, 0); + WREG32(CGTS_TCC_DISABLE, 0); + WREG32(CGTS_USER_SYS_TCC_DISABLE, 0); + 
WREG32(CGTS_USER_TCC_DISABLE, 0); + + /* set HW defaults for 3D engine */ + WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) | + ROQ_IB2_START(0x2b))); + + WREG32(CP_MEQ_THRESHOLDS, STQ_SPLIT(0x30)); + + WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO | + SYNC_GRADIENT | + SYNC_WALKER | + SYNC_ALIGNER)); + + sx_debug_1 = RREG32(SX_DEBUG_1); + sx_debug_1 |= ENABLE_NEW_SMX_ADDRESS; + WREG32(SX_DEBUG_1, sx_debug_1); + + + smx_dc_ctl0 = RREG32(SMX_DC_CTL0); + smx_dc_ctl0 &= ~NUMBER_OF_SETS(0x1ff); + smx_dc_ctl0 |= NUMBER_OF_SETS(rdev->config.evergreen.sx_num_of_sets); + WREG32(SMX_DC_CTL0, smx_dc_ctl0); + + WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_size / 4) - 1) | + POSITION_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_pos_size / 4) - 1) | + SMX_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_smx_size / 4) - 1))); + + WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.evergreen.sc_prim_fifo_size) | + SC_HIZ_TILE_FIFO_SIZE(rdev->config.evergreen.sc_hiz_tile_fifo_size) | + SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.evergreen.sc_earlyz_tile_fifo_size))); + + WREG32(VGT_NUM_INSTANCES, 1); + WREG32(SPI_CONFIG_CNTL, 0); + WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4)); + WREG32(CP_PERFMON_CNTL, 0); + + WREG32(SQ_MS_FIFO_SIZES, (CACHE_FIFO_SIZE(16 * rdev->config.evergreen.sq_num_cf_insts) | + FETCH_FIFO_HIWATER(0x4) | + DONE_FIFO_HIWATER(0xe0) | + ALU_UPDATE_FIFO_HIWATER(0x8))); + + sq_config = RREG32(SQ_CONFIG); + sq_config &= ~(PS_PRIO(3) | + VS_PRIO(3) | + GS_PRIO(3) | + ES_PRIO(3)); + sq_config |= (VC_ENABLE | + EXPORT_SRC_C | + PS_PRIO(0) | + VS_PRIO(1) | + GS_PRIO(2) | + ES_PRIO(3)); + + if (rdev->family == CHIP_CEDAR) + /* no vertex cache */ + sq_config &= ~VC_ENABLE; + + sq_lds_resource_mgmt = RREG32(SQ_LDS_RESOURCE_MGMT); + + sq_gpr_resource_mgmt_1 = NUM_PS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2))* 12 / 32); + sq_gpr_resource_mgmt_1 |= NUM_VS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 6 / 32); + sq_gpr_resource_mgmt_1 |= NUM_CLAUSE_TEMP_GPRS(4); + sq_gpr_resource_mgmt_2 = NUM_GS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 4 / 32); + sq_gpr_resource_mgmt_2 |= NUM_ES_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 4 / 32); + sq_gpr_resource_mgmt_3 = NUM_HS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 3 / 32); + sq_gpr_resource_mgmt_3 |= NUM_LS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 3 / 32); + + if (rdev->family == CHIP_CEDAR) + ps_thread_count = 96; + else + ps_thread_count = 128; + + sq_thread_resource_mgmt = NUM_PS_THREADS(ps_thread_count); + sq_thread_resource_mgmt |= NUM_VS_THREADS(((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8; + sq_thread_resource_mgmt |= NUM_GS_THREADS(((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8; + sq_thread_resource_mgmt |= NUM_ES_THREADS(((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8; + sq_thread_resource_mgmt_2 = NUM_HS_THREADS(((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8; + sq_thread_resource_mgmt_2 |= NUM_LS_THREADS(((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8; + + sq_stack_resource_mgmt_1 = NUM_PS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6); + sq_stack_resource_mgmt_1 |= NUM_VS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6); + sq_stack_resource_mgmt_2 = NUM_GS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6); + sq_stack_resource_mgmt_2 |= NUM_ES_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries 
* 1) / 6); + sq_stack_resource_mgmt_3 = NUM_HS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6); + sq_stack_resource_mgmt_3 |= NUM_LS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6); + + WREG32(SQ_CONFIG, sq_config); + WREG32(SQ_GPR_RESOURCE_MGMT_1, sq_gpr_resource_mgmt_1); + WREG32(SQ_GPR_RESOURCE_MGMT_2, sq_gpr_resource_mgmt_2); + WREG32(SQ_GPR_RESOURCE_MGMT_3, sq_gpr_resource_mgmt_3); + WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt); + WREG32(SQ_THREAD_RESOURCE_MGMT_2, sq_thread_resource_mgmt_2); + WREG32(SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1); + WREG32(SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2); + WREG32(SQ_STACK_RESOURCE_MGMT_3, sq_stack_resource_mgmt_3); + WREG32(SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, 0); + WREG32(SQ_LDS_RESOURCE_MGMT, sq_lds_resource_mgmt); + + WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) | + FORCE_EOV_MAX_REZ_CNT(255))); + + if (rdev->family == CHIP_CEDAR) + vgt_cache_invalidation = CACHE_INVALIDATION(TC_ONLY); + else + vgt_cache_invalidation = CACHE_INVALIDATION(VC_AND_TC); + vgt_cache_invalidation |= AUTO_INVLD_EN(ES_AND_GS_AUTO); + WREG32(VGT_CACHE_INVALIDATION, vgt_cache_invalidation); + + WREG32(VGT_GS_VERTEX_REUSE, 16); + WREG32(PA_SC_LINE_STIPPLE_STATE, 0); + + WREG32(CB_PERF_CTR0_SEL_0, 0); + WREG32(CB_PERF_CTR0_SEL_1, 0); + WREG32(CB_PERF_CTR1_SEL_0, 0); + WREG32(CB_PERF_CTR1_SEL_1, 0); + WREG32(CB_PERF_CTR2_SEL_0, 0); + WREG32(CB_PERF_CTR2_SEL_1, 0); + WREG32(CB_PERF_CTR3_SEL_0, 0); + WREG32(CB_PERF_CTR3_SEL_1, 0); + + hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL); + WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl); + + WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3)); + + udelay(50); + } int evergreen_mc_init(struct radeon_device *rdev) diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h index 7c290a6dd0e3..effe335356c7 100644 --- a/drivers/gpu/drm/radeon/evergreend.h +++ b/drivers/gpu/drm/radeon/evergreend.h @@ -24,12 +24,49 @@ #ifndef EVERGREEND_H #define EVERGREEND_H +#define EVERGREEN_MAX_SH_GPRS 256 +#define EVERGREEN_MAX_TEMP_GPRS 16 +#define EVERGREEN_MAX_SH_THREADS 256 +#define EVERGREEN_MAX_SH_STACK_ENTRIES 4096 +#define EVERGREEN_MAX_FRC_EOV_CNT 16384 +#define EVERGREEN_MAX_BACKENDS 8 +#define EVERGREEN_MAX_BACKENDS_MASK 0xFF +#define EVERGREEN_MAX_SIMDS 16 +#define EVERGREEN_MAX_SIMDS_MASK 0xFFFF +#define EVERGREEN_MAX_PIPES 8 +#define EVERGREEN_MAX_PIPES_MASK 0xFF +#define EVERGREEN_MAX_LDS_NUM 0xFFFF + /* Registers */ -#define CC_GC_SHADER_PIPE_CONFIG 0x8950 -#define CC_RB_BACKEND_DISABLE 0x98F4 -#define BACKEND_DISABLE(x) ((x) << 16) +#define RCU_IND_INDEX 0x100 +#define RCU_IND_DATA 0x104 + +#define GRBM_GFX_INDEX 0x802C +#define INSTANCE_INDEX(x) ((x) << 0) +#define SE_INDEX(x) ((x) << 16) +#define INSTANCE_BROADCAST_WRITES (1 << 30) +#define SE_BROADCAST_WRITES (1 << 31) +#define RLC_GFX_INDEX 0x3fC4 +#define CC_GC_SHADER_PIPE_CONFIG 0x8950 +#define WRITE_DIS (1 << 0) +#define CC_RB_BACKEND_DISABLE 0x98F4 +#define BACKEND_DISABLE(x) ((x) << 16) +#define GB_ADDR_CONFIG 0x98F8 +#define NUM_PIPES(x) ((x) << 0) +#define PIPE_INTERLEAVE_SIZE(x) ((x) << 4) +#define BANK_INTERLEAVE_SIZE(x) ((x) << 8) +#define NUM_SHADER_ENGINES(x) ((x) << 12) +#define SHADER_ENGINE_TILE_SIZE(x) ((x) << 16) +#define NUM_GPUS(x) ((x) << 20) +#define MULTI_GPU_TILE_SIZE(x) ((x) << 24) +#define ROW_SIZE(x) ((x) << 28) +#define GB_BACKEND_MAP 0x98FC +#define DMIF_ADDR_CONFIG 0xBD4 +#define HDP_ADDR_CONFIG 0x2F48 + #define 
CC_SYS_RB_BACKEND_DISABLE 0x3F88 +#define GC_USER_RB_BACKEND_DISABLE 0x9B7C #define CGTS_SYS_TCC_DISABLE 0x3F90 #define CGTS_TCC_DISABLE 0x9148 @@ -38,9 +75,9 @@ #define CONFIG_MEMSIZE 0x5428 -#define CP_ME_CNTL 0x86D8 -#define CP_ME_HALT (1<<28) -#define CP_PFP_HALT (1<<26) +#define CP_ME_CNTL 0x86D8 +#define CP_ME_HALT (1 << 28) +#define CP_PFP_HALT (1 << 26) #define CP_ME_RAM_DATA 0xC160 #define CP_ME_RAM_RADDR 0xC158 #define CP_ME_RAM_WADDR 0xC15C @@ -53,10 +90,10 @@ #define ROQ_IB1_START(x) ((x) << 0) #define ROQ_IB2_START(x) ((x) << 8) #define CP_RB_CNTL 0xC104 -#define RB_BUFSZ(x) ((x)<<0) -#define RB_BLKSZ(x) ((x)<<8) -#define RB_NO_UPDATE (1<<27) -#define RB_RPTR_WR_ENA (1<<31) +#define RB_BUFSZ(x) ((x) << 0) +#define RB_BLKSZ(x) ((x) << 8) +#define RB_NO_UPDATE (1 << 27) +#define RB_RPTR_WR_ENA (1 << 31) #define BUF_SWAP_32BIT (2 << 16) #define CP_RB_RPTR 0x8700 #define CP_RB_RPTR_ADDR 0xC10C @@ -184,9 +221,10 @@ #define PA_SC_FIFO_SIZE 0x8BCC #define SC_PRIM_FIFO_SIZE(x) ((x) << 0) #define SC_HIZ_TILE_FIFO_SIZE(x) ((x) << 12) +#define SC_EARLYZ_TILE_FIFO_SIZE(x) ((x) << 20) #define PA_SC_FORCE_EOV_MAX_CNTS 0x8B24 -#define FORCE_EOV_MAX_CLK_CNT(x) ((x)<<0) -#define FORCE_EOV_MAX_REZ_CNT(x) ((x)<<16) +#define FORCE_EOV_MAX_CLK_CNT(x) ((x) << 0) +#define FORCE_EOV_MAX_REZ_CNT(x) ((x) << 16) #define PA_SC_LINE_STIPPLE 0x28A0C #define PA_SC_LINE_STIPPLE_STATE 0x8B10 @@ -203,7 +241,7 @@ #define SMX_DC_CTL0 0xA020 #define USE_HASH_FUNCTION (1 << 0) -#define CACHE_DEPTH(x) ((x) << 1) +#define NUMBER_OF_SETS(x) ((x) << 1) #define FLUSH_ALL_ON_EVENT (1 << 10) #define STALL_ON_EVENT (1 << 11) #define SMX_EVENT_CTL 0xA02C @@ -234,6 +272,13 @@ #define SQ_CONFIG 0x8C00 #define VC_ENABLE (1 << 0) #define EXPORT_SRC_C (1 << 1) +#define CS_PRIO(x) ((x) << 18) +#define LS_PRIO(x) ((x) << 20) +#define HS_PRIO(x) ((x) << 22) +#define PS_PRIO(x) ((x) << 24) +#define VS_PRIO(x) ((x) << 26) +#define GS_PRIO(x) ((x) << 28) +#define ES_PRIO(x) ((x) << 30) #define SQ_GPR_RESOURCE_MGMT_1 0x8C04 #define NUM_PS_GPRS(x) ((x) << 0) #define NUM_VS_GPRS(x) ((x) << 16) @@ -241,6 +286,29 @@ #define SQ_GPR_RESOURCE_MGMT_2 0x8C08 #define NUM_GS_GPRS(x) ((x) << 0) #define NUM_ES_GPRS(x) ((x) << 16) +#define SQ_GPR_RESOURCE_MGMT_3 0x8C0C +#define NUM_HS_GPRS(x) ((x) << 0) +#define NUM_LS_GPRS(x) ((x) << 16) +#define SQ_THREAD_RESOURCE_MGMT 0x8C18 +#define NUM_PS_THREADS(x) ((x) << 0) +#define NUM_VS_THREADS(x) ((x) << 8) +#define NUM_GS_THREADS(x) ((x) << 16) +#define NUM_ES_THREADS(x) ((x) << 24) +#define SQ_THREAD_RESOURCE_MGMT_2 0x8C1C +#define NUM_HS_THREADS(x) ((x) << 0) +#define NUM_LS_THREADS(x) ((x) << 8) +#define SQ_STACK_RESOURCE_MGMT_1 0x8C20 +#define NUM_PS_STACK_ENTRIES(x) ((x) << 0) +#define NUM_VS_STACK_ENTRIES(x) ((x) << 16) +#define SQ_STACK_RESOURCE_MGMT_2 0x8C24 +#define NUM_GS_STACK_ENTRIES(x) ((x) << 0) +#define NUM_ES_STACK_ENTRIES(x) ((x) << 16) +#define SQ_STACK_RESOURCE_MGMT_3 0x8C28 +#define NUM_HS_STACK_ENTRIES(x) ((x) << 0) +#define NUM_LS_STACK_ENTRIES(x) ((x) << 16) +#define SQ_DYN_GPR_CNTL_PS_FLUSH_REQ 0x8D8C +#define SQ_LDS_RESOURCE_MGMT 0x8E2C + #define SQ_MS_FIFO_SIZES 0x8CF0 #define CACHE_FIFO_SIZE(x) ((x) << 0) #define FETCH_FIFO_HIWATER(x) ((x) << 8) @@ -255,6 +323,15 @@ #define SMX_BUFFER_SIZE(x) ((x) << 16) #define SX_MISC 0x28350 +#define CB_PERF_CTR0_SEL_0 0x9A20 +#define CB_PERF_CTR0_SEL_1 0x9A24 +#define CB_PERF_CTR1_SEL_0 0x9A28 +#define CB_PERF_CTR1_SEL_1 0x9A2C +#define CB_PERF_CTR2_SEL_0 0x9A30 +#define CB_PERF_CTR2_SEL_1 0x9A34 +#define CB_PERF_CTR3_SEL_0 0x9A38 
+#define CB_PERF_CTR3_SEL_1 0x9A3C + #define TA_CNTL_AUX 0x9508 #define DISABLE_CUBE_WRAP (1 << 0) #define DISABLE_CUBE_ANISO (1 << 1) @@ -263,7 +340,7 @@ #define SYNC_ALIGNER (1 << 26) #define VGT_CACHE_INVALIDATION 0x88C4 -#define CACHE_INVALIDATION(x) ((x)<<0) +#define CACHE_INVALIDATION(x) ((x) << 0) #define VC_ONLY 0 #define TC_ONLY 1 #define VC_AND_TC 2 diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 4ac97ab28947..a77a86203996 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -870,11 +870,36 @@ struct rv770_asic { struct r100_gpu_lockup lockup; }; +struct evergreen_asic { + unsigned num_ses; + unsigned max_pipes; + unsigned max_tile_pipes; + unsigned max_simds; + unsigned max_backends; + unsigned max_gprs; + unsigned max_threads; + unsigned max_stack_entries; + unsigned max_hw_contexts; + unsigned max_gs_threads; + unsigned sx_max_export_size; + unsigned sx_max_export_pos_size; + unsigned sx_max_export_smx_size; + unsigned sq_num_cf_insts; + unsigned sx_num_of_sets; + unsigned sc_prim_fifo_size; + unsigned sc_hiz_tile_fifo_size; + unsigned sc_earlyz_tile_fifo_size; + unsigned tiling_nbanks; + unsigned tiling_npipes; + unsigned tiling_group_size; +}; + union radeon_asic_config { struct r300_asic r300; struct r100_asic r100; struct r600_asic r600; struct rv770_asic rv770; + struct evergreen_asic evergreen; }; /* |
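
Editor's note: for reference, the SQ GPR split programmed above reserves 8 GPRs for clause temporaries (NUM_CLAUSE_TEMP_GPRS(4), i.e. 4 * 2) and divides the remainder between the six shader types with 12/6/4/4/3/3 weights out of 32. The arithmetic-only sketch below uses the max_gprs = 256 value shared by all four Evergreen variants in this patch; it performs no register programming.

```c
/*
 * Back-of-the-envelope view of the SQ GPR partitioning done by
 * evergreen_gpu_init(): reserve the clause-temp GPRs, then split the
 * remaining pool with 12/6/4/4/3/3 weights out of 32.
 */
#include <stdio.h>

int main(void)
{
	unsigned max_gprs = 256;
	unsigned pool = max_gprs - (4 * 2);	/* minus clause-temp reservation */

	printf("PS GPRs: %u\n", pool * 12 / 32);
	printf("VS GPRs: %u\n", pool * 6 / 32);
	printf("GS GPRs: %u\n", pool * 4 / 32);
	printf("ES GPRs: %u\n", pool * 4 / 32);
	printf("HS GPRs: %u\n", pool * 3 / 32);
	printf("LS GPRs: %u\n", pool * 3 / 32);
	return 0;
}
```

For max_gprs = 256 this works out to 93/46/31/31/23/23 GPRs for PS/VS/GS/ES/HS/LS.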