author     Jonathan Marek <jonathan@marek.ca>    2021-06-08 13:27:48 -0400
committer  Rob Clark <robdclark@chromium.org>    2021-06-23 07:33:55 -0700
commit     f6d62d091cfd1c307a1bb83ef46d334d9ac27751 (patch)
tree       9c4535f32643f69f7f89b36a5417aabfa08735b3 /drivers
parent     564499f5ddbb2d8529a460e24ef6bd2e8593c775 (diff)
drm/msm/a6xx: add support for Adreno 660 GPU
Add adreno_is_{a660,a650_family} helpers and update existing
adreno_is_a650 usage based on the downstream driver's logic (changing
calls to adreno_is_a650_family or adding adreno_is_a660 where appropriate).
Also add the remaining changes required for A660, again based on the
downstream driver: the missing GMU allocations, additional register init,
a dummy HFI BW table, a CP protect list, an entry in the gpulist table,
an hwcg table, and an updated a6xx_ucode_check_version() check.
Signed-off-by: Jonathan Marek <jonathan@marek.ca>
Link: https://lore.kernel.org/r/20210608172808.11803-6-jonathan@marek.ca
Signed-off-by: Rob Clark <robdclark@chromium.org>
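
The helper pattern the commit message describes is small; the following is a condensed sketch lifted from the adreno_gpu.h, a6xx_gmu.c and a6xx_gpu.c hunks in the diff below (surrounding context and error handling elided):

    /* New inline helpers (adreno_gpu.h): an explicit A660 check, plus a
     * "family" check covering a650 and its derivatives (a620, a660).
     */
    static inline int adreno_is_a660(struct adreno_gpu *gpu)
    {
            return gpu->revn == 660;
    }

    static inline int adreno_is_a650_family(struct adreno_gpu *gpu)
    {
            return gpu->revn == 650 || gpu->revn == 620 || gpu->revn == 660;
    }

    /* Call sites that apply to the whole a650 tier switch to the family
     * helper (example from a6xx_gmu_fw_load()) ...
     */
    if (adreno_is_a650_family(adreno_gpu))
            dtcm_base = 0x10004000;

    /* ... while A660-only behaviour gets an explicit check (example from
     * a6xx_hw_init()).
     */
    if (adreno_is_a660(adreno_gpu))
            gpu_write(gpu, REG_A6XX_CP_LPAC_PROG_FIFO_SIZE, 0x00000020);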
Diffstat (limited to 'drivers')
-rw-r--r--   drivers/gpu/drm/msm/adreno/a6xx_gmu.c      |  32
-rw-r--r--   drivers/gpu/drm/msm/adreno/a6xx_gpu.c      | 162
-rw-r--r--   drivers/gpu/drm/msm/adreno/a6xx_hfi.c      |  33
-rw-r--r--   drivers/gpu/drm/msm/adreno/adreno_device.c |  13
-rw-r--r--   drivers/gpu/drm/msm/adreno/adreno_gpu.c    |   2
-rw-r--r--   drivers/gpu/drm/msm/adreno/adreno_gpu.h    |  13
6 files changed, 219 insertions, 36 deletions
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index b64e62323792..b349692219b7 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -519,7 +519,7 @@ static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu)
 	if (!pdcptr)
 		goto err;
 
-	if (adreno_is_a650(adreno_gpu))
+	if (adreno_is_a650(adreno_gpu) || adreno_is_a660(adreno_gpu))
 		pdc_in_aop = true;
 	else if (adreno_is_a618(adreno_gpu) || adreno_is_a640(adreno_gpu))
 		pdc_address_offset = 0x30090;
@@ -549,7 +549,7 @@ static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu)
 	gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_HI, 0x4514);
 
 	/* Load RSC sequencer uCode for sleep and wakeup */
-	if (adreno_is_a650(adreno_gpu)) {
+	if (adreno_is_a650_family(adreno_gpu)) {
 		gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0, 0xeaaae5a0);
 		gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 1, 0xe1a1ebab);
 		gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 2, 0xa2e0a581);
@@ -597,7 +597,7 @@ static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu)
 	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 4, 0x10108);
 	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 4, 0x30000);
 
-	if (adreno_is_a618(adreno_gpu) || adreno_is_a650(adreno_gpu))
+	if (adreno_is_a618(adreno_gpu) || adreno_is_a650_family(adreno_gpu))
 		pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 4, 0x2);
 	else
 		pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 4, 0x3);
@@ -698,7 +698,7 @@ static int a6xx_gmu_fw_load(struct a6xx_gmu *gmu)
 	u32 itcm_base = 0x00000000;
 	u32 dtcm_base = 0x00040000;
 
-	if (adreno_is_a650(adreno_gpu))
+	if (adreno_is_a650_family(adreno_gpu))
 		dtcm_base = 0x10004000;
 
 	if (gmu->legacy) {
@@ -751,7 +751,7 @@ static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
 	int ret;
 	u32 chipid;
 
-	if (adreno_is_a650(adreno_gpu)) {
+	if (adreno_is_a650_family(adreno_gpu)) {
 		gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_CX_FALNEXT_INTF, 1);
 		gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_CX_FAL_INTF, 1);
 	}
@@ -1496,12 +1496,28 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
 	if (ret)
 		goto err_put_device;
 
+
+	/* A660 now requires handling "prealloc requests" in GMU firmware
+	   For now just hardcode allocations based on the known firmware.
+	   note: there is no indication that these correspond to "dummy" or
+	   "debug" regions, but this "guess" allows reusing these BOs which
+	   are otherwise unused by a660.
+	 */
+	gmu->dummy.size = SZ_4K;
+	if (adreno_is_a660(adreno_gpu)) {
+		ret = a6xx_gmu_memory_alloc(gmu, &gmu->debug, SZ_4K * 7, 0x60400000);
+		if (ret)
+			goto err_memory;
+
+		gmu->dummy.size = SZ_8K;
+	}
+
 	/* Allocate memory for the GMU dummy page */
-	ret = a6xx_gmu_memory_alloc(gmu, &gmu->dummy, SZ_4K, 0x60000000);
+	ret = a6xx_gmu_memory_alloc(gmu, &gmu->dummy, gmu->dummy.size, 0x60000000);
 	if (ret)
 		goto err_memory;
 
-	if (adreno_is_a650(adreno_gpu)) {
+	if (adreno_is_a650_family(adreno_gpu)) {
 		ret = a6xx_gmu_memory_alloc(gmu, &gmu->icache,
 			SZ_16M - SZ_16K, 0x04000);
 		if (ret)
@@ -1543,7 +1559,7 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
 		goto err_memory;
 	}
 
-	if (adreno_is_a650(adreno_gpu)) {
+	if (adreno_is_a650_family(adreno_gpu)) {
 		gmu->rscc = a6xx_gmu_get_mmio(pdev, "rscc");
 		if (IS_ERR(gmu->rscc))
 			goto err_mmio;
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index 16b3bd9ad44c..eacc6ef8c642 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -427,6 +427,59 @@ const struct adreno_reglist a650_hwcg[] = {
 	{},
 };
 
+const struct adreno_reglist a660_hwcg[] = {
+	{REG_A6XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
+	{REG_A6XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
+	{REG_A6XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
+	{REG_A6XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF},
+	{REG_A6XX_RBBM_CLOCK_CNTL_TP0, 0x22222222},
+	{REG_A6XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
+	{REG_A6XX_RBBM_CLOCK_CNTL3_TP0, 0x22222222},
+	{REG_A6XX_RBBM_CLOCK_CNTL4_TP0, 0x00022222},
+	{REG_A6XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
+	{REG_A6XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
+	{REG_A6XX_RBBM_CLOCK_DELAY3_TP0, 0x11111111},
+	{REG_A6XX_RBBM_CLOCK_DELAY4_TP0, 0x00011111},
+	{REG_A6XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
+	{REG_A6XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
+	{REG_A6XX_RBBM_CLOCK_HYST3_TP0, 0x77777777},
+	{REG_A6XX_RBBM_CLOCK_HYST4_TP0, 0x00077777},
+	{REG_A6XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
+	{REG_A6XX_RBBM_CLOCK_CNTL2_RB0, 0x01002222},
+	{REG_A6XX_RBBM_CLOCK_CNTL_CCU0, 0x00002220},
+	{REG_A6XX_RBBM_CLOCK_HYST_RB_CCU0, 0x00040F00},
+	{REG_A6XX_RBBM_CLOCK_CNTL_RAC, 0x25222022},
+	{REG_A6XX_RBBM_CLOCK_CNTL2_RAC, 0x00005555},
+	{REG_A6XX_RBBM_CLOCK_DELAY_RAC, 0x00000011},
+	{REG_A6XX_RBBM_CLOCK_HYST_RAC, 0x00445044},
+	{REG_A6XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
+	{REG_A6XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
+	{REG_A6XX_RBBM_CLOCK_MODE_GPC, 0x00222222},
+	{REG_A6XX_RBBM_CLOCK_DELAY_HLSQ_2, 0x00000002},
+	{REG_A6XX_RBBM_CLOCK_MODE_HLSQ, 0x00002222},
+	{REG_A6XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
+	{REG_A6XX_RBBM_CLOCK_DELAY_VFD, 0x00002222},
+	{REG_A6XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
+	{REG_A6XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
+	{REG_A6XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
+	{REG_A6XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
+	{REG_A6XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
+	{REG_A6XX_RBBM_CLOCK_HYST_HLSQ, 0x00000000},
+	{REG_A6XX_RBBM_CLOCK_CNTL_TEX_FCHE, 0x00000222},
+	{REG_A6XX_RBBM_CLOCK_DELAY_TEX_FCHE, 0x00000111},
+	{REG_A6XX_RBBM_CLOCK_HYST_TEX_FCHE, 0x00000000},
+	{REG_A6XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
+	{REG_A6XX_RBBM_CLOCK_HYST_UCHE, 0x00000004},
+	{REG_A6XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
+	{REG_A6XX_RBBM_ISDB_CNT, 0x00000182},
+	{REG_A6XX_RBBM_RAC_THRESHOLD_CNT, 0x00000000},
+	{REG_A6XX_RBBM_SP_HYST_CNT, 0x00000000},
+	{REG_A6XX_RBBM_CLOCK_CNTL_GMU_GX, 0x00000222},
+	{REG_A6XX_RBBM_CLOCK_DELAY_GMU_GX, 0x00000111},
+	{REG_A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555},
+	{},
+};
+
 static void a6xx_set_hwcg(struct msm_gpu *gpu, bool state)
 {
 	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
@@ -541,6 +594,51 @@ static const u32 a650_protect[] = {
 	A6XX_PROTECT_NORDWR(0x1f8c0, 0x0000), /* note: infinite range */
 };
 
+/* These are for a635 and a660 */
+static const u32 a660_protect[] = {
+	A6XX_PROTECT_RDONLY(0x00000, 0x04ff),
+	A6XX_PROTECT_RDONLY(0x00501, 0x0005),
+	A6XX_PROTECT_RDONLY(0x0050b, 0x02f4),
+	A6XX_PROTECT_NORDWR(0x0050e, 0x0000),
+	A6XX_PROTECT_NORDWR(0x00510, 0x0000),
+	A6XX_PROTECT_NORDWR(0x00534, 0x0000),
+	A6XX_PROTECT_NORDWR(0x00800, 0x0082),
+	A6XX_PROTECT_NORDWR(0x008a0, 0x0008),
+	A6XX_PROTECT_NORDWR(0x008ab, 0x0024),
+	A6XX_PROTECT_RDONLY(0x008de, 0x00ae),
+	A6XX_PROTECT_NORDWR(0x00900, 0x004d),
+	A6XX_PROTECT_NORDWR(0x0098d, 0x0272),
+	A6XX_PROTECT_NORDWR(0x00e00, 0x0001),
+	A6XX_PROTECT_NORDWR(0x00e03, 0x000c),
+	A6XX_PROTECT_NORDWR(0x03c00, 0x00c3),
+	A6XX_PROTECT_RDONLY(0x03cc4, 0x1fff),
+	A6XX_PROTECT_NORDWR(0x08630, 0x01cf),
+	A6XX_PROTECT_NORDWR(0x08e00, 0x0000),
+	A6XX_PROTECT_NORDWR(0x08e08, 0x0000),
+	A6XX_PROTECT_NORDWR(0x08e50, 0x001f),
+	A6XX_PROTECT_NORDWR(0x08e80, 0x027f),
+	A6XX_PROTECT_NORDWR(0x09624, 0x01db),
+	A6XX_PROTECT_NORDWR(0x09e60, 0x0011),
+	A6XX_PROTECT_NORDWR(0x09e78, 0x0187),
+	A6XX_PROTECT_NORDWR(0x0a630, 0x01cf),
+	A6XX_PROTECT_NORDWR(0x0ae02, 0x0000),
+	A6XX_PROTECT_NORDWR(0x0ae50, 0x012f),
+	A6XX_PROTECT_NORDWR(0x0b604, 0x0000),
+	A6XX_PROTECT_NORDWR(0x0b608, 0x0006),
+	A6XX_PROTECT_NORDWR(0x0be02, 0x0001),
+	A6XX_PROTECT_NORDWR(0x0be20, 0x015f),
+	A6XX_PROTECT_NORDWR(0x0d000, 0x05ff),
+	A6XX_PROTECT_NORDWR(0x0f000, 0x0bff),
+	A6XX_PROTECT_RDONLY(0x0fc00, 0x1fff),
+	A6XX_PROTECT_NORDWR(0x18400, 0x1fff),
+	A6XX_PROTECT_NORDWR(0x1a400, 0x1fff),
+	A6XX_PROTECT_NORDWR(0x1f400, 0x0443),
+	A6XX_PROTECT_RDONLY(0x1f844, 0x007b),
+	A6XX_PROTECT_NORDWR(0x1f860, 0x0000),
+	A6XX_PROTECT_NORDWR(0x1f887, 0x001b),
+	A6XX_PROTECT_NORDWR(0x1f8c0, 0x0000), /* note: infinite range */
+};
+
 static void a6xx_set_cp_protect(struct msm_gpu *gpu)
 {
 	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
@@ -554,6 +652,10 @@ static void a6xx_set_cp_protect(struct msm_gpu *gpu)
 		regs = a650_protect;
 		count = ARRAY_SIZE(a650_protect);
 		count_max = 48;
+	} else if (adreno_is_a660(adreno_gpu)) {
+		regs = a660_protect;
+		count = ARRAY_SIZE(a660_protect);
+		count_max = 48;
 	}
 
 	/*
@@ -584,7 +686,7 @@ static void a6xx_set_ubwc_config(struct msm_gpu *gpu)
 	if (adreno_is_a640(adreno_gpu))
 		amsbc = 1;
 
-	if (adreno_is_a650(adreno_gpu)) {
+	if (adreno_is_a650(adreno_gpu) || adreno_is_a660(adreno_gpu)) {
 		/* TODO: get ddr type from bootloader and use 2 for LPDDR4 */
 		lower_bit = 3;
 		amsbc = 1;
@@ -648,6 +750,11 @@ static bool a6xx_ucode_check_version(struct a6xx_gpu *a6xx_gpu,
 	 * Targets up to a640 (a618, a630 and a640) need to check for a
 	 * microcode version that is patched to support the whereami opcode or
 	 * one that is new enough to include it by default.
+	 *
+	 * a650 tier targets don't need whereami but still need to be
+	 * equal to or newer than 0.95 for other security fixes
+	 *
+	 * a660 targets have all the critical security fixes from the start
 	 */
 	if (adreno_is_a618(adreno_gpu) || adreno_is_a630(adreno_gpu) ||
 		adreno_is_a640(adreno_gpu)) {
@@ -671,27 +778,20 @@ static bool a6xx_ucode_check_version(struct a6xx_gpu *a6xx_gpu,
 		DRM_DEV_ERROR(&gpu->pdev->dev,
 			"a630 SQE ucode is too old. Have version %x need at least %x\n",
 			buf[0] & 0xfff, 0x190);
-	} else {
-		/*
-		 * a650 tier targets don't need whereami but still need to be
-		 * equal to or newer than 0.95 for other security fixes
-		 */
-		if (adreno_is_a650(adreno_gpu)) {
-			if ((buf[0] & 0xfff) >= 0x095) {
-				ret = true;
-				goto out;
-			}
-
-			DRM_DEV_ERROR(&gpu->pdev->dev,
-				"a650 SQE ucode is too old. Have version %x need at least %x\n",
-				buf[0] & 0xfff, 0x095);
+	} else if (adreno_is_a650(adreno_gpu)) {
+		if ((buf[0] & 0xfff) >= 0x095) {
+			ret = true;
+			goto out;
 		}
 
-		/*
-		 * When a660 is added those targets should return true here
-		 * since those have all the critical security fixes built in
-		 * from the start
-		 */
+		DRM_DEV_ERROR(&gpu->pdev->dev,
+			"a650 SQE ucode is too old. Have version %x need at least %x\n",
+			buf[0] & 0xfff, 0x095);
+	} else if (adreno_is_a660(adreno_gpu)) {
+		ret = true;
+	} else {
+		DRM_DEV_ERROR(&gpu->pdev->dev,
+			"unknown GPU, add it to a6xx_ucode_check_version()!!\n");
 	}
 
 out:
 	msm_gem_put_vaddr(obj);
@@ -797,7 +897,7 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
 	a6xx_set_hwcg(gpu, true);
 
 	/* VBIF/GBIF start*/
-	if (adreno_is_a640(adreno_gpu) || adreno_is_a650(adreno_gpu)) {
+	if (adreno_is_a640(adreno_gpu) || adreno_is_a650_family(adreno_gpu)) {
 		gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE0, 0x00071620);
 		gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE1, 0x00071620);
 		gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE2, 0x00071620);
@@ -822,7 +922,7 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
 	gpu_write(gpu, REG_A6XX_UCHE_WRITE_THRU_BASE_LO, 0xfffff000);
 	gpu_write(gpu, REG_A6XX_UCHE_WRITE_THRU_BASE_HI, 0x0001ffff);
 
-	if (!adreno_is_a650(adreno_gpu)) {
+	if (!adreno_is_a650_family(adreno_gpu)) {
 		/* Set the GMEM VA range [0x100000:0x100000 + gpu->gmem - 1] */
 		gpu_write64(gpu, REG_A6XX_UCHE_GMEM_RANGE_MIN_LO,
 			REG_A6XX_UCHE_GMEM_RANGE_MIN_HI, 0x00100000);
@@ -835,19 +935,22 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
 	gpu_write(gpu, REG_A6XX_UCHE_FILTER_CNTL, 0x804);
 	gpu_write(gpu, REG_A6XX_UCHE_CACHE_WAYS, 0x4);
 
-	if (adreno_is_a640(adreno_gpu) || adreno_is_a650(adreno_gpu))
+	if (adreno_is_a640(adreno_gpu) || adreno_is_a650_family(adreno_gpu))
 		gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_2, 0x02000140);
 	else
 		gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_2, 0x010000c0);
 
 	gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_1, 0x8040362c);
 
+	if (adreno_is_a660(adreno_gpu))
+		gpu_write(gpu, REG_A6XX_CP_LPAC_PROG_FIFO_SIZE, 0x00000020);
+
 	/* Setting the mem pool size */
 	gpu_write(gpu, REG_A6XX_CP_MEM_POOL_SIZE, 128);
 
 	/* Setting the primFifo thresholds default values,
 	 * and vccCacheSkipDis=1 bit (0x200) for A640 and newer */
-	if (adreno_is_a650(adreno_gpu))
+	if (adreno_is_a650(adreno_gpu) || adreno_is_a660(adreno_gpu))
 		gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00300200);
 	else if (adreno_is_a640(adreno_gpu))
 		gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00200200);
@@ -872,7 +975,7 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
 	gpu_write(gpu, REG_A6XX_UCHE_CLIENT_PF, 1);
 
 	/* Set weights for bicubic filtering */
-	if (adreno_is_a650(adreno_gpu)) {
+	if (adreno_is_a650_family(adreno_gpu)) {
 		gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_0, 0);
 		gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_1,
 			0x3fe05ff4);
@@ -887,6 +990,13 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
 	/* Protect registers from the CP */
 	a6xx_set_cp_protect(gpu);
 
+	if (adreno_is_a660(adreno_gpu)) {
+		gpu_write(gpu, REG_A6XX_CP_CHICKEN_DBG, 0x1);
+		gpu_write(gpu, REG_A6XX_RBBM_GBIF_CLIENT_QOS_CNTL, 0x0);
+		/* Set dualQ + disable afull for A660 GPU but not for A635 */
+		gpu_write(gpu, REG_A6XX_UCHE_CMDQ_CONFIG, 0x66906);
+	}
+
 	/* Enable expanded apriv for targets that support it */
 	if (gpu->hw_apriv) {
 		gpu_write(gpu, REG_A6XX_CP_APRIV_CNTL,
@@ -1558,7 +1668,7 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
 	 */
 	info = adreno_info(config->rev);
-	if (info && info->revn == 650)
+	if (info && (info->revn == 650 || info->revn == 660))
 		adreno_gpu->base.hw_apriv = true;
 
 	a6xx_llc_slices_init(pdev, a6xx_gpu);
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
index ccd44d0418f8..919433732b43 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
@@ -351,6 +351,37 @@ static void a650_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
 	msg->cnoc_cmds_data[1][0] = 0x60000001;
 }
 
+static void a660_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
+{
+	/*
+	 * Send a single "off" entry just to get things running
+	 * TODO: bus scaling
+	 */
+	msg->bw_level_num = 1;
+
+	msg->ddr_cmds_num = 3;
+	msg->ddr_wait_bitmask = 0x01;
+
+	msg->ddr_cmds_addrs[0] = 0x50004;
+	msg->ddr_cmds_addrs[1] = 0x500a0;
+	msg->ddr_cmds_addrs[2] = 0x50000;
+
+	msg->ddr_cmds_data[0][0] = 0x40000000;
+	msg->ddr_cmds_data[0][1] = 0x40000000;
+	msg->ddr_cmds_data[0][2] = 0x40000000;
+
+	/*
+	 * These are the CX (CNOC) votes - these are used by the GMU but the
+	 * votes are known and fixed for the target
+	 */
+	msg->cnoc_cmds_num = 1;
+	msg->cnoc_wait_bitmask = 0x01;
+
+	msg->cnoc_cmds_addrs[0] = 0x50070;
+	msg->cnoc_cmds_data[0][0] = 0x40000000;
+	msg->cnoc_cmds_data[1][0] = 0x60000001;
+}
+
 static void a6xx_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
 {
 	/* Send a single "off" entry since the 630 GMU doesn't do bus scaling */
@@ -401,6 +432,8 @@ static int a6xx_hfi_send_bw_table(struct a6xx_gmu *gmu)
 		a640_build_bw_table(&msg);
 	else if (adreno_is_a650(adreno_gpu))
 		a650_build_bw_table(&msg);
+	else if (adreno_is_a660(adreno_gpu))
+		a660_build_bw_table(&msg);
 	else
 		a6xx_build_bw_table(&msg);
 
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
index b3337b93be91..6dad8015c9a1 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -287,6 +287,19 @@ static const struct adreno_info gpulist[] = {
 		.init = a6xx_gpu_init,
 		.zapfw = "a650_zap.mdt",
 		.hwcg = a650_hwcg,
+	}, {
+		.rev = ADRENO_REV(6, 6, 0, ANY_ID),
+		.revn = 660,
+		.name = "A660",
+		.fw = {
+			[ADRENO_FW_SQE] = "a660_sqe.fw",
+			[ADRENO_FW_GMU] = "a660_gmu.bin",
+		},
+		.gmem = SZ_1M + SZ_512K,
+		.inactive_period = DRM_MSM_INACTIVE_PERIOD,
+		.init = a6xx_gpu_init,
+		.zapfw = "a660_zap.mdt",
+		.hwcg = a660_hwcg,
 	},
 };
 
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 3a84a7b312ff..90bc8637ee52 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -239,7 +239,7 @@ int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
 		*value = adreno_gpu->gmem;
 		return 0;
 	case MSM_PARAM_GMEM_BASE:
-		*value = !adreno_is_a650(adreno_gpu) ? 0x100000 : 0;
+		*value = !adreno_is_a650_family(adreno_gpu) ? 0x100000 : 0;
 		return 0;
 	case MSM_PARAM_CHIP_ID:
 		*value = adreno_gpu->rev.patchid |
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index ccac275aa7a2..8dbe0d157520 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -55,7 +55,7 @@ struct adreno_reglist {
 	u32 value;
 };
 
-extern const struct adreno_reglist a630_hwcg[], a640_hwcg[], a650_hwcg[];
+extern const struct adreno_reglist a630_hwcg[], a640_hwcg[], a650_hwcg[], a660_hwcg[];
 
 struct adreno_info {
 	struct adreno_rev rev;
@@ -247,6 +247,17 @@ static inline int adreno_is_a650(struct adreno_gpu *gpu)
 	return gpu->revn == 650;
 }
 
+static inline int adreno_is_a660(struct adreno_gpu *gpu)
+{
+	return gpu->revn == 660;
+}
+
+/* check for a650, a660, or any derivatives */
+static inline int adreno_is_a650_family(struct adreno_gpu *gpu)
+{
+	return gpu->revn == 650 || gpu->revn == 620 || gpu->revn == 660;
+}
+
 int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value);
 const struct firmware *adreno_request_fw(struct adreno_gpu *adreno_gpu,
 	const char *fwname);