From 2c5bf028ef34745e7b3fe768f9c9355ecc7df101 Mon Sep 17 00:00:00 2001
From: Christian Gmeiner
Date: Sun, 23 Aug 2020 21:09:22 +0200
Subject: drm/etnaviv: fix external abort seen on GC600 rev 0x19

It looks like this GPU core triggers an abort when reading
VIVS_HI_CHIP_PRODUCT_ID and/or VIVS_HI_CHIP_ECO_ID. I looked at
different versions of Vivante's kernel driver and did not find
anything about this issue or which feature flag could be used. So
take the simplest route and do not read these two registers on the
affected GPU core.

Signed-off-by: Christian Gmeiner
Reported-by: Josua Mayer
Fixes: 815e45bbd4d3 ("drm/etnaviv: determine product, customer and eco id")
Cc: stable@vger.kernel.org
Tested-by: Josua Mayer
Signed-off-by: Lucas Stach
---
 drivers/gpu/drm/etnaviv/etnaviv_gpu.c | 11 +++++++++--
 1 file changed, 9 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index d5a4cd85a0f6..c6404b8d067f 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -337,9 +337,16 @@ static void etnaviv_hw_identify(struct etnaviv_gpu *gpu)
 
 	gpu->identity.model = gpu_read(gpu, VIVS_HI_CHIP_MODEL);
 	gpu->identity.revision = gpu_read(gpu, VIVS_HI_CHIP_REV);
-	gpu->identity.product_id = gpu_read(gpu, VIVS_HI_CHIP_PRODUCT_ID);
 	gpu->identity.customer_id = gpu_read(gpu, VIVS_HI_CHIP_CUSTOMER_ID);
-	gpu->identity.eco_id = gpu_read(gpu, VIVS_HI_CHIP_ECO_ID);
+
+	/*
+	 * Reading these two registers on GC600 rev 0x19 results in an
+	 * unhandled fault: external abort on non-linefetch
+	 */
+	if (!etnaviv_is_model_rev(gpu, GC600, 0x19)) {
+		gpu->identity.product_id = gpu_read(gpu, VIVS_HI_CHIP_PRODUCT_ID);
+		gpu->identity.eco_id = gpu_read(gpu, VIVS_HI_CHIP_ECO_ID);
+	}
 
 	/*
 	 * !!!! HACK ALERT !!!!
-- 
cgit v1.2.3


From 50248a3ec0f5e5debd18033eb2a29f0b793a7000 Mon Sep 17 00:00:00 2001
From: Lucas Stach
Date: Mon, 24 Aug 2020 12:55:37 +0200
Subject: drm/etnaviv: always start/stop scheduler in timeout processing

The drm scheduler currently expects that the stop/start sequence is
always executed in the timeout handling, as the job at the head of the
hardware execution list is always removed from the ring mirror before
the driver function is called and only inserted back into the list
when starting the scheduler.

This adds some unnecessary overhead if the timeout handler determines
that the GPU is still executing jobs normally and just wishes to
extend the timeout, but a better solution requires a major
rearchitecture of the scheduler, which is not applicable as a fix.

Fixes: 135517d3565b ("drm/scheduler: Avoid accessing freed bad job.")
Signed-off-by: Lucas Stach
Tested-by: Russell King
---
 drivers/gpu/drm/etnaviv/etnaviv_sched.c | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
index 4e3e95dce6d8..cd46c882269c 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
@@ -89,12 +89,15 @@ static void etnaviv_sched_timedout_job(struct drm_sched_job *sched_job)
 	u32 dma_addr;
 	int change;
 
+	/* block scheduler */
+	drm_sched_stop(&gpu->sched, sched_job);
+
 	/*
 	 * If the GPU managed to complete this jobs fence, the timout is
 	 * spurious. Bail out.
 	 */
 	if (dma_fence_is_signaled(submit->out_fence))
-		return;
+		goto out_no_timeout;
 
 	/*
 	 * If the GPU is still making forward progress on the front-end (which
@@ -105,12 +108,9 @@ static void etnaviv_sched_timedout_job(struct drm_sched_job *sched_job)
 	change = dma_addr - gpu->hangcheck_dma_addr;
 	if (change < 0 || change > 16) {
 		gpu->hangcheck_dma_addr = dma_addr;
-		return;
+		goto out_no_timeout;
 	}
 
-	/* block scheduler */
-	drm_sched_stop(&gpu->sched, sched_job);
-
 	if(sched_job)
 		drm_sched_increase_karma(sched_job);
 
@@ -120,6 +120,7 @@ static void etnaviv_sched_timedout_job(struct drm_sched_job *sched_job)
 
 	drm_sched_resubmit_jobs(&gpu->sched);
 
+out_no_timeout:
 	/* restart scheduler after GPU is usable again */
 	drm_sched_start(&gpu->sched, true);
 }
-- 
cgit v1.2.3
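A note on the helper used in the first patch: etnaviv_is_model_rev()
compares the model and revision values read earlier in
etnaviv_hw_identify(), which is why VIVS_HI_CHIP_MODEL and
VIVS_HI_CHIP_REV must still be read before the quirk check can gate the
two faulting register reads. A minimal sketch of such a macro follows;
the chipModel_ token-pasting mirrors the in-tree definition in
etnaviv_gpu.c, but treat the exact spelling as an assumption rather
than a verbatim quote:

/*
 * Sketch of a model/revision check like the one used above.
 * chipModel_GC600 comes from the generated Vivante register headers;
 * the token-pasting detail is an assumption, not a verbatim copy.
 */
#define etnaviv_is_model_rev(gpu, mod, rev) \
	((gpu)->identity.model == chipModel_##mod && \
	 (gpu)->identity.revision == rev)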
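The second patch is easier to follow as the resulting control flow:
drm_sched_stop() now runs unconditionally at the top of the handler, so
every exit path, including the two early bail-outs, passes through
drm_sched_start(). Below is a simplified sketch of the function after
the patch; anything not visible in the hunks above (the submit/gpu
setup, the VIVS_FE_DMA_ADDRESS read, the elided recovery step) is
inferred from the surrounding driver code and should be treated as
illustrative:

static void etnaviv_sched_timedout_job(struct drm_sched_job *sched_job)
{
	struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
	struct etnaviv_gpu *gpu = submit->gpu;
	u32 dma_addr;
	int change;

	/* block scheduler: the job leaves the ring mirror until restart */
	drm_sched_stop(&gpu->sched, sched_job);

	/* spurious timeout: the job's fence already signaled */
	if (dma_fence_is_signaled(submit->out_fence))
		goto out_no_timeout;

	/* front-end DMA still advancing: extend the timeout instead */
	dma_addr = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
	change = dma_addr - gpu->hangcheck_dma_addr;
	if (change < 0 || change > 16) {
		gpu->hangcheck_dma_addr = dma_addr;
		goto out_no_timeout;
	}

	if (sched_job)
		drm_sched_increase_karma(sched_job);

	/* driver-specific core dump and GPU recovery happen here (elided) */

	drm_sched_resubmit_jobs(&gpu->sched);

out_no_timeout:
	/* restart scheduler after GPU is usable again */
	drm_sched_start(&gpu->sched, true);
}

Note how the stop/start pair now brackets all three outcomes (spurious
timeout, extended timeout, real hang), which is exactly the sequence
the drm scheduler's ring-mirror handling expects.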