From b76a4b44cd8239465ecf4b49a7712023db33fdf5 Mon Sep 17 00:00:00 2001
From: Rob Clark
Date: Mon, 20 Mar 2023 07:43:35 -0700
Subject: drm/msm/gpu: Move BO allocation out of hw_init

These allocations are only done the first (successful) time through
hw_init() so they won't actually happen in the job_run() path.  But
lockdep doesn't know this.  So dis-entangle them from the hw_init()
path.

Signed-off-by: Rob Clark
Patchwork: https://patchwork.freedesktop.org/patch/527852/
Link: https://lore.kernel.org/r/20230320144356.803762-14-robdclark@gmail.com
---
 drivers/gpu/drm/msm/adreno/a5xx_gpu.c      | 48 ++++++++++++++----------------
 drivers/gpu/drm/msm/adreno/a6xx_gpu.c      | 46 ++++++++++++++--------------
 drivers/gpu/drm/msm/adreno/adreno_device.c |  6 ++++
 drivers/gpu/drm/msm/msm_gpu.h              |  6 ++++
 4 files changed, 57 insertions(+), 49 deletions(-)

diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index a1e006ec5dce..1614ee757350 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -567,7 +567,7 @@ static void a5xx_ucode_check_version(struct a5xx_gpu *a5xx_gpu,
 	msm_gem_put_vaddr(obj);
 }
 
-static int a5xx_ucode_init(struct msm_gpu *gpu)
+static int a5xx_ucode_load(struct msm_gpu *gpu)
 {
 	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
 	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
@@ -605,9 +605,24 @@ static int a5xx_ucode_init(struct msm_gpu *gpu)
 		a5xx_ucode_check_version(a5xx_gpu, a5xx_gpu->pfp_bo);
 	}
 
-	gpu_write64(gpu, REG_A5XX_CP_ME_INSTR_BASE_LO, a5xx_gpu->pm4_iova);
+	if (a5xx_gpu->has_whereami) {
+		if (!a5xx_gpu->shadow_bo) {
+			a5xx_gpu->shadow = msm_gem_kernel_new(gpu->dev,
+				sizeof(u32) * gpu->nr_rings,
+				MSM_BO_WC | MSM_BO_MAP_PRIV,
+				gpu->aspace, &a5xx_gpu->shadow_bo,
+				&a5xx_gpu->shadow_iova);
 
-	gpu_write64(gpu, REG_A5XX_CP_PFP_INSTR_BASE_LO, a5xx_gpu->pfp_iova);
+			if (IS_ERR(a5xx_gpu->shadow))
+				return PTR_ERR(a5xx_gpu->shadow);
+
+			msm_gem_object_set_name(a5xx_gpu->shadow_bo, "shadow");
+		}
+	} else if (gpu->nr_rings > 1) {
+		/* Disable preemption if WHERE_AM_I isn't available */
+		a5xx_preempt_fini(gpu);
+		gpu->nr_rings = 1;
+	}
 
 	return 0;
 }
@@ -900,9 +915,8 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
 	if (adreno_is_a530(adreno_gpu) || adreno_is_a540(adreno_gpu))
 		a5xx_gpmu_ucode_init(gpu);
 
-	ret = a5xx_ucode_init(gpu);
-	if (ret)
-		return ret;
+	gpu_write64(gpu, REG_A5XX_CP_ME_INSTR_BASE_LO, a5xx_gpu->pm4_iova);
+	gpu_write64(gpu, REG_A5XX_CP_PFP_INSTR_BASE_LO, a5xx_gpu->pfp_iova);
 
 	/* Set the ringbuffer address */
 	gpu_write64(gpu, REG_A5XX_CP_RB_BASE, gpu->rb[0]->iova);
@@ -916,27 +930,10 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
 	gpu_write(gpu, REG_A5XX_CP_RB_CNTL,
 		MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE);
 
-	/* Create a privileged buffer for the RPTR shadow */
-	if (a5xx_gpu->has_whereami) {
-		if (!a5xx_gpu->shadow_bo) {
-			a5xx_gpu->shadow = msm_gem_kernel_new(gpu->dev,
-				sizeof(u32) * gpu->nr_rings,
-				MSM_BO_WC | MSM_BO_MAP_PRIV,
-				gpu->aspace, &a5xx_gpu->shadow_bo,
-				&a5xx_gpu->shadow_iova);
-
-			if (IS_ERR(a5xx_gpu->shadow))
-				return PTR_ERR(a5xx_gpu->shadow);
-
-			msm_gem_object_set_name(a5xx_gpu->shadow_bo, "shadow");
-		}
-
+	/* Configure the RPTR shadow if needed: */
+	if (a5xx_gpu->shadow_bo) {
 		gpu_write64(gpu, REG_A5XX_CP_RB_RPTR_ADDR,
 			    shadowptr(a5xx_gpu, gpu->rb[0]));
-	} else if (gpu->nr_rings > 1) {
-		/* Disable preemption if WHERE_AM_I isn't available */
-		a5xx_preempt_fini(gpu);
-		gpu->nr_rings = 1;
 	}
 
 	a5xx_preempt_hw_init(gpu);
@@ -1682,6 +1679,7 @@ static const struct adreno_gpu_funcs funcs = {
 	.get_param = adreno_get_param,
 	.set_param = adreno_set_param,
 	.hw_init = a5xx_hw_init,
+	.ucode_load = a5xx_ucode_load,
 	.pm_suspend = a5xx_pm_suspend,
 	.pm_resume = a5xx_pm_resume,
 	.recover = a5xx_recover,
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index 1e09777cce3f..0f6ed7a3f712 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -917,7 +917,7 @@ out:
 	return ret;
 }
 
-static int a6xx_ucode_init(struct msm_gpu *gpu)
+static int a6xx_ucode_load(struct msm_gpu *gpu)
 {
 	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
 	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
@@ -946,7 +946,23 @@ static int a6xx_ucode_init(struct msm_gpu *gpu)
 		}
 	}
 
-	gpu_write64(gpu, REG_A6XX_CP_SQE_INSTR_BASE, a6xx_gpu->sqe_iova);
+	/*
+	 * Expanded APRIV and targets that support WHERE_AM_I both need a
+	 * privileged buffer to store the RPTR shadow
+	 */
+	if ((adreno_gpu->base.hw_apriv || a6xx_gpu->has_whereami) &&
+	    !a6xx_gpu->shadow_bo) {
+		a6xx_gpu->shadow = msm_gem_kernel_new(gpu->dev,
+			sizeof(u32) * gpu->nr_rings,
+			MSM_BO_WC | MSM_BO_MAP_PRIV,
+			gpu->aspace, &a6xx_gpu->shadow_bo,
+			&a6xx_gpu->shadow_iova);
+
+		if (IS_ERR(a6xx_gpu->shadow))
+			return PTR_ERR(a6xx_gpu->shadow);
+
+		msm_gem_object_set_name(a6xx_gpu->shadow_bo, "shadow");
+	}
 
 	return 0;
 }
@@ -1132,9 +1148,7 @@ static int hw_init(struct msm_gpu *gpu)
 	if (ret)
 		goto out;
 
-	ret = a6xx_ucode_init(gpu);
-	if (ret)
-		goto out;
+	gpu_write64(gpu, REG_A6XX_CP_SQE_INSTR_BASE, a6xx_gpu->sqe_iova);
 
 	/* Set the ringbuffer address */
 	gpu_write64(gpu, REG_A6XX_CP_RB_BASE, gpu->rb[0]->iova);
@@ -1149,25 +1163,8 @@ static int hw_init(struct msm_gpu *gpu)
 	gpu_write(gpu, REG_A6XX_CP_RB_CNTL,
 		MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE);
 
-	/*
-	 * Expanded APRIV and targets that support WHERE_AM_I both need a
-	 * privileged buffer to store the RPTR shadow
-	 */
-
-	if (adreno_gpu->base.hw_apriv || a6xx_gpu->has_whereami) {
-		if (!a6xx_gpu->shadow_bo) {
-			a6xx_gpu->shadow = msm_gem_kernel_new(gpu->dev,
-				sizeof(u32) * gpu->nr_rings,
-				MSM_BO_WC | MSM_BO_MAP_PRIV,
-				gpu->aspace, &a6xx_gpu->shadow_bo,
-				&a6xx_gpu->shadow_iova);
-
-			if (IS_ERR(a6xx_gpu->shadow))
-				return PTR_ERR(a6xx_gpu->shadow);
-
-			msm_gem_object_set_name(a6xx_gpu->shadow_bo, "shadow");
-		}
-
+	/* Configure the RPTR shadow if needed: */
+	if (a6xx_gpu->shadow_bo) {
 		gpu_write64(gpu, REG_A6XX_CP_RB_RPTR_ADDR,
 			    shadowptr(a6xx_gpu, gpu->rb[0]));
 	}
@@ -1958,6 +1955,7 @@ static const struct adreno_gpu_funcs funcs = {
 	.get_param = adreno_get_param,
 	.set_param = adreno_set_param,
 	.hw_init = a6xx_hw_init,
+	.ucode_load = a6xx_ucode_load,
 	.pm_suspend = a6xx_pm_suspend,
 	.pm_resume = a6xx_pm_resume,
 	.recover = a6xx_recover,
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
index 745f59682737..4d1448714285 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -432,6 +432,12 @@ struct msm_gpu *adreno_load_gpu(struct drm_device *dev)
 	if (ret)
 		return NULL;
 
+	if (gpu->funcs->ucode_load) {
+		ret = gpu->funcs->ucode_load(gpu);
+		if (ret)
+			return NULL;
+	}
+
 	/*
 	 * Now that we have firmware loaded, and are ready to begin
 	 * booting the gpu, go ahead and enable runpm:
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index 84c616b1ebc0..7a4fa1b8655b 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -49,6 +49,12 @@ struct msm_gpu_funcs {
 	int (*set_param)(struct msm_gpu *gpu, struct msm_file_private *ctx,
 			 uint32_t param, uint64_t value, uint32_t len);
 	int (*hw_init)(struct msm_gpu *gpu);
+
+	/**
+	 * @ucode_load: Optional hook to upload fw to GEM objs
+	 */
+	int (*ucode_load)(struct msm_gpu *gpu);
+
 	int (*pm_suspend)(struct msm_gpu *gpu);
 	int (*pm_resume)(struct msm_gpu *gpu);
 	void (*submit)(struct msm_gpu *gpu, struct msm_gem_submit *submit);
--
cgit v1.2.3
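
For illustration only, not part of the patch above: a minimal sketch of how another Adreno backend could implement the new optional ->ucode_load hook, following the same pattern as the a5xx/a6xx changes — do the one-time GEM allocations at load time (called once from adreno_load_gpu()) and leave only register programming in ->hw_init, which also runs on GPU recovery. The foo_* names, struct layout, and register below are hypothetical; msm_gem_kernel_new(), msm_gem_object_set_name(), and the .base/.ucode_load wiring are taken from the patch.

#include "adreno_gpu.h"	/* adreno_gpu_funcs, to_adreno_gpu(), msm_gem_kernel_new() */

/* Hypothetical backend state, mirroring the a5xx/a6xx shadow fields */
struct foo_gpu {
	struct adreno_gpu base;
	struct drm_gem_object *shadow_bo;
	uint64_t shadow_iova;
	uint32_t *shadow;
};
#define to_foo_gpu(x) container_of(x, struct foo_gpu, base)

static int foo_ucode_load(struct msm_gpu *gpu)
{
	struct foo_gpu *foo_gpu = to_foo_gpu(to_adreno_gpu(gpu));

	if (!foo_gpu->shadow_bo) {
		/* Privileged, write-combined buffer for the RPTR shadow,
		 * allocated once here instead of in hw_init():
		 */
		foo_gpu->shadow = msm_gem_kernel_new(gpu->dev,
			sizeof(u32) * gpu->nr_rings,
			MSM_BO_WC | MSM_BO_MAP_PRIV,
			gpu->aspace, &foo_gpu->shadow_bo,
			&foo_gpu->shadow_iova);
		if (IS_ERR(foo_gpu->shadow))
			return PTR_ERR(foo_gpu->shadow);

		msm_gem_object_set_name(foo_gpu->shadow_bo, "shadow");
	}

	return 0;
}

static const struct adreno_gpu_funcs funcs = {
	.base = {
		.hw_init = foo_hw_init,		/* register programming only */
		.ucode_load = foo_ucode_load,	/* optional; skipped when NULL */
	},
};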