author | Nick Terrell <terrelln@fb.com> | 2022-12-13 16:21:55 -0800
---|---|---
committer | Nick Terrell <terrelln@fb.com> | 2022-12-13 16:21:55 -0800
commit | 4f2c0a4acffbec01079c28f839422e64ddeff004 (patch)
tree | 06ada4a8a6d94a94c93944806041b8c994cebfc5 /drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
parent | 88a309465b3f05a100c3b81966982c0f9f5d23a6 (diff)
parent | 830b3c68c1fb1e9176028d02ef86f3cf76aa2476 (diff)
Merge branch 'main' into zstd-linus
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_job.c')
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 107 |
1 file changed, 69 insertions, 38 deletions
```diff
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index bfc47bea23db..adac650cf544 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -29,6 +29,7 @@
 
 #include "amdgpu.h"
 #include "amdgpu_trace.h"
+#include "amdgpu_reset.h"
 
 static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
 {
@@ -37,6 +38,7 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
         struct amdgpu_task_info ti;
         struct amdgpu_device *adev = ring->adev;
         int idx;
+        int r;
 
         if (!drm_dev_enter(adev_to_drm(adev), &idx)) {
                 DRM_INFO("%s - device unplugged skipping recovery on scheduler:%s",
@@ -47,6 +49,7 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
         }
 
         memset(&ti, 0, sizeof(struct amdgpu_task_info));
+        adev->job_hang = true;
 
         if (amdgpu_gpu_recovery &&
             amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) {
@@ -63,7 +66,16 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
                   ti.process_name, ti.tgid, ti.task_name, ti.pid);
 
         if (amdgpu_device_should_recover_gpu(ring->adev)) {
-                amdgpu_device_gpu_recover(ring->adev, job);
+                struct amdgpu_reset_context reset_context;
+                memset(&reset_context, 0, sizeof(reset_context));
+
+                reset_context.method = AMD_RESET_METHOD_NONE;
+                reset_context.reset_req_dev = adev;
+                clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
+
+                r = amdgpu_device_gpu_recover(ring->adev, job, &reset_context);
+                if (r)
+                        DRM_ERROR("GPU Recovery Failed: %d\n", r);
         } else {
                 drm_sched_suspend_timeout(&ring->sched);
                 if (amdgpu_sriov_vf(adev))
@@ -71,6 +83,7 @@
         }
 
 exit:
+        adev->job_hang = false;
         drm_dev_exit(idx);
         return DRM_GPU_SCHED_STAT_NOMINAL;
 }
@@ -78,14 +91,10 @@ exit:
 int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
                      struct amdgpu_job **job, struct amdgpu_vm *vm)
 {
-        size_t size = sizeof(struct amdgpu_job);
-
         if (num_ibs == 0)
                 return -EINVAL;
 
-        size += sizeof(struct amdgpu_ib) * num_ibs;
-
-        *job = kzalloc(size, GFP_KERNEL);
+        *job = kzalloc(struct_size(*job, ibs, num_ibs), GFP_KERNEL);
         if (!*job)
                 return -ENOMEM;
 
@@ -95,8 +104,6 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
          */
         (*job)->base.sched = &adev->rings[0]->sched;
         (*job)->vm = vm;
-        (*job)->ibs = (void *)&(*job)[1];
-        (*job)->num_ibs = num_ibs;
 
         amdgpu_sync_create(&(*job)->sync);
         amdgpu_sync_create(&(*job)->sched_sync);
@@ -116,6 +123,7 @@ int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
         if (r)
                 return r;
 
+        (*job)->num_ibs = 1;
         r = amdgpu_ib_get(adev, NULL, size, pool_type, &(*job)->ibs[0]);
         if (r)
                 kfree(*job);
@@ -123,20 +131,31 @@ int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
         return r;
 }
 
+void amdgpu_job_set_resources(struct amdgpu_job *job, struct amdgpu_bo *gds,
+                              struct amdgpu_bo *gws, struct amdgpu_bo *oa)
+{
+        if (gds) {
+                job->gds_base = amdgpu_bo_gpu_offset(gds) >> PAGE_SHIFT;
+                job->gds_size = amdgpu_bo_size(gds) >> PAGE_SHIFT;
+        }
+        if (gws) {
+                job->gws_base = amdgpu_bo_gpu_offset(gws) >> PAGE_SHIFT;
+                job->gws_size = amdgpu_bo_size(gws) >> PAGE_SHIFT;
+        }
+        if (oa) {
+                job->oa_base = amdgpu_bo_gpu_offset(oa) >> PAGE_SHIFT;
+                job->oa_size = amdgpu_bo_size(oa) >> PAGE_SHIFT;
+        }
+}
+
 void amdgpu_job_free_resources(struct amdgpu_job *job)
 {
         struct amdgpu_ring *ring = to_amdgpu_ring(job->base.sched);
         struct dma_fence *f;
-        struct dma_fence *hw_fence;
         unsigned i;
 
-        if (job->hw_fence.ops == NULL)
-                hw_fence = job->external_hw_fence;
-        else
-                hw_fence = &job->hw_fence;
-
         /* use sched fence if available */
-        f = job->base.s_fence ? &job->base.s_fence->finished : hw_fence;
+        f = job->base.s_fence ? &job->base.s_fence->finished : &job->hw_fence;
         for (i = 0; i < job->num_ibs; ++i)
                 amdgpu_ib_free(ring->adev, &job->ibs[i], f);
 }
@@ -150,11 +169,27 @@ static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
         amdgpu_sync_free(&job->sync);
         amdgpu_sync_free(&job->sched_sync);
 
-        /* only put the hw fence if has embedded fence */
-        if (job->hw_fence.ops != NULL)
-                dma_fence_put(&job->hw_fence);
-        else
+        /* only put the hw fence if has embedded fence */
+        if (!job->hw_fence.ops)
                 kfree(job);
+        else
+                dma_fence_put(&job->hw_fence);
+}
+
+void amdgpu_job_set_gang_leader(struct amdgpu_job *job,
+                                struct amdgpu_job *leader)
+{
+        struct dma_fence *fence = &leader->base.s_fence->scheduled;
+
+        WARN_ON(job->gang_submit);
+
+        /*
+         * Don't add a reference when we are the gang leader to avoid circle
+         * dependency.
+         */
+        if (job != leader)
+                dma_fence_get(fence);
+        job->gang_submit = fence;
 }
 
 void amdgpu_job_free(struct amdgpu_job *job)
@@ -162,12 +197,13 @@ void amdgpu_job_free(struct amdgpu_job *job)
         amdgpu_job_free_resources(job);
         amdgpu_sync_free(&job->sync);
         amdgpu_sync_free(&job->sched_sync);
+        if (job->gang_submit != &job->base.s_fence->scheduled)
+                dma_fence_put(job->gang_submit);
 
-        /* only put the hw fence if has embedded fence */
-        if (job->hw_fence.ops != NULL)
-                dma_fence_put(&job->hw_fence);
-        else
+        if (!job->hw_fence.ops)
                 kfree(job);
+        else
+                dma_fence_put(&job->hw_fence);
 }
 
 int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
@@ -197,15 +233,12 @@ int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring,
         int r;
 
         job->base.sched = &ring->sched;
-        r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, NULL, fence);
-        /* record external_hw_fence for direct submit */
-        job->external_hw_fence = dma_fence_get(*fence);
+        r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, job, fence);
+
         if (r)
                 return r;
 
         amdgpu_job_free(job);
-        dma_fence_put(*fence);
-
         return 0;
 }
 
@@ -225,6 +258,9 @@ static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
                         DRM_ERROR("Error adding fence (%d)\n", r);
         }
 
+        if (!fence && job->gang_submit)
+                fence = amdgpu_device_switch_gang(ring->adev, job->gang_submit);
+
         while (fence == NULL && vm && !job->vmid) {
                 r = amdgpu_vmid_grab(vm, ring, &job->sync,
                                      &job->base.s_fence->finished,
@@ -241,6 +277,7 @@
 static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
 {
         struct amdgpu_ring *ring = to_amdgpu_ring(sched_job->sched);
+        struct amdgpu_device *adev = ring->adev;
         struct dma_fence *fence = NULL, *finished;
         struct amdgpu_job *job;
         int r = 0;
@@ -252,8 +289,10 @@ static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
 
         trace_amdgpu_sched_run_job(job);
 
-        if (job->vram_lost_counter != atomic_read(&ring->adev->vram_lost_counter))
-                dma_fence_set_error(finished, -ECANCELED);/* skip IB as well if VRAM lost */
+        /* Skip job if VRAM is lost and never resubmit gangs */
+        if (job->vram_lost_counter != atomic_read(&adev->vram_lost_counter) ||
+            (job->job_run_counter && job->gang_submit))
+                dma_fence_set_error(finished, -ECANCELED);
 
         if (finished->error < 0) {
                 DRM_INFO("Skip scheduling IBs!\n");
@@ -264,10 +303,6 @@ static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
                         DRM_ERROR("Error scheduling IBs (%d)\n", r);
         }
 
-        if (!job->job_run_counter)
-                dma_fence_get(fence);
-        else if (finished->error < 0)
-                dma_fence_put(&job->hw_fence);
         job->job_run_counter++;
         amdgpu_job_free_resources(job);
 
@@ -287,10 +322,6 @@ void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched)
         /* Signal all jobs not yet scheduled */
         for (i = DRM_SCHED_PRIORITY_COUNT - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
                 struct drm_sched_rq *rq = &sched->sched_rq[i];
-
-                if (!rq)
-                        continue;
-
                 spin_lock(&rq->lock);
                 list_for_each_entry(s_entity, &rq->entities, list) {
                         while ((s_job = to_drm_sched_job(spsc_queue_pop(&s_entity->job_queue)))) {
```