author		Dave Airlie <airlied@redhat.com>	2015-08-27 13:00:28 +1000
committer	Dave Airlie <airlied@redhat.com>	2015-08-27 13:00:28 +1000
commit		40b2dffbcc67e92d5df97785dffc68fe88605bfa (patch)
tree		91276b6ae4210791ad4494adaf69a56b16c7b0ac /drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
parent		db56176025cee5e242dfeed5f4e304d095d29fa3 (diff)
parent		c2b6bd7e91aad8440a2f55bdbde6f5a8ae19fac5 (diff)
Merge branch 'drm-next-4.3' of git://people.freedesktop.org/~agd5f/linux into drm-next
- DP fixes for radeon and amdgpu
- IH ring fix for tonga and fiji
- Lots of GPU scheduler fixes
- Misc additional fixes
* 'drm-next-4.3' of git://people.freedesktop.org/~agd5f/linux: (42 commits)
drm/amdgpu: fix wait queue handling in the scheduler
drm/amdgpu: remove extra parameters from scheduler callbacks
drm/amdgpu: wake up scheduler only when necessary
drm/amdgpu: remove entity idle timeout v2
drm/amdgpu: fix postclose order
drm/amdgpu: use IB for copy buffer of eviction
drm/amdgpu: adjust the judgement of removing fence callback
drm/amdgpu: fix no sync_wait in copy_buffer
drm/amdgpu: fix last_vm_update fence is not effective for sched fence
drm/amdgpu: add priv data to sched
drm/amdgpu: add owner for sched fence
drm/amdgpu: remove entity reference from sched fence
drm/amdgpu: fix and cleanup amd_sched_entity_push_job
drm/amdgpu: remove amdgpu_bo_list_clone
drm/amdgpu: remove the context from amdgpu_job
drm/amdgpu: remove unused parameters to amd_sched_create
drm/amdgpu: remove sched_lock
drm/amdgpu: remove prepare_job callback
drm/amdgpu: cleanup a scheduler function name
drm/amdgpu: reorder scheduler functions
...
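Two of the scheduler changes above, "remove extra parameters from scheduler callbacks" and "remove prepare_job callback", reduce the backend interface to a pair of hooks that receive only the job itself. A minimal sketch of the resulting callback table shape follows; struct fence and struct amd_sched_job stand in for the real kernel types, whose fields are not spelled out here:

/* Sketch only: shape of the backend ops after the rework.  The concrete
 * hooks for amdgpu are amdgpu_sched_run_job() and
 * amdgpu_sched_process_job() in the diff below. */
struct fence;
struct amd_sched_job;

struct amd_sched_backend_ops {
	struct fence *(*run_job)(struct amd_sched_job *job);	/* submit the IBs, return the HW fence */
	void (*process_job)(struct amd_sched_job *job);		/* completion/cleanup path */
};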
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c')
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c | 94
1 file changed, 35 insertions(+), 59 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
index a86e38158afa..f93fb3541488 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
@@ -27,55 +27,28 @@
 #include <drm/drmP.h>
 #include "amdgpu.h"
 
-static int amdgpu_sched_prepare_job(struct amd_gpu_scheduler *sched,
-				    struct amd_sched_entity *entity,
-				    struct amd_sched_job *job)
+static struct fence *amdgpu_sched_run_job(struct amd_sched_job *job)
 {
-	int r = 0;
-	struct amdgpu_cs_parser *sched_job;
-	if (!job || !job->data) {
-		DRM_ERROR("job is null\n");
-		return -EINVAL;
-	}
-
-	sched_job = (struct amdgpu_cs_parser *)job->data;
-	if (sched_job->prepare_job) {
-		r = sched_job->prepare_job(sched_job);
-		if (r) {
-			DRM_ERROR("Prepare job error\n");
-			schedule_work(&sched_job->job_work);
-		}
-	}
-	return r;
-}
-
-static struct fence *amdgpu_sched_run_job(struct amd_gpu_scheduler *sched,
-					  struct amd_sched_entity *entity,
-					  struct amd_sched_job *job)
-{
-	int r = 0;
-	struct amdgpu_cs_parser *sched_job;
+	struct amdgpu_job *sched_job;
 	struct amdgpu_fence *fence;
+	int r;
 
-	if (!job || !job->data) {
+	if (!job) {
 		DRM_ERROR("job is null\n");
 		return NULL;
 	}
-	sched_job = (struct amdgpu_cs_parser *)job->data;
+	sched_job = (struct amdgpu_job *)job;
 	mutex_lock(&sched_job->job_lock);
 	r = amdgpu_ib_schedule(sched_job->adev,
 			       sched_job->num_ibs,
 			       sched_job->ibs,
-			       sched_job->filp);
+			       sched_job->base.owner);
 	if (r)
 		goto err;
 	fence = amdgpu_fence_ref(sched_job->ibs[sched_job->num_ibs - 1].fence);
 
-	if (sched_job->run_job) {
-		r = sched_job->run_job(sched_job);
-		if (r)
-			goto err;
-	}
+	if (sched_job->free_job)
+		sched_job->free_job(sched_job);
 
 	mutex_unlock(&sched_job->job_lock);
 	return &fence->base;
@@ -83,25 +56,25 @@ static struct fence *amdgpu_sched_run_job(struct amd_gpu_scheduler *sched,
 err:
 	DRM_ERROR("Run job error\n");
 	mutex_unlock(&sched_job->job_lock);
-	schedule_work(&sched_job->job_work);
+	job->sched->ops->process_job(job);
 	return NULL;
 }
 
-static void amdgpu_sched_process_job(struct amd_gpu_scheduler *sched,
-				     struct amd_sched_job *job)
+static void amdgpu_sched_process_job(struct amd_sched_job *job)
 {
-	struct amdgpu_cs_parser *sched_job;
+	struct amdgpu_job *sched_job;
 
-	if (!job || !job->data) {
+	if (!job) {
 		DRM_ERROR("job is null\n");
 		return;
 	}
-	sched_job = (struct amdgpu_cs_parser *)job->data;
-	schedule_work(&sched_job->job_work);
+	sched_job = (struct amdgpu_job *)job;
+	/* after processing job, free memory */
+	fence_put(&sched_job->base.s_fence->base);
+	kfree(sched_job);
 }
 
 struct amd_sched_backend_ops amdgpu_sched_ops = {
-	.prepare_job = amdgpu_sched_prepare_job,
 	.run_job = amdgpu_sched_run_job,
 	.process_job = amdgpu_sched_process_job
 };
@@ -110,36 +83,39 @@ int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
 					 struct amdgpu_ring *ring,
 					 struct amdgpu_ib *ibs,
 					 unsigned num_ibs,
-					 int (*free_job)(struct amdgpu_cs_parser *),
+					 int (*free_job)(struct amdgpu_job *),
 					 void *owner,
 					 struct fence **f)
 {
 	int r = 0;
 	if (amdgpu_enable_scheduler) {
-		struct amdgpu_cs_parser *sched_job =
-			amdgpu_cs_parser_create(adev, owner, &adev->kernel_ctx,
-						ibs, num_ibs);
-		if(!sched_job) {
+		struct amdgpu_job *job =
+			kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL);
+		if (!job)
 			return -ENOMEM;
-		}
-		sched_job->free_job = free_job;
-		mutex_lock(&sched_job->job_lock);
-		r = amd_sched_push_job(ring->scheduler,
-				       &adev->kernel_ctx.rings[ring->idx].entity,
-				       sched_job, &sched_job->s_fence);
+		job->base.sched = ring->scheduler;
+		job->base.s_entity = &adev->kernel_ctx.rings[ring->idx].entity;
+		job->adev = adev;
+		job->ibs = ibs;
+		job->num_ibs = num_ibs;
+		job->base.owner = owner;
+		mutex_init(&job->job_lock);
+		job->free_job = free_job;
+		mutex_lock(&job->job_lock);
+		r = amd_sched_entity_push_job((struct amd_sched_job *)job);
 		if (r) {
-			mutex_unlock(&sched_job->job_lock);
-			kfree(sched_job);
+			mutex_unlock(&job->job_lock);
+			kfree(job);
 			return r;
 		}
-		ibs[num_ibs - 1].sequence = sched_job->s_fence->v_seq;
-		*f = fence_get(&sched_job->s_fence->base);
-		mutex_unlock(&sched_job->job_lock);
+		*f = fence_get(&job->base.s_fence->base);
+		mutex_unlock(&job->job_lock);
	} else {
 		r = amdgpu_ib_schedule(adev, num_ibs, ibs, owner);
 		if (r)
 			return r;
 		*f = fence_get(&ibs[num_ibs - 1].fence->base);
 	}
+
 	return 0;
 }
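The casts between struct amd_sched_job and struct amdgpu_job in the new code rely on the scheduler job being embedded at the start of the driver job. A rough, abridged sketch of the layout this assumes (the authoritative definition lives in amdgpu.h and the scheduler headers; the field list here is illustrative only):

/* Abridged, illustrative layout: because 'base' is the first member, a
 * struct amd_sched_job pointer handed to the scheduler callbacks refers
 * to the same object as the enclosing struct amdgpu_job, which is what
 * makes the (struct amdgpu_job *)job cast in amdgpu_sched_run_job() and
 * the job->base.s_fence / job->base.owner accesses above line up. */
struct amdgpu_job {
	struct amd_sched_job	base;	/* scheduler-visible part: sched, s_entity, s_fence, owner */
	struct amdgpu_device	*adev;
	struct amdgpu_ib	*ibs;
	uint32_t		num_ibs;
	struct mutex		job_lock;
	int (*free_job)(struct amdgpu_job *job);
};

The same embedding is what lets amdgpu_sched_ib_submit_kernel_helper() fill in job->base.sched, job->base.s_entity and job->base.owner before handing the job to amd_sched_entity_push_job().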