diff options
author | Dave Airlie <airlied@redhat.com> | 2015-08-27 13:00:28 +1000 |
---|---|---|
committer | Dave Airlie <airlied@redhat.com> | 2015-08-27 13:00:28 +1000 |
commit | 40b2dffbcc67e92d5df97785dffc68fe88605bfa (patch) | |
tree | 91276b6ae4210791ad4494adaf69a56b16c7b0ac /drivers/gpu/drm/amd/scheduler/gpu_scheduler.h | |
parent | db56176025cee5e242dfeed5f4e304d095d29fa3 (diff) | |
parent | c2b6bd7e91aad8440a2f55bdbde6f5a8ae19fac5 (diff) | |
download | linux-40b2dffbcc67e92d5df97785dffc68fe88605bfa.tar.gz linux-40b2dffbcc67e92d5df97785dffc68fe88605bfa.tar.bz2 linux-40b2dffbcc67e92d5df97785dffc68fe88605bfa.zip |
Merge branch 'drm-next-4.3' of git://people.freedesktop.org/~agd5f/linux into drm-next
- DP fixes for radeon and amdgpu
- IH ring fix for tonga and fiji
- Lots of GPU scheduler fixes
- Misc additional fixes
* 'drm-next-4.3' of git://people.freedesktop.org/~agd5f/linux: (42 commits)
drm/amdgpu: fix wait queue handling in the scheduler
drm/amdgpu: remove extra parameters from scheduler callbacks
drm/amdgpu: wake up scheduler only when necessary
drm/amdgpu: remove entity idle timeout v2
drm/amdgpu: fix postclose order
drm/amdgpu: use IB for copy buffer of eviction
drm/amdgpu: adjust the judgement of removing fence callback
drm/amdgpu: fix no sync_wait in copy_buffer
drm/amdgpu: fix last_vm_update fence is not effective for sched fence
drm/amdgpu: add priv data to sched
drm/amdgpu: add owner for sched fence
drm/amdgpu: remove entity reference from sched fence
drm/amdgpu: fix and cleanup amd_sched_entity_push_job
drm/amdgpu: remove amdgpu_bo_list_clone
drm/amdgpu: remove the context from amdgpu_job
drm/amdgpu: remove unused parameters to amd_sched_create
drm/amdgpu: remove sched_lock
drm/amdgpu: remove prepare_job callback
drm/amdgpu: cleanup a scheduler function name
drm/amdgpu: reorder scheduler functions
...
Diffstat (limited to 'drivers/gpu/drm/amd/scheduler/gpu_scheduler.h')
-rw-r--r-- | drivers/gpu/drm/amd/scheduler/gpu_scheduler.h | 69 |
1 file changed, 19 insertions, 50 deletions
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h index ceb5918bfbeb..e797796dcad7 100644 --- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h +++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h @@ -27,8 +27,6 @@ #include <linux/kfifo.h> #include <linux/fence.h> -#define AMD_GPU_WAIT_IDLE_TIMEOUT_IN_MS 3000 - struct amd_gpu_scheduler; struct amd_sched_rq; @@ -41,20 +39,12 @@ struct amd_sched_rq; struct amd_sched_entity { struct list_head list; struct amd_sched_rq *belongto_rq; - spinlock_t lock; - /* the virtual_seq is unique per context per ring */ - atomic64_t last_queued_v_seq; - atomic64_t last_signaled_v_seq; + atomic_t fence_seq; /* the job_queue maintains the jobs submitted by clients */ struct kfifo job_queue; spinlock_t queue_lock; struct amd_gpu_scheduler *scheduler; - wait_queue_head_t wait_queue; - wait_queue_head_t wait_emit; - bool is_pending; uint64_t fence_context; - char name[20]; - bool need_wakeup; }; /** @@ -63,26 +53,24 @@ struct amd_sched_entity { * the next entity to emit commands from. 
*/ struct amd_sched_rq { - struct mutex lock; + spinlock_t lock; struct list_head entities; struct amd_sched_entity *current_entity; }; struct amd_sched_fence { struct fence base; - struct fence_cb cb; - struct amd_sched_entity *entity; - uint64_t v_seq; + struct amd_gpu_scheduler *scheduler; spinlock_t lock; + void *owner; }; struct amd_sched_job { - struct list_head list; struct fence_cb cb; struct amd_gpu_scheduler *sched; struct amd_sched_entity *s_entity; - void *data; struct amd_sched_fence *s_fence; + void *owner; }; extern const struct fence_ops amd_sched_fence_ops; @@ -101,61 +89,42 @@ static inline struct amd_sched_fence *to_amd_sched_fence(struct fence *f) * these functions should be implemented in driver side */ struct amd_sched_backend_ops { - int (*prepare_job)(struct amd_gpu_scheduler *sched, - struct amd_sched_entity *c_entity, - struct amd_sched_job *job); - struct fence *(*run_job)(struct amd_gpu_scheduler *sched, - struct amd_sched_entity *c_entity, - struct amd_sched_job *job); - void (*process_job)(struct amd_gpu_scheduler *sched, - struct amd_sched_job *job); + struct fence *(*run_job)(struct amd_sched_job *job); + void (*process_job)(struct amd_sched_job *job); }; /** * One scheduler is implemented for each hardware ring */ struct amd_gpu_scheduler { - void *device; struct task_struct *thread; struct amd_sched_rq sched_rq; struct amd_sched_rq kernel_rq; - struct list_head active_hw_rq; - atomic64_t hw_rq_count; + atomic_t hw_rq_count; struct amd_sched_backend_ops *ops; uint32_t ring_id; - uint32_t granularity; /* in ms unit */ - uint32_t preemption; - wait_queue_head_t wait_queue; - struct amd_sched_entity *current_entity; - struct mutex sched_lock; - spinlock_t queue_lock; + wait_queue_head_t wake_up_worker; + wait_queue_head_t job_scheduled; uint32_t hw_submission_limit; + char name[20]; + void *priv; }; -struct amd_gpu_scheduler *amd_sched_create(void *device, - struct amd_sched_backend_ops *ops, - uint32_t ring, - uint32_t granularity, - 
uint32_t preemption, - uint32_t hw_submission); +struct amd_gpu_scheduler * +amd_sched_create(struct amd_sched_backend_ops *ops, + uint32_t ring, uint32_t hw_submission, void *priv); int amd_sched_destroy(struct amd_gpu_scheduler *sched); -int amd_sched_push_job(struct amd_gpu_scheduler *sched, - struct amd_sched_entity *c_entity, - void *data, - struct amd_sched_fence **fence); - int amd_sched_entity_init(struct amd_gpu_scheduler *sched, struct amd_sched_entity *entity, struct amd_sched_rq *rq, uint32_t jobs); -int amd_sched_entity_fini(struct amd_gpu_scheduler *sched, - struct amd_sched_entity *entity); - -uint64_t amd_sched_next_queued_seq(struct amd_sched_entity *c_entity); +void amd_sched_entity_fini(struct amd_gpu_scheduler *sched, + struct amd_sched_entity *entity); +int amd_sched_entity_push_job(struct amd_sched_job *sched_job); struct amd_sched_fence *amd_sched_fence_create( - struct amd_sched_entity *s_entity); + struct amd_sched_entity *s_entity, void *owner); void amd_sched_fence_signal(struct amd_sched_fence *fence); |