Diffstat (limited to 'tools/sched_ext/scx_flatcg.bpf.c')
 -rw-r--r--  tools/sched_ext/scx_flatcg.bpf.c  20 +++++++++++---------
 1 file changed, 11 insertions(+), 9 deletions(-)
diff --git a/tools/sched_ext/scx_flatcg.bpf.c b/tools/sched_ext/scx_flatcg.bpf.c
index b722baf6da4b..4e3afcd260bf 100644
--- a/tools/sched_ext/scx_flatcg.bpf.c
+++ b/tools/sched_ext/scx_flatcg.bpf.c
@@ -341,7 +341,7 @@ s32 BPF_STRUCT_OPS(fcg_select_cpu, struct task_struct *p, s32 prev_cpu, u64 wake
 	if (is_idle) {
 		set_bypassed_at(p, taskc);
 		stat_inc(FCG_STAT_LOCAL);
-		scx_bpf_dispatch(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, 0);
+		scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, 0);
 	}
 
 	return cpu;
@@ -377,10 +377,12 @@ void BPF_STRUCT_OPS(fcg_enqueue, struct task_struct *p, u64 enq_flags)
 		 */
 		if (p->nr_cpus_allowed == 1 && (p->flags & PF_KTHREAD)) {
 			stat_inc(FCG_STAT_LOCAL);
-			scx_bpf_dispatch(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, enq_flags);
+			scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL,
+					   enq_flags);
 		} else {
 			stat_inc(FCG_STAT_GLOBAL);
-			scx_bpf_dispatch(p, FALLBACK_DSQ, SCX_SLICE_DFL, enq_flags);
+			scx_bpf_dsq_insert(p, FALLBACK_DSQ, SCX_SLICE_DFL,
+					   enq_flags);
 		}
 		return;
 	}
@@ -391,7 +393,7 @@ void BPF_STRUCT_OPS(fcg_enqueue, struct task_struct *p, u64 enq_flags)
 		goto out_release;
 
 	if (fifo_sched) {
-		scx_bpf_dispatch(p, cgrp->kn->id, SCX_SLICE_DFL, enq_flags);
+		scx_bpf_dsq_insert(p, cgrp->kn->id, SCX_SLICE_DFL, enq_flags);
 	} else {
 		u64 tvtime = p->scx.dsq_vtime;
 
@@ -402,8 +404,8 @@ void BPF_STRUCT_OPS(fcg_enqueue, struct task_struct *p, u64 enq_flags)
 		if (vtime_before(tvtime, cgc->tvtime_now - SCX_SLICE_DFL))
 			tvtime = cgc->tvtime_now - SCX_SLICE_DFL;
 
-		scx_bpf_dispatch_vtime(p, cgrp->kn->id, SCX_SLICE_DFL,
-				       tvtime, enq_flags);
+		scx_bpf_dsq_insert_vtime(p, cgrp->kn->id, SCX_SLICE_DFL,
+					 tvtime, enq_flags);
 	}
 
 	cgrp_enqueued(cgrp, cgc);
@@ -663,7 +665,7 @@ static bool try_pick_next_cgroup(u64 *cgidp)
 		goto out_free;
 	}
 
-	if (!scx_bpf_consume(cgid)) {
+	if (!scx_bpf_dsq_move_to_local(cgid)) {
 		bpf_cgroup_release(cgrp);
 		stat_inc(FCG_STAT_PNC_EMPTY);
 		goto out_stash;
@@ -743,7 +745,7 @@ void BPF_STRUCT_OPS(fcg_dispatch, s32 cpu, struct task_struct *prev)
 		goto pick_next_cgroup;
 
 	if (vtime_before(now, cpuc->cur_at + cgrp_slice_ns)) {
-		if (scx_bpf_consume(cpuc->cur_cgid)) {
+		if (scx_bpf_dsq_move_to_local(cpuc->cur_cgid)) {
 			stat_inc(FCG_STAT_CNS_KEEP);
 			return;
 		}
@@ -783,7 +785,7 @@
 pick_next_cgroup:
 	cpuc->cur_at = now;
 
-	if (scx_bpf_consume(FALLBACK_DSQ)) {
+	if (scx_bpf_dsq_move_to_local(FALLBACK_DSQ)) {
 		cpuc->cur_cgid = 0;
 		return;
 	}