Diffstat (limited to 'include/linux/sched.h')
 include/linux/sched.h | 20 ++++++++++++++++++--
 1 file changed, 18 insertions(+), 2 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index cecd4806edc6..508b91d57470 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -528,7 +528,11 @@ struct sched_statistics {
u64 nr_wakeups_affine_attempts;
u64 nr_wakeups_passive;
u64 nr_wakeups_idle;
+
+#ifdef CONFIG_SCHED_CORE
+ u64 core_forceidle_sum;
+#endif
#endif /* CONFIG_SCHEDSTATS */
} ____cacheline_aligned;
struct sched_entity {
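The first hunk adds a per-task counter for time spent forced idle by core scheduling, guarded by both CONFIG_SCHEDSTATS and CONFIG_SCHED_CORE. Below is a minimal sketch of how such a counter could be accumulated; account_forceidle() and delta_ns are hypothetical names, and it assumes the stats block is reachable as p->stats, as in this series:

#ifdef CONFIG_SCHED_CORE
/*
 * Hypothetical sketch: fold a measured forced-idle interval into the
 * task's schedstats. Not the kernel's actual update path.
 */
static inline void account_forceidle(struct task_struct *p, u64 delta_ns)
{
	if (schedstat_enabled())
		p->stats.core_forceidle_sum += delta_ns;
}
#endif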
@@ -992,8 +996,8 @@ struct task_struct {
/* CLONE_CHILD_CLEARTID: */
int __user *clear_child_tid;
- /* PF_IO_WORKER */
- void *pf_io_worker;
+ /* PF_KTHREAD | PF_IO_WORKER */
+ void *worker_private;
u64 utime;
u64 stime;
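The second hunk generalizes the former PF_IO_WORKER back-pointer so PF_KTHREAD tasks can use the same slot for their private data. A sketch of the accessor pattern this enables (to_kthread() here illustrates how a kthread-side consumer might use the renamed field; struct kthread as the payload type is an assumption):

/*
 * Sketch: with worker_private shared by kthreads and io_workers,
 * each user recovers its own type behind a flag check.
 */
static inline struct kthread *to_kthread(struct task_struct *k)
{
	WARN_ON(!(k->flags & PF_KTHREAD));
	return k->worker_private;
}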
@@ -1344,6 +1348,9 @@ struct task_struct {
#ifdef CONFIG_TRACE_IRQFLAGS
struct irqtrace_events kcsan_save_irqtrace;
#endif
+#ifdef CONFIG_KCSAN_WEAK_MEMORY
+ int kcsan_stack_depth;
+#endif
#endif
#if IS_ENABLED(CONFIG_KUNIT)
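The third hunk gives each task a nesting counter for KCSAN's weak memory modeling mode. An illustrative sketch of how a per-task depth counter can pair the compiler-emitted function entry/exit hooks; the hook bodies here are assumptions, not KCSAN's actual logic:

/*
 * Illustrative only: track call depth per task so state tied to the
 * current frame can be discarded when the function returns.
 */
void __tsan_func_entry(void *call_pc)
{
	current->kcsan_stack_depth++;
}

void __tsan_func_exit(void)
{
	/* e.g. drop any simulated access reordering from this frame */
	current->kcsan_stack_depth--;
}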
@@ -2176,6 +2183,15 @@ extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
#endif
#ifdef CONFIG_SMP
+static inline bool owner_on_cpu(struct task_struct *owner)
+{
+ /*
+ * Due to lock holder preemption, skip spinning if the owner task
+ * is not running on a CPU or its (v)CPU has been preempted.
+ */
+ return READ_ONCE(owner->on_cpu) && !vcpu_is_preempted(task_cpu(owner));
+}
+
/* Returns effective CPU energy utilization, as seen by the scheduler */
unsigned long sched_cpu_util(int cpu, unsigned long max);
#endif /* CONFIG_SMP */
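The last hunk hoists a common owner_on_cpu() test into the header so lock slowpaths can share the "is the holder still running?" check. A hedged usage sketch in the style of an optimistic-spin loop; struct my_lock and my_lock_owner() are hypothetical, not kernel APIs:

/*
 * Sketch: keep spinning only while the same owner holds the lock and
 * is actually executing; otherwise give up and sleep.
 */
static bool my_spin_on_owner(struct my_lock *lock, struct task_struct *owner)
{
	bool keep_waiting = true;

	rcu_read_lock();	/* keep owner's task_struct stable */
	while (my_lock_owner(lock) == owner) {
		if (!owner_on_cpu(owner) || need_resched()) {
			keep_waiting = false;	/* fall back to blocking */
			break;
		}
		cpu_relax();
	}
	rcu_read_unlock();
	return keep_waiting;
}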