sched_ext: Changes for v6.16

Merge tag 'sched_ext-for-6.16' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/sched_ext

Pull sched_ext updates from Tejun Heo:

 - More in-kernel idle CPU selection improvements. Expand topology
   awareness coverage and add scx_bpf_select_cpu_and() to allow more
   flexibility. The idle CPU selection kfuncs can now be called from
   unlocked contexts too (a usage sketch follows after this list).

 - A bunch of reorganization changes to lay the foundation for multiple
   hierarchical scheduler support. This isn't ready yet and the included
   changes don't make any meaningful behavioral difference. One notable
   change is replacing some static_key tests with dynamic tests, as the
   test results may differ depending on the scheduler instance. This
   isn't expected to cause a meaningful performance difference.

 - Other minor and doc updates.

 - There were multiple patches in for-6.15-fixes which conflicted with
   changes in for-6.16. for-6.15-fixes was pulled three times into
   for-6.16 to resolve the conflicts.
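
As a usage sketch for the first item above (editor-added and hedged: the callback
and ops names are invented, while scx_bpf_select_cpu_and() and the other kfuncs
are the ones added or already declared in the scx BPF headers touched by this
series), a BPF scheduler's ops.select_cpu() can hand the built-in idle-selection
policy an explicit cpumask and dispatch directly when an idle CPU is reserved:

#include <scx/common.bpf.h>

char _license[] SEC("license") = "GPL";

s32 BPF_STRUCT_OPS(sketch_select_cpu, struct task_struct *p,
		   s32 prev_cpu, u64 wake_flags)
{
	s32 cpu;

	/*
	 * Ask the built-in policy for an idle CPU, restricted here to the
	 * task's own affinity mask; any narrower cpumask may be passed.
	 */
	cpu = scx_bpf_select_cpu_and(p, prev_cpu, wake_flags, p->cpus_ptr, 0);
	if (cpu >= 0) {
		/* The returned CPU was reserved as idle: dispatch directly. */
		scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, 0);
		return cpu;
	}
	return prev_cpu;
}

SEC(".struct_ops.link")
struct sched_ext_ops sketch_ops = {
	.select_cpu	= (void *)sketch_select_cpu,
	.name		= "sketch",
};

The unlocked-context side of the same change is exercised by the new
allowed_cpus selftest included in this pull, which drives the kfunc from a
SEC("syscall") program through BPF_PROG_TEST_RUN.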

* tag 'sched_ext-for-6.16' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/sched_ext: (49 commits)
  sched_ext: Call ops.update_idle() after updating builtin idle bits
  sched_ext, docs: convert mentions of "CFS" to "fair-class scheduler"
  selftests/sched_ext: Update test enq_select_cpu_fails
  sched_ext: idle: Consolidate default idle CPU selection kfuncs
  selftests/sched_ext: Add test for scx_bpf_select_cpu_and() via test_run
  sched_ext: idle: Allow scx_bpf_select_cpu_and() from unlocked context
  sched_ext: idle: Validate locking correctness in scx_bpf_select_cpu_and()
  sched_ext: Make scx_kf_allowed_if_unlocked() available outside ext.c
  sched_ext, docs: add label
  sched_ext: Explain the temporary situation around scx_root dereferences
  sched_ext: Add @sch to SCX_CALL_OP*()
  sched_ext: Cleanup [__]scx_exit/error*()
  sched_ext: Add @sch to SCX_CALL_OP*()
  sched_ext: Clean up scx_root usages
  Documentation: scheduler: Changed lowercase acronyms to uppercase
  sched_ext: Avoid NULL scx_root deref in __scx_exit()
  sched_ext: Add RCU protection to scx_root in DSQ iterator
  sched_ext: Clean up SCX_EXIT_NONE handling in scx_disable_workfn()
  sched_ext: Move disable machinery into scx_sched
  sched_ext: Move event_stats_cpu into scx_sched
  ...
Linus Torvalds 2025-05-27 21:12:50 -07:00
commit feacb1774b
17 changed files with 1667 additions and 994 deletions

View File

@ -1,3 +1,5 @@
.. _sched-ext:
==========================
Extensible Scheduler Class
==========================
@ -47,8 +49,8 @@ options should be enabled to use sched_ext:
sched_ext is used only when the BPF scheduler is loaded and running.
If a task explicitly sets its scheduling policy to ``SCHED_EXT``, it will be
treated as ``SCHED_NORMAL`` and scheduled by CFS until the BPF scheduler is
loaded.
treated as ``SCHED_NORMAL`` and scheduled by the fair-class scheduler until the
BPF scheduler is loaded.
When the BPF scheduler is loaded and ``SCX_OPS_SWITCH_PARTIAL`` is not set
in ``ops->flags``, all ``SCHED_NORMAL``, ``SCHED_BATCH``, ``SCHED_IDLE``, and
@ -57,11 +59,11 @@ in ``ops->flags``, all ``SCHED_NORMAL``, ``SCHED_BATCH``, ``SCHED_IDLE``, and
However, when the BPF scheduler is loaded and ``SCX_OPS_SWITCH_PARTIAL`` is
set in ``ops->flags``, only tasks with the ``SCHED_EXT`` policy are scheduled
by sched_ext, while tasks with ``SCHED_NORMAL``, ``SCHED_BATCH`` and
``SCHED_IDLE`` policies are scheduled by CFS.
``SCHED_IDLE`` policies are scheduled by the fair-class scheduler.
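
(Editor-added example, not part of this documentation patch.) Under
``SCX_OPS_SWITCH_PARTIAL`` a task opts in by setting its own policy to
``SCHED_EXT``. A minimal user-space sketch, assuming the ``SCHED_EXT`` value
from the kernel UAPI headers since libc headers may not define it yet:

.. code-block:: c

   #include <sched.h>
   #include <stdio.h>

   #ifndef SCHED_EXT
   #define SCHED_EXT 7   /* value from the kernel UAPI headers */
   #endif

   int main(void)
   {
           const struct sched_param param = { .sched_priority = 0 };

           /*
            * Until a BPF scheduler is loaded, this task keeps being
            * handled by the fair-class scheduler as if it were
            * SCHED_NORMAL.
            */
           if (sched_setscheduler(0, SCHED_EXT, &param)) {
                   perror("sched_setscheduler(SCHED_EXT)");
                   return 1;
           }

           printf("policy: %d\n", sched_getscheduler(0));
           return 0;
   }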
Terminating the sched_ext scheduler program, triggering `SysRq-S`, or
detection of any internal error including stalled runnable tasks aborts the
BPF scheduler and reverts all tasks back to CFS.
BPF scheduler and reverts all tasks back to the fair-class scheduler.
.. code-block:: none
@ -197,8 +199,8 @@ Dispatch Queues
To match the impedance between the scheduler core and the BPF scheduler,
sched_ext uses DSQs (dispatch queues) which can operate as both a FIFO and a
priority queue. By default, there is one global FIFO (``SCX_DSQ_GLOBAL``),
and one local dsq per CPU (``SCX_DSQ_LOCAL``). The BPF scheduler can manage
an arbitrary number of dsq's using ``scx_bpf_create_dsq()`` and
and one local DSQ per CPU (``SCX_DSQ_LOCAL``). The BPF scheduler can manage
an arbitrary number of DSQs using ``scx_bpf_create_dsq()`` and
``scx_bpf_destroy_dsq()``.
A CPU always executes a task from its local DSQ. A task is "inserted" into a
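
For illustration (editor-added sketch, not from this patch: the DSQ id and
callback names are invented, scx_bpf_create_dsq() and scx_bpf_dsq_insert()
appear elsewhere in this series, and scx_bpf_dsq_move_to_local() is part of
the existing kfunc API), a minimal BPF scheduler wiring these calls together
might look like:

#include <scx/common.bpf.h>

char _license[] SEC("license") = "GPL";

#define MY_DSQ_ID 0	/* arbitrary id for a custom DSQ */

s32 BPF_STRUCT_OPS_SLEEPABLE(minimal_dsq_init)
{
	/* Create one custom FIFO DSQ, not bound to any NUMA node. */
	return scx_bpf_create_dsq(MY_DSQ_ID, -1);
}

void BPF_STRUCT_OPS(minimal_dsq_enqueue, struct task_struct *p, u64 enq_flags)
{
	/* Queue the task on the custom DSQ. */
	scx_bpf_dsq_insert(p, MY_DSQ_ID, SCX_SLICE_DFL, enq_flags);
}

void BPF_STRUCT_OPS(minimal_dsq_dispatch, s32 cpu, struct task_struct *prev)
{
	/* Refill this CPU's local DSQ from the custom DSQ. */
	scx_bpf_dsq_move_to_local(MY_DSQ_ID);
}

SEC(".struct_ops.link")
struct sched_ext_ops minimal_dsq_ops = {
	.init		= (void *)minimal_dsq_init,
	.enqueue	= (void *)minimal_dsq_enqueue,
	.dispatch	= (void *)minimal_dsq_dispatch,
	.name		= "minimal_dsq",
};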

File diff suppressed because it is too large

View File

@ -8,6 +8,11 @@
*/
#ifdef CONFIG_SCHED_CLASS_EXT
static inline bool scx_kf_allowed_if_unlocked(void)
{
return !current->scx.kf_mask;
}
DECLARE_STATIC_KEY_FALSE(scx_ops_allow_queued_wakeup);
void scx_tick(struct rq *rq);
@ -21,6 +26,7 @@ void scx_rq_activate(struct rq *rq);
void scx_rq_deactivate(struct rq *rq);
int scx_check_setscheduler(struct task_struct *p, int policy);
bool task_should_scx(int policy);
bool scx_allow_ttwu_queue(const struct task_struct *p);
void init_sched_ext_class(void);
static inline u32 scx_cpuperf_target(s32 cpu)
@ -36,13 +42,6 @@ static inline bool task_on_scx(const struct task_struct *p)
return scx_enabled() && p->sched_class == &ext_sched_class;
}
static inline bool scx_allow_ttwu_queue(const struct task_struct *p)
{
return !scx_enabled() ||
static_branch_likely(&scx_ops_allow_queued_wakeup) ||
p->sched_class != &ext_sched_class;
}
#ifdef CONFIG_SCHED_CORE
bool scx_prio_less(const struct task_struct *a, const struct task_struct *b,
bool in_fi);

View File

@ -46,6 +46,13 @@ static struct scx_idle_cpus scx_idle_global_masks;
*/
static struct scx_idle_cpus **scx_idle_node_masks;
/*
* Local per-CPU cpumasks (used to generate temporary idle cpumasks).
*/
static DEFINE_PER_CPU(cpumask_var_t, local_idle_cpumask);
static DEFINE_PER_CPU(cpumask_var_t, local_llc_idle_cpumask);
static DEFINE_PER_CPU(cpumask_var_t, local_numa_idle_cpumask);
/*
* Return the idle masks associated to a target @node.
*
@ -391,6 +398,14 @@ void scx_idle_update_selcpu_topology(struct sched_ext_ops *ops)
static_branch_disable_cpuslocked(&scx_selcpu_topo_numa);
}
/*
* Return true if @p can run on all possible CPUs, false otherwise.
*/
static inline bool task_affinity_all(const struct task_struct *p)
{
return p->nr_cpus_allowed >= num_possible_cpus();
}
/*
* Built-in CPU idle selection policy:
*
@ -403,13 +418,15 @@ void scx_idle_update_selcpu_topology(struct sched_ext_ops *ops)
* branch prediction optimizations.
*
* 3. Pick a CPU within the same LLC (Last-Level Cache):
* - if the above conditions aren't met, pick a CPU that shares the same LLC
* to maintain cache locality.
* - if the above conditions aren't met, pick a CPU that shares the same
* LLC, if the LLC domain is a subset of @cpus_allowed, to maintain
* cache locality.
*
* 4. Pick a CPU within the same NUMA node, if enabled:
* - choose a CPU from the same NUMA node to reduce memory access latency.
* - choose a CPU from the same NUMA node, if the node cpumask is a
* subset of @cpus_allowed, to reduce memory access latency.
*
* 5. Pick any idle CPU usable by the task.
* 5. Pick any idle CPU within the @cpus_allowed domain.
*
* Step 3 and 4 are performed only if the system has, respectively,
* multiple LLCs / multiple NUMA nodes (see scx_selcpu_topo_llc and
@ -424,35 +441,77 @@ void scx_idle_update_selcpu_topology(struct sched_ext_ops *ops)
* NOTE: tasks that can only run on 1 CPU are excluded by this logic, because
* we never call ops.select_cpu() for them, see select_task_rq().
*/
s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags, u64 flags)
s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags,
const struct cpumask *cpus_allowed, u64 flags)
{
const struct cpumask *llc_cpus = NULL;
const struct cpumask *numa_cpus = NULL;
const struct cpumask *llc_cpus = NULL, *numa_cpus = NULL;
const struct cpumask *allowed = cpus_allowed ?: p->cpus_ptr;
int node = scx_cpu_node_if_enabled(prev_cpu);
s32 cpu;
preempt_disable();
/*
* Determine the subset of CPUs usable by @p within @cpus_allowed.
*/
if (allowed != p->cpus_ptr) {
struct cpumask *local_cpus = this_cpu_cpumask_var_ptr(local_idle_cpumask);
if (task_affinity_all(p)) {
allowed = cpus_allowed;
} else if (cpumask_and(local_cpus, cpus_allowed, p->cpus_ptr)) {
allowed = local_cpus;
} else {
cpu = -EBUSY;
goto out_enable;
}
/*
* If @prev_cpu is not in the allowed CPUs, skip topology
* optimizations and try to pick any idle CPU usable by the
* task.
*
* If %SCX_OPS_BUILTIN_IDLE_PER_NODE is enabled, prioritize
* the current node, as it may optimize some waker->wakee
* workloads.
*/
if (!cpumask_test_cpu(prev_cpu, allowed)) {
node = scx_cpu_node_if_enabled(smp_processor_id());
cpu = scx_pick_idle_cpu(allowed, node, flags);
goto out_enable;
}
}
/*
* This is necessary to protect llc_cpus.
*/
rcu_read_lock();
/*
* Determine the scheduling domain only if the task is allowed to run
* on all CPUs.
* Determine the subset of CPUs that the task can use in its
* current LLC and node.
*
* This is done primarily for efficiency, as it avoids the overhead of
* updating a cpumask every time we need to select an idle CPU (which
* can be costly in large SMP systems), but it also aligns logically:
* if a task's scheduling domain is restricted by user-space (through
* CPU affinity), the task will simply use the flat scheduling domain
* defined by user-space.
* If the task can run on all CPUs, use the node and LLC cpumasks
* directly.
*/
if (p->nr_cpus_allowed >= num_possible_cpus()) {
if (static_branch_maybe(CONFIG_NUMA, &scx_selcpu_topo_numa))
numa_cpus = numa_span(prev_cpu);
if (static_branch_maybe(CONFIG_NUMA, &scx_selcpu_topo_numa)) {
struct cpumask *local_cpus = this_cpu_cpumask_var_ptr(local_numa_idle_cpumask);
const struct cpumask *cpus = numa_span(prev_cpu);
if (static_branch_maybe(CONFIG_SCHED_MC, &scx_selcpu_topo_llc))
llc_cpus = llc_span(prev_cpu);
if (allowed == p->cpus_ptr && task_affinity_all(p))
numa_cpus = cpus;
else if (cpus && cpumask_and(local_cpus, allowed, cpus))
numa_cpus = local_cpus;
}
if (static_branch_maybe(CONFIG_SCHED_MC, &scx_selcpu_topo_llc)) {
struct cpumask *local_cpus = this_cpu_cpumask_var_ptr(local_llc_idle_cpumask);
const struct cpumask *cpus = llc_span(prev_cpu);
if (allowed == p->cpus_ptr && task_affinity_all(p))
llc_cpus = cpus;
else if (cpus && cpumask_and(local_cpus, allowed, cpus))
llc_cpus = local_cpus;
}
/*
@ -490,7 +549,7 @@ s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags, u64
cpu_rq(cpu)->scx.local_dsq.nr == 0 &&
(!(flags & SCX_PICK_IDLE_IN_NODE) || (waker_node == node)) &&
!cpumask_empty(idle_cpumask(waker_node)->cpu)) {
if (cpumask_test_cpu(cpu, p->cpus_ptr))
if (cpumask_test_cpu(cpu, allowed))
goto out_unlock;
}
}
@ -535,7 +594,7 @@ s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags, u64
* begin in prev_cpu's node and proceed to other nodes in
* order of increasing distance.
*/
cpu = scx_pick_idle_cpu(p->cpus_ptr, node, flags | SCX_PICK_IDLE_CORE);
cpu = scx_pick_idle_cpu(allowed, node, flags | SCX_PICK_IDLE_CORE);
if (cpu >= 0)
goto out_unlock;
@ -583,10 +642,12 @@ s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags, u64
* in prev_cpu's node and proceed to other nodes in order of
* increasing distance.
*/
cpu = scx_pick_idle_cpu(p->cpus_ptr, node, flags);
cpu = scx_pick_idle_cpu(allowed, node, flags);
out_unlock:
rcu_read_unlock();
out_enable:
preempt_enable();
return cpu;
}
@ -596,7 +657,7 @@ out_unlock:
*/
void scx_idle_init_masks(void)
{
int node;
int i;
/* Allocate global idle cpumasks */
BUG_ON(!alloc_cpumask_var(&scx_idle_global_masks.cpu, GFP_KERNEL));
@ -607,13 +668,23 @@ void scx_idle_init_masks(void)
sizeof(*scx_idle_node_masks), GFP_KERNEL);
BUG_ON(!scx_idle_node_masks);
for_each_node(node) {
scx_idle_node_masks[node] = kzalloc_node(sizeof(**scx_idle_node_masks),
GFP_KERNEL, node);
BUG_ON(!scx_idle_node_masks[node]);
for_each_node(i) {
scx_idle_node_masks[i] = kzalloc_node(sizeof(**scx_idle_node_masks),
GFP_KERNEL, i);
BUG_ON(!scx_idle_node_masks[i]);
BUG_ON(!alloc_cpumask_var_node(&scx_idle_node_masks[node]->cpu, GFP_KERNEL, node));
BUG_ON(!alloc_cpumask_var_node(&scx_idle_node_masks[node]->smt, GFP_KERNEL, node));
BUG_ON(!alloc_cpumask_var_node(&scx_idle_node_masks[i]->cpu, GFP_KERNEL, i));
BUG_ON(!alloc_cpumask_var_node(&scx_idle_node_masks[i]->smt, GFP_KERNEL, i));
}
/* Allocate local per-cpu idle cpumasks */
for_each_possible_cpu(i) {
BUG_ON(!alloc_cpumask_var_node(&per_cpu(local_idle_cpumask, i),
GFP_KERNEL, cpu_to_node(i)));
BUG_ON(!alloc_cpumask_var_node(&per_cpu(local_llc_idle_cpumask, i),
GFP_KERNEL, cpu_to_node(i)));
BUG_ON(!alloc_cpumask_var_node(&per_cpu(local_numa_idle_cpumask, i),
GFP_KERNEL, cpu_to_node(i)));
}
}
@ -662,20 +733,11 @@ static void update_builtin_idle(int cpu, bool idle)
*/
void __scx_update_idle(struct rq *rq, bool idle, bool do_notify)
{
struct scx_sched *sch = scx_root;
int cpu = cpu_of(rq);
lockdep_assert_rq_held(rq);
/*
* Trigger ops.update_idle() only when transitioning from a task to
* the idle thread and vice versa.
*
* Idle transitions are indicated by do_notify being set to true,
* managed by put_prev_task_idle()/set_next_task_idle().
*/
if (SCX_HAS_OP(update_idle) && do_notify && !scx_rq_bypassing(rq))
SCX_CALL_OP(SCX_KF_REST, update_idle, rq, cpu_of(rq), idle);
/*
* Update the idle masks:
* - for real idle transitions (do_notify == true)
@ -693,6 +755,21 @@ void __scx_update_idle(struct rq *rq, bool idle, bool do_notify)
if (static_branch_likely(&scx_builtin_idle_enabled))
if (do_notify || is_idle_task(rq->curr))
update_builtin_idle(cpu, idle);
/*
* Trigger ops.update_idle() only when transitioning from a task to
* the idle thread and vice versa.
*
* Idle transitions are indicated by do_notify being set to true,
* managed by put_prev_task_idle()/set_next_task_idle().
*
* This must come after builtin idle update so that BPF schedulers can
* create interlocking between ops.update_idle() and ops.enqueue() -
* either enqueue() sees the idle bit or update_idle() sees the task
* that enqueue() queued.
*/
if (SCX_HAS_OP(sch, update_idle) && do_notify && !scx_rq_bypassing(rq))
SCX_CALL_OP(sch, SCX_KF_REST, update_idle, rq, cpu_of(rq), idle);
}
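
Editor's note with a hedged sketch (scheduler, callback, and DSQ names are
invented; the kfuncs come from the existing sched_ext API, and
scx_bpf_dsq_move_to_local() is not part of this diff): the reordering above
enables the interlock described in the comment. ops.enqueue() queues to a
shared DSQ and then looks for an idle CPU, while ops.update_idle() re-checks
that DSQ before the CPU commits to idling, so at least one side observes the
other's effect. SCX_OPS_KEEP_BUILTIN_IDLE keeps built-in idle tracking active
even though update_idle() is implemented.

#include <scx/common.bpf.h>

char _license[] SEC("license") = "GPL";

#define SHARED_DSQ 0	/* arbitrary id for a shared custom DSQ */

s32 BPF_STRUCT_OPS_SLEEPABLE(interlock_init)
{
	return scx_bpf_create_dsq(SHARED_DSQ, -1);
}

void BPF_STRUCT_OPS(interlock_enqueue, struct task_struct *p, u64 enq_flags)
{
	s32 cpu;

	scx_bpf_dsq_insert(p, SHARED_DSQ, SCX_SLICE_DFL, enq_flags);

	/*
	 * The built-in idle bit is now updated before ops.update_idle()
	 * fires, so a CPU that went idle concurrently is either visible
	 * here ...
	 */
	cpu = scx_bpf_pick_idle_cpu(p->cpus_ptr, 0);
	if (cpu >= 0)
		scx_bpf_kick_cpu(cpu, SCX_KICK_IDLE);
}

void BPF_STRUCT_OPS(interlock_dispatch, s32 cpu, struct task_struct *prev)
{
	scx_bpf_dsq_move_to_local(SHARED_DSQ);
}

void BPF_STRUCT_OPS(interlock_update_idle, s32 cpu, bool idle)
{
	/*
	 * ... or ops.update_idle() runs after the insertion above and can
	 * observe the queued task here, aborting the transition to idle.
	 */
	if (idle && scx_bpf_dsq_nr_queued(SHARED_DSQ) > 0)
		scx_bpf_kick_cpu(cpu, 0);
}

SEC(".struct_ops.link")
struct sched_ext_ops interlock_ops = {
	.enqueue	= (void *)interlock_enqueue,
	.dispatch	= (void *)interlock_dispatch,
	.update_idle	= (void *)interlock_update_idle,
	.init		= (void *)interlock_init,
	/* Keep built-in idle tracking despite implementing update_idle(). */
	.flags		= SCX_OPS_KEEP_BUILTIN_IDLE,
	.name		= "interlock_sketch",
};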
static void reset_idle_masks(struct sched_ext_ops *ops)
@ -748,7 +825,7 @@ void scx_idle_disable(void)
static int validate_node(int node)
{
if (!static_branch_likely(&scx_builtin_idle_per_node)) {
scx_ops_error("per-node idle tracking is disabled");
scx_kf_error("per-node idle tracking is disabled");
return -EOPNOTSUPP;
}
@ -758,13 +835,13 @@ static int validate_node(int node)
/* Make sure node is in a valid range */
if (node < 0 || node >= nr_node_ids) {
scx_ops_error("invalid node %d", node);
scx_kf_error("invalid node %d", node);
return -EINVAL;
}
/* Make sure the node is part of the set of possible nodes */
if (!node_possible(node)) {
scx_ops_error("unavailable node %d", node);
scx_kf_error("unavailable node %d", node);
return -EINVAL;
}
@ -778,10 +855,72 @@ static bool check_builtin_idle_enabled(void)
if (static_branch_likely(&scx_builtin_idle_enabled))
return true;
scx_ops_error("built-in idle tracking is disabled");
scx_kf_error("built-in idle tracking is disabled");
return false;
}
s32 select_cpu_from_kfunc(struct task_struct *p, s32 prev_cpu, u64 wake_flags,
const struct cpumask *allowed, u64 flags)
{
struct rq *rq;
struct rq_flags rf;
s32 cpu;
if (!kf_cpu_valid(prev_cpu, NULL))
return -EINVAL;
if (!check_builtin_idle_enabled())
return -EBUSY;
/*
* If called from an unlocked context, acquire the task's rq lock,
* so that we can safely access p->cpus_ptr and p->nr_cpus_allowed.
*
* Otherwise, allow to use this kfunc only from ops.select_cpu()
* and ops.enqueue().
*/
if (scx_kf_allowed_if_unlocked()) {
rq = task_rq_lock(p, &rf);
} else {
if (!scx_kf_allowed(SCX_KF_SELECT_CPU | SCX_KF_ENQUEUE))
return -EPERM;
rq = scx_locked_rq();
}
/*
* Validate locking correctness to access p->cpus_ptr and
* p->nr_cpus_allowed: if we're holding an rq lock, we're safe;
* otherwise, assert that p->pi_lock is held.
*/
if (!rq)
lockdep_assert_held(&p->pi_lock);
#ifdef CONFIG_SMP
/*
* This may also be called from ops.enqueue(), so we need to handle
* per-CPU tasks as well. For these tasks, we can skip all idle CPU
* selection optimizations and simply check whether the previously
* used CPU is idle and within the allowed cpumask.
*/
if (p->nr_cpus_allowed == 1) {
if (cpumask_test_cpu(prev_cpu, allowed ?: p->cpus_ptr) &&
scx_idle_test_and_clear_cpu(prev_cpu))
cpu = prev_cpu;
else
cpu = -EBUSY;
} else {
cpu = scx_select_cpu_dfl(p, prev_cpu, wake_flags,
allowed ?: p->cpus_ptr, flags);
}
#else
cpu = -EBUSY;
#endif
if (scx_kf_allowed_if_unlocked())
task_rq_unlock(rq, p, &rf);
return cpu;
}
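
For reference, this unlocked path is what a BPF program invoked outside the
struct_ops callbacks ends up using. Below is a condensed, editor-added version
of the allowed_cpus selftest added later in this pull (the program name is
invented; user space runs it via bpf_prog_test_run_opts()):

#include <scx/common.bpf.h>

char _license[] SEC("license") = "GPL";

struct task_cpu_arg {
	pid_t pid;
};

SEC("syscall")
int pick_idle_cpu_from_user(struct task_cpu_arg *input)
{
	struct task_struct *p;
	s32 cpu;

	p = bpf_task_from_pid(input->pid);
	if (!p)
		return -EINVAL;

	/*
	 * No rq or scheduler lock is held here: select_cpu_from_kfunc()
	 * detects the unlocked context and takes the task's rq lock via
	 * task_rq_lock() itself.
	 */
	bpf_rcu_read_lock();
	cpu = scx_bpf_select_cpu_and(p, bpf_get_smp_processor_id(), 0,
				     p->cpus_ptr, 0);
	bpf_rcu_read_unlock();

	bpf_task_release(p);
	return cpu;
}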
/**
* scx_bpf_cpu_node - Return the NUMA node the given @cpu belongs to, or
* trigger an error if @cpu is invalid
@ -790,7 +929,7 @@ static bool check_builtin_idle_enabled(void)
__bpf_kfunc int scx_bpf_cpu_node(s32 cpu)
{
#ifdef CONFIG_NUMA
if (!ops_cpu_valid(cpu, NULL))
if (!kf_cpu_valid(cpu, NULL))
return NUMA_NO_NODE;
return cpu_to_node(cpu);
@ -806,9 +945,10 @@ __bpf_kfunc int scx_bpf_cpu_node(s32 cpu)
* @wake_flags: %SCX_WAKE_* flags
* @is_idle: out parameter indicating whether the returned CPU is idle
*
* Can only be called from ops.select_cpu() if the built-in CPU selection is
* enabled - ops.update_idle() is missing or %SCX_OPS_KEEP_BUILTIN_IDLE is set.
* @p, @prev_cpu and @wake_flags match ops.select_cpu().
* Can be called from ops.select_cpu(), ops.enqueue(), or from an unlocked
* context such as a BPF test_run() call, as long as built-in CPU selection
* is enabled: ops.update_idle() is missing or %SCX_OPS_KEEP_BUILTIN_IDLE
* is set.
*
* Returns the picked CPU with *@is_idle indicating whether the picked CPU is
* currently idle and thus a good candidate for direct dispatching.
@ -816,31 +956,44 @@ __bpf_kfunc int scx_bpf_cpu_node(s32 cpu)
__bpf_kfunc s32 scx_bpf_select_cpu_dfl(struct task_struct *p, s32 prev_cpu,
u64 wake_flags, bool *is_idle)
{
#ifdef CONFIG_SMP
s32 cpu;
#endif
if (!ops_cpu_valid(prev_cpu, NULL))
goto prev_cpu;
if (!check_builtin_idle_enabled())
goto prev_cpu;
if (!scx_kf_allowed(SCX_KF_SELECT_CPU))
goto prev_cpu;
#ifdef CONFIG_SMP
cpu = scx_select_cpu_dfl(p, prev_cpu, wake_flags, 0);
cpu = select_cpu_from_kfunc(p, prev_cpu, wake_flags, NULL, 0);
if (cpu >= 0) {
*is_idle = true;
return cpu;
}
#endif
prev_cpu:
*is_idle = false;
return prev_cpu;
}
/**
* scx_bpf_select_cpu_and - Pick an idle CPU usable by task @p,
* prioritizing those in @cpus_allowed
* @p: task_struct to select a CPU for
* @prev_cpu: CPU @p was on previously
* @wake_flags: %SCX_WAKE_* flags
* @cpus_allowed: cpumask of allowed CPUs
* @flags: %SCX_PICK_IDLE* flags
*
* Can be called from ops.select_cpu(), ops.enqueue(), or from an unlocked
* context such as a BPF test_run() call, as long as built-in CPU selection
* is enabled: ops.update_idle() is missing or %SCX_OPS_KEEP_BUILTIN_IDLE
* is set.
*
* @p, @prev_cpu and @wake_flags match ops.select_cpu().
*
* Returns the selected idle CPU, which will be automatically awakened upon
* returning from ops.select_cpu() and can be used for direct dispatch, or
* a negative value if no idle CPU is available.
*/
__bpf_kfunc s32 scx_bpf_select_cpu_and(struct task_struct *p, s32 prev_cpu, u64 wake_flags,
const struct cpumask *cpus_allowed, u64 flags)
{
return select_cpu_from_kfunc(p, prev_cpu, wake_flags, cpus_allowed, flags);
}
/**
* scx_bpf_get_idle_cpumask_node - Get a referenced kptr to the
* idle-tracking per-CPU cpumask of a target NUMA node.
@ -848,7 +1001,7 @@ prev_cpu:
*
* Returns an empty cpumask if idle tracking is not enabled, if @node is
* not valid, or running on a UP kernel. In this case the actual error will
* be reported to the BPF scheduler via scx_ops_error().
* be reported to the BPF scheduler via scx_error().
*/
__bpf_kfunc const struct cpumask *scx_bpf_get_idle_cpumask_node(int node)
{
@ -873,7 +1026,7 @@ __bpf_kfunc const struct cpumask *scx_bpf_get_idle_cpumask_node(int node)
__bpf_kfunc const struct cpumask *scx_bpf_get_idle_cpumask(void)
{
if (static_branch_unlikely(&scx_builtin_idle_per_node)) {
scx_ops_error("SCX_OPS_BUILTIN_IDLE_PER_NODE enabled");
scx_kf_error("SCX_OPS_BUILTIN_IDLE_PER_NODE enabled");
return cpu_none_mask;
}
@ -895,7 +1048,7 @@ __bpf_kfunc const struct cpumask *scx_bpf_get_idle_cpumask(void)
*
* Returns an empty cpumask if idle tracking is not enabled, if @node is
* not valid, or running on a UP kernel. In this case the actual error will
* be reported to the BPF scheduler via scx_ops_error().
* be reported to the BPF scheduler via scx_error().
*/
__bpf_kfunc const struct cpumask *scx_bpf_get_idle_smtmask_node(int node)
{
@ -924,7 +1077,7 @@ __bpf_kfunc const struct cpumask *scx_bpf_get_idle_smtmask_node(int node)
__bpf_kfunc const struct cpumask *scx_bpf_get_idle_smtmask(void)
{
if (static_branch_unlikely(&scx_builtin_idle_per_node)) {
scx_ops_error("SCX_OPS_BUILTIN_IDLE_PER_NODE enabled");
scx_kf_error("SCX_OPS_BUILTIN_IDLE_PER_NODE enabled");
return cpu_none_mask;
}
@ -971,7 +1124,7 @@ __bpf_kfunc bool scx_bpf_test_and_clear_cpu_idle(s32 cpu)
if (!check_builtin_idle_enabled())
return false;
if (ops_cpu_valid(cpu, NULL))
if (kf_cpu_valid(cpu, NULL))
return scx_idle_test_and_clear_cpu(cpu);
else
return false;
@ -1032,7 +1185,7 @@ __bpf_kfunc s32 scx_bpf_pick_idle_cpu(const struct cpumask *cpus_allowed,
u64 flags)
{
if (static_branch_maybe(CONFIG_NUMA, &scx_builtin_idle_per_node)) {
scx_ops_error("per-node idle tracking is enabled");
scx_kf_error("per-node idle tracking is enabled");
return -EBUSY;
}
@ -1109,7 +1262,7 @@ __bpf_kfunc s32 scx_bpf_pick_any_cpu(const struct cpumask *cpus_allowed,
s32 cpu;
if (static_branch_maybe(CONFIG_NUMA, &scx_builtin_idle_per_node)) {
scx_ops_error("per-node idle tracking is enabled");
scx_kf_error("per-node idle tracking is enabled");
return -EBUSY;
}
@ -1140,6 +1293,8 @@ BTF_ID_FLAGS(func, scx_bpf_pick_idle_cpu_node, KF_RCU)
BTF_ID_FLAGS(func, scx_bpf_pick_idle_cpu, KF_RCU)
BTF_ID_FLAGS(func, scx_bpf_pick_any_cpu_node, KF_RCU)
BTF_ID_FLAGS(func, scx_bpf_pick_any_cpu, KF_RCU)
BTF_ID_FLAGS(func, scx_bpf_select_cpu_and, KF_RCU)
BTF_ID_FLAGS(func, scx_bpf_select_cpu_dfl, KF_RCU)
BTF_KFUNCS_END(scx_kfunc_ids_idle)
static const struct btf_kfunc_id_set scx_kfunc_set_idle = {
@ -1147,21 +1302,11 @@ static const struct btf_kfunc_id_set scx_kfunc_set_idle = {
.set = &scx_kfunc_ids_idle,
};
BTF_KFUNCS_START(scx_kfunc_ids_select_cpu)
BTF_ID_FLAGS(func, scx_bpf_select_cpu_dfl, KF_RCU)
BTF_KFUNCS_END(scx_kfunc_ids_select_cpu)
static const struct btf_kfunc_id_set scx_kfunc_set_select_cpu = {
.owner = THIS_MODULE,
.set = &scx_kfunc_ids_select_cpu,
};
int scx_idle_init(void)
{
int ret;
ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &scx_kfunc_set_select_cpu) ||
register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &scx_kfunc_set_idle) ||
ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &scx_kfunc_set_idle) ||
register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &scx_kfunc_set_idle) ||
register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &scx_kfunc_set_idle);

View File

@ -27,7 +27,8 @@ static inline s32 scx_pick_idle_cpu(const struct cpumask *cpus_allowed, int node
}
#endif /* CONFIG_SMP */
s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags, u64 flags);
s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags,
const struct cpumask *cpus_allowed, u64 flags);
void scx_idle_enable(struct sched_ext_ops *ops);
void scx_idle_disable(void);
int scx_idle_init(void);

View File

@ -1736,10 +1736,10 @@ extern struct balance_callback balance_push_callback;
#ifdef CONFIG_SCHED_CLASS_EXT
extern const struct sched_class ext_sched_class;
DECLARE_STATIC_KEY_FALSE(__scx_ops_enabled); /* SCX BPF scheduler loaded */
DECLARE_STATIC_KEY_FALSE(__scx_enabled); /* SCX BPF scheduler loaded */
DECLARE_STATIC_KEY_FALSE(__scx_switched_all); /* all fair class tasks on SCX */
#define scx_enabled() static_branch_unlikely(&__scx_ops_enabled)
#define scx_enabled() static_branch_unlikely(&__scx_enabled)
#define scx_switched_all() static_branch_unlikely(&__scx_switched_all)
static inline void scx_rq_clock_update(struct rq *rq, u64 clock)

View File

@ -61,8 +61,8 @@ SCXOBJ_DIR := $(OBJ_DIR)/sched_ext
BINDIR := $(OUTPUT_DIR)/bin
BPFOBJ := $(BPFOBJ_DIR)/libbpf.a
ifneq ($(CROSS_COMPILE),)
HOST_BUILD_DIR := $(OBJ_DIR)/host
HOST_OUTPUT_DIR := host-tools
HOST_BUILD_DIR := $(OBJ_DIR)/host/obj
HOST_OUTPUT_DIR := $(OBJ_DIR)/host
HOST_INCLUDE_DIR := $(HOST_OUTPUT_DIR)/include
else
HOST_BUILD_DIR := $(OBJ_DIR)
@ -98,7 +98,7 @@ ifneq ($(LLVM),)
CFLAGS += -Wno-unused-command-line-argument
endif
LDFLAGS = -lelf -lz -lpthread
LDFLAGS += -lelf -lz -lpthread
IS_LITTLE_ENDIAN = $(shell $(CC) -dM -E - </dev/null | \
grep 'define __BYTE_ORDER__ __ORDER_LITTLE_ENDIAN__')
@ -136,14 +136,25 @@ $(MAKE_DIRS):
$(BPFOBJ): $(wildcard $(BPFDIR)/*.[ch] $(BPFDIR)/Makefile) \
$(APIDIR)/linux/bpf.h \
| $(OBJ_DIR)/libbpf
$(Q)$(MAKE) $(submake_extras) -C $(BPFDIR) OUTPUT=$(OBJ_DIR)/libbpf/ \
$(Q)$(MAKE) $(submake_extras) CROSS_COMPILE=$(CROSS_COMPILE) \
-C $(BPFDIR) OUTPUT=$(OBJ_DIR)/libbpf/ \
EXTRA_CFLAGS='-g -O0 -fPIC' \
LDFLAGS="$(LDFLAGS)" \
DESTDIR=$(OUTPUT_DIR) prefix= all install_headers
$(HOST_BPFOBJ): $(wildcard $(BPFDIR)/*.[ch] $(BPFDIR)/Makefile) \
$(APIDIR)/linux/bpf.h \
| $(HOST_BUILD_DIR)/libbpf
$(Q)$(MAKE) $(submake_extras) -C $(BPFDIR) \
OUTPUT=$(HOST_BUILD_DIR)/libbpf/ \
ARCH= CROSS_COMPILE= CC="$(HOSTCC)" LD=$(HOSTLD) \
EXTRA_CFLAGS='-g -O0 -fPIC' \
DESTDIR=$(HOST_OUTPUT_DIR) prefix= all install_headers
$(DEFAULT_BPFTOOL): $(wildcard $(BPFTOOLDIR)/*.[ch] $(BPFTOOLDIR)/Makefile) \
$(HOST_BPFOBJ) | $(HOST_BUILD_DIR)/bpftool
$(Q)$(MAKE) $(submake_extras) -C $(BPFTOOLDIR) \
ARCH= CROSS_COMPILE= CC=$(HOSTCC) LD=$(HOSTLD) \
ARCH= CROSS_COMPILE= CC="$(HOSTCC)" LD=$(HOSTLD) \
EXTRA_CFLAGS='-g -O0' \
OUTPUT=$(HOST_BUILD_DIR)/bpftool/ \
LIBBPF_OUTPUT=$(HOST_BUILD_DIR)/libbpf/ \
@ -185,7 +196,7 @@ $(addprefix $(BINDIR)/,$(c-sched-targets)): \
$(SCX_COMMON_DEPS)
$(eval sched=$(notdir $@))
$(CC) $(CFLAGS) -c $(sched).c -o $(SCXOBJ_DIR)/$(sched).o
$(CC) -o $@ $(SCXOBJ_DIR)/$(sched).o $(HOST_BPFOBJ) $(LDFLAGS)
$(CC) -o $@ $(SCXOBJ_DIR)/$(sched).o $(BPFOBJ) $(LDFLAGS)
$(c-sched-targets): %: $(BINDIR)/%

View File

@ -48,6 +48,8 @@ static inline void ___vmlinux_h_sanity_check___(void)
s32 scx_bpf_create_dsq(u64 dsq_id, s32 node) __ksym;
s32 scx_bpf_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags, bool *is_idle) __ksym;
s32 scx_bpf_select_cpu_and(struct task_struct *p, s32 prev_cpu, u64 wake_flags,
const struct cpumask *cpus_allowed, u64 flags) __ksym __weak;
void scx_bpf_dsq_insert(struct task_struct *p, u64 dsq_id, u64 slice, u64 enq_flags) __ksym __weak;
void scx_bpf_dsq_insert_vtime(struct task_struct *p, u64 dsq_id, u64 slice, u64 vtime, u64 enq_flags) __ksym __weak;
u32 scx_bpf_dispatch_nr_slots(void) __ksym;

View File

@ -784,8 +784,8 @@ static int monitor_timerfn(void *map, int *key, struct bpf_timer *timer)
scx_read_event(&events, SCX_EV_DISPATCH_KEEP_LAST));
bpf_printk("%35s: %lld", "SCX_EV_ENQ_SKIP_EXITING",
scx_read_event(&events, SCX_EV_ENQ_SKIP_EXITING));
bpf_printk("%35s: %lld", "SCX_EV_ENQ_SLICE_DFL",
scx_read_event(&events, SCX_EV_ENQ_SLICE_DFL));
bpf_printk("%35s: %lld", "SCX_EV_REFILL_SLICE_DFL",
scx_read_event(&events, SCX_EV_REFILL_SLICE_DFL));
bpf_printk("%35s: %lld", "SCX_EV_BYPASS_DURATION",
scx_read_event(&events, SCX_EV_BYPASS_DURATION));
bpf_printk("%35s: %lld", "SCX_EV_BYPASS_DISPATCH",

View File

@ -24,19 +24,19 @@ def read_atomic(name):
def read_static_key(name):
return prog[name].key.enabled.counter.value_()
def ops_state_str(state):
return prog['scx_ops_enable_state_str'][state].string_().decode()
def state_str(state):
return prog['scx_enable_state_str'][state].string_().decode()
ops = prog['scx_ops']
enable_state = read_atomic("scx_ops_enable_state_var")
enable_state = read_atomic("scx_enable_state_var")
print(f'ops : {ops.name.string_().decode()}')
print(f'enabled : {read_static_key("__scx_ops_enabled")}')
print(f'enabled : {read_static_key("__scx_enabled")}')
print(f'switching_all : {read_int("scx_switching_all")}')
print(f'switched_all : {read_static_key("__scx_switched_all")}')
print(f'enable_state : {ops_state_str(enable_state)} ({enable_state})')
print(f'enable_state : {state_str(enable_state)} ({enable_state})')
print(f'in_softlockup : {prog["scx_in_softlockup"].value_()}')
print(f'breather_depth: {read_atomic("scx_ops_breather_depth")}')
print(f'bypass_depth : {prog["scx_ops_bypass_depth"].value_()}')
print(f'breather_depth: {read_atomic("scx_breather_depth")}')
print(f'bypass_depth : {prog["scx_bypass_depth"].value_()}')
print(f'nr_rejected : {read_atomic("scx_nr_rejected")}')
print(f'enable_seq : {read_atomic("scx_enable_seq")}')

View File

@ -162,10 +162,10 @@ all_test_bpfprogs := $(foreach prog,$(wildcard *.bpf.c),$(INCLUDE_DIR)/$(patsubs
auto-test-targets := \
create_dsq \
enq_last_no_enq_fails \
enq_select_cpu_fails \
ddsp_bogus_dsq_fail \
ddsp_vtimelocal_fail \
dsp_local_on \
enq_select_cpu \
exit \
hotplug \
init_enable_count \
@ -173,6 +173,7 @@ auto-test-targets := \
maybe_null \
minimal \
numa \
allowed_cpus \
prog_run \
reload_loop \
select_cpu_dfl \

View File

@ -0,0 +1,144 @@
// SPDX-License-Identifier: GPL-2.0
/*
* A scheduler that validates the behavior of scx_bpf_select_cpu_and() by
* selecting idle CPUs strictly within a subset of allowed CPUs.
*
* Copyright (c) 2025 Andrea Righi <arighi@nvidia.com>
*/
#include <scx/common.bpf.h>
char _license[] SEC("license") = "GPL";
UEI_DEFINE(uei);
private(PREF_CPUS) struct bpf_cpumask __kptr * allowed_cpumask;
static void
validate_idle_cpu(const struct task_struct *p, const struct cpumask *allowed, s32 cpu)
{
if (scx_bpf_test_and_clear_cpu_idle(cpu))
scx_bpf_error("CPU %d should be marked as busy", cpu);
if (bpf_cpumask_subset(allowed, p->cpus_ptr) &&
!bpf_cpumask_test_cpu(cpu, allowed))
scx_bpf_error("CPU %d not in the allowed domain for %d (%s)",
cpu, p->pid, p->comm);
}
s32 BPF_STRUCT_OPS(allowed_cpus_select_cpu,
struct task_struct *p, s32 prev_cpu, u64 wake_flags)
{
const struct cpumask *allowed;
s32 cpu;
allowed = cast_mask(allowed_cpumask);
if (!allowed) {
scx_bpf_error("allowed domain not initialized");
return -EINVAL;
}
/*
* Select an idle CPU strictly within the allowed domain.
*/
cpu = scx_bpf_select_cpu_and(p, prev_cpu, wake_flags, allowed, 0);
if (cpu >= 0) {
validate_idle_cpu(p, allowed, cpu);
scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, 0);
return cpu;
}
return prev_cpu;
}
void BPF_STRUCT_OPS(allowed_cpus_enqueue, struct task_struct *p, u64 enq_flags)
{
const struct cpumask *allowed;
s32 prev_cpu = scx_bpf_task_cpu(p), cpu;
scx_bpf_dsq_insert(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, 0);
allowed = cast_mask(allowed_cpumask);
if (!allowed) {
scx_bpf_error("allowed domain not initialized");
return;
}
/*
* Use scx_bpf_select_cpu_and() to proactively kick an idle CPU
* within @allowed_cpumask, usable by @p.
*/
cpu = scx_bpf_select_cpu_and(p, prev_cpu, 0, allowed, 0);
if (cpu >= 0) {
validate_idle_cpu(p, allowed, cpu);
scx_bpf_kick_cpu(cpu, SCX_KICK_IDLE);
}
}
s32 BPF_STRUCT_OPS_SLEEPABLE(allowed_cpus_init)
{
struct bpf_cpumask *mask;
mask = bpf_cpumask_create();
if (!mask)
return -ENOMEM;
mask = bpf_kptr_xchg(&allowed_cpumask, mask);
if (mask)
bpf_cpumask_release(mask);
bpf_rcu_read_lock();
/*
* Assign the first online CPU to the allowed domain.
*/
mask = allowed_cpumask;
if (mask) {
const struct cpumask *online = scx_bpf_get_online_cpumask();
bpf_cpumask_set_cpu(bpf_cpumask_first(online), mask);
scx_bpf_put_cpumask(online);
}
bpf_rcu_read_unlock();
return 0;
}
void BPF_STRUCT_OPS(allowed_cpus_exit, struct scx_exit_info *ei)
{
UEI_RECORD(uei, ei);
}
struct task_cpu_arg {
pid_t pid;
};
SEC("syscall")
int select_cpu_from_user(struct task_cpu_arg *input)
{
struct task_struct *p;
int cpu;
p = bpf_task_from_pid(input->pid);
if (!p)
return -EINVAL;
bpf_rcu_read_lock();
cpu = scx_bpf_select_cpu_and(p, bpf_get_smp_processor_id(), 0, p->cpus_ptr, 0);
bpf_rcu_read_unlock();
bpf_task_release(p);
return cpu;
}
SEC(".struct_ops.link")
struct sched_ext_ops allowed_cpus_ops = {
.select_cpu = (void *)allowed_cpus_select_cpu,
.enqueue = (void *)allowed_cpus_enqueue,
.init = (void *)allowed_cpus_init,
.exit = (void *)allowed_cpus_exit,
.name = "allowed_cpus",
};

View File

@ -0,0 +1,84 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2025 Andrea Righi <arighi@nvidia.com>
*/
#include <bpf/bpf.h>
#include <scx/common.h>
#include <sys/wait.h>
#include <unistd.h>
#include "allowed_cpus.bpf.skel.h"
#include "scx_test.h"
static enum scx_test_status setup(void **ctx)
{
struct allowed_cpus *skel;
skel = allowed_cpus__open();
SCX_FAIL_IF(!skel, "Failed to open");
SCX_ENUM_INIT(skel);
SCX_FAIL_IF(allowed_cpus__load(skel), "Failed to load skel");
*ctx = skel;
return SCX_TEST_PASS;
}
static int test_select_cpu_from_user(const struct allowed_cpus *skel)
{
int fd, ret;
__u64 args[1];
LIBBPF_OPTS(bpf_test_run_opts, attr,
.ctx_in = args,
.ctx_size_in = sizeof(args),
);
args[0] = getpid();
fd = bpf_program__fd(skel->progs.select_cpu_from_user);
if (fd < 0)
return fd;
ret = bpf_prog_test_run_opts(fd, &attr);
if (ret < 0)
return ret;
fprintf(stderr, "%s: CPU %d\n", __func__, attr.retval);
return 0;
}
static enum scx_test_status run(void *ctx)
{
struct allowed_cpus *skel = ctx;
struct bpf_link *link;
link = bpf_map__attach_struct_ops(skel->maps.allowed_cpus_ops);
SCX_FAIL_IF(!link, "Failed to attach scheduler");
/* Pick an idle CPU from user-space */
SCX_FAIL_IF(test_select_cpu_from_user(skel), "Failed to pick idle CPU");
/* Just sleeping is fine, plenty of scheduling events happening */
sleep(1);
SCX_EQ(skel->data->uei.kind, EXIT_KIND(SCX_EXIT_NONE));
bpf_link__destroy(link);
return SCX_TEST_PASS;
}
static void cleanup(void *ctx)
{
struct allowed_cpus *skel = ctx;
allowed_cpus__destroy(skel);
}
struct scx_test allowed_cpus = {
.name = "allowed_cpus",
.description = "Verify scx_bpf_select_cpu_and()",
.setup = setup,
.run = run,
.cleanup = cleanup,
};
REGISTER_SCX_TEST(&allowed_cpus)

View File

@ -0,0 +1,74 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2023 Meta Platforms, Inc. and affiliates.
* Copyright (c) 2023 David Vernet <dvernet@meta.com>
* Copyright (c) 2023 Tejun Heo <tj@kernel.org>
*/
#include <scx/common.bpf.h>
char _license[] SEC("license") = "GPL";
UEI_DEFINE(uei);
s32 BPF_STRUCT_OPS(enq_select_cpu_select_cpu, struct task_struct *p,
s32 prev_cpu, u64 wake_flags)
{
/* Bounce all tasks to ops.enqueue() */
return prev_cpu;
}
void BPF_STRUCT_OPS(enq_select_cpu_enqueue, struct task_struct *p,
u64 enq_flags)
{
s32 cpu, prev_cpu = scx_bpf_task_cpu(p);
bool found = false;
cpu = scx_bpf_select_cpu_dfl(p, prev_cpu, 0, &found);
if (found) {
scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL_ON | cpu, SCX_SLICE_DFL, enq_flags);
return;
}
scx_bpf_dsq_insert(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags);
}
void BPF_STRUCT_OPS(enq_select_cpu_exit, struct scx_exit_info *ei)
{
UEI_RECORD(uei, ei);
}
struct task_cpu_arg {
pid_t pid;
};
SEC("syscall")
int select_cpu_from_user(struct task_cpu_arg *input)
{
struct task_struct *p;
bool found = false;
s32 cpu;
p = bpf_task_from_pid(input->pid);
if (!p)
return -EINVAL;
bpf_rcu_read_lock();
cpu = scx_bpf_select_cpu_dfl(p, bpf_get_smp_processor_id(), 0, &found);
if (!found)
cpu = -EBUSY;
bpf_rcu_read_unlock();
bpf_task_release(p);
return cpu;
}
SEC(".struct_ops.link")
struct sched_ext_ops enq_select_cpu_ops = {
.select_cpu = (void *)enq_select_cpu_select_cpu,
.enqueue = (void *)enq_select_cpu_enqueue,
.exit = (void *)enq_select_cpu_exit,
.name = "enq_select_cpu",
.timeout_ms = 1000U,
};

View File

@ -0,0 +1,88 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2023 Meta Platforms, Inc. and affiliates.
* Copyright (c) 2023 David Vernet <dvernet@meta.com>
* Copyright (c) 2023 Tejun Heo <tj@kernel.org>
*/
#include <bpf/bpf.h>
#include <scx/common.h>
#include <sys/wait.h>
#include <unistd.h>
#include "enq_select_cpu.bpf.skel.h"
#include "scx_test.h"
static enum scx_test_status setup(void **ctx)
{
struct enq_select_cpu *skel;
skel = enq_select_cpu__open();
SCX_FAIL_IF(!skel, "Failed to open");
SCX_ENUM_INIT(skel);
SCX_FAIL_IF(enq_select_cpu__load(skel), "Failed to load skel");
*ctx = skel;
return SCX_TEST_PASS;
}
static int test_select_cpu_from_user(const struct enq_select_cpu *skel)
{
int fd, ret;
__u64 args[1];
LIBBPF_OPTS(bpf_test_run_opts, attr,
.ctx_in = args,
.ctx_size_in = sizeof(args),
);
args[0] = getpid();
fd = bpf_program__fd(skel->progs.select_cpu_from_user);
if (fd < 0)
return fd;
ret = bpf_prog_test_run_opts(fd, &attr);
if (ret < 0)
return ret;
fprintf(stderr, "%s: CPU %d\n", __func__, attr.retval);
return 0;
}
static enum scx_test_status run(void *ctx)
{
struct enq_select_cpu *skel = ctx;
struct bpf_link *link;
link = bpf_map__attach_struct_ops(skel->maps.enq_select_cpu_ops);
if (!link) {
SCX_ERR("Failed to attach scheduler");
return SCX_TEST_FAIL;
}
/* Pick an idle CPU from user-space */
SCX_FAIL_IF(test_select_cpu_from_user(skel), "Failed to pick idle CPU");
sleep(1);
SCX_EQ(skel->data->uei.kind, EXIT_KIND(SCX_EXIT_NONE));
bpf_link__destroy(link);
return SCX_TEST_PASS;
}
static void cleanup(void *ctx)
{
struct enq_select_cpu *skel = ctx;
enq_select_cpu__destroy(skel);
}
struct scx_test enq_select_cpu = {
.name = "enq_select_cpu",
.description = "Verify scx_bpf_select_cpu_dfl() from multiple contexts",
.setup = setup,
.run = run,
.cleanup = cleanup,
};
REGISTER_SCX_TEST(&enq_select_cpu)

View File

@ -1,43 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2023 Meta Platforms, Inc. and affiliates.
* Copyright (c) 2023 David Vernet <dvernet@meta.com>
* Copyright (c) 2023 Tejun Heo <tj@kernel.org>
*/
#include <scx/common.bpf.h>
char _license[] SEC("license") = "GPL";
/* Manually specify the signature until the kfunc is added to the scx repo. */
s32 scx_bpf_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags,
bool *found) __ksym;
s32 BPF_STRUCT_OPS(enq_select_cpu_fails_select_cpu, struct task_struct *p,
s32 prev_cpu, u64 wake_flags)
{
return prev_cpu;
}
void BPF_STRUCT_OPS(enq_select_cpu_fails_enqueue, struct task_struct *p,
u64 enq_flags)
{
/*
* Need to initialize the variable or the verifier will fail to load.
* Improving these semantics is actively being worked on.
*/
bool found = false;
/* Can only call from ops.select_cpu() */
scx_bpf_select_cpu_dfl(p, 0, 0, &found);
scx_bpf_dsq_insert(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags);
}
SEC(".struct_ops.link")
struct sched_ext_ops enq_select_cpu_fails_ops = {
.select_cpu = (void *) enq_select_cpu_fails_select_cpu,
.enqueue = (void *) enq_select_cpu_fails_enqueue,
.name = "enq_select_cpu_fails",
.timeout_ms = 1000U,
};

View File

@ -1,61 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2023 Meta Platforms, Inc. and affiliates.
* Copyright (c) 2023 David Vernet <dvernet@meta.com>
* Copyright (c) 2023 Tejun Heo <tj@kernel.org>
*/
#include <bpf/bpf.h>
#include <scx/common.h>
#include <sys/wait.h>
#include <unistd.h>
#include "enq_select_cpu_fails.bpf.skel.h"
#include "scx_test.h"
static enum scx_test_status setup(void **ctx)
{
struct enq_select_cpu_fails *skel;
skel = enq_select_cpu_fails__open();
SCX_FAIL_IF(!skel, "Failed to open");
SCX_ENUM_INIT(skel);
SCX_FAIL_IF(enq_select_cpu_fails__load(skel), "Failed to load skel");
*ctx = skel;
return SCX_TEST_PASS;
}
static enum scx_test_status run(void *ctx)
{
struct enq_select_cpu_fails *skel = ctx;
struct bpf_link *link;
link = bpf_map__attach_struct_ops(skel->maps.enq_select_cpu_fails_ops);
if (!link) {
SCX_ERR("Failed to attach scheduler");
return SCX_TEST_FAIL;
}
sleep(1);
bpf_link__destroy(link);
return SCX_TEST_PASS;
}
static void cleanup(void *ctx)
{
struct enq_select_cpu_fails *skel = ctx;
enq_select_cpu_fails__destroy(skel);
}
struct scx_test enq_select_cpu_fails = {
.name = "enq_select_cpu_fails",
.description = "Verify we fail to call scx_bpf_select_cpu_dfl() "
"from ops.enqueue()",
.setup = setup,
.run = run,
.cleanup = cleanup,
};
REGISTER_SCX_TEST(&enq_select_cpu_fails)