sched_ext: Fixes for v6.16-rc6

 - Fix handling of migration-disabled tasks in default idle selection.
 
 - update_locked_rq() called __this_cpu_write() spuriously with NULL when @rq
   was not locked. As the writes were spurious, it didn't break anything
   directly. However, the function could be called in a preemptible context,
   leading to a context warning in __this_cpu_write(). Skip the spurious
   NULL writes.
 
 - Selftest fix on UP.
 -----BEGIN PGP SIGNATURE-----
 
 iIQEABYKACwWIQTfIjM1kS57o3GsC/uxYfJx3gVYGQUCaHvPZw4cdGpAa2VybmVs
 Lm9yZwAKCRCxYfJx3gVYGabMAP4jSAr4gYWEBOUaD9btwnPxZwlSiAEQtqBDBVRb
 /UunFAD/WBwUPk/u7BchLHjuH3sYW5gQb40kbtUnmNvB+RNUUgc=
 =3WAD
 -----END PGP SIGNATURE-----

Merge tag 'sched_ext-for-6.16-rc6-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/sched_ext

Pull sched_ext fixes from Tejun Heo:

 - Fix handling of migration-disabled tasks in default idle selection

 - update_locked_rq() called __this_cpu_write() spuriously with NULL
   when @rq was not locked. As the writes were spurious, it didn't break
   anything directly. However, the function could be called in a
   preemptible context, leading to a context warning in
   __this_cpu_write(). Skip the spurious NULL writes. (A sketch of the
   per-CPU pattern involved follows this list.)

 - Selftest fix on UP
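
To illustrate the context rule at play, here is a minimal sketch of the
per-CPU tracking pattern (identifiers approximated from the commit, not
verbatim kernel source):

        /* Per-CPU record of which rq the current CPU holds locked. */
        static DEFINE_PER_CPU(struct rq *, locked_rq);

        static inline void update_locked_rq(struct rq *rq)
        {
                /*
                 * __this_cpu_write() requires a non-preemptible caller;
                 * with CONFIG_DEBUG_PREEMPT it reports "BUG: using
                 * __this_cpu_write() in preemptible" otherwise. When a
                 * callback runs with no rq locked, the caller may be
                 * preemptible, and with no rq held there is nothing to
                 * record, so the NULL write can simply be skipped.
                 */
                __this_cpu_write(locked_rq, rq);
        }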

* tag 'sched_ext-for-6.16-rc6-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/sched_ext:
  sched_ext: idle: Handle migration-disabled tasks in idle selection
  sched/ext: Prevent update_locked_rq() calls with NULL rq
  selftests/sched_ext: Fix exit selftest hang on UP
commit bf61759db4
Linus Torvalds, 2025-07-19 10:40:30 -07:00
3 changed files with 17 additions and 5 deletions

kernel/sched/ext.c

@@ -1272,7 +1272,8 @@ static inline struct rq *scx_locked_rq(void)
 
 #define SCX_CALL_OP(sch, mask, op, rq, args...)                 \
 do {                                                            \
-        update_locked_rq(rq);                                   \
+        if (rq)                                                 \
+                update_locked_rq(rq);                           \
         if (mask) {                                             \
                 scx_kf_allow(mask);                             \
                 (sch)->ops.op(args);                            \
@@ -1280,14 +1281,16 @@ do { \
         } else {                                                \
                 (sch)->ops.op(args);                            \
         }                                                       \
-        update_locked_rq(NULL);                                 \
+        if (rq)                                                 \
+                update_locked_rq(NULL);                         \
 } while (0)
 
 #define SCX_CALL_OP_RET(sch, mask, op, rq, args...)             \
 ({                                                              \
         __typeof__((sch)->ops.op(args)) __ret;                  \
                                                                 \
-        update_locked_rq(rq);                                   \
+        if (rq)                                                 \
+                update_locked_rq(rq);                           \
         if (mask) {                                             \
                 scx_kf_allow(mask);                             \
                 __ret = (sch)->ops.op(args);                    \
@@ -1295,7 +1298,8 @@ do { \
         } else {                                                \
                 __ret = (sch)->ops.op(args);                    \
         }                                                       \
-        update_locked_rq(NULL);                                 \
+        if (rq)                                                 \
+                update_locked_rq(NULL);                         \
         __ret;                                                  \
 })
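
Call sites that run without any rq context pass a NULL rq. A
hypothetical invocation of the macro above (op name and arguments
illustrative only, not a verbatim call site):

        /* From a preemptible, unlocked context: rq == NULL previously
         * still wrote NULL into the per-CPU tracker. */
        SCX_CALL_OP(sch, SCX_KF_UNLOCKED, cpu_online, NULL, cpu);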

kernel/sched/ext_idle.c

@@ -903,7 +903,7 @@ s32 select_cpu_from_kfunc(struct task_struct *p, s32 prev_cpu, u64 wake_flags,
          * selection optimizations and simply check whether the previously
          * used CPU is idle and within the allowed cpumask.
          */
-        if (p->nr_cpus_allowed == 1) {
+        if (p->nr_cpus_allowed == 1 || is_migration_disabled(p)) {
                 if (cpumask_test_cpu(prev_cpu, allowed ?: p->cpus_ptr) &&
                     scx_idle_test_and_clear_cpu(prev_cpu))
                         cpu = prev_cpu;
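
Why the existing p->nr_cpus_allowed == 1 check was not enough:
migrate_disable() pins a task to its current CPU without narrowing
p->cpus_ptr, so nr_cpus_allowed still reflects the full affinity mask.
A simplified sketch of the predicate used above (approximating the
kernel's definition):

        static inline bool is_migration_disabled(struct task_struct *p)
        {
        #ifdef CONFIG_SMP
                /* Set by migrate_disable(), cleared by migrate_enable(). */
                return p->migration_disabled;
        #else
                return false;
        #endif
        }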

tools/testing/selftests/sched_ext/exit.c

@@ -22,6 +22,14 @@ static enum scx_test_status run(void *ctx)
                 struct bpf_link *link;
                 char buf[16];
 
+                /*
+                 * On single-CPU systems, ops.select_cpu() is never
+                 * invoked, so skip this test to avoid getting stuck
+                 * indefinitely.
+                 */
+                if (tc == EXIT_SELECT_CPU && libbpf_num_possible_cpus() == 1)
+                        continue;
+
                 skel = exit__open();
                 SCX_ENUM_INIT(skel);
                 skel->rodata->exit_point = tc;
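
libbpf_num_possible_cpus() returns the number of possible CPUs, or a
negative errno on failure. A standalone sketch of the same UP guard
(illustrative only, not part of the patch):

        #include <stdio.h>
        #include <bpf/libbpf.h>

        int main(void)
        {
                int ncpus = libbpf_num_possible_cpus();

                if (ncpus < 0) {
                        fprintf(stderr, "failed to query CPUs: %d\n", ncpus);
                        return 1;
                }
                /* Mirror the selftest guard: on UP, ops.select_cpu()
                 * is never invoked, so such tests must be skipped. */
                printf(ncpus == 1 ? "UP: skip select_cpu tests\n"
                                  : "SMP: run all tests\n");
                return 0;
        }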