mirror of
https://kernel.googlesource.com/pub/scm/linux/kernel/git/torvalds/linux
synced 2025-11-01 23:57:08 +10:00
sched_ext: Fixes for v6.16-rc6
Merge tag 'sched_ext-for-6.16-rc6-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/sched_ext

Pull sched_ext fixes from Tejun Heo:

 - Fix handling of migration disabled tasks in default idle selection

 - update_locked_rq() called __this_cpu_write() spuriously with NULL
   when @rq was not locked. As the writes were spurious, they didn't
   break anything directly. However, the function could be called in a
   preemptible context, leading to a context warning in
   __this_cpu_write(). Skip the spurious NULL writes.

 - Selftest fix on UP

* tag 'sched_ext-for-6.16-rc6-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/sched_ext:
  sched_ext: idle: Handle migration-disabled tasks in idle selection
  sched/ext: Prevent update_locked_rq() calls with NULL rq
  selftests/sched_ext: Fix exit selftest hang on UP
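For context on the update_locked_rq() item: with CONFIG_DEBUG_PREEMPT enabled, the __this_cpu_*() accessors verify that the caller cannot be preempted, independent of the value being written. A minimal kernel-style sketch of the pattern behind the warning; the per-CPU variable name is assumed for illustration and this is not the actual ext.c source:

#include <linux/percpu.h>

/* Per-CPU slot recording which rq the current scx callback holds
 * locked; variable name assumed for illustration. */
static DEFINE_PER_CPU(struct rq *, locked_rq);

static void update_locked_rq(struct rq *rq)
{
        /*
         * With CONFIG_DEBUG_PREEMPT, __this_cpu_write() warns if the
         * caller is preemptible -- even for a NULL write that nothing
         * reads. The fix below therefore skips this call entirely
         * when @rq is NULL.
         */
        __this_cpu_write(locked_rq, rq);
}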
commit bf61759db4
kernel/sched/ext.c

@@ -1272,7 +1272,8 @@ static inline struct rq *scx_locked_rq(void)
 
 #define SCX_CALL_OP(sch, mask, op, rq, args...)                         \
 do {                                                                    \
-        update_locked_rq(rq);                                           \
+        if (rq)                                                         \
+                update_locked_rq(rq);                                   \
         if (mask) {                                                     \
                 scx_kf_allow(mask);                                     \
                 (sch)->ops.op(args);                                    \
@@ -1280,14 +1281,16 @@ do {
         } else {                                                        \
                 (sch)->ops.op(args);                                    \
         }                                                               \
-        update_locked_rq(NULL);                                         \
+        if (rq)                                                         \
+                update_locked_rq(NULL);                                 \
 } while (0)
 
 #define SCX_CALL_OP_RET(sch, mask, op, rq, args...)                     \
 ({                                                                      \
         __typeof__((sch)->ops.op(args)) __ret;                          \
                                                                         \
-        update_locked_rq(rq);                                           \
+        if (rq)                                                         \
+                update_locked_rq(rq);                                   \
         if (mask) {                                                     \
                 scx_kf_allow(mask);                                     \
                 __ret = (sch)->ops.op(args);                            \
@@ -1295,7 +1298,8 @@ do {
         } else {                                                        \
                 __ret = (sch)->ops.op(args);                            \
         }                                                               \
-        update_locked_rq(NULL);                                         \
+        if (rq)                                                         \
+                update_locked_rq(NULL);                                 \
         __ret;                                                          \
 })
 
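Net effect on SCX_CALL_OP() once both hunks are applied, reassembled as a sketch (the scx_kf_disallow() line falls between the hunks and is assumed from context):

#define SCX_CALL_OP(sch, mask, op, rq, args...)                         \
do {                                                                    \
        if (rq)                /* only track an rq actually held */     \
                update_locked_rq(rq);                                   \
        if (mask) {                                                     \
                scx_kf_allow(mask);                                     \
                (sch)->ops.op(args);                                    \
                scx_kf_disallow(mask); /* assumed, between hunks */     \
        } else {                                                        \
                (sch)->ops.op(args);                                    \
        }                                                               \
        if (rq)                /* skip the spurious NULL write */       \
                update_locked_rq(NULL);                                 \
} while (0)

SCX_CALL_OP_RET() receives the identical guards around its update_locked_rq() pair.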
kernel/sched/ext_idle.c

@@ -903,7 +903,7 @@ s32 select_cpu_from_kfunc(struct task_struct *p, s32 prev_cpu, u64 wake_flags,
          * selection optimizations and simply check whether the previously
          * used CPU is idle and within the allowed cpumask.
          */
-        if (p->nr_cpus_allowed == 1) {
+        if (p->nr_cpus_allowed == 1 || is_migration_disabled(p)) {
                 if (cpumask_test_cpu(prev_cpu, allowed ?: p->cpus_ptr) &&
                     scx_idle_test_and_clear_cpu(prev_cpu))
                         cpu = prev_cpu;
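Rationale for the new condition: a task that has run migrate_disable() must stay on its current CPU even though p->nr_cpus_allowed may still be greater than 1, so default idle selection has to treat it exactly like a task pinned to one CPU and only ever consider prev_cpu. The predicate is assumed to be the stock scheduler helper, roughly:

/* Assumed shape of the helper (kernel/sched/sched.h); on !SMP every
 * task is effectively pinned anyway. */
static inline bool is_migration_disabled(struct task_struct *p)
{
#ifdef CONFIG_SMP
        return p->migration_disabled;
#else
        return false;
#endif
}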
tools/testing/selftests/sched_ext/exit.c

@@ -22,6 +22,14 @@ static enum scx_test_status run(void *ctx)
                 struct bpf_link *link;
                 char buf[16];
 
+                /*
+                 * On single-CPU systems, ops.select_cpu() is never
+                 * invoked, so skip this test to avoid getting stuck
+                 * indefinitely.
+                 */
+                if (tc == EXIT_SELECT_CPU && libbpf_num_possible_cpus() == 1)
+                        continue;
+
                 skel = exit__open();
                 SCX_ENUM_INIT(skel);
                 skel->rodata->exit_point = tc;
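Why the test hung: EXIT_SELECT_CPU asks the scheduler to exit from inside ops.select_cpu(), but on a single-CPU system that callback is never invoked, so the exit never fires and the test waits indefinitely. The guard relies on libbpf's CPU-count helper; a standalone usage sketch:

#include <stdio.h>
#include <bpf/libbpf.h>

int main(void)
{
        /* libbpf_num_possible_cpus() returns the number of possible
         * CPUs, or a negative errno-style value on failure. */
        int ncpus = libbpf_num_possible_cpus();

        if (ncpus < 0) {
                fprintf(stderr, "cpu query failed: %d\n", ncpus);
                return 1;
        }
        printf("possible CPUs: %d%s\n", ncpus,
               ncpus == 1 ? " (UP: exit test skipped)" : "");
        return 0;
}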