As pointed out by commit de9b8f5dcb ("sched: Fix crash trying to
dequeue/enqueue the idle thread"),
init_idle() can and will be invoked more than once on the same idle
task. At boot time, it is invoked for the boot CPU thread by
sched_init(). Then smp_init() creates the threads for all the secondary
CPUs and invokes init_idle() on them.
As the hotplug machinery brings the secondaries to life, it will issue
calls to idle_thread_get(), which itself invokes init_idle() yet again.
In this case it's invoked twice more per secondary: at _cpu_up(), and at
bringup_cpu().
Given smp_init() already initializes the idle tasks for all *possible*
CPUs, no further initialization should be required. Now, removing
init_idle() from idle_thread_get() exposes some interesting expectations
with regard to the idle task's preempt_count: the secondary startup always
issues a preempt_disable(), requiring some reset of the preempt count to 0
between hot-unplug and hotplug, which is currently served by
idle_thread_get() -> init_idle().
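With init_idle() gone from it, idle_thread_get() reduces to a plain
per-CPU lookup. A minimal sketch of the kernel/smpboot.c side after this
change (simplified, error handling as in mainline):

  struct task_struct *idle_thread_get(unsigned int cpu)
  {
          struct task_struct *tsk = per_cpu(idle_threads, cpu);

          if (!tsk)
                  return ERR_PTR(-ENOMEM);
          /* No init_idle() here anymore: the task was fully set up once
           * at boot and needs no per-hotplug reset. */
          return tsk;
  }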
Given the idle task is supposed to have preemption disabled once and never
see it re-enabled, it seems that what we actually want is to initialize its
preempt_count to PREEMPT_DISABLED and leave it there. Do that, and remove
init_idle() from idle_thread_get().
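In the generic header this boils down to something like the following
(a sketch of the asm-generic/preempt.h side; the s390 file below keeps
its preempt count in lowcore rather than in thread_info):

  /* The idle task's preempt_count is set once and never re-enabled. */
  #define init_idle_preempt_count(p, cpu) do { \
          task_thread_info(p)->preempt_count = PREEMPT_DISABLED; \
  } while (0)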
Secondary startups were patched via coccinelle:

  @begone@
  @@

  -preempt_disable();
  ...
  cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
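Applied to a typical secondary startup path, the rule deletes the now
redundant preempt_disable() right before the idle-loop entry. An
illustrative before/after (the function and its body are made up for
illustration; only the two matched lines come from the semantic patch):

  /* before */
  void secondary_start_kernel(void)
  {
          /* ... arch-specific bringup ... */
          preempt_disable();
          cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
  }

  /* after: preempt_count is already PREEMPT_DISABLED, so the CPU
   * enters the idle loop directly */
  void secondary_start_kernel(void)
  {
          /* ... arch-specific bringup ... */
          cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
  }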
Signed-off-by: Valentin Schneider <valentin.schneider@arm.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Link: https://lore.kernel.org/r/20210512094636.2958515-1-valentin.schneider@arm.com
141 lines | 3.1 KiB | C
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_PREEMPT_H
#define __ASM_PREEMPT_H

#include <asm/current.h>
#include <linux/thread_info.h>
#include <asm/atomic_ops.h>

#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES

/*
 * We use the MSB mostly because it's available.  The flag is kept
 * inverted (bit clear == resched needed) so that a preempt_count of 0
 * with a pending resched reads as exactly 0, letting a single atomic
 * decrement-and-test cover both checks at once.
 */
#define PREEMPT_NEED_RESCHED	0x80000000
#define PREEMPT_ENABLED	(0 + PREEMPT_NEED_RESCHED)

static inline int preempt_count(void)
{
	return READ_ONCE(S390_lowcore.preempt_count) & ~PREEMPT_NEED_RESCHED;
}

static inline void preempt_count_set(int pc)
{
	int old, new;

	do {
		old = READ_ONCE(S390_lowcore.preempt_count);
		new = (old & PREEMPT_NEED_RESCHED) |
		      (pc & ~PREEMPT_NEED_RESCHED);
	} while (__atomic_cmpxchg(&S390_lowcore.preempt_count,
				  old, new) != old);
}

#define init_task_preempt_count(p)	do { } while (0)

/* The idle task starts out with preemption disabled and stays that way. */
#define init_idle_preempt_count(p, cpu)	do { \
	S390_lowcore.preempt_count = PREEMPT_DISABLED; \
} while (0)

static inline void set_preempt_need_resched(void)
{
	/* Inverted flag: clearing the MSB marks a reschedule as needed. */
	__atomic_and(~PREEMPT_NEED_RESCHED, &S390_lowcore.preempt_count);
}

static inline void clear_preempt_need_resched(void)
{
	__atomic_or(PREEMPT_NEED_RESCHED, &S390_lowcore.preempt_count);
}

static inline bool test_preempt_need_resched(void)
{
	return !(READ_ONCE(S390_lowcore.preempt_count) & PREEMPT_NEED_RESCHED);
}

static inline void __preempt_count_add(int val)
{
	/* Small constants can use the cheaper add-immediate form. */
	if (__builtin_constant_p(val) && (val >= -128) && (val <= 127))
		__atomic_add_const(val, &S390_lowcore.preempt_count);
	else
		__atomic_add(val, &S390_lowcore.preempt_count);
}

static inline void __preempt_count_sub(int val)
{
	__preempt_count_add(-val);
}

static inline bool __preempt_count_dec_and_test(void)
{
	/*
	 * __atomic_add() returns the old value: old == 1 means the count
	 * just hit 0 with the (inverted) NEED_RESCHED bit clear, i.e. we
	 * became preemptible and have a reschedule pending.
	 */
	return __atomic_add(-1, &S390_lowcore.preempt_count) == 1;
}

static inline bool should_resched(int preempt_offset)
{
	/* Equality implies the inverted NEED_RESCHED bit is clear, too. */
	return unlikely(READ_ONCE(S390_lowcore.preempt_count) ==
			preempt_offset);
}

#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */

#define PREEMPT_ENABLED	(0)

static inline int preempt_count(void)
{
	return READ_ONCE(S390_lowcore.preempt_count);
}

static inline void preempt_count_set(int pc)
{
	S390_lowcore.preempt_count = pc;
}

#define init_task_preempt_count(p)	do { } while (0)

/* The idle task starts out with preemption disabled and stays that way. */
#define init_idle_preempt_count(p, cpu)	do { \
	S390_lowcore.preempt_count = PREEMPT_DISABLED; \
} while (0)

/*
 * Without the z196 facilities the resched state is not folded into the
 * preempt count; it is tracked solely via TIF_NEED_RESCHED, so these
 * helpers are no-ops and tif_need_resched() is checked explicitly below.
 */
static inline void set_preempt_need_resched(void)
{
}

static inline void clear_preempt_need_resched(void)
{
}

static inline bool test_preempt_need_resched(void)
{
	return false;
}

static inline void __preempt_count_add(int val)
{
	S390_lowcore.preempt_count += val;
}

static inline void __preempt_count_sub(int val)
{
	S390_lowcore.preempt_count -= val;
}

static inline bool __preempt_count_dec_and_test(void)
{
	return !--S390_lowcore.preempt_count && tif_need_resched();
}

static inline bool should_resched(int preempt_offset)
{
	return unlikely(preempt_count() == preempt_offset &&
			tif_need_resched());
}

#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */

#ifdef CONFIG_PREEMPTION
extern void preempt_schedule(void);
#define __preempt_schedule() preempt_schedule()
extern void preempt_schedule_notrace(void);
#define __preempt_schedule_notrace() preempt_schedule_notrace()
#endif /* CONFIG_PREEMPTION */

#endif /* __ASM_PREEMPT_H */
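For context, the generic macros in include/linux/preempt.h sit on top of
the primitives above roughly as follows (a simplified sketch, assuming
CONFIG_PREEMPTION and no debug instrumentation):

  #define preempt_disable() \
  do { \
          preempt_count_inc();    /* -> __preempt_count_add(1) */ \
          barrier(); \
  } while (0)

  #define preempt_enable() \
  do { \
          barrier(); \
          if (unlikely(preempt_count_dec_and_test())) \
                  __preempt_schedule();   /* -> preempt_schedule() */ \
  } while (0)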