kernel/smp: Fix races in SMP initialization
This has bitrotten a bit. Early implementations had a synchronous arch_start_cpu(), but then we started allowing that to be an async operation. But that means that CPU start now becomes surprisingly reentrant to the arch layer (cpu 0 can get a call to start cpu 2 while cpu 1's initialization code is still running). That's just error-prone; we never documented the requirements cleanly (the window is very small, but not so small on a slow simulator!). Add an extra flag so we don't issue the next start until the last one is out of the arch layer and running in smp_init_top().

Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
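To make the interlock concrete, here is a minimal standalone sketch of the same handshake using plain C11 atomics and POSIX threads rather than Zephyr's APIs (build with cc -pthread). The names start_flag, ready_flag and local_delay mirror the patch; NUM_CPUS, secondary_entry and the rest are illustrative only:

/* Standalone illustration of the interlock this patch adds: the "boot
 * CPU" refuses to launch secondary N+1 until secondary N has signalled,
 * from inside its own entry point, that it is out of the (simulated)
 * arch start path.  Plain C11 atomics + pthreads, not Zephyr code.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NUM_CPUS 4

static atomic_int start_flag;  /* "begin scheduling" signal            */
static atomic_int ready_flag;  /* "I'm running in my entry function"   */

static void local_delay(void)
{
        for (volatile int i = 0; i < 1000; i++) {
        }
}

static void *secondary_entry(void *arg)
{
        long cpu = (long)arg;

        /* First thing in the entry point: tell the boot CPU it may start
         * the next one.  This is the analogue of setting ready_flag in
         * smp_init_top().
         */
        atomic_store(&ready_flag, 1);

        /* Then wait for the global "go" signal, as z_smp_thread_init()
         * does with cpu_start_flag.
         */
        while (!atomic_load(&start_flag)) {
                local_delay();
        }
        printf("cpu %ld scheduling\n", cpu);
        return NULL;
}

int main(void)
{
        pthread_t t[NUM_CPUS];

        atomic_store(&start_flag, 0);
        for (long i = 1; i < NUM_CPUS; i++) {
                atomic_store(&ready_flag, 0);
                pthread_create(&t[i], NULL, secondary_entry, (void *)i);
                /* Interlock: don't launch the next CPU until this one has
                 * reached secondary_entry() and set ready_flag.
                 */
                while (!atomic_load(&ready_flag)) {
                        local_delay();
                }
        }
        atomic_store(&start_flag, 1);   /* release them all */
        for (long i = 1; i < NUM_CPUS; i++) {
                pthread_join(t[i], NULL);
        }
        return 0;
}

The point is the ordering: each secondary announces itself from inside its own entry function, so the boot CPU never has two starts in flight through the arch layer at once.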
parent 97ada8bc04
commit 3457118540
1 changed file with 21 additions and 3 deletions
kernel/smp.c | 24 +++++++++++++++++++++---

--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -13,6 +13,10 @@
 static atomic_t global_lock;
 static atomic_t start_flag;
 
+#if CONFIG_MP_NUM_CPUS > 1
+static atomic_t ready_flag;
+#endif
+
 unsigned int z_smp_global_lock(void)
 {
         unsigned int key = arch_irq_lock();
@@ -48,6 +52,15 @@ void z_smp_release_global_lock(struct k_thread *thread)
         }
 }
 
+/* Tiny delay that relaxes bus traffic to avoid spamming a shared
+ * memory bus looking at an atomic variable
+ */
+static inline void local_delay(void)
+{
+        for (volatile int i = 0; i < 1000; i++) {
+        }
+}
+
 #if CONFIG_MP_NUM_CPUS > 1
 
 void z_smp_thread_init(void *arg, struct k_thread *thread)
@@ -56,6 +69,7 @@ void z_smp_thread_init(void *arg, struct k_thread *thread)
 
         /* Wait for the signal to begin scheduling */
         while (!atomic_get(cpu_start_flag)) {
+                local_delay();
         }
 
         z_dummy_thread_init(thread);
@@ -70,9 +84,9 @@ static inline FUNC_NORETURN void smp_init_top(void *arg)
 {
         struct k_thread dummy_thread;
 
-        z_smp_thread_init(arg, &dummy_thread);
         smp_timer_init();
-
+        (void)atomic_set(&ready_flag, 1);
+        z_smp_thread_init(arg, &dummy_thread);
         z_swap_unlocked();
 
         CODE_UNREACHABLE; /* LCOV_EXCL_LINE */
@@ -92,10 +106,14 @@ void z_smp_init(void)
 {
         (void)atomic_clear(&start_flag);
 
-#if CONFIG_MP_NUM_CPUS > 1 && !defined(CONFIG_SMP_BOOT_DELAY)
+#if !defined(CONFIG_SMP_BOOT_DELAY) && (CONFIG_MP_NUM_CPUS > 1)
         for (int i = 1; i < CONFIG_MP_NUM_CPUS; i++) {
+                (void)atomic_clear(&ready_flag);
                 arch_start_cpu(i, z_interrupt_stacks[i], CONFIG_ISR_STACK_SIZE,
                                smp_init_top, &start_flag);
+                while (!atomic_get(&ready_flag)) {
+                        local_delay();
+                }
         }
 #endif
 
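Read together, the hunks above turn CPU bring-up into a one-at-a-time handshake. The following is only a reading aid reassembled from this diff, not the full file; lines the patch does not touch are elided:

static atomic_t ready_flag;     /* new: set by each secondary CPU */

static inline FUNC_NORETURN void smp_init_top(void *arg)
{
        struct k_thread dummy_thread;

        smp_timer_init();
        (void)atomic_set(&ready_flag, 1);       /* "I'm out of the arch layer" */
        z_smp_thread_init(arg, &dummy_thread);  /* spins on the start flag */
        z_swap_unlocked();

        CODE_UNREACHABLE; /* LCOV_EXCL_LINE */
}

void z_smp_init(void)
{
        (void)atomic_clear(&start_flag);

#if !defined(CONFIG_SMP_BOOT_DELAY) && (CONFIG_MP_NUM_CPUS > 1)
        for (int i = 1; i < CONFIG_MP_NUM_CPUS; i++) {
                (void)atomic_clear(&ready_flag);
                arch_start_cpu(i, z_interrupt_stacks[i], CONFIG_ISR_STACK_SIZE,
                               smp_init_top, &start_flag);
                /* Don't start the next CPU until this one checks in */
                while (!atomic_get(&ready_flag)) {
                        local_delay();
                }
        }
#endif
        /* ... rest of z_smp_init() unchanged and not shown in this diff ... */
}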