From 47ab66311d41d0f2ce1c47d2c7eb5bc40ef07634 Mon Sep 17 00:00:00 2001
From: Andy Ross
Date: Fri, 19 Apr 2024 15:08:55 -0700
Subject: [PATCH] kernel/sched: Fix lockless ordering in halt_thread()

We've had threads spinning on the thread state bits, but weren't being
careful to ensure that those bits were the last thing seen to change in
a halting thread.  Move the clearing of the halting bits to the end of
halt_thread(), and add a memory barrier for correctness.

Signed-off-by: Andy Ross
---
 kernel/sched.c | 9 ++++++++-
 1 file changed, 8 insertions(+), 1 deletion(-)

diff --git a/kernel/sched.c b/kernel/sched.c
index 6ee12648560..f4890389ba5 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -195,6 +195,7 @@ static inline bool is_halting(struct k_thread *thread)
 /* Clear the halting bits (_THREAD_ABORTING and _THREAD_SUSPENDING) */
 static inline void clear_halting(struct k_thread *thread)
 {
+	barrier_dmem_fence_full(); /* Other cpus spin on this locklessly! */
 	thread->base.thread_state &= ~(_THREAD_ABORTING | _THREAD_SUSPENDING);
 }
 
@@ -1297,7 +1298,6 @@ static void halt_thread(struct k_thread *thread, uint8_t new_state)
 	 */
 	if ((thread->base.thread_state & new_state) == 0U) {
 		thread->base.thread_state |= new_state;
-		clear_halting(thread);
 		if (z_is_thread_queued(thread)) {
 			dequeue_thread(thread);
 		}
@@ -1325,6 +1325,7 @@ static void halt_thread(struct k_thread *thread, uint8_t new_state)
 	update_cache(1);
 
 	if (new_state == _THREAD_SUSPENDED) {
+		clear_halting(thread);
 		return;
 	}
 
@@ -1366,6 +1367,12 @@ static void halt_thread(struct k_thread *thread, uint8_t new_state)
 	if (dummify && !IS_ENABLED(CONFIG_ARCH_POSIX)) {
 		z_dummy_thread_init(&_thread_dummy);
 	}
+
+	/* Finally update the halting thread state, on which
+	 * other CPUs might be spinning (see
+	 * thread_halt_spin()).
+	 */
+	clear_halting(thread);
 }
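
For readers unfamiliar with the pattern: the bug was that clear_halting()
ran before the rest of the teardown in halt_thread(), so a CPU spinning on
the halting bits could observe them clear while the halted thread's state
was still being modified.  Below is a minimal standalone sketch of the
ordering discipline the patch enforces, written with C11 atomics and
threads rather than Zephyr internals.  The names halter, waiter, and
halt_side_effects are hypothetical, and atomic_thread_fence() stands in
for barrier_dmem_fence_full():

/* ordering_sketch.c -- illustrative only, not Zephyr code.
 * Build: cc -std=c11 -pthread ordering_sketch.c
 */
#include <stdatomic.h>
#include <stdio.h>
#include <threads.h>

static atomic_uint halting = 1;  /* stand-in for the _THREAD_*ING bits */
static int halt_side_effects;    /* work the halter must publish first */

static int halter(void *arg)
{
	(void)arg;

	/* Do all of the teardown work first... */
	halt_side_effects = 42;

	/* ...then fence, so no observer can see the cleared bit before
	 * the work above is visible.  This plays the role of
	 * barrier_dmem_fence_full() in clear_halting().
	 */
	atomic_thread_fence(memory_order_seq_cst);

	/* Clearing the halting bit is the LAST visible change. */
	atomic_store_explicit(&halting, 0, memory_order_relaxed);
	return 0;
}

static int waiter(void *arg)
{
	(void)arg;

	/* Spin on the halting bit, as the other CPUs do in the patch. */
	while (atomic_load_explicit(&halting, memory_order_relaxed) != 0) {
	}
	atomic_thread_fence(memory_order_seq_cst);

	/* Safe: the paired fences guarantee the teardown is visible. */
	printf("halt complete, side effects = %d\n", halt_side_effects);
	return 0;
}

int main(void)
{
	thrd_t a, b;

	thrd_create(&a, waiter, NULL);
	thrd_create(&b, halter, NULL);
	thrd_join(a, NULL);
	thrd_join(b, NULL);
	return 0;
}

The key property is the same as in the patch: every side effect of the
halt is made globally visible (via the fence) before the flag the
spinning CPU watches is cleared, so observing the cleared flag implies
the halt is complete.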