kernel: move current thread pointer management to core code

Define the generic _current directly and get rid of the generic
arch_current_thread() fallback implementation.
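
For reference, the resulting definitions in kernel_structs.h (see the hunk
below) come down to one of the following, depending on configuration:

    /* SMP: out-of-line getter that locks interrupts around the read */
    #define _current z_smp_current_get()

    /* !SMP: read the single CPU slot directly */
    #define _current _kernel.cpus[0].current

    /* CONFIG_ARCH_HAS_CUSTOM_CURRENT_IMPL: the arch-provided getter takes over */
    #define _current arch_current_thread()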

The SMP default implementation is now known as z_smp_current_get().
It is no longer inlined, which saves significant binary size (about 10%
for some random test case I checked).
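
As a rough sketch of where the size saving comes from (illustrative, not
literal compiler output): every SMP read of _current previously expanded to
the locked sequence from the now-removed common arch_inlines.h, whereas it
now compiles to a single call.

    /* before: inlined at every _current use */
    unsigned int k = arch_irq_lock();
    struct k_thread *t = _current_cpu->current;
    arch_irq_unlock(k);

    /* after: one out-of-line call per _current use */
    struct k_thread *t = z_smp_current_get();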

Introduce z_current_thread_set() and use it in place of
arch_current_thread_set() for updating the current thread pointer,
given this is not necessarily an architecture-specific operation.
The architecture-specific optimization, when enabled, should only care
about its own state and not also have to update the generic
_current_cpu->current copy.
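
Concretely, the layering added to kernel_structs.h (see the hunk below) keeps
the generic copy under core-code control; the arch hook, when enabled, is
wrapped around that same update:

    /* generic case: core code updates the per-CPU copy */
    #define z_current_thread_set(thread) ({ _current_cpu->current = (thread); })

    /* CONFIG_ARCH_HAS_CUSTOM_CURRENT_IMPL: arch hook layered on top, but the
     * generic copy is still written here rather than by the arch code */
    #define z_current_thread_set(thread) \
        arch_current_thread_set(({ _current_cpu->current = (thread); }))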

Signed-off-by: Nicolas Pitre <npitre@baylibre.com>
Nicolas Pitre 2025-01-07 15:42:07 -05:00 committed by Benjamin Cabé
parent 46aa6717ff
commit 7a3124d866
9 changed files with 38 additions and 60 deletions

View file

@@ -522,7 +522,7 @@ void arch_switch_to_main_thread(struct k_thread *main_thread, char *stack_ptr,
{
z_arm_prepare_switch_to_main();
-arch_current_thread_set(main_thread);
+z_current_thread_set(main_thread);
#if defined(CONFIG_THREAD_LOCAL_STORAGE)
/* On Cortex-M, TLS uses a global variable as pointer to

View file

@@ -50,7 +50,7 @@ int arch_swap(unsigned int key)
_current->callee_saved.thread_status;
-arch_current_thread_set(_kernel.ready_q.cache);
+z_current_thread_set(_kernel.ready_q.cache);
#if CONFIG_INSTRUMENT_THREAD_SWITCHING
z_thread_mark_switched_in();
#endif
@@ -94,7 +94,7 @@ void arch_switch_to_main_thread(struct k_thread *main_thread, char *stack_ptr,
z_thread_mark_switched_out();
#endif
-arch_current_thread_set(_kernel.ready_q.cache);
+z_current_thread_set(_kernel.ready_q.cache);
#ifdef CONFIG_INSTRUMENT_THREAD_SWITCHING
z_thread_mark_switched_in();

View file

@@ -34,6 +34,4 @@
#include <zephyr/arch/sparc/arch_inlines.h>
#endif
-#include <zephyr/arch/common/arch_inlines.h>
#endif /* ZEPHYR_INCLUDE_ARCH_INLINES_H_ */

View file

@ -1,45 +0,0 @@
/*
* Copyright (c) 2024 Meta Platforms.
*
* SPDX-License-Identifier: Apache-2.0
*/
#ifndef ZEPHYR_INCLUDE_ZEPHYR_ARCH_COMMON_ARCH_INLINES_H_
#define ZEPHYR_INCLUDE_ZEPHYR_ARCH_COMMON_ARCH_INLINES_H_
#ifndef ZEPHYR_INCLUDE_ARCH_INLINES_H_
#error "This header shouldn't be included directly"
#endif /* ZEPHYR_INCLUDE_ARCH_INLINES_H_ */
#ifndef _ASMLANGUAGE
#include <zephyr/kernel_structs.h>
#ifndef CONFIG_ARCH_HAS_CUSTOM_CURRENT_IMPL
static ALWAYS_INLINE struct k_thread *arch_current_thread(void)
{
#ifdef CONFIG_SMP
/* In SMP, _current is a field read from _current_cpu, which
* can race with preemption before it is read. We must lock
* local interrupts when reading it.
*/
unsigned int k = arch_irq_lock();
struct k_thread *ret = _current_cpu->current;
arch_irq_unlock(k);
#else
struct k_thread *ret = _kernel.cpus[0].current;
#endif /* CONFIG_SMP */
return ret;
}
static ALWAYS_INLINE void arch_current_thread_set(struct k_thread *thread)
{
_current_cpu->current = thread;
}
#endif /* CONFIG_ARCH_HAS_CUSTOM_CURRENT_IMPL */
#endif /* _ASMLANGUAGE */
#endif /* ZEPHYR_INCLUDE_ZEPHYR_ARCH_COMMON_ARCH_INLINES_H_ */

View file

@@ -28,13 +28,12 @@ static ALWAYS_INLINE _cpu_t *arch_curr_cpu(void)
}
#ifdef CONFIG_RISCV_CURRENT_VIA_GP
register struct k_thread *__arch_current_thread __asm__("gp");
#define arch_current_thread() __arch_current_thread
-#define arch_current_thread_set(thread) \
-do { \
-__arch_current_thread = _current_cpu->current = (thread); \
-} while (0)
+#define arch_current_thread_set(thread) ({ __arch_current_thread = (thread); })
#endif /* CONFIG_RISCV_CURRENT_VIA_GP */
static ALWAYS_INLINE unsigned int arch_num_cpus(void)
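
With this change the gp-based shortcut no longer mirrors the update into
_current_cpu->current itself; the generic z_current_thread_set() wrapper does
that. A sketch of the combined expansion under CONFIG_RISCV_CURRENT_VIA_GP
(illustrative, not literal preprocessor output):

    /* z_current_thread_set(thread) with the arch override in place */
    ({ __arch_current_thread = (({ _current_cpu->current = (thread); })); })
    /* core code writes the generic copy, then gp picks up the same value */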

View file

@@ -260,16 +260,28 @@ extern atomic_t _cpus_active;
* another SMP CPU.
*/
bool z_smp_cpu_mobile(void);
#define _current_cpu ({ __ASSERT_NO_MSG(!z_smp_cpu_mobile()); \
arch_curr_cpu(); })
-#define _current arch_current_thread()
+struct k_thread *z_smp_current_get(void);
+#define _current z_smp_current_get()
#else
#define _current_cpu (&_kernel.cpus[0])
+#define _current _kernel.cpus[0].current
#endif
+/* This is always invoked from a context where preemption is disabled */
+#define z_current_thread_set(thread) ({ _current_cpu->current = (thread); })
+#ifdef CONFIG_ARCH_HAS_CUSTOM_CURRENT_IMPL
+#undef _current
+#define _current arch_current_thread()
+#undef z_current_thread_set
+#define z_current_thread_set(thread) \
+arch_current_thread_set(({ _current_cpu->current = (thread); }))
+#endif
/* kernel wait queue record */
#ifdef CONFIG_WAITQ_SCALABLE

View file

@@ -133,7 +133,7 @@ static ALWAYS_INLINE unsigned int do_swap(unsigned int key,
#endif /* CONFIG_SMP */
z_thread_mark_switched_out();
z_sched_switch_spin(new_thread);
-arch_current_thread_set(new_thread);
+z_current_thread_set(new_thread);
#ifdef CONFIG_TIMESLICING
z_reset_time_slice(new_thread);
@@ -259,6 +259,6 @@ static inline void z_dummy_thread_init(struct k_thread *dummy_thread)
dummy_thread->base.slice_ticks = 0;
#endif /* CONFIG_TIMESLICE_PER_THREAD */
-arch_current_thread_set(dummy_thread);
+z_current_thread_set(dummy_thread);
}
#endif /* ZEPHYR_KERNEL_INCLUDE_KSWAP_H_ */

View file

@@ -797,11 +797,11 @@ struct k_thread *z_swap_next_thread(void)
}
#ifdef CONFIG_USE_SWITCH
-/* Just a wrapper around arch_current_thread_set(xxx) with tracing */
+/* Just a wrapper around z_current_thread_set(xxx) with tracing */
static inline void set_current(struct k_thread *new_thread)
{
z_thread_mark_switched_out();
-arch_current_thread_set(new_thread);
+z_current_thread_set(new_thread);
}
/**

View file

@@ -248,3 +248,17 @@ bool z_smp_cpu_mobile(void)
arch_irq_unlock(k);
return !pinned;
}
+struct k_thread *z_smp_current_get(void)
+{
+/*
+* _current is a field read from _current_cpu, which can race
+* with preemption before it is read. We must lock local
+* interrupts when reading it.
+*/
+unsigned int key = arch_irq_lock();
+struct k_thread *t = _current_cpu->current;
+arch_irq_unlock(key);
+return t;
+}