kernel: smp: introduce k_smp_cpu_resume
This provides a path to resume a previously suspended CPU. It differs from k_smp_cpu_start() in that the per-CPU kernel structs are not re-initialized, so the execution context saved during suspend can be restored during resume. The actual context saving and restoring are platform specific.

Signed-off-by: Daniel Leung <daniel.leung@intel.com>
Parent: fe66e35db0
Commit: eefaeee061
3 changed files with 94 additions and 11 deletions
include/zephyr/kernel/smp.h

@@ -7,6 +7,8 @@
 #ifndef ZEPHYR_INCLUDE_KERNEL_SMP_H_
 #define ZEPHYR_INCLUDE_KERNEL_SMP_H_
 
+#include <stdbool.h>
+
 typedef void (*smp_init_fn)(void *arg);
 
 /**
@@ -25,6 +27,11 @@ typedef void (*smp_init_fn)(void *arg);
  * Detection of such state(s) must be provided by
  * the platform layers.
  *
+ * @note This initializes per-CPU kernel structs and also
+ *       initializes timers needed for MP operations.
+ *       Use @ref k_smp_cpu_resume if these are not
+ *       desired.
+ *
  * @param id ID of target CPU.
  * @param fn Function to be called before letting scheduler
  *           run.
@@ -32,4 +39,33 @@ typedef void (*smp_init_fn)(void *arg);
  */
 void k_smp_cpu_start(int id, smp_init_fn fn, void *arg);
 
+/**
+ * @brief Resume a previously suspended CPU.
+ *
+ * This function works like @ref k_smp_cpu_start, but does not
+ * re-initialize the kernel's internal tracking data for
+ * the target CPU. Therefore, @ref k_smp_cpu_start must have
+ * previously been called for the target CPU, and it must have
+ * verifiably reached an idle/off state (detection of which
+ * must be provided by the platform layers). It may be used
+ * in cases where platform layers require, for example, that
+ * data on the interrupt or idle stack be preserved.
+ *
+ * @note This function must not be used on currently running
+ *       CPU. The target CPU must be in suspended state, or
+ *       in certain architectural state(s) where the CPU is
+ *       permitted to go through the resume process.
+ *       Detection of such state(s) must be provided by
+ *       the platform layers.
+ *
+ * @param id ID of target CPU.
+ * @param fn Function to be called before resuming context.
+ * @param arg Argument to @a fn.
+ * @param reinit_timer True if timer needs to be re-initialized.
+ * @param invoke_sched True if scheduler is invoked after the CPU
+ *                     has started.
+ */
+void k_smp_cpu_resume(int id, smp_init_fn fn, void *arg,
+		      bool reinit_timer, bool invoke_sched);
+
 #endif /* ZEPHYR_INCLUDE_KERNEL_SMP_H_ */
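For illustration, here is a minimal usage sketch of the new API. It is hypothetical platform code, not part of this commit: the callback soc_resume_prep(), the CPU ID, and the <zephyr/kernel/smp.h> include path (inferred from the header guard above) are assumptions.

/* Hypothetical usage sketch -- not part of this commit. */
#include <zephyr/kernel.h>
#include <zephyr/kernel/smp.h>

#define TARGET_CPU 1	/* illustrative CPU ID */

/* Illustrative callback run on the target CPU before it resumes. */
static void soc_resume_prep(void *arg)
{
	ARG_UNUSED(arg);
	/* e.g. restore clocks or re-enable platform interrupt routing */
}

void example_cpu_power_cycle(void)
{
	/* First bring-up: initializes per-CPU kernel structs and the
	 * timer needed for MP operations.
	 */
	k_smp_cpu_start(TARGET_CPU, NULL, NULL);

	/* ... platform-specific code later suspends TARGET_CPU and it
	 * verifiably reaches a resumable state ...
	 */

	/* Resume without re-initializing per-CPU kernel structs;
	 * re-init the timer driver and let the scheduler run on the
	 * resumed CPU.
	 */
	k_smp_cpu_resume(TARGET_CPU, soc_resume_prep, NULL, true, true);
}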
arch_interface.h

@@ -216,7 +216,7 @@ void arch_cpu_atomic_idle(unsigned int key);
  *
  * @param data context parameter, implementation specific
  */
-typedef FUNC_NORETURN void (*arch_cpustart_t)(void *data);
+typedef void (*arch_cpustart_t)(void *data);
 
 /**
  * @brief Start a numbered CPU on a MP-capable system
kernel/smp.c (67 changed lines)

@@ -42,6 +42,14 @@ static struct cpu_start_cb {
 
 	/** Argument to @ref cpu_start_fn.fn. */
 	void *arg;
+
+	/** Invoke scheduler after CPU has started if true. */
+	bool invoke_sched;
+
+#ifdef CONFIG_SYS_CLOCK_EXISTS
+	/** True if smp_timer_init() needs to be called. */
+	bool reinit_timer;
+#endif
 } cpu_start_fn;
 
 static struct k_spinlock cpu_start_lock;
@@ -111,7 +119,7 @@ void z_smp_thread_swap(void)
 }
 #endif
 
-static inline FUNC_NORETURN void smp_init_top(void *arg)
+static inline void smp_init_top(void *arg)
 {
 	struct k_thread dummy_thread;
 	struct cpu_start_cb *csc = arg;
@@ -124,13 +132,10 @@ static inline FUNC_NORETURN void smp_init_top(void *arg)
 	 */
 	wait_for_start_signal(&cpu_start_flag);
 
-	/* Initialize the dummy thread struct so that
-	 * the scheduler can schedule actual threads to run.
-	 */
-	z_dummy_thread_init(&dummy_thread);
-
 #ifdef CONFIG_SYS_CLOCK_EXISTS
-	smp_timer_init();
+	if ((csc == NULL) || csc->reinit_timer) {
+		smp_timer_init();
+	}
 #endif
 
 	/* Do additional initialization steps if needed. */
@@ -138,6 +143,16 @@ static inline FUNC_NORETURN void smp_init_top(void *arg)
 		csc->fn(csc->arg);
 	}
 
+	if ((csc != NULL) && !csc->invoke_sched) {
+		/* Don't invoke scheduler. */
+		return;
+	}
+
+	/* Initialize the dummy thread struct so that
+	 * the scheduler can schedule actual threads to run.
+	 */
+	z_dummy_thread_init(&dummy_thread);
+
 	/* Let scheduler decide what thread to run next. */
 	z_swap_unlocked();
 
@@ -146,9 +161,6 @@ static inline FUNC_NORETURN void smp_init_top(void *arg)
 
 static void start_cpu(int id, struct cpu_start_cb *csc)
 {
-	/* Initialize various CPU structs related to this CPU. */
-	z_init_cpu(id);
-
 	/* Clear the ready flag so the newly powered up CPU can
 	 * signal that it has powered up.
 	 */
@@ -172,6 +184,40 @@ void k_smp_cpu_start(int id, smp_init_fn fn, void *arg)
 
 	cpu_start_fn.fn = fn;
 	cpu_start_fn.arg = arg;
+	cpu_start_fn.invoke_sched = true;
+
+#ifdef CONFIG_SYS_CLOCK_EXISTS
+	cpu_start_fn.reinit_timer = true;
+#endif
+
+	/* We are only starting one CPU so we do not need to synchronize
+	 * across all CPUs using the start_flag. So just set it to 1.
+	 */
+	(void)atomic_set(&cpu_start_flag, 1); /* async, don't care */
+
+	/* Initialize various CPU structs related to this CPU. */
+	z_init_cpu(id);
+
+	/* Start the CPU! */
+	start_cpu(id, &cpu_start_fn);
+
+	k_spin_unlock(&cpu_start_lock, key);
+}
+
+void k_smp_cpu_resume(int id, smp_init_fn fn, void *arg,
+		      bool reinit_timer, bool invoke_sched)
+{
+	k_spinlock_key_t key = k_spin_lock(&cpu_start_lock);
+
+	cpu_start_fn.fn = fn;
+	cpu_start_fn.arg = arg;
+	cpu_start_fn.invoke_sched = invoke_sched;
+
+#ifdef CONFIG_SYS_CLOCK_EXISTS
+	cpu_start_fn.reinit_timer = reinit_timer;
+#else
+	ARG_UNUSED(reinit_timer);
+#endif
+
 	/* We are only starting one CPU so we do not need to synchronize
 	 * across all CPUs using the start_flag. So just set it to 1.
@@ -195,6 +241,7 @@ void z_smp_init(void)
 	unsigned int num_cpus = arch_num_cpus();
 
 	for (int i = 1; i < num_cpus; i++) {
+		z_init_cpu(i);
 		start_cpu(i, NULL);
 	}
 
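The FUNC_NORETURN removal from arch_cpustart_t and smp_init_top() above is what allows the invoke_sched == false path to return to the architecture/platform resume code instead of swapping into the scheduler. A rough contrast of the two flag combinations follows; the choices are illustrative assumptions, not taken from this commit, and the two calls show alternative configurations rather than a sequence to run.

/* Hypothetical flag combinations for k_smp_cpu_resume() -- illustrative only. */
#include <zephyr/kernel/smp.h>

void example_resume_variants(void)
{
	/* Full resume: re-initialize the timer driver and hand the CPU
	 * to the scheduler, i.e. everything k_smp_cpu_start() does
	 * except re-initializing the per-CPU kernel structs.
	 */
	k_smp_cpu_resume(1, NULL, NULL, true, true);

	/* Minimal resume: skip timer re-init and do not enter the
	 * scheduler; smp_init_top() simply returns, so the platform's
	 * own resume path keeps control of the CPU.
	 */
	k_smp_cpu_resume(1, NULL, NULL, false, false);
}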