Define the generic _current directly and get rid of the generic arch_current_get(). The SMP default implementation is now known as z_smp_current_get(). It is no longer inlined which saves significant binary size (about 10% for some random test case I checked). Introduce z_current_thread_set() and use it in place of arch_current_thread_set() for updating the current thread pointer given this is not necessarily an architecture specific operation. The architecture specific optimization, when enabled, should only care about its own things and not have to also update the generic _current_cpu->current copy. Signed-off-by: Nicolas Pitre <npitre@baylibre.com>
118 lines
2.9 KiB
C
118 lines
2.9 KiB
C
/*
|
|
* Copyright (c) 2010-2015 Wind River Systems, Inc.
|
|
* Copyright (c) 2017 Oticon A/S
|
|
*
|
|
* SPDX-License-Identifier: Apache-2.0
|
|
*/
|
|
|
|
/**
|
|
* @file
|
|
* @brief Kernel swapper code for POSIX
|
|
*
|
|
* This module implements the arch_swap() routine for the POSIX architecture.
|
|
*
|
|
*/
|
|
|
|
#include <zephyr/kernel.h>
|
|
#include <zephyr/kernel_structs.h>
|
|
#include "posix_core.h"
|
|
#include <zephyr/irq.h>
|
|
#include "kswap.h"
|
|
#include <zephyr/pm/pm.h>
|
|
|
|
int arch_swap(unsigned int key)
|
|
{
|
|
/*
|
|
* struct k_thread * _current is the currently running thread
|
|
* struct k_thread * _kernel.ready_q.cache contains the next thread to
|
|
* run (cannot be NULL)
|
|
*
|
|
* Here a "real" arch would save all processor registers, stack pointer
|
|
* and so forth. But we do not need to do so because we use posix
|
|
* threads => those are all nicely kept by the native OS kernel
|
|
*/
|
|
#if CONFIG_INSTRUMENT_THREAD_SWITCHING
|
|
z_thread_mark_switched_out();
|
|
#endif
|
|
_current->callee_saved.key = key;
|
|
_current->callee_saved.retval = -EAGAIN;
|
|
|
|
/* retval may be modified with a call to
|
|
* arch_thread_return_value_set()
|
|
*/
|
|
|
|
posix_thread_status_t *ready_thread_ptr =
|
|
(posix_thread_status_t *)
|
|
_kernel.ready_q.cache->callee_saved.thread_status;
|
|
|
|
posix_thread_status_t *this_thread_ptr =
|
|
(posix_thread_status_t *)
|
|
_current->callee_saved.thread_status;
|
|
|
|
|
|
z_current_thread_set(_kernel.ready_q.cache);
|
|
#if CONFIG_INSTRUMENT_THREAD_SWITCHING
|
|
z_thread_mark_switched_in();
|
|
#endif
|
|
|
|
/*
|
|
* Here a "real" arch would load all processor registers for the thread
|
|
* to run. In this arch case, we just block this thread until allowed
|
|
* to run later, and signal to whomever is allowed to run to
|
|
* continue.
|
|
*/
|
|
posix_swap(ready_thread_ptr->thread_idx,
|
|
this_thread_ptr->thread_idx);
|
|
|
|
/* When we continue, _kernel->current points back to this thread */
|
|
|
|
irq_unlock(_current->callee_saved.key);
|
|
|
|
return _current->callee_saved.retval;
|
|
}
|
|
|
|
|
|
|
|
#ifdef CONFIG_ARCH_HAS_CUSTOM_SWAP_TO_MAIN
/* This is just a version of arch_swap() in which we do not save anything
 * about the current thread.
 *
 * Note that we will never come back to this thread: posix_main_thread_start()
 * never returns.
 */
void arch_switch_to_main_thread(struct k_thread *main_thread, char *stack_ptr,
				k_thread_entry_t _main)
{
	/* All parameters are unused on this arch: the next thread to run is
	 * taken from _kernel.ready_q.cache instead
	 */
	ARG_UNUSED(main_thread);
	ARG_UNUSED(stack_ptr);
	ARG_UNUSED(_main);

	/* Per-thread POSIX bookkeeping lives in callee_saved.thread_status */
	posix_thread_status_t *ready_thread_ptr =
		(posix_thread_status_t *)
		_kernel.ready_q.cache->callee_saved.thread_status;

#ifdef CONFIG_INSTRUMENT_THREAD_SWITCHING
	z_thread_mark_switched_out();
#endif

	/* Publish the incoming thread as current before starting it */
	z_current_thread_set(_kernel.ready_q.cache);

#ifdef CONFIG_INSTRUMENT_THREAD_SWITCHING
	z_thread_mark_switched_in();
#endif

	posix_main_thread_start(ready_thread_ptr->thread_idx);
} /* LCOV_EXCL_LINE */
#endif
|
|
|
|
#ifdef CONFIG_PM
/**
 * If the kernel is in idle mode, take it out
 */
void posix_irq_check_idle_exit(void)
{
	/* Nothing to do unless the kernel is currently idling */
	if (!_kernel.idle) {
		return;
	}

	_kernel.idle = 0;
	pm_system_resume();
}
#endif
|