Moves the arch_swap() declaration out of kernel_arch_interface.h and into the various architectures' kernel_arch_func.h. This permits arch_swap() to be inlined on ARM while remaining extern'd on the other architectures that still implement arch_swap(). Inlining this function on ARM has shown at least a +5% performance boost according to the thread_metric benchmark on the disco_l475_iot1 board.

Signed-off-by: Peter Mitsis <peter.mitsis@intel.com>
51 lines · 1.1 KiB · C
/*
 * Copyright (c) 2016 Wind River Systems, Inc.
 * Copyright (c) 2018 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/* this file is only meant to be included by kernel_structs.h */

#ifndef ZEPHYR_ARCH_X86_INCLUDE_IA32_KERNEL_ARCH_FUNC_H_
#define ZEPHYR_ARCH_X86_INCLUDE_IA32_KERNEL_ARCH_FUNC_H_

#ifndef _ASMLANGUAGE

#include <stddef.h> /* For size_t */

#include <zephyr/platform/hooks.h>

#ifdef __cplusplus
extern "C" {
#endif

static inline void arch_kernel_init(void)
{
#ifdef CONFIG_SOC_PER_CORE_INIT_HOOK
	soc_per_core_init_hook();
#endif /* CONFIG_SOC_PER_CORE_INIT_HOOK */
}

static ALWAYS_INLINE void
arch_thread_return_value_set(struct k_thread *thread, unsigned int value)
{
	/* write into 'eax' slot created in z_swap() entry */

	*(unsigned int *)(thread->callee_saved.esp) = value;
}

extern void arch_cpu_atomic_idle(unsigned int key);

int arch_swap(unsigned int key);

/* ASM code to fiddle with registers to enable the MMU with PAE paging */
void z_x86_enable_paging(void);

#ifdef __cplusplus
}
#endif

#endif /* _ASMLANGUAGE */

#endif /* ZEPHYR_ARCH_X86_INCLUDE_IA32_KERNEL_ARCH_FUNC_H_ */
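For illustration only: the x86 header above keeps arch_swap() as a plain out-of-line declaration, while the point of the patch is that ARM's kernel_arch_func.h can now carry the definition itself, marked ALWAYS_INLINE, so callers avoid a function-call round trip on every context switch. The self-contained sketch below is not the real Zephyr code; it uses made-up names (my_arch_swap_extern, my_arch_swap_inline) and a fallback ALWAYS_INLINE macro purely to show the before/after declaration pattern.

#include <stdio.h>

#ifndef ALWAYS_INLINE
/* fallback for this standalone example; Zephyr defines its own macro */
#define ALWAYS_INLINE inline __attribute__((always_inline))
#endif

/* "Before" pattern: a shared interface header only declares the function,
 * so every architecture's implementation is reached through an out-of-line
 * call resolved at link time.
 */
int my_arch_swap_extern(unsigned int key);

/* "After" pattern on ARM: the per-arch header defines the function and
 * marks it ALWAYS_INLINE, so the compiler can fold the body into each
 * call site.
 */
static ALWAYS_INLINE int my_arch_swap_inline(unsigned int key)
{
	/* stand-in body; the real arch_swap() performs the context switch */
	return (int)key;
}

/* out-of-line definition backing the extern declaration above */
int my_arch_swap_extern(unsigned int key)
{
	return (int)key;
}

int main(void)
{
	printf("%d %d\n", my_arch_swap_extern(1), my_arch_swap_inline(2));
	return 0;
}

Keeping the declaration in each architecture's own header is what makes the inline form possible: an extern prototype in a shared interface header cannot be inlined across translation units without link-time optimization, which is why the other architectures continue to see only the plain declaration.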