diff --git a/kernel/CMakeLists.txt b/kernel/CMakeLists.txt
index 82427e83ba8..2dfcaee9af4 100644
--- a/kernel/CMakeLists.txt
+++ b/kernel/CMakeLists.txt
@@ -41,6 +41,7 @@ else()
   list(APPEND kernel_files
     main_weak.c
     banner.c
+    busy_wait.c
    device.c
    errno.c
    fatal.c
diff --git a/kernel/busy_wait.c b/kernel/busy_wait.c
new file mode 100644
index 00000000000..c1902075450
--- /dev/null
+++ b/kernel/busy_wait.c
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2018 Intel Corporation
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+#include <zephyr/kernel.h>
+#include <zephyr/internal/syscall_handler.h>
+#include <zephyr/sys_clock.h>
+#include <zephyr/tracing/tracing.h>
+
+void z_impl_k_busy_wait(uint32_t usec_to_wait)
+{
+	SYS_PORT_TRACING_FUNC_ENTER(k_thread, busy_wait, usec_to_wait);
+	if (usec_to_wait == 0U) {
+		SYS_PORT_TRACING_FUNC_EXIT(k_thread, busy_wait, usec_to_wait);
+		return;
+	}
+
+#if defined(CONFIG_ARCH_HAS_CUSTOM_BUSY_WAIT)
+	arch_busy_wait(usec_to_wait);
+#else
+	uint32_t start_cycles = k_cycle_get_32();
+
+	/* use 64-bit math to prevent overflow when multiplying */
+	uint32_t cycles_to_wait = (uint32_t)(
+		(uint64_t)usec_to_wait *
+		(uint64_t)sys_clock_hw_cycles_per_sec() /
+		(uint64_t)USEC_PER_SEC
+	);
+
+	for (;;) {
+		uint32_t current_cycles = k_cycle_get_32();
+
+		/* this handles the rollover on an unsigned 32-bit value */
+		if ((current_cycles - start_cycles) >= cycles_to_wait) {
+			break;
+		}
+	}
+#endif
+
+	SYS_PORT_TRACING_FUNC_EXIT(k_thread, busy_wait, usec_to_wait);
+}
+
+#ifdef CONFIG_USERSPACE
+static inline void z_vrfy_k_busy_wait(uint32_t usec_to_wait)
+{
+	z_impl_k_busy_wait(usec_to_wait);
+}
+#include <zephyr/syscalls/k_busy_wait_mrsh.c>
+#endif /* CONFIG_USERSPACE */
diff --git a/kernel/timeout.c b/kernel/timeout.c
index 67c128429bd..df8c2aaede3 100644
--- a/kernel/timeout.c
+++ b/kernel/timeout.c
@@ -289,46 +289,6 @@ static inline int64_t z_vrfy_k_uptime_ticks(void)
 #include <zephyr/syscalls/k_uptime_ticks_mrsh.c>
 #endif
 
-void z_impl_k_busy_wait(uint32_t usec_to_wait)
-{
-	SYS_PORT_TRACING_FUNC_ENTER(k_thread, busy_wait, usec_to_wait);
-	if (usec_to_wait == 0U) {
-		SYS_PORT_TRACING_FUNC_EXIT(k_thread, busy_wait, usec_to_wait);
-		return;
-	}
-
-#if !defined(CONFIG_ARCH_HAS_CUSTOM_BUSY_WAIT)
-	uint32_t start_cycles = k_cycle_get_32();
-
-	/* use 64-bit math to prevent overflow when multiplying */
-	uint32_t cycles_to_wait = (uint32_t)(
-		(uint64_t)usec_to_wait *
-		(uint64_t)sys_clock_hw_cycles_per_sec() /
-		(uint64_t)USEC_PER_SEC
-	);
-
-	for (;;) {
-		uint32_t current_cycles = k_cycle_get_32();
-
-		/* this handles the rollover on an unsigned 32-bit value */
-		if ((current_cycles - start_cycles) >= cycles_to_wait) {
-			break;
-		}
-	}
-#else
-	arch_busy_wait(usec_to_wait);
-#endif /* CONFIG_ARCH_HAS_CUSTOM_BUSY_WAIT */
-	SYS_PORT_TRACING_FUNC_EXIT(k_thread, busy_wait, usec_to_wait);
-}
-
-#ifdef CONFIG_USERSPACE
-static inline void z_vrfy_k_busy_wait(uint32_t usec_to_wait)
-{
-	z_impl_k_busy_wait(usec_to_wait);
-}
-#include <zephyr/syscalls/k_busy_wait_mrsh.c>
-#endif /* CONFIG_USERSPACE */
-
 /* Returns the uptime expiration (relative to an unlocked "now"!) of a
  * timeout object. When used correctly, this should be called once,
  * synchronously with the user passing a new timeout value. It should