kernel: split k_busy_wait() out of timeout.c

This will allow builds with CONFIG_SYS_CLOCK_EXISTS=n. For now this
is only a code move.

Signed-off-by: Nicolas Pitre <npitre@baylibre.com>
Authored by Nicolas Pitre on 2023-06-30 22:22:35 -04:00; committed by Anas Nashif
parent 0635e2690e
commit b157031468
3 changed files with 52 additions and 40 deletions
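
For context, k_busy_wait() spins on the raw hardware cycle counter (k_cycle_get_32()) rather than on the kernel timeout machinery, which is what makes it separable from timeout.c and, per the commit message, eventually buildable with CONFIG_SYS_CLOCK_EXISTS=n. A minimal, hypothetical usage sketch (the caller name is invented for illustration):

/* Hypothetical caller: bit-bang a short strobe pulse. Only the cycle
 * counter is needed, not kernel ticks or timeouts.
 */
#include <zephyr/kernel.h>

void strobe_pulse(void) /* invented name, illustration only */
{
	/* ... drive a GPIO line high here ... */
	k_busy_wait(10); /* spin for roughly 10 microseconds */
	/* ... drive the line low again ... */
}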

kernel/CMakeLists.txt

@@ -41,6 +41,7 @@ else()
   list(APPEND kernel_files
     main_weak.c
     banner.c
+    busy_wait.c
     device.c
     errno.c
     fatal.c
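
This hunk adds busy_wait.c to the build unconditionally. Presumably a later change can then guard the clock-dependent sources instead; a hypothetical sketch of that direction (not part of this commit):

# Hypothetical follow-up, NOT in this commit: busy_wait.c stays
# unconditional while timeout.c is built only when the system
# clock exists.
list(APPEND kernel_files busy_wait.c)
if(CONFIG_SYS_CLOCK_EXISTS)
  list(APPEND kernel_files timeout.c)
endif()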

kernel/busy_wait.c (new file, 51 lines)

@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2018 Intel Corporation
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+#include <zephyr/kernel.h>
+#include <zephyr/drivers/timer/system_timer.h>
+#include <zephyr/sys_clock.h>
+#include <kernel_arch_interface.h>
+
+void z_impl_k_busy_wait(uint32_t usec_to_wait)
+{
+	SYS_PORT_TRACING_FUNC_ENTER(k_thread, busy_wait, usec_to_wait);
+	if (usec_to_wait == 0U) {
+		SYS_PORT_TRACING_FUNC_EXIT(k_thread, busy_wait, usec_to_wait);
+		return;
+	}
+
+#if defined(CONFIG_ARCH_HAS_CUSTOM_BUSY_WAIT)
+	arch_busy_wait(usec_to_wait);
+#else
+	uint32_t start_cycles = k_cycle_get_32();
+
+	/* use 64-bit math to prevent overflow when multiplying */
+	uint32_t cycles_to_wait = (uint32_t)(
+		(uint64_t)usec_to_wait *
+		(uint64_t)sys_clock_hw_cycles_per_sec() /
+		(uint64_t)USEC_PER_SEC
+	);
+
+	for (;;) {
+		uint32_t current_cycles = k_cycle_get_32();
+
+		/* this handles the rollover on an unsigned 32-bit value */
+		if ((current_cycles - start_cycles) >= cycles_to_wait) {
+			break;
+		}
+	}
+#endif
+	SYS_PORT_TRACING_FUNC_EXIT(k_thread, busy_wait, usec_to_wait);
+}
+
+#ifdef CONFIG_USERSPACE
+static inline void z_vrfy_k_busy_wait(uint32_t usec_to_wait)
+{
+	z_impl_k_busy_wait(usec_to_wait);
+}
+
+#include <syscalls/k_busy_wait_mrsh.c>
+#endif /* CONFIG_USERSPACE */

kernel/timeout.c

@@ -289,46 +289,6 @@ static inline int64_t z_vrfy_k_uptime_ticks(void)
 #include <syscalls/k_uptime_ticks_mrsh.c>
 #endif
-
-void z_impl_k_busy_wait(uint32_t usec_to_wait)
-{
-	SYS_PORT_TRACING_FUNC_ENTER(k_thread, busy_wait, usec_to_wait);
-	if (usec_to_wait == 0U) {
-		SYS_PORT_TRACING_FUNC_EXIT(k_thread, busy_wait, usec_to_wait);
-		return;
-	}
-
-#if !defined(CONFIG_ARCH_HAS_CUSTOM_BUSY_WAIT)
-	uint32_t start_cycles = k_cycle_get_32();
-
-	/* use 64-bit math to prevent overflow when multiplying */
-	uint32_t cycles_to_wait = (uint32_t)(
-		(uint64_t)usec_to_wait *
-		(uint64_t)sys_clock_hw_cycles_per_sec() /
-		(uint64_t)USEC_PER_SEC
-	);
-
-	for (;;) {
-		uint32_t current_cycles = k_cycle_get_32();
-
-		/* this handles the rollover on an unsigned 32-bit value */
-		if ((current_cycles - start_cycles) >= cycles_to_wait) {
-			break;
-		}
-	}
-#else
-	arch_busy_wait(usec_to_wait);
-#endif /* CONFIG_ARCH_HAS_CUSTOM_BUSY_WAIT */
-	SYS_PORT_TRACING_FUNC_EXIT(k_thread, busy_wait, usec_to_wait);
-}
-
-#ifdef CONFIG_USERSPACE
-static inline void z_vrfy_k_busy_wait(uint32_t usec_to_wait)
-{
-	z_impl_k_busy_wait(usec_to_wait);
-}
-#include <syscalls/k_busy_wait_mrsh.c>
-#endif /* CONFIG_USERSPACE */
 
 /* Returns the uptime expiration (relative to an unlocked "now"!) of a
  * timeout object. When used correctly, this should be called once,
  * synchronously with the user passing a new timeout value. It should