kernel: Alter z_abort_thread_timeout() return type

No caller of the internal kernel routine z_abort_thread_timeout()
uses its return value anymore, so change its return type from int to
void and update the !CONFIG_SYS_CLOCK_EXISTS stub to match.

Signed-off-by: Peter Mitsis <peter.mitsis@intel.com>
Author:    Peter Mitsis
Date:      2025-01-06 17:55:09 -08:00
Committer: Benjamin Cabé
Commit:    bdb04dbfba
Parent:    30bac038fa
3 files changed, 9 insertions(+), 9 deletions(-)
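
In short: the internal helper's return type changes from int to void, the
!CONFIG_SYS_CLOCK_EXISTS stub becomes a statement-like no-op, and every call
site drops the (void) cast it previously needed to discard the unused status.
A condensed before/after view of a call site, taken directly from the hunks
below (thread stands for any struct k_thread pointer):

	/* before: the int status had to be explicitly discarded */
	(void)z_abort_thread_timeout(thread);

	/* after: nothing to discard */
	z_abort_thread_timeout(thread);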

@@ -180,7 +180,7 @@ static ALWAYS_INLINE struct k_thread *z_unpend_first_thread(_wait_q_t *wait_q)
 	thread = _priq_wait_best(&wait_q->waitq);
 	if (unlikely(thread != NULL)) {
 		unpend_thread_no_timeout(thread);
-		(void)z_abort_thread_timeout(thread);
+		z_abort_thread_timeout(thread);
 	}
 }

@@ -49,9 +49,9 @@ static inline void z_add_thread_timeout(struct k_thread *thread, k_timeout_t ticks)
 	z_add_timeout(&thread->base.timeout, z_thread_timeout, ticks);
 }
 
-static inline int z_abort_thread_timeout(struct k_thread *thread)
+static inline void z_abort_thread_timeout(struct k_thread *thread)
 {
-	return z_abort_timeout(&thread->base.timeout);
+	z_abort_timeout(&thread->base.timeout);
 }
 
 int32_t z_get_next_timeout_expiry(void);
@@ -62,7 +62,7 @@ k_ticks_t z_timeout_remaining(const struct _timeout *timeout);
 
 /* Stubs when !CONFIG_SYS_CLOCK_EXISTS */
 #define z_init_thread_timeout(thread_base) do {} while (false)
-#define z_abort_thread_timeout(to) (0)
+#define z_abort_thread_timeout(to) do {} while (false)
 #define z_is_inactive_timeout(to) 1
 #define z_get_next_timeout_expiry() ((int32_t) K_TICKS_FOREVER)
 #define z_set_timeout_expiry(ticks, is_idle) do {} while (false)
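
With the real helper now returning void, the stub used when
CONFIG_SYS_CLOCK_EXISTS is disabled switches from the value-yielding
expression (0) to the conventional statement-like no-op, so both
configurations present the same shape to callers. A minimal sketch of the
effect; only the macro comes from the hunk above, the surrounding caller is
hypothetical:

	/* No-clock stub: expands to a single empty statement. */
	#define z_abort_thread_timeout(to) do {} while (false)

	void example_unpend(void *thread)	/* hypothetical caller */
	{
		/* Compiles the same way the real void function call does;
		 * there is no return value to (void)-cast or misuse.
		 */
		z_abort_thread_timeout(thread);
	}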

@@ -652,7 +652,7 @@ struct k_thread *z_unpend1_no_timeout(_wait_q_t *wait_q)
 void z_unpend_thread(struct k_thread *thread)
 {
 	z_unpend_thread_no_timeout(thread);
-	(void)z_abort_thread_timeout(thread);
+	z_abort_thread_timeout(thread);
 }
 
 /* Priority set utility that does no rescheduling, it just changes the
@@ -1164,7 +1164,7 @@ void z_impl_k_wakeup(k_tid_t thread)
 {
 	SYS_PORT_TRACING_OBJ_FUNC(k_thread, wakeup, thread);
 
-	(void)z_abort_thread_timeout(thread);
+	z_abort_thread_timeout(thread);
 
 	k_spinlock_key_t key = k_spin_lock(&_sched_spinlock);
@@ -1212,7 +1212,7 @@ static inline void unpend_all(_wait_q_t *wait_q)
 	for (thread = z_waitq_head(wait_q); thread != NULL; thread = z_waitq_head(wait_q)) {
 		unpend_thread_no_timeout(thread);
-		(void)z_abort_thread_timeout(thread);
+		z_abort_thread_timeout(thread);
 		arch_thread_return_value_set(thread, 0);
 		ready_thread(thread);
 	}
@@ -1247,7 +1247,7 @@ static ALWAYS_INLINE void halt_thread(struct k_thread *thread, uint8_t new_state)
 	if (thread->base.pended_on != NULL) {
 		unpend_thread_no_timeout(thread);
 	}
-	(void)z_abort_thread_timeout(thread);
+	z_abort_thread_timeout(thread);
 	unpend_all(&thread->join_queue);
 
 	/* Edge case: aborting arch_current_thread() from within an
@@ -1458,7 +1458,7 @@ bool z_sched_wake(_wait_q_t *wait_q, int swap_retval, void *swap_data)
 						       swap_retval,
 						       swap_data);
 		unpend_thread_no_timeout(thread);
-		(void)z_abort_thread_timeout(thread);
+		z_abort_thread_timeout(thread);
 		ready_thread(thread);
 		ret = true;
 	}
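
For context, the most visible public path through this helper is k_wakeup(),
which aborts the sleeping thread's timeout before readying it (see the
z_impl_k_wakeup() hunk above). A minimal usage sketch of that public API,
unaffected by this change; the kernel calls are standard Zephyr APIs, while
the application code around them is illustrative only:

	#include <zephyr/kernel.h>
	#include <zephyr/sys/printk.h>

	#define SLEEPER_STACK_SIZE 1024
	#define SLEEPER_PRIORITY   5

	K_THREAD_STACK_DEFINE(sleeper_stack, SLEEPER_STACK_SIZE);
	static struct k_thread sleeper_thread;

	static void sleeper(void *p1, void *p2, void *p3)
	{
		ARG_UNUSED(p1);
		ARG_UNUSED(p2);
		ARG_UNUSED(p3);

		/* Sleep with no timeout; only k_wakeup() can resume us. */
		k_sleep(K_FOREVER);
		printk("sleeper woken\n");
	}

	int main(void)
	{
		k_tid_t tid = k_thread_create(&sleeper_thread, sleeper_stack,
					      K_THREAD_STACK_SIZEOF(sleeper_stack),
					      sleeper, NULL, NULL, NULL,
					      SLEEPER_PRIORITY, 0, K_NO_WAIT);

		k_sleep(K_MSEC(100));	/* give the sleeper time to pend */
		k_wakeup(tid);		/* internally ends the thread's timeout */
		return 0;
	}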