kernel: Add k_reschedule()
The routine k_reschedule() allows an application to manually force a schedule point. Although similar to k_yield(), it has different properties. The most significant difference is that k_yield(), if invoked from a cooperative thread, will voluntarily give up execution control to the next thread of equal or higher priority, while k_reschedule() will not. Applications that manipulate EDF deadlines via k_thread_deadline_set() may need to use k_reschedule() to force a reschedule. Signed-off-by: Peter Mitsis <peter.mitsis@intel.com>
This commit is contained in:
parent
3f4ff78c1b
commit
669f2c489a
2 changed files with 38 additions and 0 deletions
|
|
@ -946,6 +946,26 @@ __syscall void k_thread_priority_set(k_tid_t thread, int prio);
|
||||||
__syscall void k_thread_deadline_set(k_tid_t thread, int deadline);
|
__syscall void k_thread_deadline_set(k_tid_t thread, int deadline);
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @brief Invoke the scheduler
|
||||||
|
*
|
||||||
|
* This routine invokes the scheduler to force a schedule point on the current
|
||||||
|
* CPU. If invoked from within a thread, the scheduler will be invoked
|
||||||
|
* immediately (provided interrupts were not locked when invoked). If invoked
|
||||||
|
* from within an ISR, the scheduler will be invoked upon exiting the ISR.
|
||||||
|
*
|
||||||
|
* Invoking the scheduler allows the kernel to make an immediate determination
|
||||||
|
* as to what the next thread to execute should be. Unlike yielding, this
|
||||||
|
* routine is not guaranteed to switch to a thread of equal or higher priority
|
||||||
|
* if any are available. For example, if the current thread is cooperative and
|
||||||
|
* there is a still higher priority cooperative thread that is ready, then
|
||||||
|
* yielding will switch to that higher priority thread whereas this routine
|
||||||
|
* will not.
|
||||||
|
*
|
||||||
|
* Most applications will never use this routine.
|
||||||
|
*/
|
||||||
|
__syscall void k_reschedule(void);
|
||||||
|
|
||||||
#ifdef CONFIG_SCHED_CPU_MASK
|
#ifdef CONFIG_SCHED_CPU_MASK
|
||||||
/**
|
/**
|
||||||
* @brief Sets all CPU enable masks to zero
|
* @brief Sets all CPU enable masks to zero
|
||||||
|
|
|
||||||
|
|
@ -1050,6 +1050,24 @@ static inline void z_vrfy_k_thread_deadline_set(k_tid_t tid, int deadline)
|
||||||
#endif /* CONFIG_USERSPACE */
|
#endif /* CONFIG_USERSPACE */
|
||||||
#endif /* CONFIG_SCHED_DEADLINE */
|
#endif /* CONFIG_SCHED_DEADLINE */
|
||||||
|
|
||||||
|
void z_impl_k_reschedule(void)
|
||||||
|
{
|
||||||
|
k_spinlock_key_t key;
|
||||||
|
|
||||||
|
key = k_spin_lock(&_sched_spinlock);
|
||||||
|
|
||||||
|
update_cache(0);
|
||||||
|
|
||||||
|
z_reschedule(&_sched_spinlock, key);
|
||||||
|
}
|
||||||
|
|
||||||
|
#ifdef CONFIG_USERSPACE
/* Userspace syscall verification handler: k_reschedule() takes no
 * arguments, so there is nothing to validate — simply forward to the
 * kernel-side implementation.
 *
 * NOTE(review): Zephyr's syscall convention pairs each z_vrfy_ handler with
 * a generated marshalling include (e.g. #include <zephyr/syscalls/..._mrsh.c>);
 * presumably it follows below this hunk — confirm against the full file.
 */
static inline void z_vrfy_k_reschedule(void)
{
	z_impl_k_reschedule();
}
#endif /* CONFIG_USERSPACE */
|
||||||
|
|
||||||
bool k_can_yield(void)
|
bool k_can_yield(void)
|
||||||
{
|
{
|
||||||
return !(k_is_pre_kernel() || k_is_in_isr() ||
|
return !(k_is_pre_kernel() || k_is_in_isr() ||
|
||||||
|
|
|
||||||
Loading…
Reference in a new issue