kernel: Add custom scheduler yield routines
Adds customized yield implementations based upon the selected scheduler (dumb, multiq or scalable). Although each follows the same broad outline, some of them allow for additional tweaking to extract maximal performance. For example, the multiq variant improves the performance of k_yield() by about 20%.

Signed-off-by: Peter Mitsis <peter.mitsis@intel.com>
parent 30f667bceb
commit ea6adb6726
2 changed files with 63 additions and 5 deletions
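For context on the k_yield() figure quoted in the commit message, the sketch below shows the application-level call this change services: two equal-priority threads handing the CPU to each other. It is an illustrative usage example only and is not part of the diff; the worker names, stack size, and priority are arbitrary, and only standard Zephyr facilities (K_THREAD_DEFINE, printk, k_yield) are assumed.

/* Illustrative only: two equal-priority threads trading the CPU via k_yield().
 * Stack size, priority, and names are arbitrary and not part of this commit.
 */
#include <zephyr/kernel.h>
#include <zephyr/sys/printk.h>

#define WORKER_PRIO 5

static void worker(void *id, void *unused1, void *unused2)
{
	ARG_UNUSED(unused1);
	ARG_UNUSED(unused2);

	for (int i = 0; i < 5; i++) {
		printk("worker %s: step %d\n", (const char *)id, i);
		/* Give the CPU to any ready thread of the same priority; this is
		 * the path whose run-queue handling the new yield routines optimize.
		 */
		k_yield();
	}
}

K_THREAD_DEFINE(worker_a, 1024, worker, "A", NULL, NULL, WORKER_PRIO, 0, 0);
K_THREAD_DEFINE(worker_b, 1024, worker, "B", NULL, NULL, WORKER_PRIO, 0, 0);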
@@ -17,6 +17,7 @@ bool z_priq_rb_lessthan(struct rbnode *a, struct rbnode *b);
 #define _priq_run_init z_priq_dumb_init
 #define _priq_run_add z_priq_dumb_add
 #define _priq_run_remove z_priq_dumb_remove
+#define _priq_run_yield z_priq_dumb_yield
 # if defined(CONFIG_SCHED_CPU_MASK)
 # define _priq_run_best z_priq_dumb_mask_best
 # else
@@ -27,12 +28,14 @@ bool z_priq_rb_lessthan(struct rbnode *a, struct rbnode *b);
 #define _priq_run_init z_priq_rb_init
 #define _priq_run_add z_priq_rb_add
 #define _priq_run_remove z_priq_rb_remove
+#define _priq_run_yield z_priq_rb_yield
 #define _priq_run_best z_priq_rb_best
 /* Multi Queue Scheduling */
 #elif defined(CONFIG_SCHED_MULTIQ)
 #define _priq_run_init z_priq_mq_init
 #define _priq_run_add z_priq_mq_add
 #define _priq_run_remove z_priq_mq_remove
+#define _priq_run_yield z_priq_mq_yield
 #define _priq_run_best z_priq_mq_best
 #endif

@@ -123,6 +126,37 @@ static ALWAYS_INLINE void z_priq_dumb_remove(sys_dlist_t *pq, struct k_thread *thread)
 	sys_dlist_remove(&thread->base.qnode_dlist);
 }

+static ALWAYS_INLINE void z_priq_dumb_yield(sys_dlist_t *pq)
+{
+#ifndef CONFIG_SMP
+	sys_dnode_t *n;
+
+	n = sys_dlist_peek_next_no_check(pq, &arch_current_thread()->base.qnode_dlist);
+
+	sys_dlist_dequeue(&arch_current_thread()->base.qnode_dlist);
+
+	struct k_thread *t;
+
+	/*
+	 * As it is possible that the current thread was not at the head of
+	 * the run queue, start searching from the present position for where
+	 * to re-insert it.
+	 */
+
+	while (n != NULL) {
+		t = CONTAINER_OF(n, struct k_thread, base.qnode_dlist);
+		if (z_sched_prio_cmp(arch_current_thread(), t) > 0) {
+			sys_dlist_insert(&t->base.qnode_dlist,
+					 &arch_current_thread()->base.qnode_dlist);
+			return;
+		}
+		n = sys_dlist_peek_next_no_check(pq, n);
+	}
+
+	sys_dlist_append(pq, &arch_current_thread()->base.qnode_dlist);
+#endif
+}
+
 static ALWAYS_INLINE struct k_thread *z_priq_dumb_best(sys_dlist_t *pq)
 {
 	struct k_thread *thread = NULL;

@@ -192,6 +226,14 @@ static ALWAYS_INLINE void z_priq_rb_remove(struct _priq_rb *pq, struct k_thread *thread)
 	}
 }

+static ALWAYS_INLINE void z_priq_rb_yield(struct _priq_rb *pq)
+{
+#ifndef CONFIG_SMP
+	z_priq_rb_remove(pq, arch_current_thread());
+	z_priq_rb_add(pq, arch_current_thread());
+#endif
+}
+
 static ALWAYS_INLINE struct k_thread *z_priq_rb_best(struct _priq_rb *pq)
 {
 	struct k_thread *thread = NULL;

@@ -247,6 +289,17 @@ static ALWAYS_INLINE void z_priq_mq_remove(struct _priq_mq *pq,
 	}
 }

+static ALWAYS_INLINE void z_priq_mq_yield(struct _priq_mq *pq)
+{
+#ifndef CONFIG_SMP
+	struct prio_info pos = get_prio_info(arch_current_thread()->base.prio);
+
+	sys_dlist_dequeue(&arch_current_thread()->base.qnode_dlist);
+	sys_dlist_append(&pq->queues[pos.offset_prio],
+			 &arch_current_thread()->base.qnode_dlist);
+#endif
+}
+
 static ALWAYS_INLINE struct k_thread *z_priq_mq_best(struct _priq_mq *pq)
 {
 	struct k_thread *thread = NULL;

@@ -88,6 +88,11 @@ static ALWAYS_INLINE void runq_remove(struct k_thread *thread)
 	_priq_run_remove(thread_runq(thread), thread);
 }

+static ALWAYS_INLINE void runq_yield(void)
+{
+	_priq_run_yield(curr_cpu_runq());
+}
+
 static ALWAYS_INLINE struct k_thread *runq_best(void)
 {
 	return _priq_run_best(curr_cpu_runq());

@@ -1040,11 +1045,11 @@ void z_impl_k_yield(void)

 	k_spinlock_key_t key = k_spin_lock(&_sched_spinlock);

-	if (!IS_ENABLED(CONFIG_SMP) ||
-	    z_is_thread_queued(arch_current_thread())) {
-		dequeue_thread(arch_current_thread());
-	}
-	queue_thread(arch_current_thread());
+#ifdef CONFIG_SMP
+	z_mark_thread_as_queued(arch_current_thread());
+#endif
+	runq_yield();
+
 	update_cache(1);
 	z_swap(&_sched_spinlock, key);
 }
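As a reading aid for the _priq_run_* block in the first file: those macros bind one generic name to whichever scheduler backend Kconfig selects, so the runq_yield() call above resolves at compile time with no function-pointer indirection. Below is a minimal stand-alone sketch of that dispatch pattern, assuming only plain C and the preprocessor; the my_* names and MY_CONFIG_SCHED_DUMB are illustrative placeholders, not Zephyr identifiers.

/* Stand-alone sketch of compile-time backend dispatch, modeled on the
 * _priq_run_* macros above. The my_* names and MY_CONFIG_SCHED_DUMB are
 * placeholders, not Zephyr symbols.
 */
#include <stdio.h>

#define MY_CONFIG_SCHED_DUMB 1	/* pretend Kconfig chose the "dumb" backend */

#if defined(MY_CONFIG_SCHED_DUMB)
static void my_dumb_yield(void)
{
	puts("dumb: walk the list and re-insert the current thread");
}
#define my_runq_yield my_dumb_yield
#elif defined(MY_CONFIG_SCHED_MULTIQ)
static void my_multiq_yield(void)
{
	puts("multiq: move the current thread to the tail of its bucket");
}
#define my_runq_yield my_multiq_yield
#endif

int main(void)
{
	/* Callers always use the generic name; the binding is fixed at build time. */
	my_runq_yield();
	return 0;
}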