kernel: inline z_sched_prio_cmp()

Inlines z_sched_prio_cmp() to get better performance.

Signed-off-by: Peter Mitsis <peter.mitsis@intel.com>
Authored by Peter Mitsis on 2024-11-07 22:07:55 -08:00; committed by Benjamin Cabé
parent c6693bfdae
commit d1c2fc0667
3 changed files with 41 additions and 47 deletions
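The change itself is mechanical: drop the extern declarations from the headers, delete the out-of-line definition in the scheduler source, and move the body into a header as static ALWAYS_INLINE so every translation unit can inline the call. A minimal sketch of the pattern, using illustrative names rather than the kernel's actual files:

/* Before: declaration in a header, one out-of-line definition. */

/* cmp.h */
int cmp(int a, int b);

/* cmp.c */
int cmp(int a, int b)
{
	return b - a;
}

/* After: the definition lives in the header. "static inline" gives
 * each translation unit its own inlinable copy with no external
 * symbol, so callers can avoid the function-call overhead entirely.
 */

/* cmp.h */
static inline int cmp(int a, int b)
{
	return b - a;
}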

@@ -119,8 +119,6 @@ static inline bool z_is_prio_lower_or_equal(int prio1, int prio2)
 	return z_is_prio1_lower_than_or_equal_to_prio2(prio1, prio2);
 }
 
-int32_t z_sched_prio_cmp(struct k_thread *thread_1, struct k_thread *thread_2);
-
 static inline bool _is_valid_prio(int prio, void *entry_point)
 {
 	if ((prio == K_IDLE_PRIO) && z_is_idle_thread_entry(entry_point)) {

@@ -10,9 +10,6 @@
 #include <zephyr/sys/math_extras.h>
 #include <zephyr/sys/dlist.h>
 
-extern int32_t z_sched_prio_cmp(struct k_thread *thread_1,
-				struct k_thread *thread_2);
-
 bool z_priq_rb_lessthan(struct rbnode *a, struct rbnode *b);
 
 /* Dumb Scheduling */
@@ -64,6 +61,47 @@ static ALWAYS_INLINE void z_priq_dumb_init(sys_dlist_t *pq)
 	sys_dlist_init(pq);
 }
 
+/*
+ * Return value same as e.g. memcmp
+ *   > 0 -> thread 1 priority  > thread 2 priority
+ *   = 0 -> thread 1 priority == thread 2 priority
+ *   < 0 -> thread 1 priority  < thread 2 priority
+ * Do not rely on the actual value returned aside from the above.
+ * (Again, like memcmp.)
+ */
+static ALWAYS_INLINE int32_t z_sched_prio_cmp(struct k_thread *thread_1, struct k_thread *thread_2)
+{
+	/* `prio` is <32b, so the below cannot overflow. */
+	int32_t b1 = thread_1->base.prio;
+	int32_t b2 = thread_2->base.prio;
+
+	if (b1 != b2) {
+		return b2 - b1;
+	}
+
+#ifdef CONFIG_SCHED_DEADLINE
+	/* If we assume all deadlines live within the same "half" of
+	 * the 32 bit modulus space (this is a documented API rule),
+	 * then the latest deadline in the queue minus the earliest is
+	 * guaranteed to be (2's complement) non-negative.  We can
+	 * leverage that to compare the values without having to check
+	 * the current time.
+	 */
+	uint32_t d1 = thread_1->base.prio_deadline;
+	uint32_t d2 = thread_2->base.prio_deadline;
+
+	if (d1 != d2) {
+		/* Sooner deadline means higher effective priority.
+		 * Doing the calculation with unsigned types and casting
+		 * to signed isn't perfect, but at least reduces this
+		 * from UB on overflow to impdef.
+		 */
+		return (int32_t)(d2 - d1);
+	}
+#endif /* CONFIG_SCHED_DEADLINE */
+	return 0;
+}
+
 static ALWAYS_INLINE void z_priq_dumb_add(sys_dlist_t *pq, struct k_thread *thread)
 {
 	struct k_thread *t;
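Why the deadline branch above is safe deserves a worked example. prio_deadline is a free-running 32-bit value, and the documented API rule that all live deadlines sit within the same half of the 2^32 modulus space guarantees that "later minus earlier" stays non-negative even when the counter has wrapped. A standalone sketch with made-up deadline values:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t d1 = 0xFFFFFFF0u; /* earlier deadline, just before wraparound */
	uint32_t d2 = 0x00000010u; /* later deadline, just after the wrap */

	/* Unsigned subtraction is well defined modulo 2^32: d2 - d1 is
	 * 0x20, and the cast to signed yields +32, so the thread holding
	 * d1 correctly compares as the higher effective priority.
	 */
	printf("%" PRId32 "\n", (int32_t)(d2 - d1)); /* prints 32 */

	return 0;
}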


@@ -45,48 +45,6 @@ BUILD_ASSERT(CONFIG_NUM_COOP_PRIORITIES >= CONFIG_NUM_METAIRQ_PRIORITIES,
 	     "CONFIG_NUM_METAIRQ_PRIORITIES as Meta IRQs are just a special class of cooperative "
 	     "threads.");
 
-/*
- * Return value same as e.g. memcmp
- *   > 0 -> thread 1 priority  > thread 2 priority
- *   = 0 -> thread 1 priority == thread 2 priority
- *   < 0 -> thread 1 priority  < thread 2 priority
- * Do not rely on the actual value returned aside from the above.
- * (Again, like memcmp.)
- */
-int32_t z_sched_prio_cmp(struct k_thread *thread_1,
-			 struct k_thread *thread_2)
-{
-	/* `prio` is <32b, so the below cannot overflow. */
-	int32_t b1 = thread_1->base.prio;
-	int32_t b2 = thread_2->base.prio;
-
-	if (b1 != b2) {
-		return b2 - b1;
-	}
-
-#ifdef CONFIG_SCHED_DEADLINE
-	/* If we assume all deadlines live within the same "half" of
-	 * the 32 bit modulus space (this is a documented API rule),
-	 * then the latest deadline in the queue minus the earliest is
-	 * guaranteed to be (2's complement) non-negative.  We can
-	 * leverage that to compare the values without having to check
-	 * the current time.
-	 */
-	uint32_t d1 = thread_1->base.prio_deadline;
-	uint32_t d2 = thread_2->base.prio_deadline;
-
-	if (d1 != d2) {
-		/* Sooner deadline means higher effective priority.
-		 * Doing the calculation with unsigned types and casting
-		 * to signed isn't perfect, but at least reduces this
-		 * from UB on overflow to impdef.
-		 */
-		return (int32_t)(d2 - d1);
-	}
-#endif /* CONFIG_SCHED_DEADLINE */
-	return 0;
-}
-
 static ALWAYS_INLINE void *thread_runq(struct k_thread *thread)
 {
 #ifdef CONFIG_SCHED_CPU_MASK_PIN_ONLY
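For reference, the comparator's memcmp-style contract is easy to exercise outside the kernel. The sketch below mirrors the comparison logic on a mocked-up thread type (mock_thread and prio_cmp are illustrative stand-ins, not kernel identifiers) and leans on the fact that in Zephyr a numerically lower prio means a higher-priority thread:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Just the two fields the comparator reads. */
struct mock_thread {
	struct {
		int32_t prio;
		uint32_t prio_deadline;
	} base;
};

/* Same logic as the inlined kernel function; the deadline tiebreak
 * runs unconditionally here instead of under CONFIG_SCHED_DEADLINE.
 */
static int32_t prio_cmp(const struct mock_thread *t1,
			const struct mock_thread *t2)
{
	if (t1->base.prio != t2->base.prio) {
		return t2->base.prio - t1->base.prio;
	}
	return (int32_t)(t2->base.prio_deadline - t1->base.prio_deadline);
}

int main(void)
{
	/* Negative prio -1 is cooperative and outranks preemptible prio 5. */
	struct mock_thread t1 = { .base = { .prio = -1, .prio_deadline = 0 } };
	struct mock_thread t2 = { .base = { .prio = 5, .prio_deadline = 0 } };

	printf("%" PRId32 "\n", prio_cmp(&t1, &t2)); /* prints 6 (> 0): thread 1 wins */

	return 0;
}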