kernel: events: fix walking the waitq race condition
Fixes race condition for k_event_post_internal() in an SMP environment while walking the waitq. Uses z_sched_waitq_walk() to safely walk the waitq by using a sched_spinlock. It should be noted that since walking the wait queue is an operation of indeterminate length, there exists the possibility that the sched_spinlock (which is a highly used and contended-for lock) may be locked for an indeterminate amount of time. However, it is expected that few threads will be waiting on any given kernel event object, which should ameliorate this risk. Fixes #54317 Signed-off-by: Aastha Grover <aastha.grover@intel.com>
This commit is contained in:
parent
cf20ff0765
commit
a2dccf1283
1 changed file with 32 additions and 19 deletions
|
|
@ -39,6 +39,11 @@
|
||||||
|
|
||||||
#define K_EVENT_WAIT_RESET 0x02 /* Reset events prior to waiting */
|
#define K_EVENT_WAIT_RESET 0x02 /* Reset events prior to waiting */
|
||||||
|
|
||||||
|
/*
 * Accumulator threaded through the wait-queue walk performed when events
 * are posted: collects the pended threads whose wait conditions are now
 * satisfied so they can all be unpended after the walk completes.
 */
struct event_walk_data {
	/* Head of a singly linked list (linked via thread->next_event_link)
	 * of threads to unpend once the walk finishes.
	 */
	struct k_thread *head;
	/* Full event set after the post (event->events), tested against each
	 * pended thread's wait conditions.
	 */
	uint32_t events;
};
|
||||||
|
|
||||||
void z_impl_k_event_init(struct k_event *event)
|
void z_impl_k_event_init(struct k_event *event)
|
||||||
{
|
{
|
||||||
event->events = 0;
|
event->events = 0;
|
||||||
|
|
@ -84,14 +89,35 @@ static bool are_wait_conditions_met(uint32_t desired, uint32_t current,
|
||||||
return match != 0;
|
return match != 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static int event_walk_op(struct k_thread *thread, void *data)
|
||||||
|
{
|
||||||
|
unsigned int wait_condition;
|
||||||
|
struct event_walk_data *event_data = data;
|
||||||
|
|
||||||
|
wait_condition = thread->event_options & K_EVENT_WAIT_MASK;
|
||||||
|
|
||||||
|
if (are_wait_conditions_met(thread->events, event_data->events,
|
||||||
|
wait_condition)) {
|
||||||
|
/*
|
||||||
|
* The wait conditions have been satisfied. Add this
|
||||||
|
* thread to the list of threads to unpend.
|
||||||
|
*/
|
||||||
|
|
||||||
|
thread->next_event_link = event_data->head;
|
||||||
|
event_data->head = thread;
|
||||||
|
}
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
static void k_event_post_internal(struct k_event *event, uint32_t events,
|
static void k_event_post_internal(struct k_event *event, uint32_t events,
|
||||||
uint32_t events_mask)
|
uint32_t events_mask)
|
||||||
{
|
{
|
||||||
k_spinlock_key_t key;
|
k_spinlock_key_t key;
|
||||||
struct k_thread *thread;
|
struct k_thread *thread;
|
||||||
unsigned int wait_condition;
|
struct event_walk_data data;
|
||||||
struct k_thread *head = NULL;
|
|
||||||
|
|
||||||
|
data.head = NULL;
|
||||||
key = k_spin_lock(&event->lock);
|
key = k_spin_lock(&event->lock);
|
||||||
|
|
||||||
SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_event, post, event, events,
|
SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_event, post, event, events,
|
||||||
|
|
@ -100,7 +126,7 @@ static void k_event_post_internal(struct k_event *event, uint32_t events,
|
||||||
events = (event->events & ~events_mask) |
|
events = (event->events & ~events_mask) |
|
||||||
(events & events_mask);
|
(events & events_mask);
|
||||||
event->events = events;
|
event->events = events;
|
||||||
|
data.events = events;
|
||||||
/*
|
/*
|
||||||
* Posting an event has the potential to wake multiple pended threads.
|
* Posting an event has the potential to wake multiple pended threads.
|
||||||
* It is desirable to unpend all affected threads simultaneously. To
|
* It is desirable to unpend all affected threads simultaneously. To
|
||||||
|
|
@ -112,23 +138,10 @@ static void k_event_post_internal(struct k_event *event, uint32_t events,
|
||||||
* 3. Ready each of the threads in the linked list
|
* 3. Ready each of the threads in the linked list
|
||||||
*/
|
*/
|
||||||
|
|
||||||
_WAIT_Q_FOR_EACH(&event->wait_q, thread) {
|
z_sched_waitq_walk(&event->wait_q, event_walk_op, &data);
|
||||||
wait_condition = thread->event_options & K_EVENT_WAIT_MASK;
|
|
||||||
|
|
||||||
if (are_wait_conditions_met(thread->events, events,
|
if (data.head != NULL) {
|
||||||
wait_condition)) {
|
thread = data.head;
|
||||||
/*
|
|
||||||
* The wait conditions have been satisfied. Add this
|
|
||||||
* thread to the list of threads to unpend.
|
|
||||||
*/
|
|
||||||
|
|
||||||
thread->next_event_link = head;
|
|
||||||
head = thread;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if (head != NULL) {
|
|
||||||
thread = head;
|
|
||||||
do {
|
do {
|
||||||
z_unpend_thread(thread);
|
z_unpend_thread(thread);
|
||||||
arch_thread_return_value_set(thread, 0);
|
arch_thread_return_value_set(thread, 0);
|
||||||
|
|
|
||||||
Loading…
Reference in a new issue