posix: eventfd: fix dependency cycle between net and posix

Until recently, the posix api was purely a consumer of the
network subsystem. However, a dependency cycle was added as
a stop-gap solution for challenges with the native platform.

Specifically,

1. eventfd symbols conflict with those of the host
2. eventfd was excluded from native libc builds via cmake

If any part of the POSIX API were then to select the network
subsystem (which is a legitimate use case, given that networking
is a part of the POSIX API), we would get a build error due to
the Kconfig dependency cycle.

As usual, with dependency cycles, the cycle can be broken
via a third, mutual dependency.

What is the third mutual dependency? Naturally, it is ZVFS
which was planned some time ago. ZVFS will be where we
collect file-descriptor and FILE-pointer APIs so that we can
ensure consistency for Zephyr users.

This change deprecates EVENTFD_MAX in favour of
ZVFS_EVENTFD_MAX.

Signed-off-by: Chris Friedt <cfriedt@tenstorrent.com>
This commit is contained in:
Chris Friedt 2024-05-28 19:39:06 -04:00 committed by David Leach
parent b3e36ad6e8
commit 487a8756c3
16 changed files with 628 additions and 509 deletions

View file

@ -9,7 +9,6 @@ implementation of the POSIX API.
* :kconfig:option:`CONFIG_DYNAMIC_THREAD`
* :kconfig:option:`CONFIG_DYNAMIC_THREAD_POOL_SIZE`
* :kconfig:option:`CONFIG_EVENTFD`
* :kconfig:option:`CONFIG_EVENTFD_MAX`
* :kconfig:option:`CONFIG_FDTABLE`
* :kconfig:option:`CONFIG_GETOPT_LONG`
* :kconfig:option:`CONFIG_MAX_PTHREAD_SPINLOCK_COUNT`
@ -38,3 +37,4 @@ implementation of the POSIX API.
* :kconfig:option:`CONFIG_POSIX_SEM_VALUE_MAX`
* :kconfig:option:`CONFIG_TIMER_CREATE_WAIT`
* :kconfig:option:`CONFIG_THREAD_STACK_INFO`
* :kconfig:option:`CONFIG_ZVFS_EVENTFD_MAX`

View file

@ -7,19 +7,16 @@
#ifndef ZEPHYR_INCLUDE_POSIX_SYS_EVENTFD_H_
#define ZEPHYR_INCLUDE_POSIX_SYS_EVENTFD_H_
#include <stdint.h>
#include <zephyr/kernel.h>
#include <zephyr/posix/fcntl.h>
#include <zephyr/zvfs/eventfd.h>
#ifdef __cplusplus
extern "C" {
#endif
#define EFD_SEMAPHORE 0x2
#define EFD_NONBLOCK O_NONBLOCK
#define EFD_SEMAPHORE ZVFS_EFD_SEMAPHORE
#define EFD_NONBLOCK ZVFS_EFD_NONBLOCK
typedef uint64_t eventfd_t;
typedef zvfs_eventfd_t eventfd_t;
/**
* @brief Create a file descriptor for event notification

View file

@ -0,0 +1,66 @@
/*
 * Copyright (c) 2020 Tobias Svehagen
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#ifndef ZEPHYR_INCLUDE_ZEPHYR_ZVFS_EVENTFD_H_
#define ZEPHYR_INCLUDE_ZEPHYR_ZVFS_EVENTFD_H_
#include <stdint.h>
#include <zephyr/kernel.h>
#ifdef __cplusplus
extern "C" {
#endif
/** Semaphore mode: each read returns 1 and decrements the counter by one */
#define ZVFS_EFD_SEMAPHORE 2
/** Non-blocking mode: read/write fail with EAGAIN instead of sleeping */
#define ZVFS_EFD_NONBLOCK 0x4000
/** 64-bit counter value carried by a ZVFS eventfd */
typedef uint64_t zvfs_eventfd_t;
/**
 * @brief Create a file descriptor for ZVFS event notification
 *
 * The returned file descriptor can be used with POSIX read/write calls or
 * with the @ref zvfs_eventfd_read or @ref zvfs_eventfd_write functions.
 *
 * It also supports polling and by including an eventfd in a call to poll,
 * it is possible to signal and wake the polling thread by simply writing to
 * the eventfd.
 *
 * When using read() and write() on a ZVFS eventfd, the size must always be at
 * least 8 bytes or the operation will fail with EINVAL.
 *
 * @return New ZVFS eventfd file descriptor on success, -1 on error
 */
int zvfs_eventfd(unsigned int initval, int flags);
/**
 * @brief Read from a ZVFS eventfd
 *
 * If the call is successful:
 * - in semaphore mode (@ref ZVFS_EFD_SEMAPHORE), @p value is set to 1 and
 *   the internal counter is decremented by one;
 * - otherwise, @p value receives the full counter value and the counter is
 *   reset to zero.
 *
 * @param fd File descriptor
 * @param value Pointer for storing the read value
 *
 * @return 0 on success, -1 on error
 */
int zvfs_eventfd_read(int fd, zvfs_eventfd_t *value);
/**
 * @brief Write to a ZVFS eventfd
 *
 * @param fd File descriptor
 * @param value Value to write
 *
 * @return 0 on success, -1 on error
 */
int zvfs_eventfd_write(int fd, zvfs_eventfd_t value);
#ifdef __cplusplus
}
#endif
#endif /* ZEPHYR_INCLUDE_ZEPHYR_ZVFS_EVENTFD_H_ */

View file

@ -40,3 +40,5 @@ zephyr_library_include_directories(
${ZEPHYR_BASE}/kernel/include
${ZEPHYR_BASE}/arch/${ARCH}/include
)
add_subdirectory_ifdef(CONFIG_ZVFS zvfs)

View file

@ -123,5 +123,6 @@ config POWEROFF
Enable support for system power off.
rsource "Kconfig.cbprintf"
rsource "zvfs/Kconfig"
endmenu

View file

@ -0,0 +1,4 @@
# SPDX-License-Identifier: Apache-2.0
zephyr_library()
zephyr_library_sources_ifdef(CONFIG_ZVFS_EVENTFD zvfs_eventfd.c)

36
lib/os/zvfs/Kconfig Normal file
View file

@ -0,0 +1,36 @@
# Copyright (c) 2020 Tobias Svehagen
# Copyright (c) 2023 Meta
#
# SPDX-License-Identifier: Apache-2.0
menuconfig ZVFS
bool "Zephyr virtual filesystem (ZVFS) support [EXPERIMENTAL]"
select FDTABLE
select EXPERIMENTAL
help
ZVFS is a central, Zephyr-native library that provides a common interoperable API for all
types of file descriptors such as those from the non-virtual FS, sockets, eventfds, FILE *'s
and more. It is designed to be used by all Zephyr subsystems that need to work with files.
if ZVFS
config ZVFS_EVENTFD
bool "ZVFS event file descriptor support"
select POLL
help
Enable support for ZVFS event file descriptors. An eventfd can
be used as an event wait/notify mechanism together with POSIX calls
like read, write and poll.
if ZVFS_EVENTFD
config ZVFS_EVENTFD_MAX
int "Maximum number of ZVFS eventfd's"
default 1
range 1 4096
help
The maximum number of supported event file descriptors.
endif # ZVFS_EVENTFD
endif # ZVFS

485
lib/os/zvfs/zvfs_eventfd.c Normal file
View file

@ -0,0 +1,485 @@
/*
* Copyright (c) 2020 Tobias Svehagen
* Copyright (c) 2023, Meta
* Copyright (c) 2024, Tenstorrent AI ULC
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <zephyr/kernel.h>
#include <zephyr/net/socket.h>
#include <zephyr/posix/fcntl.h>
#include <zephyr/zvfs/eventfd.h>
#include <zephyr/sys/bitarray.h>
#include <zephyr/sys/fdtable.h>
#include <zephyr/sys/math_extras.h>
/* Internal flag marking an efds[] slot as allocated; never settable by callers */
#define ZVFS_EFD_IN_USE 0x1
/* All flag bits callers may pass to zvfs_eventfd() or set via F_SETFL */
#define ZVFS_EFD_FLAGS_SET (ZVFS_EFD_SEMAPHORE | ZVFS_EFD_NONBLOCK)

/* State for one eventfd object */
struct zvfs_eventfd {
	struct k_poll_signal read_sig;  /* raised when counter becomes non-zero */
	struct k_poll_signal write_sig; /* raised when counter leaves its maximum */
	struct k_spinlock lock;         /* protects cnt and flags */
	zvfs_eventfd_t cnt;             /* the 64-bit event counter */
	int flags;                      /* ZVFS_EFD_* bits plus ZVFS_EFD_IN_USE */
};

/* Shared read/write driver; op is the locked read or write primitive */
static ssize_t zvfs_eventfd_rw_op(void *obj, void *buf, size_t sz,
				  int (*op)(struct zvfs_eventfd *efd, zvfs_eventfd_t *value));

/* Static pool of eventfd objects and its allocation bitmap */
SYS_BITARRAY_DEFINE_STATIC(efds_bitarray, CONFIG_ZVFS_EVENTFD_MAX);
static struct zvfs_eventfd efds[CONFIG_ZVFS_EVENTFD_MAX];

static const struct fd_op_vtable zvfs_eventfd_fd_vtable;
/* True if the slot backing @p efd is currently allocated */
static inline bool zvfs_eventfd_is_in_use(struct zvfs_eventfd *efd)
{
	return (efd->flags & ZVFS_EFD_IN_USE) != 0;
}

/* True if created with ZVFS_EFD_SEMAPHORE (reads decrement by one) */
static inline bool zvfs_eventfd_is_semaphore(struct zvfs_eventfd *efd)
{
	return (efd->flags & ZVFS_EFD_SEMAPHORE) != 0;
}

/* True unless ZVFS_EFD_NONBLOCK is set, i.e. reads/writes may sleep */
static inline bool zvfs_eventfd_is_blocking(struct zvfs_eventfd *efd)
{
	return (efd->flags & ZVFS_EFD_NONBLOCK) == 0;
}
/*
 * ZFD_IOCTL_POLL_PREPARE handler: append one k_poll_event for each poll
 * direction requested in pfd->events (POLLIN -> read_sig, POLLOUT ->
 * write_sig), advancing *pev past each event added.
 *
 * Returns 0 on success, or -1 with errno = ENOMEM when the caller's event
 * array (bounded by pev_end) is full.
 */
static int zvfs_eventfd_poll_prepare(struct zvfs_eventfd *efd,
				     struct zsock_pollfd *pfd,
				     struct k_poll_event **pev,
				     struct k_poll_event *pev_end)
{
	if (pfd->events & ZSOCK_POLLIN) {
		if (*pev == pev_end) {
			errno = ENOMEM;
			return -1;
		}

		(*pev)->obj = &efd->read_sig;
		(*pev)->type = K_POLL_TYPE_SIGNAL;
		(*pev)->mode = K_POLL_MODE_NOTIFY_ONLY;
		(*pev)->state = K_POLL_STATE_NOT_READY;
		(*pev)++;
	}

	if (pfd->events & ZSOCK_POLLOUT) {
		if (*pev == pev_end) {
			errno = ENOMEM;
			return -1;
		}

		(*pev)->obj = &efd->write_sig;
		(*pev)->type = K_POLL_TYPE_SIGNAL;
		(*pev)->mode = K_POLL_MODE_NOTIFY_ONLY;
		(*pev)->state = K_POLL_STATE_NOT_READY;
		(*pev)++;
	}

	return 0;
}
/*
 * ZFD_IOCTL_POLL_UPDATE handler: translate eventfd state into revents.
 * Readable when the counter is non-zero; writable while the counter is
 * below its maximum (UINT64_MAX - 1). *pev is advanced past the events
 * reserved by zvfs_eventfd_poll_prepare().
 */
static int zvfs_eventfd_poll_update(struct zvfs_eventfd *efd,
				    struct zsock_pollfd *pfd,
				    struct k_poll_event **pev)
{
	if (pfd->events & ZSOCK_POLLIN) {
		/* flag * bool: sets ZSOCK_POLLIN only when cnt > 0 */
		pfd->revents |= ZSOCK_POLLIN * (efd->cnt > 0);
		(*pev)++;
	}

	if (pfd->events & ZSOCK_POLLOUT) {
		pfd->revents |= ZSOCK_POLLOUT * (efd->cnt < UINT64_MAX - 1);
		(*pev)++;
	}

	return 0;
}
/*
 * Consume the counter; the caller must hold efd->lock.
 *
 * Semaphore mode: *value = 1 and the counter is decremented by one.
 * Normal mode: *value = counter and the counter is reset to zero.
 *
 * Returns 0 on success, -EBADF if the fd was closed, or -EAGAIN if the
 * counter is zero (nothing to read).
 */
static int zvfs_eventfd_read_locked(struct zvfs_eventfd *efd, zvfs_eventfd_t *value)
{
	if (!zvfs_eventfd_is_in_use(efd)) {
		/* file descriptor has been closed */
		return -EBADF;
	}

	if (efd->cnt == 0) {
		/* would block / try again */
		return -EAGAIN;
	}

	/* successful read */
	if (zvfs_eventfd_is_semaphore(efd)) {
		*value = 1;
		--efd->cnt;
	} else {
		*value = efd->cnt;
		efd->cnt = 0;
	}

	if (efd->cnt == 0) {
		/* no longer readable */
		k_poll_signal_reset(&efd->read_sig);
	}

	/* a read always frees space, so the fd is writable again */
	k_poll_signal_raise(&efd->write_sig, 0);

	return 0;
}
/*
 * Add *value to the counter; the caller must hold efd->lock.
 *
 * Returns 0 on success, -EBADF if the fd was closed, -EINVAL for the
 * disallowed value UINT64_MAX, or -EAGAIN when the addition would overflow
 * or reach UINT64_MAX (the counter's ceiling is UINT64_MAX - 1).
 */
static int zvfs_eventfd_write_locked(struct zvfs_eventfd *efd, zvfs_eventfd_t *value)
{
	zvfs_eventfd_t result;

	if (!zvfs_eventfd_is_in_use(efd)) {
		/* file descriptor has been closed */
		return -EBADF;
	}

	if (*value == UINT64_MAX) {
		/* not a permitted value */
		return -EINVAL;
	}

	if (u64_add_overflow(efd->cnt, *value, &result) || result == UINT64_MAX) {
		/* would block / try again */
		return -EAGAIN;
	}

	/* successful write */
	efd->cnt = result;
	if (efd->cnt == (UINT64_MAX - 1)) {
		/* counter is at its maximum: no longer writable */
		k_poll_signal_reset(&efd->write_sig);
	}

	/* counter is now non-zero, so the fd is readable */
	k_poll_signal_raise(&efd->read_sig, 0);

	return 0;
}
/* vtable read() entry: delegate to the common driver with the read primitive */
static ssize_t zvfs_eventfd_read_op(void *obj, void *buf, size_t sz)
{
	return zvfs_eventfd_rw_op(obj, buf, sz, zvfs_eventfd_read_locked);
}

/* vtable write() entry; the cast drops const — the write path only reads *buf */
static ssize_t zvfs_eventfd_write_op(void *obj, const void *buf, size_t sz)
{
	return zvfs_eventfd_rw_op(obj, (zvfs_eventfd_t *)buf, sz, zvfs_eventfd_write_locked);
}
/*
 * vtable close() entry: release the eventfd's pool slot and wake every
 * thread blocked in zvfs_eventfd_rw_op() so they can observe EBADF.
 *
 * Returns 0 on success, or -1 with errno set (EBADF if already closed,
 * EWOULDBLOCK from ISR context).
 */
static int zvfs_eventfd_close_op(void *obj)
{
	int ret;
	int err;
	k_spinlock_key_t key;
	struct k_mutex *lock = NULL;
	struct k_condvar *cond = NULL;
	struct zvfs_eventfd *efd = (struct zvfs_eventfd *)obj;

	if (k_is_in_isr()) {
		/* not covered by the man page, but necessary in Zephyr */
		errno = EWOULDBLOCK;
		return -1;
	}

	/* per-fd mutex/condvar pair, shared with zvfs_eventfd_rw_op() */
	err = (int)z_get_obj_lock_and_cond(obj, &zvfs_eventfd_fd_vtable, &lock, &cond);
	__ASSERT((bool)err, "z_get_obj_lock_and_cond() failed");
	__ASSERT_NO_MSG(lock != NULL);
	__ASSERT_NO_MSG(cond != NULL);

	err = k_mutex_lock(lock, K_FOREVER);
	__ASSERT(err == 0, "k_mutex_lock() failed: %d", err);

	key = k_spin_lock(&efd->lock);

	if (!zvfs_eventfd_is_in_use(efd)) {
		/* already closed */
		errno = EBADF;
		ret = -1;
		goto unlock;
	}

	/* return this object's slot to the allocation bitmap */
	err = sys_bitarray_free(&efds_bitarray, 1, (struct zvfs_eventfd *)obj - efds);
	__ASSERT(err == 0, "sys_bitarray_free() failed: %d", err);

	efd->flags = 0;
	efd->cnt = 0;
	ret = 0;

unlock:
	k_spin_unlock(&efd->lock, key);
	/* when closing a zvfs_eventfd, broadcast to all waiters */
	err = k_condvar_broadcast(cond);
	__ASSERT(err == 0, "k_condvar_broadcast() failed: %d", err);
	err = k_mutex_unlock(lock);
	__ASSERT(err == 0, "k_mutex_unlock() failed: %d", err);

	return ret;
}
/*
 * vtable ioctl() entry.
 *
 * Supports F_GETFL / F_SETFL (only bits in ZVFS_EFD_FLAGS_SET may be set;
 * internal bits such as ZVFS_EFD_IN_USE are preserved), plus the internal
 * ZFD_IOCTL_POLL_PREPARE / ZFD_IOCTL_POLL_UPDATE hooks used by the socket
 * poll implementation. Unknown requests fail with EOPNOTSUPP.
 */
static int zvfs_eventfd_ioctl_op(void *obj, unsigned int request, va_list args)
{
	int ret;
	k_spinlock_key_t key;
	struct zvfs_eventfd *efd = (struct zvfs_eventfd *)obj;

	/* note: zsock_poll_internal() has already taken the mutex */
	key = k_spin_lock(&efd->lock);

	if (!zvfs_eventfd_is_in_use(efd)) {
		errno = EBADF;
		ret = -1;
		goto unlock;
	}

	switch (request) {
	case F_GETFL:
		/* report only the caller-visible flag bits */
		ret = efd->flags & ZVFS_EFD_FLAGS_SET;
		break;

	case F_SETFL: {
		int flags;

		flags = va_arg(args, int);

		if (flags & ~ZVFS_EFD_FLAGS_SET) {
			errno = EINVAL;
			ret = -1;
		} else {
			/* keep internal bits (e.g. ZVFS_EFD_IN_USE) intact */
			int prev_flags = efd->flags & ~ZVFS_EFD_FLAGS_SET;

			efd->flags = flags | prev_flags;
			ret = 0;
		}
	} break;

	case ZFD_IOCTL_POLL_PREPARE: {
		struct zsock_pollfd *pfd;
		struct k_poll_event **pev;
		struct k_poll_event *pev_end;

		pfd = va_arg(args, struct zsock_pollfd *);
		pev = va_arg(args, struct k_poll_event **);
		pev_end = va_arg(args, struct k_poll_event *);

		ret = zvfs_eventfd_poll_prepare(obj, pfd, pev, pev_end);
	} break;

	case ZFD_IOCTL_POLL_UPDATE: {
		struct zsock_pollfd *pfd;
		struct k_poll_event **pev;

		pfd = va_arg(args, struct zsock_pollfd *);
		pev = va_arg(args, struct k_poll_event **);

		ret = zvfs_eventfd_poll_update(obj, pfd, pev);
	} break;

	default:
		errno = EOPNOTSUPP;
		ret = -1;
		break;
	}

unlock:
	k_spin_unlock(&efd->lock, key);

	return ret;
}
/* fdtable operations backing every zvfs eventfd file descriptor */
static const struct fd_op_vtable zvfs_eventfd_fd_vtable = {
	.read = zvfs_eventfd_read_op,
	.write = zvfs_eventfd_write_op,
	.close = zvfs_eventfd_close_op,
	.ioctl = zvfs_eventfd_ioctl_op,
};
/* common to both zvfs_eventfd_read_op() and zvfs_eventfd_write_op() */
/*
 * Validate buf/sz, then apply @p op (the locked read or write primitive).
 *
 * Non-blocking fds: op runs once under the spinlock and -EAGAIN maps
 * straight to errno.
 *
 * Blocking fds: the per-fd mutex is taken (never while holding the
 * spinlock — lock order is mutex, then spinlock), then op is retried
 * under the spinlock, sleeping on the per-fd condvar between attempts,
 * until it succeeds or fails with an error other than -EAGAIN. One
 * condvar signal is sent on the way out so a single other waiter can
 * make progress; close() uses a broadcast instead.
 *
 * Returns sizeof(zvfs_eventfd_t) on success, or -1 with errno set.
 */
static ssize_t zvfs_eventfd_rw_op(void *obj, void *buf, size_t sz,
				  int (*op)(struct zvfs_eventfd *efd, zvfs_eventfd_t *value))
{
	int err;
	ssize_t ret;
	k_spinlock_key_t key;
	struct zvfs_eventfd *efd = obj;
	struct k_mutex *lock = NULL;
	struct k_condvar *cond = NULL;

	if (sz < sizeof(zvfs_eventfd_t)) {
		/* transfers must be at least 8 bytes */
		errno = EINVAL;
		return -1;
	}

	if (buf == NULL) {
		errno = EFAULT;
		return -1;
	}

	key = k_spin_lock(&efd->lock);

	if (!zvfs_eventfd_is_blocking(efd)) {
		/*
		 * Handle the non-blocking case entirely within this scope
		 */
		ret = op(efd, buf);
		if (ret < 0) {
			errno = -ret;
			ret = -1;
		} else {
			ret = sizeof(zvfs_eventfd_t);
		}

		goto unlock_spin;
	}

	/*
	 * Handle the blocking case below
	 */
	__ASSERT_NO_MSG(zvfs_eventfd_is_blocking(efd));

	if (k_is_in_isr()) {
		/* not covered by the man page, but necessary in Zephyr */
		errno = EWOULDBLOCK;
		ret = -1;
		goto unlock_spin;
	}

	err = (int)z_get_obj_lock_and_cond(obj, &zvfs_eventfd_fd_vtable, &lock, &cond);
	__ASSERT((bool)err, "z_get_obj_lock_and_cond() failed");
	__ASSERT_NO_MSG(lock != NULL);
	__ASSERT_NO_MSG(cond != NULL);

	/* do not hold a spinlock when taking a mutex */
	k_spin_unlock(&efd->lock, key);
	err = k_mutex_lock(lock, K_FOREVER);
	__ASSERT(err == 0, "k_mutex_lock() failed: %d", err);

	while (true) {
		/* retake the spinlock */
		key = k_spin_lock(&efd->lock);

		ret = op(efd, buf);
		switch (ret) {
		case -EAGAIN:
			/* not an error in blocking mode. break and try again */
			break;
		case 0:
			/* success! */
			ret = sizeof(zvfs_eventfd_t);
			goto unlock_mutex;
		default:
			/* some other error */
			__ASSERT_NO_MSG(ret < 0);
			errno = -ret;
			ret = -1;
			goto unlock_mutex;
		}

		/* do not hold a spinlock when taking a mutex */
		k_spin_unlock(&efd->lock, key);

		/* wait for a write or close */
		err = k_condvar_wait(cond, lock, K_FOREVER);
		__ASSERT(err == 0, "k_condvar_wait() failed: %d", err);
	}

unlock_mutex:
	k_spin_unlock(&efd->lock, key);
	/* only wake a single waiter */
	err = k_condvar_signal(cond);
	__ASSERT(err == 0, "k_condvar_signal() failed: %d", err);
	err = k_mutex_unlock(lock);
	__ASSERT(err == 0, "k_mutex_unlock() failed: %d", err);
	goto out;

unlock_spin:
	k_spin_unlock(&efd->lock, key);

out:
	return ret;
}
/*
 * Public-facing API
 */

/*
 * Create a new eventfd; see include/zephyr/zvfs/eventfd.h.
 *
 * Allocates a slot from the static efds[] pool, reserves a file
 * descriptor, then initializes the counter and poll signals before
 * publishing the object in the fd table.
 *
 * Returns the new fd on success, or -1 with errno set (EINVAL for
 * unknown flag bits, ENOMEM when the pool is exhausted, or whatever
 * z_reserve_fd() reports on fd-table exhaustion).
 */
int zvfs_eventfd(unsigned int initval, int flags)
{
	/* fix: drop the misleading dead initializers (fd = 1, efd = NULL);
	 * both variables are always assigned before first use below.
	 */
	int fd;
	size_t offset;
	struct zvfs_eventfd *efd;

	/* only ZVFS_EFD_SEMAPHORE | ZVFS_EFD_NONBLOCK are accepted */
	if (flags & ~ZVFS_EFD_FLAGS_SET) {
		errno = EINVAL;
		return -1;
	}

	/* claim a slot in the static pool */
	if (sys_bitarray_alloc(&efds_bitarray, 1, &offset) < 0) {
		errno = ENOMEM;
		return -1;
	}

	efd = &efds[offset];

	fd = z_reserve_fd();
	if (fd < 0) {
		/* release the slot claimed above */
		sys_bitarray_free(&efds_bitarray, 1, offset);
		return -1;
	}

	efd->flags = ZVFS_EFD_IN_USE | flags;
	efd->cnt = initval;

	k_poll_signal_init(&efd->write_sig);
	k_poll_signal_init(&efd->read_sig);

	/* readable only if the initial counter is non-zero; always writable */
	if (initval != 0) {
		k_poll_signal_raise(&efd->read_sig, 0);
	}
	k_poll_signal_raise(&efd->write_sig, 0);

	z_finalize_fd(fd, efd, &zvfs_eventfd_fd_vtable);

	return fd;
}
/*
 * Read the counter of eventfd @p fd into *value; see
 * include/zephyr/zvfs/eventfd.h for semaphore vs normal-mode semantics.
 * Returns 0 on success, -1 with errno set (EBADF for a non-eventfd fd).
 */
int zvfs_eventfd_read(int fd, zvfs_eventfd_t *value)
{
	int ret;
	void *obj;

	/* also verifies that fd actually belongs to this vtable */
	obj = z_get_fd_obj(fd, &zvfs_eventfd_fd_vtable, EBADF);
	if (obj == NULL) {
		return -1;
	}

	ret = zvfs_eventfd_rw_op(obj, value, sizeof(zvfs_eventfd_t), zvfs_eventfd_read_locked);
	__ASSERT_NO_MSG(ret == -1 || ret == sizeof(zvfs_eventfd_t));
	if (ret < 0) {
		return -1;
	}

	return 0;
}
/*
 * Add @p value to the counter of eventfd @p fd.
 * Returns 0 on success, -1 with errno set (EBADF for a non-eventfd fd).
 */
int zvfs_eventfd_write(int fd, zvfs_eventfd_t value)
{
	int ret;
	void *obj;

	/* also verifies that fd actually belongs to this vtable */
	obj = z_get_fd_obj(fd, &zvfs_eventfd_fd_vtable, EBADF);
	if (obj == NULL) {
		return -1;
	}

	ret = zvfs_eventfd_rw_op(obj, &value, sizeof(zvfs_eventfd_t), zvfs_eventfd_write_locked);
	__ASSERT_NO_MSG(ret == -1 || ret == sizeof(zvfs_eventfd_t));
	if (ret < 0) {
		return -1;
	}

	return 0;
}

View file

@ -5,26 +5,15 @@
menu "Miscellaneous POSIX-related options"
menuconfig EVENTFD
config EVENTFD
bool "Support for eventfd"
depends on !NATIVE_APPLICATION
select POLL
select FDTABLE
select ZVFS
select ZVFS_EVENTFD
default y if POSIX_API
help
Enable support for event file descriptors, eventfd. An eventfd can
be used as an event wait/notify mechanism together with POSIX calls
like read, write and poll.
if EVENTFD
config EVENTFD_MAX
int "Maximum number of eventfd's"
default 1
range 1 4096
help
The maximum number of supported event file descriptors.
endif # EVENTFD
endmenu

View file

@ -6,6 +6,15 @@
menu "Deprecated POSIX options"
config EVENTFD_MAX
int "Maximum number of eventfd's [DEPRECATED]"
default ZVFS_EVENTFD_MAX if ZVFS_EVENTFD
default 0
help
This option is deprecated.
Please use CONFIG_ZVFS_EVENTFD_MAX instead.
config FNMATCH
bool "Support for fnmatch [DEPRECATED]"
select DEPRECATED

View file

@ -1,484 +1,23 @@
/*
* Copyright (c) 2020 Tobias Svehagen
* Copyright (c) 2023, Meta
* Copyright (c) 2024, Tenstorrent AI ULC
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <ksched.h>
#include <zephyr/kernel.h>
#include <zephyr/net/socket.h>
#include <zephyr/posix/sys/eventfd.h>
#include <zephyr/sys/bitarray.h>
#include <zephyr/sys/fdtable.h>
#include <zephyr/sys/math_extras.h>
#define EFD_IN_USE 0x1
#define EFD_FLAGS_SET (EFD_SEMAPHORE | EFD_NONBLOCK)
struct eventfd {
struct k_poll_signal read_sig;
struct k_poll_signal write_sig;
struct k_spinlock lock;
eventfd_t cnt;
int flags;
};
static ssize_t eventfd_rw_op(void *obj, void *buf, size_t sz,
int (*op)(struct eventfd *efd, eventfd_t *value));
SYS_BITARRAY_DEFINE_STATIC(efds_bitarray, CONFIG_EVENTFD_MAX);
static struct eventfd efds[CONFIG_EVENTFD_MAX];
static const struct fd_op_vtable eventfd_fd_vtable;
static inline bool eventfd_is_in_use(struct eventfd *efd)
{
return (efd->flags & EFD_IN_USE) != 0;
}
static inline bool eventfd_is_semaphore(struct eventfd *efd)
{
return (efd->flags & EFD_SEMAPHORE) != 0;
}
static inline bool eventfd_is_blocking(struct eventfd *efd)
{
return (efd->flags & EFD_NONBLOCK) == 0;
}
static int eventfd_poll_prepare(struct eventfd *efd,
struct zsock_pollfd *pfd,
struct k_poll_event **pev,
struct k_poll_event *pev_end)
{
if (pfd->events & ZSOCK_POLLIN) {
if (*pev == pev_end) {
errno = ENOMEM;
return -1;
}
(*pev)->obj = &efd->read_sig;
(*pev)->type = K_POLL_TYPE_SIGNAL;
(*pev)->mode = K_POLL_MODE_NOTIFY_ONLY;
(*pev)->state = K_POLL_STATE_NOT_READY;
(*pev)++;
}
if (pfd->events & ZSOCK_POLLOUT) {
if (*pev == pev_end) {
errno = ENOMEM;
return -1;
}
(*pev)->obj = &efd->write_sig;
(*pev)->type = K_POLL_TYPE_SIGNAL;
(*pev)->mode = K_POLL_MODE_NOTIFY_ONLY;
(*pev)->state = K_POLL_STATE_NOT_READY;
(*pev)++;
}
return 0;
}
static int eventfd_poll_update(struct eventfd *efd,
struct zsock_pollfd *pfd,
struct k_poll_event **pev)
{
if (pfd->events & ZSOCK_POLLIN) {
pfd->revents |= ZSOCK_POLLIN * (efd->cnt > 0);
(*pev)++;
}
if (pfd->events & ZSOCK_POLLOUT) {
pfd->revents |= ZSOCK_POLLOUT * (efd->cnt < UINT64_MAX - 1);
(*pev)++;
}
return 0;
}
static int eventfd_read_locked(struct eventfd *efd, eventfd_t *value)
{
if (!eventfd_is_in_use(efd)) {
/* file descriptor has been closed */
return -EBADF;
}
if (efd->cnt == 0) {
/* would block / try again */
return -EAGAIN;
}
/* successful read */
if (eventfd_is_semaphore(efd)) {
*value = 1;
--efd->cnt;
} else {
*value = efd->cnt;
efd->cnt = 0;
}
if (efd->cnt == 0) {
k_poll_signal_reset(&efd->read_sig);
}
k_poll_signal_raise(&efd->write_sig, 0);
return 0;
}
static int eventfd_write_locked(struct eventfd *efd, eventfd_t *value)
{
eventfd_t result;
if (!eventfd_is_in_use(efd)) {
/* file descriptor has been closed */
return -EBADF;
}
if (*value == UINT64_MAX) {
/* not a permitted value */
return -EINVAL;
}
if (u64_add_overflow(efd->cnt, *value, &result) || result == UINT64_MAX) {
/* would block / try again */
return -EAGAIN;
}
/* successful write */
efd->cnt = result;
if (efd->cnt == (UINT64_MAX - 1)) {
k_poll_signal_reset(&efd->write_sig);
}
k_poll_signal_raise(&efd->read_sig, 0);
return 0;
}
static ssize_t eventfd_read_op(void *obj, void *buf, size_t sz)
{
return eventfd_rw_op(obj, buf, sz, eventfd_read_locked);
}
static ssize_t eventfd_write_op(void *obj, const void *buf, size_t sz)
{
return eventfd_rw_op(obj, (eventfd_t *)buf, sz, eventfd_write_locked);
}
static int eventfd_close_op(void *obj)
{
int ret;
int err;
k_spinlock_key_t key;
struct k_mutex *lock = NULL;
struct k_condvar *cond = NULL;
struct eventfd *efd = (struct eventfd *)obj;
if (k_is_in_isr()) {
/* not covered by the man page, but necessary in Zephyr */
errno = EWOULDBLOCK;
return -1;
}
err = (int)z_get_obj_lock_and_cond(obj, &eventfd_fd_vtable, &lock, &cond);
__ASSERT((bool)err, "z_get_obj_lock_and_cond() failed");
__ASSERT_NO_MSG(lock != NULL);
__ASSERT_NO_MSG(cond != NULL);
err = k_mutex_lock(lock, K_FOREVER);
__ASSERT(err == 0, "k_mutex_lock() failed: %d", err);
key = k_spin_lock(&efd->lock);
if (!eventfd_is_in_use(efd)) {
errno = EBADF;
ret = -1;
goto unlock;
}
err = sys_bitarray_free(&efds_bitarray, 1, (struct eventfd *)obj - efds);
__ASSERT(err == 0, "sys_bitarray_free() failed: %d", err);
efd->flags = 0;
efd->cnt = 0;
ret = 0;
unlock:
k_spin_unlock(&efd->lock, key);
/* when closing an eventfd, broadcast to all waiters */
err = k_condvar_broadcast(cond);
__ASSERT(err == 0, "k_condvar_broadcast() failed: %d", err);
err = k_mutex_unlock(lock);
__ASSERT(err == 0, "k_mutex_unlock() failed: %d", err);
return ret;
}
static int eventfd_ioctl_op(void *obj, unsigned int request, va_list args)
{
int ret;
k_spinlock_key_t key;
struct eventfd *efd = (struct eventfd *)obj;
/* note: zsock_poll_internal() has already taken the mutex */
key = k_spin_lock(&efd->lock);
if (!eventfd_is_in_use(efd)) {
errno = EBADF;
ret = -1;
goto unlock;
}
switch (request) {
case F_GETFL:
ret = efd->flags & EFD_FLAGS_SET;
break;
case F_SETFL: {
int flags;
flags = va_arg(args, int);
if (flags & ~EFD_FLAGS_SET) {
errno = EINVAL;
ret = -1;
} else {
int prev_flags = efd->flags & ~EFD_FLAGS_SET;
efd->flags = flags | prev_flags;
ret = 0;
}
} break;
case ZFD_IOCTL_POLL_PREPARE: {
struct zsock_pollfd *pfd;
struct k_poll_event **pev;
struct k_poll_event *pev_end;
pfd = va_arg(args, struct zsock_pollfd *);
pev = va_arg(args, struct k_poll_event **);
pev_end = va_arg(args, struct k_poll_event *);
ret = eventfd_poll_prepare(obj, pfd, pev, pev_end);
} break;
case ZFD_IOCTL_POLL_UPDATE: {
struct zsock_pollfd *pfd;
struct k_poll_event **pev;
pfd = va_arg(args, struct zsock_pollfd *);
pev = va_arg(args, struct k_poll_event **);
ret = eventfd_poll_update(obj, pfd, pev);
} break;
default:
errno = EOPNOTSUPP;
ret = -1;
break;
}
unlock:
k_spin_unlock(&efd->lock, key);
return ret;
}
static const struct fd_op_vtable eventfd_fd_vtable = {
.read = eventfd_read_op,
.write = eventfd_write_op,
.close = eventfd_close_op,
.ioctl = eventfd_ioctl_op,
};
/* common to both eventfd_read_op() and eventfd_write_op() */
static ssize_t eventfd_rw_op(void *obj, void *buf, size_t sz,
int (*op)(struct eventfd *efd, eventfd_t *value))
{
int err;
ssize_t ret;
k_spinlock_key_t key;
struct eventfd *efd = obj;
struct k_mutex *lock = NULL;
struct k_condvar *cond = NULL;
if (sz < sizeof(eventfd_t)) {
errno = EINVAL;
return -1;
}
if (buf == NULL) {
errno = EFAULT;
return -1;
}
key = k_spin_lock(&efd->lock);
if (!eventfd_is_blocking(efd)) {
/*
* Handle the non-blocking case entirely within this scope
*/
ret = op(efd, buf);
if (ret < 0) {
errno = -ret;
ret = -1;
} else {
ret = sizeof(eventfd_t);
}
goto unlock_spin;
}
/*
* Handle the blocking case below
*/
__ASSERT_NO_MSG(eventfd_is_blocking(efd));
if (k_is_in_isr()) {
/* not covered by the man page, but necessary in Zephyr */
errno = EWOULDBLOCK;
ret = -1;
goto unlock_spin;
}
err = (int)z_get_obj_lock_and_cond(obj, &eventfd_fd_vtable, &lock, &cond);
__ASSERT((bool)err, "z_get_obj_lock_and_cond() failed");
__ASSERT_NO_MSG(lock != NULL);
__ASSERT_NO_MSG(cond != NULL);
/* do not hold a spinlock when taking a mutex */
k_spin_unlock(&efd->lock, key);
err = k_mutex_lock(lock, K_FOREVER);
__ASSERT(err == 0, "k_mutex_lock() failed: %d", err);
while (true) {
/* retake the spinlock */
key = k_spin_lock(&efd->lock);
ret = op(efd, buf);
switch (ret) {
case -EAGAIN:
/* not an error in blocking mode. break and try again */
break;
case 0:
/* success! */
ret = sizeof(eventfd_t);
goto unlock_mutex;
default:
/* some other error */
__ASSERT_NO_MSG(ret < 0);
errno = -ret;
ret = -1;
goto unlock_mutex;
}
/* do not hold a spinlock when taking a mutex */
k_spin_unlock(&efd->lock, key);
/* wait for a write or close */
err = k_condvar_wait(cond, lock, K_FOREVER);
__ASSERT(err == 0, "k_condvar_wait() failed: %d", err);
}
unlock_mutex:
k_spin_unlock(&efd->lock, key);
/* only wake a single waiter */
err = k_condvar_signal(cond);
__ASSERT(err == 0, "k_condvar_signal() failed: %d", err);
err = k_mutex_unlock(lock);
__ASSERT(err == 0, "k_mutex_unlock() failed: %d", err);
goto out;
unlock_spin:
k_spin_unlock(&efd->lock, key);
out:
return ret;
}
/*
* Public-facing API
*/
#include <zephyr/zvfs/eventfd.h>
int eventfd(unsigned int initval, int flags)
{
int fd = 1;
size_t offset;
struct eventfd *efd = NULL;
if (flags & ~EFD_FLAGS_SET) {
errno = EINVAL;
return -1;
}
if (sys_bitarray_alloc(&efds_bitarray, 1, &offset) < 0) {
errno = ENOMEM;
return -1;
}
efd = &efds[offset];
fd = z_reserve_fd();
if (fd < 0) {
sys_bitarray_free(&efds_bitarray, 1, offset);
return -1;
}
efd->flags = EFD_IN_USE | flags;
efd->cnt = initval;
k_poll_signal_init(&efd->write_sig);
k_poll_signal_init(&efd->read_sig);
if (initval != 0) {
k_poll_signal_raise(&efd->read_sig, 0);
}
k_poll_signal_raise(&efd->write_sig, 0);
z_finalize_fd(fd, efd, &eventfd_fd_vtable);
return fd;
return zvfs_eventfd(initval, flags);
}
int eventfd_read(int fd, eventfd_t *value)
{
int ret;
void *obj;
obj = z_get_fd_obj(fd, &eventfd_fd_vtable, EBADF);
if (obj == NULL) {
return -1;
}
ret = eventfd_rw_op(obj, value, sizeof(eventfd_t), eventfd_read_locked);
__ASSERT_NO_MSG(ret == -1 || ret == sizeof(eventfd_t));
if (ret < 0) {
return -1;
}
return 0;
return zvfs_eventfd_read(fd, value);
}
int eventfd_write(int fd, eventfd_t value)
{
int ret;
void *obj;
obj = z_get_fd_obj(fd, &eventfd_fd_vtable, EBADF);
if (obj == NULL) {
return -1;
}
ret = eventfd_rw_op(obj, &value, sizeof(eventfd_t), eventfd_write_locked);
__ASSERT_NO_MSG(ret == -1 || ret == sizeof(eventfd_t));
if (ret < 0) {
return -1;
}
return 0;
return zvfs_eventfd_write(fd, value);
}

View file

@ -10,18 +10,7 @@ LOG_MODULE_REGISTER(net_sock_svc, CONFIG_NET_SOCKETS_LOG_LEVEL);
#include <zephyr/kernel.h>
#include <zephyr/init.h>
#include <zephyr/net/socket_service.h>
#include <zephyr/posix/sys/eventfd.h>
/* Next checks makes sure that we are not trying to use this library
* with eventfd if CONFIG_POSIX_API is not set and if using native_sim
* based board. The reason is that we should always use zephyr libc based
* eventfd implementation instead of host libc one.
*/
#if defined(CONFIG_NATIVE_LIBC) && defined(CONFIG_EVENTFD)
#error "The eventfd support CONFIG_EVENTFD will not work with host libc "
"so you need to enable CONFIG_POSIX_API in this case which will turn "
"off the host libc usage."
#endif
#include <zephyr/zvfs/eventfd.h>
static int init_socket_service(void);
static bool init_done;
@ -94,7 +83,7 @@ int z_impl_net_socket_service_register(const struct net_socket_service_desc *svc
}
/* Tell the thread to re-read the variables */
eventfd_write(ctx.events[0].fd, 1);
zvfs_eventfd_write(ctx.events[0].fd, 1);
ret = 0;
out:
@ -191,7 +180,7 @@ static int trigger_work(struct zsock_pollfd *pev)
static void socket_service_thread(void)
{
int ret, i, fd, count = 0;
eventfd_t value;
zvfs_eventfd_t value;
STRUCT_SECTION_COUNT(net_socket_service_desc, &ret);
if (ret == 0) {
@ -222,11 +211,11 @@ static void socket_service_thread(void)
ctx.count = count + 1;
/* Create an eventfd that can be used to trigger events during polling */
fd = eventfd(0, 0);
/* Create a zvfs_eventfd that can be used to trigger events during polling */
fd = zvfs_eventfd(0, 0);
if (fd < 0) {
fd = -errno;
NET_ERR("eventfd failed (%d)", fd);
NET_ERR("zvfs_eventfd failed (%d)", fd);
goto out;
}
@ -264,7 +253,7 @@ restart:
}
if (ret > 0 && ctx.events[0].revents) {
eventfd_read(ctx.events[0].fd, &value);
zvfs_eventfd_read(ctx.events[0].fd, &value);
NET_DBG("Received restart event.");
goto restart;
}

View file

@ -11,7 +11,7 @@ CONFIG_ZTEST_STACK_SIZE=1024
CONFIG_ZVFS_OPEN_MAX=10
CONFIG_REQUIRES_FULL_LIBC=y
CONFIG_EVENTFD_MAX=10
CONFIG_ZVFS_EVENTFD_MAX=10
CONFIG_NET_MAX_CONTEXTS=10
CONFIG_NET_MAX_CONN=10

View file

@ -9,7 +9,7 @@ CONFIG_ENTROPY_GENERATOR=y
CONFIG_TEST_RANDOM_GENERATOR=y
CONFIG_ZVFS_OPEN_MAX=10
CONFIG_REQUIRES_FULL_LIBC=y
CONFIG_EVENTFD_MAX=10
CONFIG_ZVFS_EVENTFD_MAX=10
CONFIG_NET_MAX_CONTEXTS=10
CONFIG_NET_MAX_CONN=10

View file

@ -41,7 +41,7 @@ CONFIG_ZVFS_OPEN_MAX=32
CONFIG_NET_SOCKETS_POLL_MAX=32
CONFIG_ZVFS_OPEN_MAX=32
CONFIG_REQUIRES_FULL_LIBC=y
CONFIG_EVENTFD_MAX=10
CONFIG_ZVFS_EVENTFD_MAX=10
CONFIG_NET_MAX_CONTEXTS=10
CONFIG_NET_MAX_CONN=10

View file

@ -5,6 +5,8 @@
*/
#include "_main.h"
#include <zephyr/posix/fcntl.h>
#include <zephyr/posix/sys/ioctl.h>
#define EFD_IN_USE_INTERNAL 0x1