The POSIX_MAX_FDS option does not correspond to any standard POSIX
option. It was used to define the size of the file descriptor table,
which is by no means exclusively used by POSIX (also net, fs, ...).

POSIX_MAX_FDS is being deprecated in order to ensure that Zephyr's
POSIX Kconfig variables correspond to those defined in the
specification, as of IEEE 1003.1-2017; namely, POSIX_OPEN_MAX.

CONFIG_POSIX_MAX_OPEN_FILES is being deprecated for the same reason.
To mitigate any possible layering violations, that option is not user
selectable. It tracks the newly added CONFIG_ZVFS_OPEN_MAX option,
which is native to Zephyr.

With this deprecation, we introduce the following Kconfig option,
which maps directly to a standard POSIX Option Group by simply
removing "CONFIG_":

* CONFIG_POSIX_DEVICE_IO

Similarly, with this deprecation, we introduce the following Kconfig
option, which maps directly to a standard POSIX Option by simply
removing "CONFIG_":

* CONFIG_POSIX_OPEN_MAX

In order to maintain parity with the current feature set, we
introduce the following Kconfig options:

* CONFIG_POSIX_DEVICE_IO_ALIAS_CLOSE
* CONFIG_POSIX_DEVICE_IO_ALIAS_OPEN
* CONFIG_POSIX_DEVICE_IO_ALIAS_READ
* CONFIG_POSIX_DEVICE_IO_ALIAS_WRITE

Gate open(), close(), read(), and write() via the
CONFIG_POSIX_DEVICE_IO Kconfig option and move their implementations
into device_io.c, to be conformant with the spec.

Lastly, stage function names for the upcoming ZVFS work, to be
completed as part of the LTSv3 Roadmap (e.g. zvfs_open(), ...).

Signed-off-by: Chris Friedt <cfriedt@tenstorrent.com>
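
As a minimal usage sketch (the prj.conf fragment and the file path are
hypothetical, not part of this change):

    /* prj.conf (hypothetical):
     *   CONFIG_POSIX_DEVICE_IO=y
     *   CONFIG_ZVFS_OPEN_MAX=16
     */
    #include <zephyr/posix/fcntl.h>
    #include <zephyr/posix/unistd.h>

    void device_io_example(void)
    {
    	char buf[16];

    	/* open()/read()/write()/close() are only available when
    	 * CONFIG_POSIX_DEVICE_IO=y, per this change.
    	 */
    	int fd = open("/lfs/example.txt", O_RDWR);

    	if (fd >= 0) {
    		(void)write(fd, "hi", 2);
    		(void)read(fd, buf, sizeof(buf));
    		(void)close(fd);
    	}
    }
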
/*
 * Copyright (c) 2018 Linaro Limited
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * @brief File descriptor table
 *
 * This file provides a generic file descriptor table implementation, suitable
 * for any I/O object implementing POSIX I/O semantics (i.e. read/write +
 * aux operations).
 */

#include <errno.h>
#include <string.h>

#include <zephyr/posix/fcntl.h>
#include <zephyr/kernel.h>
#include <zephyr/sys/fdtable.h>
#include <zephyr/sys/speculation.h>
#include <zephyr/internal/syscall_handler.h>
#include <zephyr/sys/atomic.h>

struct fd_entry {
	void *obj;
	const struct fd_op_vtable *vtable;
	atomic_t refcount;
	struct k_mutex lock;
	struct k_condvar cond;
};

#if defined(CONFIG_POSIX_DEVICE_IO)
static const struct fd_op_vtable stdinout_fd_op_vtable;

BUILD_ASSERT(CONFIG_ZVFS_OPEN_MAX >= 3, "CONFIG_ZVFS_OPEN_MAX >= 3 for CONFIG_POSIX_DEVICE_IO");
#endif /* defined(CONFIG_POSIX_DEVICE_IO) */

static struct fd_entry fdtable[CONFIG_ZVFS_OPEN_MAX] = {
#if defined(CONFIG_POSIX_DEVICE_IO)
	/*
	 * Predefine entries for stdin/stdout/stderr.
	 */
	{
		/* STDIN */
		.vtable = &stdinout_fd_op_vtable,
		.refcount = ATOMIC_INIT(1),
		.lock = Z_MUTEX_INITIALIZER(fdtable[0].lock),
		.cond = Z_CONDVAR_INITIALIZER(fdtable[0].cond),
	},
	{
		/* STDOUT */
		.vtable = &stdinout_fd_op_vtable,
		.refcount = ATOMIC_INIT(1),
		.lock = Z_MUTEX_INITIALIZER(fdtable[1].lock),
		.cond = Z_CONDVAR_INITIALIZER(fdtable[1].cond),
	},
	{
		/* STDERR */
		.vtable = &stdinout_fd_op_vtable,
		.refcount = ATOMIC_INIT(1),
		.lock = Z_MUTEX_INITIALIZER(fdtable[2].lock),
		.cond = Z_CONDVAR_INITIALIZER(fdtable[2].cond),
	},
#else
	{0},
#endif
};

static K_MUTEX_DEFINE(fdtable_lock);

static int z_fd_ref(int fd)
{
	return atomic_inc(&fdtable[fd].refcount) + 1;
}

static int z_fd_unref(int fd)
{
	atomic_val_t old_rc;

	/* The reference count must be checked to avoid decrementing it below
	 * zero, which would cause a file descriptor leak. The loop below
	 * performs an atomic decrement only if the refcount is greater than
	 * zero; otherwise, the refcount is not written at all.
	 */
	do {
		old_rc = atomic_get(&fdtable[fd].refcount);
		if (!old_rc) {
			return 0;
		}
	} while (!atomic_cas(&fdtable[fd].refcount, old_rc, old_rc - 1));

	if (old_rc != 1) {
		return old_rc - 1;
	}

	fdtable[fd].obj = NULL;
	fdtable[fd].vtable = NULL;

	return 0;
}

static int _find_fd_entry(void)
{
	int fd;

	for (fd = 0; fd < ARRAY_SIZE(fdtable); fd++) {
		if (!atomic_get(&fdtable[fd].refcount)) {
			return fd;
		}
	}

	errno = ENFILE;
	return -1;
}

static int _check_fd(int fd)
{
	if (fd < 0 || fd >= ARRAY_SIZE(fdtable)) {
		errno = EBADF;
		return -1;
	}

	fd = k_array_index_sanitize(fd, ARRAY_SIZE(fdtable));

	if (!atomic_get(&fdtable[fd].refcount)) {
		errno = EBADF;
		return -1;
	}

	return 0;
}

#ifdef CONFIG_ZTEST
bool fdtable_fd_is_initialized(int fd)
{
	struct k_mutex ref_lock;
	struct k_condvar ref_cond;

	if (fd < 0 || fd >= ARRAY_SIZE(fdtable)) {
		return false;
	}

	ref_lock = (struct k_mutex)Z_MUTEX_INITIALIZER(fdtable[fd].lock);
	if (memcmp(&ref_lock, &fdtable[fd].lock, sizeof(ref_lock)) != 0) {
		return false;
	}

	ref_cond = (struct k_condvar)Z_CONDVAR_INITIALIZER(fdtable[fd].cond);
	if (memcmp(&ref_cond, &fdtable[fd].cond, sizeof(ref_cond)) != 0) {
		return false;
	}

	return true;
}
#endif /* CONFIG_ZTEST */

void *z_get_fd_obj(int fd, const struct fd_op_vtable *vtable, int err)
{
	struct fd_entry *entry;

	if (_check_fd(fd) < 0) {
		return NULL;
	}

	entry = &fdtable[fd];

	if (vtable != NULL && entry->vtable != vtable) {
		errno = err;
		return NULL;
	}

	return entry->obj;
}

static int z_get_fd_by_obj_and_vtable(void *obj, const struct fd_op_vtable *vtable)
{
	int fd;

	for (fd = 0; fd < ARRAY_SIZE(fdtable); fd++) {
		if (fdtable[fd].obj == obj && fdtable[fd].vtable == vtable) {
			return fd;
		}
	}

	errno = ENFILE;
	return -1;
}

bool z_get_obj_lock_and_cond(void *obj, const struct fd_op_vtable *vtable, struct k_mutex **lock,
			     struct k_condvar **cond)
{
	int fd;
	struct fd_entry *entry;

	fd = z_get_fd_by_obj_and_vtable(obj, vtable);
	if (_check_fd(fd) < 0) {
		return false;
	}

	entry = &fdtable[fd];

	if (lock) {
		*lock = &entry->lock;
	}

	if (cond) {
		*cond = &entry->cond;
	}

	return true;
}
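
/* Illustrative sketch (not part of this file; "my_fd_op_vtable" and
 * "data_ready" are hypothetical): a socket-like implementation can use
 * z_get_obj_lock_and_cond() to wait for readiness on the per-fd condition
 * variable rather than holding the lock across the whole operation:
 *
 *	struct k_mutex *lock;
 *	struct k_condvar *cond;
 *
 *	if (z_get_obj_lock_and_cond(obj, &my_fd_op_vtable, &lock, &cond)) {
 *		(void)k_mutex_lock(lock, K_FOREVER);
 *		while (!data_ready(obj)) {
 *			(void)k_condvar_wait(cond, lock, K_FOREVER);
 *		}
 *		k_mutex_unlock(lock);
 *	}
 */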

void *z_get_fd_obj_and_vtable(int fd, const struct fd_op_vtable **vtable,
			      struct k_mutex **lock)
{
	struct fd_entry *entry;

	if (_check_fd(fd) < 0) {
		return NULL;
	}

	entry = &fdtable[fd];
	*vtable = entry->vtable;

	if (lock) {
		*lock = &entry->lock;
	}

	return entry->obj;
}

int z_reserve_fd(void)
{
	int fd;

	(void)k_mutex_lock(&fdtable_lock, K_FOREVER);

	fd = _find_fd_entry();
	if (fd >= 0) {
		/* Mark entry as used, z_finalize_fd() will fill it in. */
		(void)z_fd_ref(fd);
		fdtable[fd].obj = NULL;
		fdtable[fd].vtable = NULL;
		k_mutex_init(&fdtable[fd].lock);
		k_condvar_init(&fdtable[fd].cond);
	}

	k_mutex_unlock(&fdtable_lock);

	return fd;
}

void z_finalize_fd(int fd, void *obj, const struct fd_op_vtable *vtable)
{
	/* Assumes fd was already bounds-checked. */
#ifdef CONFIG_USERSPACE
	/* Descriptor context objects are inserted into the table when they
	 * are ready for use. Mark the object as initialized and grant the
	 * caller (and only the caller) access.
	 *
	 * This call is a no-op if obj is invalid or points to something
	 * that is not a kernel object.
	 */
	k_object_recycle(obj);
#endif
	fdtable[fd].obj = obj;
	fdtable[fd].vtable = vtable;

	/* Let the object know about the lock just in case it needs it
	 * for something. For BSD sockets, the lock is used with condition
	 * variables to avoid keeping the lock for a long period of time.
	 */
	if (vtable && vtable->ioctl) {
		(void)z_fdtable_call_ioctl(vtable, obj, ZFD_IOCTL_SET_LOCK,
					   &fdtable[fd].lock);
	}
}

void z_free_fd(int fd)
{
	/* Assumes fd was already bounds-checked. */
	(void)z_fd_unref(fd);
}

int z_alloc_fd(void *obj, const struct fd_op_vtable *vtable)
{
	int fd;

	fd = z_reserve_fd();
	if (fd >= 0) {
		z_finalize_fd(fd, obj, vtable);
	}

	return fd;
}
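
/* Illustrative sketch (hypothetical names, not part of this file): a
 * subsystem can expose an object through the fd table by defining an
 * fd_op_vtable and calling z_alloc_fd():
 *
 *	static ssize_t my_read_vmeth(void *obj, void *buf, size_t sz)
 *	{
 *		return 0; // fill buf from the underlying object
 *	}
 *
 *	static const struct fd_op_vtable my_fd_op_vtable = {
 *		.read = my_read_vmeth,
 *	};
 *
 *	int my_open(void *obj)
 *	{
 *		return z_alloc_fd(obj, &my_fd_op_vtable); // -1 + errno on failure
 *	}
 *
 * z_reserve_fd() and z_finalize_fd() can be called separately when the
 * backing object is only ready after the fd number is known.
 */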

ssize_t zvfs_read(int fd, void *buf, size_t sz)
{
	ssize_t res;

	if (_check_fd(fd) < 0) {
		return -1;
	}

	(void)k_mutex_lock(&fdtable[fd].lock, K_FOREVER);

	res = fdtable[fd].vtable->read(fdtable[fd].obj, buf, sz);

	k_mutex_unlock(&fdtable[fd].lock);

	return res;
}

ssize_t zvfs_write(int fd, const void *buf, size_t sz)
{
	ssize_t res;

	if (_check_fd(fd) < 0) {
		return -1;
	}

	(void)k_mutex_lock(&fdtable[fd].lock, K_FOREVER);

	res = fdtable[fd].vtable->write(fdtable[fd].obj, buf, sz);

	k_mutex_unlock(&fdtable[fd].lock);

	return res;
}

int zvfs_close(int fd)
{
	int res;

	if (_check_fd(fd) < 0) {
		return -1;
	}

	(void)k_mutex_lock(&fdtable[fd].lock, K_FOREVER);

	res = fdtable[fd].vtable->close(fdtable[fd].obj);

	k_mutex_unlock(&fdtable[fd].lock);

	z_free_fd(fd);

	return res;
}
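
/* Hypothetical sketch: per the commit message, with CONFIG_POSIX_DEVICE_IO
 * the POSIX wrappers in device_io.c can delegate to the zvfs_*() functions
 * above, e.g.:
 *
 *	ssize_t read(int fd, void *buf, size_t sz)
 *	{
 *		return zvfs_read(fd, buf, sz);
 *	}
 */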

#ifdef CONFIG_POSIX_FSYNC
int fsync(int fd)
{
	if (_check_fd(fd) < 0) {
		return -1;
	}

	return z_fdtable_call_ioctl(fdtable[fd].vtable, fdtable[fd].obj, ZFD_IOCTL_FSYNC);
}
FUNC_ALIAS(fsync, _fsync, int);
#endif /* CONFIG_POSIX_FSYNC */

off_t zvfs_lseek(int fd, off_t offset, int whence)
{
	if (_check_fd(fd) < 0) {
		return -1;
	}

	return z_fdtable_call_ioctl(fdtable[fd].vtable, fdtable[fd].obj, ZFD_IOCTL_LSEEK, offset,
				    whence);
}

int ioctl(int fd, unsigned long request, ...)
{
	va_list args;
	int res;

	if (_check_fd(fd) < 0) {
		return -1;
	}

	va_start(args, request);
	res = fdtable[fd].vtable->ioctl(fdtable[fd].obj, request, args);
	va_end(args);

	return res;
}

int zvfs_fcntl(int fd, int cmd, va_list args)
{
	int res;

	if (_check_fd(fd) < 0) {
		return -1;
	}

	/* The remaining commands are per-fd and are handled by the ioctl
	 * vmethod.
	 */
	res = fdtable[fd].vtable->ioctl(fdtable[fd].obj, cmd, args);

	return res;
}
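
/* Illustrative sketch (hypothetical "my_ioctl_vmeth"): a vtable's ioctl
 * vmethod receives both ioctl() requests and fcntl() commands, and
 * dispatches on the request code:
 *
 *	static int my_ioctl_vmeth(void *obj, unsigned int request, va_list args)
 *	{
 *		switch (request) {
 *		case ZFD_IOCTL_FSYNC:
 *			return 0; // nothing buffered, nothing to flush
 *		case F_GETFL:
 *			return 0; // no file status flags set
 *		default:
 *			errno = EINVAL;
 *			return -1;
 *		}
 *	}
 */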

#if defined(CONFIG_POSIX_DEVICE_IO)
/*
 * fd operations for stdin/stdout/stderr
 */

int z_impl_zephyr_write_stdout(const char *buf, int nbytes);

static ssize_t stdinout_read_vmeth(void *obj, void *buffer, size_t count)
{
	return 0;
}

static ssize_t stdinout_write_vmeth(void *obj, const void *buffer, size_t count)
{
#if defined(CONFIG_BOARD_NATIVE_POSIX)
	return zvfs_write(1, buffer, count);
#elif defined(CONFIG_NEWLIB_LIBC) || defined(CONFIG_ARCMWDT_LIBC)
	return z_impl_zephyr_write_stdout(buffer, count);
#else
	return 0;
#endif
}

static int stdinout_ioctl_vmeth(void *obj, unsigned int request, va_list args)
{
	errno = EINVAL;
	return -1;
}

static const struct fd_op_vtable stdinout_fd_op_vtable = {
	.read = stdinout_read_vmeth,
	.write = stdinout_write_vmeth,
	.ioctl = stdinout_ioctl_vmeth,
};

#endif /* defined(CONFIG_POSIX_DEVICE_IO) */