kernel: Integrate object cores into kernel
Integrates object cores into the following kernel structures:

  sys_mem_blocks, k_mem_slab
  _cpu, z_kernel
  k_thread, k_timer
  k_condvar, k_event, k_mutex, k_sem
  k_mbox, k_msgq, k_pipe, k_fifo, k_lifo, k_stack

Signed-off-by: Peter Mitsis <peter.mitsis@intel.com>
commit 6df8efe354 (parent 55db86e512)
22 changed files with 629 additions and 22 deletions
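Every structure touched by this commit follows the same pattern: an optional struct k_obj_core member guarded by a per-type Kconfig symbol, a k_obj_core_init_and_link() call in the runtime init path, and a SYS_INIT() hook that registers the object type and links statically defined instances found in the type's iterable section. Below is a minimal sketch of that pattern for a hypothetical kernel object k_foo; the names k_foo, CONFIG_OBJ_CORE_FOO, K_OBJ_TYPE_FOO_ID and init_foo_obj_core_list are placeholders for illustration only, not part of this commit.

#include <zephyr/kernel.h>

struct k_foo {
	/* ... existing members ... */
#ifdef CONFIG_OBJ_CORE_FOO		/* placeholder Kconfig symbol */
	struct k_obj_core obj_core;	/* object core bookkeeping */
#endif
};

#ifdef CONFIG_OBJ_CORE_FOO
static struct k_obj_type obj_type_foo;	/* one type descriptor per object kind */
#endif

void k_foo_init(struct k_foo *foo)
{
	/* ... existing runtime initialization ... */

#ifdef CONFIG_OBJ_CORE_FOO
	/* Attach this instance to the object core framework. */
	k_obj_core_init_and_link(K_OBJ_CORE(foo), &obj_type_foo);
#endif
}

#ifdef CONFIG_OBJ_CORE_FOO
static int init_foo_obj_core_list(void)
{
	/* Register the type, recording where obj_core lives inside k_foo. */
	z_obj_type_init(&obj_type_foo, K_OBJ_TYPE_FOO_ID,	/* placeholder ID */
			offsetof(struct k_foo, obj_core));

	/* Link instances that were defined statically.  This assumes k_foo
	 * has an iterable section, as the linker-script hunk below adds for
	 * k_fifo, k_lifo, k_condvar and sys_mem_blocks_ptr.
	 */
	STRUCT_SECTION_FOREACH(k_foo, foo) {
		k_obj_core_init_and_link(K_OBJ_CORE(foo), &obj_type_foo);
	}

	return 0;
}

SYS_INIT(init_foo_obj_core_list, PRE_KERNEL_1,
	 CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
#endif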
@@ -1463,6 +1463,10 @@ struct k_timer {
	void *user_data;

	SYS_PORT_TRACING_TRACKING_FIELD(k_timer)

#ifdef CONFIG_OBJ_CORE_TIMER
	struct k_obj_core obj_core;
#endif
};

#define Z_TIMER_INITIALIZER(obj, expiry, stop) \

@@ -2201,6 +2205,11 @@ struct k_event {
	struct k_spinlock lock;

	SYS_PORT_TRACING_TRACKING_FIELD(k_event)

#ifdef CONFIG_OBJ_CORE_EVENT
	struct k_obj_core obj_core;
#endif

};

#define Z_EVENT_INITIALIZER(obj) \

@@ -2359,6 +2368,9 @@ static inline uint32_t k_event_test(struct k_event *event, uint32_t events_mask)

struct k_fifo {
	struct k_queue _queue;
#ifdef CONFIG_OBJ_CORE_FIFO
	struct k_obj_core obj_core;
#endif
};

/**

@@ -2386,11 +2398,13 @@ struct k_fifo {
 *
 * @param fifo Address of the FIFO queue.
 */
#define k_fifo_init(fifo) \
	({ \
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, init, fifo); \
	k_queue_init(&(fifo)->_queue); \
+	K_OBJ_CORE_INIT(K_OBJ_CORE(fifo), _obj_type_fifo); \
+	K_OBJ_CORE_LINK(K_OBJ_CORE(fifo)); \
	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, init, fifo); \
	})

/**

@@ -2593,6 +2607,9 @@ struct k_fifo {

struct k_lifo {
	struct k_queue _queue;
#ifdef CONFIG_OBJ_CORE_LIFO
	struct k_obj_core obj_core;
#endif
};

/**

@@ -2621,11 +2638,13 @@ struct k_lifo {
 *
 * @param lifo Address of the LIFO queue.
 */
#define k_lifo_init(lifo) \
	({ \
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_lifo, init, lifo); \
	k_queue_init(&(lifo)->_queue); \
+	K_OBJ_CORE_INIT(K_OBJ_CORE(lifo), _obj_type_lifo); \
+	K_OBJ_CORE_LINK(K_OBJ_CORE(lifo)); \
	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_lifo, init, lifo); \
	})

/**

@@ -2726,6 +2745,10 @@ struct k_stack {
	uint8_t flags;

	SYS_PORT_TRACING_TRACKING_FIELD(k_stack)

#ifdef CONFIG_OBJ_CORE_STACK
	struct k_obj_core obj_core;
#endif
};

#define Z_STACK_INITIALIZER(obj, stack_buffer, stack_num_entries) \

@@ -2882,6 +2905,10 @@ struct k_mutex {
	int owner_orig_prio;

	SYS_PORT_TRACING_TRACKING_FIELD(k_mutex)

#ifdef CONFIG_OBJ_CORE_MUTEX
	struct k_obj_core obj_core;
#endif
};

/**

@@ -2979,6 +3006,10 @@ __syscall int k_mutex_unlock(struct k_mutex *mutex);

struct k_condvar {
	_wait_q_t wait_q;

#ifdef CONFIG_OBJ_CORE_CONDVAR
	struct k_obj_core obj_core;
#endif
};

#define Z_CONDVAR_INITIALIZER(obj) \

@@ -3067,6 +3098,9 @@ struct k_sem {

	SYS_PORT_TRACING_TRACKING_FIELD(k_sem)

#ifdef CONFIG_OBJ_CORE_SEM
	struct k_obj_core obj_core;
#endif
};

#define Z_SEM_INITIALIZER(obj, initial_count, count_limit) \

@@ -4379,6 +4413,10 @@ struct k_msgq {
	uint8_t flags;

	SYS_PORT_TRACING_TRACKING_FIELD(k_msgq)

#ifdef CONFIG_OBJ_CORE_MSGQ
	struct k_obj_core obj_core;
#endif
};
/**
 * @cond INTERNAL_HIDDEN

@@ -4677,6 +4715,10 @@ struct k_mbox {
	struct k_spinlock lock;

	SYS_PORT_TRACING_TRACKING_FIELD(k_mbox)

#ifdef CONFIG_OBJ_CORE_MAILBOX
	struct k_obj_core obj_core;
#endif
};
/**
 * @cond INTERNAL_HIDDEN

@@ -4814,6 +4856,10 @@ struct k_pipe {
	uint8_t flags; /**< Flags */

	SYS_PORT_TRACING_TRACKING_FIELD(k_pipe)

#ifdef CONFIG_OBJ_CORE_PIPE
	struct k_obj_core obj_core;
#endif
};

/**

@@ -5016,16 +5062,20 @@ struct k_mem_slab {
	struct k_mem_slab_info info;

	SYS_PORT_TRACING_TRACKING_FIELD(k_mem_slab)

#ifdef CONFIG_OBJ_CORE_MEM_SLAB
	struct k_obj_core obj_core;
#endif
};

-#define Z_MEM_SLAB_INITIALIZER(obj, slab_buffer, slab_block_size, \
-			       slab_num_blocks) \
-	{ \
-	.wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
-	.lock = {}, \
-	.buffer = slab_buffer, \
-	.free_list = NULL, \
-	.info = {slab_num_blocks, slab_block_size, 0} \
+#define Z_MEM_SLAB_INITIALIZER(_slab, _slab_buffer, _slab_block_size, \
+			       _slab_num_blocks) \
+	{ \
+	.wait_q = Z_WAIT_Q_INIT(&(_slab).wait_q), \
+	.lock = {}, \
+	.buffer = _slab_buffer, \
+	.free_list = NULL, \
+	.info = {_slab_num_blocks, _slab_block_size, 0} \
	}
@@ -347,6 +347,10 @@ struct k_thread {
	struct _pipe_desc pipe_desc;
#endif

#ifdef CONFIG_OBJ_CORE_THREAD
	struct k_obj_core obj_core;
#endif

	/** arch-specifics: must always be at the end */
	struct _thread_arch arch;
};
@@ -29,6 +29,7 @@
#include <zephyr/sys/sys_heap.h>
#include <zephyr/arch/structs.h>
#include <zephyr/kernel/stats.h>
#include <zephyr/kernel/obj_core.h>
#endif

#ifdef __cplusplus

@@ -145,6 +146,10 @@ struct _cpu {
#endif
#endif

#ifdef CONFIG_OBJ_CORE_SYSTEM
	struct k_obj_core obj_core;
#endif

	/* Per CPU architecture specifics */
	struct _cpu_arch arch;
};

@@ -187,6 +192,10 @@ struct z_kernel {
	struct k_cycle_stats usage[CONFIG_MP_MAX_NUM_CPUS];
#endif

#ifdef CONFIG_OBJ_CORE_SYSTEM
	struct k_obj_core obj_core;
#endif

#if defined(CONFIG_SMP) && defined(CONFIG_SCHED_IPI_SUPPORTED)
	/* Need to signal an IPI at the next scheduling point */
	bool pending_ipi;
@@ -87,6 +87,7 @@
	ITERABLE_SECTION_RAM_GC_ALLOWED(k_fifo, 4)
	ITERABLE_SECTION_RAM_GC_ALLOWED(k_lifo, 4)
	ITERABLE_SECTION_RAM_GC_ALLOWED(k_condvar, 4)
	ITERABLE_SECTION_RAM_GC_ALLOWED(sys_mem_blocks_ptr, 4)

	ITERABLE_SECTION_RAM(net_buf_pool, 4)
@@ -104,6 +104,9 @@ struct sys_mem_blocks {
	/* Spinlock guarding access to memory block internals */
	struct k_spinlock lock;
#endif
#ifdef CONFIG_OBJ_CORE_SYS_MEM_BLOCKS
	struct k_obj_core obj_core;
#endif
};

struct sys_multi_mem_blocks {

@@ -125,11 +128,15 @@ struct sys_multi_mem_blocks {
#define _SYS_MEM_BLOCKS_DEFINE_WITH_EXT_BUF(name, blk_sz, num_blks, buf, mbmod) \
	_SYS_BITARRAY_DEFINE(_sys_mem_blocks_bitmap_##name, \
			     num_blks, mbmod); \
-	mbmod sys_mem_blocks_t name = { \
+	mbmod struct sys_mem_blocks name = { \
		.info = {num_blks, ilog2(blk_sz)}, \
		.buffer = buf, \
		.bitmap = &_sys_mem_blocks_bitmap_##name, \
-	}
+	}; \
+	STRUCT_SECTION_ITERABLE_ALTERNATE(sys_mem_blocks_ptr, \
+					  sys_mem_blocks *, \
+					  __##name##_ptr) = &name; \
+	LINKER_KEEP(__##name##_ptr);

/**
 * @brief Create a memory block object with a new backing buffer.
kernel/Kconfig

@@ -505,6 +505,113 @@ menuconfig OBJ_CORE
	  automated means.

if OBJ_CORE

config OBJ_CORE_CONDVAR
	bool "Integrate condition variables into object core framework"
	default y
	help
	  When enabled, this option integrates condition variables into the
	  object core framework.

config OBJ_CORE_EVENT
	bool "Integrate events into object core framework"
	default y if EVENTS
	help
	  When enabled, this option integrates kernel events into the object
	  core framework.

config OBJ_CORE_FIFO
	bool "Integrate FIFOs into object core framework"
	default y
	help
	  When enabled, this option integrates FIFOs into the object core
	  framework.

config OBJ_CORE_LIFO
	bool "Integrate LIFOs into object core framework"
	default y
	help
	  When enabled, this option integrates LIFOs into the object core
	  framework.

config OBJ_CORE_MAILBOX
	bool "Integrate mailboxes into object core framework"
	default y
	help
	  When enabled, this option integrates mailboxes into the object core
	  framework.

config OBJ_CORE_MEM_SLAB
	bool "Integrate memory slabs into object core framework"
	default y
	help
	  When enabled, this option integrates memory slabs into the object
	  core framework.

config OBJ_CORE_MUTEX
	bool "Integrate mutexes into object core framework"
	default y
	help
	  When enabled, this option integrates mutexes into the object core
	  framework.

config OBJ_CORE_MSGQ
	bool "Integrate message queues into object core framework"
	default y
	help
	  When enabled, this option integrates message queues into the object
	  core framework.

config OBJ_CORE_SEM
	bool "Integrate semaphores into object core framework"
	default y
	help
	  When enabled, this option integrates semaphores into the object core
	  framework.

config OBJ_CORE_PIPE
	bool "Integrate pipes into object core framework"
	default y if PIPES
	help
	  When enabled, this option integrates pipes into the object core
	  framework.

config OBJ_CORE_SEM
	bool "Integrate semaphores into object core framework"
	default y
	help
	  When enabled, this option integrates semaphores into the object core
	  framework.

config OBJ_CORE_STACK
	bool "Integrate stacks into object core framework"
	default y
	help
	  When enabled, this option integrates stacks into the object core
	  framework.

config OBJ_CORE_THREAD
	bool "Integrate threads into object core framework"
	default y
	help
	  When enabled, this option integrates threads into the object core
	  framework.

config OBJ_CORE_TIMER
	bool "Integrate timers into object core framework"
	default y
	help
	  When enabled, this option integrates timers into the object core
	  framework.

config OBJ_CORE_SYSTEM
	bool
	default y
	help
	  When enabled, this option integrates the internal CPU and kernel
	  system objects into the object core framework. As these are internal
	  structures, this option is hidden by default and only available to
	  advanced users.

endif # OBJ_CORE

menu "Work Queue Options"
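These Kconfig symbols only control whether the obj_core member and the matching init/link calls are compiled in, so code that inspects an object's core has to guard itself the same way. A small sketch of such a guard, assuming an ordinary application semaphore; my_sem and my_sem_obj_core are illustrative names, not part of this commit, while K_OBJ_CORE and struct k_obj_core are the framework interfaces used throughout the diff:

#include <zephyr/kernel.h>

K_SEM_DEFINE(my_sem, 0, 1);	/* ordinary application semaphore */

/* Return the semaphore's object core when CONFIG_OBJ_CORE_SEM is enabled,
 * NULL otherwise (the obj_core member does not exist in that case).
 */
static struct k_obj_core *my_sem_obj_core(void)
{
#ifdef CONFIG_OBJ_CORE_SEM
	return K_OBJ_CORE(&my_sem);
#else
	return NULL;
#endif
}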
@@ -10,6 +10,11 @@
#include <ksched.h>
#include <wait_q.h>
#include <zephyr/syscall_handler.h>
#include <zephyr/init.h>

#ifdef CONFIG_OBJ_CORE_CONDVAR
static struct k_obj_type obj_type_condvar;
#endif

static struct k_spinlock lock;

@@ -18,6 +23,10 @@ int z_impl_k_condvar_init(struct k_condvar *condvar)
	z_waitq_init(&condvar->wait_q);
	z_object_init(condvar);

#ifdef CONFIG_OBJ_CORE_CONDVAR
	k_obj_core_init_and_link(K_OBJ_CORE(condvar), &obj_type_condvar);
#endif

	SYS_PORT_TRACING_OBJ_INIT(k_condvar, condvar, 0);

	return 0;

@@ -125,3 +134,25 @@ int z_vrfy_k_condvar_wait(struct k_condvar *condvar, struct k_mutex *mutex,
}
#include <syscalls/k_condvar_wait_mrsh.c>
#endif

#ifdef CONFIG_OBJ_CORE_CONDVAR
static int init_condvar_obj_core_list(void)
{
	/* Initialize condvar object type */

	z_obj_type_init(&obj_type_condvar, K_OBJ_TYPE_CONDVAR_ID,
			offsetof(struct k_condvar, obj_core));

	/* Initialize and link statically defined condvars */

	STRUCT_SECTION_FOREACH(k_condvar, condvar) {
		k_obj_core_init_and_link(K_OBJ_CORE(condvar),
					 &obj_type_condvar);
	}

	return 0;
}

SYS_INIT(init_condvar_obj_core_list, PRE_KERNEL_1,
	 CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
#endif
@@ -45,6 +45,10 @@ struct event_walk_data {
	uint32_t events;
};

#ifdef CONFIG_OBJ_CORE_EVENT
static struct k_obj_type obj_type_event;
#endif

void z_impl_k_event_init(struct k_event *event)
{
	event->events = 0;

@@ -55,6 +59,10 @@ void z_impl_k_event_init(struct k_event *event)
	z_waitq_init(&event->wait_q);

	z_object_init(event);

#ifdef CONFIG_OBJ_CORE_EVENT
	k_obj_core_init_and_link(K_OBJ_CORE(event), &obj_type_event);
#endif
}

#ifdef CONFIG_USERSPACE

@@ -336,3 +344,24 @@ uint32_t z_vrfy_k_event_wait_all(struct k_event *event, uint32_t events,
}
#include <syscalls/k_event_wait_all_mrsh.c>
#endif

#ifdef CONFIG_OBJ_CORE_EVENT
static int init_event_obj_core_list(void)
{
	/* Initialize event object type */

	z_obj_type_init(&obj_type_event, K_OBJ_TYPE_EVENT_ID,
			offsetof(struct k_event, obj_core));

	/* Initialize and link statically defined events */

	STRUCT_SECTION_FOREACH(k_event, event) {
		k_obj_core_init_and_link(K_OBJ_CORE(event), &obj_type_event);
	}

	return 0;
}

SYS_INIT(init_event_obj_core_list, PRE_KERNEL_1,
	 CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
#endif
@@ -37,7 +37,6 @@
#include <zephyr/pm/device_runtime.h>
LOG_MODULE_REGISTER(os, CONFIG_KERNEL_LOG_LEVEL);

BUILD_ASSERT(CONFIG_MP_NUM_CPUS == CONFIG_MP_MAX_NUM_CPUS,
	     "CONFIG_MP_NUM_CPUS and CONFIG_MP_MAX_NUM_CPUS need to be set the same");

@@ -98,6 +97,10 @@ K_KERNEL_PINNED_STACK_ARRAY_DEFINE(z_interrupt_stacks,

extern void idle(void *unused1, void *unused2, void *unused3);

#ifdef CONFIG_OBJ_CORE_SYSTEM
static struct k_obj_type obj_type_cpu;
static struct k_obj_type obj_type_kernel;
#endif

/* LCOV_EXCL_START
 *

@@ -409,6 +412,10 @@ void z_init_cpu(int id)
	 * will keep track of this from here.
	 */
	atomic_inc(&_cpus_active);

#ifdef CONFIG_OBJ_CORE_SYSTEM
	k_obj_core_init_and_link(K_OBJ_CORE(&_kernel.cpus[id]), &obj_type_cpu);
#endif
}

/**

@@ -598,3 +605,33 @@ FUNC_NORETURN void z_cstart(void)

	CODE_UNREACHABLE; /* LCOV_EXCL_LINE */
}

#ifdef CONFIG_OBJ_CORE_SYSTEM
static int init_cpu_obj_core_list(void)
{
	/* Initialize CPU object type */

	z_obj_type_init(&obj_type_cpu, K_OBJ_TYPE_CPU_ID,
			offsetof(struct _cpu, obj_core));

	return 0;
}

static int init_kernel_obj_core_list(void)
{
	/* Initialize kernel object type */

	z_obj_type_init(&obj_type_kernel, K_OBJ_TYPE_KERNEL_ID,
			offsetof(struct z_kernel, obj_core));

	k_obj_core_init_and_link(K_OBJ_CORE(&_kernel), &obj_type_kernel);

	return 0;
}

SYS_INIT(init_cpu_obj_core_list, PRE_KERNEL_1,
	 CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);

SYS_INIT(init_kernel_obj_core_list, PRE_KERNEL_1,
	 CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
#endif
@@ -20,6 +20,10 @@
#include <ksched.h>
#include <wait_q.h>

#ifdef CONFIG_OBJ_CORE_MAILBOX
static struct k_obj_type obj_type_mailbox;
#endif

#if (CONFIG_NUM_MBOX_ASYNC_MSGS > 0)

/* asynchronous message descriptor type */

@@ -91,6 +95,10 @@ void k_mbox_init(struct k_mbox *mbox)
	z_waitq_init(&mbox->rx_msg_queue);
	mbox->lock = (struct k_spinlock) {};

#ifdef CONFIG_OBJ_CORE_MAILBOX
	k_obj_core_init_and_link(K_OBJ_CORE(mbox), &obj_type_mailbox);
#endif

	SYS_PORT_TRACING_OBJ_INIT(k_mbox, mbox);
}

@@ -447,3 +455,25 @@ int k_mbox_get(struct k_mbox *mbox, struct k_mbox_msg *rx_msg, void *buffer,

	return result;
}

#ifdef CONFIG_OBJ_CORE_MAILBOX

static int init_mailbox_obj_core_list(void)
{
	/* Initialize mailbox object type */

	z_obj_type_init(&obj_type_mailbox, K_OBJ_TYPE_MBOX_ID,
			offsetof(struct k_mbox, obj_core));

	/* Initialize and link statically defined mailboxes */

	STRUCT_SECTION_FOREACH(k_mbox, mbox) {
		k_obj_core_init_and_link(K_OBJ_CORE(mbox), &obj_type_mailbox);
	}

	return 0;
}

SYS_INIT(init_mailbox_obj_core_list, PRE_KERNEL_1,
	 CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
#endif
@@ -17,6 +17,10 @@
#include <ksched.h>
#include <wait_q.h>

#ifdef CONFIG_OBJ_CORE_MEM_SLAB
static struct k_obj_type obj_type_mem_slab;
#endif

/**
 * @brief Initialize kernel memory slab subsystem.
 *

@@ -55,23 +59,36 @@ static int create_free_list(struct k_mem_slab *slab)
 *
 * @return 0 on success, fails otherwise.
 */
-static int init_mem_slab_module(void)
+static int init_mem_slab_obj_core_list(void)
{
	int rc = 0;

	/* Initialize mem_slab object type */

#ifdef CONFIG_OBJ_CORE_MEM_SLAB
	z_obj_type_init(&obj_type_mem_slab, K_OBJ_TYPE_MEM_SLAB_ID,
			offsetof(struct k_mem_slab, obj_core));
#endif

	/* Initialize statically defined mem_slabs */

	STRUCT_SECTION_FOREACH(k_mem_slab, slab) {
		rc = create_free_list(slab);
		if (rc < 0) {
			goto out;
		}
		z_object_init(slab);

#ifdef CONFIG_OBJ_CORE_MEM_SLAB
		k_obj_core_init_and_link(K_OBJ_CORE(slab), &obj_type_mem_slab);
#endif
	}

out:
	return rc;
}

-SYS_INIT(init_mem_slab_module, PRE_KERNEL_1,
+SYS_INIT(init_mem_slab_obj_core_list, PRE_KERNEL_1,
	 CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);

int k_mem_slab_init(struct k_mem_slab *slab, void *buffer,

@@ -94,6 +111,10 @@ int k_mem_slab_init(struct k_mem_slab *slab, void *buffer,
		goto out;
	}

#ifdef CONFIG_OBJ_CORE_MEM_SLAB
	k_obj_core_init_and_link(K_OBJ_CORE(slab), &obj_type_mem_slab);
#endif

	z_waitq_init(&slab->wait_q);
	z_object_init(slab);
out:
@@ -25,6 +25,10 @@
#include <kernel_internal.h>
#include <zephyr/sys/check.h>

#ifdef CONFIG_OBJ_CORE_MSGQ
static struct k_obj_type obj_type_msgq;
#endif

#ifdef CONFIG_POLL
static inline void handle_poll_events(struct k_msgq *msgq, uint32_t state)
{

@@ -49,6 +53,10 @@ void k_msgq_init(struct k_msgq *msgq, char *buffer, size_t msg_size,
	sys_dlist_init(&msgq->poll_events);
#endif /* CONFIG_POLL */

#ifdef CONFIG_OBJ_CORE_MSGQ
	k_obj_core_init_and_link(K_OBJ_CORE(msgq), &obj_type_msgq);
#endif

	SYS_PORT_TRACING_OBJ_INIT(k_msgq, msgq);

	z_object_init(msgq);

@@ -409,3 +417,25 @@ static inline uint32_t z_vrfy_k_msgq_num_used_get(struct k_msgq *msgq)
#include <syscalls/k_msgq_num_used_get_mrsh.c>

#endif

#ifdef CONFIG_OBJ_CORE_MSGQ
static int init_msgq_obj_core_list(void)
{
	/* Initialize msgq object type */

	z_obj_type_init(&obj_type_msgq, K_OBJ_TYPE_MSGQ_ID,
			offsetof(struct k_msgq, obj_core));

	/* Initialize and link statically defined message queues */

	STRUCT_SECTION_FOREACH(k_msgq, msgq) {
		k_obj_core_init_and_link(K_OBJ_CORE(msgq), &obj_type_msgq);
	}

	return 0;
};

SYS_INIT(init_msgq_obj_core_list, PRE_KERNEL_1,
	 CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);

#endif
@@ -46,6 +46,10 @@ LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);
 */
static struct k_spinlock lock;

#ifdef CONFIG_OBJ_CORE_MUTEX
static struct k_obj_type obj_type_mutex;
#endif

int z_impl_k_mutex_init(struct k_mutex *mutex)
{
	mutex->owner = NULL;

@@ -55,6 +59,10 @@ int z_impl_k_mutex_init(struct k_mutex *mutex)

	z_object_init(mutex);

#ifdef CONFIG_OBJ_CORE_MUTEX
	k_obj_core_init_and_link(K_OBJ_CORE(mutex), &obj_type_mutex);
#endif

	SYS_PORT_TRACING_OBJ_INIT(k_mutex, mutex, 0);

	return 0;

@@ -281,3 +289,24 @@ static inline int z_vrfy_k_mutex_unlock(struct k_mutex *mutex)
}
#include <syscalls/k_mutex_unlock_mrsh.c>
#endif

#ifdef CONFIG_OBJ_CORE_MUTEX
static int init_mutex_obj_core_list(void)
{
	/* Initialize mutex object type */

	z_obj_type_init(&obj_type_mutex, K_OBJ_TYPE_MUTEX_ID,
			offsetof(struct k_mutex, obj_core));

	/* Initialize and link statically defined mutexes */

	STRUCT_SECTION_FOREACH(k_mutex, mutex) {
		k_obj_core_init_and_link(K_OBJ_CORE(mutex), &obj_type_mutex);
	}

	return 0;
}

SYS_INIT(init_mutex_obj_core_list, PRE_KERNEL_1,
	 CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
#endif
@@ -31,6 +31,10 @@ static int pipe_get_internal(k_spinlock_key_t key, struct k_pipe *pipe,
			     void *data, size_t bytes_to_read,
			     size_t *bytes_read, size_t min_xfer,
			     k_timeout_t timeout);
#ifdef CONFIG_OBJ_CORE_PIPE
static struct k_obj_type obj_type_pipe;
#endif

void k_pipe_init(struct k_pipe *pipe, unsigned char *buffer, size_t size)
{

@@ -50,6 +54,10 @@ void k_pipe_init(struct k_pipe *pipe, unsigned char *buffer, size_t size)
	sys_dlist_init(&pipe->poll_events);
#endif
	z_object_init(pipe);

#ifdef CONFIG_OBJ_CORE_PIPE
	k_obj_core_init_and_link(K_OBJ_CORE(pipe), &obj_type_pipe);
#endif
}

int z_impl_k_pipe_alloc_init(struct k_pipe *pipe, size_t size)

@@ -801,3 +809,24 @@ size_t z_vrfy_k_pipe_write_avail(struct k_pipe *pipe)
}
#include <syscalls/k_pipe_write_avail_mrsh.c>
#endif

#ifdef CONFIG_OBJ_CORE_PIPE
static int init_pipe_obj_core_list(void)
{
	/* Initialize pipe object type */

	z_obj_type_init(&obj_type_pipe, K_OBJ_TYPE_PIPE_ID,
			offsetof(struct k_pipe, obj_core));

	/* Initialize and link statically defined pipes */

	STRUCT_SECTION_FOREACH(k_pipe, pipe) {
		k_obj_core_init_and_link(K_OBJ_CORE(pipe), &obj_type_pipe);
	}

	return 0;
}

SYS_INIT(init_pipe_obj_core_list, PRE_KERNEL_1,
	 CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
#endif
@@ -432,3 +432,49 @@ static inline void *z_vrfy_k_queue_peek_tail(struct k_queue *queue)
#include <syscalls/k_queue_peek_tail_mrsh.c>

#endif /* CONFIG_USERSPACE */

#ifdef CONFIG_OBJ_CORE_FIFO
struct k_obj_type _obj_type_fifo;

static int init_fifo_obj_core_list(void)
{
	/* Initialize fifo object type */

	z_obj_type_init(&_obj_type_fifo, K_OBJ_TYPE_FIFO_ID,
			offsetof(struct k_fifo, obj_core));

	/* Initialize and link statically defined fifos */

	STRUCT_SECTION_FOREACH(k_fifo, fifo) {
		k_obj_core_init_and_link(K_OBJ_CORE(fifo), &_obj_type_fifo);
	}

	return 0;
}

SYS_INIT(init_fifo_obj_core_list, PRE_KERNEL_1,
	 CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
#endif

#ifdef CONFIG_OBJ_CORE_LIFO
struct k_obj_type _obj_type_lifo;

static int init_lifo_obj_core_list(void)
{
	/* Initialize lifo object type */

	z_obj_type_init(&_obj_type_lifo, K_OBJ_TYPE_LIFO_ID,
			offsetof(struct k_lifo, obj_core));

	/* Initialize and link statically defined lifos */

	STRUCT_SECTION_FOREACH(k_lifo, lifo) {
		k_obj_core_init_and_link(K_OBJ_CORE(lifo), &_obj_type_lifo);
	}

	return 0;
}

SYS_INIT(init_lifo_obj_core_list, PRE_KERNEL_1,
	 CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
#endif
@@ -1742,6 +1742,10 @@ static void end_thread(struct k_thread *thread)
	z_thread_cmsis_status_mask_clear(thread);
#endif

#ifdef CONFIG_OBJ_CORE_THREAD
	k_obj_core_unlink(K_OBJ_CORE(thread));
#endif

#ifdef CONFIG_USERSPACE
	z_mem_domain_exit_thread(thread);
	z_thread_perms_all_clear(thread);
kernel/sem.c

@@ -38,6 +38,10 @@
 */
static struct k_spinlock lock;

#ifdef CONFIG_OBJ_CORE_SEM
static struct k_obj_type obj_type_sem;
#endif

int z_impl_k_sem_init(struct k_sem *sem, unsigned int initial_count,
		      unsigned int limit)
{

@@ -61,6 +65,10 @@ int z_impl_k_sem_init(struct k_sem *sem, unsigned int initial_count,
#endif
	z_object_init(sem);

#ifdef CONFIG_OBJ_CORE_SEM
	k_obj_core_init_and_link(K_OBJ_CORE(sem), &obj_type_sem);
#endif

	return 0;
}

@@ -200,3 +208,24 @@ static inline unsigned int z_vrfy_k_sem_count_get(struct k_sem *sem)
#include <syscalls/k_sem_count_get_mrsh.c>

#endif

#ifdef CONFIG_OBJ_CORE_SEM
static int init_sem_obj_core_list(void)
{
	/* Initialize semaphore object type */

	z_obj_type_init(&obj_type_sem, K_OBJ_TYPE_SEM_ID,
			offsetof(struct k_sem, obj_core));

	/* Initialize and link statically defined semaphores */

	STRUCT_SECTION_FOREACH(k_sem, sem) {
		k_obj_core_init_and_link(K_OBJ_CORE(sem), &obj_type_sem);
	}

	return 0;
}

SYS_INIT(init_sem_obj_core_list, PRE_KERNEL_1,
	 CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
#endif
@@ -19,6 +19,10 @@
#include <zephyr/syscall_handler.h>
#include <kernel_internal.h>

#ifdef CONFIG_OBJ_CORE_STACK
static struct k_obj_type obj_type_stack;
#endif

void k_stack_init(struct k_stack *stack, stack_data_t *buffer,
		  uint32_t num_entries)
{

@@ -29,6 +33,10 @@ void k_stack_init(struct k_stack *stack, stack_data_t *buffer,

	SYS_PORT_TRACING_OBJ_INIT(k_stack, stack);
	z_object_init(stack);

#ifdef CONFIG_OBJ_CORE_STACK
	k_obj_core_init_and_link(K_OBJ_CORE(stack), &obj_type_stack);
#endif
}

int32_t z_impl_k_stack_alloc_init(struct k_stack *stack, uint32_t num_entries)

@@ -185,3 +193,24 @@ static inline int z_vrfy_k_stack_pop(struct k_stack *stack,
}
#include <syscalls/k_stack_pop_mrsh.c>
#endif

#ifdef CONFIG_OBJ_CORE_STACK
static int init_stack_obj_core_list(void)
{
	/* Initialize stack object type */

	z_obj_type_init(&obj_type_stack, K_OBJ_TYPE_STACK_ID,
			offsetof(struct k_stack, obj_core));

	/* Initialize and link statically defined stacks */

	STRUCT_SECTION_FOREACH(k_stack, stack) {
		k_obj_core_init_and_link(K_OBJ_CORE(stack), &obj_type_stack);
	}

	return 0;
}

SYS_INIT(init_stack_obj_core_list, PRE_KERNEL_1,
	 CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
#endif
@@ -33,6 +33,25 @@

LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);

#ifdef CONFIG_OBJ_CORE_THREAD
static struct k_obj_type obj_type_thread;

static int init_thread_obj_core_list(void)
{
	/* Initialize thread object type */

#ifdef CONFIG_OBJ_CORE_THREAD
	z_obj_type_init(&obj_type_thread, K_OBJ_TYPE_THREAD_ID,
			offsetof(struct k_thread, obj_core));
#endif

	return 0;
}

SYS_INIT(init_thread_obj_core_list, PRE_KERNEL_1,
	 CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
#endif

#ifdef CONFIG_THREAD_MONITOR
/* This lock protects the linked list of active threads; i.e. the
 * initial _kernel.threads pointer and the linked list made up of

@@ -543,6 +562,10 @@ char *z_setup_new_thread(struct k_thread *new_thread,

	Z_ASSERT_VALID_PRIO(prio, entry);

#ifdef CONFIG_OBJ_CORE_THREAD
	k_obj_core_init_and_link(K_OBJ_CORE(new_thread), &obj_type_thread);
#endif

#ifdef CONFIG_USERSPACE
	__ASSERT((options & K_USER) == 0U || z_stack_is_user_capable(stack),
		 "user thread %p with kernel-only stack %p",
@@ -15,6 +15,10 @@

static struct k_spinlock lock;

#ifdef CONFIG_OBJ_CORE_TIMER
static struct k_obj_type obj_type_timer;
#endif

/**
 * @brief Handle expiration of a kernel timer object.
 *

@@ -125,6 +129,10 @@ void k_timer_init(struct k_timer *timer,
	timer->user_data = NULL;

	z_object_init(timer);

#ifdef CONFIG_OBJ_CORE_TIMER
	k_obj_core_init_and_link(K_OBJ_CORE(timer), &obj_type_timer);
#endif
}

@@ -325,3 +333,23 @@ static inline void z_vrfy_k_timer_user_data_set(struct k_timer *timer,
#include <syscalls/k_timer_user_data_set_mrsh.c>

#endif

#ifdef CONFIG_OBJ_CORE_TIMER
static int init_timer_obj_core_list(void)
{
	/* Initialize timer object type */

	z_obj_type_init(&obj_type_timer, K_OBJ_TYPE_TIMER_ID,
			offsetof(struct k_timer, obj_core));

	/* Initialize and link statically defined timers */

	STRUCT_SECTION_FOREACH(k_timer, timer) {
		k_obj_core_init_and_link(K_OBJ_CORE(timer), &obj_type_timer);
	}

	return 0;
}
SYS_INIT(init_timer_obj_core_list, PRE_KERNEL_1,
	 CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
#endif
@@ -133,4 +133,12 @@ config SYS_MEM_BLOCKS_RUNTIME_STATS
	  blocks statistics related to the current and maximum number
	  of allocations in a given memory block.

config OBJ_CORE_SYS_MEM_BLOCKS
	bool "Kernel object for memory blocks"
	depends on SYS_MEM_BLOCKS && OBJ_CORE
	default y if SYS_MEM_BLOCKS && OBJ_CORE
	help
	  This option allows object cores to be integrated into memory block
	  objects.

endmenu
@@ -10,6 +10,7 @@
#include <zephyr/sys/heap_listener.h>
#include <zephyr/sys/mem_blocks.h>
#include <zephyr/sys/util.h>
#include <zephyr/init.h>

static void *alloc_blocks(sys_mem_blocks_t *mem_block, size_t num_blocks)
{

@@ -364,7 +365,7 @@ int sys_multi_mem_blocks_alloc(sys_multi_mem_blocks_t *group,
		goto out;
	}

-	if (count > allocator->num_blocks) {
+	if (count > allocator->info.num_blocks) {
		ret = -ENOMEM;
		goto out;
	}

@@ -454,3 +455,28 @@ int sys_mem_blocks_runtime_stats_reset_max(sys_mem_blocks_t *mem_block)
	return 0;
}
#endif

#ifdef CONFIG_OBJ_CORE_SYS_MEM_BLOCKS
static struct k_obj_type obj_type_sys_mem_blocks;

static int init_sys_mem_blocks_obj_core_list(void)
{
	/* Initialize the sys_mem_blocks object type */

	z_obj_type_init(&obj_type_sys_mem_blocks, K_OBJ_TYPE_MEM_BLOCK_ID,
			offsetof(struct sys_mem_blocks, obj_core));

	/* Initialize statically defined sys_mem_blocks */

	STRUCT_SECTION_FOREACH_ALTERNATE(sys_mem_blocks_ptr,
					 sys_mem_blocks *, block_pp) {
		k_obj_core_init_and_link(K_OBJ_CORE(*block_pp),
					 &obj_type_sys_mem_blocks);
	}

	return 0;
}

SYS_INIT(init_sys_mem_blocks_obj_core_list, PRE_KERNEL_1,
	 CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
#endif