kernel: remove legacy fields in _kernel

UP should just use _kernel.cpus[0].

Signed-off-by: Andrew Boie <andrew.p.boie@intel.com>
parent b07489614f
commit a203d21962
16 changed files with 44 additions and 61 deletions
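The substantive change is in the last group of hunks (the `struct z_kernel` definition): the kernel struct used to union the per-CPU array with standalone `nested`/`irq_stack`/`current` fields so pre-SMP code could keep writing `_kernel.nested` and taking assembly offsets against `_kernel_t`. The aliasing only lined up because `struct _cpu` begins with those same three fields in the same order; this commit drops the union and makes `_kernel.cpus[0]` the one canonical spelling on uniprocessor (UP) builds. A condensed sketch of the layout change (the `_before`/`_after` names are illustrative; field types and comments are taken from the diff below):

	/* Before: legacy fields aliased onto cpus[0] via an anonymous union */
	struct z_kernel_before {
		union {
			struct _cpu cpus[CONFIG_MP_NUM_CPUS];
	#ifndef CONFIG_SMP
			struct {
				u32_t nested;             /* nested interrupt count */
				char *irq_stack;          /* interrupt stack pointer base */
				struct k_thread *current; /* currently scheduled thread */
			};
	#endif
		};
		/* ... timeout queue, thread list, etc. unchanged ... */
	};

	/* After: one canonical location; UP code spells it _kernel.cpus[0].xxx */
	struct z_kernel_after {
		struct _cpu cpus[CONFIG_MP_NUM_CPUS];
		/* ... */
	};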
@@ -307,13 +307,13 @@
 	ld \reg2, [\reg1, ___cpu_t_nested_OFFSET]
 #else
 	mov \reg1, _kernel
-	ld \reg2, [\reg1, ___kernel_t_nested_OFFSET]
+	ld \reg2, [\reg1, _kernel_offset_to_nested]
 #endif
 	add \reg2, \reg2, 1
 #ifdef CONFIG_SMP
 	st \reg2, [\reg1, ___cpu_t_nested_OFFSET]
 #else
-	st \reg2, [\reg1, ___kernel_t_nested_OFFSET]
+	st \reg2, [\reg1, _kernel_offset_to_nested]
 #endif
 	cmp \reg2, 1
 .endm

@@ -329,13 +329,13 @@
 	ld \reg2, [\reg1, ___cpu_t_nested_OFFSET]
 #else
 	mov \reg1, _kernel
-	ld \reg2, [\reg1, ___kernel_t_nested_OFFSET]
+	ld \reg2, [\reg1, _kernel_offset_to_nested]
 #endif
 	sub \reg2, \reg2, 1
 #ifdef CONFIG_SMP
 	st \reg2, [\reg1, ___cpu_t_nested_OFFSET]
 #else
-	st \reg2, [\reg1, ___kernel_t_nested_OFFSET]
+	st \reg2, [\reg1, _kernel_offset_to_nested]
 #endif
 .endm
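Note that the non-SMP branch of these assembly macros keeps the `_kernel_offset_to_nested` spelling; offsets_short.h (near the end of this diff) remaps that macro from `___kernel_t_nested_OFFSET` to `___cpu_t_nested_OFFSET`, which is still a valid displacement from `_kernel` because `cpus[]` is its first member. An illustrative C equivalent of the increment path (not code from the patch):

	/* mov \reg1, _kernel; then ld/add/st via _kernel_offset_to_nested */
	u32_t *nested = (u32_t *)((char *)&_kernel + _kernel_offset_to_nested);
	(*nested)++;	/* same cell as _kernel.cpus[0].nested */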
@@ -34,7 +34,7 @@ extern void z_arm64_offload(void);
 
 static ALWAYS_INLINE bool arch_is_in_isr(void)
 {
-	return _kernel.nested != 0U;
+	return _kernel.cpus[0].nested != 0U;
 }
 
@@ -85,7 +85,7 @@ void _enter_irq(u32_t ipending)
 	read_timer_start_of_isr();
 #endif
 
-	_kernel.nested++;
+	_kernel.cpus[0].nested++;
 
 #ifdef CONFIG_IRQ_OFFLOAD
 	z_irq_do_offload();

@@ -113,7 +113,7 @@ void _enter_irq(u32_t ipending)
 #endif
 	}
 
-	_kernel.nested--;
+	_kernel.cpus[0].nested--;
 #ifdef CONFIG_STACK_SENTINEL
 	z_check_stack_sentinel();
 #endif
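This handler shows the pattern every UP port in the patch follows: the CPU 0 nested count goes up on interrupt entry and down on exit, and `arch_is_in_isr()` is simply a test against zero. A minimal sketch of that shared contract (hypothetical wrapper name, dispatch elided; not code from the patch):

	void isr_wrapper_sketch(void)
	{
		_kernel.cpus[0].nested++;	/* arch_is_in_isr() now returns true */

		/* ... dispatch to the registered handler ... */

		_kernel.cpus[0].nested--;	/* back to 0 => thread context again */
	}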
@@ -43,7 +43,7 @@ FUNC_NORETURN void z_nios2_fatal_error(unsigned int reason,
 
 static inline bool arch_is_in_isr(void)
 {
-	return _kernel.nested != 0U;
+	return _kernel.cpus[0].nested != 0U;
 }
 
 #ifdef CONFIG_IRQ_OFFLOAD
@@ -22,7 +22,7 @@
 int arch_swap(unsigned int key)
 {
 	/*
-	 * struct k_thread * _kernel.current is the currently runnig thread
+	 * struct k_thread * _current is the currently runnig thread
 	 * struct k_thread * _kernel.ready_q.cache contains the next thread to
 	 * run (cannot be NULL)
 	 *

@@ -30,8 +30,8 @@ int arch_swap(unsigned int key)
 	 * and so forth. But we do not need to do so because we use posix
 	 * threads => those are all nicely kept by the native OS kernel
 	 */
-	_kernel.current->callee_saved.key = key;
-	_kernel.current->callee_saved.retval = -EAGAIN;
+	_current->callee_saved.key = key;
+	_current->callee_saved.retval = -EAGAIN;
 
 	/* retval may be modified with a call to
 	 * arch_thread_return_value_set()

@@ -43,10 +43,10 @@ int arch_swap(unsigned int key)
 
 	posix_thread_status_t *this_thread_ptr =
 		(posix_thread_status_t *)
-		_kernel.current->callee_saved.thread_status;
+		_current->callee_saved.thread_status;
 
 
-	_kernel.current = _kernel.ready_q.cache;
+	_current = _kernel.ready_q.cache;
 
 	/*
 	 * Here a "real" arch would load all processor registers for the thread

@@ -59,9 +59,9 @@ int arch_swap(unsigned int key)
 
 	/* When we continue, _kernel->current points back to this thread */
 
-	irq_unlock(_kernel.current->callee_saved.key);
+	irq_unlock(_current->callee_saved.key);
 
-	return _kernel.current->callee_saved.retval;
+	return _current->callee_saved.retval;
 }
 

@@ -83,7 +83,7 @@ void arch_switch_to_main_thread(struct k_thread *main_thread,
 
 	sys_trace_thread_switched_out();
 
-	_kernel.current = _kernel.ready_q.cache;
+	_current = _kernel.ready_q.cache;
 
 	sys_trace_thread_switched_in();
 
@@ -41,7 +41,7 @@ arch_thread_return_value_set(struct k_thread *thread, unsigned int value)
 
 static inline bool arch_is_in_isr(void)
 {
-	return _kernel.nested != 0U;
+	return _kernel.cpus[0].nested != 0U;
 }
 
 #endif /* _ASMLANGUAGE */
@@ -270,7 +270,7 @@ is_interrupt:
 	RV_OP_STOREREG t0, 0x00(sp)
 
 on_irq_stack:
-	/* Increment _kernel.nested variable */
+	/* Increment _kernel.cpus[0].nested variable */
 	lw t3, _kernel_offset_to_nested(t2)
 	addi t3, t3, 1
 	sw t3, _kernel_offset_to_nested(t2)

@@ -337,7 +337,7 @@ on_thread_stack:
 	/* Get reference to _kernel */
 	la t1, _kernel
 
-	/* Decrement _kernel.nested variable */
+	/* Decrement _kernel.cpus[0].nested variable */
 	lw t2, _kernel_offset_to_nested(t1)
 	addi t2, t2, -1
 	sw t2, _kernel_offset_to_nested(t1)
@@ -37,7 +37,7 @@ FUNC_NORETURN void z_riscv_fatal_error(unsigned int reason,
 
 static inline bool arch_is_in_isr(void)
 {
-	return _kernel.nested != 0U;
+	return _kernel.cpus[0].nested != 0U;
 }
 
 #ifdef CONFIG_IRQ_OFFLOAD
@@ -31,7 +31,7 @@ static inline bool arch_is_in_isr(void)
 	__asm__ volatile ("popf");
 	return ret;
 #else
-	return _kernel.nested != 0U;
+	return _kernel.cpus[0].nested != 0U;
 #endif
 }
 
@@ -76,11 +76,11 @@ void posix_irq_handler(void)
 		return;
 	}
 
-	if (_kernel.nested == 0) {
+	if (_kernel.cpus[0].nested == 0) {
 		may_swap = 0;
 	}
 
-	_kernel.nested++;
+	_kernel.cpus[0].nested++;
 
 	while ((irq_nbr = hw_irq_ctrl_get_highest_prio_irq()) != -1) {
 		int last_current_running_prio = hw_irq_ctrl_get_cur_prio();

@@ -96,7 +96,7 @@ void posix_irq_handler(void)
 		hw_irq_ctrl_set_cur_prio(last_current_running_prio);
 	}
 
-	_kernel.nested--;
+	_kernel.cpus[0].nested--;
 
 	/* Call swap if all the following is true:
 	 * 1) may_swap was enabled
@@ -133,11 +133,11 @@ void posix_irq_handler(void)
 		return;
 	}
 
-	if (_kernel.nested == 0) {
+	if (_kernel.cpus[0].nested == 0) {
 		may_swap = 0;
 	}
 
-	_kernel.nested++;
+	_kernel.cpus[0].nested++;
 
 	while ((irq_nbr = hw_irq_ctrl_get_highest_prio_irq()) != -1) {
 		int last_current_running_prio = hw_irq_ctrl_get_cur_prio();

@@ -153,7 +153,7 @@ void posix_irq_handler(void)
 		hw_irq_ctrl_set_cur_prio(last_current_running_prio);
 	}
 
-	_kernel.nested--;
+	_kernel.cpus[0].nested--;
 
 	/* Call swap if all the following is true:
 	 * 1) may_swap was enabled
@@ -253,7 +253,7 @@ static inline void arch_isr_direct_header(void)
 	/* We're not going to unlock IRQs, but we still need to increment this
 	 * so that arch_is_in_isr() works
 	 */
-	++_kernel.nested;
+	++_kernel.cpus[0].nested;
 }
 
 /*

@@ -269,7 +269,7 @@ static inline void arch_isr_direct_footer(int swap)
 #if defined(CONFIG_TRACING)
 	sys_trace_isr_exit();
 #endif
-	--_kernel.nested;
+	--_kernel.cpus[0].nested;
 
 	/* Call swap if all the following is true:
 	 *

@@ -277,7 +277,7 @@ static inline void arch_isr_direct_footer(int swap)
 	 * 2) We are not in a nested interrupt
 	 * 3) Next thread to run in the ready queue is not this thread
 	 */
-	if (swap != 0 && _kernel.nested == 0 &&
+	if (swap != 0 && _kernel.cpus[0].nested == 0 &&
 		_kernel.ready_q.cache != _current) {
 		unsigned int flags;
 
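The footer's swap condition reads naturally once the decrement above is kept in mind: after the decrement, `nested == 0` means this is the outermost interrupt, so it is the only frame allowed to context-switch, and the `ready_q.cache` comparison skips the switch when the interrupted thread is still the best choice. Schematically (condition taken from the hunk above, body elided):

	if (swap != 0 && _kernel.cpus[0].nested == 0 &&
	    _kernel.ready_q.cache != _current) {
		/* outermost ISR returning to a different thread: swap here */
	}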
@@ -133,25 +133,7 @@ struct _cpu {
 typedef struct _cpu _cpu_t;
 
 struct z_kernel {
-	/* For compatibility with pre-SMP code, union the first CPU
-	 * record with the legacy fields so code can continue to use
-	 * the "_kernel.XXX" expressions and assembly offsets.
-	 */
-	union {
-		struct _cpu cpus[CONFIG_MP_NUM_CPUS];
-#ifndef CONFIG_SMP
-		struct {
-			/* nested interrupt count */
-			u32_t nested;
-
-			/* interrupt stack pointer base */
-			char *irq_stack;
-
-			/* currently scheduled thread */
-			struct k_thread *current;
-		};
-#endif
-	};
+	struct _cpu cpus[CONFIG_MP_NUM_CPUS];
 
 #ifdef CONFIG_SYS_CLOCK_EXISTS
 	/* queue of timeouts */

@@ -204,7 +186,7 @@ bool z_smp_cpu_mobile(void);
 
 #else
 #define _current_cpu (&_kernel.cpus[0])
-#define _current _kernel.current
+#define _current _kernel.cpus[0].current
 #endif
 
 #define _timeout_q _kernel.timeout_q
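With the union gone, the UP `_current` macro has to name the real storage. Because the array index is a compile-time constant, `_kernel.cpus[0].current` still compiles to a single load at a fixed offset, so the rename costs nothing at runtime. A hedged sketch of how the two accessors from the hunk relate (the two definitions are from the diff; the usage line is illustrative):

	#define _current_cpu (&_kernel.cpus[0])
	#define _current     _kernel.cpus[0].current

	bool in_isr = (_current_cpu->nested != 0U);	/* same cell all the ports above test */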
@@ -23,12 +23,6 @@
 
 GEN_ABS_SYM_BEGIN(_OffsetAbsSyms)
 
-#ifndef CONFIG_SMP
-GEN_OFFSET_SYM(_kernel_t, current);
-GEN_OFFSET_SYM(_kernel_t, nested);
-GEN_OFFSET_SYM(_kernel_t, irq_stack);
-#endif
-
 GEN_OFFSET_SYM(_cpu_t, current);
 GEN_OFFSET_SYM(_cpu_t, nested);
 GEN_OFFSET_SYM(_cpu_t, irq_stack);
@@ -13,15 +13,18 @@
 
 /* kernel */
 
 /* main */
+#ifndef CONFIG_SMP
+/* Relies on _kernel.cpu being the first member of _kernel and having 1 element
+ */
 #define _kernel_offset_to_nested \
-	(___kernel_t_nested_OFFSET)
+	(___cpu_t_nested_OFFSET)
 
 #define _kernel_offset_to_irq_stack \
-	(___kernel_t_irq_stack_OFFSET)
+	(___cpu_t_irq_stack_OFFSET)
 
 #define _kernel_offset_to_current \
-	(___kernel_t_current_OFFSET)
+	(___cpu_t_current_OFFSET)
+#endif /* CONFIG_SMP */
 
 #define _kernel_offset_to_idle \
 	(___kernel_t_idle_OFFSET)
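The new comment states the assumption the whole patch leans on: these macros are still applied as displacements from `&_kernel`, so they are only correct while `cpus` is the first member of `struct z_kernel` and UP builds have exactly one element. A compile-time guard along these lines would pin that down (hypothetical, not part of this commit):

	#include <stddef.h>

	/* Hypothetical guard, not in the patch */
	_Static_assert(offsetof(struct z_kernel, cpus) == 0,
		       "cpus[] must remain the first member of _kernel");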
@@ -25,6 +25,10 @@ enum {
 	OPENOCD_OFFSET_T_COOP_FLOAT,
 };
 
+#if CONFIG_MP_NUM_CPUS > 1
+#error "This code doesn't work properly with multiple CPUs enabled"
+#endif
+
 /* Forward-compatibility notes: 1) Only append items to this table; otherwise
  * OpenOCD versions that expect less items will read garbage values.
  * 2) Avoid incompatible changes that affect the interpretation of existing

@@ -36,7 +40,7 @@ __attribute__((used, section(".openocd_dbg")))
 size_t _kernel_openocd_offsets[] = {
 	/* Version 0 starts */
 	[OPENOCD_OFFSET_VERSION] = 1,
-	[OPENOCD_OFFSET_K_CURR_THREAD] = offsetof(struct z_kernel, current),
+	[OPENOCD_OFFSET_K_CURR_THREAD] = offsetof(struct _cpu, current),
 	[OPENOCD_OFFSET_K_THREADS] = offsetof(struct z_kernel, threads),
 	[OPENOCD_OFFSET_T_ENTRY] = offsetof(struct k_thread, entry),
 	[OPENOCD_OFFSET_T_NEXT_THREAD] = offsetof(struct k_thread, next_thread),
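The table entry swap from `offsetof(struct z_kernel, current)` (a legacy field that no longer exists) to `offsetof(struct _cpu, current)` relies on the same first-member property, which is exactly what the new `#error` guard protects: a debugger adds the published offset to `_kernel`'s address, so the value is only meaningful while `cpus[0]` sits at offset 0. Roughly what a host-side consumer does with the table (illustrative pseudo-C with hypothetical helper names, not OpenOCD source):

	uintptr_t kbase = lookup_symbol("_kernel");            /* hypothetical */
	size_t off = offsets_table[OPENOCD_OFFSET_K_CURR_THREAD];
	uintptr_t curr = target_read_word(kbase + off);        /* hypothetical */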