Update nanokernel functions to follow a consistent naming convention. Part of
that process is replacing camelCase names with the preferred_underscore_method
style.
The change was accomplished with the following script:
#!/bin/bash
echo "Searching for ${1} to replace with ${2}"
find . -type f \( -iname \*.c -o -iname \*.h -o -iname \*.s -o -iname \*.kconf \) \
-not \( -path host/src/genIdt -prune \) \
-not \( -path host/src/gen_tables -prune \) \
-print | xargs sed -i "s/"${1}"/"${2}"/g"
Signed-off-by: Dan Kalowsky <daniel.kalowsky@intel.com>
/* nano_stack.c - VxMicro nanokernel 'stack' implementation */

/*
 * Copyright (c) 2010-2014 Wind River Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1) Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2) Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * 3) Neither the name of Wind River Systems nor the names of its contributors
 * may be used to endorse or promote products derived from this software without
 * specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
DESCRIPTION
This module provides the VxMicro nanokernel (aka system-level) 'stack'
implementation. This module provides the backing implementation for the
following APIs:

   nano_stack_init
   nano_fiber_stack_push, nano_task_stack_push, nano_isr_stack_push
   nano_fiber_stack_pop, nano_task_stack_pop, nano_isr_stack_pop
   nano_fiber_stack_pop_wait, nano_task_stack_pop_wait

INTERNAL
In some cases the compiler "alias" attribute is used to map two or more
APIs to the same function, since they have identical implementations.

Reference entries for these APIs may eventually be created in the directory
target/src/kernel/arch/doc.
*/

#include <nanok.h>
#include <toolchain.h>
#include <sections.h>

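/*
 * Illustrative usage sketch (not part of this module, guarded out with #if 0
 * so it does not affect the build): how a fiber might initialize a nanokernel
 * stack object and block until another context pushes an item. The names
 * 'my_stack', 'my_stack_storage', and 'consumer_fiber' are hypothetical and
 * exist only for this example.
 */
#if 0
#define MY_STACK_DEPTH 4

static uint32_t my_stack_storage[MY_STACK_DEPTH];	/* backing store */
static struct nano_stack my_stack;			/* stack object */

static void consumer_fiber(void)
{
	uint32_t item;

	/* initialize once, from fiber or task context */
	nano_stack_init(&my_stack, my_stack_storage);

	/* pend until another context pushes a data word */
	item = nano_fiber_stack_pop_wait(&my_stack);
	(void)item;
}
#endif
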
/*******************************************************************************
*
* nano_stack_init - initialize a nanokernel stack object
*
* This function initializes a nanokernel stack object structure.
*
* It may be called from either a fiber or a task context.
*
* RETURNS: N/A
*
* INTERNAL
* Although the existing implementation will support invocation from an ISR
* context, for future flexibility, this API will be restricted from ISR
* level invocation.
*/

void nano_stack_init(struct nano_stack *chan,	/* stack to initialize */
		     uint32_t *data		/* container for stack */
		     )
{
	chan->next = chan->base = data;
	chan->proc = (tCCS *)0;
}

#ifdef CONFIG_MICROKERNEL

/*
 * For legacy reasons, the microkernel utilizes the _Cpsh() API, which is
 * functionally equivalent to nano_fiber_stack_pushC() (since renamed
 * nano_fiber_stack_push(), given that, by default, APIs are a C interface),
 * so an alias is generated.
 */

FUNC_ALIAS(_stack_push, _Cpsh, void);
#endif /* CONFIG_MICROKERNEL */

FUNC_ALIAS(_stack_push, nano_isr_stack_push, void);
FUNC_ALIAS(_stack_push, nano_fiber_stack_push, void);

/*******************************************************************************
*
* _stack_push - push data onto a nanokernel stack
*
* This routine pushes a data item onto a stack object; it may be called from
* either a fiber or ISR context. A fiber pending on the stack object will be
* made ready, but will NOT be scheduled to execute.
*
* RETURNS: N/A
*
* INTERNAL
* This function is capable of supporting invocations from both a fiber and
* an ISR context. However, the 'nano_isr_stack_push' and
* 'nano_fiber_stack_push' aliases are created to support any required
* implementation differences in the future without introducing a source code
* migration issue.
*/

void _stack_push(struct nano_stack *chan,	/* stack channel on which to interact */
		 uint32_t data			/* data to push on stack */
		 )
{
	tCCS *ccs;
	unsigned int imask;

	imask = irq_lock_inline();

	ccs = chan->proc;
	if (ccs != (tCCS *)NULL) {
		chan->proc = 0;
		fiberRtnValueSet(ccs, data);
		_insert_ccs((tCCS **)&_NanoKernel.fiber, ccs);
	} else {
		*(chan->next) = data;
		chan->next++;
	}

	irq_unlock_inline(imask);
}

/*******************************************************************************
*
* nano_task_stack_push - push data onto a nanokernel stack
*
* This routine pushes a data item onto a stack object; it may be called only
* from a task context. A fiber pending on the stack object will be
* made ready, and will be scheduled to execute.
*
* RETURNS: N/A
*/

void nano_task_stack_push(
	struct nano_stack *chan,	/* stack channel on which to interact */
	uint32_t data			/* data to push on stack */
	)
{
	tCCS *ccs;
	unsigned int imask;

	imask = irq_lock_inline();

	ccs = chan->proc;
	if (ccs != (tCCS *)NULL) {
		chan->proc = 0;
		fiberRtnValueSet(ccs, data);
		_insert_ccs((tCCS **)&_NanoKernel.fiber, ccs);

		/* swap into the newly ready fiber */

		_Swap(imask);
		return;
	} else {
		*(chan->next) = data;
		chan->next++;
	}

	irq_unlock_inline(imask);
}
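
/*
 * Illustrative usage sketch (not part of this module, guarded out with #if 0):
 * a hypothetical task handing a data word to a fiber that is blocked in
 * nano_fiber_stack_pop_wait(). Because the push is done from task context,
 * a pending fiber is made ready and scheduled immediately. 'my_stack' is an
 * assumed, already-initialized stack object used only for this example.
 */
#if 0
static void task_producer(struct nano_stack *my_stack)
{
	/* wakes (and switches to) a fiber pending on 'my_stack', if any */
	nano_task_stack_push(my_stack, 42);
}
#endif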

FUNC_ALIAS(_StackPop, nano_isr_stack_pop, int);
FUNC_ALIAS(_StackPop, nano_fiber_stack_pop, int);
FUNC_ALIAS(_StackPop, nano_task_stack_pop, int);

/*******************************************************************************
*
* _StackPop - pop data from a nanokernel stack
*
* Pop the first data word from a nanokernel stack object; it may be called
* from a fiber, task, or ISR context.
*
* If the stack is not empty, a data word is popped and copied to the provided
* address <pData> and a non-zero value is returned. If the stack is empty,
* zero is returned.
*
* RETURNS: 1 if stack is not empty, 0 otherwise
*
* INTERNAL
* This function is capable of supporting invocations from fiber, task, and
* ISR contexts. However, the 'nano_isr_stack_pop', 'nano_task_stack_pop', and
* 'nano_fiber_stack_pop' aliases are created to support any required
* implementation differences in the future without introducing a source code
* migration issue.
*/

int _StackPop(struct nano_stack *chan,	/* channel on which to interact */
	      uint32_t *pData		/* container for data to pop */
	      )
{
	unsigned int imask;
	int rv = 0;

	imask = irq_lock_inline();

	if (chan->next > chan->base) {
		chan->next--;
		*pData = *(chan->next);
		rv = 1;
	}

	irq_unlock_inline(imask);
	return rv;
}
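
/*
 * Illustrative usage sketch (not part of this module, guarded out with #if 0):
 * draining a stack with the non-blocking pop from ISR context, using the 0/1
 * return value to detect an empty stack. 'my_isr_drain' and 'handle_item' are
 * hypothetical names used only for this example.
 */
#if 0
extern void handle_item(uint32_t item);	/* hypothetical consumer */

static void my_isr_drain(struct nano_stack *my_stack)
{
	uint32_t item;

	/* nano_isr_stack_pop() returns 0 once the stack is empty */
	while (nano_isr_stack_pop(my_stack, &item)) {
		handle_item(item);
	}
}
#endif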

/*******************************************************************************
*
* nano_fiber_stack_pop_wait - pop data from a nanokernel stack, wait if not available
*
* Pop the first data word from a nanokernel stack object; it can only be
* called from a fiber context.
*
* If data is not available the calling fiber will pend until data is pushed
* onto the stack.
*
* RETURNS: the data popped from the stack
*
* INTERNAL
* There exists a separate nano_task_stack_pop_wait() implementation since a
* task context cannot pend on a nanokernel object. Instead, tasks will poll
* the stack object.
*/

uint32_t nano_fiber_stack_pop_wait(
	struct nano_stack *chan	/* channel on which to interact */
	)
{
	uint32_t data;
	unsigned int imask;

	imask = irq_lock_inline();

	if (chan->next == chan->base) {
		chan->proc = _NanoKernel.current;
		data = (uint32_t)_Swap(imask);
	} else {
		chan->next--;
		data = *(chan->next);
		irq_unlock_inline(imask);
	}

	return data;
}

/*******************************************************************************
*
* nano_task_stack_pop_wait - pop data from a nanokernel stack, poll if not available
*
* Pop the first data word from a nanokernel stack; it can only be called
* from a task context.
*
* If data is not available the calling task will poll until data is pushed
* onto the stack.
*
* RETURNS: the data popped from the stack
*/

uint32_t nano_task_stack_pop_wait(
	struct nano_stack *chan	/* channel on which to interact */
	)
{
	uint32_t data;
	unsigned int imask;

	/* spin until data is pushed onto the stack */

	while (1) {
		imask = irq_lock_inline();

		/*
		 * Predict that the branch will be taken to break out of the
		 * loop. There is little cost to a misprediction since that
		 * leads to idle.
		 */

		if (likely(chan->next > chan->base))
			break;

		/*
		 * Invoke nano_cpu_atomic_idle() with interrupts still disabled
		 * to prevent the scenario where an interrupt fires after
		 * re-enabling interrupts and before executing the "halt"
		 * instruction. If the ISR performs a nano_isr_stack_push() on
		 * the stack specified by the 'chan' parameter, the subsequent
		 * execution of the "halt" instruction would result in the
		 * queued data being ignored until the next interrupt, if any.
		 *
		 * Thus it should be clear that an architecture's
		 * implementation of nano_cpu_atomic_idle() must be able to
		 * atomically re-enable interrupts and enter a low-power mode.
		 */

		nano_cpu_atomic_idle(imask);
	}

	chan->next--;
	data = *(chan->next);

	irq_unlock_inline(imask);

	return data;
}