Rename K_Task to _k_current_task
Update the global variable's name to follow a consistent naming convention.
The change was accomplished with the following script:
#!/bin/bash
# Rename an identifier across the source tree.
#   $1 = old identifier, $2 = new identifier
# Only C/assembly sources and headers (*.c, *.h, *.s), sysgen.py and
# *.kconf files are touched; generated output directories and the
# genIdt host tool are skipped.
echo "Searching for ${1} to replace with ${2}"
# -print0 / xargs -0 keep filenames with whitespace intact; -r prevents
# xargs from running sed with no file arguments when find matches nothing.
# The positional parameters are quoted so they cannot be word-split or
# glob-expanded before sed sees them; \b bounds the match to whole words.
find ./ \( -name "*.[chs]" -o -name "sysgen.py" -o -name "*.kconf" \) \
    ! -path "./host/src/genIdt/*" \
    ! -path "*/outdir/*" -print0 |
    xargs -0 -r sed -i 's/\b'"${1}"'\b/'"${2}"'/g'
Change-Id: Ic81b4ad7edf476da61ae62df627866e0446714d7
Signed-off-by: Yonattan Louise <yonattan.a.louise.mendoza@intel.com>
This commit is contained in:
parent
f282dbcb1f
commit
9bbb6e1e7b
22 changed files with 72 additions and 72 deletions
|
|
@ -72,13 +72,13 @@ void _TaskAbort(void)
|
|||
const int taskAbortCode = 1;
|
||||
|
||||
if (_ScbIsInThreadMode()) {
|
||||
_task_ioctl(K_Task->Ident, taskAbortCode);
|
||||
_task_ioctl(_k_current_task->Ident, taskAbortCode);
|
||||
} else {
|
||||
cmdpacket.Comm = TSKOP;
|
||||
cmdpacket.Args.g1.task = K_Task->Ident;
|
||||
cmdpacket.Args.g1.task = _k_current_task->Ident;
|
||||
cmdpacket.Args.g1.opt = taskAbortCode;
|
||||
cmdpacket.alloc = false;
|
||||
K_Task->Args = &cmdpacket;
|
||||
_k_current_task->Args = &cmdpacket;
|
||||
nano_isr_stack_push(&K_Args, (uint32_t) &cmdpacket);
|
||||
_ScbPendsvSet();
|
||||
}
|
||||
|
|
|
|||
|
|
@ -39,7 +39,7 @@ extern "C" {
|
|||
|
||||
#include <microkernel/task_api_export.h>
|
||||
|
||||
extern struct k_proc *K_Task;
|
||||
extern struct k_proc *_k_current_task;
|
||||
extern const knode_t _k_this_node;
|
||||
|
||||
/*
|
||||
|
|
@ -83,11 +83,11 @@ extern void task_abort_handler_set(void (*func)(void));
|
|||
extern void KS_TaskSetSwitchCallBack(taskswitchcallbackfunc func);
|
||||
#endif
|
||||
|
||||
#define task_id_get() (K_Task->Ident)
|
||||
#define task_priority_get() (K_Task->Prio)
|
||||
#define task_group_mask_get() (K_Task->Group)
|
||||
#define task_group_join(g) (K_Task->Group |= g)
|
||||
#define task_group_leave(g) (K_Task->Group &= ~g)
|
||||
#define task_id_get() (_k_current_task->Ident)
|
||||
#define task_priority_get() (_k_current_task->Prio)
|
||||
#define task_group_mask_get() (_k_current_task->Group)
|
||||
#define task_group_join(g) (_k_current_task->Group |= g)
|
||||
#define task_group_leave(g) (_k_current_task->Group &= ~g)
|
||||
#define task_node_id_get() (_k_this_node)
|
||||
|
||||
#define isr_task_id_get() task_id_get()
|
||||
|
|
|
|||
|
|
@ -58,7 +58,7 @@ int _task_pipe_get(kpipe_t Id,
|
|||
if (unlikely(_0_TO_N == Option && TICKS_NONE != TimeOut))
|
||||
return RC_FAIL;
|
||||
|
||||
A.Prio = K_Task->Prio;
|
||||
A.Prio = _k_current_task->Prio;
|
||||
A.Comm = CHDEQ_REQ;
|
||||
A.Time.ticks = TimeOut;
|
||||
{
|
||||
|
|
|
|||
|
|
@ -62,7 +62,7 @@ int _task_pipe_put_async(
|
|||
return RC_FAIL; /* not allowed because enlisted requests with
|
||||
zero size will hang in K_ChProc() */
|
||||
|
||||
A.Prio = K_Task->Prio;
|
||||
A.Prio = _k_current_task->Prio;
|
||||
A.Comm = CHENQ_REQ;
|
||||
A.Time
|
||||
.ticks = TICKS_UNLIMITED; /* same behavior in flow as a blocking
|
||||
|
|
|
|||
|
|
@ -60,7 +60,7 @@ int _task_pipe_put(kpipe_t Id,
|
|||
if (unlikely(_0_TO_N == Option && TICKS_NONE != TimeOut))
|
||||
return RC_FAIL;
|
||||
|
||||
A.Prio = K_Task->Prio;
|
||||
A.Prio = _k_current_task->Prio;
|
||||
A.Comm = CHENQ_REQ;
|
||||
A.Time.ticks = TimeOut;
|
||||
{
|
||||
|
|
|
|||
|
|
@ -51,10 +51,10 @@ void K_ChRecvReq(struct k_args *RequestOrig)
|
|||
|
||||
{/* If it's a poster, then don't deschedule the task */
|
||||
RequestOrig->Ctxt.proc =
|
||||
K_Task; /* First we save the pointer to
|
||||
_k_current_task; /* First we save the pointer to
|
||||
the tasks TCB for
|
||||
rescheduling later */
|
||||
set_state_bit(K_Task, TF_RECV);
|
||||
set_state_bit(_k_current_task, TF_RECV);
|
||||
}
|
||||
{
|
||||
mycopypacket(&Request, RequestOrig);
|
||||
|
|
|
|||
|
|
@ -58,10 +58,10 @@ void K_ChSendReq(struct k_args *RequestOrig)
|
|||
|
||||
if (!bAsync) {
|
||||
RequestOrig->Ctxt.proc =
|
||||
K_Task; /* First we save the pointer to
|
||||
_k_current_task; /* First we save the pointer to
|
||||
the tasks TCB for
|
||||
rescheduling later */
|
||||
set_state_bit(K_Task, TF_SEND);
|
||||
set_state_bit(_k_current_task, TF_SEND);
|
||||
} else {
|
||||
RequestOrig->Ctxt.proc =
|
||||
NULL; /* No need to put in data about
|
||||
|
|
|
|||
|
|
@ -48,6 +48,6 @@
|
|||
void K_taskcall(struct k_args *cmdpacket)
|
||||
{
|
||||
cmdpacket->alloc = false;
|
||||
K_Task->Args = cmdpacket;
|
||||
_k_current_task->Args = cmdpacket;
|
||||
nano_task_stack_push(&K_Args, (uint32_t)cmdpacket);
|
||||
}
|
||||
|
|
|
|||
|
|
@ -60,7 +60,7 @@ extern int _k_mem_map_count;
|
|||
extern int _k_mem_pool_count;
|
||||
extern int _k_pipe_count;
|
||||
|
||||
extern struct k_proc *K_Task;
|
||||
extern struct k_proc *_k_current_task;
|
||||
extern uint32_t K_PrioBitMap[];
|
||||
|
||||
#ifndef LITE
|
||||
|
|
|
|||
|
|
@ -140,9 +140,9 @@ void K_event_test(struct k_args *A)
|
|||
if (likely(A->Time.ticks != TICKS_NONE)) {
|
||||
/* Caller will wait for the event */
|
||||
if (likely(E->waiter == NULL)) {
|
||||
A->Ctxt.proc = K_Task;
|
||||
A->Ctxt.proc = _k_current_task;
|
||||
E->waiter = A;
|
||||
set_state_bit(K_Task, TF_EVNT);
|
||||
set_state_bit(_k_current_task, TF_EVNT);
|
||||
if (A->Time.ticks == TICKS_UNLIMITED) {
|
||||
A->Time.timer = NULL;
|
||||
} else {
|
||||
|
|
|
|||
|
|
@ -336,7 +336,7 @@ void K_sendreq(struct k_args *Writer)
|
|||
*/
|
||||
|
||||
if (!bAsync) {
|
||||
sender = K_Task;
|
||||
sender = _k_current_task;
|
||||
set_state_bit(sender, TF_SEND);
|
||||
}
|
||||
|
||||
|
|
@ -491,7 +491,7 @@ int _task_mbox_put(kmbox_t mbox, /* mailbox */
|
|||
return RC_FAIL;
|
||||
}
|
||||
|
||||
M->tx_task = K_Task->Ident;
|
||||
M->tx_task = _k_current_task->Ident;
|
||||
M->tx_block.poolid = 0; /* NO ASYNC POST */
|
||||
M->extra.sema = 0;
|
||||
M->mailbox = mbox;
|
||||
|
|
@ -568,7 +568,7 @@ void K_recvreq(struct k_args *Reader)
|
|||
struct k_args *temp;
|
||||
struct k_args *CopyReader;
|
||||
|
||||
Reader->Ctxt.proc = K_Task;
|
||||
Reader->Ctxt.proc = _k_current_task;
|
||||
set_state_bit(Reader->Ctxt.proc, TF_RECV);
|
||||
|
||||
copypacket(&CopyReader, Reader);
|
||||
|
|
@ -691,7 +691,7 @@ int _task_mbox_get(kmbox_t mbox, /* mailbox */
|
|||
{
|
||||
struct k_args A;
|
||||
|
||||
M->rx_task = K_Task->Ident;
|
||||
M->rx_task = _k_current_task->Ident;
|
||||
M->mailbox = mbox;
|
||||
M->extra.transfer = 0;
|
||||
|
||||
|
|
@ -700,7 +700,7 @@ int _task_mbox_get(kmbox_t mbox, /* mailbox */
|
|||
* there is an assertion check in prepare_transfer() if equal to 0
|
||||
*/
|
||||
|
||||
A.Prio = K_Task->Prio;
|
||||
A.Prio = _k_current_task->Prio;
|
||||
A.Comm = RECV_REQ;
|
||||
A.Time.ticks = time;
|
||||
A.Args.m1.mess = *M;
|
||||
|
|
@ -739,7 +739,7 @@ void _task_mbox_put_async(kmbox_t mbox, /* mailbox to which to send message */
|
|||
M->tx_block.poolid = (uint32_t)(-1);
|
||||
}
|
||||
|
||||
M->tx_task = K_Task->Ident;
|
||||
M->tx_task = _k_current_task->Ident;
|
||||
M->tx_data = NULL;
|
||||
M->mailbox = mbox;
|
||||
M->extra.sema = sema;
|
||||
|
|
@ -764,8 +764,8 @@ void K_recvdata(struct k_args *Starter)
|
|||
struct k_args *MoveD;
|
||||
struct k_args *Writer;
|
||||
|
||||
Starter->Ctxt.proc = K_Task;
|
||||
set_state_bit(K_Task, TF_RECVDATA);
|
||||
Starter->Ctxt.proc = _k_current_task;
|
||||
set_state_bit(_k_current_task, TF_RECVDATA);
|
||||
|
||||
GETARGS(CopyStarter);
|
||||
k_memcpy_s(CopyStarter, sizeof(struct k_args),
|
||||
|
|
@ -942,8 +942,8 @@ void K_senddata(struct k_args *Starter)
|
|||
struct k_args *MoveD;
|
||||
struct k_args *Reader;
|
||||
|
||||
Starter->Ctxt.proc = K_Task;
|
||||
set_state_bit(K_Task, TF_SENDDATA);
|
||||
Starter->Ctxt.proc = _k_current_task;
|
||||
set_state_bit(_k_current_task, TF_SENDDATA);
|
||||
|
||||
GETARGS(CopyStarter);
|
||||
k_memcpy_s(CopyStarter, sizeof(struct k_args),
|
||||
|
|
|
|||
|
|
@ -115,9 +115,9 @@ void K_alloc(struct k_args *A)
|
|||
*(A->Args.a1.mptr) = NULL;
|
||||
|
||||
if (likely(A->Time.ticks != TICKS_NONE)) {
|
||||
A->Prio = K_Task->Prio;
|
||||
A->Ctxt.proc = K_Task;
|
||||
set_state_bit(K_Task, TF_ALLO);
|
||||
A->Prio = _k_current_task->Prio;
|
||||
A->Ctxt.proc = _k_current_task;
|
||||
set_state_bit(_k_current_task, TF_ALLO);
|
||||
INSERT_ELM(M->Waiters, A);
|
||||
if (A->Time.ticks == TICKS_UNLIMITED)
|
||||
A->Time.timer = NULL;
|
||||
|
|
|
|||
|
|
@ -534,9 +534,9 @@ void K_GetBlock(struct k_args *A)
|
|||
(A->Time.ticks != TICKS_NONE) &&
|
||||
(A->Args.p1.req_size <=
|
||||
P->maxblock_size))) {/* timeout? but not block to large */
|
||||
A->Prio = K_Task->Prio;
|
||||
A->Ctxt.proc = K_Task;
|
||||
set_state_bit(K_Task, TF_GTBL); /* extra new statebit */
|
||||
A->Prio = _k_current_task->Prio;
|
||||
A->Ctxt.proc = _k_current_task;
|
||||
set_state_bit(_k_current_task, TF_GTBL); /* extra new statebit */
|
||||
|
||||
/* INSERT_ELM (P->frag_tab[offset].Waiters, A); */
|
||||
INSERT_ELM(P->Waiters, A);
|
||||
|
|
|
|||
|
|
@ -180,7 +180,7 @@ void K_lockreq(struct k_args *A /* pointer to mutex lock
|
|||
* task is on this node. This may be more recent than
|
||||
* that stored in struct k_args.
|
||||
*/
|
||||
Mutex->OwnerCurrentPrio = K_Task->Prio;
|
||||
Mutex->OwnerCurrentPrio = _k_current_task->Prio;
|
||||
|
||||
/*
|
||||
* Save the original priority when first acquiring the lock (but
|
||||
|
|
@ -211,9 +211,9 @@ void K_lockreq(struct k_args *A /* pointer to mutex lock
|
|||
* the priority saved in the request is up to
|
||||
* date.
|
||||
*/
|
||||
A->Ctxt.proc = K_Task;
|
||||
A->Prio = K_Task->Prio;
|
||||
set_state_bit(K_Task, TF_LOCK);
|
||||
A->Ctxt.proc = _k_current_task;
|
||||
A->Prio = _k_current_task->Prio;
|
||||
set_state_bit(_k_current_task, TF_LOCK);
|
||||
/* Note: Mutex->Waiters is a priority sorted list */
|
||||
INSERT_ELM(Mutex->Waiters, A);
|
||||
#ifndef LITE
|
||||
|
|
@ -284,7 +284,7 @@ int _task_mutex_lock(
|
|||
A.Comm = LOCK_REQ;
|
||||
A.Time.ticks = time;
|
||||
A.Args.l1.mutex = mutex;
|
||||
A.Args.l1.task = K_Task->Ident;
|
||||
A.Args.l1.task = _k_current_task->Ident;
|
||||
KERNEL_ENTRY(&A);
|
||||
return A.Time.rcode;
|
||||
}
|
||||
|
|
@ -399,7 +399,7 @@ void _task_mutex_unlock(kmutex_t mutex /* mutex to unlock */
|
|||
|
||||
A.Comm = UNLOCK;
|
||||
A.Args.l1.mutex = mutex;
|
||||
A.Args.l1.task = K_Task->Ident;
|
||||
A.Args.l1.task = _k_current_task->Ident;
|
||||
KERNEL_ENTRY(&A);
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -114,9 +114,9 @@ void K_enqreq(struct k_args *A)
|
|||
#endif
|
||||
} else {
|
||||
if (likely(A->Time.ticks != TICKS_NONE)) {
|
||||
A->Ctxt.proc = K_Task;
|
||||
A->Prio = K_Task->Prio;
|
||||
set_state_bit(K_Task, TF_ENQU);
|
||||
A->Ctxt.proc = _k_current_task;
|
||||
A->Prio = _k_current_task->Prio;
|
||||
set_state_bit(_k_current_task, TF_ENQU);
|
||||
INSERT_ELM(Q->Waiters, A);
|
||||
#ifndef LITE
|
||||
if (A->Time.ticks == TICKS_UNLIMITED)
|
||||
|
|
@ -238,9 +238,9 @@ void K_deqreq(struct k_args *A)
|
|||
Q->Nused = --n;
|
||||
} else {
|
||||
if (likely(A->Time.ticks != TICKS_NONE)) {
|
||||
A->Ctxt.proc = K_Task;
|
||||
A->Prio = K_Task->Prio;
|
||||
set_state_bit(K_Task, TF_DEQU);
|
||||
A->Ctxt.proc = _k_current_task;
|
||||
A->Prio = _k_current_task->Prio;
|
||||
set_state_bit(_k_current_task, TF_DEQU);
|
||||
|
||||
INSERT_ELM(Q->Waiters, A);
|
||||
#ifndef LITE
|
||||
|
|
|
|||
|
|
@ -357,7 +357,7 @@ void K_waitmany(struct k_args *A)
|
|||
struct k_args *R;
|
||||
|
||||
GETARGS(R);
|
||||
R->Prio = K_Task->Prio;
|
||||
R->Prio = _k_current_task->Prio;
|
||||
R->Comm = WAITMREQ;
|
||||
R->Ctxt.args = A;
|
||||
R->Args.s1.sema = *L++;
|
||||
|
|
@ -365,8 +365,8 @@ void K_waitmany(struct k_args *A)
|
|||
(A->Args.s1.nsem)++;
|
||||
}
|
||||
|
||||
A->Ctxt.proc = K_Task;
|
||||
set_state_bit(K_Task, TF_LIST);
|
||||
A->Ctxt.proc = _k_current_task;
|
||||
set_state_bit(_k_current_task, TF_LIST);
|
||||
|
||||
if (A->Time.ticks != TICKS_NONE) {
|
||||
if (A->Time.ticks == TICKS_UNLIMITED)
|
||||
|
|
@ -397,9 +397,9 @@ void K_waitsreq(struct k_args *A)
|
|||
S->Level--;
|
||||
A->Time.rcode = RC_OK;
|
||||
} else if (A->Time.ticks != TICKS_NONE) {
|
||||
A->Ctxt.proc = K_Task;
|
||||
A->Prio = K_Task->Prio;
|
||||
set_state_bit(K_Task, TF_SEMA);
|
||||
A->Ctxt.proc = _k_current_task;
|
||||
A->Prio = _k_current_task->Prio;
|
||||
set_state_bit(_k_current_task, TF_SEMA);
|
||||
INSERT_ELM(S->Waiters, A);
|
||||
#ifndef LITE
|
||||
if (A->Time.ticks == TICKS_UNLIMITED)
|
||||
|
|
@ -457,7 +457,7 @@ ksem_t _task_sem_group_take(ksemg_t group, /* group of semaphores to test */
|
|||
struct k_args A;
|
||||
|
||||
A.Comm = WAITMANY;
|
||||
A.Prio = K_Task->Prio;
|
||||
A.Prio = _k_current_task->Prio;
|
||||
A.Time.ticks = time;
|
||||
A.Args.s1.list = group;
|
||||
KERNEL_ENTRY(&A);
|
||||
|
|
|
|||
|
|
@ -233,7 +233,7 @@ void abort_task(struct k_proc *X)
|
|||
void task_abort_handler_set(void (*func)(void) /* abort handler */
|
||||
)
|
||||
{
|
||||
K_Task->fabort = func;
|
||||
_k_current_task->fabort = func;
|
||||
}
|
||||
|
||||
/*******************************************************************************
|
||||
|
|
@ -420,14 +420,14 @@ void task_priority_set(ktask_t task, /* task whose priority is to be set */
|
|||
|
||||
void K_yield(struct k_args *A)
|
||||
{
|
||||
struct k_tqhd *H = _k_task_priority_list + K_Task->Prio;
|
||||
struct k_proc *X = K_Task->Forw;
|
||||
struct k_tqhd *H = _k_task_priority_list + _k_current_task->Prio;
|
||||
struct k_proc *X = _k_current_task->Forw;
|
||||
|
||||
ARG_UNUSED(A);
|
||||
if (X && H->Head == K_Task) {
|
||||
K_Task->Forw = NULL;
|
||||
H->Tail->Forw = K_Task;
|
||||
H->Tail = K_Task;
|
||||
if (X && H->Head == _k_current_task) {
|
||||
_k_current_task->Forw = NULL;
|
||||
H->Tail->Forw = _k_current_task;
|
||||
H->Tail = _k_current_task;
|
||||
H->Head = X;
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -468,11 +468,11 @@ void K_sleep(struct k_args *P)
|
|||
T->Args = P;
|
||||
|
||||
P->Comm = WAKEUP;
|
||||
P->Ctxt.proc = K_Task;
|
||||
P->Ctxt.proc = _k_current_task;
|
||||
P->Time.timer = T;
|
||||
|
||||
enlist_timer(T);
|
||||
set_state_bit(K_Task, TF_TIME);
|
||||
set_state_bit(_k_current_task, TF_TIME);
|
||||
}
|
||||
|
||||
/*******************************************************************************
|
||||
|
|
|
|||
|
|
@ -92,7 +92,7 @@ void K_monitor_args(struct k_args *A)
|
|||
K_monitor_wptr->data2 = MO_EVENT | (uint32_t)A;
|
||||
}
|
||||
else {
|
||||
K_monitor_wptr->data1 = K_Task->Ident;
|
||||
K_monitor_wptr->data1 = _k_current_task->Ident;
|
||||
K_monitor_wptr->data2 = MO_LCOMM | A->Comm;
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -187,7 +187,7 @@ static inline int _TlDebugUpdate(int32_t ticks)
|
|||
static inline void _TimeSliceUpdate(void)
|
||||
{
|
||||
#ifdef CONFIG_TIMESLICING
|
||||
int yield = SliceTime && (K_Task->Prio >= SlicePrio) &&
|
||||
int yield = SliceTime && (_k_current_task->Prio >= SlicePrio) &&
|
||||
(++SliceCount >= SliceTime);
|
||||
if (yield) {
|
||||
SliceCount = 0;
|
||||
|
|
|
|||
|
|
@ -131,7 +131,7 @@ FUNC_NORETURN void K_swapper(int parameter1, /* not used */
|
|||
*/
|
||||
pNextTask = _k_task_priority_list[K_PrioListIdx].Head;
|
||||
|
||||
if (K_Task != pNextTask) {
|
||||
if (_k_current_task != pNextTask) {
|
||||
/*
|
||||
* Need to swap the low priority task,
|
||||
* the task was saved on kernel_entry
|
||||
|
|
@ -151,19 +151,19 @@ FUNC_NORETURN void K_swapper(int parameter1, /* not used */
|
|||
if (pNextTask->Ident == 0x00000000) {
|
||||
WldT_start = timer_read();
|
||||
}
|
||||
if (K_Task->Ident == 0x00000000) {
|
||||
if (_k_current_task->Ident == 0x00000000) {
|
||||
WldT_end = timer_read();
|
||||
Wld_i += (Wld_i0 * (WldT_end - WldT_start)) /
|
||||
WldTDelta;
|
||||
}
|
||||
#endif
|
||||
#endif
|
||||
K_Task = pNextTask;
|
||||
_k_current_task = pNextTask;
|
||||
_NanoKernel.task = (tCCS *)pNextTask->workspace;
|
||||
|
||||
#ifdef CONFIG_TASK_MONITOR
|
||||
if (K_monitor_mask & MON_TSWAP) {
|
||||
K_monitor_task(K_Task, 0);
|
||||
K_monitor_task(_k_current_task, 0);
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
|
@ -225,7 +225,7 @@ void *_Cget(struct nano_lifo *chan)
|
|||
|
||||
FUNC_NORETURN void _TaskAbort(void)
|
||||
{
|
||||
_task_ioctl(K_Task->Ident, TASK_ABORT);
|
||||
_task_ioctl(_k_current_task->Ident, TASK_ABORT);
|
||||
|
||||
/*
|
||||
* Compiler can't tell that _task_ioctl() won't return and issues
|
||||
|
|
|
|||
|
|
@ -419,7 +419,7 @@ def kernel_main_c_tasks():
|
|||
# currently scheduled task (idle task)
|
||||
|
||||
kernel_main_c_out("\n" +
|
||||
"struct k_proc * K_Task = &_k_task_list[%d];\n" % (total_tasks - 1))
|
||||
"struct k_proc * _k_current_task = &_k_task_list[%d];\n" % (total_tasks - 1))
|
||||
|
||||
|
||||
def kernel_main_c_priorities():
|
||||
|
|
|
|||
Loading…
Reference in a new issue