irq: Fix irq_lock api usage
irq_lock() returns an unsigned int, but several places were storing the key in a signed int. This commit fixes those call sites. To keep the error from reappearing, a Coccinelle script is added that can be used to check for violations.

Signed-off-by: Flavio Ceolin <flavio.ceolin@intel.com>
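
For reference, the pattern this commit enforces across all of the touched call sites is sketched below in plain C. The sketch is illustrative only: bump_shared_counter() and shared_counter are made-up names, not code from this patch; the fixed point is that the key returned by irq_lock() is an unsigned int and must be handed back to irq_unlock() unchanged.

#include <kernel.h>

/* Illustrative shared state; not part of this commit. */
static volatile int shared_counter;

void bump_shared_counter(void)
{
	unsigned int key;	/* irq_lock() returns an unsigned int key */

	key = irq_lock();	/* mask interrupts, remember the previous state */
	shared_counter++;	/* work that must not race with an ISR */
	irq_unlock(key);	/* restore the state captured in key */
}
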
This commit is contained in:
parent ec462f872c
commit 0866d18d03
45 changed files with 124 additions and 73 deletions

@@ -38,7 +38,7 @@
 void _arch_irq_enable(unsigned int irq)
 {
-	int key = irq_lock();
+	unsigned int key = irq_lock();
 
 	_arc_v2_irq_unit_int_enable(irq);
 	irq_unlock(key);

@@ -55,7 +55,7 @@ void _arch_irq_enable(unsigned int irq)
 void _arch_irq_disable(unsigned int irq)
 {
-	int key = irq_lock();
+	unsigned int key = irq_lock();
 
 	_arc_v2_irq_unit_int_disable(irq);
 	irq_unlock(key);

@@ -79,7 +79,7 @@ void _irq_priority_set(unsigned int irq, unsigned int prio, u32_t flags)
 {
 	ARG_UNUSED(flags);
 
-	int key = irq_lock();
+	unsigned int key = irq_lock();
 
 	__ASSERT(prio < CONFIG_NUM_IRQ_PRIO_LEVELS,
 		 "invalid priority %d for irq %d", prio, irq);

@@ -22,7 +22,7 @@ void _irq_do_offload(void)
 void irq_offload(irq_offload_routine_t routine, void *parameter)
 {
-	int key;
+	unsigned int key;
 
 	key = irq_lock();
 	offload_routine = routine;

@@ -146,7 +146,7 @@ void _irq_spurious(void *unused)
 void _arch_isr_direct_pm(void)
 {
 #if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
-	int key;
+	unsigned int key;
 
 	/* irq_lock() does what we wan for this CPU */
 	key = irq_lock();

@@ -143,7 +143,7 @@ static int mcimx6x_m4_init(struct device *arg)
 {
 	ARG_UNUSED(arg);
 
-	int oldLevel; /* Old interrupt lock level */
+	unsigned int oldLevel; /* Old interrupt lock level */
 
 	/* Disable interrupts */
 	oldLevel = irq_lock();

@@ -111,7 +111,7 @@ static int imxrt_init(struct device *arg)
 {
 	ARG_UNUSED(arg);
 
-	int oldLevel; /* old interrupt lock level */
+	unsigned int oldLevel; /* old interrupt lock level */
 
 	/* disable interrupts */
 	oldLevel = irq_lock();

@@ -156,7 +156,7 @@ static int fsl_frdm_k64f_init(struct device *arg)
 {
 	ARG_UNUSED(arg);
 
-	int oldLevel; /* old interrupt lock level */
+	unsigned int oldLevel; /* old interrupt lock level */
 #if !defined(CONFIG_HAS_SYSMPU)
 	u32_t temp_reg;
 #endif /* !CONFIG_HAS_SYSMPU */

@@ -108,7 +108,7 @@ static int kl2x_init(struct device *arg)
 {
 	ARG_UNUSED(arg);
 
-	int oldLevel; /* old interrupt lock level */
+	unsigned int oldLevel; /* old interrupt lock level */
 
 	/* disable interrupts */
 	oldLevel = irq_lock();

@@ -181,7 +181,7 @@ static int kw2xd_init(struct device *arg)
 {
 	ARG_UNUSED(arg);
 
-	int oldLevel; /* old interrupt lock level */
+	unsigned int oldLevel; /* old interrupt lock level */
 
 	/* disable interrupts */
 	oldLevel = irq_lock();

@@ -107,7 +107,7 @@ static int kwx_init(struct device *arg)
 {
 	ARG_UNUSED(arg);
 
-	int oldLevel; /* old interrupt lock level */
+	unsigned int oldLevel; /* old interrupt lock level */
 
 	/* disable interrupts */
 	oldLevel = irq_lock();

@@ -82,7 +82,7 @@ static int nxp_lpc54114_init(struct device *arg)
 	ARG_UNUSED(arg);
 
 	/* old interrupt lock level */
-	int oldLevel;
+	unsigned int oldLevel;
 
 	/* disable interrupts */
 	oldLevel = irq_lock();

@@ -78,7 +78,7 @@ static int silabs_efm32wg_init(struct device *arg)
 {
 	ARG_UNUSED(arg);
 
-	int oldLevel; /* old interrupt lock level */
+	unsigned int oldLevel; /* old interrupt lock level */
 
 	/* disable interrupts */
 	oldLevel = irq_lock();

@@ -78,7 +78,7 @@ static int silabs_efr32fg1p_init(struct device *arg)
 {
 	ARG_UNUSED(arg);
 
-	int oldLevel; /* old interrupt lock level */
+	unsigned int oldLevel; /* old interrupt lock level */
 
 	/* disable interrupts */
 	oldLevel = irq_lock();

@@ -33,7 +33,7 @@ void _irq_spurious(void *unused)
 void _arch_irq_enable(unsigned int irq)
 {
 	u32_t ienable;
-	int key;
+	unsigned int key;
 
 	key = irq_lock();

@@ -49,7 +49,7 @@ void _arch_irq_enable(unsigned int irq)
 void _arch_irq_disable(unsigned int irq)
 {
 	u32_t ienable;
-	int key;
+	unsigned int key;
 
 	key = irq_lock();

@@ -31,7 +31,7 @@ void _irq_do_offload(void)
 void irq_offload(irq_offload_routine_t routine, void *parameter)
 {
-	int key;
+	unsigned int key;
 
 	key = irq_lock();
 	_offload_routine = routine;

@@ -32,7 +32,7 @@ void _irq_do_offload(void)
 void irq_offload(irq_offload_routine_t routine, void *parameter)
 {
-	int key;
+	unsigned int key;
 
 	key = irq_lock();
 	_offload_routine = routine;

@@ -13,7 +13,7 @@
 void _arch_irq_enable(unsigned int irq)
 {
-	int key;
+	unsigned int key;
 
 	key = irq_lock();
 	/*

@@ -28,7 +28,7 @@ void _arch_irq_enable(unsigned int irq)
 void _arch_irq_disable(unsigned int irq)
 {
-	int key;
+	unsigned int key;
 
 	key = irq_lock();
 	PULP_IER &= ~(1 << irq);

@@ -27,7 +27,7 @@ void _irq_do_offload(void)
 void irq_offload(irq_offload_routine_t routine, void *parameter)
 {
-	int key;
+	unsigned int key;
 
 	/*
 	 * Lock interrupts here to prevent any concurrency issues with

@@ -183,7 +183,7 @@ static inline struct line_buf *telnet_rb_get_line_in(void)
 /* The actual printk hook */
 static int telnet_console_out(int c)
 {
-	int key = irq_lock();
+	unsigned int key = irq_lock();
 	struct line_buf *lb = telnet_rb_get_line_in();
 	bool yield = false;

@@ -169,7 +169,7 @@ static inline struct line_buf *ws_rb_get_line_in(void)
 /* The actual printk hook */
 static int ws_console_out(int c)
 {
-	int key = irq_lock();
+	unsigned int key = irq_lock();
 	struct line_buf *lb = ws_rb_get_line_in();
 	bool yield = false;

@@ -39,7 +39,7 @@ static int flash_mcux_erase(struct device *dev, off_t offset, size_t len)
 	struct flash_priv *priv = dev->driver_data;
 	u32_t addr;
 	status_t rc;
-	int key;
+	unsigned int key;
 
 	if (k_sem_take(&priv->write_lock, K_NO_WAIT)) {
 		return -EACCES;

@@ -80,7 +80,7 @@ static int flash_mcux_write(struct device *dev, off_t offset,
 	struct flash_priv *priv = dev->driver_data;
 	u32_t addr;
 	status_t rc;
-	int key;
+	unsigned int key;
 
 	if (k_sem_take(&priv->write_lock, K_NO_WAIT)) {
 		return -EACCES;

@@ -86,7 +86,7 @@ static int config_interrupt(u32_t pin, int flags)
 	volatile u32_t *reg = gpio_pin_reg(pin);
 	int type = convert_int_type(flags);
 	u32_t v;
-	int key;
+	unsigned int key;
 
 	if (type < 0) {
 		return type;

@@ -169,7 +169,7 @@ static int i2c_esp32_configure(struct device *dev, u32_t dev_config)
 {
 	const struct i2c_esp32_config *config = dev->config->config_info;
 	struct i2c_esp32_data *data = dev->driver_data;
-	int key = irq_lock();
+	unsigned int key = irq_lock();
 	u32_t v = 0;
 	int ret;

@@ -659,7 +659,7 @@ static int i2c_esp32_init(struct device *dev)
 {
 	const struct i2c_esp32_config *config = dev->config->config_info;
 	struct i2c_esp32_data *data = dev->driver_data;
-	int key = irq_lock();
+	unsigned int key = irq_lock();
 
 	k_sem_init(&data->fifo_sem, 1, 1);
 	k_sem_init(&data->transfer_sem, 1, 1);

@@ -323,7 +323,7 @@ void _ioapic_int_vec_set(unsigned int irq, unsigned int vector)
 static u32_t __IoApicGet(s32_t offset)
 {
 	u32_t value; /* value */
-	int key; /* interrupt lock level */
+	unsigned int key; /* interrupt lock level */
 
 	/* lock interrupts to ensure indirect addressing works "atomically" */

@@ -350,7 +350,7 @@ static u32_t __IoApicGet(s32_t offset)
  */
 static void __IoApicSet(s32_t offset, u32_t value)
 {
-	int key; /* interrupt lock level */
+	unsigned int key; /* interrupt lock level */
 
 	/* lock interrupts to ensure indirect addressing works "atomically" */

@@ -297,7 +297,7 @@ void _loapic_int_vec_set(unsigned int irq, /* IRQ number of the interrupt */
 			 unsigned int vector /* vector to copy into the LVT */
 			 )
 {
-	s32_t oldLevel; /* previous interrupt lock level */
+	unsigned int oldLevel; /* previous interrupt lock level */
 
 	/*
 	 * The following mappings are used:

@@ -334,7 +334,7 @@ void _loapic_int_vec_set(unsigned int irq, /* IRQ number of the interrupt */
 void _loapic_irq_enable(unsigned int irq)
 {
-	s32_t oldLevel; /* previous interrupt lock level */
+	unsigned int oldLevel; /* previous interrupt lock level */
 
 	/*
 	 * See the comments in _LoApicLvtVecSet() regarding IRQ to LVT mappings

@@ -363,7 +363,7 @@ void _loapic_irq_enable(unsigned int irq)
 void _loapic_irq_disable(unsigned int irq)
 {
-	s32_t oldLevel; /* previous interrupt lock level */
+	unsigned int oldLevel; /* previous interrupt lock level */
 
 	/*
	 * See the comments in _LoApicLvtVecSet() regarding IRQ to LVT mappings

@@ -64,7 +64,7 @@ static inline u32_t compute_ioregsel(unsigned int irq)
  */
 static void _mvic_rte_set(unsigned int irq, u32_t value)
 {
-	int key; /* interrupt lock level */
+	unsigned int key; /* interrupt lock level */
 	u32_t regsel;
 
 	__ASSERT(!(value & ~MVIC_IOWIN_SUPPORTED_BITS_MASK),

@@ -94,7 +94,7 @@ static void _mvic_rte_set(unsigned int irq, u32_t value)
  */
 static void _mvic_rte_update(unsigned int irq, u32_t value, u32_t mask)
 {
-	int key;
+	unsigned int key;
 	u32_t regsel, old_value, updated_value;
 
 	__ASSERT(!(value & ~MVIC_IOWIN_SUPPORTED_BITS_MASK),

@@ -70,7 +70,7 @@ static int mcux_mailbox_ipm_send(struct device *d, int wait, u32_t id,
 	u32_t data32[MCUX_IPM_DATA_REGS]; /* Until we change API
 					   * to u32_t array
 					   */
-	int flags;
+	unsigned int flags;
 	int i;
 
 	ARG_UNUSED(wait);

@@ -97,7 +97,7 @@ static int quark_se_ipm_send(struct device *d, int wait, u32_t id,
 	const struct quark_se_ipm_config_info *config = d->config->config_info;
 	volatile struct quark_se_ipm *ipm = config->ipm;
 	u32_t data32[4]; /* Until we change API to u32_t array */
-	int flags;
+	unsigned int flags;
 	int i;
 
 	if (id > QUARK_SE_IPM_MAX_ID_VAL) {

@@ -55,7 +55,7 @@ static int uart_imx_init(struct device *dev)
 {
 	UART_Type *uart = UART_STRUCT(dev);
 	const struct imx_uart_config *config = dev->config->config_info;
-	int old_level;
+	unsigned int old_level;
 
 	/* disable interrupts */
 	old_level = irq_lock();

@@ -310,7 +310,7 @@ static int uart_ns16550_init(struct device *dev)
 {
 	struct uart_ns16550_dev_data_t * const dev_data = DEV_DATA(dev);
 
-	int old_level; /* old interrupt lock level */
+	unsigned int old_level; /* old interrupt lock level */
 	u8_t mdc = 0;
 
 	if (!ns16550_pci_uart_scan(dev)) {

@@ -162,7 +162,7 @@ static inline int32_t _get_max_clock_time(void)
 static inline void _set_max_clock_time(void)
 {
-	int key;
+	unsigned int key;
 
 	key = irq_lock();
 	_sys_clock_tick_count = _get_elapsed_clock_time();

@@ -181,7 +181,7 @@ void _set_time(u32_t time)
 {
 	u32_t C; /* (current) time */
 	u32_t F; /* Time to program */
-	int key;
+	unsigned int key;
 
 	if (!time) {
 		idle_original_ticks = 0;

@@ -225,7 +225,7 @@ void _enable_sys_clock(void)
 u64_t _get_elapsed_clock_time(void)
 {
 	u32_t C;
-	int key;
+	unsigned int key;
 	u64_t total;
 	u32_t elapsed;

@@ -115,7 +115,8 @@ static void adjust_owner_prio(struct k_mutex *mutex, int new_prio)
 int _impl_k_mutex_lock(struct k_mutex *mutex, s32_t timeout)
 {
-	int new_prio, key;
+	int new_prio;
+	unsigned int key;
 
 	_sched_lock();

@@ -200,7 +201,7 @@ Z_SYSCALL_HANDLER(k_mutex_lock, mutex, timeout)
 void _impl_k_mutex_unlock(struct k_mutex *mutex)
 {
-	int key;
+	unsigned int key;
 
 	__ASSERT(mutex->lock_count > 0, "");
 	__ASSERT(mutex->owner == _current, "");

@@ -263,7 +263,7 @@ static void pend(struct k_thread *thread, _wait_q_t *wait_q, s32_t timeout)
 	 */
 	if (timeout != K_FOREVER) {
 		s32_t ticks = _TICK_ALIGN + _ms_to_ticks(timeout);
-		int key = irq_lock();
+		unsigned int key = irq_lock();
 
 		_add_thread_timeout(thread, wait_q, ticks);
 		irq_unlock(key);

@@ -839,7 +839,7 @@ Z_SYSCALL_HANDLER(k_sleep, duration)
 void _impl_k_wakeup(k_tid_t thread)
 {
-	int key = irq_lock();
+	unsigned int key = irq_lock();
 
 	/* verify first if thread is not waiting on an object */
 	if (_is_thread_pending(thread)) {

@@ -15,7 +15,7 @@ static atomic_t global_lock;
 unsigned int _smp_global_lock(void)
 {
-	int key = _arch_irq_lock();
+	unsigned int key = _arch_irq_lock();
 
 	if (!_current->base.global_lock_count) {
 		while (!atomic_cas(&global_lock, 0, 1)) {

@@ -84,7 +84,7 @@ static void smp_init_top(int key, void *arg)
 	};
 
 	_arch_curr_cpu()->current = &dummy_thread;
-	int k = irq_lock();
+	unsigned int k = irq_lock();
 	smp_timer_init();
 	_Swap(k);

@@ -302,7 +302,7 @@ void _setup_new_thread(struct k_thread *new_thread,
 	new_thread->entry.parameter2 = p2;
 	new_thread->entry.parameter3 = p3;
 
-	int key = irq_lock();
+	unsigned int key = irq_lock();
 
 	new_thread->next_thread = _kernel.threads;
 	_kernel.threads = new_thread;

@@ -448,7 +448,7 @@ int _impl_k_thread_cancel(k_tid_t tid)
 {
 	struct k_thread *thread = tid;
 
-	int key = irq_lock();
+	unsigned int key = irq_lock();
 
 	if (_has_thread_started(thread) ||
 	    !_is_thread_timeout_active(thread)) {

@@ -150,7 +150,7 @@ Z_SYSCALL_HANDLER(k_timer_start, timer, duration_p, period_p)
 void _impl_k_timer_stop(struct k_timer *timer)
 {
-	int key = irq_lock();
+	unsigned int key = irq_lock();
 	int inactive = (_abort_timeout(&timer->timeout) == _INACTIVE);
 
 	irq_unlock(key);

@@ -115,7 +115,7 @@ static struct dyn_obj *dyn_object_find(void *obj)
 {
 	struct rbnode *node;
 	struct dyn_obj *ret;
-	int key;
+	unsigned int key;
 
 	/* For any dynamically allocated kernel object, the object
 	 * pointer is just a member of the conatining struct dyn_obj,

@@ -201,7 +201,7 @@ static void _thread_idx_free(u32_t tidx)
 void *_impl_k_object_alloc(enum k_objects otype)
 {
 	struct dyn_obj *dyn_obj;
-	int key;
+	unsigned int key;
 	u32_t tidx;
 
 	/* Stacks are not supported, we don't yet have mem pool APIs

@@ -248,7 +248,7 @@ void *_impl_k_object_alloc(enum k_objects otype)
 void k_object_free(void *obj)
 {
 	struct dyn_obj *dyn_obj;
-	int key;
+	unsigned int key;
 
 	/* This function is intentionally not exposed to user mode.
 	 * There's currently no robust way to track that an object isn't

@@ -292,7 +292,7 @@ struct _k_object *_k_object_find(void *obj)
 void _k_object_wordlist_foreach(_wordlist_cb_func_t func, void *context)
 {
-	int key;
+	unsigned int key;
 	struct dyn_obj *obj, *next;
 
 	_k_object_gperf_wordlist_foreach(func, context);

@@ -393,7 +393,7 @@ void _thread_perms_clear(struct _k_object *ko, struct k_thread *thread)
 	int index = thread_index_get(thread);
 
 	if (index != -1) {
-		int key = irq_lock();
+		unsigned int key = irq_lock();
 
 		sys_bitfield_clear_bit((mem_addr_t)&ko->perms, index);
 		unref_check(ko);

@@ -404,7 +404,7 @@ void _thread_perms_clear(struct _k_object *ko, struct k_thread *thread)
 static void clear_perms_cb(struct _k_object *ko, void *ctx_ptr)
 {
 	int id = (int)ctx_ptr;
-	int key = irq_lock();
+	unsigned int key = irq_lock();
 
 	sys_bitfield_clear_bit((mem_addr_t)&ko->perms, id);
 	unref_check(ko);

@@ -582,7 +582,7 @@ void _k_object_uninit(void *object)
 void *z_user_alloc_from_copy(void *src, size_t size)
 {
 	void *dst = NULL;
-	int key;
+	unsigned int key;
 
 	key = irq_lock();

@@ -606,7 +606,7 @@ out_err:
 static int user_copy(void *dst, void *src, size_t size, bool to_user)
 {
 	int ret = EFAULT;
-	int key;
+	unsigned int key;
 
 	key = irq_lock();

@@ -636,7 +636,8 @@ int z_user_to_copy(void *dst, void *src, size_t size)
 char *z_user_string_alloc_copy(char *src, size_t maxlen)
 {
 	unsigned long actual_len;
-	int key, err;
+	int err;
+	unsigned int key;
 	char *ret = NULL;
 
 	key = irq_lock();

@@ -663,7 +664,8 @@ out:
 int z_user_string_copy(char *dst, char *src, size_t maxlen)
 {
 	unsigned long actual_len;
-	int key, ret, err;
+	int ret, err;
+	unsigned int key;
 
 	key = irq_lock();
 	actual_len = z_user_string_nlen(src, maxlen, &err);

@@ -78,7 +78,7 @@ int k_delayed_work_submit_to_queue(struct k_work_q *work_q,
 				   struct k_delayed_work *work,
 				   s32_t delay)
 {
-	int key = irq_lock();
+	unsigned int key = irq_lock();
 	int err;
 
 	/* Work cannot be active in multiple queues */

@@ -117,7 +117,7 @@ done:
 int k_delayed_work_cancel(struct k_delayed_work *work)
 {
-	int key = irq_lock();
+	unsigned int key = irq_lock();
 
 	if (!work->work_q) {
 		irq_unlock(key);

scripts/coccinelle/irq_lock.cocci (new file)
@@ -0,0 +1,47 @@
+// Copyright (c) 2017 Intel Corporation
+//
+// SPDX-License-Identifier: Apache-2.0
+
+@find@
+type T;
+position p;
+identifier i;
+@@
+
+T@p i = irq_lock();
+
+@script:python raise_error@
+t << find.T;
+@@
+if t in ["uint32_t", "unsigned int", "u32_t"]:
+	cocci.include_match(False)
+
+@replacement@
+type find.T;
+position find.p;
+@@
+- T@p
++ unsigned int
+
+@find2@
+type T;
+position p;
+identifier i;
+@@
+
+T@p i;
+...
+i = irq_lock();
+
+@script:python raise_error2@
+t << find2.T;
+@@
+if t in ["uint32_t", "unsigned int", "u32_t"]:
+	cocci.include_match(False)
+
+@replacement2@
+type find2.T;
+identifier find2.i;
+@@
+- T i;
++ unsigned int i;
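
Usage note (the exact invocation below is an assumption, not something this commit documents): the rules above can be run over a source tree with Coccinelle's spatch tool, for example spatch --sp-file scripts/coccinelle/irq_lock.cocci --dir <path-to-sources>. The first find/replacement pair rewrites keys that are initialized at declaration (T key = irq_lock();), the second pair handles a separate declaration followed by a later key = irq_lock(); assignment, and the python rules skip declarations that already use an unsigned type.
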
@@ -309,7 +309,7 @@ int net_tcp_release(struct net_tcp *tcp)
 {
 	struct net_pkt *pkt;
 	struct net_pkt *tmp;
-	int key;
+	unsigned int key;
 
 	if (!PART_OF_ARRAY(tcp_context, tcp)) {
 		return -EINVAL;

@@ -662,7 +662,7 @@ s32_t gptp_uscaled_ns_to_timer_ms(struct gptp_uscaled_ns *usns)
 static s32_t timer_get_remaining_and_stop(struct k_timer *timer)
 {
-	int key;
+	unsigned int key;
 	s32_t timer_value;
 
 	key = irq_lock();

@@ -1309,7 +1309,7 @@ done:
 void usb_cancel_transfer(u8_t ep)
 {
 	struct usb_transfer_data *trans;
-	int key;
+	unsigned int key;
 
 	key = irq_lock();

@@ -94,7 +94,7 @@ void alt_thread1(void)
 void alt_thread2(void)
 {
-	int key;
+	unsigned int key;
 
 	key = irq_lock();
 	k_oops();

@@ -105,7 +105,7 @@ void alt_thread2(void)
 void alt_thread3(void)
 {
-	int key;
+	unsigned int key;
 
 	key = irq_lock();
 	k_panic();

@@ -135,7 +135,7 @@ void stack_thread1(void)
 void stack_thread2(void)
 {
-	int key = irq_lock();
+	unsigned int key = irq_lock();
 
 	/* Test that stack overflow check due to swap works */
 	blow_up_stack();

@@ -126,7 +126,7 @@ static void phil_entry(void)
 	struct k_sem *f1; /* fork #1 */
 	struct k_sem *f2; /* fork #2 */
 	static int myId; /* next philosopher ID */
-	int pri = irq_lock(); /* interrupt lock level */
+	unsigned int pri = irq_lock(); /* interrupt lock level */
 	int id = myId++; /* current philosopher ID */
 
 	irq_unlock(pri);

@@ -94,7 +94,7 @@ static inline void *my_lifo_get(struct k_lifo *lifo, s32_t timeout)
 static int increment_counter(void)
 {
 	int tmp;
-	int key = irq_lock();
+	unsigned int key = irq_lock();
 
 	tmp = ++counter;
 	irq_unlock(key);

@@ -32,7 +32,7 @@ int logger_put(struct log_cbuffer *logger, char *data, u32_t data_size)
 {
 	int ret;
 	u8_t size32;
-	int key;
+	unsigned int key;
 
 	size32 = (data_size + 3) / 4;

@@ -84,10 +84,11 @@ void test_logging(void)
 static inline void ring_buf_print(struct ring_buf *buf)
 {
 	u8_t data[512];
-	int ret, key;
+	int ret;
 	u8_t size32 = sizeof(data) / 4;
 	u16_t type;
 	u8_t val;
+	unsigned int key;
 
 	key = irq_lock();
 	ret = sys_ring_buf_get(&log_cbuffer.ring_buffer, &type, &val,