net: buf: remove use of special putter and getter functions
Convert users of the net_buf_put() and net_buf_get() functions to the
non-wrapped putters and getters k_fifo_put() and k_fifo_get().
Special handling of net_bufs in k_fifos is no longer needed after commit
3d306c181f, since these actions are now
atomic regardless of any net_buf fragments.
Signed-off-by: Henrik Brix Andersen <henrik@brixandersen.dk>
parent cb47ed206b
commit 69fe9b0c50
40 changed files with 154 additions and 155 deletions
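For context, every change in this diff follows the same pattern. The sketch below is illustrative only: the fifo name and helper functions are invented for the example, and the <zephyr/net_buf.h> include path assumes a current Zephyr tree (older trees used <zephyr/net/buf.h>).

#include <zephyr/kernel.h>
#include <zephyr/net_buf.h>

/* Hypothetical queue, initialized elsewhere with k_fifo_init(). */
static struct k_fifo my_fifo;

static void enqueue_buf(struct net_buf *buf)
{
	/* before: net_buf_put(&my_fifo, buf); */
	k_fifo_put(&my_fifo, buf);
}

static struct net_buf *dequeue_buf(void)
{
	/* before: buf = net_buf_get(&my_fifo, K_FOREVER); */
	return k_fifo_get(&my_fifo, K_FOREVER);
}

The k_fifo calls operate on the net_buf pointer directly; per the commit message, no extra fragment handling is needed after commit 3d306c181f.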
@@ -252,7 +252,7 @@ static void rx_thread(void *p1, void *p2, void *p3)
 /* Let the ISR continue receiving new packets */
 uart_irq_rx_enable(cfg->uart);

-buf = net_buf_get(&h4->rx.fifo, K_FOREVER);
+buf = k_fifo_get(&h4->rx.fifo, K_FOREVER);
 do {
 uart_irq_rx_enable(cfg->uart);

@@ -266,7 +266,7 @@ static void rx_thread(void *p1, void *p2, void *p3)
 k_yield();

 uart_irq_rx_disable(cfg->uart);
-buf = net_buf_get(&h4->rx.fifo, K_NO_WAIT);
+buf = k_fifo_get(&h4->rx.fifo, K_NO_WAIT);
 } while (buf);
 }
 }

@@ -352,7 +352,7 @@ static inline void read_payload(const struct device *dev)
 reset_rx(h4);

 LOG_DBG("Putting buf %p to rx fifo", buf);
-net_buf_put(&h4->rx.fifo, buf);
+k_fifo_put(&h4->rx.fifo, buf);
 }

 static inline void read_header(const struct device *dev)

@@ -398,7 +398,7 @@ static inline void process_tx(const struct device *dev)
 int bytes;

 if (!h4->tx.buf) {
-h4->tx.buf = net_buf_get(&h4->tx.fifo, K_NO_WAIT);
+h4->tx.buf = k_fifo_get(&h4->tx.fifo, K_NO_WAIT);
 if (!h4->tx.buf) {
 LOG_ERR("TX interrupt but no pending buffer!");
 uart_irq_tx_disable(cfg->uart);

@@ -447,7 +447,7 @@ static inline void process_tx(const struct device *dev)
 done:
 h4->tx.type = BT_HCI_H4_NONE;
 net_buf_unref(h4->tx.buf);
-h4->tx.buf = net_buf_get(&h4->tx.fifo, K_NO_WAIT);
+h4->tx.buf = k_fifo_get(&h4->tx.fifo, K_NO_WAIT);
 if (!h4->tx.buf) {
 uart_irq_tx_disable(cfg->uart);
 }

@@ -496,7 +496,7 @@ static int h4_send(const struct device *dev, struct net_buf *buf)
 LOG_DBG("buf %p type %u len %u", buf, bt_buf_get_type(buf), buf->len);

-net_buf_put(&h4->tx.fifo, buf);
+k_fifo_put(&h4->tx.fifo, buf);
 uart_irq_tx_enable(cfg->uart);

 return 0;
@@ -212,7 +212,7 @@ static void process_unack(struct h5_data *h5)
 LOG_DBG("Need to remove %u packet from the queue", number_removed);

 while (number_removed) {
-struct net_buf *buf = net_buf_get(&h5->unack_queue, K_NO_WAIT);
+struct net_buf *buf = k_fifo_get(&h5->unack_queue, K_NO_WAIT);

 if (!buf) {
 LOG_ERR("Unack queue is empty");

@@ -349,22 +349,22 @@ static void retx_timeout(struct k_work *work)
 k_fifo_init(&tmp_queue);

 /* Queue to temporary queue */
-while ((buf = net_buf_get(&h5->tx_queue, K_NO_WAIT))) {
-net_buf_put(&tmp_queue, buf);
+while ((buf = k_fifo_get(&h5->tx_queue, K_NO_WAIT))) {
+k_fifo_put(&tmp_queue, buf);
 }

 /* Queue unack packets to the beginning of the queue */
-while ((buf = net_buf_get(&h5->unack_queue, K_NO_WAIT))) {
+while ((buf = k_fifo_get(&h5->unack_queue, K_NO_WAIT))) {
 /* include also packet type */
 net_buf_push(buf, sizeof(uint8_t));
-net_buf_put(&h5->tx_queue, buf);
+k_fifo_put(&h5->tx_queue, buf);
 h5->tx_seq = (h5->tx_seq - 1) & 0x07;
 h5->unack_queue_len--;
 }

 /* Queue saved packets from temp queue */
-while ((buf = net_buf_get(&tmp_queue, K_NO_WAIT))) {
-net_buf_put(&h5->tx_queue, buf);
+while ((buf = k_fifo_get(&tmp_queue, K_NO_WAIT))) {
+k_fifo_put(&h5->tx_queue, buf);
 }
 }
 }

@@ -408,7 +408,7 @@ static void h5_process_complete_packet(const struct device *dev, uint8_t *hdr)
 net_buf_unref(buf);
 break;
 case HCI_3WIRE_LINK_PKT:
-net_buf_put(&h5->rx_queue, buf);
+k_fifo_put(&h5->rx_queue, buf);
 break;
 case HCI_EVENT_PKT:
 case HCI_ACLDATA_PKT:

@@ -619,7 +619,7 @@ static int h5_queue(const struct device *dev, struct net_buf *buf)
 memcpy(net_buf_push(buf, sizeof(type)), &type, sizeof(type));

-net_buf_put(&h5->tx_queue, buf);
+k_fifo_put(&h5->tx_queue, buf);

 return 0;
 }

@@ -653,7 +653,7 @@ static void tx_thread(void *p1, void *p2, void *p3)
 k_sleep(K_MSEC(100));
 break;
 case ACTIVE:
-buf = net_buf_get(&h5->tx_queue, K_FOREVER);
+buf = k_fifo_get(&h5->tx_queue, K_FOREVER);
 type = h5_get_type(buf);

 h5_send(dev, buf->data, type, buf->len);

@@ -661,7 +661,7 @@ static void tx_thread(void *p1, void *p2, void *p3)
 /* buf is dequeued from tx_queue and queued to unack
 * queue.
 */
-net_buf_put(&h5->unack_queue, buf);
+k_fifo_put(&h5->unack_queue, buf);
 h5->unack_queue_len++;

 k_work_reschedule(&h5->retx_work, H5_TX_ACK_TIMEOUT);

@@ -689,7 +689,7 @@ static void rx_thread(void *p1, void *p2, void *p3)
 while (true) {
 struct net_buf *buf;

-buf = net_buf_get(&h5->rx_queue, K_FOREVER);
+buf = k_fifo_get(&h5->rx_queue, K_FOREVER);

 hexdump("=> ", buf->data, buf->len);
@@ -242,7 +242,7 @@ static void rx_thread(void *p1, void *p2, void *p3)
 /* Let the ISR continue receiving new packets */
 rx_isr_start();

-buf = net_buf_get(&rx.fifo, K_FOREVER);
+buf = k_fifo_get(&rx.fifo, K_FOREVER);
 do {
 rx_isr_start();

@@ -257,7 +257,7 @@ static void rx_thread(void *p1, void *p2, void *p3)
 rx_isr_stop();

-buf = net_buf_get(&rx.fifo, K_NO_WAIT);
+buf = k_fifo_get(&rx.fifo, K_NO_WAIT);
 } while (buf);
 }
 }

@@ -339,7 +339,7 @@ static inline void read_payload(void)
 reset_rx();

 LOG_DBG("Putting buf %p to rx fifo", buf);
-net_buf_put(&rx.fifo, buf);
+k_fifo_put(&rx.fifo, buf);
 }

 static inline void read_header(void)
@@ -124,7 +124,7 @@ struct net_buf *udc_buf_get(const struct device *dev, const uint8_t ep)
 return NULL;
 }

-return net_buf_get(&ep_cfg->fifo, K_NO_WAIT);
+return k_fifo_get(&ep_cfg->fifo, K_NO_WAIT);
 }

 struct net_buf *udc_buf_get_all(const struct device *dev, const uint8_t ep)

@@ -169,7 +169,7 @@ struct net_buf *udc_buf_peek(const struct device *dev, const uint8_t ep)
 void udc_buf_put(struct udc_ep_config *const ep_cfg,
 struct net_buf *const buf)
 {
-net_buf_put(&ep_cfg->fifo, buf);
+k_fifo_put(&ep_cfg->fifo, buf);
 }

 void udc_ep_buf_set_setup(struct net_buf *const buf)
@@ -151,7 +151,7 @@ static int udc_mcux_ctrl_feed_dout(const struct device *dev,
 return -ENOMEM;
 }

-net_buf_put(&cfg->fifo, buf);
+k_fifo_put(&cfg->fifo, buf);

 ret = udc_mcux_ep_feed(dev, cfg, buf);

@@ -151,7 +151,7 @@ static int udc_mcux_ctrl_feed_dout(const struct device *dev,
 return -ENOMEM;
 }

-net_buf_put(&cfg->fifo, buf);
+k_fifo_put(&cfg->fifo, buf);

 ret = udc_mcux_ep_feed(dev, cfg, buf);
@@ -308,7 +308,7 @@ static int usbd_ctrl_feed_dout(const struct device *dev,
 return -ENOMEM;
 }

-net_buf_put(&cfg->fifo, buf);
+k_fifo_put(&cfg->fifo, buf);
 udc_nrf_clear_control_out(dev);

 return 0;

@@ -136,7 +136,7 @@ static int usbd_ctrl_feed_dout(const struct device *dev, const size_t length)
 return -ENOMEM;
 }

-net_buf_put(&cfg->fifo, buf);
+k_fifo_put(&cfg->fifo, buf);

 HAL_PCD_EP_Receive(&priv->pcd, cfg->addr, buf->data, buf->size);
@@ -192,7 +192,7 @@ static void hci_ipc_rx(uint8_t *data, size_t len)
 }

 if (buf) {
-net_buf_put(&tx_queue, buf);
+k_fifo_put(&tx_queue, buf);

 LOG_HEXDUMP_DBG(buf->data, buf->len, "Final net buffer:");
 }

@@ -205,7 +205,7 @@ static void tx_thread(void *p1, void *p2, void *p3)
 int err;

 /* Wait until a buffer is available */
-buf = net_buf_get(&tx_queue, K_FOREVER);
+buf = k_fifo_get(&tx_queue, K_FOREVER);
 /* Pass buffer to the stack */
 err = bt_send(buf);
 if (err) {

@@ -412,7 +412,7 @@ int main(void)
 while (1) {
 struct net_buf *buf;

-buf = net_buf_get(&rx_queue, K_FOREVER);
+buf = k_fifo_get(&rx_queue, K_FOREVER);
 hci_ipc_send(buf, HCI_REGULAR_MSG);
 }
 return 0;

@@ -317,7 +317,7 @@ int main(void)
 }

 while (1) {
-buf = net_buf_get(&rx_queue, K_FOREVER);
+buf = k_fifo_get(&rx_queue, K_FOREVER);
 err = spi_send(buf);
 if (err) {
 LOG_ERR("Failed to send");
@@ -179,7 +179,7 @@ static void rx_isr(void)
 if (remaining == 0) {
 /* Packet received */
 LOG_DBG("putting RX packet in queue.");
-net_buf_put(&tx_queue, buf);
+k_fifo_put(&tx_queue, buf);
 state = ST_IDLE;
 }
 break;

@@ -212,7 +212,7 @@ static void tx_isr(void)
 int len;

 if (!buf) {
-buf = net_buf_get(&uart_tx_queue, K_NO_WAIT);
+buf = k_fifo_get(&uart_tx_queue, K_NO_WAIT);
 if (!buf) {
 uart_irq_tx_disable(hci_uart_dev);
 return;

@@ -253,7 +253,7 @@ static void tx_thread(void *p1, void *p2, void *p3)
 int err;

 /* Wait until a buffer is available */
-buf = net_buf_get(&tx_queue, K_FOREVER);
+buf = k_fifo_get(&tx_queue, K_FOREVER);
 /* Pass buffer to the stack */
 err = bt_send(buf);
 if (err) {

@@ -273,7 +273,7 @@ static int h4_send(struct net_buf *buf)
 LOG_DBG("buf %p type %u len %u", buf, bt_buf_get_type(buf),
 buf->len);

-net_buf_put(&uart_tx_queue, buf);
+k_fifo_put(&uart_tx_queue, buf);
 uart_irq_tx_enable(hci_uart_dev);

 return 0;

@@ -403,7 +403,7 @@ int main(void)
 while (1) {
 struct net_buf *buf;

-buf = net_buf_get(&rx_queue, K_FOREVER);
+buf = k_fifo_get(&rx_queue, K_FOREVER);
 err = h4_send(buf);
 if (err) {
 LOG_ERR("Failed to send");
@@ -211,7 +211,7 @@ static void process_unack(void)
 LOG_DBG("Need to remove %u packet from the queue", number_removed);

 while (number_removed) {
-struct net_buf *buf = net_buf_get(&h5.unack_queue, K_NO_WAIT);
+struct net_buf *buf = k_fifo_get(&h5.unack_queue, K_NO_WAIT);

 if (!buf) {
 LOG_ERR("Unack queue is empty");

@@ -341,22 +341,22 @@ static void retx_timeout(struct k_work *work)
 k_fifo_init(&tmp_queue);

 /* Queue to temporary queue */
-while ((buf = net_buf_get(&h5.tx_queue, K_NO_WAIT))) {
-net_buf_put(&tmp_queue, buf);
+while ((buf = k_fifo_get(&h5.tx_queue, K_NO_WAIT))) {
+k_fifo_put(&tmp_queue, buf);
 }

 /* Queue unack packets to the beginning of the queue */
-while ((buf = net_buf_get(&h5.unack_queue, K_NO_WAIT))) {
+while ((buf = k_fifo_get(&h5.unack_queue, K_NO_WAIT))) {
 /* include also packet type */
 net_buf_push(buf, sizeof(uint8_t));
-net_buf_put(&h5.tx_queue, buf);
+k_fifo_put(&h5.tx_queue, buf);
 h5.tx_seq = (h5.tx_seq - 1) & 0x07;
 unack_queue_len--;
 }

 /* Queue saved packets from temp queue */
-while ((buf = net_buf_get(&tmp_queue, K_NO_WAIT))) {
-net_buf_put(&h5.tx_queue, buf);
+while ((buf = k_fifo_get(&tmp_queue, K_NO_WAIT))) {
+k_fifo_put(&h5.tx_queue, buf);
 }
 }

@@ -400,13 +400,13 @@ static void h5_process_complete_packet(uint8_t *hdr)
 net_buf_unref(buf);
 break;
 case HCI_3WIRE_LINK_PKT:
-net_buf_put(&h5.rx_queue, buf);
+k_fifo_put(&h5.rx_queue, buf);
 break;
 case HCI_COMMAND_PKT:
 case HCI_ACLDATA_PKT:
 case HCI_ISODATA_PKT:
 hexdump("=> ", buf->data, buf->len);
-net_buf_put(&tx_queue, buf);
+k_fifo_put(&tx_queue, buf);
 break;
 }
 }

@@ -563,7 +563,7 @@ static int h5_queue(struct net_buf *buf)
 {
 LOG_DBG("buf %p type %u len %u", buf, bt_buf_get_type(buf), buf->len);

-net_buf_put(&h5.tx_queue, buf);
+k_fifo_put(&h5.tx_queue, buf);

 return 0;
 }

@@ -597,7 +597,7 @@ static void process_events(struct k_poll_event *ev, int count)
 case K_POLL_STATE_FIFO_DATA_AVAILABLE:
 if (ev->tag == 0) {
 /* Wait until a buffer is available */
-buf = net_buf_get(&tx_queue, K_NO_WAIT);
+buf = k_fifo_get(&tx_queue, K_NO_WAIT);
 __ASSERT_NO_MSG(buf);

 /* Pass buffer to the stack */

@@ -607,7 +607,7 @@ static void process_events(struct k_poll_event *ev, int count)
 net_buf_unref(buf);
 }
 } else if (ev->tag == 2) {
-buf = net_buf_get(&h5.tx_queue, K_FOREVER);
+buf = k_fifo_get(&h5.tx_queue, K_FOREVER);
 __ASSERT_NO_MSG(buf);

 type = h5_get_type(buf);

@@ -616,7 +616,7 @@ static void process_events(struct k_poll_event *ev, int count)
 /* buf is dequeued from tx_queue and queued to unack
 * queue.
 */
-net_buf_put(&h5.unack_queue, buf);
+k_fifo_put(&h5.unack_queue, buf);
 unack_queue_len++;

 k_work_reschedule(&retx_work, H5_TX_ACK_TIMEOUT);

@@ -684,19 +684,19 @@ static void rx_thread(void *p1, void *p2, void *p3)
 while (true) {
 struct net_buf *buf, *cache;

-buf = net_buf_get(&h5.rx_queue, K_FOREVER);
+buf = k_fifo_get(&h5.rx_queue, K_FOREVER);

 hexdump("=> ", buf->data, buf->len);

 if (!memcmp(buf->data, sync_req, sizeof(sync_req))) {
 if (h5.link_state == ACTIVE) {
-while ((cache = net_buf_get(&h5.unack_queue, K_NO_WAIT))) {
+while ((cache = k_fifo_get(&h5.unack_queue, K_NO_WAIT))) {
 net_buf_unref(cache);
 }

 unack_queue_len = 0;

-while ((cache = net_buf_get(&h5.tx_queue, K_NO_WAIT))) {
+while ((cache = k_fifo_get(&h5.tx_queue, K_NO_WAIT))) {
 net_buf_unref(cache);
 }

@@ -810,7 +810,7 @@ int main(void)
 while (1) {
 struct net_buf *buf;

-buf = net_buf_get(&rx_queue, K_FOREVER);
+buf = k_fifo_get(&rx_queue, K_FOREVER);
 err = h5_queue(buf);
 if (err) {
 LOG_ERR("Failed to send");
@@ -155,7 +155,7 @@ static void send_hw_error(void)
 net_buf_add_mem(buf, hci_evt_hw_err, sizeof(hci_evt_hw_err));

 /* Inject the message into the c2h queue. */
-net_buf_put(&c2h_queue, buf);
+k_fifo_put(&c2h_queue, buf);

 /* The c2h thread will send the message at some point. The host
 * will receive it and reset the controller.

@@ -376,7 +376,7 @@ static void c2h_thread_entry(void)
 for (;;) {
 struct net_buf *buf;

-buf = net_buf_get(&c2h_queue, K_FOREVER);
+buf = k_fifo_get(&c2h_queue, K_FOREVER);
 uart_c2h_tx(buf->data, buf->len);
 net_buf_unref(buf);
 }
@@ -314,7 +314,7 @@ static void rx_isr(void)
 if (remaining == 0) {
 /* Packet received */
 LOG_DBG("putting RX packet in queue.");
-net_buf_put(&tx_queue, buf);
+k_fifo_put(&tx_queue, buf);
 state = ST_IDLE;
 }
 break;

@@ -343,7 +343,7 @@ static void tx_isr(void)
 int len;

 if (!buf) {
-buf = net_buf_get(&uart_tx_queue, K_NO_WAIT);
+buf = k_fifo_get(&uart_tx_queue, K_NO_WAIT);
 if (!buf) {
 uart_irq_tx_disable(hci_uart_dev);
 return;

@@ -384,7 +384,7 @@ static void tx_thread(void *p1, void *p2, void *p3)
 uint8_t response[16];

 /* Wait until a buffer is available */
-buf = net_buf_get(&tx_queue, K_FOREVER);
+buf = k_fifo_get(&tx_queue, K_FOREVER);
 buf_type = bt_buf_get_type(buf);
 if (buf_type == H4_ST_VND_CMD) {
 len = parse_cmd(buf->data, buf->len, response);

@@ -411,7 +411,7 @@ static void tx_thread(void *p1, void *p2, void *p3)
 static int h4_send(struct net_buf *buf)
 {
 LOG_DBG("buf %p type %u len %u", buf, bt_buf_get_type(buf), buf->len);
-net_buf_put(&uart_tx_queue, buf);
+k_fifo_put(&uart_tx_queue, buf);
 uart_irq_tx_enable(hci_uart_dev);
 return 0;
 }

@@ -450,7 +450,7 @@ int main(void)
 while (1) {
 struct net_buf *buf;

-buf = net_buf_get(&rx_queue, K_FOREVER);
+buf = k_fifo_get(&rx_queue, K_FOREVER);
 err = h4_send(buf);
 if (err) {
 LOG_ERR("Failed to send");
@@ -448,27 +448,27 @@ static struct net_buf *get_first_buf_matching_chan(struct k_fifo *fifo, struct b
 k_fifo_init(&skipped);

-while ((buf = net_buf_get(fifo, K_NO_WAIT))) {
+while ((buf = k_fifo_get(fifo, K_NO_WAIT))) {
 meta = bt_att_get_tx_meta_data(buf);
 if (!ret &&
 att_chan_matches_chan_opt(chan, meta->chan_opt)) {
 ret = buf;
 } else {
-net_buf_put(&skipped, buf);
+k_fifo_put(&skipped, buf);
 }
 }

 __ASSERT_NO_MSG(k_fifo_is_empty(fifo));

-while ((buf = net_buf_get(&skipped, K_NO_WAIT))) {
-net_buf_put(fifo, buf);
+while ((buf = k_fifo_get(&skipped, K_NO_WAIT))) {
+k_fifo_put(fifo, buf);
 }

 __ASSERT_NO_MSG(k_fifo_is_empty(&skipped));

 return ret;
 } else {
-return net_buf_get(fifo, K_NO_WAIT);
+return k_fifo_get(fifo, K_NO_WAIT);
 }
 }

@@ -779,7 +779,7 @@ static void bt_att_chan_send_rsp(struct bt_att_chan *chan, struct net_buf *buf)
 err = chan_send(chan, buf);
 if (err) {
 /* Responses need to be sent back using the same channel */
-net_buf_put(&chan->tx_queue, buf);
+k_fifo_put(&chan->tx_queue, buf);
 }
 }

@@ -3063,7 +3063,7 @@ static void att_reset(struct bt_att *att)
 (void)k_work_cancel_delayable_sync(&att->eatt.connection_work, &sync);
 #endif /* CONFIG_BT_EATT */

-while ((buf = net_buf_get(&att->tx_queue, K_NO_WAIT))) {
+while ((buf = k_fifo_get(&att->tx_queue, K_NO_WAIT))) {
 net_buf_unref(buf);
 }

@@ -3098,7 +3098,7 @@ static void att_chan_detach(struct bt_att_chan *chan)
 sys_slist_find_and_remove(&chan->att->chans, &chan->node);

 /* Release pending buffers */
-while ((buf = net_buf_get(&chan->tx_queue, K_NO_WAIT))) {
+while ((buf = k_fifo_get(&chan->tx_queue, K_NO_WAIT))) {
 net_buf_unref(buf);
 }

@@ -3927,7 +3927,7 @@ int bt_att_send(struct bt_conn *conn, struct net_buf *buf)
 return -ENOTCONN;
 }

-net_buf_put(&att->tx_queue, buf);
+k_fifo_put(&att->tx_queue, buf);
 att_send_process(att);

 return 0;
@@ -300,7 +300,7 @@ int bt_l2cap_br_send_cb(struct bt_conn *conn, uint16_t cid, struct net_buf *buf,
 LOG_DBG("push PDU: cb %p userdata %p", cb, user_data);

 make_closure(buf->user_data, cb, user_data);
-net_buf_put(&br_chan->_pdu_tx_queue, buf);
+k_fifo_put(&br_chan->_pdu_tx_queue, buf);
 raise_data_ready(br_chan);

 return 0;

@@ -910,7 +910,7 @@ void bt_l2cap_br_chan_del(struct bt_l2cap_chan *chan)
 /* Remove buffers on the PDU TX queue. */
 while (chan_has_data(br_chan)) {
-struct net_buf *buf = net_buf_get(&br_chan->_pdu_tx_queue, K_NO_WAIT);
+struct net_buf *buf = k_fifo_get(&br_chan->_pdu_tx_queue, K_NO_WAIT);

 net_buf_unref(buf);
 }
@@ -261,8 +261,7 @@ static void rfcomm_dlc_disconnect(struct bt_rfcomm_dlc *dlc)
 /* Queue a dummy buffer to wake up and stop the
 * tx thread for states where it was running.
 */
-net_buf_put(&dlc->tx_queue,
-net_buf_alloc(&dummy_pool, K_NO_WAIT));
+k_fifo_put(&dlc->tx_queue, net_buf_alloc(&dummy_pool, K_NO_WAIT));

 /* There could be a writer waiting for credits so return a
 * dummy credit to wake it up.

@@ -583,7 +582,7 @@ static void rfcomm_dlc_tx_thread(void *p1, void *p2, void *p3)
 dlc->state == BT_RFCOMM_STATE_USER_DISCONNECT) {
 /* Get next packet for dlc */
 LOG_DBG("Wait for buf %p", dlc);
-buf = net_buf_get(&dlc->tx_queue, timeout);
+buf = k_fifo_get(&dlc->tx_queue, timeout);
 /* If its dummy buffer or non user disconnect then break */
 if ((dlc->state != BT_RFCOMM_STATE_CONNECTED &&
 dlc->state != BT_RFCOMM_STATE_USER_DISCONNECT) ||

@@ -618,7 +617,7 @@ static void rfcomm_dlc_tx_thread(void *p1, void *p2, void *p3)
 LOG_DBG("dlc %p disconnected - cleaning up", dlc);

 /* Give back any allocated buffers */
-while ((buf = net_buf_get(&dlc->tx_queue, K_NO_WAIT))) {
+while ((buf = k_fifo_get(&dlc->tx_queue, K_NO_WAIT))) {
 bt_rfcomm_tx_destroy(dlc, buf);
 net_buf_unref(buf);
 }

@@ -871,7 +870,7 @@ static int rfcomm_dlc_close(struct bt_rfcomm_dlc *dlc)
 /* Queue a dummy buffer to wake up and stop the
 * tx thread.
 */
-net_buf_put(&dlc->tx_queue,
+k_fifo_put(&dlc->tx_queue,
 net_buf_alloc(&dummy_pool, K_NO_WAIT));

 /* There could be a writer waiting for credits so return a

@@ -1489,7 +1488,7 @@ int bt_rfcomm_dlc_send(struct bt_rfcomm_dlc *dlc, struct net_buf *buf)
 fcs = rfcomm_calc_fcs(BT_RFCOMM_FCS_LEN_UIH, buf->data);
 net_buf_add_u8(buf, fcs);

-net_buf_put(&dlc->tx_queue, buf);
+k_fifo_put(&dlc->tx_queue, buf);

 return buf->len;
 }

@@ -1748,7 +1747,7 @@ int bt_rfcomm_dlc_disconnect(struct bt_rfcomm_dlc *dlc)
 * and stop the tx thread.
 */
 dlc->state = BT_RFCOMM_STATE_USER_DISCONNECT;
-net_buf_put(&dlc->tx_queue,
+k_fifo_put(&dlc->tx_queue,
 net_buf_alloc(&dummy_pool, K_NO_WAIT));

 k_work_reschedule(&dlc->rtx_work, RFCOMM_DISC_TIMEOUT);
@@ -356,7 +356,7 @@ int bt_hci_cmd_send(uint16_t opcode, struct net_buf *buf)
 return err;
 }

-net_buf_put(&bt_dev.cmd_tx_queue, buf);
+k_fifo_put(&bt_dev.cmd_tx_queue, buf);
 bt_tx_irq_raise();

 return 0;

@@ -392,7 +392,7 @@ int bt_hci_cmd_send_sync(uint16_t opcode, struct net_buf *buf,
 k_sem_init(&sync_sem, 0, 1);
 cmd(buf)->sync = &sync_sem;

-net_buf_put(&bt_dev.cmd_tx_queue, net_buf_ref(buf));
+k_fifo_put(&bt_dev.cmd_tx_queue, net_buf_ref(buf));
 bt_tx_irq_raise();

 /* TODO: disallow sending sync commands from syswq altogether */

@@ -3042,7 +3042,7 @@ static void hci_core_send_cmd(void)
 /* Get next command */
 LOG_DBG("fetch cmd");
-buf = net_buf_get(&bt_dev.cmd_tx_queue, K_NO_WAIT);
+buf = k_fifo_get(&bt_dev.cmd_tx_queue, K_NO_WAIT);
 BT_ASSERT(buf);

 /* Clear out any existing sent command */

@@ -225,7 +225,7 @@ int bt_recv(struct net_buf *buf)
 }

 /* Queue to RAW rx queue */
-net_buf_put(raw_rx, buf);
+k_fifo_put(raw_rx, buf);

 return 0;
 }

@@ -840,7 +840,7 @@ int conn_iso_send(struct bt_conn *conn, struct net_buf *buf, enum bt_iso_timesta
 return -EINVAL;
 }

-net_buf_put(&conn->iso.txq, buf);
+k_fifo_put(&conn->iso.txq, buf);
 BT_ISO_DATA_DBG("%p put on list", buf);

 /* only one ISO channel per conn-object */
@@ -261,7 +261,7 @@ void bt_l2cap_chan_del(struct bt_l2cap_chan *chan)
 * `l2cap_chan_destroy()` as it is not called for fixed channels.
 */
 while (chan_has_data(le_chan)) {
-struct net_buf *buf = net_buf_get(&le_chan->tx_queue, K_NO_WAIT);
+struct net_buf *buf = k_fifo_get(&le_chan->tx_queue, K_NO_WAIT);

 net_buf_unref(buf);
 }

@@ -312,7 +312,7 @@ static void l2cap_rx_process(struct k_work *work)
 struct bt_l2cap_le_chan *ch = CHAN_RX(work);
 struct net_buf *buf;

-while ((buf = net_buf_get(&ch->rx_queue, K_NO_WAIT))) {
+while ((buf = k_fifo_get(&ch->rx_queue, K_NO_WAIT))) {
 LOG_DBG("ch %p buf %p", ch, buf);
 l2cap_chan_le_recv(ch, buf);
 net_buf_unref(buf);

@@ -738,7 +738,7 @@ int bt_l2cap_send_pdu(struct bt_l2cap_le_chan *le_chan, struct net_buf *pdu,
 make_closure(pdu->user_data, cb, user_data);
 LOG_DBG("push: pdu %p len %d cb %p userdata %p", pdu, pdu->len, cb, user_data);

-net_buf_put(&le_chan->tx_queue, pdu);
+k_fifo_put(&le_chan->tx_queue, pdu);

 raise_data_ready(le_chan); /* tis just a flag */

@@ -1299,7 +1299,7 @@ static void l2cap_chan_destroy(struct bt_l2cap_chan *chan)
 }

 /* Remove buffers on the SDU RX queue */
-while ((buf = net_buf_get(&le_chan->rx_queue, K_NO_WAIT))) {
+while ((buf = k_fifo_get(&le_chan->rx_queue, K_NO_WAIT))) {
 net_buf_unref(buf);
 }

@@ -2259,12 +2259,12 @@ static void l2cap_chan_shutdown(struct bt_l2cap_chan *chan)
 }

 /* Remove buffers on the TX queue */
-while ((buf = net_buf_get(&le_chan->tx_queue, K_NO_WAIT))) {
+while ((buf = k_fifo_get(&le_chan->tx_queue, K_NO_WAIT))) {
 l2cap_tx_buf_destroy(chan->conn, buf, -ESHUTDOWN);
 }

 /* Remove buffers on the RX queue */
-while ((buf = net_buf_get(&le_chan->rx_queue, K_NO_WAIT))) {
+while ((buf = k_fifo_get(&le_chan->rx_queue, K_NO_WAIT))) {
 net_buf_unref(buf);
 }

@@ -2669,7 +2669,7 @@ static void l2cap_chan_recv_queue(struct bt_l2cap_le_chan *chan,
 return;
 }

-net_buf_put(&chan->rx_queue, buf);
+k_fifo_put(&chan->rx_queue, buf);
 k_work_submit(&chan->rx_work);
 }
 #endif /* CONFIG_BT_L2CAP_DYNAMIC_CHANNEL */

@@ -3162,7 +3162,7 @@ static int bt_l2cap_dyn_chan_send(struct bt_l2cap_le_chan *le_chan, struct net_b
 net_buf_push_le16(buf, sdu_len);

 /* Put buffer on TX queue */
-net_buf_put(&le_chan->tx_queue, buf);
+k_fifo_put(&le_chan->tx_queue, buf);

 /* Always process the queue in the same context */
 raise_data_ready(le_chan);

@@ -88,7 +88,7 @@ static void l2cap_recv_cb(struct k_work *work)
 struct l2ch *c = L2CH_WORK(work);
 struct net_buf *buf;

-while ((buf = net_buf_get(&l2cap_recv_fifo, K_NO_WAIT))) {
+while ((buf = k_fifo_get(&l2cap_recv_fifo, K_NO_WAIT))) {
 shell_print(ctx_shell, "Confirming reception");
 bt_l2cap_chan_recv_complete(&c->ch.chan, buf);
 }

@@ -116,7 +116,7 @@ static int l2cap_recv(struct bt_l2cap_chan *chan, struct net_buf *buf)
 l2cap_recv_delay_ms);
 }

-net_buf_put(&l2cap_recv_fifo, buf);
+k_fifo_put(&l2cap_recv_fifo, buf);
 k_work_schedule(&l2ch->recv_work, K_MSEC(l2cap_recv_delay_ms));

 return -EINPROGRESS;
@@ -278,7 +278,7 @@ static void receive_state_machine(struct isotp_recv_ctx *rctx)
 ud_rem_len = net_buf_user_data(rctx->buf);
 *ud_rem_len = 0;
 LOG_DBG("SM process SF of length %d", rctx->length);
-net_buf_put(&rctx->fifo, rctx->buf);
+k_fifo_put(&rctx->fifo, rctx->buf);
 rctx->state = ISOTP_RX_STATE_RECYCLE;
 receive_state_machine(rctx);
 break;

@@ -300,7 +300,7 @@ static void receive_state_machine(struct isotp_recv_ctx *rctx)
 rctx->bs = rctx->opts.bs;
 ud_rem_len = net_buf_user_data(rctx->buf);
 *ud_rem_len = rctx->length;
-net_buf_put(&rctx->fifo, rctx->buf);
+k_fifo_put(&rctx->fifo, rctx->buf);
 }

 rctx->wft = ISOTP_WFT_FIRST;

@@ -538,7 +538,7 @@ static void process_cf(struct isotp_recv_ctx *rctx, struct can_frame *frame)
 if (rctx->length == 0) {
 rctx->state = ISOTP_RX_STATE_RECYCLE;
 *ud_rem_len = 0;
-net_buf_put(&rctx->fifo, rctx->buf);
+k_fifo_put(&rctx->fifo, rctx->buf);
 return;
 }

@@ -546,7 +546,7 @@ static void process_cf(struct isotp_recv_ctx *rctx, struct can_frame *frame)
 LOG_DBG("Block is complete. Allocate new buffer");
 rctx->bs = rctx->opts.bs;
 *ud_rem_len = rctx->length;
-net_buf_put(&rctx->fifo, rctx->buf);
+k_fifo_put(&rctx->fifo, rctx->buf);
 rctx->state = ISOTP_RX_STATE_TRY_ALLOC;
 }
 }

@@ -684,7 +684,7 @@ void isotp_unbind(struct isotp_recv_ctx *rctx)
 rctx->state = ISOTP_RX_STATE_UNBOUND;

-while ((buf = net_buf_get(&rctx->fifo, K_NO_WAIT))) {
+while ((buf = k_fifo_get(&rctx->fifo, K_NO_WAIT))) {
 net_buf_unref(buf);
 }

@@ -702,7 +702,7 @@ int isotp_recv_net(struct isotp_recv_ctx *rctx, struct net_buf **buffer, k_timeo
 struct net_buf *buf;
 int ret;

-buf = net_buf_get(&rctx->fifo, timeout);
+buf = k_fifo_get(&rctx->fifo, timeout);
 if (!buf) {
 ret = rctx->error_nr ? rctx->error_nr : ISOTP_RECV_TIMEOUT;
 rctx->error_nr = 0;

@@ -721,7 +721,7 @@ int isotp_recv(struct isotp_recv_ctx *rctx, uint8_t *data, size_t len, k_timeout
 int err;

 if (!rctx->recv_buf) {
-rctx->recv_buf = net_buf_get(&rctx->fifo, timeout);
+rctx->recv_buf = k_fifo_get(&rctx->fifo, timeout);
 if (!rctx->recv_buf) {
 err = rctx->error_nr ? rctx->error_nr : ISOTP_RECV_TIMEOUT;
 rctx->error_nr = 0;
@@ -58,7 +58,7 @@ static void smp_client_handle_reqs(struct k_work *work)
 smp_client = (void *)work;
 smpt = smp_client->smpt;

-while ((nb = net_buf_get(&smp_client->tx_fifo, K_NO_WAIT)) != NULL) {
+while ((nb = k_fifo_get(&smp_client->tx_fifo, K_NO_WAIT)) != NULL) {
 smpt->functions.output(nb);
 }
 }

@@ -111,7 +111,7 @@ static void smp_client_transport_work_fn(struct k_work *work)
 entry->nb = net_buf_ref(entry->nb);
 entry->retry_cnt--;
 entry->timestamp = time_stamp_ref + CONFIG_SMP_CMD_RETRY_TIME;
-net_buf_put(&entry->smp_client->tx_fifo, entry->nb);
+k_fifo_put(&entry->smp_client->tx_fifo, entry->nb);
 smp_tx_req(&entry->smp_client->work);
 continue;
 }

@@ -319,7 +319,7 @@ int smp_client_send_cmd(struct smp_client_object *smp_client, struct net_buf *nb
 /* Increment reference for re-transmission and read smp header */
 nb = net_buf_ref(nb);
 smp_cmd_add_to_list(cmd_req);
-net_buf_put(&smp_client->tx_fifo, nb);
+k_fifo_put(&smp_client->tx_fifo, nb);
 smp_tx_req(&smp_client->work);
 return MGMT_ERR_EOK;
 }

@@ -136,7 +136,7 @@ smp_handle_reqs(struct k_work *work)
 smpt = (void *)work;

 /* Read and handle received messages */
-while ((nb = net_buf_get(&smpt->fifo, K_NO_WAIT)) != NULL) {
+while ((nb = k_fifo_get(&smpt->fifo, K_NO_WAIT)) != NULL) {
 smp_process_packet(smpt, nb);
 }
 }

@@ -199,7 +199,7 @@ void smp_client_transport_register(struct smp_client_transport_entry *entry)
 WEAK void
 smp_rx_req(struct smp_transport *smpt, struct net_buf *nb)
 {
-net_buf_put(&smpt->fifo, nb);
+k_fifo_put(&smpt->fifo, nb);
 k_work_submit_to_queue(&smp_work_queue, &smpt->work);
 }

@@ -230,17 +230,17 @@ void smp_rx_remove_invalid(struct smp_transport *zst, void *arg)
 */
 k_fifo_init(&temp_fifo);

-while ((nb = net_buf_get(&zst->fifo, K_NO_WAIT)) != NULL) {
+while ((nb = k_fifo_get(&zst->fifo, K_NO_WAIT)) != NULL) {
 if (!zst->functions.query_valid_check(nb, arg)) {
 smp_free_buf(nb, zst);
 } else {
-net_buf_put(&temp_fifo, nb);
+k_fifo_put(&temp_fifo, nb);
 }
 }

 /* Re-insert the remaining queued operations into the original FIFO */
-while ((nb = net_buf_get(&temp_fifo, K_NO_WAIT)) != NULL) {
-net_buf_put(&zst->fifo, nb);
+while ((nb = k_fifo_get(&temp_fifo, K_NO_WAIT)) != NULL) {
+k_fifo_put(&zst->fifo, nb);
 }

 /* If at least one entry remains, queue the workqueue for running */

@@ -259,7 +259,7 @@ void smp_rx_clear(struct smp_transport *zst)
 }

 /* Drain the FIFO of all entries without re-adding any */
-while ((nb = net_buf_get(&zst->fifo, K_NO_WAIT)) != NULL) {
+while ((nb = k_fifo_get(&zst->fifo, K_NO_WAIT)) != NULL) {
 smp_free_buf(nb, zst);
 }
 }
@@ -162,7 +162,7 @@ size_t smp_shell_rx_bytes(struct smp_shell_data *data, const uint8_t *bytes,
 if (mcumgr_state == SMP_SHELL_MCUMGR_STATE_PAYLOAD &&
 byte == '\n') {
 if (data->buf) {
-net_buf_put(&data->buf_ready, data->buf);
+k_fifo_put(&data->buf_ready, data->buf);
 data->buf = NULL;
 }
 atomic_clear_bit(&data->esc_state, ESC_MCUMGR_PKT_1);

@@ -187,7 +187,7 @@ void smp_shell_process(struct smp_shell_data *data)
 struct net_buf *nb;

 while (true) {
-buf = net_buf_get(&data->buf_ready, K_NO_WAIT);
+buf = k_fifo_get(&data->buf_ready, K_NO_WAIT);
 if (!buf) {
 break;
 }
@@ -133,7 +133,7 @@ static void hci_tx_thread(void *p1, void *p2, void *p3)
 while (true) {
 struct net_buf *buf;

-buf = net_buf_get(&tx_queue, K_FOREVER);
+buf = k_fifo_get(&tx_queue, K_FOREVER);

 if (IS_ENABLED(CONFIG_USB_DEVICE_BLUETOOTH_VS_H4) &&
 bt_hci_raw_get_mode() == BT_HCI_RAW_MODE_H4) {

@@ -188,7 +188,7 @@ static void hci_rx_thread(void *p1, void *p2, void *p3)
 struct net_buf *buf;
 int err;

-buf = net_buf_get(&rx_queue, K_FOREVER);
+buf = k_fifo_get(&rx_queue, K_FOREVER);

 err = bt_send(buf);
 if (err) {

@@ -296,7 +296,7 @@ static void acl_read_cb(uint8_t ep, int size, void *priv)
 }

 if (buf != NULL && pkt_len == buf->len) {
-net_buf_put(&rx_queue, buf);
+k_fifo_put(&rx_queue, buf);
 LOG_DBG("put");
 buf = NULL;
 pkt_len = 0;

@@ -373,7 +373,7 @@ static uint8_t vs_read_usb_transport_mode(struct net_buf *buf)
 net_buf_add_u8(rsp, BT_HCI_VS_USB_H2_MODE);
 net_buf_add_u8(rsp, BT_HCI_VS_USB_H4_MODE);

-net_buf_put(&tx_queue, rsp);
+k_fifo_put(&tx_queue, rsp);

 return BT_HCI_ERR_EXT_HANDLED;
 }

@@ -430,7 +430,7 @@ static int bluetooth_class_handler(struct usb_setup_packet *setup,
 return -ENOMEM;
 }

-net_buf_put(&rx_queue, buf);
+k_fifo_put(&rx_queue, buf);

 return 0;
 }

@@ -105,7 +105,7 @@ static void bt_h4_read(uint8_t ep, int size, void *priv)
 return;
 }

-net_buf_put(&rx_queue, buf);
+k_fifo_put(&rx_queue, buf);
 }

 /* Start a new read transfer */

@@ -124,7 +124,7 @@ static void hci_tx_thread(void *p1, void *p2, void *p3)
 while (true) {
 struct net_buf *buf;

-buf = net_buf_get(&tx_queue, K_FOREVER);
+buf = k_fifo_get(&tx_queue, K_FOREVER);

 usb_transfer_sync(bt_h4_ep_data[BT_H4_IN_EP_IDX].ep_addr,
 buf->data, buf->len, USB_TRANS_WRITE);

@@ -142,7 +142,7 @@ static void hci_rx_thread(void *p1, void *p2, void *p3)
 while (true) {
 struct net_buf *buf;

-buf = net_buf_get(&rx_queue, K_FOREVER);
+buf = k_fifo_get(&rx_queue, K_FOREVER);
 if (bt_send(buf)) {
 LOG_ERR("Error sending to driver");
 net_buf_unref(buf);
@@ -409,7 +409,7 @@ static void rndis_queue_rsp(struct net_buf *rsp)
 LOG_DBG("Queued response pkt %p", rsp);

-net_buf_put(&rndis_tx_queue, rsp);
+k_fifo_put(&rndis_tx_queue, rsp);
 }

 /* Notify host about available data */

@@ -776,7 +776,7 @@ static int queue_encapsulated_cmd(uint8_t *data, uint32_t len)
 memcpy(net_buf_add(buf, len), data, len);

-net_buf_put(&rndis_cmd_queue, buf);
+k_fifo_put(&rndis_cmd_queue, buf);

 LOG_DBG("queued buf %p", buf);

@@ -827,7 +827,7 @@ static int handle_encapsulated_rsp(uint8_t **data, uint32_t *len)
 LOG_DBG("");

-buf = net_buf_get(&rndis_tx_queue, K_NO_WAIT);
+buf = k_fifo_get(&rndis_tx_queue, K_NO_WAIT);
 if (!buf) {
 LOG_ERR("Error getting response buffer");
 *len = 0U;

@@ -894,7 +894,7 @@ static void cmd_thread(void *p1, void *p2, void *p3)
 while (true) {
 struct net_buf *buf;

-buf = net_buf_get(&rndis_cmd_queue, K_FOREVER);
+buf = k_fifo_get(&rndis_cmd_queue, K_FOREVER);

 LOG_DBG("got buf %p", buf);

@@ -204,7 +204,7 @@ static void bt_hci_tx_thread(void *p1, void *p2, void *p3)
 struct net_buf *bt_buf;
 uint8_t ep;

-bt_buf = net_buf_get(&bt_hci_tx_queue, K_FOREVER);
+bt_buf = k_fifo_get(&bt_hci_tx_queue, K_FOREVER);

 switch (bt_buf_get_type(bt_buf)) {
 case BT_BUF_EVT:

@@ -231,7 +231,7 @@ static void bt_hci_rx_thread(void *a, void *b, void *c)
 int err;

 /* FIXME: Do we need a separate thread for bt_send()? */
-buf = net_buf_get(&bt_hci_rx_queue, K_FOREVER);
+buf = k_fifo_get(&bt_hci_rx_queue, K_FOREVER);

 err = bt_send(buf);
 if (err) {

@@ -354,7 +354,7 @@ static int bt_hci_acl_out_cb(struct usbd_class_data *const c_data,
 }

 if (hci_data->acl_buf != NULL && hci_data->acl_len == hci_data->acl_buf->len) {
-net_buf_put(&bt_hci_rx_queue, hci_data->acl_buf);
+k_fifo_put(&bt_hci_rx_queue, hci_data->acl_buf);
 hci_data->acl_buf = NULL;
 hci_data->acl_len = 0;
 }

@@ -440,7 +440,7 @@ static int bt_hci_ctd(struct usbd_class_data *const c_data,
 return -ENOMEM;
 }

-net_buf_put(&bt_hci_rx_queue, cmd_buf);
+k_fifo_put(&bt_hci_rx_queue, cmd_buf);

 return 0;
 }
@@ -101,7 +101,7 @@ static inline int _zbus_notify_observer(const struct zbus_channel *chan,
 return -ENOMEM;
 }

-net_buf_put(obs->message_fifo, cloned_buf);
+k_fifo_put(obs->message_fifo, cloned_buf);

 break;
 }

@@ -464,7 +464,7 @@ int zbus_sub_wait_msg(const struct zbus_observer *sub, const struct zbus_channel
 _ZBUS_ASSERT(chan != NULL, "chan is required");
 _ZBUS_ASSERT(msg != NULL, "msg is required");

-struct net_buf *buf = net_buf_get(sub->message_fifo, timeout);
+struct net_buf *buf = k_fifo_get(sub->message_fifo, timeout);

 if (buf == NULL) {
 return -ENOMSG;

@@ -106,7 +106,7 @@ static int drv_send(const struct device *dev, struct net_buf *buf)
 LOG_HEXDUMP_DBG(buf->data, buf->len, "buf");

 __ASSERT_NO_MSG(buf);
-net_buf_put(&drv_send_fifo, buf);
+k_fifo_put(&drv_send_fifo, buf);
 return 0;
 }

@@ -213,7 +213,7 @@ ZTEST(hci_uart, test_h2c_cmd_flow_control)
 for (uint16_t i = 0; i < HCI_NORMAL_CMD_BUF_COUNT; i++) {
 /* The mock controller processes a command. */
 {
-struct net_buf *buf = net_buf_get(&drv_send_fifo, TIMEOUT_PRESUME_STUCK);
+struct net_buf *buf = k_fifo_get(&drv_send_fifo, TIMEOUT_PRESUME_STUCK);

 zassert_not_null(buf);
 zassert_equal(buf->len, sizeof(h4_msg_cmd_dummy1) - 1, "Wrong length");

@@ -241,7 +241,7 @@ ZTEST(hci_uart, test_h2c_cmd_flow_control)
 for (uint16_t i = 0; i < TEST_PARAM_HOST_COMPLETE_COUNT; i++) {
 /* The mock controller processes a 'HCI Host Number of Completed Packets'. */
 {
-struct net_buf *buf = net_buf_get(&drv_send_fifo, TIMEOUT_PRESUME_STUCK);
+struct net_buf *buf = k_fifo_get(&drv_send_fifo, TIMEOUT_PRESUME_STUCK);

 zassert_not_null(buf);
 zassert_equal(buf->len, sizeof(h4_msg_cmd_host_num_complete) - 1,
@@ -387,7 +387,7 @@ static void rx_thread(void *p1, void *p2, void *p3)
 struct net_buf *buf;

 /* Wait until a buffer is available */
-buf = net_buf_get(&rx_queue, K_FOREVER);
+buf = k_fifo_get(&rx_queue, K_FOREVER);
 recv(buf);
 }
 }

@@ -363,7 +363,7 @@ static void rx_thread(void *p1, void *p2, void *p3)
 struct net_buf *buf;

 /* Wait until a buffer is available */
-buf = net_buf_get(&rx_queue, K_FOREVER);
+buf = k_fifo_get(&rx_queue, K_FOREVER);
 recv(buf);
 }
 }

@@ -328,7 +328,7 @@ static void rx_thread(void *p1, void *p2, void *p3)
 struct net_buf *buf;

 /* Wait until a buffer is available */
-buf = net_buf_get(&rx_queue, K_FOREVER);
+buf = k_fifo_get(&rx_queue, K_FOREVER);
 recv(buf);
 }
 }

@@ -343,7 +343,7 @@ static void rx_thread(void *p1, void *p2, void *p3)
 struct net_buf *buf;

 /* Wait until a buffer is available */
-buf = net_buf_get(&rx_queue, K_FOREVER);
+buf = k_fifo_get(&rx_queue, K_FOREVER);
 recv(buf);
 }
 }

@@ -341,7 +341,7 @@ static void rx_thread(void *p1, void *p2, void *p3)
 struct net_buf *buf;

 /* Wait until a buffer is available */
-buf = net_buf_get(&rx_queue, K_FOREVER);
+buf = k_fifo_get(&rx_queue, K_FOREVER);
 recv(buf);
 }
 }

@@ -355,7 +355,7 @@ static void rx_thread(void *p1, void *p2, void *p3)
 struct net_buf *buf;

 /* Wait until a buffer is available */
-buf = net_buf_get(&rx_queue, K_FOREVER);
+buf = k_fifo_get(&rx_queue, K_FOREVER);
 recv(buf);
 }
 }
@@ -274,7 +274,7 @@ static void command_status(struct net_buf *buf)
 */
 static void discard_event(void)
 {
-struct net_buf *buf = net_buf_get(&event_queue, K_FOREVER);
+struct net_buf *buf = k_fifo_get(&event_queue, K_FOREVER);

 net_buf_unref(buf);
 m_events--;

@@ -292,7 +292,7 @@ static struct net_buf *queue_event(struct net_buf *buf)
 bt_buf_set_type(evt, BT_BUF_EVT);
 net_buf_add_le32(evt, sys_cpu_to_le32(k_uptime_get()));
 net_buf_add_mem(evt, buf->data, buf->len);
-net_buf_put(&event_queue, evt);
+k_fifo_put(&event_queue, evt);
 m_events++;
 }
 return evt;

@@ -306,7 +306,7 @@ static void service_events(void *p1, void *p2, void *p3)
 struct net_buf *buf, *evt;

 while (1) {
-buf = net_buf_get(&rx_queue, K_FOREVER);
+buf = k_fifo_get(&rx_queue, K_FOREVER);
 if (bt_buf_get_type(buf) == BT_BUF_EVT) {

 evt = queue_event(buf);

@@ -348,7 +348,7 @@ static void service_events(void *p1, void *p2, void *p3)
 net_buf_add_le32(data,
 sys_cpu_to_le32(k_uptime_get()));
 net_buf_add_mem(data, buf->data, buf->len);
-net_buf_put(&data_queue, data);
+k_fifo_put(&data_queue, data);
 }
 #if defined(CONFIG_BT_ISO)
 } else if (bt_buf_get_type(buf) == BT_BUF_ISO_IN) {

@@ -360,7 +360,7 @@ static void service_events(void *p1, void *p2, void *p3)
 net_buf_add_le32(data,
 sys_cpu_to_le32(k_uptime_get()));
 net_buf_add_mem(data, buf->data, buf->len);
-net_buf_put(&iso_data_queue, data);
+k_fifo_put(&iso_data_queue, data);
 }
 #endif /* CONFIG_BT_ISO */
 }

@@ -378,7 +378,7 @@ static void flush_events(uint16_t size)
 uint16_t response = sys_cpu_to_le16(CMD_FLUSH_EVENTS_RSP);
 struct net_buf *buf;

-while ((buf = net_buf_get(&event_queue, K_NO_WAIT))) {
+while ((buf = k_fifo_get(&event_queue, K_NO_WAIT))) {
 net_buf_unref(buf);
 m_events--;
 }

@@ -401,7 +401,7 @@ static void get_event(uint16_t size)
 size = 0;

 edtt_write((uint8_t *)&response, sizeof(response), EDTTT_BLOCK);
-buf = net_buf_get(&event_queue, K_FOREVER);
+buf = k_fifo_get(&event_queue, K_FOREVER);
 if (buf) {
 size = sys_cpu_to_le16(buf->len);
 edtt_write((uint8_t *)&size, sizeof(size), EDTTT_BLOCK);

@@ -428,7 +428,7 @@ static void get_events(uint16_t size)
 edtt_write((uint8_t *)&response, sizeof(response), EDTTT_BLOCK);
 edtt_write((uint8_t *)&count, sizeof(count), EDTTT_BLOCK);
 while (count--) {
-buf = net_buf_get(&event_queue, K_FOREVER);
+buf = k_fifo_get(&event_queue, K_FOREVER);
 size = sys_cpu_to_le16(buf->len);
 edtt_write((uint8_t *)&size, sizeof(size), EDTTT_BLOCK);
 edtt_write((uint8_t *)buf->data, buf->len, EDTTT_BLOCK);

@@ -467,7 +467,7 @@ static void le_flush_data(uint16_t size)
 uint16_t response = sys_cpu_to_le16(CMD_LE_FLUSH_DATA_RSP);
 struct net_buf *buf;

-while ((buf = net_buf_get(&data_queue, K_NO_WAIT))) {
+while ((buf = k_fifo_get(&data_queue, K_NO_WAIT))) {
 net_buf_unref(buf);
 }
 read_excess_bytes(size);

@@ -514,7 +514,7 @@ static void le_data_read(uint16_t size)
 size = 0;

 edtt_write((uint8_t *)&response, sizeof(response), EDTTT_BLOCK);
-buf = net_buf_get(&data_queue, K_FOREVER);
+buf = k_fifo_get(&data_queue, K_FOREVER);
 if (buf) {
 size = sys_cpu_to_le16(buf->len);
 edtt_write((uint8_t *)&size, sizeof(size), EDTTT_BLOCK);

@@ -584,7 +584,7 @@ static void le_flush_iso_data(uint16_t size)
 uint16_t response = sys_cpu_to_le16(CMD_LE_FLUSH_ISO_DATA_RSP);
 struct net_buf *buf;

-while ((buf = net_buf_get(&iso_data_queue, K_NO_WAIT))) {
+while ((buf = k_fifo_get(&iso_data_queue, K_NO_WAIT))) {
 net_buf_unref(buf);
 }
 read_excess_bytes(size);

@@ -631,7 +631,7 @@ static void le_iso_data_read(uint16_t size)
 size = 0;

 edtt_write((uint8_t *)&response, sizeof(response), EDTTT_BLOCK);
-buf = net_buf_get(&iso_data_queue, K_FOREVER);
+buf = k_fifo_get(&iso_data_queue, K_FOREVER);
 if (buf) {
 size = sys_cpu_to_le16(buf->len);
 edtt_write((uint8_t *)&size, sizeof(size), EDTTT_BLOCK);
@@ -140,8 +140,8 @@ ZTEST(net_buf_tests, test_net_buf_2)
 }

 k_fifo_init(&fifo);
-net_buf_put(&fifo, head);
-head = net_buf_get(&fifo, K_NO_WAIT);
+k_fifo_put(&fifo, head);
+head = k_fifo_get(&fifo, K_NO_WAIT);

 destroy_called = 0;
 net_buf_unref(head);

@@ -159,7 +159,7 @@ static void test_3_thread(void *arg1, void *arg2, void *arg3)
 k_sem_give(sema);

-buf = net_buf_get(fifo, TEST_TIMEOUT);
+buf = k_fifo_get(fifo, TEST_TIMEOUT);
 zassert_not_null(buf, "Unable to get buffer");

 destroy_called = 0;

@@ -201,7 +201,7 @@ ZTEST(net_buf_tests, test_net_buf_3)
 zassert_true(k_sem_take(&sema, TEST_TIMEOUT) == 0,
 "Timeout while waiting for semaphore");

-net_buf_put(&fifo, head);
+k_fifo_put(&fifo, head);

 zassert_true(k_sem_take(&sema, TEST_TIMEOUT) == 0,
 "Timeout while waiting for semaphore");