From 50596320d44a77abd1b3238aa5e2805216ae11e0 Mon Sep 17 00:00:00 2001
From: Tahsin Mutlugun
Date: Mon, 11 Dec 2023 15:26:28 +0300
Subject: [PATCH] drivers: serial: uart_max32: Add async mode support

This commit adds asynchronous mode support to the MAX32 UART driver.
Each direction uses a single DMA channel that is assigned in the
devicetree configuration. Asynchronous mode also depends on interrupts
to refresh receive timeouts.

Signed-off-by: Tahsin Mutlugun
---
 drivers/serial/Kconfig.max32            |   2 +
 drivers/serial/uart_max32.c             | 570 ++++++++++++++++++++++--
 dts/bindings/serial/adi,max32-uart.yaml |  16 +
 3 files changed, 562 insertions(+), 26 deletions(-)

diff --git a/drivers/serial/Kconfig.max32 b/drivers/serial/Kconfig.max32
index 99b5c7b45ef..c50199bc856 100644
--- a/drivers/serial/Kconfig.max32
+++ b/drivers/serial/Kconfig.max32
@@ -10,6 +10,8 @@ config UART_MAX32
 	select SERIAL_HAS_DRIVER
 	select SERIAL_SUPPORT_INTERRUPT
 	select PINCTRL
+	select SERIAL_SUPPORT_ASYNC if DT_HAS_ADI_MAX32_DMA_ENABLED
+	select DMA if UART_ASYNC_API
 	help
 	  This option enables the UART driver for MAX32 family of processors.

diff --git a/drivers/serial/uart_max32.c b/drivers/serial/uart_max32.c
index 280c5997b63..1b5a4ad250f 100644
--- a/drivers/serial/uart_max32.c
+++ b/drivers/serial/uart_max32.c
@@ -4,6 +4,9 @@
  * SPDX-License-Identifier: Apache-2.0
  */

+#ifdef CONFIG_UART_ASYNC_API
+#include <zephyr/drivers/dma.h>
+#endif
 #include
 #include
 #include
@@ -16,23 +19,65 @@

 LOG_MODULE_REGISTER(uart_max32, CONFIG_UART_LOG_LEVEL);

+#ifdef CONFIG_UART_ASYNC_API
+struct max32_uart_dma_config {
+	const struct device *dev;
+	const uint32_t channel;
+	const uint32_t slot;
+};
+#endif /* CONFIG_UART_ASYNC_API */
+
 struct max32_uart_config {
 	mxc_uart_regs_t *regs;
 	const struct pinctrl_dev_config *pctrl;
 	const struct device *clock;
 	struct max32_perclk perclk;
 	struct uart_config uart_conf;
-#ifdef CONFIG_UART_INTERRUPT_DRIVEN
+#if defined(CONFIG_UART_INTERRUPT_DRIVEN) || defined(CONFIG_UART_ASYNC_API)
 	uart_irq_config_func_t irq_config_func;
-#endif /* CONFIG_UART_INTERRUPT_DRIVEN */
+#endif /* CONFIG_UART_INTERRUPT_DRIVEN || CONFIG_UART_ASYNC_API */
+#ifdef CONFIG_UART_ASYNC_API
+	const struct max32_uart_dma_config tx_dma;
+	const struct max32_uart_dma_config rx_dma;
+#endif /* CONFIG_UART_ASYNC_API */
 };

+#ifdef CONFIG_UART_ASYNC_API
+struct max32_uart_async_tx {
+	const uint8_t *buf;
+	size_t len;
+	struct k_work_delayable timeout_work;
+};
+
+struct max32_uart_async_rx {
+	uint8_t *buf;
+	size_t len;
+	size_t offset;
+	size_t counter;
+	uint8_t *next_buf;
+	size_t next_len;
+	int32_t timeout;
+	struct k_work_delayable timeout_work;
+};
+
+struct max32_uart_async_data {
+	const struct device *uart_dev;
+	struct max32_uart_async_tx tx;
+	struct max32_uart_async_rx rx;
+	uart_callback_t cb;
+	void *user_data;
+};
+#endif
+
 struct max32_uart_data {
 #ifdef CONFIG_UART_INTERRUPT_DRIVEN
 	uart_irq_callback_user_data_t cb; /* Interrupt callback */
 	void *cb_data;                    /* Interrupt callback arg */
 	uint32_t flags;                   /* Cached interrupt flags */
 	uint32_t status;                  /* Cached status flags */
+#endif
+#ifdef CONFIG_UART_ASYNC_API
+	struct max32_uart_async_data async;
 #endif
 	struct uart_config conf; /* baudrate, stopbits, ...
*/ }; @@ -207,11 +252,19 @@ static int api_config_get(const struct device *dev, struct uart_config *uart_cfg #endif /* CONFIG_UART_USE_RUNTIME_CONFIGURE */ +#ifdef CONFIG_UART_ASYNC_API +static void uart_max32_async_tx_timeout(struct k_work *work); +static void uart_max32_async_rx_timeout(struct k_work *work); +#endif /* CONFIG_UART_ASYNC_API */ + static int uart_max32_init(const struct device *dev) { int ret; const struct max32_uart_config *const cfg = dev->config; mxc_uart_regs_t *regs = cfg->regs; +#ifdef CONFIG_UART_ASYNC_API + struct max32_uart_data *data = dev->data; +#endif if (!device_is_ready(cfg->clock)) { LOG_ERR("Clock control device not ready"); @@ -244,12 +297,20 @@ static int uart_max32_init(const struct device *dev) return ret; } -#ifdef CONFIG_UART_INTERRUPT_DRIVEN +#if defined(CONFIG_UART_INTERRUPT_DRIVEN) || defined(CONFIG_UART_ASYNC_API) /* Clear any pending UART RX/TX interrupts */ MXC_UART_ClearFlags(regs, (ADI_MAX32_UART_INT_RX | ADI_MAX32_UART_INT_TX)); cfg->irq_config_func(dev); #endif +#ifdef CONFIG_UART_ASYNC_API + data->async.uart_dev = dev; + k_work_init_delayable(&data->async.tx.timeout_work, uart_max32_async_tx_timeout); + k_work_init_delayable(&data->async.rx.timeout_work, uart_max32_async_rx_timeout); + data->async.rx.len = 0; + data->async.rx.offset = 0; +#endif + return ret; } @@ -307,20 +368,6 @@ static int api_irq_tx_ready(const struct device *dev) !(data->status & MXC_F_UART_STATUS_TX_FULL)); } -static void api_irq_rx_enable(const struct device *dev) -{ - const struct max32_uart_config *cfg = dev->config; - - MXC_UART_EnableInt(cfg->regs, ADI_MAX32_UART_INT_RX); -} - -static void api_irq_rx_disable(const struct device *dev) -{ - const struct max32_uart_config *cfg = dev->config; - - MXC_UART_DisableInt(cfg->regs, ADI_MAX32_UART_INT_RX); -} - static int api_irq_tx_complete(const struct device *dev) { const struct max32_uart_config *cfg = dev->config; @@ -378,22 +425,459 @@ static int api_irq_update(const struct device *dev) static void api_irq_callback_set(const struct device *dev, uart_irq_callback_user_data_t cb, void *cb_data) { - struct max32_uart_data *const data = dev->data; + struct max32_uart_data *const dev_data = dev->data; - data->cb = cb; - data->cb_data = cb_data; + dev_data->cb = cb; + dev_data->cb_data = cb_data; +} + +#endif /* CONFIG_UART_INTERRUPT_DRIVEN */ + +#if defined(CONFIG_UART_INTERRUPT_DRIVEN) || defined(CONFIG_UART_ASYNC_API) +static void api_irq_rx_enable(const struct device *dev) +{ + const struct max32_uart_config *cfg = dev->config; + + MXC_UART_EnableInt(cfg->regs, ADI_MAX32_UART_INT_RX); +} + +static void api_irq_rx_disable(const struct device *dev) +{ + const struct max32_uart_config *cfg = dev->config; + + MXC_UART_DisableInt(cfg->regs, ADI_MAX32_UART_INT_RX); } static void uart_max32_isr(const struct device *dev) { struct max32_uart_data *data = dev->data; + const struct max32_uart_config *cfg = dev->config; + uint32_t intfl; + intfl = MXC_UART_GetFlags(cfg->regs); + +#ifdef CONFIG_UART_INTERRUPT_DRIVEN if (data->cb) { data->cb(dev, data->cb_data); } +#endif /* CONFIG_UART_INTERRUPT_DRIVEN */ + +#ifdef CONFIG_UART_ASYNC_API + if (data->async.rx.timeout != SYS_FOREVER_US && data->async.rx.timeout != 0 && + (intfl & ADI_MAX32_UART_INT_RX)) { + k_work_reschedule(&data->async.rx.timeout_work, K_USEC(data->async.rx.timeout)); + } +#endif /* CONFIG_UART_ASYNC_API */ + + /* Clear RX/TX interrupts flag after cb is called */ + MXC_UART_ClearFlags(cfg->regs, intfl); +} +#endif /* CONFIG_UART_INTERRUPT_DRIVEN || 
CONFIG_UART_ASYNC_API */ + +#if defined(CONFIG_UART_ASYNC_API) + +static inline void async_timer_start(struct k_work_delayable *work, int32_t timeout) +{ + if ((timeout != SYS_FOREVER_US) && (timeout != 0)) { + k_work_reschedule(work, K_USEC(timeout)); + } } -#endif /* CONFIG_UART_INTERRUPT_DRIVEN */ +static void async_user_callback(const struct device *dev, struct uart_event *evt) +{ + const struct max32_uart_data *data = dev->data; + + if (data->async.cb) { + data->async.cb(dev, evt, data->async.user_data); + } +} + +static void uart_max32_async_tx_callback(const struct device *dma_dev, void *user_data, + uint32_t channel, int status) +{ + const struct device *dev = user_data; + const struct max32_uart_config *config = dev->config; + struct max32_uart_data *data = dev->data; + struct dma_status dma_stat; + + unsigned int key = irq_lock(); + + dma_get_status(config->tx_dma.dev, config->tx_dma.channel, &dma_stat); + /* Skip callback if channel is still busy */ + if (dma_stat.busy) { + irq_unlock(key); + return; + } + + k_work_cancel_delayable(&data->async.tx.timeout_work); + Wrap_MXC_UART_DisableTxDMA(config->regs); + + irq_unlock(key); + + struct uart_event tx_done = { + .type = status == 0 ? UART_TX_DONE : UART_TX_ABORTED, + .data.tx.buf = data->async.tx.buf, + .data.tx.len = data->async.tx.len, + }; + async_user_callback(dev, &tx_done); +} + +static int api_callback_set(const struct device *dev, uart_callback_t callback, void *user_data) +{ + struct max32_uart_data *data = dev->data; + + data->async.cb = callback; + data->async.user_data = user_data; + + return 0; +} + +static int api_tx(const struct device *dev, const uint8_t *buf, size_t len, int32_t timeout) +{ + struct max32_uart_data *data = dev->data; + const struct max32_uart_config *config = dev->config; + struct dma_status dma_stat; + struct dma_config dma_cfg = {0}; + struct dma_block_config dma_blk = {0}; + int ret; + unsigned int key = irq_lock(); + + if (config->tx_dma.channel == 0xFF) { + LOG_ERR("Tx DMA channel is not configured"); + irq_unlock(key); + return -ENOTSUP; + } + + ret = dma_get_status(config->tx_dma.dev, config->tx_dma.channel, &dma_stat); + if (ret < 0 || dma_stat.busy) { + LOG_ERR("DMA Tx %s", ret < 0 ? "error" : "busy"); + irq_unlock(key); + return ret < 0 ? 
ret : -EBUSY; + } + + data->async.tx.buf = buf; + data->async.tx.len = len; + + dma_cfg.channel_direction = MEMORY_TO_PERIPHERAL; + dma_cfg.dma_callback = uart_max32_async_tx_callback; + dma_cfg.user_data = (void *)dev; + dma_cfg.dma_slot = config->tx_dma.slot; + dma_cfg.block_count = 1; + dma_cfg.source_data_size = 1U; + dma_cfg.source_burst_length = 1U; + dma_cfg.dest_data_size = 1U; + dma_cfg.head_block = &dma_blk; + dma_blk.block_size = len; + dma_blk.source_address = (uint32_t)buf; + + ret = dma_config(config->tx_dma.dev, config->tx_dma.channel, &dma_cfg); + if (ret < 0) { + LOG_ERR("Error configuring Tx DMA (%d)", ret); + irq_unlock(key); + return ret; + } + + ret = dma_start(config->tx_dma.dev, config->tx_dma.channel); + if (ret < 0) { + LOG_ERR("Error starting Tx DMA (%d)", ret); + irq_unlock(key); + return ret; + } + + async_timer_start(&data->async.tx.timeout_work, timeout); + + Wrap_MXC_UART_SetTxDMALevel(config->regs, 2); + Wrap_MXC_UART_EnableTxDMA(config->regs); + + irq_unlock(key); + + return ret; +} + +static int api_tx_abort(const struct device *dev) +{ + int ret; + struct max32_uart_data *data = dev->data; + const struct max32_uart_config *config = dev->config; + struct dma_status dma_stat; + size_t bytes_sent; + + unsigned int key = irq_lock(); + + k_work_cancel_delayable(&data->async.tx.timeout_work); + + Wrap_MXC_UART_DisableTxDMA(config->regs); + + ret = dma_get_status(config->tx_dma.dev, config->tx_dma.channel, &dma_stat); + if (!dma_stat.busy) { + irq_unlock(key); + return 0; + } + + bytes_sent = (ret == 0) ? (data->async.tx.len - dma_stat.pending_length) : 0; + + ret = dma_stop(config->tx_dma.dev, config->tx_dma.channel); + + irq_unlock(key); + + if (ret == 0) { + struct uart_event tx_aborted = { + .type = UART_TX_ABORTED, + .data.tx.buf = data->async.tx.buf, + .data.tx.len = bytes_sent, + }; + async_user_callback(dev, &tx_aborted); + } + + return 0; +} + +static void uart_max32_async_tx_timeout(struct k_work *work) +{ + struct k_work_delayable *dwork = k_work_delayable_from_work(work); + struct max32_uart_async_tx *tx = + CONTAINER_OF(dwork, struct max32_uart_async_tx, timeout_work); + struct max32_uart_async_data *async = CONTAINER_OF(tx, struct max32_uart_async_data, tx); + struct max32_uart_data *data = CONTAINER_OF(async, struct max32_uart_data, async); + + api_tx_abort(data->async.uart_dev); +} + +static int api_rx_disable(const struct device *dev) +{ + struct max32_uart_data *data = dev->data; + const struct max32_uart_config *config = dev->config; + int ret; + unsigned int key = irq_lock(); + + k_work_cancel_delayable(&data->async.rx.timeout_work); + + Wrap_MXC_UART_DisableRxDMA(config->regs); + + ret = dma_stop(config->rx_dma.dev, config->rx_dma.channel); + if (ret) { + LOG_ERR("Error stopping Rx DMA (%d)", ret); + irq_unlock(key); + return ret; + } + + api_irq_rx_disable(dev); + + irq_unlock(key); + + /* Release current buffer event */ + struct uart_event rel_event = { + .type = UART_RX_BUF_RELEASED, + .data.rx_buf.buf = data->async.rx.buf, + }; + async_user_callback(dev, &rel_event); + + /* Disable RX event */ + struct uart_event rx_disabled = {.type = UART_RX_DISABLED}; + + async_user_callback(dev, &rx_disabled); + + data->async.rx.buf = NULL; + data->async.rx.len = 0; + data->async.rx.counter = 0; + data->async.rx.offset = 0; + + if (data->async.rx.next_buf) { + /* Release next buffer event */ + struct uart_event next_rel_event = { + .type = UART_RX_BUF_RELEASED, + .data.rx_buf.buf = data->async.rx.next_buf, + }; + async_user_callback(dev, 
&next_rel_event); + data->async.rx.next_buf = NULL; + data->async.rx.next_len = 0; + } + + return 0; +} + +static void uart_max32_async_rx_callback(const struct device *dma_dev, void *user_data, + uint32_t channel, int status) +{ + const struct device *dev = user_data; + const struct max32_uart_config *config = dev->config; + struct max32_uart_data *data = dev->data; + struct max32_uart_async_data *async = &data->async; + struct dma_status dma_stat; + size_t total_rx; + + unsigned int key = irq_lock(); + + dma_get_status(config->rx_dma.dev, config->rx_dma.channel, &dma_stat); + total_rx = async->rx.len - dma_stat.pending_length; + + api_irq_rx_disable(dev); + + irq_unlock(key); + + if (total_rx > async->rx.offset) { + async->rx.counter = total_rx - async->rx.offset; + + struct uart_event rdy_event = { + .type = UART_RX_RDY, + .data.rx.buf = async->rx.buf, + .data.rx.len = async->rx.counter, + .data.rx.offset = async->rx.offset, + }; + async_user_callback(dev, &rdy_event); + } + + if (async->rx.next_buf) { + async->rx.offset = 0; + async->rx.counter = 0; + + struct uart_event rel_event = { + .type = UART_RX_BUF_RELEASED, + .data.rx_buf.buf = async->rx.buf, + }; + async_user_callback(dev, &rel_event); + + async->rx.buf = async->rx.next_buf; + async->rx.len = async->rx.next_len; + + async->rx.next_buf = NULL; + async->rx.next_len = 0; + struct uart_event req_event = { + .type = UART_RX_BUF_REQUEST, + }; + async_user_callback(dev, &req_event); + + dma_reload(config->rx_dma.dev, config->rx_dma.channel, config->rx_dma.slot, + (uint32_t)async->rx.buf, async->rx.len); + dma_start(config->rx_dma.dev, config->rx_dma.channel); + + api_irq_rx_enable(dev); + async_timer_start(&async->rx.timeout_work, async->rx.timeout); + } else { + api_rx_disable(dev); + } +} + +static int api_rx_enable(const struct device *dev, uint8_t *buf, size_t len, int32_t timeout) +{ + struct max32_uart_data *data = dev->data; + const struct max32_uart_config *config = dev->config; + struct dma_status dma_stat; + struct dma_config dma_cfg = {0}; + struct dma_block_config dma_blk = {0}; + int ret; + + unsigned int key = irq_lock(); + + if (config->rx_dma.channel == 0xFF) { + LOG_ERR("Rx DMA channel is not configured"); + irq_unlock(key); + return -ENOTSUP; + } + + ret = dma_get_status(config->rx_dma.dev, config->rx_dma.channel, &dma_stat); + if (ret < 0 || dma_stat.busy) { + LOG_ERR("DMA Rx %s", ret < 0 ? "error" : "busy"); + irq_unlock(key); + return ret < 0 ? 
ret : -EBUSY;
+	}
+
+	data->async.rx.buf = buf;
+	data->async.rx.len = len;
+
+	dma_cfg.channel_direction = PERIPHERAL_TO_MEMORY;
+	dma_cfg.dma_callback = uart_max32_async_rx_callback;
+	dma_cfg.user_data = (void *)dev;
+	dma_cfg.dma_slot = config->rx_dma.slot;
+	dma_cfg.block_count = 1;
+	dma_cfg.source_data_size = 1U;
+	dma_cfg.source_burst_length = 1U;
+	dma_cfg.dest_data_size = 1U;
+	dma_cfg.head_block = &dma_blk;
+	dma_blk.block_size = len;
+	dma_blk.dest_address = (uint32_t)buf;
+
+	ret = dma_config(config->rx_dma.dev, config->rx_dma.channel, &dma_cfg);
+	if (ret < 0) {
+		LOG_ERR("Error configuring Rx DMA (%d)", ret);
+		irq_unlock(key);
+		return ret;
+	}
+
+	ret = dma_start(config->rx_dma.dev, config->rx_dma.channel);
+	if (ret < 0) {
+		LOG_ERR("Error starting Rx DMA (%d)", ret);
+		irq_unlock(key);
+		return ret;
+	}
+
+	data->async.rx.timeout = timeout;
+
+	Wrap_MXC_UART_SetRxDMALevel(config->regs, 1);
+	Wrap_MXC_UART_EnableRxDMA(config->regs);
+
+	struct uart_event buf_req = {
+		.type = UART_RX_BUF_REQUEST,
+	};
+
+	async_user_callback(dev, &buf_req);
+
+	api_irq_rx_enable(dev);
+	async_timer_start(&data->async.rx.timeout_work, timeout);
+
+	irq_unlock(key);
+	return ret;
+}
+
+static int api_rx_buf_rsp(const struct device *dev, uint8_t *buf, size_t len)
+{
+	struct max32_uart_data *data = dev->data;
+
+	data->async.rx.next_buf = buf;
+	data->async.rx.next_len = len;
+
+	return 0;
+}
+
+static void uart_max32_async_rx_timeout(struct k_work *work)
+{
+	struct k_work_delayable *dwork = k_work_delayable_from_work(work);
+	struct max32_uart_async_rx *rx =
+		CONTAINER_OF(dwork, struct max32_uart_async_rx, timeout_work);
+	struct max32_uart_async_data *async = CONTAINER_OF(rx, struct max32_uart_async_data, rx);
+	struct max32_uart_data *data = CONTAINER_OF(async, struct max32_uart_data, async);
+	const struct max32_uart_config *config = data->async.uart_dev->config;
+	struct dma_status dma_stat;
+	uint32_t total_rx;
+
+	unsigned int key = irq_lock();
+
+	dma_get_status(config->rx_dma.dev, config->rx_dma.channel, &dma_stat);
+
+	api_irq_rx_disable(data->async.uart_dev);
+	k_work_cancel_delayable(&data->async.rx.timeout_work);
+
+	irq_unlock(key);
+
+	total_rx = async->rx.len - dma_stat.pending_length;
+
+	if (total_rx > async->rx.offset) {
+		async->rx.counter = total_rx - async->rx.offset;
+		struct uart_event rdy_event = {
+			.type = UART_RX_RDY,
+			.data.rx.buf = async->rx.buf,
+			.data.rx.len = async->rx.counter,
+			.data.rx.offset = async->rx.offset,
+		};
+		async_user_callback(async->uart_dev, &rdy_event);
+	}
+	async->rx.offset += async->rx.counter;
+	async->rx.counter = 0;
+
+	api_irq_rx_enable(data->async.uart_dev);
+}
+
+#endif

 static DEVICE_API(uart, uart_max32_driver_api) = {
 	.poll_in = api_poll_in,
@@ -419,12 +903,46 @@ static DEVICE_API(uart, uart_max32_driver_api) = {
 	.irq_update = api_irq_update,
 	.irq_callback_set = api_irq_callback_set,
 #endif /* CONFIG_UART_INTERRUPT_DRIVEN */
+#ifdef CONFIG_UART_ASYNC_API
+	.callback_set = api_callback_set,
+	.tx = api_tx,
+	.tx_abort = api_tx_abort,
+	.rx_enable = api_rx_enable,
+	.rx_buf_rsp = api_rx_buf_rsp,
+	.rx_disable = api_rx_disable,
+#endif /* CONFIG_UART_ASYNC_API */
 };

+#ifdef CONFIG_UART_ASYNC_API
+#define MAX32_DT_INST_DMA_CTLR(n, name)                                          \
+	COND_CODE_1(DT_INST_NODE_HAS_PROP(n, dmas),                              \
+		    (DEVICE_DT_GET(DT_INST_DMAS_CTLR_BY_NAME(n, name))), (NULL))
+
+#define MAX32_DT_INST_DMA_CELL(n, name, cell)                                    \
+	COND_CODE_1(DT_INST_NODE_HAS_PROP(n, dmas),                              \
+		    (DT_INST_DMAS_CELL_BY_NAME(n, name, cell)), (0xff))
+
+#define MAX32_UART_DMA_INIT(n)                                                   \
+	.tx_dma.dev = MAX32_DT_INST_DMA_CTLR(n, tx),                             \
+	.tx_dma.channel = MAX32_DT_INST_DMA_CELL(n, tx, channel),                \
+	.tx_dma.slot = MAX32_DT_INST_DMA_CELL(n, tx, slot),                      \
+	.rx_dma.dev = MAX32_DT_INST_DMA_CTLR(n, rx),                             \
+	.rx_dma.channel = MAX32_DT_INST_DMA_CELL(n, rx, channel),                \
+	.rx_dma.slot = MAX32_DT_INST_DMA_CELL(n, rx, slot),
+#else
+#define MAX32_UART_DMA_INIT(n)
+#endif
+
+#if defined(CONFIG_UART_INTERRUPT_DRIVEN) || defined(CONFIG_UART_ASYNC_API)
+#define MAX32_UART_USE_IRQ 1
+#else
+#define MAX32_UART_USE_IRQ 0
+#endif
+
 #define MAX32_UART_INIT(_num)                                                    \
 	PINCTRL_DT_INST_DEFINE(_num);                                            \
-	IF_ENABLED(CONFIG_UART_INTERRUPT_DRIVEN,                                 \
-		   (static void uart_max32_irq_init_##_num(const struct device *dev) \
+	IF_ENABLED(MAX32_UART_USE_IRQ,                                           \
+		   (static void uart_max32_irq_init_##_num(const struct device *dev) \
 	{                                                                        \
 		IRQ_CONNECT(DT_INST_IRQN(_num), DT_INST_IRQ(_num, priority),     \
 			    uart_max32_isr, DEVICE_DT_INST_GET(_num), 0);        \
@@ -443,9 +961,9 @@ static DEVICE_API(uart, uart_max32_driver_api) = {
 		.uart_conf.data_bits = DT_INST_ENUM_IDX(_num, data_bits),        \
 		.uart_conf.stop_bits = DT_INST_ENUM_IDX(_num, stop_bits),        \
 		.uart_conf.flow_ctrl =                                           \
-			DT_INST_PROP_OR(_num, hw_flow_control, UART_CFG_FLOW_CTRL_NONE), \
-		IF_ENABLED(CONFIG_UART_INTERRUPT_DRIVEN,                         \
-			   (.irq_config_func = uart_max32_irq_init_##_num,))};   \
+			DT_INST_PROP_OR(_num, hw_flow_control, UART_CFG_FLOW_CTRL_NONE), \
+		MAX32_UART_DMA_INIT(_num) IF_ENABLED(                            \
+			MAX32_UART_USE_IRQ, (.irq_config_func = uart_max32_irq_init_##_num,))}; \
 	static struct max32_uart_data max32_uart_data##_num = {                  \
 		IF_ENABLED(CONFIG_UART_INTERRUPT_DRIVEN, (.cb = NULL,))};        \
 	DEVICE_DT_INST_DEFINE(_num, uart_max32_init, NULL, &max32_uart_data##_num, \
diff --git a/dts/bindings/serial/adi,max32-uart.yaml b/dts/bindings/serial/adi,max32-uart.yaml
index 0b106b24ac8..07ebaffc80b 100644
--- a/dts/bindings/serial/adi,max32-uart.yaml
+++ b/dts/bindings/serial/adi,max32-uart.yaml
@@ -49,3 +49,19 @@ properties:
     description: |
       Sets the number of data bits. Defaults to standard of 8 if not specified.
     default: 8
+
+  dmas:
+    description: |
+      DMA configuration used by the asynchronous UART API. Each entry consists
+      of a DMA instance, a channel number and a matching DMA slot.
+
+      For example, the TX and RX dmas for UART2:
+        dmas = <&dma0 1 MAX32_DMA_SLOT_UART2_TX>, <&dma0 2 MAX32_DMA_SLOT_UART2_RX>;
+
+  dma-names:
+    description: |
+      Required if the dmas property exists. This should be "tx" and "rx"
+      to match the dmas property.
+
+      For example:
+        dma-names = "tx", "rx";
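
Note (not part of the patch itself): the sketch below shows how an application could exercise the new asynchronous mode through Zephyr's generic async UART API, which this patch implements for MAX32. The node label uart2, the buffer size and the timeout values are illustrative assumptions only; they depend on the board overlay providing dmas/dma-names as in the binding example above and on CONFIG_UART_ASYNC_API=y.

	/* Minimal async UART usage sketch (illustrative, not part of the driver). */
	#include <zephyr/device.h>
	#include <zephyr/drivers/uart.h>
	#include <zephyr/kernel.h>
	#include <zephyr/sys/printk.h>

	/* Assumed node label; replace with the UART instance used on the board. */
	static const struct device *const uart_dev = DEVICE_DT_GET(DT_NODELABEL(uart2));
	static uint8_t rx_buf[64];

	static void uart_cb(const struct device *dev, struct uart_event *evt, void *user_data)
	{
		switch (evt->type) {
		case UART_TX_DONE:
			printk("TX done: %u bytes\n", (unsigned int)evt->data.tx.len);
			break;
		case UART_RX_RDY:
			/* evt->data.rx.buf + evt->data.rx.offset holds evt->data.rx.len new bytes. */
			printk("RX ready: %u bytes\n", (unsigned int)evt->data.rx.len);
			break;
		case UART_RX_BUF_REQUEST:
			/* A second buffer could be handed over here with uart_rx_buf_rsp(). */
			break;
		case UART_RX_DISABLED:
			printk("RX disabled\n");
			break;
		default:
			break;
		}
	}

	int main(void)
	{
		static const uint8_t msg[] = "hello over DMA\r\n";

		if (!device_is_ready(uart_dev)) {
			return -ENODEV;
		}

		uart_callback_set(uart_dev, uart_cb, NULL);

		/* Receive into rx_buf; report UART_RX_RDY after 50 ms of RX idle time. */
		uart_rx_enable(uart_dev, rx_buf, sizeof(rx_buf), 50 * USEC_PER_MSEC);

		/* Transmit via DMA; abort if the transfer takes longer than 100 ms. */
		uart_tx(uart_dev, msg, sizeof(msg) - 1, 100 * USEC_PER_MSEC);

		return 0;
	}

The timeouts passed to uart_rx_enable() and uart_tx() are in microseconds, which is why the receive-timeout refresh in the driver's ISR reschedules the delayed work with K_USEC().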