drivers: dma: esp32c3: add gdma driver

Add GDMA driver for esp32c3.

Signed-off-by: Lucas Tamborrino <lucas.tamborrino@espressif.com>
parent 9421785fd0
commit 17fbdc55b5

5 changed files with 627 additions and 0 deletions
drivers/dma/CMakeLists.txt

@@ -24,3 +24,4 @@ zephyr_library_sources_ifdef(CONFIG_DMA_INTEL_ADSP_HDA_LINK_IN dma_intel_adsp_hda_link_in.c)
 zephyr_library_sources_ifdef(CONFIG_DMA_INTEL_ADSP_HDA_LINK_OUT dma_intel_adsp_hda_link_out.c)
 zephyr_library_sources_ifdef(CONFIG_DMA_INTEL_ADSP_GPDMA dma_intel_adsp_gpdma.c dma_dw_common.c)
 zephyr_library_sources_ifdef(CONFIG_DMA_GD32 dma_gd32.c)
+zephyr_library_sources_ifdef(CONFIG_DMA_ESP32 dma_esp32_gdma.c)
drivers/dma/Kconfig

@@ -50,4 +50,6 @@ source "drivers/dma/Kconfig.intel_adsp_hda"
 
 source "drivers/dma/Kconfig.gd32"
 
+source "drivers/dma/Kconfig.esp32"
+
 endif # DMA
drivers/dma/Kconfig.esp32 (new file, 9 lines)

@@ -0,0 +1,9 @@
# Copyright (c) 2022 Espressif Systems (Shanghai) Co., Ltd.
# SPDX-License-Identifier: Apache-2.0

config DMA_ESP32
	bool "ESP32 General Purpose DMA driver"
	depends on DT_HAS_ESPRESSIF_ESP32_GDMA_ENABLED
	default y
	help
	  General Purpose DMA for the ESP32 series.
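Since the option defaults to y whenever the espressif,esp32-gdma devicetree node is enabled, it normally needs no explicit selection; an application that wants to force it on anyway can add a one-line fragment to its prj.conf:

CONFIG_DMA_ESP32=y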
drivers/dma/dma_esp32_gdma.c (new file, 583 lines)

@@ -0,0 +1,583 @@
/*
 * Copyright (c) 2022 Espressif Systems (Shanghai) Co., Ltd.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#define DT_DRV_COMPAT espressif_esp32_gdma

#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(dma_esp32_gdma, CONFIG_DMA_LOG_LEVEL);

#include <hal/gdma_hal.h>
#include <hal/gdma_ll.h>
#include <gdma_channel.h>
#include <hal/dma_types.h>

#include <soc.h>
#include <soc/soc_memory_types.h>
#include <errno.h>
#include <zephyr/kernel.h>
#include <zephyr/drivers/dma.h>
#include <zephyr/drivers/dma/dma_esp32.h>
#include <zephyr/drivers/interrupt_controller/intc_esp32c3.h>
#include <zephyr/drivers/clock_control.h>

#define DMA_MAX_CHANNEL SOC_GDMA_PAIRS_PER_GROUP

struct dma_esp32_data {
	gdma_hal_context_t hal;
};

enum dma_channel_dir {
	DMA_RX,
	DMA_TX,
	DMA_UNCONFIGURED
};

struct dma_esp32_channel {
	uint8_t dir;
	uint8_t channel_id;
	int host_id;
	int periph_id;
	dma_callback_t cb;
	void *user_data;
	dma_descriptor_t desc;
	uint8_t *buf_temp;
	uint8_t *buf_original;
};

struct dma_esp32_config {
	int *irq_src;
	uint8_t dma_channel_max;
	uint8_t sram_alignment;
	struct dma_esp32_channel dma_channel[DMA_MAX_CHANNEL * 2];
	void (*config_irq)(const struct device *dev);
	struct device *src_dev;
	const struct device *clock_dev;
	clock_control_subsys_t clock_subsys;
};
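/*
 * Each GDMA channel pair is exposed as two Zephyr DMA channels: even
 * channel numbers are the RX stream and odd channel numbers the TX
 * stream of pair (channel / 2).
 */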
static void IRAM_ATTR dma_esp32_isr_handle_rx(const struct device *dev,
					      struct dma_esp32_channel *rx, uint32_t intr_status)
{
	struct dma_esp32_data *data = (struct dma_esp32_data *const)(dev)->data;

	gdma_ll_rx_clear_interrupt_status(data->hal.dev, rx->channel_id, intr_status);

	if (intr_status & (GDMA_LL_EVENT_RX_SUC_EOF | GDMA_LL_EVENT_RX_DONE)) {
		intr_status &= ~(GDMA_LL_EVENT_RX_SUC_EOF | GDMA_LL_EVENT_RX_DONE);
		if (rx->buf_temp) {
			memcpy(rx->buf_original, rx->buf_temp, rx->desc.dw0.length);
		}
	}

	if (rx->cb) {
		rx->cb(dev, rx->user_data, rx->channel_id * 2, -intr_status);
	}
}

static void IRAM_ATTR dma_esp32_isr_handle_tx(const struct device *dev,
					      struct dma_esp32_channel *tx, uint32_t intr_status)
{
	struct dma_esp32_data *data = (struct dma_esp32_data *const)(dev)->data;

	gdma_ll_tx_clear_interrupt_status(data->hal.dev, tx->channel_id, intr_status);

	intr_status &= ~(GDMA_LL_EVENT_TX_TOTAL_EOF | GDMA_LL_EVENT_TX_DONE | GDMA_LL_EVENT_TX_EOF);

	if (tx->cb) {
		tx->cb(dev, tx->user_data, tx->channel_id * 2 + 1, -intr_status);
	}
}

static void IRAM_ATTR dma_esp32_isr_handle(const struct device *dev, uint8_t rx_id, uint8_t tx_id)
{
	struct dma_esp32_config *config = (struct dma_esp32_config *)dev->config;
	struct dma_esp32_data *data = (struct dma_esp32_data *const)(dev)->data;
	struct dma_esp32_channel *dma_channel_rx = &config->dma_channel[rx_id];
	struct dma_esp32_channel *dma_channel_tx = &config->dma_channel[tx_id];
	uint32_t intr_status = 0;

	intr_status = gdma_ll_rx_get_interrupt_status(data->hal.dev, dma_channel_rx->channel_id);
	if (intr_status) {
		dma_esp32_isr_handle_rx(dev, dma_channel_rx, intr_status);
	}

	intr_status = gdma_ll_tx_get_interrupt_status(data->hal.dev, dma_channel_tx->channel_id);
	if (intr_status) {
		dma_esp32_isr_handle_tx(dev, dma_channel_tx, intr_status);
	}
}
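/*
 * Descriptor helpers: buffers outside DMA-capable memory are bounced
 * through a temporary heap allocation (copied out on RX EOF, copied in
 * when the TX descriptor is prepared).
 */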
static int dma_esp32_config_rx_descriptor(struct dma_esp32_channel *dma_channel,
					  struct dma_block_config *block)
{
	memset(&dma_channel->desc, 0, sizeof(dma_channel->desc));
	dma_channel->desc.buffer = (void *)block->dest_address;
	dma_channel->buf_original = (uint8_t *)block->dest_address;
	if (!esp_ptr_dma_capable((uint32_t *)block->dest_address)) {
		LOG_DBG("Rx buffer not in DMA capable memory: %p", (uint32_t *)block->dest_address);
		dma_channel->buf_temp = k_aligned_alloc(32, block->block_size);
		if (!dma_channel->buf_temp) {
			LOG_ERR("Not able to allocate mem");
			return -ENOMEM;
		}
		memset(dma_channel->buf_temp, 0, block->block_size);
		dma_channel->desc.buffer = dma_channel->buf_temp;
	}

	dma_channel->desc.dw0.size = block->block_size;
	dma_channel->desc.dw0.owner = DMA_DESCRIPTOR_BUFFER_OWNER_DMA;

	return 0;
}
static int dma_esp32_config_rx(const struct device *dev, struct dma_esp32_channel *dma_channel,
			       struct dma_config *config_dma)
{
	struct dma_esp32_config *config = (struct dma_esp32_config *)dev->config;
	struct dma_esp32_data *data = (struct dma_esp32_data *const)(dev)->data;
	struct dma_block_config *block = config_dma->head_block;

	dma_channel->dir = DMA_RX;

	gdma_ll_rx_reset_channel(data->hal.dev, dma_channel->channel_id);

	if (dma_channel->periph_id != SOC_GDMA_TRIG_PERIPH_M2M0) {
		gdma_ll_rx_connect_to_periph(data->hal.dev, dma_channel->channel_id,
					     dma_channel->periph_id);
	}

	if (config_dma->dest_burst_length) {
		/*
		 * RX channel burst mode depends on specific data alignment
		 */
		gdma_ll_rx_enable_data_burst(data->hal.dev, dma_channel->channel_id,
					     config->sram_alignment >= 4);
		gdma_ll_rx_enable_descriptor_burst(data->hal.dev, dma_channel->channel_id,
						   config->sram_alignment >= 4);
	}

	dma_channel->cb = config_dma->dma_callback;
	dma_channel->user_data = config_dma->user_data;

	gdma_ll_rx_clear_interrupt_status(data->hal.dev, dma_channel->channel_id, UINT32_MAX);
	gdma_ll_rx_enable_interrupt(data->hal.dev, dma_channel->channel_id, UINT32_MAX,
				    config_dma->dma_callback != NULL);

	return dma_esp32_config_rx_descriptor(dma_channel, block);
}
static int dma_esp32_config_tx_descriptor(struct dma_esp32_channel *dma_channel,
					  struct dma_block_config *block)
{
	memset(&dma_channel->desc, 0, sizeof(dma_channel->desc));
	dma_channel->desc.buffer = (void *)block->source_address;
	if (!esp_ptr_dma_capable((uint32_t *)block->source_address)) {
		LOG_DBG("Tx buffer not in DMA capable memory");
		dma_channel->buf_temp = k_malloc(block->block_size);
		if (!dma_channel->buf_temp) {
			LOG_ERR("Not able to allocate mem");
			return -ENOMEM;
		}
		memcpy(dma_channel->buf_temp, (uint8_t *)block->source_address, block->block_size);
		dma_channel->buf_original = (uint8_t *)block->source_address;
		dma_channel->desc.buffer = dma_channel->buf_temp;
	}

	dma_channel->desc.dw0.size = block->block_size;
	dma_channel->desc.dw0.length = block->block_size;
	dma_channel->desc.dw0.suc_eof = 1;
	dma_channel->desc.dw0.owner = DMA_DESCRIPTOR_BUFFER_OWNER_DMA;

	return 0;
}
static int dma_esp32_config_tx(const struct device *dev, struct dma_esp32_channel *dma_channel,
			       struct dma_config *config_dma)
{
	struct dma_esp32_config *config = (struct dma_esp32_config *)dev->config;
	struct dma_esp32_data *data = (struct dma_esp32_data *const)(dev)->data;
	struct dma_block_config *block = config_dma->head_block;

	dma_channel->dir = DMA_TX;

	gdma_ll_tx_reset_channel(data->hal.dev, dma_channel->channel_id);

	if (dma_channel->periph_id != SOC_GDMA_TRIG_PERIPH_M2M0) {
		gdma_ll_tx_connect_to_periph(data->hal.dev, dma_channel->channel_id,
					     dma_channel->periph_id);
	}

	/*
	 * TX channels can always enable burst mode, regardless of data alignment
	 */
	if (config_dma->source_burst_length) {
		gdma_ll_tx_enable_data_burst(data->hal.dev, dma_channel->channel_id, true);
		gdma_ll_tx_enable_descriptor_burst(data->hal.dev, dma_channel->channel_id, true);
	}

	dma_channel->cb = config_dma->dma_callback;
	dma_channel->user_data = config_dma->user_data;

	gdma_ll_tx_clear_interrupt_status(data->hal.dev, dma_channel->channel_id, UINT32_MAX);

	gdma_ll_tx_enable_interrupt(data->hal.dev, dma_channel->channel_id, GDMA_LL_EVENT_TX_EOF,
				    config_dma->dma_callback != NULL);

	return dma_esp32_config_tx_descriptor(dma_channel, block);
}
static int dma_esp32_config(const struct device *dev, uint32_t channel,
			    struct dma_config *config_dma)
{
	struct dma_esp32_config *config = (struct dma_esp32_config *)dev->config;
	struct dma_esp32_data *data = (struct dma_esp32_data *const)(dev)->data;
	struct dma_esp32_channel *dma_channel = &config->dma_channel[channel];

	if (channel >= config->dma_channel_max) {
		LOG_ERR("Unsupported channel");
		return -EINVAL;
	}

	if (!config_dma) {
		return -EINVAL;
	}

	if (config_dma->source_burst_length != config_dma->dest_burst_length) {
		LOG_ERR("Source and destination burst lengths must be equal");
		return -EINVAL;
	}

	dma_channel->periph_id = config_dma->channel_direction == MEMORY_TO_MEMORY
					 ? SOC_GDMA_TRIG_PERIPH_M2M0
					 : config_dma->dma_slot;

	dma_channel->channel_id = channel / 2;

	gdma_ll_enable_m2m_mode(data->hal.dev, dma_channel->channel_id,
				config_dma->channel_direction == MEMORY_TO_MEMORY);

	switch (config_dma->channel_direction) {
	case MEMORY_TO_MEMORY: {
		/*
		 * Create both TX and RX streams on the same channel_id
		 */
		struct dma_esp32_channel *dma_channel_rx =
			&config->dma_channel[dma_channel->channel_id * 2];
		struct dma_esp32_channel *dma_channel_tx =
			&config->dma_channel[(dma_channel->channel_id * 2) + 1];

		dma_channel_rx->channel_id = dma_channel->channel_id;
		dma_channel_tx->channel_id = dma_channel->channel_id;

		dma_channel_rx->periph_id = dma_channel->periph_id;
		dma_channel_tx->periph_id = dma_channel->periph_id;

		dma_esp32_config_rx(dev, dma_channel_rx, config_dma);
		dma_esp32_config_tx(dev, dma_channel_tx, config_dma);
		break;
	}
	case PERIPHERAL_TO_MEMORY:
		dma_esp32_config_rx(dev, dma_channel, config_dma);
		break;
	case MEMORY_TO_PERIPHERAL:
		dma_esp32_config_tx(dev, dma_channel, config_dma);
		break;
	default:
		LOG_ERR("Invalid channel direction");
		return -EINVAL;
	}

	return 0;
}
static int dma_esp32_start(const struct device *dev, uint32_t channel)
{
	struct dma_esp32_config *config = (struct dma_esp32_config *)dev->config;
	struct dma_esp32_data *data = (struct dma_esp32_data *const)(dev)->data;
	struct dma_esp32_channel *dma_channel = &config->dma_channel[channel];

	if (channel >= config->dma_channel_max) {
		LOG_ERR("Unsupported channel");
		return -EINVAL;
	}

	if (esp_intr_enable(config->irq_src[dma_channel->channel_id])) {
		return -EINVAL;
	}

	if (dma_channel->periph_id == SOC_GDMA_TRIG_PERIPH_M2M0) {
		struct dma_esp32_channel *dma_channel_rx =
			&config->dma_channel[dma_channel->channel_id * 2];
		struct dma_esp32_channel *dma_channel_tx =
			&config->dma_channel[(dma_channel->channel_id * 2) + 1];

		gdma_ll_rx_set_desc_addr(data->hal.dev, dma_channel->channel_id,
					 (int32_t)&dma_channel_rx->desc);
		gdma_ll_rx_start(data->hal.dev, dma_channel->channel_id);

		gdma_ll_tx_set_desc_addr(data->hal.dev, dma_channel->channel_id,
					 (int32_t)&dma_channel_tx->desc);
		gdma_ll_tx_start(data->hal.dev, dma_channel->channel_id);
	} else {
		if (dma_channel->dir == DMA_RX) {
			gdma_ll_rx_set_desc_addr(data->hal.dev, dma_channel->channel_id,
						 (int32_t)&dma_channel->desc);
			gdma_ll_rx_start(data->hal.dev, dma_channel->channel_id);
		} else if (dma_channel->dir == DMA_TX) {
			gdma_ll_tx_set_desc_addr(data->hal.dev, dma_channel->channel_id,
						 (int32_t)&dma_channel->desc);
			gdma_ll_tx_start(data->hal.dev, dma_channel->channel_id);
		} else {
			LOG_ERR("Channel %d is not configured", channel);
			return -EINVAL;
		}
	}

	return 0;
}
static int dma_esp32_suspend(const struct device *dev, uint32_t channel)
{
	struct dma_esp32_config *config = (struct dma_esp32_config *)dev->config;
	struct dma_esp32_data *data = (struct dma_esp32_data *const)(dev)->data;
	struct dma_esp32_channel *dma_channel = &config->dma_channel[channel];

	if (channel >= config->dma_channel_max) {
		LOG_ERR("Unsupported channel");
		return -EINVAL;
	}

	if (dma_channel->periph_id == SOC_GDMA_TRIG_PERIPH_M2M0) {
		gdma_ll_rx_stop(data->hal.dev, dma_channel->channel_id);
		gdma_ll_tx_stop(data->hal.dev, dma_channel->channel_id);
	}

	if (dma_channel->dir == DMA_RX) {
		gdma_ll_rx_stop(data->hal.dev, dma_channel->channel_id);
	} else if (dma_channel->dir == DMA_TX) {
		gdma_ll_tx_stop(data->hal.dev, dma_channel->channel_id);
	}

	return 0;
}
static int dma_esp32_stop(const struct device *dev, uint32_t channel)
{
	struct dma_esp32_config *config = (struct dma_esp32_config *)dev->config;
	struct dma_esp32_data *data = (struct dma_esp32_data *const)(dev)->data;
	struct dma_esp32_channel *dma_channel = &config->dma_channel[channel];

	if (channel >= config->dma_channel_max) {
		LOG_ERR("Unsupported channel");
		return -EINVAL;
	}

	if (esp_intr_disable(config->irq_src[dma_channel->channel_id])) {
		return -EINVAL;
	}

	return dma_esp32_suspend(dev, channel);
}
static int dma_esp32_resume(const struct device *dev, uint32_t channel)
{
	struct dma_esp32_config *config = (struct dma_esp32_config *)dev->config;
	struct dma_esp32_data *data = (struct dma_esp32_data *const)(dev)->data;
	struct dma_esp32_channel *dma_channel = &config->dma_channel[channel];

	if (channel >= config->dma_channel_max) {
		LOG_ERR("Unsupported channel");
		return -EINVAL;
	}

	if (dma_channel->periph_id == SOC_GDMA_TRIG_PERIPH_M2M0) {
		gdma_ll_rx_start(data->hal.dev, dma_channel->channel_id);
		gdma_ll_tx_start(data->hal.dev, dma_channel->channel_id);
		return 0;
	}

	if (dma_channel->dir == DMA_RX) {
		gdma_ll_rx_start(data->hal.dev, dma_channel->channel_id);
	} else if (dma_channel->dir == DMA_TX) {
		gdma_ll_tx_start(data->hal.dev, dma_channel->channel_id);
	}

	return 0;
}
static int dma_esp32_get_status(const struct device *dev, uint32_t channel,
				struct dma_status *status)
{
	struct dma_esp32_config *config = (struct dma_esp32_config *)dev->config;
	struct dma_esp32_data *data = (struct dma_esp32_data *const)(dev)->data;
	struct dma_esp32_channel *dma_channel = &config->dma_channel[channel];

	if (channel >= config->dma_channel_max) {
		LOG_ERR("Unsupported channel");
		return -EINVAL;
	}

	if (!status) {
		return -EINVAL;
	}

	if (dma_channel->dir == DMA_RX) {
		status->busy = !gdma_ll_rx_is_fsm_idle(data->hal.dev, dma_channel->channel_id);
		status->dir = PERIPHERAL_TO_MEMORY;
		status->read_position = dma_channel->desc.dw0.length;
	} else if (dma_channel->dir == DMA_TX) {
		status->busy = !gdma_ll_tx_is_fsm_idle(data->hal.dev, dma_channel->channel_id);
		status->dir = MEMORY_TO_PERIPHERAL;
		status->write_position = dma_channel->desc.dw0.length;
		status->total_copied = dma_channel->desc.dw0.length;
		status->pending_length = dma_channel->desc.dw0.size - dma_channel->desc.dw0.length;
	}

	return 0;
}
static int dma_esp32_reload(const struct device *dev, uint32_t channel, uint32_t src, uint32_t dst,
			    size_t size)
{
	struct dma_esp32_config *config = (struct dma_esp32_config *)dev->config;
	struct dma_esp32_data *data = (struct dma_esp32_data *const)(dev)->data;
	struct dma_esp32_channel *dma_channel = &config->dma_channel[channel];
	struct dma_block_config block = {0};
	int err = 0;

	if (channel >= config->dma_channel_max) {
		LOG_ERR("Unsupported channel");
		return -EINVAL;
	}

	if (dma_channel->dir == DMA_RX) {
		gdma_ll_rx_reset_channel(data->hal.dev, dma_channel->channel_id);
		block.block_size = size;
		block.dest_address = dst;
		err = dma_esp32_config_rx_descriptor(dma_channel, &block);
		if (err) {
			LOG_ERR("Error reloading RX channel (%d)", err);
			return err;
		}
	} else if (dma_channel->dir == DMA_TX) {
		gdma_ll_tx_reset_channel(data->hal.dev, dma_channel->channel_id);
		block.block_size = size;
		block.source_address = src;
		err = dma_esp32_config_tx_descriptor(dma_channel, &block);
		if (err) {
			LOG_ERR("Error reloading TX channel (%d)", err);
			return err;
		}
	}

	return 0;
}
static int dma_esp32_init(const struct device *dev)
{
	struct dma_esp32_config *config = (struct dma_esp32_config *)dev->config;
	struct dma_esp32_data *data = (struct dma_esp32_data *)dev->data;
	struct dma_esp32_channel *dma_channel;
	int ret = 0;

	if (!device_is_ready(config->clock_dev)) {
		LOG_ERR("clock control device not ready");
		return -ENODEV;
	}

	ret = clock_control_on(config->clock_dev, config->clock_subsys);
	if (ret < 0) {
		LOG_ERR("Could not initialize clock (%d)", ret);
		return ret;
	}

	config->config_irq(dev);

	for (uint8_t i = 0; i < DMA_MAX_CHANNEL * 2; i++) {
		dma_channel = &config->dma_channel[i];
		dma_channel->buf_original = NULL;
		dma_channel->buf_temp = NULL;
		dma_channel->cb = NULL;
		dma_channel->dir = DMA_UNCONFIGURED;
		dma_channel->periph_id = GDMA_TRIG_PERIPH_INVALID;
		memset(&dma_channel->desc, 0, sizeof(dma_descriptor_t));
	}

	gdma_hal_init(&data->hal, 0);
	gdma_ll_enable_clock(data->hal.dev, true);

	return 0;
}
static const struct dma_driver_api dma_esp32_api = {
	.config = dma_esp32_config,
	.start = dma_esp32_start,
	.stop = dma_esp32_stop,
	.suspend = dma_esp32_suspend,
	.resume = dma_esp32_resume,
	.get_status = dma_esp32_get_status,
	.reload = dma_esp32_reload,
};
#define DMA_ESP32_DEFINE_IRQ_HANDLER(channel)                                              \
	static void IRAM_ATTR dma_esp32_isr_##channel(const struct device *dev)            \
	{                                                                                  \
		dma_esp32_isr_handle(dev, channel * 2, channel * 2 + 1);                   \
	}

DMA_ESP32_DEFINE_IRQ_HANDLER(0)
DMA_ESP32_DEFINE_IRQ_HANDLER(1)
DMA_ESP32_DEFINE_IRQ_HANDLER(2)
#if DMA_MAX_CHANNEL >= 5
DMA_ESP32_DEFINE_IRQ_HANDLER(3)
DMA_ESP32_DEFINE_IRQ_HANDLER(4)
#endif

#define DMA_ESP32_CONNECT_IRQ(idx, channel, dev)                                           \
	do {                                                                               \
		esp_intr_alloc(DT_INST_IRQ_BY_IDX(idx, channel, irq), 0,                   \
			       (isr_handler_t)dma_esp32_isr_##channel, (void *)dev, NULL); \
		esp_intr_disable(DT_INST_IRQ_BY_IDX(idx, channel, irq));                   \
	} while (false)

#define DMA_ESP32_CONNECT_IRQ_COND(idx, channel, dev)                                      \
	COND_CODE_1(DT_INST_IRQ_HAS_IDX(idx, channel),                                     \
		    (DMA_ESP32_CONNECT_IRQ(idx, channel, dev)), ())

#define DMA_ESP32_DEFINE_IRQ_CONFIG(idx)                                                   \
	static void dma_esp32_config_irq_##idx(const struct device *dev)                   \
	{                                                                                  \
		DMA_ESP32_CONNECT_IRQ_COND(idx, 0, dev);                                   \
		DMA_ESP32_CONNECT_IRQ_COND(idx, 1, dev);                                   \
		DMA_ESP32_CONNECT_IRQ_COND(idx, 2, dev);                                   \
		DMA_ESP32_CONNECT_IRQ_COND(idx, 3, dev);                                   \
		DMA_ESP32_CONNECT_IRQ_COND(idx, 4, dev);                                   \
	}

#define DMA_ESP32_IRQ_NUM(node, prop, idx) DT_PROP_BY_IDX(node, interrupts, idx),

#define DMA_ESP32_INIT(idx)                                                                \
	DMA_ESP32_DEFINE_IRQ_CONFIG(idx)                                                   \
	static int irq_numbers[] = {                                                       \
		DT_INST_FOREACH_PROP_ELEM(idx, interrupts, DMA_ESP32_IRQ_NUM)};            \
	static struct dma_esp32_config dma_config_##idx = {                                \
		.config_irq = dma_esp32_config_irq_##idx,                                  \
		.irq_src = irq_numbers,                                                    \
		.dma_channel_max = DT_INST_PROP(idx, dma_channels),                        \
		.sram_alignment = DT_INST_PROP(idx, dma_buf_addr_alignment),               \
		.clock_dev = DEVICE_DT_GET(DT_INST_CLOCKS_CTLR(idx)),                      \
		.clock_subsys = (void *)DT_INST_CLOCKS_CELL(idx, offset),                  \
	};                                                                                 \
	static struct dma_esp32_data dma_data_##idx = {                                    \
		.hal =                                                                     \
			{                                                                  \
				.dev = (gdma_dev_t *)DT_INST_REG_ADDR(idx),                \
			},                                                                 \
	};                                                                                 \
                                                                                           \
	DEVICE_DT_INST_DEFINE(idx, &dma_esp32_init, NULL, &dma_data_##idx,                 \
			      &dma_config_##idx, PRE_KERNEL_1, CONFIG_DMA_INIT_PRIORITY,   \
			      &dma_esp32_api);

DT_INST_FOREACH_STATUS_OKAY(DMA_ESP32_INIT)
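For reference, a minimal usage sketch (not part of this commit) of driving the new driver through the generic Zephyr DMA API, here a memory-to-memory copy on channel pair 0; the function name and device pointer below are illustrative assumptions:

#include <zephyr/device.h>
#include <zephyr/drivers/dma.h>

/* Hypothetical sketch: M2M copy on GDMA pair 0. Configuring channel 0 with
 * MEMORY_TO_MEMORY makes the driver set up both the RX stream (channel 0)
 * and the TX stream (channel 1); starting channel 0 then kicks off both.
 */
static int example_m2m_copy(const struct device *dma_dev, uint8_t *dst,
			    const uint8_t *src, size_t len)
{
	struct dma_block_config block = {
		.source_address = (uint32_t)src,
		.dest_address = (uint32_t)dst,
		.block_size = len,
	};
	struct dma_config cfg = {
		.channel_direction = MEMORY_TO_MEMORY,
		.block_count = 1,
		.head_block = &block,
	};
	int ret = dma_config(dma_dev, 0, &cfg);

	if (ret < 0) {
		return ret;
	}

	return dma_start(dma_dev, 0);
}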
include/zephyr/drivers/dma/dma_esp32.h (new file, 32 lines)

@@ -0,0 +1,32 @@
/*
 * Copyright (c) 2022 Espressif Systems (Shanghai) Co., Ltd.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#ifndef ZEPHYR_INCLUDE_DRIVERS_DMA_ESP32_H_
#define ZEPHYR_INCLUDE_DRIVERS_DMA_ESP32_H_

enum gdma_trigger_peripheral {
	GDMA_TRIG_PERIPH_M2M = -1,
	GDMA_TRIG_PERIPH_SPI2 = 0,
	GDMA_TRIG_PERIPH_UHCI0 = 2,
	GDMA_TRIG_PERIPH_I2S = 4,
	GDMA_TRIG_PERIPH_AES = 6,
	GDMA_TRIG_PERIPH_SHA = 7,
	GDMA_TRIG_PERIPH_ADC = 8,
	GDMA_TRIG_PERIPH_INVALID = 0x3F,
};

#define ESP32_DT_INST_DMA_CTLR(n, name)                                                    \
	COND_CODE_1(DT_INST_NODE_HAS_PROP(n, dmas),                                        \
		    (DEVICE_DT_GET(DT_INST_DMAS_CTLR_BY_NAME(n, name))),                   \
		    (NULL))

#define ESP32_DT_INST_DMA_CELL(n, name, cell)                                              \
	COND_CODE_1(DT_INST_NODE_HAS_PROP(n, dmas),                                        \
		    (DT_INST_DMAS_CELL_BY_NAME(n, name, cell)),                            \
		    (0xff))

#endif /* ZEPHYR_INCLUDE_DRIVERS_DMA_ESP32_H_ */
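As a usage sketch (assumptions: a peripheral node carrying dmas/dma-names properties whose binding names its single DMA cell "channel"; the compatible below is hypothetical), a consumer driver could resolve its controller and channel like this:

/* Hypothetical consumer: the helpers resolve to NULL / 0xff when the node
 * has no "dmas" property, letting the driver fall back to non-DMA operation.
 */
#define DT_DRV_COMPAT vnd_hypothetical_periph

static const struct device *const dma_dev = ESP32_DT_INST_DMA_CTLR(0, tx);
static const uint32_t dma_channel = ESP32_DT_INST_DMA_CELL(0, tx, channel);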