spi: sam: Refactor driver to use SPI RTIO common APIs

- Follows the approach used in the spi_mcux_lpspi driver.
- Enables DMA by default when SPI RTIO is selected, to favor
  non-blocking transfers.

Signed-off-by: Luis Ubieda <luisf@croxel.com>
Author: Luis Ubieda
Authored: 2024-08-17 12:58:41 -04:00
Committed by: Anas Nashif
Parent: cfe64f7f1c
Commit: 1fdf6e64fb
2 changed files with 57 additions and 93 deletions
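For context, the spi_rtio common layer centralizes the submission queue and transaction bookkeeping that each driver previously open-coded. The sketch below shows the driver-facing flow this refactor adopts; spi_rtio_init(), spi_rtio_submit() and spi_rtio_complete() are the helpers visible in the diff, while the my_spi_* names and the header path are placeholders and assumptions, not actual spi_sam code.

#include <zephyr/device.h>
#include <zephyr/drivers/spi/rtio.h> /* assumed helper header providing struct spi_rtio */

struct my_spi_data {
	struct spi_rtio *rtio_ctx; /* statically allocated via SPI_RTIO_DEFINE() */
};

static void my_spi_start(const struct device *dev); /* configure bus, assert CS, kick DMA */

static void my_spi_iodev_submit(const struct device *dev,
				struct rtio_iodev_sqe *iodev_sqe)
{
	struct my_spi_data *data = dev->data;

	/* Queue the SQE; true means the bus was idle and this
	 * submission must start the hardware itself.
	 */
	if (spi_rtio_submit(data->rtio_ctx, iodev_sqe)) {
		my_spi_start(dev);
	}
}

static void my_spi_on_dma_done(const struct device *dev, int status)
{
	struct my_spi_data *data = dev->data;

	/* Complete the current SQE; true means another transfer is
	 * already queued and should be started next.
	 */
	if (spi_rtio_complete(data->rtio_ctx, status)) {
		my_spi_start(dev);
	}
}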

drivers/spi/Kconfig.sam

@@ -15,6 +15,7 @@ config SPI_SAM
 if SPI_SAM
 
 config SPI_SAM_DMA
 	bool "SPI SAM DMA Support"
+	default y if SPI_RTIO
 	select DMA
 	help
 	  Enable using DMA with SPI for SPI instances that enable dma channels in
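With this default, an application that opts into the RTIO path gets the DMA-backed driver automatically. A minimal prj.conf sketch (assuming the board devicetree assigns DMA channels to the SPI instance):

CONFIG_SPI=y
CONFIG_SPI_RTIO=y
# SPI_SAM_DMA now defaults to y and in turn selects DMA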

drivers/spi/spi_sam.c

@@ -53,12 +53,7 @@ struct spi_sam_data {
 	struct k_spinlock lock;
 
 #ifdef CONFIG_SPI_RTIO
-	struct rtio *r; /* context for thread calls */
-	struct mpsc io_q;
-	struct rtio_iodev iodev;
-	struct rtio_iodev_sqe *txn_head;
-	struct rtio_iodev_sqe *txn_curr;
-	struct spi_dt_spec dt_spec;
+	struct spi_rtio *rtio_ctx;
 #endif
 
 #ifdef CONFIG_SPI_SAM_DMA
@@ -305,7 +300,9 @@ static void dma_callback(const struct device *dma_dev, void *user_data,
 	struct spi_sam_data *drv_data = dev->data;
 
 #ifdef CONFIG_SPI_RTIO
-	if (drv_data->txn_head != NULL) {
+	struct spi_rtio *rtio_ctx = drv_data->rtio_ctx;
+
+	if (rtio_ctx->txn_head != NULL) {
 		spi_sam_iodev_complete(dev, status);
 		return;
 	}
@@ -324,7 +321,8 @@ static int spi_sam_dma_txrx(const struct device *dev,
 	const struct spi_sam_config *drv_cfg = dev->config;
 	struct spi_sam_data *drv_data = dev->data;
 #ifdef CONFIG_SPI_RTIO
-	bool blocking = drv_data->txn_head == NULL;
+	struct spi_rtio *rtio_ctx = drv_data->rtio_ctx;
+	bool blocking = rtio_ctx->txn_head == NULL;
 #else
 	bool blocking = true;
 #endif
@@ -447,10 +445,12 @@ static inline int spi_sam_rx(const struct device *dev,
 #ifdef CONFIG_SPI_SAM_DMA
 	const struct spi_sam_config *cfg = dev->config;
 
-	if (rx_buf_len < SAM_SPI_DMA_THRESHOLD || cfg->dma_dev == NULL) {
+	if ((rx_buf_len < SAM_SPI_DMA_THRESHOLD || cfg->dma_dev == NULL) &&
+	    !IS_ENABLED(CONFIG_SPI_RTIO)) {
 		key = spi_spin_lock(dev);
 		spi_sam_fast_rx(regs, rx_buf, rx_buf_len);
 	} else {
+		/* RTIO Transfers should always fall here */
 		return spi_sam_dma_txrx(dev, regs, NULL, rx_buf, rx_buf_len);
 	}
 #else
@@ -473,10 +473,12 @@ static inline int spi_sam_tx(const struct device *dev,
 #ifdef CONFIG_SPI_SAM_DMA
 	const struct spi_sam_config *cfg = dev->config;
 
-	if (tx_buf_len < SAM_SPI_DMA_THRESHOLD || cfg->dma_dev == NULL) {
+	if ((tx_buf_len < SAM_SPI_DMA_THRESHOLD || cfg->dma_dev == NULL) &&
+	    !IS_ENABLED(CONFIG_SPI_RTIO)) {
 		key = spi_spin_lock(dev);
 		spi_sam_fast_tx(regs, tx_buf, tx_buf_len);
 	} else {
+		/* RTIO Transfers should always fall here */
 		return spi_sam_dma_txrx(dev, regs, tx_buf, NULL, tx_buf_len);
 	}
 #else
@@ -500,10 +502,12 @@ static inline int spi_sam_txrx(const struct device *dev,
 #ifdef CONFIG_SPI_SAM_DMA
 	const struct spi_sam_config *cfg = dev->config;
 
-	if (buf_len < SAM_SPI_DMA_THRESHOLD || cfg->dma_dev == NULL) {
+	if ((buf_len < SAM_SPI_DMA_THRESHOLD || cfg->dma_dev == NULL) &&
+	    !IS_ENABLED(CONFIG_SPI_RTIO)) {
 		key = spi_spin_lock(dev);
 		spi_sam_fast_txrx(regs, tx_buf, rx_buf, buf_len);
 	} else {
+		/* RTIO Transfers should always fall here */
 		return spi_sam_dma_txrx(dev, regs, tx_buf, rx_buf, buf_len);
 	}
 #else
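Note the guard added in the three hunks above: IS_ENABLED() folds to a compile-time 0 or 1, so in RTIO builds the sub-threshold polled path becomes dead code the compiler drops, while both branches remain type-checked (unlike an #ifdef exclusion). A generic, hypothetical illustration, not driver code:

#include <stdbool.h>
#include <stddef.h>
#include <zephyr/sys/util_macro.h>

#define MY_THRESHOLD 32 /* hypothetical stand-in for SAM_SPI_DMA_THRESHOLD */

static bool use_polled_path(size_t len)
{
	/* With CONFIG_SPI_RTIO=y this constant-folds to false, so every
	 * transfer takes the DMA branch, matching the comments above.
	 */
	return (len < MY_THRESHOLD) && !IS_ENABLED(CONFIG_SPI_RTIO);
}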
@@ -647,13 +651,13 @@ static bool spi_sam_is_regular(const struct spi_buf_set *tx_bufs,
 #else
 
 static void spi_sam_iodev_complete(const struct device *dev, int status);
-static void spi_sam_iodev_next(const struct device *dev, bool completion);
 
 static void spi_sam_iodev_start(const struct device *dev)
 {
 	const struct spi_sam_config *cfg = dev->config;
 	struct spi_sam_data *data = dev->data;
-	struct rtio_sqe *sqe = &data->txn_curr->sqe;
+	struct spi_rtio *rtio_ctx = data->rtio_ctx;
+	struct rtio_sqe *sqe = &rtio_ctx->txn_curr->sqe;
 	int ret = 0;
 
 	switch (sqe->op) {
@@ -672,65 +676,48 @@ static void spi_sam_iodev_start(const struct device *dev)
 		break;
 	default:
 		LOG_ERR("Invalid op code %d for submission %p\n", sqe->op, (void *)sqe);
-		struct rtio_iodev_sqe *txn_head = data->txn_head;
-
-		spi_sam_iodev_next(dev, true);
-		rtio_iodev_sqe_err(txn_head, -EINVAL);
-		ret = 0;
-	}
-
-	if (ret == 0) {
-		spi_sam_iodev_complete(dev, 0);
-	}
-}
-
-static void spi_sam_iodev_next(const struct device *dev, bool completion)
-{
-	struct spi_sam_data *data = dev->data;
-
-	k_spinlock_key_t key = spi_spin_lock(dev);
-
-	if (!completion && data->txn_curr != NULL) {
-		spi_spin_unlock(dev, key);
+		spi_sam_iodev_complete(dev, -EINVAL);
 		return;
 	}
 
-	struct mpsc_node *next = mpsc_pop(&data->io_q);
-
-	if (next != NULL) {
-		struct rtio_iodev_sqe *next_sqe = CONTAINER_OF(next, struct rtio_iodev_sqe, q);
-
-		data->txn_head = next_sqe;
-		data->txn_curr = next_sqe;
-	} else {
-		data->txn_head = NULL;
-		data->txn_curr = NULL;
+	/** Completion of the RTIO transfer should come through the DMA
+	 * callback when successful, otherwise complete it here as an error.
+	 */
+	if (ret != 0 && ret != -EWOULDBLOCK) {
+		spi_sam_iodev_complete(dev, ret);
 	}
+}
 
-	spi_spin_unlock(dev, key);
-
-	if (data->txn_curr != NULL) {
-		struct spi_dt_spec *spi_dt_spec = data->txn_curr->sqe.iodev->data;
-		struct spi_config *spi_cfg = &spi_dt_spec->config;
-
-		spi_sam_configure(dev, spi_cfg);
-		spi_context_cs_control(&data->ctx, true);
-		spi_sam_iodev_start(dev);
-	}
+static inline void spi_sam_iodev_prepare_start(const struct device *dev)
+{
+	struct spi_sam_data *data = dev->data;
+	struct spi_rtio *rtio_ctx = data->rtio_ctx;
+	struct spi_dt_spec *spi_dt_spec = rtio_ctx->txn_curr->sqe.iodev->data;
+	struct spi_config *spi_config = &spi_dt_spec->config;
+	int err;
+
+	err = spi_sam_configure(dev, spi_config);
+	__ASSERT(!err, "%d", err);
+
+	spi_context_cs_control(&data->ctx, true);
 }
 
 static void spi_sam_iodev_complete(const struct device *dev, int status)
 {
 	struct spi_sam_data *data = dev->data;
+	struct spi_rtio *rtio_ctx = data->rtio_ctx;
 
-	if (data->txn_curr->sqe.flags & RTIO_SQE_TRANSACTION) {
-		data->txn_curr = rtio_txn_next(data->txn_curr);
+	if (!status && rtio_ctx->txn_curr->sqe.flags & RTIO_SQE_TRANSACTION) {
+		rtio_ctx->txn_curr = rtio_txn_next(rtio_ctx->txn_curr);
 		spi_sam_iodev_start(dev);
 	} else {
-		struct rtio_iodev_sqe *txn_head = data->txn_head;
-
 		/** De-assert CS-line to space from next transaction */
 		spi_context_cs_control(&data->ctx, false);
-		spi_sam_iodev_next(dev, true);
-		rtio_iodev_sqe_ok(txn_head, status);
+
+		if (spi_rtio_complete(rtio_ctx, status)) {
+			spi_sam_iodev_prepare_start(dev);
+			spi_sam_iodev_start(dev);
+		}
 	}
 }
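The RTIO_SQE_TRANSACTION handling above is what keeps CS asserted across chained submissions. For illustration, an application-side transaction might look like the following sketch (hedged: the sensor_spi node label is hypothetical, it assumes each chained SQE yields its own completion, and SQE-pool-exhaustion handling is omitted):

#include <zephyr/drivers/spi.h>
#include <zephyr/rtio/rtio.h>

RTIO_DEFINE(spi_r, 8, 8);
SPI_DT_IODEV_DEFINE(sensor_iodev, DT_NODELABEL(sensor_spi),
		    SPI_OP_MODE_MASTER | SPI_WORD_SET(8), 0);

static uint8_t reg = 0x0f;
static uint8_t val;

int read_one_reg(void)
{
	struct rtio_sqe *wr = rtio_sqe_acquire(&spi_r);
	struct rtio_sqe *rd = rtio_sqe_acquire(&spi_r);
	int res = 0;

	rtio_sqe_prep_tiny_write(wr, &sensor_iodev, RTIO_PRIO_NORM, &reg, 1, NULL);
	wr->flags |= RTIO_SQE_TRANSACTION; /* CS stays asserted into the read */
	rtio_sqe_prep_read(rd, &sensor_iodev, RTIO_PRIO_NORM, &val, 1, NULL);

	rtio_submit(&spi_r, 2);

	for (int i = 0; i < 2; i++) {
		struct rtio_cqe *cqe = rtio_cqe_consume_block(&spi_r);

		if (cqe->result < 0) {
			res = cqe->result;
		}
		rtio_cqe_release(&spi_r, cqe);
	}

	return res;
}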
@@ -738,9 +725,12 @@ static void spi_sam_iodev_submit(const struct device *dev,
 				 struct rtio_iodev_sqe *iodev_sqe)
 {
 	struct spi_sam_data *data = dev->data;
+	struct spi_rtio *rtio_ctx = data->rtio_ctx;
 
-	mpsc_push(&data->io_q, &iodev_sqe->q);
-	spi_sam_iodev_next(dev, false);
+	if (spi_rtio_submit(rtio_ctx, iodev_sqe)) {
+		spi_sam_iodev_prepare_start(dev);
+		spi_sam_iodev_start(dev);
+	}
 }
 #endif
@@ -755,34 +745,9 @@ static int spi_sam_transceive(const struct device *dev,
 	spi_context_lock(&data->ctx, false, NULL, NULL, config);
 
 #if CONFIG_SPI_RTIO
-	struct rtio_sqe *sqe;
-	struct rtio_cqe *cqe;
-	struct spi_dt_spec *dt_spec = &data->dt_spec;
-
-	dt_spec->config = *config;
-
-	int ret = spi_rtio_copy(data->r, &data->iodev, tx_bufs, rx_bufs, &sqe);
-
-	if (ret < 0) {
-		err = ret;
-		goto done;
-	}
-
-	/* Submit request and wait */
-	rtio_submit(data->r, ret);
-
-	while (ret > 0) {
-		cqe = rtio_cqe_consume(data->r);
-		if (cqe->result < 0) {
-			err = cqe->result;
-		}
-
-		rtio_cqe_release(data->r, cqe);
-		ret--;
-	}
+	struct spi_rtio *rtio_ctx = data->rtio_ctx;
+
+	err = spi_rtio_transceive(rtio_ctx, config, tx_bufs, rx_bufs);
 #else
 	const struct spi_sam_config *cfg = dev->config;
@@ -804,8 +769,8 @@ static int spi_sam_transceive(const struct device *dev,
 	}
 
 	spi_context_cs_control(&data->ctx, false);
-#endif
-done:
+#endif
 	spi_context_release(&data->ctx, err);
 
 	return err;
 }
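With the change above, the blocking entry point simply funnels into spi_rtio_transceive(), so existing synchronous callers keep working unchanged. A usage sketch (the sensor_spi node label is hypothetical):

#include <zephyr/drivers/spi.h>

static const struct spi_dt_spec bus = SPI_DT_SPEC_GET(DT_NODELABEL(sensor_spi),
						      SPI_OP_MODE_MASTER | SPI_WORD_SET(8), 0);

int whoami(uint8_t *out)
{
	uint8_t tx[2] = { 0x80 | 0x0f, 0x00 }; /* read command + dummy byte */
	uint8_t rx[2];
	const struct spi_buf txb = { .buf = tx, .len = sizeof(tx) };
	const struct spi_buf rxb = { .buf = rx, .len = sizeof(rx) };
	const struct spi_buf_set txs = { .buffers = &txb, .count = 1 };
	const struct spi_buf_set rxs = { .buffers = &rxb, .count = 1 };
	int err = spi_transceive_dt(&bus, &txs, &rxs);

	if (err == 0) {
		*out = rx[1];
	}
	return err;
}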
@@ -866,10 +831,7 @@ static int spi_sam_init(const struct device *dev)
 #endif
 
 #ifdef CONFIG_SPI_RTIO
-	data->dt_spec.bus = dev;
-	data->iodev.api = &spi_iodev_api;
-	data->iodev.data = &data->dt_spec;
-	mpsc_init(&data->io_q);
+	spi_rtio_init(data->rtio_ctx, dev);
 #endif
 
 	spi_context_unlock_unconditionally(&data->ctx);
@@ -914,8 +876,9 @@ static const struct spi_driver_api spi_sam_driver_api = {
 		COND_CODE_1(SPI_SAM_USE_DMA(n), (SPI_DMA_INIT(n)), ()) \
 	}
 
-#define SPI_SAM_RTIO_DEFINE(n) RTIO_DEFINE(spi_sam_rtio_##n, CONFIG_SPI_SAM_RTIO_SQ_SIZE, \
-					   CONFIG_SPI_SAM_RTIO_SQ_SIZE)
+#define SPI_SAM_RTIO_DEFINE(n) SPI_RTIO_DEFINE(spi_sam_rtio_##n, \
+					       CONFIG_SPI_SAM_RTIO_SQ_SIZE, \
+					       CONFIG_SPI_SAM_RTIO_SQ_SIZE)
 
 #define SPI_SAM_DEVICE_INIT(n) \
 	PINCTRL_DT_INST_DEFINE(n); \
@@ -925,7 +888,7 @@ static const struct spi_driver_api spi_sam_driver_api = {
 		SPI_CONTEXT_INIT_LOCK(spi_sam_dev_data_##n, ctx), \
 		SPI_CONTEXT_INIT_SYNC(spi_sam_dev_data_##n, ctx), \
 		SPI_CONTEXT_CS_GPIOS_INITIALIZE(DT_DRV_INST(n), ctx) \
-		IF_ENABLED(CONFIG_SPI_RTIO, (.r = &spi_sam_rtio_##n)) \
+		IF_ENABLED(CONFIG_SPI_RTIO, (.rtio_ctx = &spi_sam_rtio_##n)) \
 	}; \
 
 	DEVICE_DT_INST_DEFINE(n, &spi_sam_init, NULL, \