rtio: Add transceive op

Adds the transceive op, which is needed for full SPI support. Notably,
in RTIO a transceive is expected to use a balanced buffer pair of the
same length, with non-null tx/rx buffers.

Signed-off-by: Tom Burdick <thomas.burdick@intel.com>
Tom Burdick 2023-03-06 14:10:45 -06:00 committed by Carles Cufí
parent 912e7ff863
commit a539d9c904
2 changed files with 45 additions and 1 deletion
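
For reference, a minimal usage sketch (not part of this change) of how a SPI-style transfer could be queued with the new op. It assumes an already-initialized RTIO context r and an iodev spi_iodev supplied by a driver, and it uses the rtio_spsc acquire/produce calls and RTIO_PRIO_NORM from the existing API; treat it as an illustration rather than canonical usage.

#include <zephyr/rtio/rtio.h>

/* Illustrative only: r and spi_iodev are assumed to be defined elsewhere. */
static uint8_t tx_buf[8] = { 0xA5, 0x5A };
static uint8_t rx_buf[8];

void transceive_example(struct rtio *r, const struct rtio_iodev *spi_iodev)
{
	struct rtio_sqe *sqe = rtio_spsc_acquire(r->sq);

	if (sqe == NULL) {
		return; /* submission queue is full */
	}

	/* OP_TXRX requires non-null tx/rx buffers of the same length */
	rtio_sqe_prep_transceive(sqe, spi_iodev, RTIO_PRIO_NORM,
				 tx_buf, rx_buf, sizeof(tx_buf), NULL);
	rtio_spsc_produce(r->sq);

	rtio_submit(r, 1);

	struct rtio_cqe *cqe = rtio_cqe_consume_block(r);

	if (cqe->result < 0) {
		/* the iodev reported a transfer error */
	}
	rtio_cqe_release(r);
}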


@@ -175,10 +175,17 @@ struct rtio_sqe {
			rtio_callback_t callback;
			void *arg0; /**< Last argument given to callback */
		};

		/** OP_TXRX */
		struct {
			uint32_t txrx_buf_len;
			uint8_t *tx_buf;
			uint8_t *rx_buf;
		};
	};
};

/** @cond ignore */
/* Ensure the rtio_sqe never grows beyond a common cacheline size of 64 bytes */
BUILD_ASSERT(sizeof(struct rtio_sqe) <= 64);
@@ -365,6 +372,8 @@ struct rtio_iodev {
/** An operation that calls a given function (callback) */
#define RTIO_OP_CALLBACK (RTIO_OP_TINY_TX+1)
/** An operation that transceives (reads and writes simultaneously) */
#define RTIO_OP_TXRX (RTIO_OP_CALLBACK+1)
/**
@@ -468,6 +477,27 @@ static inline void rtio_sqe_prep_callback(struct rtio_sqe *sqe,
	sqe->userdata = userdata;
}

/**
 * @brief Prepare a transceive op submission
 */
static inline void rtio_sqe_prep_transceive(struct rtio_sqe *sqe,
					    const struct rtio_iodev *iodev,
					    int8_t prio,
					    uint8_t *tx_buf,
					    uint8_t *rx_buf,
					    uint32_t buf_len,
					    void *userdata)
{
	sqe->op = RTIO_OP_TXRX;
	sqe->prio = prio;
	sqe->flags = 0;
	sqe->iodev = iodev;
	sqe->txrx_buf_len = buf_len;
	sqe->tx_buf = tx_buf;
	sqe->rx_buf = rx_buf;
	sqe->userdata = userdata;
}

/**
 * @brief Statically define and initialize a fixed length submission queue.
 *
@@ -647,6 +677,16 @@ static inline struct rtio_cqe *rtio_cqe_consume_block(struct rtio *r)
	return cqe;
}

/**
 * @brief Release consumed completion queue event
 *
 * @param r RTIO context
 */
static inline void rtio_cqe_release(struct rtio *r)
{
	rtio_spsc_release(r->cq);
}

/**
 * @brief Release all consumed completion queue events
 *


@@ -35,6 +35,10 @@ static inline bool rtio_vrfy_sqe(struct rtio_sqe *sqe)
		break;
	case RTIO_OP_TINY_TX:
		break;
	case RTIO_OP_TXRX:
		valid_sqe &= Z_SYSCALL_MEMORY(sqe->tx_buf, sqe->txrx_buf_len, true);
		valid_sqe &= Z_SYSCALL_MEMORY(sqe->rx_buf, sqe->txrx_buf_len, true);
		break;
	default:
		/* RTIO OP must be known and allowable from user mode
		 * otherwise it is invalid