rtio: Split the rx and tx buffer unions

Transmit/write buffers are expected to be constant values that the
operation transmits but does not mutate. Separate the OP_TX and OP_RX
operation description unions so the transmit buffer pointer can be
const qualified.

Signed-off-by: Tom Burdick <thomas.burdick@intel.com>
Authored by Tom Burdick on 2024-07-31 10:37:03 -05:00, committed by Anas Nashif
parent 7598330a74
commit caa0c0a467
8 changed files with 79 additions and 73 deletions
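
In short, driver submit paths stop reading the shared buf/buf_len members and instead pick the union member that matches sqe->op. Below is a minimal sketch of the resulting access pattern, assuming <zephyr/rtio/rtio.h> as of this commit; the helper name and return convention are illustrative, not part of the change:

#include <errno.h>
#include <zephyr/rtio/rtio.h>

/* Illustrative helper: map an SQE onto tx/rx pointers per the split unions. */
static int pick_buffers(const struct rtio_sqe *sqe,
                        const uint8_t **tx, uint8_t **rx, uint32_t *len)
{
        switch (sqe->op) {
        case RTIO_OP_TX:      /* constant data to transmit, now const-qualified */
                *tx = sqe->tx.buf;
                *rx = NULL;
                *len = sqe->tx.buf_len;
                return 0;
        case RTIO_OP_TINY_TX: /* small copy carried inline in the SQE */
                *tx = sqe->tiny_tx.buf;
                *rx = NULL;
                *len = sqe->tiny_tx.buf_len;
                return 0;
        case RTIO_OP_RX:      /* buffer the driver fills in */
                *tx = NULL;
                *rx = sqe->rx.buf;
                *len = sqe->rx.buf_len;
                return 0;
        case RTIO_OP_TXRX:    /* full-duplex, equal-length buffers */
                *tx = sqe->txrx.tx_buf;
                *rx = sqe->txrx.rx_buf;
                *len = sqe->txrx.buf_len;
                return 0;
        default:
                return -EINVAL;
        }
}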


@@ -192,13 +192,13 @@ static bool mcux_lpi2c_start(const struct device *dev)
switch (sqe->op) {
case RTIO_OP_RX:
return mcux_lpi2c_msg_start(dev, I2C_MSG_READ | sqe->iodev_flags,
sqe->buf, sqe->buf_len, dt_spec->addr);
sqe->rx.buf, sqe->rx.buf_len, dt_spec->addr);
case RTIO_OP_TINY_TX:
return mcux_lpi2c_msg_start(dev, I2C_MSG_WRITE | sqe->iodev_flags,
sqe->tiny_buf, sqe->tiny_buf_len, dt_spec->addr);
sqe->tiny_tx.buf, sqe->tiny_tx.buf_len, dt_spec->addr);
case RTIO_OP_TX:
return mcux_lpi2c_msg_start(dev, I2C_MSG_WRITE | sqe->iodev_flags,
sqe->buf, sqe->buf_len, dt_spec->addr);
sqe->tx.buf, sqe->tx.buf_len, dt_spec->addr);
case RTIO_OP_I2C_CONFIGURE:
res = mcux_lpi2c_do_configure(dev, sqe->i2c_config);
return i2c_rtio_complete(data->ctx, res);


@@ -69,13 +69,14 @@ static bool i2c_nrfx_twi_rtio_start(const struct device *dev)
switch (sqe->op) {
case RTIO_OP_RX:
return i2c_nrfx_twi_rtio_msg_start(dev, I2C_MSG_READ | sqe->iodev_flags,
sqe->buf, sqe->buf_len, dt_spec->addr);
sqe->rx.buf, sqe->rx.buf_len, dt_spec->addr);
case RTIO_OP_TINY_TX:
return i2c_nrfx_twi_rtio_msg_start(dev, I2C_MSG_WRITE | sqe->iodev_flags,
sqe->tiny_buf, sqe->tiny_buf_len, dt_spec->addr);
sqe->tiny_tx.buf, sqe->tiny_tx.buf_len,
dt_spec->addr);
case RTIO_OP_TX:
return i2c_nrfx_twi_rtio_msg_start(dev, I2C_MSG_WRITE | sqe->iodev_flags,
sqe->buf, sqe->buf_len, dt_spec->addr);
sqe->tx.buf, sqe->tx.buf_len, dt_spec->addr);
case RTIO_OP_I2C_CONFIGURE:
(void)i2c_nrfx_twi_configure(dev, sqe->i2c_config);
return false;


@@ -164,8 +164,6 @@ static void read_msg_start(Twihs *const twihs, const uint32_t len, const uint8_t
/* Start the transfer by sending START condition */
twihs->TWIHS_CR = TWIHS_CR_START | twihs_cr_stop;
}
static void i2c_sam_twihs_complete(const struct device *dev, int status);
@@ -189,11 +187,11 @@ static void i2c_sam_twihs_start(const struct device *dev)
switch (sqe->op) {
case RTIO_OP_RX:
read_msg_start(twihs, sqe->buf_len, dt_spec->addr);
read_msg_start(twihs, sqe->rx.buf_len, dt_spec->addr);
break;
case RTIO_OP_TX:
dev_data->buf_idx = 1;
write_msg_start(twihs, sqe->buf, 0, dt_spec->addr);
write_msg_start(twihs, sqe->tx.buf, 0, dt_spec->addr);
break;
default:
LOG_ERR("Invalid op code %d for submission %p\n", sqe->op, (void *)sqe);
@@ -245,10 +243,10 @@ static void i2c_sam_twihs_isr(const struct device *dev)
/* Byte received */
if (isr_status & TWIHS_SR_RXRDY) {
sqe->buf[dev_data->buf_idx] = twihs->TWIHS_RHR;
sqe->rx.buf[dev_data->buf_idx] = twihs->TWIHS_RHR;
dev_data->buf_idx += 1;
if (dev_data->buf_idx == sqe->buf_len - 1U) {
if (dev_data->buf_idx == sqe->rx.buf_len - 1U) {
/* Send STOP condition */
twihs->TWIHS_CR = TWIHS_CR_STOP;
}
@@ -256,7 +254,7 @@ static void i2c_sam_twihs_isr(const struct device *dev)
/* Byte sent */
if (isr_status & TWIHS_SR_TXRDY) {
if (dev_data->buf_idx == sqe->buf_len) {
if (dev_data->buf_idx == sqe->tx.buf_len) {
if (sqe->iodev_flags & RTIO_IODEV_I2C_STOP) {
/* Send STOP condition */
twihs->TWIHS_CR = TWIHS_CR_STOP;
@@ -268,7 +266,7 @@ static void i2c_sam_twihs_isr(const struct device *dev)
return;
}
} else {
twihs->TWIHS_THR = sqe->buf[dev_data->buf_idx++];
twihs->TWIHS_THR = sqe->tx.buf[dev_data->buf_idx++];
}
}


@@ -753,23 +753,23 @@ static void spi_mcux_iodev_start(const struct device *dev)
switch (sqe->op) {
case RTIO_OP_RX:
transfer.txData = NULL;
transfer.rxData = sqe->buf;
transfer.dataSize = sqe->buf_len;
transfer.rxData = sqe->rx.buf;
transfer.dataSize = sqe->rx.buf_len;
break;
case RTIO_OP_TX:
transfer.rxData = NULL;
transfer.txData = sqe->buf;
transfer.dataSize = sqe->buf_len;
transfer.txData = sqe->tx.buf;
transfer.dataSize = sqe->tx.buf_len;
break;
case RTIO_OP_TINY_TX:
transfer.rxData = NULL;
transfer.txData = sqe->tiny_buf;
transfer.dataSize = sqe->tiny_buf_len;
transfer.txData = sqe->tiny_tx.buf;
transfer.dataSize = sqe->tiny_tx.buf_len;
break;
case RTIO_OP_TXRX:
transfer.txData = sqe->tx_buf;
transfer.rxData = sqe->rx_buf;
transfer.dataSize = sqe->txrx_buf_len;
transfer.txData = sqe->txrx.tx_buf;
transfer.rxData = sqe->txrx.rx_buf;
transfer.dataSize = sqe->txrx.buf_len;
break;
default:
LOG_ERR("Invalid op code %d for submission %p\n", sqe->op, (void *)sqe);


@@ -657,16 +657,17 @@ static void spi_sam_iodev_start(const struct device *dev)
switch (sqe->op) {
case RTIO_OP_RX:
ret = spi_sam_rx(dev, cfg->regs, sqe->buf, sqe->buf_len);
ret = spi_sam_rx(dev, cfg->regs, sqe->rx.buf, sqe->rx.buf_len);
break;
case RTIO_OP_TX:
ret = spi_sam_tx(dev, cfg->regs, sqe->buf, sqe->buf_len);
ret = spi_sam_tx(dev, cfg->regs, sqe->tx.buf, sqe->tx.buf_len);
break;
case RTIO_OP_TINY_TX:
ret = spi_sam_tx(dev, cfg->regs, sqe->tiny_buf, sqe->tiny_buf_len);
ret = spi_sam_tx(dev, cfg->regs, sqe->tiny_tx.buf, sqe->tiny_tx.buf_len);
break;
case RTIO_OP_TXRX:
ret = spi_sam_txrx(dev, cfg->regs, sqe->tx_buf, sqe->rx_buf, sqe->txrx_buf_len);
ret = spi_sam_txrx(dev, cfg->regs, sqe->txrx.tx_buf, sqe->txrx.rx_buf,
sqe->txrx.buf_len);
break;
default:
LOG_ERR("Invalid op code %d for submission %p\n", sqe->op, (void *)sqe);


@@ -253,30 +253,36 @@ struct rtio_sqe {
union {
/** OP_TX, OP_RX */
/** OP_TX */
struct {
uint32_t buf_len; /**< Length of buffer */
uint8_t *buf; /**< Buffer to use*/
};
const uint8_t *buf; /**< Buffer to write from */
} tx;
/** OP_RX */
struct {
uint32_t buf_len; /**< Length of buffer */
uint8_t *buf; /**< Buffer to read into */
} rx;
/** OP_TINY_TX */
struct {
uint8_t tiny_buf_len; /**< Length of tiny buffer */
uint8_t tiny_buf[7]; /**< Tiny buffer */
};
uint8_t buf_len; /**< Length of tiny buffer */
uint8_t buf[7]; /**< Tiny buffer */
} tiny_tx;
/** OP_CALLBACK */
struct {
rtio_callback_t callback;
void *arg0; /**< Last argument given to callback */
};
} callback;
/** OP_TXRX */
struct {
uint32_t txrx_buf_len;
uint8_t *tx_buf;
uint8_t *rx_buf;
};
uint32_t buf_len; /**< Length of tx and rx buffers */
const uint8_t *tx_buf; /**< Buffer to write from */
uint8_t *rx_buf; /**< Buffer to read into */
} txrx;
/** OP_I2C_CONFIGURE */
uint32_t i2c_config;
@@ -504,8 +510,8 @@ static inline void rtio_sqe_prep_read(struct rtio_sqe *sqe,
sqe->op = RTIO_OP_RX;
sqe->prio = prio;
sqe->iodev = iodev;
sqe->buf_len = len;
sqe->buf = buf;
sqe->rx.buf_len = len;
sqe->rx.buf = buf;
sqe->userdata = userdata;
}
@@ -536,7 +542,7 @@ static inline void rtio_sqe_prep_read_multishot(struct rtio_sqe *sqe,
static inline void rtio_sqe_prep_write(struct rtio_sqe *sqe,
const struct rtio_iodev *iodev,
int8_t prio,
uint8_t *buf,
const uint8_t *buf,
uint32_t len,
void *userdata)
{
@@ -544,8 +550,8 @@ static inline void rtio_sqe_prep_write(struct rtio_sqe *sqe,
sqe->op = RTIO_OP_TX;
sqe->prio = prio;
sqe->iodev = iodev;
sqe->buf_len = len;
sqe->buf = buf;
sqe->tx.buf_len = len;
sqe->tx.buf = buf;
sqe->userdata = userdata;
}
@@ -566,14 +572,14 @@ static inline void rtio_sqe_prep_tiny_write(struct rtio_sqe *sqe,
uint8_t tiny_write_len,
void *userdata)
{
__ASSERT_NO_MSG(tiny_write_len <= sizeof(sqe->tiny_buf));
__ASSERT_NO_MSG(tiny_write_len <= sizeof(sqe->tiny_tx.buf));
memset(sqe, 0, sizeof(struct rtio_sqe));
sqe->op = RTIO_OP_TINY_TX;
sqe->prio = prio;
sqe->iodev = iodev;
sqe->tiny_buf_len = tiny_write_len;
memcpy(sqe->tiny_buf, tiny_write_data, tiny_write_len);
sqe->tiny_tx.buf_len = tiny_write_len;
memcpy(sqe->tiny_tx.buf, tiny_write_data, tiny_write_len);
sqe->userdata = userdata;
}
@@ -594,8 +600,8 @@ static inline void rtio_sqe_prep_callback(struct rtio_sqe *sqe,
sqe->op = RTIO_OP_CALLBACK;
sqe->prio = 0;
sqe->iodev = NULL;
sqe->callback = callback;
sqe->arg0 = arg0;
sqe->callback.callback = callback;
sqe->callback.arg0 = arg0;
sqe->userdata = userdata;
}
@@ -605,7 +611,7 @@ static inline void rtio_sqe_prep_callback(struct rtio_sqe *sqe,
static inline void rtio_sqe_prep_transceive(struct rtio_sqe *sqe,
const struct rtio_iodev *iodev,
int8_t prio,
uint8_t *tx_buf,
const uint8_t *tx_buf,
uint8_t *rx_buf,
uint32_t buf_len,
void *userdata)
@@ -614,9 +620,9 @@ static inline void rtio_sqe_prep_transceive(struct rtio_sqe *sqe,
sqe->op = RTIO_OP_TXRX;
sqe->prio = prio;
sqe->iodev = iodev;
sqe->txrx_buf_len = buf_len;
sqe->tx_buf = tx_buf;
sqe->rx_buf = rx_buf;
sqe->txrx.buf_len = buf_len;
sqe->txrx.tx_buf = tx_buf;
sqe->txrx.rx_buf = rx_buf;
sqe->userdata = userdata;
}
@@ -1040,9 +1046,9 @@ static inline uint32_t rtio_cqe_compute_flags(struct rtio_iodev_sqe *iodev_sqe)
if (iodev_sqe->sqe.op == RTIO_OP_RX && iodev_sqe->sqe.flags & RTIO_SQE_MEMPOOL_BUFFER) {
struct rtio *r = iodev_sqe->r;
struct sys_mem_blocks *mem_pool = r->block_pool;
int blk_index = (iodev_sqe->sqe.buf - mem_pool->buffer) >>
int blk_index = (iodev_sqe->sqe.rx.buf - mem_pool->buffer) >>
mem_pool->info.blk_sz_shift;
int blk_count = iodev_sqe->sqe.buf_len >> mem_pool->info.blk_sz_shift;
int blk_count = iodev_sqe->sqe.rx.buf_len >> mem_pool->info.blk_sz_shift;
flags = RTIO_CQE_FLAG_PREP_MEMPOOL(blk_index, blk_count);
}
@@ -1189,19 +1195,19 @@ static inline int rtio_sqe_rx_buf(const struct rtio_iodev_sqe *iodev_sqe, uint32
if (sqe->op == RTIO_OP_RX && sqe->flags & RTIO_SQE_MEMPOOL_BUFFER) {
struct rtio *r = iodev_sqe->r;
if (sqe->buf != NULL) {
if (sqe->buf_len < min_buf_len) {
if (sqe->rx.buf != NULL) {
if (sqe->rx.buf_len < min_buf_len) {
return -ENOMEM;
}
*buf = sqe->buf;
*buf_len = sqe->buf_len;
*buf = sqe->rx.buf;
*buf_len = sqe->rx.buf_len;
return 0;
}
int rc = rtio_block_pool_alloc(r, min_buf_len, max_buf_len, buf, buf_len);
if (rc == 0) {
sqe->buf = *buf;
sqe->buf_len = *buf_len;
sqe->rx.buf = *buf;
sqe->rx.buf_len = *buf_len;
return 0;
}
@@ -1211,12 +1217,12 @@ static inline int rtio_sqe_rx_buf(const struct rtio_iodev_sqe *iodev_sqe, uint32
ARG_UNUSED(max_buf_len);
#endif
if (sqe->buf_len < min_buf_len) {
if (sqe->rx.buf_len < min_buf_len) {
return -ENOMEM;
}
*buf = sqe->buf;
*buf_len = sqe->buf_len;
*buf = sqe->rx.buf;
*buf_len = sqe->rx.buf_len;
return 0;
}
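
Callers mostly see this split through the prep helpers above: rtio_sqe_prep_write() and rtio_sqe_prep_transceive() now take const transmit pointers and fill the tx/txrx members. A hedged usage sketch against those prototypes follows; my_iodev, the buffers, and the priority value 0 are placeholders, not part of this commit:

#include <zephyr/rtio/rtio.h>

extern struct rtio_iodev my_iodev; /* hypothetical target device */

static void fill_example_sqes(struct rtio_sqe *wr, struct rtio_sqe *rd, struct rtio_sqe *tr)
{
        static const uint8_t cmd[2] = { 0x0f, 0x00 }; /* read-only data is now accepted */
        static uint8_t resp[2];

        /* Fills wr->tx.buf / wr->tx.buf_len */
        rtio_sqe_prep_write(wr, &my_iodev, 0, cmd, sizeof(cmd), NULL);

        /* Fills rd->rx.buf / rd->rx.buf_len */
        rtio_sqe_prep_read(rd, &my_iodev, 0, resp, sizeof(resp), NULL);

        /* Fills tr->txrx.tx_buf / tr->txrx.rx_buf / tr->txrx.buf_len */
        rtio_sqe_prep_transceive(tr, &my_iodev, 0, cmd, resp, sizeof(cmd), NULL);
}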


@@ -19,7 +19,7 @@ static void rtio_executor_op(struct rtio_iodev_sqe *iodev_sqe)
switch (sqe->op) {
case RTIO_OP_CALLBACK:
sqe->callback(iodev_sqe->r, sqe, sqe->arg0);
sqe->callback.callback(iodev_sqe->r, sqe, sqe->callback.arg0);
rtio_iodev_sqe_ok(iodev_sqe, 0);
break;
default:
@@ -124,13 +124,13 @@ static inline void rtio_executor_handle_multishot(struct rtio *r, struct rtio_io
if (curr->sqe.op == RTIO_OP_RX && FIELD_GET(RTIO_SQE_MEMPOOL_BUFFER, curr->sqe.flags)) {
if (is_canceled) {
/* Free the memory first since no CQE will be generated */
LOG_DBG("Releasing memory @%p size=%u", (void *)curr->sqe.buf,
curr->sqe.buf_len);
rtio_release_buffer(r, curr->sqe.buf, curr->sqe.buf_len);
LOG_DBG("Releasing memory @%p size=%u", (void *)curr->sqe.rx.buf,
curr->sqe.rx.buf_len);
rtio_release_buffer(r, curr->sqe.rx.buf, curr->sqe.rx.buf_len);
}
/* Reset the buffer info so the next request can get a new one */
curr->sqe.buf = NULL;
curr->sqe.buf_len = 0;
curr->sqe.rx.buf = NULL;
curr->sqe.rx.buf_len = 0;
}
if (!is_canceled) {
/* Request was not canceled, put the SQE back in the queue */


@@ -29,18 +29,18 @@ static inline bool rtio_vrfy_sqe(struct rtio_sqe *sqe)
case RTIO_OP_NOP:
break;
case RTIO_OP_TX:
valid_sqe &= K_SYSCALL_MEMORY(sqe->buf, sqe->buf_len, false);
valid_sqe &= K_SYSCALL_MEMORY(sqe->tx.buf, sqe->tx.buf_len, false);
break;
case RTIO_OP_RX:
if ((sqe->flags & RTIO_SQE_MEMPOOL_BUFFER) == 0) {
valid_sqe &= K_SYSCALL_MEMORY(sqe->buf, sqe->buf_len, true);
valid_sqe &= K_SYSCALL_MEMORY(sqe->rx.buf, sqe->rx.buf_len, true);
}
break;
case RTIO_OP_TINY_TX:
break;
case RTIO_OP_TXRX:
valid_sqe &= K_SYSCALL_MEMORY(sqe->tx_buf, sqe->txrx_buf_len, true);
valid_sqe &= K_SYSCALL_MEMORY(sqe->rx_buf, sqe->txrx_buf_len, true);
valid_sqe &= K_SYSCALL_MEMORY(sqe->txrx.tx_buf, sqe->txrx.buf_len, true);
valid_sqe &= K_SYSCALL_MEMORY(sqe->txrx.rx_buf, sqe->txrx.buf_len, true);
break;
default:
/* RTIO OP must be known and allowable from user mode