Zephyr Project API 3.4.0
A Scalable Open Source RTOS
rtio.h
1/*
2 * Copyright (c) 2022 Intel Corporation
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
26#ifndef ZEPHYR_INCLUDE_RTIO_RTIO_H_
27#define ZEPHYR_INCLUDE_RTIO_RTIO_H_
28
29#include <string.h>
30
31#include <zephyr/app_memory/app_memdomain.h>
32#include <zephyr/device.h>
33#include <zephyr/kernel.h>
34#include <zephyr/rtio/rtio_mpsc.h>
35#include <zephyr/sys/__assert.h>
36#include <zephyr/sys/atomic.h>
37#include <zephyr/sys/mem_blocks.h>
38#include <zephyr/sys/util.h>
39#include <zephyr/sys/iterable_sections.h>
40
41#ifdef __cplusplus
42extern "C" {
43#endif
44
45
71#define RTIO_PRIO_LOW 0U
72
76#define RTIO_PRIO_NORM 127U
77
81#define RTIO_PRIO_HIGH 255U
82
102#define RTIO_SQE_CHAINED BIT(0)
103
114#define RTIO_SQE_TRANSACTION BIT(1)
115
116
120#define RTIO_IODEV_I2C_STOP BIT(0)
121
125#define RTIO_IODEV_I2C_RESTART BIT(1)
126
130#define RTIO_IODEV_I2C_10_BITS BIT(2)
131
145#define RTIO_SQE_MEMPOOL_BUFFER BIT(2)
146
153#define RTIO_SQE_CANCELED BIT(3)
154
161#define RTIO_SQE_MULTISHOT BIT(4)
162
180#define RTIO_CQE_FLAG_MEMPOOL_BUFFER BIT(0)
181
182#define RTIO_CQE_FLAG_GET(flags) FIELD_GET(GENMASK(7, 0), (flags))
183
190#define RTIO_CQE_FLAG_MEMPOOL_GET_BLK_IDX(flags) FIELD_GET(GENMASK(19, 8), (flags))
191
198#define RTIO_CQE_FLAG_MEMPOOL_GET_BLK_CNT(flags) FIELD_GET(GENMASK(31, 20), (flags))
199
207#define RTIO_CQE_FLAG_PREP_MEMPOOL(blk_idx, blk_cnt) \
208 (FIELD_PREP(GENMASK(7, 0), RTIO_CQE_FLAG_MEMPOOL_BUFFER) | \
209 FIELD_PREP(GENMASK(19, 8), blk_idx) | FIELD_PREP(GENMASK(31, 20), blk_cnt))
210
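The three accessors above undo what RTIO_CQE_FLAG_PREP_MEMPOOL packs: bits 0-7 carry the flag identifier, bits 8-19 the mempool block index, and bits 20-31 the block count. A minimal round-trip sketch (illustrative only, not part of the header), using arbitrary example values:

    uint32_t cqe_flags = RTIO_CQE_FLAG_PREP_MEMPOOL(3, 2);

    /* Recover the packed fields again */
    __ASSERT_NO_MSG(RTIO_CQE_FLAG_GET(cqe_flags) == RTIO_CQE_FLAG_MEMPOOL_BUFFER);
    __ASSERT_NO_MSG(RTIO_CQE_FLAG_MEMPOOL_GET_BLK_IDX(cqe_flags) == 3);
    __ASSERT_NO_MSG(RTIO_CQE_FLAG_MEMPOOL_GET_BLK_CNT(cqe_flags) == 2);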
216struct rtio;
217struct rtio_cqe;
218struct rtio_sqe;
219struct rtio_sqe_pool;
220struct rtio_cqe_pool;
221struct rtio_block_pool;
222struct rtio_iodev;
223struct rtio_iodev_sqe;
233typedef void (*rtio_callback_t)(struct rtio *r, const struct rtio_sqe *sqe, void *arg0);
234
238struct rtio_sqe {
239 uint8_t op; /**< Op code */
240
241 uint8_t prio; /**< Op priority */
242
243 uint16_t flags; /**< Op Flags */
244
245 uint16_t iodev_flags; /**< Op iodev flags */
246
247 uint16_t _resv0;
248
249 const struct rtio_iodev *iodev;
258 void *userdata;
259
260 union {
261
263 struct {
264 uint32_t buf_len; /**< Length of buffer */
265 uint8_t *buf; /**< Buffer to use */
266 };
267
269 struct {
270 uint8_t tiny_buf_len; /**< Length of tiny buffer */
271 uint8_t tiny_buf[7]; /**< Tiny buffer */
272 };
273
275 struct {
276 rtio_callback_t callback;
277 void *arg0; /**< Last argument given to callback */
278 };
279
281 struct {
282 uint32_t txrx_buf_len;
283 uint8_t *tx_buf;
284 uint8_t *rx_buf;
285 };
286
287 };
288};
289
291/* Ensure the rtio_sqe never grows beyond a common cacheline size of 64 bytes */
292BUILD_ASSERT(sizeof(struct rtio_sqe) <= 64);
298struct rtio_cqe {
299 struct rtio_mpsc_node q;
300
301 int32_t result; /**< Result from operation */
302 void *userdata; /**< Associated userdata with operation */
303 uint32_t flags; /**< Flags associated with the operation */
304};
305
306struct rtio_sqe_pool {
307 struct rtio_mpsc free_q;
308 const uint16_t pool_size;
309 uint16_t pool_free;
310 struct rtio_iodev_sqe *pool;
311};
312
313struct rtio_cqe_pool {
314 struct rtio_mpsc free_q;
315 const uint16_t pool_size;
316 uint16_t pool_free;
317 struct rtio_cqe *pool;
318};
319
320struct rtio_block_pool {
321 /* Memory pool associated with this RTIO context. */
322 struct sys_mem_blocks *mempool;
323 /* The size (in bytes) of a single block in the mempool */
324 const uint32_t blk_size;
325};
326
338struct rtio {
339#ifdef CONFIG_RTIO_SUBMIT_SEM
340 /* A wait semaphore which may suspend the calling thread
341 * to wait for some number of completions when calling submit
342 */
343 struct k_sem *submit_sem;
344
345 uint32_t submit_count;
346#endif
347
348#ifdef CONFIG_RTIO_CONSUME_SEM
349 /* A wait semaphore which may suspend the calling thread
350 * to wait for some number of completions while consuming
351 * them from the completion queue
352 */
353 struct k_sem *consume_sem;
354#endif
355
356 /* Number of completions that were unable to be submitted with results
357 * due to the cq spsc being full
358 */
359 atomic_t xcqcnt;
360
361 /* Submission queue object pool with free list */
362 struct rtio_sqe_pool *sqe_pool;
363
364 /* Complete queue object pool with free list */
365 struct rtio_cqe_pool *cqe_pool;
366
367#ifdef CONFIG_RTIO_SYS_MEM_BLOCKS
368 /* Mem block pool */
369 struct rtio_block_pool *block_pool;
370#endif
371
372 /* Submission queue */
373 struct rtio_mpsc sq;
374
375 /* Completion queue */
376 struct rtio_mpsc cq;
377};
378
380extern struct k_mem_partition rtio_partition;
381
389#ifdef CONFIG_RTIO_SYS_MEM_BLOCKS
390static inline uint16_t __rtio_compute_mempool_block_index(const struct rtio *r, const void *ptr)
391{
392 uintptr_t addr = (uintptr_t)ptr;
393 struct sys_mem_blocks *mem_pool = r->block_pool->mempool;
394 uint32_t block_size = r->block_pool->blk_size;
395
396 uintptr_t buff = (uintptr_t)mem_pool->buffer;
397 uint32_t buff_size = mem_pool->num_blocks * block_size;
398
399 if (addr < buff || addr >= buff + buff_size) {
400 return UINT16_MAX;
401 }
402 return (addr - buff) / block_size;
403}
404#endif
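As a quick worked example of the helper above (illustrative numbers only): with the pool buffer starting at 0x20000000, a blk_size of 32 and 16 blocks, a pointer at 0x20000060 lies 0x60 / 32 = 3 blocks into the pool, so the function returns 3; any pointer outside the range [0x20000000, 0x20000200) yields UINT16_MAX.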
405
411struct rtio_iodev_sqe {
412 struct rtio_sqe sqe;
413 struct rtio_mpsc_node q;
414 struct rtio_iodev_sqe *next;
415 struct rtio *r;
416};
417
421struct rtio_iodev_api {
433 void (*submit)(struct rtio_iodev_sqe *iodev_sqe);
434};
435
439struct rtio_iodev {
440 /* Function pointer table */
441 const struct rtio_iodev_api *api;
442
443 /* Queue of RTIO contexts with requests */
444 struct rtio_mpsc iodev_sq;
445
446 /* Data associated with this iodev */
447 void *data;
448};
449
451#define RTIO_OP_NOP 0
452
454#define RTIO_OP_RX (RTIO_OP_NOP+1)
455
457#define RTIO_OP_TX (RTIO_OP_RX+1)
458
460#define RTIO_OP_TINY_TX (RTIO_OP_TX+1)
461
463#define RTIO_OP_CALLBACK (RTIO_OP_TINY_TX+1)
464
466#define RTIO_OP_TXRX (RTIO_OP_CALLBACK+1)
467
468
472static inline void rtio_sqe_prep_nop(struct rtio_sqe *sqe,
473 const struct rtio_iodev *iodev,
474 void *userdata)
475{
476 memset(sqe, 0, sizeof(struct rtio_sqe));
477 sqe->op = RTIO_OP_NOP;
478 sqe->iodev = iodev;
479 sqe->userdata = userdata;
480}
481
485static inline void rtio_sqe_prep_read(struct rtio_sqe *sqe,
486 const struct rtio_iodev *iodev,
487 int8_t prio,
488 uint8_t *buf,
489 uint32_t len,
490 void *userdata)
491{
492 memset(sqe, 0, sizeof(struct rtio_sqe));
493 sqe->op = RTIO_OP_RX;
494 sqe->prio = prio;
495 sqe->iodev = iodev;
496 sqe->buf_len = len;
497 sqe->buf = buf;
498 sqe->userdata = userdata;
499}
500
506static inline void rtio_sqe_prep_read_with_pool(struct rtio_sqe *sqe,
507 const struct rtio_iodev *iodev, int8_t prio,
508 void *userdata)
509{
510 rtio_sqe_prep_read(sqe, iodev, prio, NULL, 0, userdata);
511 sqe->flags = RTIO_SQE_MEMPOOL_BUFFER;
512}
513
514static inline void rtio_sqe_prep_read_multishot(struct rtio_sqe *sqe,
515 const struct rtio_iodev *iodev, int8_t prio,
516 void *userdata)
517{
518 rtio_sqe_prep_read_with_pool(sqe, iodev, prio, userdata);
519 sqe->flags |= RTIO_SQE_MULTISHOT;
520}
521
525static inline void rtio_sqe_prep_write(struct rtio_sqe *sqe,
526 const struct rtio_iodev *iodev,
527 int8_t prio,
528 uint8_t *buf,
529 uint32_t len,
530 void *userdata)
531{
532 memset(sqe, 0, sizeof(struct rtio_sqe));
533 sqe->op = RTIO_OP_TX;
534 sqe->prio = prio;
535 sqe->iodev = iodev;
536 sqe->buf_len = len;
537 sqe->buf = buf;
538 sqe->userdata = userdata;
539}
540
551static inline void rtio_sqe_prep_tiny_write(struct rtio_sqe *sqe,
552 const struct rtio_iodev *iodev,
553 int8_t prio,
554 const uint8_t *tiny_write_data,
555 uint8_t tiny_write_len,
556 void *userdata)
557{
558 __ASSERT_NO_MSG(tiny_write_len <= sizeof(sqe->tiny_buf));
559
560 memset(sqe, 0, sizeof(struct rtio_sqe));
561 sqe->op = RTIO_OP_TINY_TX;
562 sqe->prio = prio;
563 sqe->iodev = iodev;
564 sqe->tiny_buf_len = tiny_write_len;
565 memcpy(sqe->tiny_buf, tiny_write_data, tiny_write_len);
566 sqe->userdata = userdata;
567}
568
577static inline void rtio_sqe_prep_callback(struct rtio_sqe *sqe,
578 rtio_callback_t callback,
579 void *arg0,
580 void *userdata)
581{
582 memset(sqe, 0, sizeof(struct rtio_sqe));
583 sqe->op = RTIO_OP_CALLBACK;
584 sqe->prio = 0;
585 sqe->iodev = NULL;
586 sqe->callback = callback;
587 sqe->arg0 = arg0;
588 sqe->userdata = userdata;
589}
590
594static inline void rtio_sqe_prep_transceive(struct rtio_sqe *sqe,
595 const struct rtio_iodev *iodev,
596 int8_t prio,
597 uint8_t *tx_buf,
598 uint8_t *rx_buf,
599 uint32_t buf_len,
600 void *userdata)
601{
602 memset(sqe, 0, sizeof(struct rtio_sqe));
603 sqe->op = RTIO_OP_TXRX;
604 sqe->prio = prio;
605 sqe->iodev = iodev;
606 sqe->txrx_buf_len = buf_len;
607 sqe->tx_buf = tx_buf;
608 sqe->rx_buf = rx_buf;
609 sqe->userdata = userdata;
610}
611
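Each rtio_sqe_prep_* helper above fully initializes one SQE for its op code; flags such as RTIO_SQE_CHAINED are then OR'd in by the caller. A usage sketch (not part of the header) that chains a device write to a completion callback; my_rtio, my_iodev and my_done_cb are placeholder names:

    uint8_t tx_data[4] = {0xDE, 0xAD, 0xBE, 0xEF};
    struct rtio_sqe *write_sqe = rtio_sqe_acquire(&my_rtio);
    struct rtio_sqe *cb_sqe = rtio_sqe_acquire(&my_rtio);

    /* NULL checks omitted for brevity; acquire fails when the SQE pool is empty */
    rtio_sqe_prep_write(write_sqe, &my_iodev, RTIO_PRIO_NORM, tx_data, sizeof(tx_data), NULL);
    write_sqe->flags |= RTIO_SQE_CHAINED; /* run the callback only after the write completes */
    rtio_sqe_prep_callback(cb_sqe, my_done_cb, NULL, NULL);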
612static inline struct rtio_iodev_sqe *rtio_sqe_pool_alloc(struct rtio_sqe_pool *pool)
613{
614 struct rtio_mpsc_node *node = rtio_mpsc_pop(&pool->free_q);
615
616 if (node == NULL) {
617 return NULL;
618 }
619
620 struct rtio_iodev_sqe *iodev_sqe = CONTAINER_OF(node, struct rtio_iodev_sqe, q);
621
622 pool->pool_free--;
623
624 return iodev_sqe;
625}
626
627static inline void rtio_sqe_pool_free(struct rtio_sqe_pool *pool, struct rtio_iodev_sqe *iodev_sqe)
628{
629 rtio_mpsc_push(&pool->free_q, &iodev_sqe->q);
630
631 pool->pool_free++;
632}
633
634static inline struct rtio_cqe *rtio_cqe_pool_alloc(struct rtio_cqe_pool *pool)
635{
636 struct rtio_mpsc_node *node = rtio_mpsc_pop(&pool->free_q);
637
638 if (node == NULL) {
639 return NULL;
640 }
641
642 struct rtio_cqe *cqe = CONTAINER_OF(node, struct rtio_cqe, q);
643
644 memset(cqe, 0, sizeof(struct rtio_cqe));
645
646 pool->pool_free--;
647
648 return cqe;
649}
650
651static inline void rtio_cqe_pool_free(struct rtio_cqe_pool *pool, struct rtio_cqe *cqe)
652{
653 rtio_mpsc_push(&pool->free_q, &cqe->q);
654
655 pool->pool_free++;
656}
657
658static inline int rtio_block_pool_alloc(struct rtio_block_pool *pool, size_t min_sz,
659 size_t max_sz, uint8_t **buf, uint32_t *buf_len)
660{
661 uint32_t bytes = max_sz;
662
663 do {
664 size_t num_blks = DIV_ROUND_UP(bytes, pool->blk_size);
665 int rc = sys_mem_blocks_alloc_contiguous(pool->mempool, num_blks, (void **)buf);
666
667 if (rc == 0) {
668 *buf_len = num_blks * pool->blk_size;
669 return 0;
670 }
671
672 bytes -= pool->blk_size;
673 } while (bytes >= min_sz);
674
675 return -ENOMEM;
676}
677
678static inline void rtio_block_pool_free(struct rtio_block_pool *pool, void *buf, uint32_t buf_len)
679{
680 size_t num_blks = buf_len / pool->blk_size;
681
682 sys_mem_blocks_free_contiguous(pool->mempool, buf, num_blks);
683}
684
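rtio_block_pool_alloc above is best effort: it first asks for enough contiguous blocks to cover max_sz and, on failure, retries one block smaller each time until min_sz can no longer be satisfied, so callers must use the returned length rather than the requested one. A sketch (not part of the header; pool is a placeholder):

    uint8_t *data;
    uint32_t data_len;

    /* With blk_size == 16 this tries 64, 48, 32 and finally 16 bytes */
    if (rtio_block_pool_alloc(pool, 16, 64, &data, &data_len) == 0) {
        /* data_len is the size actually granted, a multiple of blk_size */
        rtio_block_pool_free(pool, data, data_len);
    }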
685/* Do not try and reformat the macros */
686/* clang-format off */
687
695#define RTIO_IODEV_DEFINE(name, iodev_api, iodev_data) \
696 STRUCT_SECTION_ITERABLE(rtio_iodev, name) = { \
697 .api = (iodev_api), \
698 .iodev_sq = RTIO_MPSC_INIT((name.iodev_sq)), \
699 .data = (iodev_data), \
700 }
701
702#define Z_RTIO_SQE_POOL_DEFINE(name, sz) \
703 static struct rtio_iodev_sqe _sqe_pool_##name[sz]; \
704 STRUCT_SECTION_ITERABLE(rtio_sqe_pool, name) = { \
705 .free_q = RTIO_MPSC_INIT((name.free_q)), \
706 .pool_size = sz, \
707 .pool_free = sz, \
708 .pool = _sqe_pool_##name, \
709 }
710
711
712#define Z_RTIO_CQE_POOL_DEFINE(name, sz) \
713 static struct rtio_cqe _cqe_pool_##name[sz]; \
714 STRUCT_SECTION_ITERABLE(rtio_cqe_pool, name) = { \
715 .free_q = RTIO_MPSC_INIT((name.free_q)), \
716 .pool_size = sz, \
717 .pool_free = sz, \
718 .pool = _cqe_pool_##name, \
719 }
720
730#define RTIO_BMEM COND_CODE_1(CONFIG_USERSPACE, (K_APP_BMEM(rtio_partition) static), (static))
731
741#define RTIO_DMEM COND_CODE_1(CONFIG_USERSPACE, (K_APP_DMEM(rtio_partition) static), (static))
742
743#define Z_RTIO_BLOCK_POOL_DEFINE(name, blk_sz, blk_cnt, blk_align) \
744 RTIO_BMEM uint8_t __aligned(WB_UP(blk_align)) \
745 _block_pool_##name[blk_cnt*WB_UP(blk_sz)]; \
746 _SYS_MEM_BLOCKS_DEFINE_WITH_EXT_BUF(_sys_blocks_##name, WB_UP(blk_sz), \
747 blk_cnt, _block_pool_##name, \
748 RTIO_DMEM); \
749 static struct rtio_block_pool name = { \
750 .mempool = &_sys_blocks_##name, \
751 .blk_size = blk_sz, \
752 }
753
754#define Z_RTIO_DEFINE(name, _sqe_pool, _cqe_pool, _block_pool) \
755 IF_ENABLED(CONFIG_RTIO_SUBMIT_SEM, \
756 (static K_SEM_DEFINE(_submit_sem_##name, 0, K_SEM_MAX_LIMIT))) \
757 IF_ENABLED(CONFIG_RTIO_CONSUME_SEM, \
758 (static K_SEM_DEFINE(_consume_sem_##name, 0, K_SEM_MAX_LIMIT))) \
759 STRUCT_SECTION_ITERABLE(rtio, name) = { \
760 IF_ENABLED(CONFIG_RTIO_SUBMIT_SEM, (.submit_sem = &_submit_sem_##name,)) \
761 IF_ENABLED(CONFIG_RTIO_SUBMIT_SEM, (.submit_count = 0,)) \
762 IF_ENABLED(CONFIG_RTIO_CONSUME_SEM, (.consume_sem = &_consume_sem_##name,)) \
763 .xcqcnt = ATOMIC_INIT(0), \
764 .sqe_pool = _sqe_pool, \
765 .cqe_pool = _cqe_pool, \
766 IF_ENABLED(CONFIG_RTIO_SYS_MEM_BLOCKS, (.block_pool = _block_pool,)) \
767 .sq = RTIO_MPSC_INIT((name.sq)), \
768 .cq = RTIO_MPSC_INIT((name.cq)), \
769 }
770
778#define RTIO_DEFINE(name, sq_sz, cq_sz) \
779 Z_RTIO_SQE_POOL_DEFINE(name##_sqe_pool, sq_sz); \
780 Z_RTIO_CQE_POOL_DEFINE(name##_cqe_pool, cq_sz); \
781 Z_RTIO_DEFINE(name, &name##_sqe_pool, &name##_cqe_pool, NULL) \
782
783/* clang-format on */
784
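RTIO_DEFINE above statically instantiates the context together with its SQE and CQE pools (plus the submit/consume semaphores when the corresponding Kconfig options are enabled). A one-line usage sketch with an illustrative name, creating a context with room for 8 in-flight submissions and 8 completions:

    RTIO_DEFINE(my_rtio, 8, 8);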
795#define RTIO_DEFINE_WITH_MEMPOOL(name, sq_sz, cq_sz, num_blks, blk_size, balign) \
796 Z_RTIO_SQE_POOL_DEFINE(name##_sqe_pool, sq_sz); \
797 Z_RTIO_CQE_POOL_DEFINE(name##_cqe_pool, cq_sz); \
798 Z_RTIO_BLOCK_POOL_DEFINE(name##_block_pool, blk_size, num_blks, balign); \
799 Z_RTIO_DEFINE(name, &name##_sqe_pool, &name##_cqe_pool, &name##_block_pool)
800
801/* clang-format on */
802
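The mempool variant additionally creates a sys_mem_blocks allocator so RX buffers can be leased from the context instead of being supplied by the caller. A usage sketch (not part of the header; names prefixed my_ are placeholders and the iodev must honor RTIO_SQE_MEMPOOL_BUFFER):

    RTIO_DEFINE_WITH_MEMPOOL(my_rtio, 4, 4, 16, 32, sizeof(void *));

    void my_read(const struct rtio_iodev *my_iodev)
    {
        struct rtio_sqe *sqe = rtio_sqe_acquire(&my_rtio); /* NULL check omitted */
        struct rtio_cqe *cqe;
        uint8_t *buf;
        uint32_t buf_len;

        rtio_sqe_prep_read_with_pool(sqe, my_iodev, RTIO_PRIO_NORM, NULL);
        rtio_submit(&my_rtio, 1);

        cqe = rtio_cqe_consume_block(&my_rtio);
        if (rtio_cqe_get_mempool_buffer(&my_rtio, cqe, &buf, &buf_len) == 0) {
            /* ... use buf[0..buf_len) ... */
            rtio_release_buffer(&my_rtio, buf, buf_len);
        }
        rtio_cqe_release(&my_rtio, cqe);
    }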
810static inline uint32_t rtio_sqe_acquirable(struct rtio *r)
811{
812 return r->sqe_pool->pool_free;
813}
814
822static inline uint32_t rtio_cqe_consumable(struct rtio *r)
823{
824 return (r->cqe_pool->pool_size - r->cqe_pool->pool_free);
825}
826
835static inline struct rtio_iodev_sqe *rtio_txn_next(const struct rtio_iodev_sqe *iodev_sqe)
836{
837 if (iodev_sqe->sqe.flags & RTIO_SQE_TRANSACTION) {
838 return iodev_sqe->next;
839 } else {
840 return NULL;
841 }
842}
843
844
853static inline struct rtio_iodev_sqe *rtio_chain_next(const struct rtio_iodev_sqe *iodev_sqe)
854{
855 if (iodev_sqe->sqe.flags & RTIO_SQE_CHAINED) {
856 return iodev_sqe->next;
857 } else {
858 return NULL;
859 }
860}
861
870static inline struct rtio_iodev_sqe *rtio_iodev_sqe_next(const struct rtio_iodev_sqe *iodev_sqe)
871{
872 return iodev_sqe->next;
873}
874
883static inline struct rtio_sqe *rtio_sqe_acquire(struct rtio *r)
884{
885 struct rtio_iodev_sqe *iodev_sqe = rtio_sqe_pool_alloc(r->sqe_pool);
886
887 if (iodev_sqe == NULL) {
888 return NULL;
889 }
890
891 rtio_mpsc_push(&r->sq, &iodev_sqe->q);
892
893 return &iodev_sqe->sqe;
894}
895
901static inline void rtio_sqe_drop_all(struct rtio *r)
902{
903 struct rtio_iodev_sqe *iodev_sqe;
904 struct rtio_mpsc_node *node = rtio_mpsc_pop(&r->sq);
905
906 while (node != NULL) {
907 iodev_sqe = CONTAINER_OF(node, struct rtio_iodev_sqe, q);
908 rtio_sqe_pool_free(r->sqe_pool, iodev_sqe);
909 node = rtio_mpsc_pop(&r->sq);
910 }
911}
912
916static inline struct rtio_cqe *rtio_cqe_acquire(struct rtio *r)
917{
918 struct rtio_cqe *cqe = rtio_cqe_pool_alloc(r->cqe_pool);
919
920 if (cqe == NULL) {
921 return NULL;
922 }
923
924 memset(cqe, 0, sizeof(struct rtio_cqe));
925
926 return cqe;
927}
928
932static inline void rtio_cqe_produce(struct rtio *r, struct rtio_cqe *cqe)
933{
934 rtio_mpsc_push(&r->cq, &cqe->q);
935}
936
948static inline struct rtio_cqe *rtio_cqe_consume(struct rtio *r)
949{
950 struct rtio_mpsc_node *node;
951 struct rtio_cqe *cqe = NULL;
952
953#ifdef CONFIG_RTIO_CONSUME_SEM
954 if (k_sem_take(r->consume_sem, K_NO_WAIT) != 0) {
955 return NULL;
956 }
957#endif
958
959 node = rtio_mpsc_pop(&r->cq);
960 if (node == NULL) {
961 return NULL;
962 }
963 cqe = CONTAINER_OF(node, struct rtio_cqe, q);
964
965 return cqe;
966}
967
978static inline struct rtio_cqe *rtio_cqe_consume_block(struct rtio *r)
979{
980 struct rtio_mpsc_node *node;
981 struct rtio_cqe *cqe;
982
983#ifdef CONFIG_RTIO_CONSUME_SEM
984 k_sem_take(r->consume_sem, K_FOREVER);
985#endif
986 node = rtio_mpsc_pop(&r->cq);
987 while (node == NULL) {
988 node = rtio_mpsc_pop(&r->cq);
989 Z_SPIN_DELAY(1);
990 }
991 cqe = CONTAINER_OF(node, struct rtio_cqe, q);
992
993 return cqe;
994}
995
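rtio_cqe_consume above returns immediately (possibly NULL) while rtio_cqe_consume_block waits until a completion is available. A non-blocking drain-loop sketch (not part of the header; my_rtio is a placeholder context):

    struct rtio_cqe *cqe;

    while ((cqe = rtio_cqe_consume(&my_rtio)) != NULL) {
        if (cqe->result < 0) {
            /* the operation failed; cqe->userdata identifies which submission it was */
        }
        rtio_cqe_release(&my_rtio, cqe);
    }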
1002static inline void rtio_cqe_release(struct rtio *r, struct rtio_cqe *cqe)
1003{
1004 rtio_cqe_pool_free(r->cqe_pool, cqe);
1005}
1006
1013static inline uint32_t rtio_cqe_compute_flags(struct rtio_iodev_sqe *iodev_sqe)
1014{
1015 uint32_t flags = 0;
1016
1017 ARG_UNUSED(iodev_sqe);
1018
1019#ifdef CONFIG_RTIO_SYS_MEM_BLOCKS
1020 if (iodev_sqe->sqe.op == RTIO_OP_RX && iodev_sqe->sqe.flags & RTIO_SQE_MEMPOOL_BUFFER) {
1021 struct rtio *r = iodev_sqe->r;
1022 struct sys_mem_blocks *mem_pool = r->block_pool->mempool;
1023 uint32_t block_size = r->block_pool->blk_size;
1024 int blk_index = (iodev_sqe->sqe.buf - mem_pool->buffer) / block_size;
1025 int blk_count = iodev_sqe->sqe.buf_len / block_size;
1026
1027 flags = RTIO_CQE_FLAG_PREP_MEMPOOL(blk_index, blk_count);
1028 }
1029#endif
1030
1031 return flags;
1032}
1033
1049__syscall int rtio_cqe_get_mempool_buffer(const struct rtio *r, struct rtio_cqe *cqe,
1050 uint8_t **buff, uint32_t *buff_len);
1051
1052static inline int z_impl_rtio_cqe_get_mempool_buffer(const struct rtio *r, struct rtio_cqe *cqe,
1053 uint8_t **buff, uint32_t *buff_len)
1054{
1055#ifdef CONFIG_RTIO_SYS_MEM_BLOCKS
1056 if (RTIO_CQE_FLAG_GET(cqe->flags) == RTIO_CQE_FLAG_MEMPOOL_BUFFER) {
1057 int blk_idx = RTIO_CQE_FLAG_MEMPOOL_GET_BLK_IDX(cqe->flags);
1058 int blk_count = RTIO_CQE_FLAG_MEMPOOL_GET_BLK_CNT(cqe->flags);
1059
1060 *buff = r->block_pool->mempool->buffer + blk_idx * r->block_pool->blk_size;
1061 *buff_len = blk_count * r->block_pool->blk_size;
1062 __ASSERT_NO_MSG(*buff >= r->block_pool->mempool->buffer);
1063 __ASSERT_NO_MSG(*buff <
1064 r->block_pool->mempool->buffer +
1065 r->block_pool->blk_size * r->block_pool->mempool->num_blocks);
1066 return 0;
1067 }
1068 return -EINVAL;
1069#else
1070 ARG_UNUSED(r);
1071 ARG_UNUSED(cqe);
1072 ARG_UNUSED(buff);
1073 ARG_UNUSED(buff_len);
1074
1075 return -ENOTSUP;
1076#endif
1077}
1078
1080void rtio_executor_ok(struct rtio_iodev_sqe *iodev_sqe, int result);
1081void rtio_executor_err(struct rtio_iodev_sqe *iodev_sqe, int result);
1082
1091static inline void rtio_iodev_sqe_ok(struct rtio_iodev_sqe *iodev_sqe, int result)
1092{
1093 rtio_executor_ok(iodev_sqe, result);
1094}
1095
1104static inline void rtio_iodev_sqe_err(struct rtio_iodev_sqe *iodev_sqe, int result)
1105{
1106 rtio_executor_err(iodev_sqe, result);
1107}
1108
1114static inline void rtio_iodev_cancel_all(struct rtio_iodev *iodev)
1115{
1116 /* Clear pending requests as -ENODATA */
1117 struct rtio_mpsc_node *node = rtio_mpsc_pop(&iodev->iodev_sq);
1118
1119 while (node != NULL) {
1120 struct rtio_iodev_sqe *iodev_sqe = CONTAINER_OF(node, struct rtio_iodev_sqe, q);
1121
1122 rtio_iodev_sqe_err(iodev_sqe, -ECANCELED);
1123 node = rtio_mpsc_pop(&iodev->iodev_sq);
1124 }
1125}
1126
1138static inline void rtio_cqe_submit(struct rtio *r, int result, void *userdata, uint32_t flags)
1139{
1140 struct rtio_cqe *cqe = rtio_cqe_acquire(r);
1141
1142 if (cqe == NULL) {
1143 atomic_inc(&r->xcqcnt);
1144 } else {
1145 cqe->result = result;
1146 cqe->userdata = userdata;
1147 cqe->flags = flags;
1148 rtio_cqe_produce(r, cqe);
1149 }
1150#ifdef CONFIG_RTIO_SUBMIT_SEM
1151 if (r->submit_count > 0) {
1152 r->submit_count--;
1153 if (r->submit_count == 0) {
1154 k_sem_give(r->submit_sem);
1155 }
1156 }
1157#endif
1158#ifdef CONFIG_RTIO_CONSUME_SEM
1159 k_sem_give(r->consume_sem);
1160#endif
1161}
1162
1163#define __RTIO_MEMPOOL_GET_NUM_BLKS(num_bytes, blk_size) (((num_bytes) + (blk_size)-1) / (blk_size))
1164
1177static inline int rtio_sqe_rx_buf(const struct rtio_iodev_sqe *iodev_sqe, uint32_t min_buf_len,
1178 uint32_t max_buf_len, uint8_t **buf, uint32_t *buf_len)
1179{
1180 struct rtio_sqe *sqe = (struct rtio_sqe *)&iodev_sqe->sqe;
1181
1182#ifdef CONFIG_RTIO_SYS_MEM_BLOCKS
1183 if (sqe->op == RTIO_OP_RX && sqe->flags & RTIO_SQE_MEMPOOL_BUFFER) {
1184 struct rtio *r = iodev_sqe->r;
1185
1186 if (sqe->buf != NULL) {
1187 if (sqe->buf_len < min_buf_len) {
1188 return -ENOMEM;
1189 }
1190 *buf = sqe->buf;
1191 *buf_len = sqe->buf_len;
1192 return 0;
1193 }
1194
1195 int rc = rtio_block_pool_alloc(r->block_pool, min_buf_len, max_buf_len,
1196 buf, buf_len);
1197 if (rc == 0) {
1198 sqe->buf = *buf;
1199 sqe->buf_len = *buf_len;
1200 return 0;
1201 }
1202
1203 return -ENOMEM;
1204 }
1205#endif
1206 if (sqe->buf_len < min_buf_len) {
1207 return -ENOMEM;
1208 }
1209
1210 *buf = sqe->buf;
1211 *buf_len = sqe->buf_len;
1212 return 0;
1213}
1214
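rtio_sqe_rx_buf above is meant for iodev implementations: it hands back either the caller-supplied buffer or one leased from the context's block pool. A sketch (not part of the header, entirely illustrative) of a submit handler completing an RX request:

    static void my_iodev_submit(struct rtio_iodev_sqe *iodev_sqe)
    {
        uint8_t *buf;
        uint32_t buf_len;
        int rc = rtio_sqe_rx_buf(iodev_sqe, 4, 16, &buf, &buf_len);

        if (rc != 0) {
            rtio_iodev_sqe_err(iodev_sqe, rc);
            return;
        }

        /* ... fill buf with up to buf_len bytes from the hardware ... */
        rtio_iodev_sqe_ok(iodev_sqe, buf_len);
    }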
1229__syscall void rtio_release_buffer(struct rtio *r, void *buff, uint32_t buff_len);
1230
1231static inline void z_impl_rtio_release_buffer(struct rtio *r, void *buff, uint32_t buff_len)
1232{
1233#ifdef CONFIG_RTIO_SYS_MEM_BLOCKS
1234 if (r == NULL || buff == NULL || r->block_pool == NULL || buff_len == 0) {
1235 return;
1236 }
1237
1238 rtio_block_pool_free(r->block_pool, buff, buff_len);
1239#endif
1240}
1241
1245static inline void rtio_access_grant(struct rtio *r, struct k_thread *t)
1246{
1247 k_object_access_grant(r, t);
1248
1249#ifdef CONFIG_RTIO_SUBMIT_SEM
1250 k_object_access_grant(r->submit_sem, t);
1251#endif
1252
1253#ifdef CONFIG_RTIO_CONSUME_SEM
1254 k_object_access_grant(r->consume_sem, t);
1255#endif
1256}
1257
1268__syscall int rtio_sqe_cancel(struct rtio_sqe *sqe);
1269
1270static inline int z_impl_rtio_sqe_cancel(struct rtio_sqe *sqe)
1271{
1272 struct rtio_iodev_sqe *iodev_sqe = CONTAINER_OF(sqe, struct rtio_iodev_sqe, sqe);
1273
1274 do {
1275 iodev_sqe->sqe.flags |= RTIO_SQE_CANCELED;
1276 iodev_sqe = rtio_iodev_sqe_next(iodev_sqe);
1277 } while (iodev_sqe != NULL);
1278
1279 return 0;
1280}
1281
1297__syscall int rtio_sqe_copy_in_get_handles(struct rtio *r, const struct rtio_sqe *sqes,
1298 struct rtio_sqe **handle, size_t sqe_count);
1299
1300static inline int z_impl_rtio_sqe_copy_in_get_handles(struct rtio *r, const struct rtio_sqe *sqes,
1301 struct rtio_sqe **handle,
1302 size_t sqe_count)
1303{
1304 struct rtio_sqe *sqe;
1305 uint32_t acquirable = rtio_sqe_acquirable(r);
1306
1307 if (acquirable < sqe_count) {
1308 return -ENOMEM;
1309 }
1310
1311 for (unsigned long i = 0; i < sqe_count; i++) {
1312 sqe = rtio_sqe_acquire(r);
1313 __ASSERT_NO_MSG(sqe != NULL);
1314 if (handle != NULL && i == 0) {
1315 *handle = sqe;
1316 }
1317 *sqe = sqes[i];
1318 }
1319
1320 return 0;
1321}
1322
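Because the copy-in path hands back a handle to the first queued SQE, callers can later request cancellation. A sketch (not part of the header; assumes my_rtio was defined with a mempool, since multishot reads lease their buffers from it, and my_iodev is a placeholder):

    struct rtio_sqe sqe;
    struct rtio_sqe *handle;

    rtio_sqe_prep_read_multishot(&sqe, &my_iodev, RTIO_PRIO_NORM, NULL);
    rtio_sqe_copy_in_get_handles(&my_rtio, &sqe, &handle, 1);
    rtio_submit(&my_rtio, 0);

    /* ... later, stop the multishot stream ... */
    rtio_sqe_cancel(handle);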
1339static inline int rtio_sqe_copy_in(struct rtio *r, const struct rtio_sqe *sqes, size_t sqe_count)
1340{
1341 return rtio_sqe_copy_in_get_handles(r, sqes, NULL, sqe_count);
1342}
1343
1359__syscall int rtio_cqe_copy_out(struct rtio *r,
1360 struct rtio_cqe *cqes,
1361 size_t cqe_count,
1362 k_timeout_t timeout);
1363static inline int z_impl_rtio_cqe_copy_out(struct rtio *r,
1364 struct rtio_cqe *cqes,
1365 size_t cqe_count,
1366 k_timeout_t timeout)
1367{
1368 size_t copied = 0;
1369 struct rtio_cqe *cqe;
1370 uint64_t end = sys_clock_timeout_end_calc(timeout);
1371
1372 do {
1373 cqe = K_TIMEOUT_EQ(timeout, K_FOREVER) ? rtio_cqe_consume_block(r)
1374 : rtio_cqe_consume(r);
1375 if (cqe == NULL) {
1376#ifdef CONFIG_BOARD_NATIVE_POSIX
1377 /* Native posix fakes the clock and only moves it forward when sleeping. */
1378 k_sleep(K_TICKS(1));
1379#else
1380 Z_SPIN_DELAY(1);
1381#endif
1382 continue;
1383 }
1384 cqes[copied++] = *cqe;
1385 rtio_cqe_release(r, cqe);
1386 } while (copied < cqe_count && end > k_uptime_ticks());
1387
1388 return copied;
1389}
1390
1404__syscall int rtio_submit(struct rtio *r, uint32_t wait_count);
1405
1406static inline int z_impl_rtio_submit(struct rtio *r, uint32_t wait_count)
1407{
1408 int res = 0;
1409
1410#ifdef CONFIG_RTIO_SUBMIT_SEM
1411 /* TODO: behavior is undefined if another thread calls submit concurrently
1412 */
1413 if (wait_count > 0) {
1414 __ASSERT(!k_is_in_isr(),
1415 "expected rtio submit with wait count to be called from a thread");
1416
1417 k_sem_reset(r->submit_sem);
1418 r->submit_count = wait_count;
1419 }
1420#endif
1421
1422 /* Submit the queue to the executor which consumes submissions
1423 * and produces completions through ISR chains or other means.
1424 */
1425 rtio_executor_submit(r);
1426
1427
1428 /* TODO could be nicer if we could suspend the thread and not
1429 * wake up on each completion here.
1430 */
1431#ifdef CONFIG_RTIO_SUBMIT_SEM
1432
1433 if (wait_count > 0) {
1434 res = k_sem_take(r->submit_sem, K_FOREVER);
1435 __ASSERT(res == 0,
1436 "semaphore was reset or timed out while waiting on completions!");
1437 }
1438#else
1439 while (rtio_cqe_consumable(r) < wait_count) {
1440 Z_SPIN_DELAY(10);
1441 k_yield();
1442 }
1443#endif
1444
1445 return res;
1446}
1447
1452#ifdef __cplusplus
1453}
1454#endif
1455
1456#include <syscalls/rtio.h>
1457
1458#endif /* ZEPHYR_INCLUDE_RTIO_RTIO_H_ */