|
| #define | K_ANY NULL |
| |
| #define | K_PRIO_COOP(x) (-(CONFIG_NUM_COOP_PRIORITIES - (x))) |
| |
| #define | K_PRIO_PREEMPT(x) (x) |
| |
| #define | K_HIGHEST_THREAD_PRIO (-CONFIG_NUM_COOP_PRIORITIES) |
| |
| #define | K_LOWEST_THREAD_PRIO CONFIG_NUM_PREEMPT_PRIORITIES |
| |
| #define | K_IDLE_PRIO K_LOWEST_THREAD_PRIO |
| |
| #define | K_HIGHEST_APPLICATION_THREAD_PRIO (K_HIGHEST_THREAD_PRIO) |
| |
| #define | K_LOWEST_APPLICATION_THREAD_PRIO (K_LOWEST_THREAD_PRIO - 1) |
| |
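The priority macros above map cooperative priorities to negative values and preemptive priorities to non-negative ones; a numerically lower value is scheduled first. A minimal sketch of how they relate, assuming the common defaults of 16 cooperative and 15 preemptive priorities (the `MONITOR_PRIO`/`WORKER_PRIO` names are illustrative, not from the reference):

```c
#include <zephyr/kernel.h>

/* With CONFIG_NUM_COOP_PRIORITIES=16, K_PRIO_COOP(2) expands to -(16 - 2) = -14,
 * while K_PRIO_PREEMPT(5) stays at 5. Lower numeric value == higher priority,
 * so every cooperative priority outranks every preemptive one.
 */
#define MONITOR_PRIO  K_PRIO_COOP(2)     /* illustrative name */
#define WORKER_PRIO   K_PRIO_PREEMPT(5)  /* illustrative name */

BUILD_ASSERT(MONITOR_PRIO < WORKER_PRIO,
	     "cooperative priorities always outrank preemptive ones");
```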
| #define | K_ESSENTIAL (BIT(0)) |
| | System thread that must not abort.
|
| |
| #define | K_FP_IDX 1 |
| |
| #define | K_FP_REGS (BIT(K_FP_IDX)) |
| | FPU registers are managed by context switch.
|
| |
| #define | K_USER (BIT(2)) |
| | User mode thread.
|
| |
| #define | K_INHERIT_PERMS (BIT(3)) |
| | Inherit Permissions.
|
| |
| #define | K_CALLBACK_STATE (BIT(4)) |
| | Callback item state.
|
| |
| #define | K_DSP_IDX 6 |
| | DSP registers are managed by context switch.
|
| |
| #define | K_DSP_REGS (BIT(K_DSP_IDX)) |
| |
| #define | K_AGU_IDX 7 |
| | AGU registers are managed by context switch.
|
| |
| #define | K_AGU_REGS (BIT(K_AGU_IDX)) |
| |
| #define | K_SSE_REGS (BIT(7)) |
| | FP and SSE registers are managed by context switch on x86.
|
| |
| #define | k_thread_access_grant(thread, ...) FOR_EACH_FIXED_ARG(k_object_access_grant, (;), (thread), __VA_ARGS__) |
| | Grant a thread access to a set of kernel objects.
|
| |
| #define | K_THREAD_DEFINE(name, stack_size, entry, p1, p2, p3, prio, options, delay) |
| | Statically define and initialize a thread.
|
| |
| #define | K_KERNEL_THREAD_DEFINE(name, stack_size, entry, p1, p2, p3, prio, options, delay) |
| | Statically define and initialize a thread intended to run only in kernel mode.
|
| |
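An illustrative use of K_THREAD_DEFINE; the thread name, entry function, stack size, and start delay below are made up. A thread defined this way is created at boot and, here, starts running 500 ms later:

```c
#include <zephyr/kernel.h>

static void blink_fn(void *p1, void *p2, void *p3)
{
	ARG_UNUSED(p1);
	ARG_UNUSED(p2);
	ARG_UNUSED(p3);

	while (1) {
		/* periodic work goes here */
		k_msleep(1000);
	}
}

/* 1 KiB stack, preemptive priority 7, no options, start after 500 ms. */
K_THREAD_DEFINE(blink_thread, 1024, blink_fn, NULL, NULL, NULL,
		K_PRIO_PREEMPT(7), 0, 500);
```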
| #define | K_NO_WAIT Z_TIMEOUT_NO_WAIT |
| | Generate null timeout delay.
|
| |
| #define | K_NSEC(t) Z_TIMEOUT_NS(t) |
| | Generate timeout delay from nanoseconds.
|
| |
| #define | K_USEC(t) Z_TIMEOUT_US(t) |
| | Generate timeout delay from microseconds.
|
| |
| #define | K_CYC(t) Z_TIMEOUT_CYC(t) |
| | Generate timeout delay from cycles.
|
| |
| #define | K_TICKS(t) Z_TIMEOUT_TICKS(t) |
| | Generate timeout delay from system ticks.
|
| |
| #define | K_MSEC(ms) Z_TIMEOUT_MS(ms) |
| | Generate timeout delay from milliseconds.
|
| |
| #define | K_SECONDS(s) K_MSEC((s) * MSEC_PER_SEC) |
| | Generate timeout delay from seconds.
|
| |
| #define | K_MINUTES(m) K_SECONDS((m) * 60) |
| | Generate timeout delay from minutes.
|
| |
| #define | K_HOURS(h) K_MINUTES((h) * 60) |
| | Generate timeout delay from hours.
|
| |
| #define | K_FOREVER Z_FOREVER |
| | Generate infinite timeout delay.
|
| |
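Each of the timeout macros above produces a `k_timeout_t` value that any blocking kernel call accepts. A hedged sketch using a semaphore (the semaphore and the `consume()` function are illustrative, not from the reference):

```c
#include <zephyr/kernel.h>

void consume(struct k_sem *sem)
{
	if (k_sem_take(sem, K_NO_WAIT) == 0) {
		return;                        /* got it without blocking */
	}
	if (k_sem_take(sem, K_MSEC(100)) == 0) {
		return;                        /* got it within 100 ms */
	}
	/* The 100 ms wait timed out; give it one more second at most. */
	(void)k_sem_take(sem, K_SECONDS(1));
}
```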
| #define | K_TIMER_DEFINE(name, expiry_fn, stop_fn) |
| | Statically define and initialize a timer.
|
| |
| #define | K_QUEUE_DEFINE(name) |
| | Statically define and initialize a queue.
|
| |
| #define | K_EVENT_DEFINE(name) |
| | Statically define and initialize an event object.
|
| |
| #define | k_fifo_init(fifo) |
| | Initialize a FIFO queue.
|
| |
| #define | k_fifo_cancel_wait(fifo) |
| | Cancel waiting on a FIFO queue.
|
| |
| #define | k_fifo_put(fifo, data) |
| | Add an element to a FIFO queue.
|
| |
| #define | k_fifo_alloc_put(fifo, data) |
| | Add an element to a FIFO queue.
|
| |
| #define | k_fifo_put_list(fifo, head, tail) |
| | Atomically add a list of elements to a FIFO.
|
| |
| #define | k_fifo_put_slist(fifo, list) |
| | Atomically add a list of elements to a FIFO queue.
|
| |
| #define | k_fifo_get(fifo, timeout) |
| | Get an element from a FIFO queue.
|
| |
| #define | k_fifo_is_empty(fifo) k_queue_is_empty(&(fifo)->_queue) |
| | Query a FIFO queue to see if it has data available.
|
| |
| #define | k_fifo_peek_head(fifo) |
| | Peek element at the head of a FIFO queue.
|
| |
| #define | k_fifo_peek_tail(fifo) |
| | Peek element at the tail of a FIFO queue.
|
| |
| #define | K_FIFO_DEFINE(name) |
| | Statically define and initialize a FIFO queue.
|
| |
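A sketch of typical FIFO usage. It follows the usual Zephyr convention that the first word of each queued item is reserved for the kernel; the `sample` structure and function names are illustrative:

```c
#include <zephyr/kernel.h>

struct sample {
	void *fifo_reserved;     /* first word is reserved for the kernel */
	int value;
};

K_FIFO_DEFINE(sample_fifo);

void producer(struct sample *s, int value)
{
	s->value = value;
	k_fifo_put(&sample_fifo, s);   /* item must stay valid until consumed */
}

void consumer(void)
{
	struct sample *s = k_fifo_get(&sample_fifo, K_FOREVER);

	/* ... use s->value ... */
}
```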
| #define | k_lifo_init(lifo) |
| | Initialize a LIFO queue.
|
| |
| #define | k_lifo_put(lifo, data) |
| | Add an element to a LIFO queue.
|
| |
| #define | k_lifo_alloc_put(lifo, data) |
| | Add an element to a LIFO queue.
|
| |
| #define | k_lifo_get(lifo, timeout) |
| | Get an element from a LIFO queue.
|
| |
| #define | K_LIFO_DEFINE(name) |
| | Statically define and initialize a LIFO queue.
|
| |
| #define | K_STACK_DEFINE(name, stack_num_entries) |
| | Statically define and initialize a stack.
|
| |
| #define | K_MUTEX_DEFINE(name) |
| | Statically define and initialize a mutex.
|
| |
| #define | K_CONDVAR_DEFINE(name) |
| | Statically define and initialize a condition variable.
|
| |
| #define | K_SEM_MAX_LIMIT UINT_MAX |
| | Maximum limit value allowed for a semaphore.
|
| |
| #define | K_SEM_DEFINE(name, initial_count, count_limit) |
| | Statically define and initialize a semaphore.
|
| |
| #define | K_WORK_DELAYABLE_DEFINE(work, work_handler) |
| | Initialize a statically-defined delayable work item.
|
| |
| #define | K_WORK_USER_DEFINE(work, work_handler) struct k_work_user work = Z_WORK_USER_INITIALIZER(work_handler) |
| | Initialize a statically-defined user work item.
|
| |
| #define | K_WORK_DEFINE(work, work_handler) struct k_work work = Z_WORK_INITIALIZER(work_handler) |
| | Initialize a statically-defined work item.
|
| |
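A sketch of a statically defined work item handed off to the system work queue, for example from an ISR (the names are illustrative):

```c
#include <zephyr/kernel.h>

static void report_fn(struct k_work *work)
{
	ARG_UNUSED(work);

	/* Runs later in the system work queue thread, so it may block. */
}

K_WORK_DEFINE(report_work, report_fn);

void on_interrupt(void)
{
	/* Safe from ISR context; the handler runs in thread context. */
	k_work_submit(&report_work);
}
```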
| #define | K_MSGQ_FLAG_ALLOC BIT(0) |
| |
| #define | K_MSGQ_DEFINE(q_name, q_msg_size, q_max_msgs, q_align) |
| | Statically define and initialize a message queue.
|
| |
| #define | K_MBOX_DEFINE(name) |
| | Statically define and initialize a mailbox.
|
| |
| #define | K_PIPE_DEFINE(name, pipe_buffer_size, pipe_align) |
| | Statically define and initialize a pipe.
|
| |
| #define | K_MEM_SLAB_DEFINE(name, slab_block_size, slab_num_blocks, slab_align) |
| | Statically define and initialize a memory slab in a public (non-static) scope.
|
| |
| #define | K_MEM_SLAB_DEFINE_STATIC(name, slab_block_size, slab_num_blocks, slab_align) |
| | Statically define and initialize a memory slab in a private (static) scope.
|
| |
| #define | K_HEAP_DEFINE(name, bytes) |
| | Define a static k_heap.
|
| |
| #define | K_HEAP_DEFINE_NOCACHE(name, bytes) Z_HEAP_DEFINE_IN_SECT(name, bytes, __nocache) |
| | Define a static k_heap in uncached memory.
|
| |
| #define | K_POLL_TYPE_IGNORE 0 |
| |
| #define | K_POLL_TYPE_SIGNAL Z_POLL_TYPE_BIT(_POLL_TYPE_SIGNAL) |
| |
| #define | K_POLL_TYPE_SEM_AVAILABLE Z_POLL_TYPE_BIT(_POLL_TYPE_SEM_AVAILABLE) |
| |
| #define | K_POLL_TYPE_DATA_AVAILABLE Z_POLL_TYPE_BIT(_POLL_TYPE_DATA_AVAILABLE) |
| |
| #define | K_POLL_TYPE_FIFO_DATA_AVAILABLE K_POLL_TYPE_DATA_AVAILABLE |
| |
| #define | K_POLL_TYPE_MSGQ_DATA_AVAILABLE Z_POLL_TYPE_BIT(_POLL_TYPE_MSGQ_DATA_AVAILABLE) |
| |
| #define | K_POLL_TYPE_PIPE_DATA_AVAILABLE Z_POLL_TYPE_BIT(_POLL_TYPE_PIPE_DATA_AVAILABLE) |
| |
| #define | K_POLL_STATE_NOT_READY 0 |
| |
| #define | K_POLL_STATE_SIGNALED Z_POLL_STATE_BIT(_POLL_STATE_SIGNALED) |
| |
| #define | K_POLL_STATE_SEM_AVAILABLE Z_POLL_STATE_BIT(_POLL_STATE_SEM_AVAILABLE) |
| |
| #define | K_POLL_STATE_DATA_AVAILABLE Z_POLL_STATE_BIT(_POLL_STATE_DATA_AVAILABLE) |
| |
| #define | K_POLL_STATE_FIFO_DATA_AVAILABLE K_POLL_STATE_DATA_AVAILABLE |
| |
| #define | K_POLL_STATE_MSGQ_DATA_AVAILABLE Z_POLL_STATE_BIT(_POLL_STATE_MSGQ_DATA_AVAILABLE) |
| |
| #define | K_POLL_STATE_PIPE_DATA_AVAILABLE Z_POLL_STATE_BIT(_POLL_STATE_PIPE_DATA_AVAILABLE) |
| |
| #define | K_POLL_STATE_CANCELLED Z_POLL_STATE_BIT(_POLL_STATE_CANCELLED) |
| |
| #define | K_POLL_SIGNAL_INITIALIZER(obj) |
| |
| #define | K_POLL_EVENT_INITIALIZER(_event_type, _event_mode, _event_obj) |
| |
| #define | K_POLL_EVENT_STATIC_INITIALIZER(_event_type, _event_mode, _event_obj, event_tag) |
| |
| #define | k_oops() z_except_reason(K_ERR_KERNEL_OOPS) |
| | Fatally terminate a thread.
|
| |
| #define | k_panic() z_except_reason(K_ERR_KERNEL_PANIC) |
| | Fatally terminate the system.
|
| |
|
| void | k_thread_foreach (k_thread_user_cb_t user_cb, void *user_data) |
| | Iterate over all the threads in the system.
|
| |
| void | k_thread_foreach_filter_by_cpu (unsigned int cpu, k_thread_user_cb_t user_cb, void *user_data) |
| | Iterate over all the threads running on the specified CPU.
|
| |
| void | k_thread_foreach_unlocked (k_thread_user_cb_t user_cb, void *user_data) |
| | Iterate over all the threads in the system without locking.
|
| |
| void | k_thread_foreach_unlocked_filter_by_cpu (unsigned int cpu, k_thread_user_cb_t user_cb, void *user_data) |
| | Iterate over the threads running on the current CPU without locking.
|
| |
| k_thread_stack_t * | k_thread_stack_alloc (size_t size, int flags) |
| | Dynamically allocate a thread stack.
|
| |
| int | k_thread_stack_free (k_thread_stack_t *stack) |
| | Free a dynamically allocated thread stack.
|
| |
| k_tid_t | k_thread_create (struct k_thread *new_thread, k_thread_stack_t *stack, size_t stack_size, k_thread_entry_t entry, void *p1, void *p2, void *p3, int prio, uint32_t options, k_timeout_t delay) |
| | Create a thread.
|
| |
| FUNC_NORETURN void | k_thread_user_mode_enter (k_thread_entry_t entry, void *p1, void *p2, void *p3) |
| | Drop a thread's privileges permanently to user mode.
|
| |
| static void | k_thread_heap_assign (struct k_thread *thread, struct k_heap *heap) |
| | Assign a resource memory pool to a thread.
|
| |
| int | k_thread_join (struct k_thread *thread, k_timeout_t timeout) |
| | Sleep until a thread exits.
|
| |
| int32_t | k_sleep (k_timeout_t timeout) |
| | Put the current thread to sleep.
|
| |
| static int32_t | k_msleep (int32_t ms) |
| | Put the current thread to sleep.
|
| |
| int32_t | k_usleep (int32_t us) |
| | Put the current thread to sleep with microsecond resolution.
|
| |
| void | k_busy_wait (uint32_t usec_to_wait) |
| | Cause the current thread to busy wait.
|
| |
| bool | k_can_yield (void) |
| | Check whether it is possible to yield in the current context.
|
| |
| void | k_yield (void) |
| | Yield the current thread.
|
| |
| void | k_wakeup (k_tid_t thread) |
| | Wake up a sleeping thread.
|
| |
| __attribute_const__ k_tid_t | k_sched_current_thread_query (void) |
| | Query thread ID of the current thread.
|
| |
| static __attribute_const__ k_tid_t | k_current_get (void) |
| | Get thread ID of the current thread.
|
| |
| void | k_thread_abort (k_tid_t thread) |
| | Abort a thread.
|
| |
| void | k_thread_start (k_tid_t thread) |
| | Start an inactive thread.
|
| |
| k_ticks_t | k_thread_timeout_expires_ticks (const struct k_thread *thread) |
| | Get time when a thread wakes up, in system ticks.
|
| |
| k_ticks_t | k_thread_timeout_remaining_ticks (const struct k_thread *thread) |
| | Get time remaining before a thread wakes up, in system ticks.
|
| |
| int | k_thread_priority_get (k_tid_t thread) |
| | Get a thread's priority.
|
| |
| void | k_thread_priority_set (k_tid_t thread, int prio) |
| | Set a thread's priority.
|
| |
| void | k_thread_deadline_set (k_tid_t thread, int deadline) |
| | Set deadline expiration time for scheduler.
|
| |
| int | k_thread_cpu_mask_clear (k_tid_t thread) |
| | Sets all CPU enable masks to zero.
|
| |
| int | k_thread_cpu_mask_enable_all (k_tid_t thread) |
| | Sets all CPU enable masks to one.
|
| |
| int | k_thread_cpu_mask_enable (k_tid_t thread, int cpu) |
| | Enable a thread to run on the specified CPU.
|
| |
| int | k_thread_cpu_mask_disable (k_tid_t thread, int cpu) |
| | Prevent a thread from running on the specified CPU.
|
| |
| int | k_thread_cpu_pin (k_tid_t thread, int cpu) |
| | Pin a thread to a CPU.
|
| |
| void | k_thread_suspend (k_tid_t thread) |
| | Suspend a thread.
|
| |
| void | k_thread_resume (k_tid_t thread) |
| | Resume a suspended thread.
|
| |
| void | k_sched_time_slice_set (int32_t slice, int prio) |
| | Set time-slicing period and scope.
|
| |
| void | k_thread_time_slice_set (struct k_thread *th, int32_t slice_ticks, k_thread_timeslice_fn_t expired, void *data) |
| | Set thread time slice.
|
| |
| bool | k_is_in_isr (void) |
| | Determine if code is running at interrupt level.
|
| |
| int | k_is_preempt_thread (void) |
| | Determine if code is running in a preemptible thread.
|
| |
| static bool | k_is_pre_kernel (void) |
| | Test whether startup is in the before-main-task phase.
|
| |
| void | k_sched_lock (void) |
| | Lock the scheduler.
|
| |
| void | k_sched_unlock (void) |
| | Unlock the scheduler.
|
| |
| void | k_thread_custom_data_set (void *value) |
| | Set current thread's custom data.
|
| |
| void * | k_thread_custom_data_get (void) |
| | Get current thread's custom data.
|
| |
| int | k_thread_name_set (k_tid_t thread, const char *str) |
| | Set current thread name.
|
| |
| const char * | k_thread_name_get (k_tid_t thread) |
| | Get thread name.
|
| |
| int | k_thread_name_copy (k_tid_t thread, char *buf, size_t size) |
| | Copy the thread name into a supplied buffer.
|
| |
| const char * | k_thread_state_str (k_tid_t thread_id, char *buf, size_t buf_size) |
| | Get thread state string.
|
| |
| void | k_timer_init (struct k_timer *timer, k_timer_expiry_t expiry_fn, k_timer_stop_t stop_fn) |
| | Initialize a timer.
|
| |
| void | k_timer_start (struct k_timer *timer, k_timeout_t duration, k_timeout_t period) |
| | Start a timer.
|
| |
| void | k_timer_stop (struct k_timer *timer) |
| | Stop a timer.
|
| |
| uint32_t | k_timer_status_get (struct k_timer *timer) |
| | Read timer status.
|
| |
| uint32_t | k_timer_status_sync (struct k_timer *timer) |
| | Synchronize thread to timer expiration.
|
| |
| k_ticks_t | k_timer_expires_ticks (const struct k_timer *timer) |
| | Get next expiration time of a timer, in system ticks.
|
| |
| k_ticks_t | k_timer_remaining_ticks (const struct k_timer *timer) |
| | Get time remaining before a timer next expires, in system ticks.
|
| |
| static uint32_t | k_timer_remaining_get (struct k_timer *timer) |
| | Get time remaining before a timer next expires.
|
| |
| void | k_timer_user_data_set (struct k_timer *timer, void *user_data) |
| | Associate user-specific data with a timer.
|
| |
| void * | k_timer_user_data_get (const struct k_timer *timer) |
| | Retrieve the user-specific data from a timer.
|
| |
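A sketch of a statically defined periodic timer; the expiry handler runs in interrupt context, and the names below are illustrative:

```c
#include <zephyr/kernel.h>

static void heartbeat_expiry(struct k_timer *timer)
{
	ARG_UNUSED(timer);

	/* Interrupt context: keep this short, or hand off to a work item. */
}

K_TIMER_DEFINE(heartbeat_timer, heartbeat_expiry, NULL);

void start_heartbeat(void)
{
	/* First expiry after 1 s, then every 100 ms until k_timer_stop(). */
	k_timer_start(&heartbeat_timer, K_SECONDS(1), K_MSEC(100));
}
```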
| int64_t | k_uptime_ticks (void) |
| | Get system uptime, in system ticks.
|
| |
| static int64_t | k_uptime_get (void) |
| | Get system uptime.
|
| |
| static uint32_t | k_uptime_get_32 (void) |
| | Get system uptime (32-bit version).
|
| |
| static uint32_t | k_uptime_seconds (void) |
| | Get system uptime in seconds.
|
| |
| static int64_t | k_uptime_delta (int64_t *reftime) |
| | Get elapsed time.
|
| |
| static uint32_t | k_cycle_get_32 (void) |
| | Read the hardware clock.
|
| |
| static uint64_t | k_cycle_get_64 (void) |
| | Read the 64-bit hardware clock.
|
| |
| void | k_queue_init (struct k_queue *queue) |
| | Initialize a queue.
|
| |
| void | k_queue_cancel_wait (struct k_queue *queue) |
| | Cancel waiting on a queue.
|
| |
| void | k_queue_append (struct k_queue *queue, void *data) |
| | Append an element to the end of a queue.
|
| |
| int32_t | k_queue_alloc_append (struct k_queue *queue, void *data) |
| | Append an element to a queue.
|
| |
| void | k_queue_prepend (struct k_queue *queue, void *data) |
| | Prepend an element to a queue.
|
| |
| int32_t | k_queue_alloc_prepend (struct k_queue *queue, void *data) |
| | Prepend an element to a queue.
|
| |
| void | k_queue_insert (struct k_queue *queue, void *prev, void *data) |
| | Insert an element into a queue.
|
| |
| int | k_queue_append_list (struct k_queue *queue, void *head, void *tail) |
| | Atomically append a list of elements to a queue.
|
| |
| int | k_queue_merge_slist (struct k_queue *queue, sys_slist_t *list) |
| | Atomically add a list of elements to a queue.
|
| |
| void * | k_queue_get (struct k_queue *queue, k_timeout_t timeout) |
| | Get an element from a queue.
|
| |
| bool | k_queue_remove (struct k_queue *queue, void *data) |
| | Remove an element from a queue.
|
| |
| bool | k_queue_unique_append (struct k_queue *queue, void *data) |
| | Append an element to a queue only if it's not present already.
|
| |
| int | k_queue_is_empty (struct k_queue *queue) |
| | Query a queue to see if it has data available.
|
| |
| void * | k_queue_peek_head (struct k_queue *queue) |
| | Peek element at the head of queue.
|
| |
| void * | k_queue_peek_tail (struct k_queue *queue) |
| | Peek element at the tail of queue.
|
| |
| int | k_futex_wait (struct k_futex *futex, int expected, k_timeout_t timeout) |
| | Pend the current thread on a futex.
|
| |
| int | k_futex_wake (struct k_futex *futex, bool wake_all) |
| | Wake one/all threads pending on a futex.
|
| |
| void | k_event_init (struct k_event *event) |
| | Initialize an event object.
|
| |
| uint32_t | k_event_post (struct k_event *event, uint32_t events) |
| | Post one or more events to an event object.
|
| |
| uint32_t | k_event_set (struct k_event *event, uint32_t events) |
| | Set the events in an event object.
|
| |
| uint32_t | k_event_set_masked (struct k_event *event, uint32_t events, uint32_t events_mask) |
| | Set or clear the events in an event object.
|
| |
| uint32_t | k_event_clear (struct k_event *event, uint32_t events) |
| | Clear the events in an event object.
|
| |
| uint32_t | k_event_wait (struct k_event *event, uint32_t events, bool reset, k_timeout_t timeout) |
| | Wait for any of the specified events.
|
| |
| uint32_t | k_event_wait_all (struct k_event *event, uint32_t events, bool reset, k_timeout_t timeout) |
| | Wait for all of the specified events.
|
| |
| static uint32_t | k_event_test (struct k_event *event, uint32_t events_mask) |
| | Test the events currently tracked in the event object.
|
| |
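A sketch of posting and waiting on events; the event bits and names are made up for illustration:

```c
#include <stdint.h>
#include <zephyr/kernel.h>

#define EVT_RX_DONE BIT(0)
#define EVT_TX_DONE BIT(1)

K_EVENT_DEFINE(io_events);

void on_rx_complete(void)
{
	k_event_post(&io_events, EVT_RX_DONE);
}

void wait_for_io(void)
{
	/* Wait up to 1 s for either bit; 'true' clears tracked events first. */
	uint32_t got = k_event_wait(&io_events, EVT_RX_DONE | EVT_TX_DONE,
				    true, K_SECONDS(1));

	if (got == 0) {
		/* timed out */
	}
}
```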
| void | k_stack_init (struct k_stack *stack, stack_data_t *buffer, uint32_t num_entries) |
| | Initialize a stack.
|
| |
| int32_t | k_stack_alloc_init (struct k_stack *stack, uint32_t num_entries) |
| | Initialize a stack.
|
| |
| int | k_stack_cleanup (struct k_stack *stack) |
| | Release a stack's allocated buffer.
|
| |
| int | k_stack_push (struct k_stack *stack, stack_data_t data) |
| | Push an element onto a stack.
|
| |
| int | k_stack_pop (struct k_stack *stack, stack_data_t *data, k_timeout_t timeout) |
| | Pop an element from a stack.
|
| |
| int | k_mutex_init (struct k_mutex *mutex) |
| | Initialize a mutex.
|
| |
| int | k_mutex_lock (struct k_mutex *mutex, k_timeout_t timeout) |
| | Lock a mutex.
|
| |
| int | k_mutex_unlock (struct k_mutex *mutex) |
| | Unlock a mutex.
|
| |
| int | k_condvar_init (struct k_condvar *condvar) |
| | Initialize a condition variable.
|
| |
| int | k_condvar_signal (struct k_condvar *condvar) |
| | Signal one thread that is pending on the condition variable.
|
| |
| int | k_condvar_broadcast (struct k_condvar *condvar) |
| | Unblock all threads that are pending on the condition variable.
|
| |
| int | k_condvar_wait (struct k_condvar *condvar, struct k_mutex *mutex, k_timeout_t timeout) |
| | Wait on the condition variable, releasing the mutex lock.
|
| |
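A sketch of the standard condition-variable pattern with the mutex and condvar APIs above; the lock, flag, and function names are illustrative:

```c
#include <stdbool.h>
#include <zephyr/kernel.h>

K_MUTEX_DEFINE(data_lock);
K_CONDVAR_DEFINE(data_ready);
static bool have_data;

void waiter(void)
{
	k_mutex_lock(&data_lock, K_FOREVER);
	while (!have_data) {
		/* Atomically releases the mutex and re-acquires it on wakeup. */
		k_condvar_wait(&data_ready, &data_lock, K_FOREVER);
	}
	have_data = false;               /* consume the data under the lock */
	k_mutex_unlock(&data_lock);
}

void signaler(void)
{
	k_mutex_lock(&data_lock, K_FOREVER);
	have_data = true;
	k_condvar_signal(&data_ready);
	k_mutex_unlock(&data_lock);
}
```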
| int | k_sem_init (struct k_sem *sem, unsigned int initial_count, unsigned int limit) |
| | Initialize a semaphore.
|
| |
| int | k_sem_take (struct k_sem *sem, k_timeout_t timeout) |
| | Take a semaphore.
|
| |
| void | k_sem_give (struct k_sem *sem) |
| | Give a semaphore.
|
| |
| void | k_sem_reset (struct k_sem *sem) |
| | Resets a semaphore's count to zero.
|
| |
| unsigned int | k_sem_count_get (struct k_sem *sem) |
| | Get a semaphore's count.
|
| |
| void | k_work_init (struct k_work *work, k_work_handler_t handler) |
| | Initialize a (non-delayable) work structure.
|
| |
| int | k_work_busy_get (const struct k_work *work) |
| | Busy state flags from the work item.
|
| |
| static bool | k_work_is_pending (const struct k_work *work) |
| | Test whether a work item is currently pending.
|
| |
| int | k_work_submit_to_queue (struct k_work_q *queue, struct k_work *work) |
| | Submit a work item to a queue.
|
| |
| int | k_work_submit (struct k_work *work) |
| | Submit a work item to the system queue.
|
| |
| bool | k_work_flush (struct k_work *work, struct k_work_sync *sync) |
| | Wait for last-submitted instance to complete.
|
| |
| int | k_work_cancel (struct k_work *work) |
| | Cancel a work item.
|
| |
| bool | k_work_cancel_sync (struct k_work *work, struct k_work_sync *sync) |
| | Cancel a work item and wait for it to complete.
|
| |
| void | k_work_queue_init (struct k_work_q *queue) |
| | Initialize a work queue structure.
|
| |
| void | k_work_queue_start (struct k_work_q *queue, k_thread_stack_t *stack, size_t stack_size, int prio, const struct k_work_queue_config *cfg) |
| | Initialize a work queue.
|
| |
| static k_tid_t | k_work_queue_thread_get (struct k_work_q *queue) |
| | Access the thread that animates a work queue.
|
| |
| int | k_work_queue_drain (struct k_work_q *queue, bool plug) |
| | Wait until the work queue has drained, optionally plugging it.
|
| |
| int | k_work_queue_unplug (struct k_work_q *queue) |
| | Release a work queue to accept new submissions.
|
| |
| void | k_work_init_delayable (struct k_work_delayable *dwork, k_work_handler_t handler) |
| | Initialize a delayable work structure.
|
| |
| static struct k_work_delayable * | k_work_delayable_from_work (struct k_work *work) |
| | Get the parent delayable work structure from a work pointer.
|
| |
| int | k_work_delayable_busy_get (const struct k_work_delayable *dwork) |
| | Busy state flags from the delayable work item.
|
| |
| static bool | k_work_delayable_is_pending (const struct k_work_delayable *dwork) |
| | Test whether a delayed work item is currently pending.
|
| |
| static k_ticks_t | k_work_delayable_expires_get (const struct k_work_delayable *dwork) |
| | Get the absolute tick count at which a scheduled delayable work will be submitted.
|
| |
| static k_ticks_t | k_work_delayable_remaining_get (const struct k_work_delayable *dwork) |
| | Get the number of ticks until a scheduled delayable work will be submitted.
|
| |
| int | k_work_schedule_for_queue (struct k_work_q *queue, struct k_work_delayable *dwork, k_timeout_t delay) |
| | Submit an idle work item to a queue after a delay.
|
| |
| int | k_work_schedule (struct k_work_delayable *dwork, k_timeout_t delay) |
| | Submit an idle work item to the system work queue after a delay.
|
| |
| int | k_work_reschedule_for_queue (struct k_work_q *queue, struct k_work_delayable *dwork, k_timeout_t delay) |
| | Reschedule a work item to a queue after a delay.
|
| |
| int | k_work_reschedule (struct k_work_delayable *dwork, k_timeout_t delay) |
| | Reschedule a work item to the system work queue after a delay.
|
| |
| bool | k_work_flush_delayable (struct k_work_delayable *dwork, struct k_work_sync *sync) |
| | Flush delayable work.
|
| |
| int | k_work_cancel_delayable (struct k_work_delayable *dwork) |
| | Cancel delayable work.
|
| |
| bool | k_work_cancel_delayable_sync (struct k_work_delayable *dwork, struct k_work_sync *sync) |
| | Cancel delayable work and wait.
|
| |
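A sketch of the delayable-work debounce idiom: k_work_reschedule() pushes the deadline out each time an input arrives, so the handler only runs once the input has been quiet for the full delay (the names are illustrative):

```c
#include <zephyr/kernel.h>

static void debounce_fn(struct k_work *work)
{
	ARG_UNUSED(work);

	/* Input has been quiet for 50 ms; act on the settled state here. */
}

K_WORK_DELAYABLE_DEFINE(debounce_work, debounce_fn);

void on_input_edge(void)
{
	/* Restart the 50 ms countdown on every edge. */
	k_work_reschedule(&debounce_work, K_MSEC(50));
}
```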
| static void | k_work_user_init (struct k_work_user *work, k_work_user_handler_t handler) |
| | Initialize a userspace work item.
|
| |
| static bool | k_work_user_is_pending (struct k_work_user *work) |
| | Check if a userspace work item is pending.
|
| |
| static int | k_work_user_submit_to_queue (struct k_work_user_q *work_q, struct k_work_user *work) |
| | Submit a work item to a user mode workqueue.
|
| |
| void | k_work_user_queue_start (struct k_work_user_q *work_q, k_thread_stack_t *stack, size_t stack_size, int prio, const char *name) |
| | Start a workqueue in user mode.
|
| |
| static k_tid_t | k_work_user_queue_thread_get (struct k_work_user_q *work_q) |
| | Access the user mode thread that animates a work queue.
|
| |
| void | k_work_poll_init (struct k_work_poll *work, k_work_handler_t handler) |
| | Initialize a triggered work item.
|
| |
| int | k_work_poll_submit_to_queue (struct k_work_q *work_q, struct k_work_poll *work, struct k_poll_event *events, int num_events, k_timeout_t timeout) |
| | Submit a triggered work item.
|
| |
| int | k_work_poll_submit (struct k_work_poll *work, struct k_poll_event *events, int num_events, k_timeout_t timeout) |
| | Submit a triggered work item to the system workqueue.
|
| |
| int | k_work_poll_cancel (struct k_work_poll *work) |
| | Cancel a triggered work item.
|
| |
| void | k_msgq_init (struct k_msgq *msgq, char *buffer, size_t msg_size, uint32_t max_msgs) |
| | Initialize a message queue.
|
| |
| int | k_msgq_alloc_init (struct k_msgq *msgq, size_t msg_size, uint32_t max_msgs) |
| | Initialize a message queue.
|
| |
| int | k_msgq_cleanup (struct k_msgq *msgq) |
| | Release allocated buffer for a queue.
|
| |
| int | k_msgq_put (struct k_msgq *msgq, const void *data, k_timeout_t timeout) |
| | Send a message to a message queue.
|
| |
| int | k_msgq_get (struct k_msgq *msgq, void *data, k_timeout_t timeout) |
| | Receive a message from a message queue.
|
| |
| int | k_msgq_peek (struct k_msgq *msgq, void *data) |
| | Peek/read a message from a message queue.
|
| |
| int | k_msgq_peek_at (struct k_msgq *msgq, void *data, uint32_t idx) |
| | Peek/read a message from a message queue at the specified index.
|
| |
| void | k_msgq_purge (struct k_msgq *msgq) |
| | Purge a message queue.
|
| |
| uint32_t | k_msgq_num_free_get (struct k_msgq *msgq) |
| | Get the amount of free space in a message queue.
|
| |
| void | k_msgq_get_attrs (struct k_msgq *msgq, struct k_msgq_attrs *attrs) |
| | Get basic attributes of a message queue.
|
| |
| uint32_t | k_msgq_num_used_get (struct k_msgq *msgq) |
| | Get the number of messages in a message queue.
|
| |
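A sketch of a statically defined message queue; messages are copied in and out, so the sender's local variable does not need to outlive the call (the `command` structure and names are illustrative):

```c
#include <stdint.h>
#include <zephyr/kernel.h>

struct command {
	uint16_t id;
	uint32_t arg;
};

/* Room for 16 messages, 4-byte aligned buffer. */
K_MSGQ_DEFINE(cmd_msgq, sizeof(struct command), 16, 4);

void send_command(uint16_t id, uint32_t arg)
{
	struct command cmd = { .id = id, .arg = arg };

	/* Drop the command instead of blocking if the queue is full. */
	(void)k_msgq_put(&cmd_msgq, &cmd, K_NO_WAIT);
}

void command_thread(void)
{
	struct command cmd;

	while (k_msgq_get(&cmd_msgq, &cmd, K_FOREVER) == 0) {
		/* ... process cmd ... */
	}
}
```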
| void | k_mbox_init (struct k_mbox *mbox) |
| | Initialize a mailbox.
|
| |
| int | k_mbox_put (struct k_mbox *mbox, struct k_mbox_msg *tx_msg, k_timeout_t timeout) |
| | Send a mailbox message in a synchronous manner.
|
| |
| void | k_mbox_async_put (struct k_mbox *mbox, struct k_mbox_msg *tx_msg, struct k_sem *sem) |
| | Send a mailbox message in an asynchronous manner.
|
| |
| int | k_mbox_get (struct k_mbox *mbox, struct k_mbox_msg *rx_msg, void *buffer, k_timeout_t timeout) |
| | Receive a mailbox message.
|
| |
| void | k_mbox_data_get (struct k_mbox_msg *rx_msg, void *buffer) |
| | Retrieve mailbox message data into a buffer.
|
| |
| void | k_pipe_init (struct k_pipe *pipe, unsigned char *buffer, size_t size) |
| | Initialize a pipe.
|
| |
| int | k_pipe_cleanup (struct k_pipe *pipe) |
| | Release a pipe's allocated buffer.
|
| |
| int | k_pipe_alloc_init (struct k_pipe *pipe, size_t size) |
| | Initialize a pipe and allocate a buffer for it.
|
| |
| int | k_pipe_put (struct k_pipe *pipe, const void *data, size_t bytes_to_write, size_t *bytes_written, size_t min_xfer, k_timeout_t timeout) |
| | Write data to a pipe.
|
| |
| int | k_pipe_get (struct k_pipe *pipe, void *data, size_t bytes_to_read, size_t *bytes_read, size_t min_xfer, k_timeout_t timeout) |
| | Read data from a pipe.
|
| |
| size_t | k_pipe_read_avail (struct k_pipe *pipe) |
| | Query the number of bytes that may be read from pipe.
|
| |
| size_t | k_pipe_write_avail (struct k_pipe *pipe) |
| | Query the number of bytes that may be written to pipe.
|
| |
| void | k_pipe_flush (struct k_pipe *pipe) |
| | Flush the pipe of write data.
|
| |
| void | k_pipe_buffer_flush (struct k_pipe *pipe) |
| | Flush the pipe's internal buffer.
|
| |
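A sketch of byte-stream transfer through a statically defined pipe, using the k_pipe_put()/k_pipe_get() signatures listed above (the pipe name and wrapper functions are illustrative):

```c
#include <zephyr/kernel.h>

/* 256-byte ring buffer, 4-byte aligned. */
K_PIPE_DEFINE(log_pipe, 256, 4);

int log_write(const char *buf, size_t len)
{
	size_t written;

	/* Accept a partial write (min_xfer = 1) and never block. */
	return k_pipe_put(&log_pipe, buf, len, &written, 1, K_NO_WAIT);
}

int log_read(char *buf, size_t len, size_t *got)
{
	/* Wait up to 100 ms for at least one byte. */
	return k_pipe_get(&log_pipe, buf, len, got, 1, K_MSEC(100));
}
```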
| int | k_mem_slab_init (struct k_mem_slab *slab, void *buffer, size_t block_size, uint32_t num_blocks) |
| | Initialize a memory slab.
|
| |
| int | k_mem_slab_alloc (struct k_mem_slab *slab, void **mem, k_timeout_t timeout) |
| | Allocate memory from a memory slab.
|
| |
| void | k_mem_slab_free (struct k_mem_slab *slab, void *mem) |
| | Free memory allocated from a memory slab.
|
| |
| static uint32_t | k_mem_slab_num_used_get (struct k_mem_slab *slab) |
| | Get the number of used blocks in a memory slab.
|
| |
| static uint32_t | k_mem_slab_max_used_get (struct k_mem_slab *slab) |
| | Get the number of maximum used blocks so far in a memory slab.
|
| |
| static uint32_t | k_mem_slab_num_free_get (struct k_mem_slab *slab) |
| | Get the number of unused blocks in a memory slab.
|
| |
| int | k_mem_slab_runtime_stats_get (struct k_mem_slab *slab, struct sys_memory_stats *stats) |
| | Get the memory stats for a memory slab.
|
| |
| int | k_mem_slab_runtime_stats_reset_max (struct k_mem_slab *slab) |
| | Reset the maximum memory usage for a slab.
|
| |
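A sketch of fixed-size block allocation from a statically defined memory slab (the slab name and block geometry are illustrative):

```c
#include <zephyr/kernel.h>

/* 8 fixed-size blocks of 64 bytes each, 4-byte aligned. */
K_MEM_SLAB_DEFINE(frame_slab, 64, 8, 4);

void *frame_get(void)
{
	void *block;

	if (k_mem_slab_alloc(&frame_slab, &block, K_NO_WAIT) != 0) {
		return NULL;                 /* slab exhausted */
	}
	return block;
}

void frame_put(void *block)
{
	k_mem_slab_free(&frame_slab, block);
}
```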
| void | k_heap_init (struct k_heap *h, void *mem, size_t bytes) |
| | Initialize a k_heap.
|
| |
| void * | k_heap_aligned_alloc (struct k_heap *h, size_t align, size_t bytes, k_timeout_t timeout) |
| | Allocate aligned memory from a k_heap.
|
| |
| void * | k_heap_alloc (struct k_heap *h, size_t bytes, k_timeout_t timeout) |
| | Allocate memory from a k_heap.
|
| |
| void * | k_heap_realloc (struct k_heap *h, void *ptr, size_t bytes, k_timeout_t timeout) |
| | Reallocate memory from a k_heap.
|
| |
| void | k_heap_free (struct k_heap *h, void *mem) |
| | Free memory allocated by k_heap_alloc().
|
| |
| void * | k_aligned_alloc (size_t align, size_t size) |
| | Allocate memory from the heap with a specified alignment.
|
| |
| void * | k_malloc (size_t size) |
| | Allocate memory from the heap.
|
| |
| void | k_free (void *ptr) |
| | Free memory allocated from heap.
|
| |
| void * | k_calloc (size_t nmemb, size_t size) |
| | Allocate memory from heap, array style.
|
| |
| void * | k_realloc (void *ptr, size_t size) |
| | Expand the size of an existing allocation.
|
| |
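A sketch of a dedicated application heap built on the k_heap APIs above; unlike k_malloc(), k_heap_alloc() can wait for memory to be freed (the heap name, size, and timeout are illustrative):

```c
#include <zephyr/kernel.h>

K_HEAP_DEFINE(app_heap, 4096);    /* 4 KiB backing store */

void *buffer_alloc(size_t len)
{
	/* Wait up to 10 ms for another thread to free enough memory. */
	return k_heap_alloc(&app_heap, len, K_MSEC(10));
}

void buffer_free(void *buf)
{
	k_heap_free(&app_heap, buf);
}
```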
| void | k_poll_event_init (struct k_poll_event *event, uint32_t type, int mode, void *obj) |
| | Initialize one struct k_poll_event instance.
|
| |
| int | k_poll (struct k_poll_event *events, int num_events, k_timeout_t timeout) |
| | Wait for one or many of multiple poll events to occur.
|
| |
| void | k_poll_signal_init (struct k_poll_signal *sig) |
| | Initialize a poll signal object.
|
| |
| void | k_poll_signal_reset (struct k_poll_signal *sig) |
| | Reset a poll signal object's state to unsignaled.
|
| |
| void | k_poll_signal_check (struct k_poll_signal *sig, unsigned int *signaled, int *result) |
| | Fetch the signaled state and result value of a poll signal.
|
| |
| int | k_poll_signal_raise (struct k_poll_signal *sig, int result) |
| | Signal a poll signal object.
|
| |
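A sketch of waiting on several conditions with a single k_poll() call. The semaphore and FIFO are illustrative; note that k_poll() only reports readiness, so the condition still has to be consumed afterwards:

```c
#include <zephyr/kernel.h>

K_SEM_DEFINE(rx_sem, 0, 1);
K_FIFO_DEFINE(rx_fifo);

void wait_for_input(void)
{
	struct k_poll_event events[2];

	k_poll_event_init(&events[0], K_POLL_TYPE_SEM_AVAILABLE,
			  K_POLL_MODE_NOTIFY_ONLY, &rx_sem);
	k_poll_event_init(&events[1], K_POLL_TYPE_FIFO_DATA_AVAILABLE,
			  K_POLL_MODE_NOTIFY_ONLY, &rx_fifo);

	if (k_poll(events, 2, K_SECONDS(5)) != 0) {
		return;                                  /* timed out */
	}

	if (events[0].state == K_POLL_STATE_SEM_AVAILABLE) {
		k_sem_take(&rx_sem, K_NO_WAIT);
	}
	if (events[1].state == K_POLL_STATE_FIFO_DATA_AVAILABLE) {
		void *item = k_fifo_get(&rx_fifo, K_NO_WAIT);

		ARG_UNUSED(item);
	}
}
```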
| static void | k_cpu_idle (void) |
| | Make the CPU idle.
|
| |
| static void | k_cpu_atomic_idle (unsigned int key) |
| | Make the CPU idle in an atomic fashion.
|
| |
| int | k_float_disable (struct k_thread *thread) |
| | Disable preservation of floating point context information.
|
| |
| int | k_float_enable (struct k_thread *thread, unsigned int options) |
| | Enable preservation of floating point context information.
|
| |
| int | k_thread_runtime_stats_get (k_tid_t thread, k_thread_runtime_stats_t *stats) |
| | Get the runtime statistics of a thread.
|
| |
| int | k_thread_runtime_stats_all_get (k_thread_runtime_stats_t *stats) |
| | Get the runtime statistics of all threads.
|
| |
| int | k_thread_runtime_stats_cpu_get (int cpu, k_thread_runtime_stats_t *stats) |
| | Get the runtime statistics of all threads on the specified CPU.
|
| |
| int | k_thread_runtime_stats_enable (k_tid_t thread) |
| | Enable gathering of runtime statistics for specified thread.
|
| |
| int | k_thread_runtime_stats_disable (k_tid_t thread) |
| | Disable gathering of runtime statistics for specified thread.
|
| |
| void | k_sys_runtime_stats_enable (void) |
| | Enable gathering of system runtime statistics.
|
| |
| void | k_sys_runtime_stats_disable (void) |
| | Disable gathering of system runtime statistics.
|
| |