#ifndef ZEPHYR_INCLUDE_SPINLOCK_H_
#define ZEPHYR_INCLUDE_SPINLOCK_H_

struct z_spinlock_key {
	int key;
};
struct k_spinlock {
#ifdef CONFIG_TICKET_SPINLOCKS
	/* Ticket lock state: lockers take a ticket from 'tail' and are served
	 * in FIFO order as 'owner' advances.
	 */
	atomic_t owner;
	atomic_t tail;
#else
	atomic_t locked;
#endif

#ifdef CONFIG_SPIN_VALIDATE
	/* Owning thread, with the locking CPU ID packed into the low bits */
	uintptr_t thread_cpu;
#ifdef CONFIG_SPIN_LOCK_TIME_LIMIT
	/* Cycle count recorded when the lock was taken */
	uint32_t lock_time;
#endif
#endif

#if defined(CONFIG_CPP) && !defined(CONFIG_SMP) && \
	!defined(CONFIG_SPIN_VALIDATE)
	/* Dummy byte so sizeof(struct k_spinlock) matches between C and C++
	 * builds when the struct would otherwise be empty.
	 */
	char dummy;
#endif
};
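/*
 * Illustrative sketch (not the header's code) of the ticket discipline the
 * owner/tail pair above implements: each locker atomically takes the next
 * ticket from 'tail' and spins until 'owner' reaches that ticket, which
 * serves contending CPUs in FIFO order. The *_sketch function names are made
 * up for the example; the atomic and arch helpers are the ones referenced
 * at the end of this listing.
 */
static inline void ticket_acquire_sketch(struct k_spinlock *l)
{
	atomic_val_t my_ticket = atomic_inc(&l->tail);	/* take a ticket */

	while (atomic_get(&l->owner) != my_ticket) {
		arch_spin_relax();	/* arch-specific busy-wait hint */
	}
}

static inline void ticket_release_sketch(struct k_spinlock *l)
{
	(void)atomic_inc(&l->owner);	/* serve the next queued ticket */
}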
#ifdef CONFIG_SPIN_VALIDATE
bool z_spin_lock_valid(struct k_spinlock *l);
bool z_spin_unlock_valid(struct k_spinlock *l);
void z_spin_lock_set_owner(struct k_spinlock *l);
BUILD_ASSERT(CONFIG_MP_MAX_NUM_CPUS <= 4, "Too many CPUs for mask");

# ifdef CONFIG_KERNEL_COHERENCE
bool z_spin_lock_mem_coherent(struct k_spinlock *l);
# endif /* CONFIG_KERNEL_COHERENCE */
#endif /* CONFIG_SPIN_VALIDATE */
static ALWAYS_INLINE void z_spinlock_validate_pre(struct k_spinlock *l)
{
#ifdef CONFIG_SPIN_VALIDATE
	__ASSERT(z_spin_lock_valid(l), "Invalid spinlock %p", l);
#ifdef CONFIG_KERNEL_COHERENCE
	__ASSERT_NO_MSG(z_spin_lock_mem_coherent(l));
#endif
#endif
}

static ALWAYS_INLINE void z_spinlock_validate_post(struct k_spinlock *l)
{
#ifdef CONFIG_SPIN_VALIDATE
	z_spin_lock_set_owner(l);
#if defined(CONFIG_SPIN_LOCK_TIME_LIMIT) && (CONFIG_SPIN_LOCK_TIME_LIMIT != 0)
	l->lock_time = sys_clock_cycle_get_32();
#endif
#endif
}
static ALWAYS_INLINE k_spinlock_key_t k_spin_lock(struct k_spinlock *l)
{
	k_spinlock_key_t k;

	/* ... lock out local interrupts, saving the previous state in k.key ... */
	z_spinlock_validate_pre(l);
#ifdef CONFIG_TICKET_SPINLOCKS
	/* ... take a ticket from l->tail and spin until l->owner serves it ... */
#endif
	z_spinlock_validate_post(l);

	return k;
}
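/*
 * Usage sketch for the lock/unlock pair: the returned key carries the saved
 * interrupt state and must be passed back to k_spin_unlock(). 'my_lock',
 * 'shared_count' and bump() are illustrative names, not part of the header.
 */
static struct k_spinlock my_lock;
static int shared_count;

static void bump(void)
{
	k_spinlock_key_t key = k_spin_lock(&my_lock);

	shared_count++;			/* data shared with other CPUs/ISRs */
	k_spin_unlock(&my_lock, key);
}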
static ALWAYS_INLINE int k_spin_trylock(struct k_spinlock *l, k_spinlock_key_t *k)
{
	/* ... lock out local interrupts, remembering the previous state ... */
	z_spinlock_validate_pre(l);
#ifdef CONFIG_TICKET_SPINLOCKS
	atomic_val_t ticket_val = atomic_get(&l->owner);

	/* Advance the tail only if it still equals the ticket being served,
	 * i.e. nobody currently holds or waits for the lock.
	 */
	if (!atomic_cas(&l->tail, ticket_val, ticket_val + 1)) {
		/* ... busy: restore interrupts and return -EBUSY ... */
	}
#endif
	z_spinlock_validate_post(l);
	/* ... publish the IRQ state through k->key and return 0 ... */
}
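/*
 * Usage sketch for the non-blocking variant: when the lock is contended,
 * k_spin_trylock() restores the interrupt state and fails instead of
 * spinning. try_update() is an illustrative name; 'my_lock' is the example
 * lock declared above.
 */
static int try_update(void)
{
	k_spinlock_key_t key;

	if (k_spin_trylock(&my_lock, &key) != 0) {
		return -EBUSY;		/* lock currently held elsewhere */
	}
	/* ... critical section ... */
	k_spin_unlock(&my_lock, key);
	return 0;
}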
static ALWAYS_INLINE void k_spin_unlock(struct k_spinlock *l, k_spinlock_key_t key)
{
#ifdef CONFIG_SPIN_VALIDATE
	__ASSERT(z_spin_unlock_valid(l), "Not my spinlock %p", l);

#if defined(CONFIG_SPIN_LOCK_TIME_LIMIT) && (CONFIG_SPIN_LOCK_TIME_LIMIT != 0)
	uint32_t delta = sys_clock_cycle_get_32() - l->lock_time;

	__ASSERT(delta < CONFIG_SPIN_LOCK_TIME_LIMIT,
		 "Spin lock %p held %u cycles, longer than limit of %u cycles",
		 l, delta, CONFIG_SPIN_LOCK_TIME_LIMIT);
#endif
#endif

#ifdef CONFIG_TICKET_SPINLOCKS
	/* ... advance l->owner so the next queued ticket is served ... */
#else
	/* ... clear l->locked ... */
#endif
	/* ... restore local interrupts from key.key ... */
}
#if defined(CONFIG_SMP) && defined(CONFIG_TEST)
/* Test-only helper: reports whether the lock is currently held by any CPU */
static ALWAYS_INLINE bool z_spin_is_locked(struct k_spinlock *l)
{
#ifdef CONFIG_TICKET_SPINLOCKS
	atomic_val_t ticket_val = atomic_get(&l->owner);

	/* The CAS succeeds only when tail == owner, i.e. the lock is free,
	 * so a failed CAS means the lock is held.
	 */
	return !atomic_cas(&l->tail, ticket_val, ticket_val);
#else
	/* ... */
#endif
}
#endif
static ALWAYS_INLINE void k_spin_release(struct k_spinlock *l)
{
#ifdef CONFIG_SPIN_VALIDATE
	__ASSERT(z_spin_unlock_valid(l), "Not my spinlock %p", l);
#endif
#ifdef CONFIG_TICKET_SPINLOCKS
	/* ... advance l->owner without touching the interrupt state ... */
#else
	/* ... clear l->locked ... */
#endif
}
#if defined(CONFIG_SPIN_VALIDATE) && defined(__GNUC__)
static ALWAYS_INLINE void z_spin_onexit(k_spinlock_key_t *k)
{
	__ASSERT(k->key, "K_SPINLOCK exited with goto, break or return, "
			 "use K_SPINLOCK_BREAK instead.");
}
#define K_SPINLOCK_ONEXIT __attribute__((__cleanup__(z_spin_onexit)))
#else
#define K_SPINLOCK_ONEXIT
#endif
#define K_SPINLOCK_BREAK continue
#define K_SPINLOCK(lck)                                                                  \
	for (k_spinlock_key_t __i K_SPINLOCK_ONEXIT = {}, __key = k_spin_lock(lck); !__i.key; \
	     k_spin_unlock((lck), __key), __i.key = 1)
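/*
 * Usage sketch for the scoped form: the body runs with the lock held and the
 * lock is released when the block exits, including via K_SPINLOCK_BREAK.
 * bounded_bump() is an illustrative name reusing the example lock above.
 */
static void bounded_bump(void)
{
	K_SPINLOCK(&my_lock) {
		if (shared_count >= 100) {
			K_SPINLOCK_BREAK;	/* leave early; the lock is still released */
		}
		shared_count++;
	}
}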
Referenced symbols:
  uint32_t sys_clock_cycle_get_32(void)
  static ALWAYS_INLINE unsigned int arch_irq_lock(void): disable all interrupts on the local CPU (irq.h:168)
  static ALWAYS_INLINE void arch_irq_unlock(unsigned int key) (irq.h:176)
  void arch_spin_relax(void): perform architecture-specific processing within spin loops
  long atomic_t (atomic_types.h:15); atomic_t atomic_val_t (atomic_types.h:16)
  static ALWAYS_INLINE atomic_val_t atomic_inc(atomic_t *target) (atomic_xtensa.h:112)
  static ALWAYS_INLINE bool atomic_cas(atomic_t *target, atomic_val_t oldval, atomic_val_t newval) (atomic_xtensa.h:62)
  static ALWAYS_INLINE atomic_val_t atomic_get(const atomic_t *target) (atomic_xtensa.h:18)
  static ALWAYS_INLINE atomic_val_t atomic_clear(atomic_t *target) (atomic_xtensa.h:165)
  static ALWAYS_INLINE int k_spin_trylock(struct k_spinlock *l, k_spinlock_key_t *k): attempt to lock a spinlock (spinlock.h:230)
  static ALWAYS_INLINE void k_spin_unlock(struct k_spinlock *l, k_spinlock_key_t key): unlock a spin lock (spinlock.h:300)
  static ALWAYS_INLINE k_spinlock_key_t k_spin_lock(struct k_spinlock *l): lock a spinlock (spinlock.h:182)
  struct z_spinlock_key k_spinlock_key_t: spinlock key type (spinlock.h:130)
  struct k_spinlock: kernel spin lock (spinlock.h:45)