Zephyr Project API 3.7.0
A Scalable Open Source RTOS
spinlock.h
/*
 * Copyright (c) 2018 Intel Corporation.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#ifndef ZEPHYR_INCLUDE_SPINLOCK_H_
#define ZEPHYR_INCLUDE_SPINLOCK_H_

#include <errno.h>
#include <stdbool.h>

#include <zephyr/arch/cpu.h>
#include <zephyr/sys/atomic.h>
#include <zephyr/sys/__assert.h>
#include <zephyr/sys/time_units.h>

#ifdef __cplusplus
extern "C" {
#endif

struct z_spinlock_key {
        int key;
};

struct k_spinlock {
#ifdef CONFIG_SMP
#ifdef CONFIG_TICKET_SPINLOCKS
        /*
         * Ticket spinlocks are conceptually two atomic variables,
         * one indicating the current FIFO head (spinlock owner),
         * and the other indicating the current FIFO tail.
         * The spinlock is acquired in the following manner:
         * - the current FIFO tail value is atomically incremented while its
         *   original value is saved as a "ticket"
         * - we spin until the FIFO head becomes equal to the ticket value
         *
         * The spinlock is released by an atomic increment of the FIFO head.
         */
        atomic_t owner;
        atomic_t tail;
#else
        atomic_t locked;
#endif /* CONFIG_TICKET_SPINLOCKS */
#endif /* CONFIG_SMP */

#ifdef CONFIG_SPIN_VALIDATE
        /* Stores the thread that holds the lock with the locking CPU
         * ID in the bottom two bits.
         */
        uintptr_t thread_cpu;
#ifdef CONFIG_SPIN_LOCK_TIME_LIMIT
        /* Stores the time (in cycles) when the lock was taken
         */
        uint32_t lock_time;
#endif /* CONFIG_SPIN_LOCK_TIME_LIMIT */
#endif /* CONFIG_SPIN_VALIDATE */

#if defined(CONFIG_CPP) && !defined(CONFIG_SMP) && \
        !defined(CONFIG_SPIN_VALIDATE)
        /* If CONFIG_SMP and CONFIG_SPIN_VALIDATE are both not defined
         * the k_spinlock struct will have no members. The result
         * is that in C sizeof(k_spinlock) is 0 and in C++ it is 1.
         *
         * This size difference causes problems when the k_spinlock
         * is embedded into another struct like k_msgq, because C and
         * C++ will have different ideas on the offsets of the members
         * that come after the k_spinlock member.
         *
         * To prevent this we add a 1 byte dummy member to k_spinlock
         * when the user selects C++ support and k_spinlock would
         * otherwise be empty.
         */
        char dummy;
#endif
};
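
The following is an illustrative sketch, not part of spinlock.h: a zero-initialized k_spinlock is in the unlocked state for both implementations, and with CONFIG_TICKET_SPINLOCKS the owner/tail pair behaves as a FIFO of tickets. The lock name example_lock is hypothetical.

static struct k_spinlock example_lock;  /* zero-initialized: owner = 0, tail = 0 (or locked = 0) */

/* Worked ticket-lock trace with two contending CPUs (hypothetical):
 *   CPU A acquires: ticket = atomic_inc(&tail) == 0, tail becomes 1; owner == 0 == ticket, so A holds the lock
 *   CPU B acquires: ticket = atomic_inc(&tail) == 1, tail becomes 2; owner != 1, so B spins
 *   CPU A releases: atomic_inc(&owner), owner becomes 1; B's ticket is now served and B holds the lock
 */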

/* There's a spinlock validation framework available when asserts are
 * enabled. It adds a relatively hefty overhead (about 3k or so) to
 * kernel code size, so don't use it on platforms known to be small.
 */
#ifdef CONFIG_SPIN_VALIDATE
bool z_spin_lock_valid(struct k_spinlock *l);
bool z_spin_unlock_valid(struct k_spinlock *l);
void z_spin_lock_set_owner(struct k_spinlock *l);
BUILD_ASSERT(CONFIG_MP_MAX_NUM_CPUS <= 4, "Too many CPUs for mask");

# ifdef CONFIG_KERNEL_COHERENCE
bool z_spin_lock_mem_coherent(struct k_spinlock *l);
# endif /* CONFIG_KERNEL_COHERENCE */

#endif /* CONFIG_SPIN_VALIDATE */

typedef struct z_spinlock_key k_spinlock_key_t;

static ALWAYS_INLINE void z_spinlock_validate_pre(struct k_spinlock *l)
{
        ARG_UNUSED(l);
#ifdef CONFIG_SPIN_VALIDATE
        __ASSERT(z_spin_lock_valid(l), "Invalid spinlock %p", l);
#ifdef CONFIG_KERNEL_COHERENCE
        __ASSERT_NO_MSG(z_spin_lock_mem_coherent(l));
#endif
#endif
}

static ALWAYS_INLINE void z_spinlock_validate_post(struct k_spinlock *l)
{
        ARG_UNUSED(l);
#ifdef CONFIG_SPIN_VALIDATE
        z_spin_lock_set_owner(l);
#if defined(CONFIG_SPIN_LOCK_TIME_LIMIT) && (CONFIG_SPIN_LOCK_TIME_LIMIT != 0)
        l->lock_time = sys_clock_cycle_get_32();
#endif /* CONFIG_SPIN_LOCK_TIME_LIMIT */
#endif /* CONFIG_SPIN_VALIDATE */
}

static ALWAYS_INLINE k_spinlock_key_t k_spin_lock(struct k_spinlock *l)
{
        ARG_UNUSED(l);
        k_spinlock_key_t k;

        /* Note that we need to use the underlying arch-specific lock
         * implementation. The "irq_lock()" API in SMP context is
         * actually a wrapper for a global spinlock!
         */
        k.key = arch_irq_lock();

        z_spinlock_validate_pre(l);
#ifdef CONFIG_SMP
#ifdef CONFIG_TICKET_SPINLOCKS
        /*
         * Enqueue ourselves to the end of a spinlock waiters queue
         * receiving a ticket
         */
        atomic_val_t ticket = atomic_inc(&l->tail);
        /* Spin until our ticket is served */
        while (atomic_get(&l->owner) != ticket) {
                arch_spin_relax();
        }
#else
        while (!atomic_cas(&l->locked, 0, 1)) {
                arch_spin_relax();
        }
#endif /* CONFIG_TICKET_SPINLOCKS */
#endif /* CONFIG_SMP */
        z_spinlock_validate_post(l);

        return k;
}

static ALWAYS_INLINE int k_spin_trylock(struct k_spinlock *l, k_spinlock_key_t *k)
{
        int key = arch_irq_lock();

        z_spinlock_validate_pre(l);
#ifdef CONFIG_SMP
#ifdef CONFIG_TICKET_SPINLOCKS
        /*
         * The atomic_get and atomic_cas operations below are not executed
         * simultaneously.
         * So in theory k_spin_trylock can lock an already locked spinlock.
         * To reproduce this the following conditions should be met after we
         * executed atomic_get and before we executed atomic_cas:
         *
         * - the spinlock needs to be taken 0xffff_..._ffff + 1 times
         *   (which requires 0xffff_..._ffff CPUs, as the k_spin_lock call
         *   is blocking) or
         * - the spinlock needs to be taken and released 0xffff_..._ffff times
         *   and then taken again
         *
         * In real-life systems this is considered non-reproducible given that
         * the required actions need to be done during this tiny window of
         * several CPU instructions (which execute with interrupts locked,
         * so no preemption can happen here)
         */
        atomic_val_t ticket_val = atomic_get(&l->owner);

        if (!atomic_cas(&l->tail, ticket_val, ticket_val + 1)) {
                goto busy;
        }
#else
        if (!atomic_cas(&l->locked, 0, 1)) {
                goto busy;
        }
#endif /* CONFIG_TICKET_SPINLOCKS */
#endif /* CONFIG_SMP */
        z_spinlock_validate_post(l);

        k->key = key;

        return 0;

#ifdef CONFIG_SMP
busy:
        arch_irq_unlock(key);
        return -EBUSY;
#endif /* CONFIG_SMP */
}

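An illustrative usage sketch, not part of the header; example_lock and the guarded work are hypothetical. k_spin_trylock() fills in the key only on success and returns -EBUSY otherwise, so the caller must check the return value:

k_spinlock_key_t key;

if (k_spin_trylock(&example_lock, &key) == 0) {
        /* Lock acquired, interrupts masked: do the short critical-section work here */
        k_spin_unlock(&example_lock, key);
} else {
        /* -EBUSY: another CPU holds the lock; retry later or fall back */
}
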
static ALWAYS_INLINE void k_spin_unlock(struct k_spinlock *l,
                                        k_spinlock_key_t key)
{
        ARG_UNUSED(l);
#ifdef CONFIG_SPIN_VALIDATE
        __ASSERT(z_spin_unlock_valid(l), "Not my spinlock %p", l);

#if defined(CONFIG_SPIN_LOCK_TIME_LIMIT) && (CONFIG_SPIN_LOCK_TIME_LIMIT != 0)
        uint32_t delta = sys_clock_cycle_get_32() - l->lock_time;

        __ASSERT(delta < CONFIG_SPIN_LOCK_TIME_LIMIT,
                 "Spin lock %p held %u cycles, longer than limit of %u cycles",
                 l, delta, CONFIG_SPIN_LOCK_TIME_LIMIT);
#endif /* CONFIG_SPIN_LOCK_TIME_LIMIT */
#endif /* CONFIG_SPIN_VALIDATE */

#ifdef CONFIG_SMP
#ifdef CONFIG_TICKET_SPINLOCKS
        /* Give the spinlock to the next CPU in the FIFO */
        (void)atomic_inc(&l->owner);
#else
        /* Strictly we don't need atomic_clear() here (which is an
         * exchange operation that returns the old value). We are always
         * setting a zero and (because we hold the lock) know the existing
         * state won't change due to a race. But some architectures need
         * a memory barrier when used like this, and we don't have a
         * Zephyr framework for that.
         */
        (void)atomic_clear(&l->locked);
#endif /* CONFIG_TICKET_SPINLOCKS */
#endif /* CONFIG_SMP */
        arch_irq_unlock(key.key);
}

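A minimal usage sketch, not part of the header; example_lock and shared_counter are hypothetical. The key returned by k_spin_lock() must be passed back to k_spin_unlock() on the same lock:

k_spinlock_key_t key = k_spin_lock(&example_lock);

/* Critical section: interrupts are masked on this CPU and, under CONFIG_SMP,
 * other CPUs spin in k_spin_lock() until the lock is released.
 */
shared_counter++;       /* hypothetical shared data */

k_spin_unlock(&example_lock, key);
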
#if defined(CONFIG_SMP) && defined(CONFIG_TEST)
/*
 * @brief Checks if the spinlock is held by some CPU, including the local CPU.
 * This API shouldn't be used outside of spinlock tests.
 *
 * @param l A pointer to the spinlock
 * @retval true - if the spinlock is held by some CPU; false - otherwise
 */
static ALWAYS_INLINE bool z_spin_is_locked(struct k_spinlock *l)
{
#ifdef CONFIG_TICKET_SPINLOCKS
        atomic_val_t ticket_val = atomic_get(&l->owner);

        return !atomic_cas(&l->tail, ticket_val, ticket_val);
#else
        return l->locked;
#endif /* CONFIG_TICKET_SPINLOCKS */
}
#endif /* defined(CONFIG_SMP) && defined(CONFIG_TEST) */

/* Internal function: releases the lock, but leaves local interrupts disabled */
static ALWAYS_INLINE void k_spin_release(struct k_spinlock *l)
{
        ARG_UNUSED(l);
#ifdef CONFIG_SPIN_VALIDATE
        __ASSERT(z_spin_unlock_valid(l), "Not my spinlock %p", l);
#endif
#ifdef CONFIG_SMP
#ifdef CONFIG_TICKET_SPINLOCKS
        (void)atomic_inc(&l->owner);
#else
        (void)atomic_clear(&l->locked);
#endif /* CONFIG_TICKET_SPINLOCKS */
#endif /* CONFIG_SMP */
}

#if defined(CONFIG_SPIN_VALIDATE) && defined(__GNUC__)
static ALWAYS_INLINE void z_spin_onexit(__maybe_unused k_spinlock_key_t *k)
{
        __ASSERT(k->key, "K_SPINLOCK exited with goto, break or return, "
                         "use K_SPINLOCK_BREAK instead.");
}
#define K_SPINLOCK_ONEXIT __attribute__((__cleanup__(z_spin_onexit)))
#else
#define K_SPINLOCK_ONEXIT
#endif

#define K_SPINLOCK_BREAK continue

#define K_SPINLOCK(lck)                                                                            \
        for (k_spinlock_key_t __i K_SPINLOCK_ONEXIT = {}, __key = k_spin_lock(lck); !__i.key;     \
             k_spin_unlock((lck), __key), __i.key = 1)

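An illustrative sketch of the K_SPINLOCK convenience macro, not part of the header; example_lock, nothing_to_do, and shared_counter are hypothetical. The macro runs the following block as a critical section and releases the lock when the block is left normally or via K_SPINLOCK_BREAK; leaving it with return, break, or goto is caught by the K_SPINLOCK_ONEXIT assertion when CONFIG_SPIN_VALIDATE is enabled on GCC:

K_SPINLOCK(&example_lock) {
        if (nothing_to_do) {            /* hypothetical condition */
                K_SPINLOCK_BREAK;       /* leaves the critical section and unlocks */
        }
        shared_counter++;               /* hypothetical shared data */
}       /* lock released here on normal exit */
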
#ifdef __cplusplus
}
#endif

#endif /* ZEPHYR_INCLUDE_SPINLOCK_H_ */