spinlock.h
/*
 * Copyright (c) 2018 Intel Corporation.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#ifndef ZEPHYR_INCLUDE_SPINLOCK_H_
#define ZEPHYR_INCLUDE_SPINLOCK_H_

#include <zephyr/sys/atomic.h>
#include <zephyr/sys/__assert.h>
#include <zephyr/sys/time_units.h>
#include <stdbool.h>
#include <zephyr/arch/cpu.h>

#ifdef __cplusplus
extern "C" {
#endif

struct z_spinlock_key {
	int key;
};

/**
 * @brief Kernel Spin Lock
 */
struct k_spinlock {
#ifdef CONFIG_SMP
	atomic_t locked;
#endif

#ifdef CONFIG_SPIN_VALIDATE
	/* Stores the thread that holds the lock with the locking CPU
	 * ID in the bottom two bits.
	 */
	uintptr_t thread_cpu;
#ifdef CONFIG_SPIN_LOCK_TIME_LIMIT
	/* Stores the time (in cycles) when a lock was taken
	 */
	uint32_t lock_time;
#endif /* CONFIG_SPIN_LOCK_TIME_LIMIT */
#endif /* CONFIG_SPIN_VALIDATE */

#if defined(CONFIG_CPP) && !defined(CONFIG_SMP) && \
	!defined(CONFIG_SPIN_VALIDATE)
	/* If CONFIG_SMP and CONFIG_SPIN_VALIDATE are both not defined
	 * the k_spinlock struct will have no members. The result
	 * is that in C sizeof(k_spinlock) is 0 and in C++ it is 1.
	 *
	 * This size difference causes problems when the k_spinlock
	 * is embedded into another struct like k_msgq, because C and
	 * C++ will have different ideas on the offsets of the members
	 * that come after the k_spinlock member.
	 *
	 * To prevent this we add a 1 byte dummy member to k_spinlock
	 * when the user selects C++ support and k_spinlock would
	 * otherwise be empty.
	 */
	char dummy;
#endif
};

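/* Layout-hazard sketch (illustrative; "my_fifo" is not part of this
 * header): if k_spinlock were allowed to be empty, "head" below would
 * sit at offset 0 when compiled as C but at a nonzero offset when
 * compiled as C++, so C and C++ translation units sharing this struct
 * would disagree on its layout.
 *
 * @code
 * struct my_fifo {
 *     struct k_spinlock lock;
 *     void *head;
 * };
 * @endcode
 */
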
/* There's a spinlock validation framework available when asserts are
 * enabled. It adds a relatively hefty overhead (about 3k or so) to
 * kernel code size, so don't use it on platforms known to be small.
 */
#ifdef CONFIG_SPIN_VALIDATE
bool z_spin_lock_valid(struct k_spinlock *l);
bool z_spin_unlock_valid(struct k_spinlock *l);
void z_spin_lock_set_owner(struct k_spinlock *l);
BUILD_ASSERT(CONFIG_MP_MAX_NUM_CPUS <= 4, "Too many CPUs for mask");

# ifdef CONFIG_KERNEL_COHERENCE
bool z_spin_lock_mem_coherent(struct k_spinlock *l);
# endif /* CONFIG_KERNEL_COHERENCE */

#endif /* CONFIG_SPIN_VALIDATE */

/**
 * @brief Spinlock key type
 *
 * This type stores the interrupt state returned by k_spin_lock(); it
 * must be passed, unmodified, to the matching k_spin_unlock().
 */
typedef struct z_spinlock_key k_spinlock_key_t;

/**
 * @brief Lock a spinlock
 *
 * @param l A pointer to the spinlock to lock
 * @return A key value that must be passed to k_spin_unlock() when the
 *         lock is released.
 */
static ALWAYS_INLINE k_spinlock_key_t k_spin_lock(struct k_spinlock *l)
{
	ARG_UNUSED(l);
	k_spinlock_key_t k;

	/* Note that we need to use the underlying arch-specific lock
	 * implementation. The "irq_lock()" API in SMP context is
	 * actually a wrapper for a global spinlock!
	 */
	k.key = arch_irq_lock();

#ifdef CONFIG_SPIN_VALIDATE
	__ASSERT(z_spin_lock_valid(l), "Recursive spinlock %p", l);
# ifdef CONFIG_KERNEL_COHERENCE
	__ASSERT_NO_MSG(z_spin_lock_mem_coherent(l));
# endif
#endif

#ifdef CONFIG_SMP
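	/* Busy-wait until the lock word atomically changes from 0
	 * (unlocked) to 1 (locked); atomic_cas() returns false while
	 * another CPU holds the lock, so we keep spinning.
	 */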
	while (!atomic_cas(&l->locked, 0, 1)) {
	}
#endif

#ifdef CONFIG_SPIN_VALIDATE
	z_spin_lock_set_owner(l);
#if defined(CONFIG_SPIN_LOCK_TIME_LIMIT) && (CONFIG_SPIN_LOCK_TIME_LIMIT != 0)
	l->lock_time = sys_clock_cycle_get_32();
#endif /* CONFIG_SPIN_LOCK_TIME_LIMIT */
#endif /* CONFIG_SPIN_VALIDATE */
	return k;
}

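/* Usage sketch (illustrative; "my_lock" and "counter" are not part of
 * this header): a minimal critical section. The returned key captures
 * the previous local interrupt state and must be handed back to
 * k_spin_unlock() for that state to be restored.
 *
 * @code
 * static struct k_spinlock my_lock;
 * static int counter;
 *
 * void count_up(void)
 * {
 *     k_spinlock_key_t key = k_spin_lock(&my_lock);
 *
 *     counter++;
 *     k_spin_unlock(&my_lock, key);
 * }
 * @endcode
 */
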
/**
 * @brief Unlock a spin lock
 *
 * @param l A pointer to the spinlock to release
 * @param key The value returned from k_spin_lock() when this lock was
 *        acquired
 */
static ALWAYS_INLINE void k_spin_unlock(struct k_spinlock *l,
					k_spinlock_key_t key)
{
	ARG_UNUSED(l);
#ifdef CONFIG_SPIN_VALIDATE
	__ASSERT(z_spin_unlock_valid(l), "Not my spinlock %p", l);

#if defined(CONFIG_SPIN_LOCK_TIME_LIMIT) && (CONFIG_SPIN_LOCK_TIME_LIMIT != 0)
	uint32_t delta = sys_clock_cycle_get_32() - l->lock_time;

	__ASSERT(delta < CONFIG_SPIN_LOCK_TIME_LIMIT,
		 "Spin lock %p held %u cycles, longer than limit of %u cycles",
		 l, delta, CONFIG_SPIN_LOCK_TIME_LIMIT);
#endif /* CONFIG_SPIN_LOCK_TIME_LIMIT */
#endif /* CONFIG_SPIN_VALIDATE */

#ifdef CONFIG_SMP
	/* Strictly we don't need atomic_clear() here (which is an
	 * exchange operation that returns the old value). We are always
	 * setting a zero and (because we hold the lock) know the existing
	 * state won't change due to a race. But some architectures need
	 * a memory barrier when used like this, and we don't have a
	 * Zephyr framework for that.
	 */
	atomic_clear(&l->locked);
#endif
	arch_irq_unlock(key.key);
}

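/* Nesting sketch (illustrative; "lock_a" and "lock_b" are
 * hypothetical): each key snapshots the interrupt state at its own
 * k_spin_lock() call, so nested locks should be unlocked in reverse
 * acquisition order with their matching keys.
 *
 * @code
 * k_spinlock_key_t ka = k_spin_lock(&lock_a);
 * k_spinlock_key_t kb = k_spin_lock(&lock_b);
 *
 * // ... critical section using both resources ...
 *
 * k_spin_unlock(&lock_b, kb);
 * k_spin_unlock(&lock_a, ka);
 * @endcode
 */
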
/* Internal function: releases the lock, but leaves local interrupts
 * disabled
 */
static ALWAYS_INLINE void k_spin_release(struct k_spinlock *l)
{
	ARG_UNUSED(l);
#ifdef CONFIG_SPIN_VALIDATE
	__ASSERT(z_spin_unlock_valid(l), "Not my spinlock %p", l);
#endif
#ifdef CONFIG_SMP
	atomic_clear(&l->locked);
#endif
}

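/* Internal-path sketch (illustrative; "sched_lock" is hypothetical):
 * the lock is handed back for other CPUs to take while local
 * interrupts stay masked; the saved key is restored separately with
 * arch_irq_unlock().
 *
 * @code
 * k_spinlock_key_t key = k_spin_lock(&sched_lock);
 *
 * // ... work that must complete under the lock ...
 *
 * k_spin_release(&sched_lock);
 * // ... more work with interrupts still masked ...
 * arch_irq_unlock(key.key);
 * @endcode
 */
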
#ifdef __cplusplus
}
#endif

#endif /* ZEPHYR_INCLUDE_SPINLOCK_H_ */