arch.h
/*
 * Copyright (c) 2016 Jean-Paul Etienne <fractalclone@gmail.com>
 * Contributors: 2018 Antmicro <www.antmicro.com>
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#ifndef ZEPHYR_INCLUDE_ARCH_RISCV_ARCH_H_
#define ZEPHYR_INCLUDE_ARCH_RISCV_ARCH_H_

#if defined(CONFIG_USERSPACE)
#include <zephyr/arch/riscv/syscall.h>
#endif /* CONFIG_USERSPACE */
#include <zephyr/irq.h>
#include <zephyr/sw_isr_table.h>
#include <soc.h>
#include <zephyr/devicetree.h>

/* Stacks: for the RISC-V architecture, the stack must be 16-byte aligned. */
#define ARCH_STACK_PTR_ALIGN 16

#ifdef CONFIG_PMP_STACK_GUARD
/*
 * The StackGuard is an area at the bottom of the kernel-mode stack made to
 * fault when accessed. It does _not_ fault when in exception mode, as we rely
 * on that area to save the exception stack frame and to process said fault.
 * Therefore the guard area must be large enough to hold the esf, plus some
 * configurable stack wiggle room to execute the fault handling code off of,
 * as well as some guard size to cover possible sudden stack pointer
 * displacement before the fault.
 *
 * The m-mode PMP set is not heavily used, so there is no need to force NAPOT.
 */
#define Z_RISCV_STACK_GUARD_SIZE \
	ROUND_UP(sizeof(z_arch_esf_t) + CONFIG_PMP_STACK_GUARD_MIN_SIZE, \
		 ARCH_STACK_PTR_ALIGN)

/* Kernel-only stacks have the following layout if a stack guard is enabled:
 *
 * +------------+ <- thread.stack_obj
 * | Guard      | } Z_RISCV_STACK_GUARD_SIZE
 * +------------+ <- thread.stack_info.start
 * | Kernel     |
 * | stack      |
 * |            |
 * +............|
 * | TLS        | } thread.stack_info.delta
 * +------------+ <- thread.stack_info.start + thread.stack_info.size
 */
#define ARCH_KERNEL_STACK_RESERVED Z_RISCV_STACK_GUARD_SIZE
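
/*
 * Worked example (hypothetical numbers, not from the original header):
 * assuming sizeof(z_arch_esf_t) == 296 and
 * CONFIG_PMP_STACK_GUARD_MIN_SIZE == 1024, the guard size is
 * ROUND_UP(296 + 1024, 16) == 1328, i.e. 1320 rounded up to the next
 * 16-byte boundary.
 */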

#else /* !CONFIG_PMP_STACK_GUARD */
#define Z_RISCV_STACK_GUARD_SIZE 0
#endif /* CONFIG_PMP_STACK_GUARD */

#ifdef CONFIG_MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT
/* The privilege elevation stack is located in another area of memory
 * generated at build time by gen_kobject_list.py
 *
 * +------------+ <- thread.arch.priv_stack_start
 * | Guard      | } Z_RISCV_STACK_GUARD_SIZE
 * +------------+
 * | Priv Stack | } CONFIG_PRIVILEGED_STACK_SIZE
 * +------------+ <- thread.arch.priv_stack_start +
 *                   CONFIG_PRIVILEGED_STACK_SIZE +
 *                   Z_RISCV_STACK_GUARD_SIZE
 *
 * The main stack will initially (or potentially only) be used by kernel
 * mode, so we need to make room for a possible stack guard area when enabled:
 *
 * +------------+ <- thread.stack_obj
 * | Guard      | } Z_RISCV_STACK_GUARD_SIZE
 * +............| <- thread.stack_info.start
 * | Thread     |
 * | stack      |
 * |            |
 * +............|
 * | TLS        | } thread.stack_info.delta
 * +------------+ <- thread.stack_info.start + thread.stack_info.size
 *
 * When transitioning to user space, the guard area will be removed from
 * the main stack. Any thread running in user mode will have full access
 * to the region denoted by thread.stack_info. Make it PMP-NAPOT compatible.
 *
 * +------------+ <- thread.stack_obj = thread.stack_info.start
 * | Thread     |
 * | stack      |
 * |            |
 * +............|
 * | TLS        | } thread.stack_info.delta
 * +------------+ <- thread.stack_info.start + thread.stack_info.size
 */
#define ARCH_THREAD_STACK_RESERVED Z_RISCV_STACK_GUARD_SIZE
#define ARCH_THREAD_STACK_SIZE_ADJUST(size) \
	Z_POW2_CEIL(MAX(size, CONFIG_PRIVILEGED_STACK_SIZE))
#define ARCH_THREAD_STACK_OBJ_ALIGN(size) \
	ARCH_THREAD_STACK_SIZE_ADJUST(size)
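
/*
 * Worked example (hypothetical numbers, not from the original header):
 * with CONFIG_PRIVILEGED_STACK_SIZE == 1024, a request for a 3000-byte
 * stack yields ARCH_THREAD_STACK_SIZE_ADJUST(3000) ==
 * Z_POW2_CEIL(MAX(3000, 1024)) == 4096, and the stack object is aligned
 * to that same 4096 so the region can be described by a single PMP
 * NAPOT entry.
 */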

#else /* !CONFIG_MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT */

/* The stack object will contain the PMP guard, the privilege stack, and then
 * the usermode stack buffer, in that order:
 *
 * +------------+ <- thread.stack_obj
 * | Guard      | } Z_RISCV_STACK_GUARD_SIZE
 * +------------+
 * | Priv Stack | } CONFIG_PRIVILEGED_STACK_SIZE
 * +------------+ <- thread.stack_info.start
 * | Thread     |
 * | stack      |
 * |            |
 * +............|
 * | TLS        | } thread.stack_info.delta
 * +------------+ <- thread.stack_info.start + thread.stack_info.size
 */
#define ARCH_THREAD_STACK_RESERVED \
	ROUND_UP(Z_RISCV_STACK_GUARD_SIZE + CONFIG_PRIVILEGED_STACK_SIZE, \
		 ARCH_STACK_PTR_ALIGN)
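
/*
 * Worked example (hypothetical numbers, not from the original header):
 * with a 1328-byte guard and CONFIG_PRIVILEGED_STACK_SIZE == 1024,
 * ARCH_THREAD_STACK_RESERVED == ROUND_UP(1328 + 1024, 16) == 2352,
 * which is already a multiple of 16.
 */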

#endif /* CONFIG_MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT */

#ifdef CONFIG_64BIT
#define RV_REGSIZE 8
#define RV_REGSHIFT 3
#else
#define RV_REGSIZE 4
#define RV_REGSHIFT 2
#endif
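
/*
 * Illustrative note (not from the original header): RV_REGSHIFT is the
 * log2 of RV_REGSIZE, so register indices can be turned into byte
 * offsets with a shift, e.g. offset = idx << RV_REGSHIFT rather than
 * idx * RV_REGSIZE.
 */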

/* Common mstatus bits. All supported cores today have the same layout. */

#define MSTATUS_IEN     (1UL << 3)
#define MSTATUS_MPP_M   (3UL << 11)
#define MSTATUS_MPIE_EN (1UL << 7)

#define MSTATUS_FS_OFF   (0UL << 13)
#define MSTATUS_FS_INIT  (1UL << 13)
#define MSTATUS_FS_CLEAN (2UL << 13)
#define MSTATUS_FS_DIRTY (3UL << 13)

/* This comes from openisa_rv32m1, but doesn't seem to hurt on other
 * platforms:
 * - Preserve machine privileges in MPP. If you see any documentation
 *   telling you that MPP is read-only on this SoC, don't believe its
 *   lies.
 * - Enable interrupts when exiting from exception into a new thread
 *   by setting MPIE now, so it will be copied into IE on mret.
 */
#define MSTATUS_DEF_RESTORE (MSTATUS_MPP_M | MSTATUS_MPIE_EN)
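
/*
 * Illustrative sketch (not part of the original header; assumes the
 * csr_read() helper from <zephyr/arch/riscv/csr.h>): the FS field tells
 * context-switch code whether the FPU state needs to be saved.
 */
#if 0
static inline bool fpu_state_is_dirty(void)
{
	/* FS is a 2-bit field and DIRTY sets both bits, so compare the
	 * whole field rather than testing a single bit.
	 */
	return (csr_read(mstatus) & MSTATUS_FS_DIRTY) == MSTATUS_FS_DIRTY;
}
#endif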

#ifndef _ASMLANGUAGE
#include <zephyr/sys/util.h>

#ifdef __cplusplus
extern "C" {
#endif

#ifdef CONFIG_IRQ_VECTOR_TABLE_JUMP_BY_CODE
#define ARCH_IRQ_VECTOR_JUMP_CODE(v) "j " STRINGIFY(v)
#endif

/* Kernel macros for memory attribution
 * (access permissions and cache-ability).
 *
 * The macros are to be stored in k_mem_partition_attr_t
 * objects. The format of a k_mem_partition_attr_t object
 * is a uint8_t composed of configuration register flags
 * located in arch/riscv/include/core_pmp.h
 */

/* Read-Write access permission attributes */
#define K_MEM_PARTITION_P_RW_U_RW	((k_mem_partition_attr_t) \
					{PMP_R | PMP_W})
#define K_MEM_PARTITION_P_RW_U_RO	((k_mem_partition_attr_t) \
					{PMP_R})
#define K_MEM_PARTITION_P_RW_U_NA	((k_mem_partition_attr_t) \
					{0})
#define K_MEM_PARTITION_P_RO_U_RO	((k_mem_partition_attr_t) \
					{PMP_R})
#define K_MEM_PARTITION_P_RO_U_NA	((k_mem_partition_attr_t) \
					{0})
#define K_MEM_PARTITION_P_NA_U_NA	((k_mem_partition_attr_t) \
					{0})

/* Execution-allowed attributes */
#define K_MEM_PARTITION_P_RWX_U_RWX	((k_mem_partition_attr_t) \
					{PMP_R | PMP_W | PMP_X})
#define K_MEM_PARTITION_P_RX_U_RX	((k_mem_partition_attr_t) \
					{PMP_R | PMP_X})
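
/*
 * Illustrative sketch (not part of the original header): these attributes
 * are consumed through struct k_mem_partition when populating a memory
 * domain. The buffer and domain names here are hypothetical.
 */
#if 0
static uint8_t shared_buf[256];

static struct k_mem_partition shared_part = {
	.start = (uintptr_t)shared_buf,
	.size = sizeof(shared_buf),
	.attr = K_MEM_PARTITION_P_RW_U_RW, /* kernel RW, user RW */
};

/* Later, e.g. in main():
 * k_mem_domain_add_partition(&app_domain, &shared_part);
 */
#endif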

/* Typedef for the k_mem_partition attribute */
typedef struct {
	uint8_t pmp_attr;
} k_mem_partition_attr_t;

struct arch_mem_domain {
	unsigned int pmp_update_nr;
};

extern void z_irq_spurious(const void *unused);

/*
 * Use the atomic instruction csrrc to lock out global IRQs.
 * csrrc: atomically read and clear bits in a CSR.
 */
static ALWAYS_INLINE unsigned int arch_irq_lock(void)
{
#ifdef CONFIG_RISCV_SOC_HAS_CUSTOM_IRQ_LOCK_OPS
	return z_soc_irq_lock();
#else
	unsigned int key;

	__asm__ volatile ("csrrc %0, mstatus, %1"
			  : "=r" (key)
			  : "rK" (MSTATUS_IEN)
			  : "memory");

	return key;
#endif
}

/*
 * Use the atomic instruction csrs to unlock global IRQs.
 * csrs: atomically set bits in a CSR.
 */
static ALWAYS_INLINE void arch_irq_unlock(unsigned int key)
{
#ifdef CONFIG_RISCV_SOC_HAS_CUSTOM_IRQ_LOCK_OPS
	z_soc_irq_unlock(key);
#else
	__asm__ volatile ("csrs mstatus, %0"
			  :
			  : "r" (key & MSTATUS_IEN)
			  : "memory");
#endif
}

static ALWAYS_INLINE bool arch_irq_unlocked(unsigned int key)
{
#ifdef CONFIG_RISCV_SOC_HAS_CUSTOM_IRQ_LOCK_OPS
	return z_soc_irq_unlocked(key);
#else
	return (key & MSTATUS_IEN) != 0;
#endif
}
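
/*
 * Illustrative usage sketch (not part of the original header): the key
 * returned by arch_irq_lock() records whether interrupts were enabled
 * before the lock, so nested lock/unlock pairs restore the previous
 * state instead of unconditionally re-enabling interrupts.
 */
#if 0
void critical_update(void)
{
	unsigned int key = arch_irq_lock();

	/* ... code that must not be preempted by an IRQ ... */

	arch_irq_unlock(key); /* re-enables only if previously enabled */
}
#endif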

static ALWAYS_INLINE void arch_nop(void)
{
	__asm__ volatile("nop");
}

static inline uint32_t arch_k_cycle_get_32(void)
{
	return sys_clock_cycle_get_32();
}

static inline uint64_t arch_k_cycle_get_64(void)
{
	return sys_clock_cycle_get_64();
}
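
/*
 * Illustrative usage sketch (not part of the original header): measuring
 * elapsed time with the 32-bit cycle counter; k_cyc_to_ns_floor64() is
 * the standard Zephyr conversion helper, and do_work() is hypothetical.
 */
#if 0
uint32_t start = arch_k_cycle_get_32();

do_work();

uint32_t cycles = arch_k_cycle_get_32() - start;
uint64_t ns = k_cyc_to_ns_floor64(cycles);
#endif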

#ifdef __cplusplus
}
#endif

#endif /* _ASMLANGUAGE */

#if defined(CONFIG_SOC_FAMILY_RISCV_PRIVILEGE)
#endif

#endif /* ZEPHYR_INCLUDE_ARCH_RISCV_ARCH_H_ */