arch.h
/*
 * Copyright (c) 2016 Jean-Paul Etienne <fractalclone@gmail.com>
 * Contributors: 2018 Antmicro <www.antmicro.com>
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#ifndef ZEPHYR_INCLUDE_ARCH_RISCV_ARCH_H_
#define ZEPHYR_INCLUDE_ARCH_RISCV_ARCH_H_

#if defined(CONFIG_USERSPACE)
#include <zephyr/arch/riscv/syscall.h>
#endif /* CONFIG_USERSPACE */
#include <zephyr/irq.h>
#include <zephyr/sw_isr_table.h>
#include <soc.h>
#include <zephyr/devicetree.h>

/* stacks: on the RISC-V architecture the stack pointer must be 16-byte aligned */
#define ARCH_STACK_PTR_ALIGN 16

#ifdef CONFIG_PMP_STACK_GUARD
/*
 * The StackGuard is an area at the bottom of the kernel-mode stack made to
 * fault when accessed. It does _not_ fault when in exception mode, as we rely
 * on that area to save the exception stack frame and to process said fault.
 * Therefore the guard area must be large enough to hold the esf, plus some
 * configurable stack wiggle room to execute the fault handling code off of,
 * as well as some guard size to cover possible sudden stack pointer
 * displacement before the fault.
 *
 * The m-mode PMP set is not heavily used, so there is no need to force NAPOT.
 */
#define Z_RISCV_STACK_GUARD_SIZE \
	ROUND_UP(sizeof(z_arch_esf_t) + CONFIG_PMP_STACK_GUARD_MIN_SIZE, \
		 ARCH_STACK_PTR_ALIGN)
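
/*
 * Illustrative sizing (hypothetical numbers, not taken from any SoC): on
 * RV32 with sizeof(z_arch_esf_t) == 124 and
 * CONFIG_PMP_STACK_GUARD_MIN_SIZE == 256, the guard would come out to
 * ROUND_UP(124 + 256, 16) == 384 bytes.
 */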

/* Kernel-only stacks have the following layout if a stack guard is enabled:
 *
 * +------------+ <- thread.stack_obj
 * | Guard      | } Z_RISCV_STACK_GUARD_SIZE
 * +------------+ <- thread.stack_info.start
 * | Kernel     |
 * | stack      |
 * |            |
 * +............|
 * | TLS        | } thread.stack_info.delta
 * +------------+ <- thread.stack_info.start + thread.stack_info.size
 */
#define ARCH_KERNEL_STACK_RESERVED Z_RISCV_STACK_GUARD_SIZE
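
/*
 * Usage sketch (typical Zephyr pattern, names are illustrative): a stack
 * declared with K_KERNEL_STACK_DEFINE() transparently gains the guard
 * region through ARCH_KERNEL_STACK_RESERVED:
 *
 *     K_KERNEL_STACK_DEFINE(my_kstack, 1024);
 *     // usable space starts at thread.stack_info.start, past the guard
 */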

#else /* !CONFIG_PMP_STACK_GUARD */
#define Z_RISCV_STACK_GUARD_SIZE 0
#endif

#ifdef CONFIG_MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT
/* The privilege elevation stack is located in another area of memory
 * generated at build time by gen_kobject_list.py
 *
 * +------------+ <- thread.arch.priv_stack_start
 * | Guard      | } Z_RISCV_STACK_GUARD_SIZE
 * +------------+
 * | Priv Stack | } CONFIG_PRIVILEGED_STACK_SIZE
 * +------------+ <- thread.arch.priv_stack_start +
 *                   CONFIG_PRIVILEGED_STACK_SIZE +
 *                   Z_RISCV_STACK_GUARD_SIZE
 *
 * The main stack will be initially (or potentially only) used by kernel
 * mode so we need to make room for a possible stack guard area when enabled:
 *
 * +------------+ <- thread.stack_obj
 * | Guard      | } Z_RISCV_STACK_GUARD_SIZE
 * +............| <- thread.stack_info.start
 * | Thread     |
 * | stack      |
 * |            |
 * +............|
 * | TLS        | } thread.stack_info.delta
 * +------------+ <- thread.stack_info.start + thread.stack_info.size
 *
 * When transitioning to user space, the guard area will be removed from
 * the main stack. Any thread running in user mode will have full access
 * to the region denoted by thread.stack_info. Make it PMP-NAPOT compatible.
 *
 * +------------+ <- thread.stack_obj = thread.stack_info.start
 * | Thread     |
 * | stack      |
 * |            |
 * +............|
 * | TLS        | } thread.stack_info.delta
 * +------------+ <- thread.stack_info.start + thread.stack_info.size
 */
#define ARCH_THREAD_STACK_RESERVED Z_RISCV_STACK_GUARD_SIZE
#define ARCH_THREAD_STACK_SIZE_ADJUST(size) \
	Z_POW2_CEIL(MAX(size, CONFIG_PRIVILEGED_STACK_SIZE))
#define ARCH_THREAD_STACK_OBJ_ALIGN(size) \
	ARCH_THREAD_STACK_SIZE_ADJUST(size)
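
/*
 * Worked example (hypothetical request size): with
 * CONFIG_PRIVILEGED_STACK_SIZE == 2048, a request for a 3000-byte stack
 * gives ARCH_THREAD_STACK_SIZE_ADJUST(3000) == Z_POW2_CEIL(3000) == 4096,
 * and ARCH_THREAD_STACK_OBJ_ALIGN() aligns the stack object to that same
 * power of two so the whole stack fits a single PMP NAPOT region.
 */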

#else /* !CONFIG_MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT */

/* The stack object will contain the PMP guard, the privilege stack, and then
 * the usermode stack buffer in that order:
 *
 * +------------+ <- thread.stack_obj
 * | Guard      | } Z_RISCV_STACK_GUARD_SIZE
 * +------------+
 * | Priv Stack | } CONFIG_PRIVILEGED_STACK_SIZE
 * +------------+ <- thread.stack_info.start
 * | Thread     |
 * | stack      |
 * |            |
 * +............|
 * | TLS        | } thread.stack_info.delta
 * +------------+ <- thread.stack_info.start + thread.stack_info.size
 */
#define ARCH_THREAD_STACK_RESERVED \
	ROUND_UP(Z_RISCV_STACK_GUARD_SIZE + CONFIG_PRIVILEGED_STACK_SIZE, \
		 ARCH_STACK_PTR_ALIGN)
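
/*
 * Usage sketch (standard Zephyr APIs; names and sizes are illustrative):
 *
 *     K_THREAD_STACK_DEFINE(my_stack, 2048);
 *     struct k_thread my_thread;
 *
 *     k_thread_create(&my_thread, my_stack, K_THREAD_STACK_SIZEOF(my_stack),
 *                     my_entry, NULL, NULL, NULL, 7, K_USER, K_NO_WAIT);
 *
 * The 2048 bytes are all usable by the thread; the guard and privilege
 * stack live in the ARCH_THREAD_STACK_RESERVED area in front of them.
 */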

#endif /* CONFIG_MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT */

#ifdef CONFIG_64BIT
#define RV_REGSIZE 8
#define RV_REGSHIFT 3
#else
#define RV_REGSIZE 4
#define RV_REGSHIFT 2
#endif
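
/*
 * Example (illustrative): RV_REGSHIFT converts a register index into a
 * byte offset within a saved-register array, i.e. register n is found at
 * (n << RV_REGSHIFT) bytes: n * 4 on RV32 and n * 8 on RV64.
 */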

/* Common mstatus bits. All supported cores today have the same
 * layout.
 */

#define MSTATUS_IEN     (1UL << 3)
#define MSTATUS_MPP_M   (3UL << 11)
#define MSTATUS_MPIE_EN (1UL << 7)
#define MSTATUS_FS_INIT (1UL << 13)
#define MSTATUS_FS_MASK ((1UL << 13) | (1UL << 14))

/* This comes from openisa_rv32m1, but doesn't seem to hurt on other
 * platforms:
 * - Preserve machine privileges in MPP. If you see any documentation
 *   telling you that MPP is read-only on this SoC, don't believe its
 *   lies.
 * - Enable interrupts when exiting from exception into a new thread
 *   by setting MPIE now, so it will be copied into IE on mret.
 */
#define MSTATUS_DEF_RESTORE (MSTATUS_MPP_M | MSTATUS_MPIE_EN)
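
/*
 * Sketch (hypothetical helper, not part of this header): marking the FPU
 * state "Initial" in a thread's saved mstatus uses the FS field macros:
 *
 *     static inline unsigned long mstatus_fpu_init(unsigned long mstatus)
 *     {
 *             return (mstatus & ~MSTATUS_FS_MASK) | MSTATUS_FS_INIT;
 *     }
 */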

#ifndef _ASMLANGUAGE
#include <zephyr/sys/util.h>

#ifdef __cplusplus
extern "C" {
#endif

/* Kernel macros for memory attribution
 * (access permissions and cache-ability).
 *
 * The macros are to be stored in k_mem_partition_attr_t
 * objects. The format of a k_mem_partition_attr_t object
 * is a uint8_t composed of configuration register flags
 * located in arch/riscv/include/core_pmp.h
 */

/* Read-Write access permission attributes */
#define K_MEM_PARTITION_P_RW_U_RW   ((k_mem_partition_attr_t) {PMP_R | PMP_W})
#define K_MEM_PARTITION_P_RW_U_RO   ((k_mem_partition_attr_t) {PMP_R})
#define K_MEM_PARTITION_P_RW_U_NA   ((k_mem_partition_attr_t) {0})
#define K_MEM_PARTITION_P_RO_U_RO   ((k_mem_partition_attr_t) {PMP_R})
#define K_MEM_PARTITION_P_RO_U_NA   ((k_mem_partition_attr_t) {0})
#define K_MEM_PARTITION_P_NA_U_NA   ((k_mem_partition_attr_t) {0})

/* Execution-allowed attributes */
#define K_MEM_PARTITION_P_RWX_U_RWX ((k_mem_partition_attr_t) {PMP_R | PMP_W | PMP_X})
#define K_MEM_PARTITION_P_RX_U_RX   ((k_mem_partition_attr_t) {PMP_R | PMP_X})
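
/*
 * Usage sketch (standard Zephyr API; names are illustrative): the
 * attributes above go in the last argument of K_MEM_PARTITION_DEFINE():
 *
 *     uint8_t __aligned(16) app_buf[256];
 *     K_MEM_PARTITION_DEFINE(app_part, app_buf, sizeof(app_buf),
 *                            K_MEM_PARTITION_P_RW_U_RW);
 *
 * Alignment and size constraints depend on the PMP mode in use.
 */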

/* Typedef for the k_mem_partition attribute */
typedef struct {
	uint8_t pmp_attr;
} k_mem_partition_attr_t;

struct arch_mem_domain {
	unsigned int pmp_update_nr;
};

void arch_irq_enable(unsigned int irq);
void arch_irq_disable(unsigned int irq);
int arch_irq_is_enabled(unsigned int irq);
void arch_irq_priority_set(unsigned int irq, unsigned int prio);
void z_irq_spurious(const void *unused);

#if defined(CONFIG_RISCV_HAS_PLIC)
#define ARCH_IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p) \
{ \
	Z_ISR_DECLARE(irq_p, 0, isr_p, isr_param_p); \
	arch_irq_priority_set(irq_p, priority_p); \
}
#elif defined(CONFIG_NUCLEI_ECLIC)
void nuclei_eclic_irq_priority_set(unsigned int irq, unsigned int prio, unsigned int flags);
#define ARCH_IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p) \
{ \
	Z_ISR_DECLARE(irq_p, 0, isr_p, isr_param_p); \
	nuclei_eclic_irq_priority_set(irq_p, priority_p, flags_p); \
}
#else
#define ARCH_IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p) \
{ \
	Z_ISR_DECLARE(irq_p, 0, isr_p, isr_param_p); \
}
#endif
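
/*
 * Usage sketch (standard Zephyr pattern; IRQ number, priority and handler
 * are illustrative): drivers reach ARCH_IRQ_CONNECT() via IRQ_CONNECT():
 *
 *     void my_isr(const void *arg);
 *
 *     IRQ_CONNECT(5, 1, my_isr, NULL, 0);
 *     irq_enable(5);
 */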

/*
 * Use the atomic csrrc instruction to lock out global interrupts.
 * csrrc: atomically read and clear bits in a CSR.
 */
static ALWAYS_INLINE unsigned int arch_irq_lock(void)
{
	unsigned int key;

	__asm__ volatile ("csrrc %0, mstatus, %1"
			  : "=r" (key)
			  : "rK" (MSTATUS_IEN)
			  : "memory");

	return key;
}

/*
 * Use the atomic csrs instruction to unlock global interrupts.
 * csrs: atomically set bits in a CSR.
 */
static ALWAYS_INLINE void arch_irq_unlock(unsigned int key)
{
	__asm__ volatile ("csrs mstatus, %0"
			  :
			  : "r" (key & MSTATUS_IEN)
			  : "memory");
}

static ALWAYS_INLINE bool arch_irq_unlocked(unsigned int key)
{
	return (key & MSTATUS_IEN) != 0;
}
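
/*
 * Usage sketch (standard Zephyr pattern): the key holds the previous
 * MSTATUS_IEN bit, so nested critical sections restore the outer state:
 *
 *     unsigned int key = irq_lock();
 *     // ... short critical section ...
 *     irq_unlock(key);
 */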

static ALWAYS_INLINE void arch_nop(void)
{
	__asm__ volatile ("nop");
}

uint32_t sys_clock_cycle_get_32(void);

static inline uint32_t arch_k_cycle_get_32(void)
{
	return sys_clock_cycle_get_32();
}

uint64_t sys_clock_cycle_get_64(void);

static inline uint64_t arch_k_cycle_get_64(void)
{
	return sys_clock_cycle_get_64();
}
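
/*
 * Usage sketch (standard Zephyr APIs; do_work() is illustrative): timing
 * a short operation with the 32-bit cycle counter:
 *
 *     uint32_t start = k_cycle_get_32();
 *     do_work();
 *     uint64_t ns = k_cyc_to_ns_floor64(k_cycle_get_32() - start);
 */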

#ifdef __cplusplus
}
#endif

#endif /* _ASMLANGUAGE */

#if defined(CONFIG_SOC_FAMILY_RISCV_PRIVILEGE)
#endif

#endif /* ZEPHYR_INCLUDE_ARCH_RISCV_ARCH_H_ */