Zephyr Project API  3.1.0
A Scalable Open Source RTOS
syscall.h
/*
 * Copyright (c) 2020 BayLibre, SAS
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#ifndef ZEPHYR_INCLUDE_ARCH_RISCV_SYSCALL_H_
#define ZEPHYR_INCLUDE_ARCH_RISCV_SYSCALL_H_

/*
 * Privileged mode system calls
 */
#define RV_ECALL_RUNTIME_EXCEPT 0
#define RV_ECALL_IRQ_OFFLOAD 1

#ifndef _ASMLANGUAGE

#include <zephyr/types.h>
#include <stdbool.h>

#ifdef __cplusplus
extern "C" {
#endif

/*
 * Syscall invocation macros. riscv-specific machine constraints used to ensure
 * args land in the proper registers.
 */
static inline uintptr_t arch_syscall_invoke6(uintptr_t arg1, uintptr_t arg2,
					     uintptr_t arg3, uintptr_t arg4,
					     uintptr_t arg5, uintptr_t arg6,
					     uintptr_t call_id)
{
	register ulong_t a0 __asm__ ("a0") = arg1;
	register ulong_t a1 __asm__ ("a1") = arg2;
	register ulong_t a2 __asm__ ("a2") = arg3;
	register ulong_t a3 __asm__ ("a3") = arg4;
	register ulong_t a4 __asm__ ("a4") = arg5;
	register ulong_t a5 __asm__ ("a5") = arg6;
	register ulong_t a7 __asm__ ("a7") = call_id;

	__asm__ volatile ("ecall"
			  : "+r" (a0)
			  : "r" (a1), "r" (a2), "r" (a3), "r" (a4), "r" (a5),
			    "r" (a7)
			  : "memory");
	return a0;
}

static inline uintptr_t arch_syscall_invoke5(uintptr_t arg1, uintptr_t arg2,
					     uintptr_t arg3, uintptr_t arg4,
					     uintptr_t arg5,
					     uintptr_t call_id)
{
	register ulong_t a0 __asm__ ("a0") = arg1;
	register ulong_t a1 __asm__ ("a1") = arg2;
	register ulong_t a2 __asm__ ("a2") = arg3;
	register ulong_t a3 __asm__ ("a3") = arg4;
	register ulong_t a4 __asm__ ("a4") = arg5;
	register ulong_t a7 __asm__ ("a7") = call_id;

	__asm__ volatile ("ecall"
			  : "+r" (a0)
			  : "r" (a1), "r" (a2), "r" (a3), "r" (a4), "r" (a7)
			  : "memory");
	return a0;
}

static inline uintptr_t arch_syscall_invoke4(uintptr_t arg1, uintptr_t arg2,
					     uintptr_t arg3, uintptr_t arg4,
					     uintptr_t call_id)
{
	register ulong_t a0 __asm__ ("a0") = arg1;
	register ulong_t a1 __asm__ ("a1") = arg2;
	register ulong_t a2 __asm__ ("a2") = arg3;
	register ulong_t a3 __asm__ ("a3") = arg4;
	register ulong_t a7 __asm__ ("a7") = call_id;

	__asm__ volatile ("ecall"
			  : "+r" (a0)
			  : "r" (a1), "r" (a2), "r" (a3), "r" (a7)
			  : "memory");
	return a0;
}

static inline uintptr_t arch_syscall_invoke3(uintptr_t arg1, uintptr_t arg2,
					     uintptr_t arg3,
					     uintptr_t call_id)
{
	register ulong_t a0 __asm__ ("a0") = arg1;
	register ulong_t a1 __asm__ ("a1") = arg2;
	register ulong_t a2 __asm__ ("a2") = arg3;
	register ulong_t a7 __asm__ ("a7") = call_id;

	__asm__ volatile ("ecall"
			  : "+r" (a0)
			  : "r" (a1), "r" (a2), "r" (a7)
			  : "memory");
	return a0;
}

static inline uintptr_t arch_syscall_invoke2(uintptr_t arg1, uintptr_t arg2,
					     uintptr_t call_id)
{
	register ulong_t a0 __asm__ ("a0") = arg1;
	register ulong_t a1 __asm__ ("a1") = arg2;
	register ulong_t a7 __asm__ ("a7") = call_id;

	__asm__ volatile ("ecall"
			  : "+r" (a0)
			  : "r" (a1), "r" (a7)
			  : "memory");
	return a0;
}

static inline uintptr_t arch_syscall_invoke1(uintptr_t arg1, uintptr_t call_id)
{
	register ulong_t a0 __asm__ ("a0") = arg1;
	register ulong_t a7 __asm__ ("a7") = call_id;

	__asm__ volatile ("ecall"
			  : "+r" (a0)
			  : "r" (a7)
			  : "memory");
	return a0;
}

static inline uintptr_t arch_syscall_invoke0(uintptr_t call_id)
{
	register ulong_t a0 __asm__ ("a0");
	register ulong_t a7 __asm__ ("a7") = call_id;

	__asm__ volatile ("ecall"
			  : "=r" (a0)
			  : "r" (a7)
			  : "memory");
	return a0;
}

#ifdef CONFIG_USERSPACE
static inline bool arch_is_user_context(void)
{
#ifdef CONFIG_SMP
	/*
	 * This is painful. There is no way for u-mode code to know if we're
	 * currently executing in u-mode without generating a fault, besides
	 * stealing a general purpose register away from the standard ABI
	 * that is. And a global variable doesn't work on SMP as this must be
	 * per-CPU and we could be migrated to another CPU just at the right
	 * moment to peek at the wrong CPU variable (and u-mode can't disable
	 * preemption either).
	 *
	 * So, given that we'll have to pay the price of an exception entry
	 * anyway, let's at least make it free to privileged threads by using
	 * the mscratch register as the non-user context indicator (it must
	 * be zero in m-mode for exception entry to work properly). In the
	 * case of u-mode we'll simulate a proper return value in the
	 * exception trap code. Let's settle on the return value in t0
	 * and omit the volatile to give the compiler a chance to cache
	 * the result.
	 */
	register ulong_t is_user __asm__ ("t0");
	__asm__ ("csrr %0, mscratch" : "=r" (is_user));
	return is_user != 0;
#else
	/* Defined in arch/riscv/core/thread.c */
	extern uint32_t is_user_mode;
	return is_user_mode;
#endif
}
#endif

#ifdef __cplusplus
}
#endif

#endif /* _ASMLANGUAGE */
#endif /* ZEPHYR_INCLUDE_ARCH_RISCV_SYSCALL_H_ */
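
How these helpers are reached in practice: a user-facing system call wrapper checks arch_is_user_context() and, from u-mode, marshals its arguments into a0..a5 with the call ID in a7 via one of the arch_syscall_invoke*() helpers above; from privileged context it calls the kernel implementation directly. The sketch below is illustrative only: MY_SYSCALL_ID and z_impl_my_op() are hypothetical stand-ins (real IDs and wrappers come from Zephyr's syscall generation machinery), and the include path is an assumption.

/*
 * Illustrative wrapper only; MY_SYSCALL_ID and z_impl_my_op() are
 * hypothetical, and the include path is assumed.
 */
#include <stdint.h>
#include <arch/riscv/syscall.h>

#define MY_SYSCALL_ID 42	/* hypothetical call ID */

extern int z_impl_my_op(uint32_t dev, uint32_t val);	/* hypothetical kernel impl */

static inline int my_op(uint32_t dev, uint32_t val)
{
#ifdef CONFIG_USERSPACE
	if (arch_is_user_context()) {
		/* u-mode: args go to a0/a1, the call ID to a7, then "ecall" */
		return (int)arch_syscall_invoke2((uintptr_t)dev, (uintptr_t)val,
						 MY_SYSCALL_ID);
	}
#endif
	/* already privileged: call the implementation directly */
	return z_impl_my_op(dev, val);
}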
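
The two RV_ECALL_* IDs at the top of the file cover ecalls issued from privileged code rather than from user threads: fatal runtime exceptions and IRQ offload requests. A plausible sketch of the offload path follows, assuming the routine and its argument travel in a0/a1 with RV_ECALL_IRQ_OFFLOAD in a7; the actual implementation lives in arch/riscv/core/irq_offload.c and may differ in detail.

/*
 * Hedged sketch of an IRQ offload request funneled through the same
 * ecall path; example_irq_offload() is not a real Zephyr function.
 */
#include <arch/riscv/syscall.h>	/* include path assumed */

static void example_irq_offload(void (*routine)(const void *arg),
				const void *parameter)
{
	/* routine/parameter ride in a0/a1, RV_ECALL_IRQ_OFFLOAD in a7 */
	(void)arch_syscall_invoke2((uintptr_t)routine, (uintptr_t)parameter,
				   RV_ECALL_IRQ_OFFLOAD);
}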