Zephyr Project API  3.4.0
cache.h
/*
 * Copyright 2022 Carlo Caione <ccaione@baylibre.com>
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#ifndef ZEPHYR_INCLUDE_ARCH_ARM64_CACHE_H_
#define ZEPHYR_INCLUDE_ARCH_ARM64_CACHE_H_

#ifndef _ASMLANGUAGE

#include <zephyr/types.h>
#include <zephyr/sys/util.h>
#include <zephyr/sys/barrier.h>
#include <zephyr/arch/cpu.h>
#include <errno.h>

#ifdef __cplusplus
extern "C" {
#endif

#define K_CACHE_WB BIT(0)
#define K_CACHE_INVD BIT(1)
#define K_CACHE_WB_INVD (K_CACHE_WB | K_CACHE_INVD)

#if defined(CONFIG_DCACHE)

#define CTR_EL0_DMINLINE_SHIFT 16
#define CTR_EL0_DMINLINE_MASK BIT_MASK(4)
#define CTR_EL0_CWG_SHIFT 24
#define CTR_EL0_CWG_MASK BIT_MASK(4)

/* clidr_el1 */
#define CLIDR_EL1_LOC_SHIFT 24
#define CLIDR_EL1_LOC_MASK BIT_MASK(3)
#define CLIDR_EL1_CTYPE_SHIFT(level) ((level) * 3)
#define CLIDR_EL1_CTYPE_MASK BIT_MASK(3)

/* ccsidr_el1 */
#define CCSIDR_EL1_LN_SZ_SHIFT 0
#define CCSIDR_EL1_LN_SZ_MASK BIT_MASK(3)
#define CCSIDR_EL1_WAYS_SHIFT 3
#define CCSIDR_EL1_WAYS_MASK BIT_MASK(10)
#define CCSIDR_EL1_SETS_SHIFT 13
#define CCSIDR_EL1_SETS_MASK BIT_MASK(15)
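
/*
 * Worked example (illustrative, not part of the original header): a 32 KiB,
 * 4-way cache with 64-byte lines reports LineSize = 2 (log2(16 words) - 2),
 * Associativity = 3 (ways - 1) and NumSets = 127 (sets - 1) in ccsidr_el1;
 * arm64_dcache_all() below derives its set/way loop bounds and shift
 * positions from these fields.
 */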

#define dc_ops(op, val) \
({ \
        __asm__ volatile ("dc " op ", %0" :: "r" (val) : "memory"); \
})
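
/*
 * Illustrative use of the macro above (not part of the original header):
 * dc_ops("civac", start_addr) emits "dc civac, <Xt>" with <Xt> holding
 * start_addr, i.e. a clean-and-invalidate by virtual address to the point
 * of coherency, which is how the helpers below drive the data cache.
 */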

static size_t dcache_line_size;

static ALWAYS_INLINE size_t arch_dcache_line_size_get(void)
{
        uint64_t ctr_el0;
        uint32_t dminline;

        if (dcache_line_size) {
                return dcache_line_size;
        }

        ctr_el0 = read_sysreg(CTR_EL0);

        dminline = (ctr_el0 >> CTR_EL0_DMINLINE_SHIFT) & CTR_EL0_DMINLINE_MASK;

        dcache_line_size = 4 << dminline;

        return dcache_line_size;
}
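
/*
 * Worked example (illustrative): CTR_EL0.DminLine holds log2 of the smallest
 * data cache line size in 4-byte words, so a typical value of 4 yields
 * 4 << 4 = 64 bytes per line, the common Cortex-A line size.
 */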

/*
 * operation for data cache by virtual address to PoC
 * ops:  K_CACHE_INVD: invalidate
 *       K_CACHE_WB: clean
 *       K_CACHE_WB_INVD: clean and invalidate
 */
static ALWAYS_INLINE int arm64_dcache_range(void *addr, size_t size, int op)
{
        size_t line_size;
        uintptr_t start_addr = (uintptr_t)addr;
        uintptr_t end_addr = start_addr + size;

        if (op != K_CACHE_INVD && op != K_CACHE_WB && op != K_CACHE_WB_INVD) {
                return -ENOTSUP;
        }

        line_size = arch_dcache_line_size_get();

        /*
         * For the data cache invalidate operation, clean and invalidate
         * the partial cache lines at both ends of the given range to
         * prevent data corruption.
         *
         * For example (assume the cache line size is 64 bytes):
         * two consecutive 32-byte buffers can be cached in one line,
         * as below.
         *              +------------------+------------------+
         *  Cache line: | buffer 0 (dirty) | buffer 1         |
         *              +------------------+------------------+
         * If the start address is not line-aligned, invalidating
         * buffer 1 invalidates the whole cache line, and any dirty
         * data in buffer 0 is lost.
         * The same logic applies to a non-aligned end address.
         */
        if (op == K_CACHE_INVD) {
                if (end_addr & (line_size - 1)) {
                        end_addr &= ~(line_size - 1);
                        dc_ops("civac", end_addr);
                }

                if (start_addr & (line_size - 1)) {
                        start_addr &= ~(line_size - 1);
                        if (start_addr == end_addr) {
                                goto done;
                        }
                        dc_ops("civac", start_addr);
                        start_addr += line_size;
                }
        }

        /* Align address to line size */
        start_addr &= ~(line_size - 1);

        while (start_addr < end_addr) {
                if (op == K_CACHE_INVD) {
                        dc_ops("ivac", start_addr);
                } else if (op == K_CACHE_WB) {
                        dc_ops("cvac", start_addr);
                } else if (op == K_CACHE_WB_INVD) {
                        dc_ops("civac", start_addr);
                }

                start_addr += line_size;
        }

done:
        /* Wait for the cache maintenance operations to complete */
        barrier_dsync_fence_full();

        return 0;
}
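
/*
 * Worked example (illustrative, assuming a 64-byte line size): invalidating
 * addr = 0x1010, size = 0x70 gives start = 0x1010, end = 0x1080. The end is
 * line-aligned, the start is not, so the head line at 0x1000 gets "dc civac"
 * (clean+invalidate, preserving any dirty neighbouring data in that line)
 * and the fully covered line at 0x1040 gets a plain "dc ivac".
 */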

/*
 * operation for all data cache
 * ops:  K_CACHE_INVD: invalidate
 *       K_CACHE_WB: clean
 *       K_CACHE_WB_INVD: clean and invalidate
 */
static ALWAYS_INLINE int arm64_dcache_all(int op)
{
        uint32_t clidr_el1, csselr_el1, ccsidr_el1;
        uint8_t loc, ctype, cache_level, line_size, way_pos;
        uint32_t max_ways, max_sets, dc_val, set, way;

        if (op != K_CACHE_INVD && op != K_CACHE_WB && op != K_CACHE_WB_INVD) {
                return -ENOTSUP;
        }

        /* Data barrier before start */
        barrier_dsync_fence_full();

        clidr_el1 = read_clidr_el1();

        loc = (clidr_el1 >> CLIDR_EL1_LOC_SHIFT) & CLIDR_EL1_LOC_MASK;
        if (!loc) {
                return 0;
        }

        for (cache_level = 0; cache_level < loc; cache_level++) {
                ctype = (clidr_el1 >> CLIDR_EL1_CTYPE_SHIFT(cache_level))
                        & CLIDR_EL1_CTYPE_MASK;
                /* No data cache, continue */
                if (ctype < 2) {
                        continue;
                }

                /* select cache level */
                csselr_el1 = cache_level << 1;
                write_csselr_el1(csselr_el1);
                /* Synchronize the cache selection before reading ccsidr_el1 */
                barrier_isync_fence_full();

                ccsidr_el1 = read_ccsidr_el1();
                line_size = (ccsidr_el1 >> CCSIDR_EL1_LN_SZ_SHIFT
                             & CCSIDR_EL1_LN_SZ_MASK) + 4;
                max_ways = (ccsidr_el1 >> CCSIDR_EL1_WAYS_SHIFT)
                           & CCSIDR_EL1_WAYS_MASK;
                max_sets = (ccsidr_el1 >> CCSIDR_EL1_SETS_SHIFT)
                           & CCSIDR_EL1_SETS_MASK;
                /* 32-log2(ways), bit position of way in DC operand */
                way_pos = __builtin_clz(max_ways);
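                /*
                 * Worked example (illustrative): a 4-way cache reports
                 * max_ways = 3, and __builtin_clz(3) on a 32-bit value is
                 * 30, so the way number lands in bits [31:30] of the
                 * set/way operand, as the DC ISW/CSW/CISW encoding expects.
                 */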

                for (set = 0; set <= max_sets; set++) {
                        for (way = 0; way <= max_ways; way++) {
                                /* way number, aligned to pos in DC operand */
                                dc_val = way << way_pos;
                                /* cache level, aligned to pos in DC operand */
                                dc_val |= csselr_el1;
                                /* set number, aligned to pos in DC operand */
                                dc_val |= set << line_size;

                                if (op == K_CACHE_INVD) {
                                        dc_ops("isw", dc_val);
                                } else if (op == K_CACHE_WB_INVD) {
                                        dc_ops("cisw", dc_val);
                                } else if (op == K_CACHE_WB) {
                                        dc_ops("csw", dc_val);
                                }
                        }
                }
        }

        /* Restore csselr_el1 to level 0 */
        write_csselr_el1(0);
        barrier_dsync_fence_full();
        barrier_isync_fence_full();

        return 0;
}

static ALWAYS_INLINE int arch_dcache_flush_all(void)
{
        return arm64_dcache_all(K_CACHE_WB);
}

static ALWAYS_INLINE int arch_dcache_invd_all(void)
{
        return arm64_dcache_all(K_CACHE_INVD);
}

static ALWAYS_INLINE int arch_dcache_flush_and_invd_all(void)
{
        return arm64_dcache_all(K_CACHE_WB_INVD);
}

static ALWAYS_INLINE int arch_dcache_flush_range(void *addr, size_t size)
{
        return arm64_dcache_range(addr, size, K_CACHE_WB);
}

static ALWAYS_INLINE int arch_dcache_invd_range(void *addr, size_t size)
{
        return arm64_dcache_range(addr, size, K_CACHE_INVD);
}

static ALWAYS_INLINE int arch_dcache_flush_and_invd_range(void *addr, size_t size)
{
        return arm64_dcache_range(addr, size, K_CACHE_WB_INVD);
}

static ALWAYS_INLINE void arch_dcache_enable(void)
{
        /* nothing */
}

static ALWAYS_INLINE void arch_dcache_disable(void)
{
        /* nothing */
}

#endif /* CONFIG_DCACHE */

#if defined(CONFIG_ICACHE)

static ALWAYS_INLINE size_t arch_icache_line_size_get(void)
{
        return -ENOTSUP;
}

static ALWAYS_INLINE int arch_icache_flush_all(void)
{
        return -ENOTSUP;
}

static ALWAYS_INLINE int arch_icache_invd_all(void)
{
        return -ENOTSUP;
}

static ALWAYS_INLINE int arch_icache_flush_and_invd_all(void)
{
        return -ENOTSUP;
}

static ALWAYS_INLINE int arch_icache_flush_range(void *addr, size_t size)
{
        ARG_UNUSED(addr);
        ARG_UNUSED(size);
        return -ENOTSUP;
}

static ALWAYS_INLINE int arch_icache_invd_range(void *addr, size_t size)
{
        ARG_UNUSED(addr);
        ARG_UNUSED(size);
        return -ENOTSUP;
}

static ALWAYS_INLINE int arch_icache_flush_and_invd_range(void *addr, size_t size)
{
        ARG_UNUSED(addr);
        ARG_UNUSED(size);
        return -ENOTSUP;
}

static ALWAYS_INLINE void arch_icache_enable(void)
{
        /* nothing */
}

static ALWAYS_INLINE void arch_icache_disable(void)
{
        /* nothing */
}

#endif /* CONFIG_ICACHE */

#ifdef __cplusplus
}
#endif

#endif /* _ASMLANGUAGE */

#endif /* ZEPHYR_INCLUDE_ARCH_ARM64_CACHE_H_ */
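
Usage sketch (not part of the header above): a driver handing a buffer to a DMA engine typically cleans the data cache before the device reads it and invalidates it before the CPU reads what the device wrote back. The snippet below calls the arch_dcache_* helpers defined in this file directly for illustration; the buffer name, size, and alignment are placeholders, and portable Zephyr code would normally go through the sys_cache_* wrappers in <zephyr/cache.h> instead.

#include <zephyr/kernel.h>
#include <zephyr/cache.h>

/* Placeholder DMA buffer, aligned to the 64-byte line size assumed above */
static uint8_t dma_buf[256] __aligned(64);

void dma_tx_prepare(void)
{
        /* Push pending CPU writes out to the point of coherency */
        arch_dcache_flush_range(dma_buf, sizeof(dma_buf));
}

void dma_rx_complete(void)
{
        /* Discard stale cached copies so the CPU sees the device's data */
        arch_dcache_invd_range(dma_buf, sizeof(dma_buf));
}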