cache.h
/*
 * Copyright 2021 Intel Corporation
 * SPDX-License-Identifier: Apache-2.0
 */
#ifndef ZEPHYR_INCLUDE_ARCH_XTENSA_CACHE_H_
#define ZEPHYR_INCLUDE_ARCH_XTENSA_CACHE_H_

#include <xtensa/config/core-isa.h>
#include <zephyr/toolchain.h>
#include <zephyr/sys/util.h>
#include <zephyr/debug/sparse.h>

#ifdef __cplusplus
extern "C" {
#endif

/* Bytes covered by one way of the data cache: XCHAL_DCACHE_SIZE total,
 * split evenly across XCHAL_DCACHE_WAYS ways.
 */
#define Z_DCACHE_MAX (XCHAL_DCACHE_SIZE / XCHAL_DCACHE_WAYS)

#if XCHAL_DCACHE_SIZE
BUILD_ASSERT(Z_IS_POW2(XCHAL_DCACHE_LINESIZE));
BUILD_ASSERT(Z_IS_POW2(Z_DCACHE_MAX));
#endif

/* Write any dirty cache lines overlapping [addr, addr + bytes) back to
 * memory, leaving them valid in the cache.  A no-op when bytes is 0.
 */
static ALWAYS_INLINE void z_xtensa_cache_flush(void *addr, size_t bytes)
{
#if XCHAL_DCACHE_SIZE
	size_t step = XCHAL_DCACHE_LINESIZE;
	size_t first = ROUND_DOWN(addr, step);
	size_t last = ROUND_UP(((long)addr) + bytes, step);
	size_t line;

	for (line = first; bytes && line < last; line += step) {
		__asm__ volatile("dhwb %0, 0" :: "r"(line));
	}
#endif
}

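/*
 * Illustrative sketch (not part of the original header): the usual
 * reason to flush is to push CPU-written data out to RAM before a
 * device reads the buffer directly.  The buffer and the producer
 * function are hypothetical.
 *
 *	uint8_t tx_buf[256];
 *
 *	fill_buffer(tx_buf);	// hypothetical: CPU writes through the cache
 *	z_xtensa_cache_flush(tx_buf, sizeof(tx_buf));
 *	// ...now safe to start a DMA read of tx_buf...
 */
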
/* Write back and then invalidate cache lines overlapping
 * [addr, addr + bytes): memory is made current and the stale lines
 * are dropped in one pass.
 */
static ALWAYS_INLINE void z_xtensa_cache_flush_inv(void *addr, size_t bytes)
{
#if XCHAL_DCACHE_SIZE
	size_t step = XCHAL_DCACHE_LINESIZE;
	size_t first = ROUND_DOWN(addr, step);
	size_t last = ROUND_UP(((long)addr) + bytes, step);
	size_t line;

	for (line = first; bytes && line < last; line += step) {
		__asm__ volatile("dhwbi %0, 0" :: "r"(line));
	}
#endif
}

/* Invalidate cache lines overlapping [addr, addr + bytes) without
 * writing them back; any CPU-side modifications are discarded.
 */
static ALWAYS_INLINE void z_xtensa_cache_inv(void *addr, size_t bytes)
{
#if XCHAL_DCACHE_SIZE
	size_t step = XCHAL_DCACHE_LINESIZE;
	size_t first = ROUND_DOWN(addr, step);
	size_t last = ROUND_UP(((long)addr) + bytes, step);
	size_t line;

	for (line = first; bytes && line < last; line += step) {
		__asm__ volatile("dhi %0, 0" :: "r"(line));
	}
#endif
}

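/*
 * Illustrative sketch (not part of the original header): invalidation
 * is the mirror image of the flush example above, used before the CPU
 * reads a buffer that a device wrote behind the cache.  Names are
 * hypothetical.
 *
 *	uint8_t rx_buf[256];
 *
 *	// ...DMA write into rx_buf completes...
 *	z_xtensa_cache_inv(rx_buf, sizeof(rx_buf));
 *	consume_buffer(rx_buf);	// hypothetical: CPU reads fresh data
 */
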
/* Whole-cache variants: walk one way's worth of address space
 * (Z_DCACHE_MAX bytes) starting at address 0.
 */
static ALWAYS_INLINE void z_xtensa_cache_inv_all(void)
{
	z_xtensa_cache_inv(NULL, Z_DCACHE_MAX);
}

static ALWAYS_INLINE void z_xtensa_cache_flush_all(void)
{
	z_xtensa_cache_flush(NULL, Z_DCACHE_MAX);
}

static ALWAYS_INLINE void z_xtensa_cache_flush_inv_all(void)
{
	z_xtensa_cache_flush_inv(NULL, Z_DCACHE_MAX);
}

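/*
 * Illustrative sketch (assumption, not from this header): the
 * whole-cache forms typically appear at coarse state transitions,
 * e.g. writing everything back before a core is powered down:
 *
 *	z_xtensa_cache_flush_all();
 *	// ...enter the low-power state...
 */
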
#ifdef CONFIG_ARCH_HAS_COHERENCE
/* True when ptr falls in the uncached region: the top three address
 * bits select one of eight 512 MB regions.
 */
static inline bool arch_mem_coherent(void *ptr)
{
	size_t addr = (size_t) ptr;

	return (addr >> 29) == CONFIG_XTENSA_UNCACHED_REGION;
}
#endif

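/*
 * Worked example (region number assumed for illustration): with
 * CONFIG_XTENSA_UNCACHED_REGION == 2, the coherent window is
 * 0x40000000-0x5fffffff:
 *
 *	arch_mem_coherent((void *)0x40001000);	// 0x40001000 >> 29 == 2 -> true
 *	arch_mem_coherent((void *)0x9ff00000);	// 0x9ff00000 >> 29 == 4 -> false
 */
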
static ALWAYS_INLINE uint32_t z_xtrpoflip(uint32_t addr, uint32_t rto, uint32_t rfrom)
{
	/* The math here is all compile-time: when the two regions
	 * differ by a power of two, we can convert between them by
	 * setting or clearing just one bit.  Otherwise it needs two
	 * operations.
	 */
	uint32_t rxor = (rto ^ rfrom) << 29;

	rto <<= 29;
	if (Z_IS_POW2(rxor)) {
		if ((rxor & rto) == 0) {
			return addr & ~rxor;
		} else {
			return addr | rxor;
		}
	} else {
		return (addr & ~(7U << 29)) | rto;
	}
}

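/*
 * Worked example (region numbers assumed for illustration): converting
 * region 4 to region 5 differs in a single bit, so one OR suffices;
 * region 5 to region 2 does not, so the region bits are masked off and
 * replaced:
 *
 *	// rxor = (5 ^ 4) << 29 = 0x20000000 (power of two, bit set in rto)
 *	z_xtrpoflip(0x80001000, 5, 4);	// -> 0x80001000 | 0x20000000 = 0xa0001000
 *
 *	// rxor = (2 ^ 5) << 29 = 0xe0000000 (not a power of two)
 *	z_xtrpoflip(0xa0001000, 2, 5);	// -> (0xa0001000 & 0x1fffffff) | 0x40000000
 *					//    = 0x40001000
 */
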
/* Return a cached pointer to a RAM address. */
static inline void __sparse_cache *arch_xtensa_cached_ptr(void *ptr)
{
	return (__sparse_force void __sparse_cache *)z_xtrpoflip((uint32_t) ptr,
						CONFIG_XTENSA_CACHED_REGION,
						CONFIG_XTENSA_UNCACHED_REGION);
}

/* Return an uncached pointer to a RAM address. */
static inline void *arch_xtensa_uncached_ptr(void __sparse_cache *ptr)
{
	return (void *)z_xtrpoflip((uint32_t) ptr,
				   CONFIG_XTENSA_UNCACHED_REGION,
				   CONFIG_XTENSA_CACHED_REGION);
}

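/*
 * Illustrative sketch (not part of the original header): the two
 * helpers convert between the cached and uncached aliases of the same
 * physical RAM.  The variable names are hypothetical.
 *
 *	void *buf = ...;			 // pointer into shared RAM
 *	void __sparse_cache *cp = arch_xtensa_cached_ptr(buf);
 *	void *up = arch_xtensa_uncached_ptr(cp); // uncached alias of buf
 */
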
/* Utility to generate an unrolled and optimal[1] code sequence to set
 * the RPO TLB registers (contra the HAL cacheattr macros, which
 * generate larger code and can't be called from C), based on the
 * KERNEL_COHERENCE configuration in use.  Selects RPO attribute "2"
 * for regions (including MMIO registers in region zero) which want to
 * bypass L1, "4" for the cached region which wants writeback, and
 * "15" (invalid) elsewhere.
 *
 * Note that on cores that have the "translation" option set, we need
 * to put an identity mapping in the high bits.  Also per spec,
 * changing the current code region (by definition cached) requires
 * that WITLB be followed by an ISYNC and that both instructions live
 * in the same cache line (two 3-byte instructions fit in an 8-byte
 * aligned region, so that's guaranteed not to cross a cache line
 * boundary).
 *
 * [1] With the sole exception of gcc's infuriating insistence on
 * emitting a precomputed literal for addr + addrincr instead of
 * computing it with a single ADD instruction from values it already
 * has in registers.  Explicitly assigning the variables to registers
 * via an attribute works, but then emits needless MOV instructions
 * instead.  I tell myself it's just 32 bytes of .text, but...  Sigh.
 */
#define _REGION_ATTR(r) \
	((r) == 0 ? 2 : \
	 ((r) == CONFIG_XTENSA_CACHED_REGION ? 4 : \
	  ((r) == CONFIG_XTENSA_UNCACHED_REGION ? 2 : 15)))

#define _SET_ONE_TLB(region) do { \
	uint32_t attr = _REGION_ATTR(region); \
	if (XCHAL_HAVE_XLT_CACHEATTR) { \
		attr |= addr; /* RPO with translation */ \
	} \
	if (region != CONFIG_XTENSA_CACHED_REGION) { \
		__asm__ volatile("wdtlb %0, %1; witlb %0, %1" \
				 :: "r"(attr), "r"(addr)); \
	} else { \
		__asm__ volatile("wdtlb %0, %1" \
				 :: "r"(attr), "r"(addr)); \
		__asm__ volatile("j 1f; .align 8; 1:"); \
		__asm__ volatile("witlb %0, %1; isync" \
				 :: "r"(attr), "r"(addr)); \
	} \
	addr += addrincr; \
} while (0)

#define ARCH_XTENSA_SET_RPO_TLB() do { \
	register uint32_t addr = 0, addrincr = 0x20000000; \
	FOR_EACH(_SET_ONE_TLB, (;), 0, 1, 2, 3, 4, 5, 6, 7); \
} while (0)

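/*
 * For reference, the FOR_EACH() above simply unrolls to one
 * _SET_ONE_TLB() per 512 MB region:
 *
 *	_SET_ONE_TLB(0); _SET_ONE_TLB(1); ... _SET_ONE_TLB(7);
 *
 * with addr stepping 0x00000000, 0x20000000, ..., 0xe0000000.  The
 * macro is expected to run once from early arch init code, before
 * anything depends on the mapping it installs (an assumption; the
 * call sites live outside this header).
 */
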
#ifdef __cplusplus
} /* extern "C" */
#endif

#endif /* ZEPHYR_INCLUDE_ARCH_XTENSA_CACHE_H_ */