/*
 * Zephyr Project API 3.4.0 — A Scalable Open Source RTOS
 * mem_manage.h (extracted documentation page)
 */
1/*
2 * Copyright (c) 2020 Intel Corporation
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7#ifndef ZEPHYR_INCLUDE_SYS_MEM_MANAGE_H
8#define ZEPHYR_INCLUDE_SYS_MEM_MANAGE_H
9
#include <zephyr/sys/util.h>
#include <zephyr/toolchain.h>
#if defined(CONFIG_ARM_MMU) && defined(CONFIG_ARM64)
#include <zephyr/arch/arm64/arm_mem.h>
#endif
15
24/*
25 * Caching mode definitions. These are mutually exclusive.
26 */
27
29#define K_MEM_CACHE_NONE 2
30
32#define K_MEM_CACHE_WT 1
33
35#define K_MEM_CACHE_WB 0
36
37/*
38 * ARM64 Specific flags are defined in arch/arm64/arm_mem.h,
39 * pay attention to be not conflicted when updating these flags.
40 */
41
43#define K_MEM_CACHE_MASK (BIT(3) - 1)
44
45/*
46 * Region permission attributes. Default is read-only, no user, no exec
47 */
48
50#define K_MEM_PERM_RW BIT(3)
51
53#define K_MEM_PERM_EXEC BIT(4)
54
56#define K_MEM_PERM_USER BIT(5)
57
58/*
59 * Region mapping behaviour attributes
60 */
61
63#define K_MEM_DIRECT_MAP BIT(6)
64
65/*
66 * This is the offset to subtract from a virtual address mapped in the
67 * kernel's permanent mapping of RAM, to obtain its physical address.
68 *
69 * virt_addr = phys_addr + Z_MEM_VM_OFFSET
70 *
71 * This only works for virtual addresses within the interval
72 * [CONFIG_KERNEL_VM_BASE, CONFIG_KERNEL_VM_BASE + (CONFIG_SRAM_SIZE * 1024)).
73 *
74 * These macros are intended for assembly, linker code, and static initializers.
75 * Use with care.
76 *
77 * Note that when demand paging is active, these will only work with page
78 * frames that are pinned to their virtual mapping at boot.
79 *
80 * TODO: This will likely need to move to an arch API or need additional
81 * constraints defined.
82 */
83#ifdef CONFIG_MMU
84#define Z_MEM_VM_OFFSET ((CONFIG_KERNEL_VM_BASE + CONFIG_KERNEL_VM_OFFSET) - \
85 (CONFIG_SRAM_BASE_ADDRESS + CONFIG_SRAM_OFFSET))
86#else
87#define Z_MEM_VM_OFFSET 0
88#endif
89
90#define Z_MEM_PHYS_ADDR(virt) ((virt) - Z_MEM_VM_OFFSET)
91#define Z_MEM_VIRT_ADDR(phys) ((phys) + Z_MEM_VM_OFFSET)
92
93#if Z_MEM_VM_OFFSET != 0
94#define Z_VM_KERNEL 1
95#ifdef CONFIG_XIP
96#error "XIP and a virtual memory kernel are not allowed"
97#endif
98#endif
99
100#ifndef _ASMLANGUAGE
101#include <stdint.h>
102#include <stddef.h>
103#include <inttypes.h>
104#include <zephyr/sys/__assert.h>
105
/**
 * Demand-paging statistics, filled in by k_mem_paging_stats_get() and
 * k_mem_paging_thread_stats_get().
 *
 * NOTE(review): the extraction dropped the opening declaration line of this
 * struct; restored from the file's own cross-reference (mem_manage.h:106).
 * All members are compiled in only when CONFIG_DEMAND_PAGING_STATS is set.
 */
struct k_mem_paging_stats_t {
#ifdef CONFIG_DEMAND_PAGING_STATS
	/* Page fault counters */
	struct {
		/* Total number of page faults */
		unsigned long cnt;

		/* Page faults taken with interrupts locked */
		unsigned long irq_locked;

		/* Page faults taken with interrupts unlocked */
		unsigned long irq_unlocked;

#ifndef CONFIG_DEMAND_PAGING_ALLOW_IRQ
		/* Page faults that occurred in ISR context; only tracked
		 * when page faults in IRQs are disallowed.
		 */
		unsigned long in_isr;
#endif
	} pagefaults;

	/* Page frame eviction counters */
	struct {
		/* Evicted page frames that were clean (no write-back) */
		unsigned long clean;

		/* Evicted page frames that were dirty (required page-out) */
		unsigned long dirty;
	} eviction;
#endif /* CONFIG_DEMAND_PAGING_STATS */
};
133
/**
 * Demand-paging timing histogram, filled in by the
 * k_mem_paging_histogram_*_get() family of syscalls.
 *
 * NOTE(review): the extraction dropped the opening declaration line of this
 * struct; restored from the file's own cross-reference (mem_manage.h:134).
 */
struct k_mem_paging_histogram_t {
#ifdef CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM
	/* Counts for each bin in timing histogram */
	unsigned long counts[CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM_NUM_BINS];

	/* Bounds for the bins in timing histogram,
	 * excluding the first and last (hence, NUM_SLOTS - 1).
	 */
	unsigned long bounds[CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM_NUM_BINS];
#endif /* CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM */
};
145
146/* Just like Z_MEM_PHYS_ADDR() but with type safety and assertions */
static inline uintptr_t z_mem_phys_addr(void *virt)
{
	uintptr_t addr = (uintptr_t)virt;

#ifdef CONFIG_MMU
	/* Virtual kernel: the linear Z_MEM_VM_OFFSET translation is only
	 * valid for addresses inside the kernel's permanent RAM mapping.
	 */
	__ASSERT((addr >= CONFIG_KERNEL_VM_BASE) &&
		 (addr < (CONFIG_KERNEL_VM_BASE +
			  (CONFIG_KERNEL_VM_SIZE))),
		 "address %p not in permanent mappings", virt);
#else
	/* Should be identity-mapped; check the address lies within RAM */
	__ASSERT((addr >= CONFIG_SRAM_BASE_ADDRESS) &&
		 (addr < (CONFIG_SRAM_BASE_ADDRESS +
			  (CONFIG_SRAM_SIZE * 1024UL))),
		 "physical address 0x%lx not in RAM",
		 (unsigned long)addr);
#endif /* CONFIG_MMU */

	/* TODO add assertion that this page is pinned to boot mapping,
	 * the above checks won't be sufficient with demand paging
	 */

	return Z_MEM_PHYS_ADDR(addr);
}
171
172/* Just like Z_MEM_VIRT_ADDR() but with type safety and assertions */
static inline void *z_mem_virt_addr(uintptr_t phys)
{
	/* Only physical addresses within system RAM have a permanent
	 * virtual mapping that the Z_MEM_VM_OFFSET translation covers.
	 */
	__ASSERT((phys >= CONFIG_SRAM_BASE_ADDRESS) &&
		 (phys < (CONFIG_SRAM_BASE_ADDRESS +
			  (CONFIG_SRAM_SIZE * 1024UL))),
		 "physical address 0x%lx not in RAM", (unsigned long)phys);

	/* TODO add assertion that this page frame is pinned to boot mapping,
	 * the above check won't be sufficient with demand paging
	 */

	return (void *)Z_MEM_VIRT_ADDR(phys);
}
186
187#ifdef __cplusplus
188extern "C" {
189#endif
190
void z_phys_map(uint8_t **virt_ptr, uintptr_t phys, size_t size,
		uint32_t flags);

262void z_phys_unmap(uint8_t *virt, size_t size);
263
264/*
265 * k_mem_map() control flags
266 */
267
277#define K_MEM_MAP_UNINIT BIT(16)
278
286#define K_MEM_MAP_LOCK BIT(17)
287
299size_t k_mem_free_get(void);
300
340void *k_mem_map(size_t size, uint32_t flags);
341
355void k_mem_unmap(void *addr, size_t size);
356
370size_t k_mem_region_align(uintptr_t *aligned_addr, size_t *aligned_size,
371 uintptr_t addr, size_t size, size_t align);
372
406int k_mem_page_out(void *addr, size_t size);
407
421void k_mem_page_in(void *addr, size_t size);
422
436void k_mem_pin(void *addr, size_t size);
437
448void k_mem_unpin(void *addr, size_t size);
449
458__syscall void k_mem_paging_stats_get(struct k_mem_paging_stats_t *stats);
459
460struct k_thread;
__syscall
void k_mem_paging_thread_stats_get(struct k_thread *thread,
				   struct k_mem_paging_stats_t *stats);
473
__syscall void k_mem_paging_histogram_eviction_get(
	struct k_mem_paging_histogram_t *hist);
484
__syscall void k_mem_paging_histogram_backing_store_page_in_get(
	struct k_mem_paging_histogram_t *hist);
495
__syscall void k_mem_paging_histogram_backing_store_page_out_get(
	struct k_mem_paging_histogram_t *hist);
506
507#include <syscalls/mem_manage.h>
508
533struct z_page_frame *k_mem_paging_eviction_select(bool *dirty);
534
void k_mem_paging_eviction_init(void);

589int k_mem_paging_backing_store_location_get(struct z_page_frame *pf,
590 uintptr_t *location,
591 bool page_fault);
592
void k_mem_paging_backing_store_location_free(uintptr_t location);

void k_mem_paging_backing_store_page_out(uintptr_t location);

void k_mem_paging_backing_store_page_in(uintptr_t location);

651void k_mem_paging_backing_store_page_finalize(struct z_page_frame *pf,
652 uintptr_t location);
653
void k_mem_paging_backing_store_init(void);

671#ifdef __cplusplus
672}
673#endif
674
675#endif /* !_ASMLANGUAGE */
676#endif /* ZEPHYR_INCLUDE_SYS_MEM_MANAGE_H */
static struct k_thread thread[2]
Definition: atomic.c:26
void k_mem_paging_backing_store_page_finalize(struct z_page_frame *pf, uintptr_t location)
void k_mem_paging_backing_store_page_out(uintptr_t location)
void k_mem_paging_backing_store_location_free(uintptr_t location)
void k_mem_paging_backing_store_init(void)
void k_mem_paging_backing_store_page_in(uintptr_t location)
int k_mem_paging_backing_store_location_get(struct z_page_frame *pf, uintptr_t *location, bool page_fault)
struct z_page_frame * k_mem_paging_eviction_select(bool *dirty)
void k_mem_paging_eviction_init(void)
int k_mem_page_out(void *addr, size_t size)
void k_mem_paging_histogram_backing_store_page_in_get(struct k_mem_paging_histogram_t *hist)
void k_mem_unpin(void *addr, size_t size)
void k_mem_paging_stats_get(struct k_mem_paging_stats_t *stats)
void k_mem_pin(void *addr, size_t size)
void k_mem_page_in(void *addr, size_t size)
void k_mem_paging_histogram_backing_store_page_out_get(struct k_mem_paging_histogram_t *hist)
void k_mem_paging_histogram_eviction_get(struct k_mem_paging_histogram_t *hist)
void k_mem_paging_thread_stats_get(struct k_thread *thread, struct k_mem_paging_stats_t *stats)
size_t k_mem_free_get(void)
size_t k_mem_region_align(uintptr_t *aligned_addr, size_t *aligned_size, uintptr_t addr, size_t size, size_t align)
void k_mem_unmap(void *addr, size_t size)
void * k_mem_map(size_t size, uint32_t flags)
flags
Definition: parser.h:96
__UINT32_TYPE__ uint32_t
Definition: stdint.h:90
__UINT8_TYPE__ uint8_t
Definition: stdint.h:88
__UINTPTR_TYPE__ uintptr_t
Definition: stdint.h:105
Definition: mem_manage.h:134
Definition: mem_manage.h:106
Definition: thread.h:245
Macros to abstract toolchain specific capabilities.
Misc utilities.