Zephyr Project API  3.3.0
A Scalable Open Source RTOS
mem_manage.h
Go to the documentation of this file.
1/*
2 * Copyright (c) 2020 Intel Corporation
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7#ifndef ZEPHYR_INCLUDE_SYS_MEM_MANAGE_H
8#define ZEPHYR_INCLUDE_SYS_MEM_MANAGE_H
9
10#include <zephyr/sys/util.h>
11#include <zephyr/toolchain.h>
12#if defined(CONFIG_ARM_MMU) && defined(CONFIG_ARM64)
14#endif
15
/*
 * Caching mode definitions. These are mutually exclusive.
 */

/** No caching. Most drivers want this. */
#define K_MEM_CACHE_NONE 2

/** Write-through caching. Used by certain drivers. */
#define K_MEM_CACHE_WT 1

/** Full write-back caching. Any RAM mapped wants this. */
#define K_MEM_CACHE_WB 0

/*
 * ARM64 Specific flags are defined in arch/arm64/arm_mem.h,
 * pay attention to be not conflicted when updating these flags.
 */

/** Reserved bits for caching modes in the flags argument */
#define K_MEM_CACHE_MASK (BIT(3) - 1)

/*
 * Region permission attributes. Default is read-only, no user, no exec
 */

/** Region will have read/write access (and not read-only) */
#define K_MEM_PERM_RW BIT(3)

/** Region will be executable (normally forbidden) */
#define K_MEM_PERM_EXEC BIT(4)

/** Region will be accessible to user mode (normally supervisor-only) */
#define K_MEM_PERM_USER BIT(5)

/*
 * This is the offset to subtract from a virtual address mapped in the
 * kernel's permanent mapping of RAM, to obtain its physical address.
 *
 *     virt_addr = phys_addr + Z_MEM_VM_OFFSET
 *
 * This only works for virtual addresses within the interval
 * [CONFIG_KERNEL_VM_BASE, CONFIG_KERNEL_VM_BASE + (CONFIG_SRAM_SIZE * 1024)).
 *
 * These macros are intended for assembly, linker code, and static initializers.
 * Use with care.
 *
 * Note that when demand paging is active, these will only work with page
 * frames that are pinned to their virtual mapping at boot.
 *
 * TODO: This will likely need to move to an arch API or need additional
 * constraints defined.
 */
#ifdef CONFIG_MMU
#define Z_MEM_VM_OFFSET	((CONFIG_KERNEL_VM_BASE + CONFIG_KERNEL_VM_OFFSET) - \
			 (CONFIG_SRAM_BASE_ADDRESS + CONFIG_SRAM_OFFSET))
#else
#define Z_MEM_VM_OFFSET	0
#endif

/* Convert between addresses in the kernel's permanent RAM mapping and
 * physical addresses (valid only within the interval described above).
 */
#define Z_MEM_PHYS_ADDR(virt)	((virt) - Z_MEM_VM_OFFSET)
#define Z_MEM_VIRT_ADDR(phys)	((phys) + Z_MEM_VM_OFFSET)

/* Z_VM_KERNEL is set when the kernel runs at a virtual address distinct
 * from physical RAM; this is incompatible with executing in place.
 */
#if Z_MEM_VM_OFFSET != 0
#define Z_VM_KERNEL 1
#ifdef CONFIG_XIP
#error "XIP and a virtual memory kernel are not allowed"
#endif
#endif

91
92#ifndef _ASMLANGUAGE
93#include <stdint.h>
94#include <stddef.h>
95#include <inttypes.h>
96#include <zephyr/sys/__assert.h>
97
/**
 * Paging statistics, as returned by k_mem_paging_stats_get().
 *
 * Counters are only maintained when CONFIG_DEMAND_PAGING_STATS is enabled;
 * otherwise the struct has no members.
 */
struct k_mem_paging_stats_t {
#ifdef CONFIG_DEMAND_PAGING_STATS
	struct {
		/** Number of page faults */
		unsigned long cnt;

		/** Number of page faults with IRQ locked */
		unsigned long irq_locked;

		/** Number of page faults with IRQ unlocked */
		unsigned long irq_unlocked;

#ifndef CONFIG_DEMAND_PAGING_ALLOW_IRQ
		/** Number of page faults while in ISR */
		unsigned long in_isr;
#endif
	} pagefaults;

	struct {
		/** Number of clean pages selected for eviction */
		unsigned long clean;

		/** Number of dirty pages selected for eviction */
		unsigned long dirty;
	} eviction;
#endif /* CONFIG_DEMAND_PAGING_STATS */
};

125
/**
 * Paging execution-time histogram, as returned by the
 * k_mem_paging_histogram_*_get() functions.
 *
 * Only populated when CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM is enabled.
 */
struct k_mem_paging_histogram_t {
#ifdef CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM
	/* Counts for each bin in timing histogram */
	unsigned long counts[CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM_NUM_BINS];

	/* Bounds for the bins in timing histogram,
	 * excluding the first and last (hence, NUM_SLOTS - 1).
	 */
	unsigned long bounds[CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM_NUM_BINS];
#endif /* CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM */
};

137
138/* Just like Z_MEM_PHYS_ADDR() but with type safety and assertions */
139static inline uintptr_t z_mem_phys_addr(void *virt)
140{
141 uintptr_t addr = (uintptr_t)virt;
142
143#ifdef CONFIG_MMU
144 __ASSERT((addr >= CONFIG_KERNEL_VM_BASE) &&
145 (addr < (CONFIG_KERNEL_VM_BASE +
146 (CONFIG_KERNEL_VM_SIZE))),
147 "address %p not in permanent mappings", virt);
148#else
149 /* Should be identity-mapped */
150 __ASSERT((addr >= CONFIG_SRAM_BASE_ADDRESS) &&
151 (addr < (CONFIG_SRAM_BASE_ADDRESS +
152 (CONFIG_SRAM_SIZE * 1024UL))),
153 "physical address 0x%lx not in RAM",
154 (unsigned long)addr);
155#endif /* CONFIG_MMU */
156
157 /* TODO add assertion that this page is pinned to boot mapping,
158 * the above checks won't be sufficient with demand paging
159 */
160
161 return Z_MEM_PHYS_ADDR(addr);
162}
163
164/* Just like Z_MEM_VIRT_ADDR() but with type safety and assertions */
165static inline void *z_mem_virt_addr(uintptr_t phys)
166{
167 __ASSERT((phys >= CONFIG_SRAM_BASE_ADDRESS) &&
168 (phys < (CONFIG_SRAM_BASE_ADDRESS +
169 (CONFIG_SRAM_SIZE * 1024UL))),
170 "physical address 0x%lx not in RAM", (unsigned long)phys);
171
172 /* TODO add assertion that this page frame is pinned to boot mapping,
173 * the above check won't be sufficient with demand paging
174 */
175
176 return (void *)Z_MEM_VIRT_ADDR(phys);
177}
178
#ifdef __cplusplus
extern "C" {
#endif

/**
 * Map a physical memory region into the kernel's virtual address space.
 *
 * Intended for mapping memory-mapped I/O (or other physical) regions;
 * the chosen virtual address is reported through @a virt_ptr.
 *
 * @param[out] virt_ptr Output virtual address storage location
 * @param phys Physical address base of the memory region
 * @param size Size of the memory region
 * @param flags Caching mode and access flags, see K_MEM_CACHE_* and
 *              K_MEM_PERM_* macros
 */
void z_phys_map(uint8_t **virt_ptr, uintptr_t phys, size_t size,
		uint32_t flags);

/**
 * Unmap a virtual memory region previously mapped with z_phys_map().
 *
 * Behavior is undefined for addresses not mapped by z_phys_map().
 *
 * @param virt Starting address of the virtual region to unmap
 * @param size Size of the virtual region
 */
void z_phys_unmap(uint8_t *virt, size_t size);

255
/*
 * k_mem_map() control flags
 */

/**
 * The mapped region is not guaranteed to be zeroed.
 *
 * This may improve performance; the associated page frames may contain
 * indeterminate data.
 */
#define K_MEM_MAP_UNINIT BIT(16)

/**
 * Region will be pinned in memory and never paged out.
 */
#define K_MEM_MAP_LOCK BIT(17)

/**
 * Return the amount of free memory available.
 *
 * The returned value will reflect how many free RAM page frames are
 * available. If demand paging is enabled, it may still be possible to
 * allocate more.
 *
 * @return Free physical RAM, in bytes
 */
size_t k_mem_free_get(void);

/**
 * Map anonymous memory into Zephyr's address space.
 *
 * @param size Size of the requested memory mapping (page-aligned)
 * @param flags K_MEM_CACHE_*, K_MEM_PERM_*, and K_MEM_MAP_* control flags
 * @return The mapped memory location, or NULL on failure
 */
void *k_mem_map(size_t size, uint32_t flags);

/**
 * Un-map mapped memory.
 *
 * May only be used on regions previously mapped with k_mem_map().
 *
 * @param addr Page-aligned memory region base virtual address
 * @param size Page-aligned memory region size
 */
void k_mem_unmap(void *addr, size_t size);

/**
 * Given an arbitrary region, provide an aligned region that covers it.
 *
 * The returned region will have both its base address and size aligned
 * to the provided alignment value.
 *
 * @param[out] aligned_addr Aligned base address
 * @param[out] aligned_size Aligned region size
 * @param addr Region base address
 * @param size Region size
 * @param align What to align the address and size to
 * @return Offset between aligned_addr and addr
 */
size_t k_mem_region_align(uintptr_t *aligned_addr, size_t *aligned_size,
			  uintptr_t addr, size_t size, size_t align);

/**
 * Evict a page-aligned virtual memory region to the backing store.
 *
 * @param addr Base page-aligned virtual address
 * @param size Page-aligned data region size
 * @retval 0 Success
 * @retval -ENOMEM Insufficient space in backing store
 */
int k_mem_page_out(void *addr, size_t size);

/**
 * Load a virtual data region into memory.
 *
 * @param addr Base page-aligned virtual address
 * @param size Page-aligned data region size
 */
void k_mem_page_in(void *addr, size_t size);

/**
 * Pin an aligned virtual data region, paging in as necessary.
 *
 * Pinned pages are never selected for eviction until unpinned.
 *
 * @param addr Base page-aligned virtual address
 * @param size Page-aligned data region size
 */
void k_mem_pin(void *addr, size_t size);

/**
 * Un-pin an aligned virtual data region.
 *
 * @param addr Base page-aligned virtual address
 * @param size Page-aligned data region size
 */
void k_mem_unpin(void *addr, size_t size);

450__syscall void k_mem_paging_stats_get(struct k_mem_paging_stats_t *stats);
451
452struct k_thread;
462__syscall
464 struct k_mem_paging_stats_t *stats);
465
475 struct k_mem_paging_histogram_t *hist);
476
486 struct k_mem_paging_histogram_t *hist);
487
497 struct k_mem_paging_histogram_t *hist);
498
499#include <syscalls/mem_manage.h>
500
/**
 * Eviction algorithm: select a page frame to evict.
 *
 * @param[out] dirty Whether the selected page frame is dirty
 * @return The page frame to evict
 */
struct z_page_frame *k_mem_paging_eviction_select(bool *dirty);

/**
 * Eviction algorithm: one-time initialization.
 */
void k_mem_paging_eviction_init(void);

/**
 * Backing store: reserve a storage location for the given page frame.
 *
 * @param pf Page frame that will be paged out
 * @param[out] location Reserved backing store location token
 * @param page_fault Whether this request originates from page fault handling
 * @retval 0 Success
 * @retval -ENOMEM Backing store is full
 */
int k_mem_paging_backing_store_location_get(struct z_page_frame *pf,
					    uintptr_t *location,
					    bool page_fault);

/**
 * Backing store: free a reserved storage location.
 *
 * @param location Location token previously obtained from
 *                 k_mem_paging_backing_store_location_get()
 */
void k_mem_paging_backing_store_location_free(uintptr_t location);

/**
 * Backing store: write the evicted data page out to @a location.
 */
void k_mem_paging_backing_store_page_out(uintptr_t location);

/**
 * Backing store: read a data page back in from @a location.
 */
void k_mem_paging_backing_store_page_in(uintptr_t location);

/**
 * Backing store: update page frame bookkeeping after a page-in completes.
 *
 * @param pf Page frame that was paged in
 * @param location Backing store location the data came from
 */
void k_mem_paging_backing_store_page_finalize(struct z_page_frame *pf,
					      uintptr_t location);

/**
 * Backing store: one-time initialization.
 */
void k_mem_paging_backing_store_init(void);

663#ifdef __cplusplus
664}
665#endif
666
667#endif /* !_ASMLANGUAGE */
668#endif /* ZEPHYR_INCLUDE_SYS_MEM_MANAGE_H */
static struct k_thread thread[2]
Definition: atomic.c:26
void k_mem_paging_backing_store_page_finalize(struct z_page_frame *pf, uintptr_t location)
void k_mem_paging_backing_store_page_out(uintptr_t location)
void k_mem_paging_backing_store_location_free(uintptr_t location)
void k_mem_paging_backing_store_init(void)
void k_mem_paging_backing_store_page_in(uintptr_t location)
int k_mem_paging_backing_store_location_get(struct z_page_frame *pf, uintptr_t *location, bool page_fault)
struct z_page_frame * k_mem_paging_eviction_select(bool *dirty)
void k_mem_paging_eviction_init(void)
int k_mem_page_out(void *addr, size_t size)
void k_mem_paging_histogram_backing_store_page_in_get(struct k_mem_paging_histogram_t *hist)
void k_mem_unpin(void *addr, size_t size)
void k_mem_paging_stats_get(struct k_mem_paging_stats_t *stats)
void k_mem_pin(void *addr, size_t size)
void k_mem_page_in(void *addr, size_t size)
void k_mem_paging_histogram_backing_store_page_out_get(struct k_mem_paging_histogram_t *hist)
void k_mem_paging_histogram_eviction_get(struct k_mem_paging_histogram_t *hist)
void k_mem_paging_thread_stats_get(struct k_thread *thread, struct k_mem_paging_stats_t *stats)
size_t k_mem_free_get(void)
size_t k_mem_region_align(uintptr_t *aligned_addr, size_t *aligned_size, uintptr_t addr, size_t size, size_t align)
void k_mem_unmap(void *addr, size_t size)
void * k_mem_map(size_t size, uint32_t flags)
flags
Definition: parser.h:96
__UINT32_TYPE__ uint32_t
Definition: stdint.h:90
__UINT8_TYPE__ uint8_t
Definition: stdint.h:88
__UINTPTR_TYPE__ uintptr_t
Definition: stdint.h:105
Definition: mem_manage.h:126
Definition: mem_manage.h:98
Definition: thread.h:245
Macros to abstract toolchain specific capabilities.
Misc utilities.