Zephyr Project API 3.5.0
A Scalable Open Source RTOS
Loading...
Searching...
No Matches
mem_manage.h
Go to the documentation of this file.
1/*
2 * Copyright (c) 2020 Intel Corporation
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7#ifndef ZEPHYR_INCLUDE_SYS_MEM_MANAGE_H
8#define ZEPHYR_INCLUDE_SYS_MEM_MANAGE_H
9
10#include <zephyr/sys/util.h>
11#include <zephyr/toolchain.h>
12#if defined(CONFIG_ARM_MMU) && defined(CONFIG_ARM64)
14#endif
15
/*
 * Caching mode definitions. These are mutually exclusive.
 */

/** No caching */
#define K_MEM_CACHE_NONE 2

/** Write-through caching */
#define K_MEM_CACHE_WT 1

/** Full write-back caching */
#define K_MEM_CACHE_WB 0

/*
 * ARM64-specific flags are defined in arch/arm64/arm_mem.h;
 * take care to avoid conflicts when updating these flags.
 */

/** Bits reserved for cache modes (low 3 bits of the flags value) */
#define K_MEM_CACHE_MASK (BIT(3) - 1)
44
/*
 * Region permission attributes. Default is read-only, no user, no exec
 */

/** Region will have read/write access (and not read-only) */
#define K_MEM_PERM_RW BIT(3)

/** Region will be executable (not the default) */
#define K_MEM_PERM_EXEC BIT(4)

/** Region will be accessible to user mode (not the default) */
#define K_MEM_PERM_USER BIT(5)

/*
 * Region mapping behaviour attributes
 */

/** Region will be direct-mapped (virtual address equals physical) */
#define K_MEM_DIRECT_MAP BIT(6)
64
65/*
66 * This is the offset to subtract from a virtual address mapped in the
67 * kernel's permanent mapping of RAM, to obtain its physical address.
68 *
69 * virt_addr = phys_addr + Z_MEM_VM_OFFSET
70 *
71 * This only works for virtual addresses within the interval
72 * [CONFIG_KERNEL_VM_BASE, CONFIG_KERNEL_VM_BASE + (CONFIG_SRAM_SIZE * 1024)).
73 *
74 * These macros are intended for assembly, linker code, and static initializers.
75 * Use with care.
76 *
77 * Note that when demand paging is active, these will only work with page
78 * frames that are pinned to their virtual mapping at boot.
79 *
80 * TODO: This will likely need to move to an arch API or need additional
81 * constraints defined.
82 */
#ifdef CONFIG_MMU
/* Virtual-to-physical offset of the kernel's permanent RAM mapping:
 * virt_addr = phys_addr + Z_MEM_VM_OFFSET (see block comment above).
 */
#define Z_MEM_VM_OFFSET ((CONFIG_KERNEL_VM_BASE + CONFIG_KERNEL_VM_OFFSET) - \
			 (CONFIG_SRAM_BASE_ADDRESS + CONFIG_SRAM_OFFSET))
#else
/* No MMU: RAM is identity-mapped, virtual and physical addresses coincide */
#define Z_MEM_VM_OFFSET 0
#endif

/* Unchecked virtual<->physical conversions for the permanent RAM mapping.
 * For bounds-checked versions see z_mem_phys_addr()/z_mem_virt_addr().
 */
#define Z_MEM_PHYS_ADDR(virt) ((virt) - Z_MEM_VM_OFFSET)
#define Z_MEM_VIRT_ADDR(phys) ((phys) + Z_MEM_VM_OFFSET)

#if Z_MEM_VM_OFFSET != 0
/* Kernel runs at a virtual address distinct from physical RAM */
#define Z_VM_KERNEL 1
#ifdef CONFIG_XIP
#error "XIP and a virtual memory kernel are not allowed"
#endif
#endif
99
100#ifndef _ASMLANGUAGE
101#include <stdint.h>
102#include <stddef.h>
103#include <inttypes.h>
104#include <zephyr/sys/__assert.h>
105
107#ifdef CONFIG_DEMAND_PAGING_STATS
108 struct {
110 unsigned long cnt;
111
113 unsigned long irq_locked;
114
116 unsigned long irq_unlocked;
117
118#ifndef CONFIG_DEMAND_PAGING_ALLOW_IRQ
120 unsigned long in_isr;
121#endif
122 } pagefaults;
123
124 struct {
126 unsigned long clean;
127
129 unsigned long dirty;
130 } eviction;
131#endif /* CONFIG_DEMAND_PAGING_STATS */
132};
133
135#ifdef CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM
136 /* Counts for each bin in timing histogram */
137 unsigned long counts[CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM_NUM_BINS];
138
139 /* Bounds for the bins in timing histogram,
140 * excluding the first and last (hence, NUM_SLOTS - 1).
141 */
142 unsigned long bounds[CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM_NUM_BINS];
143#endif /* CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM */
144};
145
/* Just like Z_MEM_PHYS_ADDR() but with type safety and assertions.
 *
 * Translate a virtual address within the kernel's permanent mapping of
 * RAM to its physical address.
 *
 * @param virt Virtual address to translate; must lie inside the boot-time
 *             RAM mapping (asserted below)
 * @return Physical address corresponding to @p virt
 */
static inline uintptr_t z_mem_phys_addr(void *virt)
{
	uintptr_t addr = (uintptr_t)virt;

#ifdef CONFIG_MMU
	/* Bounds-check against the kernel's virtual region. Each comparison
	 * is compiled in only when non-trivial: an unsigned >= 0 check, or
	 * an upper bound that evaluates to zero, would be tautological and
	 * draw compiler warnings, hence the preprocessor guards.
	 */
	__ASSERT(
#if CONFIG_KERNEL_VM_BASE != 0
		 (addr >= CONFIG_KERNEL_VM_BASE) &&
#endif
#if (CONFIG_KERNEL_VM_BASE + CONFIG_KERNEL_VM_SIZE) != 0
		 (addr < (CONFIG_KERNEL_VM_BASE +
			  (CONFIG_KERNEL_VM_SIZE))),
#else
		 /* Upper bound is zero: no address can pass the range
		  * check, so fail unconditionally.
		  */
		 false,
#endif
		 "address %p not in permanent mappings", virt);
#else
	/* Should be identity-mapped; check against physical SRAM bounds */
	__ASSERT(
#if CONFIG_SRAM_BASE_ADDRESS != 0
		 (addr >= CONFIG_SRAM_BASE_ADDRESS) &&
#endif
#if (CONFIG_SRAM_BASE_ADDRESS + (CONFIG_SRAM_SIZE * 1024UL)) != 0
		 (addr < (CONFIG_SRAM_BASE_ADDRESS +
			  (CONFIG_SRAM_SIZE * 1024UL))),
#else
		 false,
#endif
		 "physical address 0x%lx not in RAM",
		 (unsigned long)addr);
#endif /* CONFIG_MMU */

	/* TODO add assertion that this page is pinned to boot mapping,
	 * the above checks won't be sufficient with demand paging
	 */

	return Z_MEM_PHYS_ADDR(addr);
}
185
/* Just like Z_MEM_VIRT_ADDR() but with type safety and assertions.
 *
 * Translate a physical RAM address to its virtual address in the
 * kernel's permanent mapping of RAM.
 *
 * @param phys Physical address to translate; must lie inside SRAM
 *             (asserted below)
 * @return Virtual address corresponding to @p phys
 */
static inline void *z_mem_virt_addr(uintptr_t phys)
{
	/* Each comparison is guarded by the preprocessor so it is only
	 * emitted when non-trivial (see z_mem_phys_addr() for rationale).
	 */
	__ASSERT(
#if CONFIG_SRAM_BASE_ADDRESS != 0
		 (phys >= CONFIG_SRAM_BASE_ADDRESS) &&
#endif
#if (CONFIG_SRAM_BASE_ADDRESS + (CONFIG_SRAM_SIZE * 1024UL)) != 0
		 (phys < (CONFIG_SRAM_BASE_ADDRESS +
			  (CONFIG_SRAM_SIZE * 1024UL))),
#else
		 false,
#endif
		 "physical address 0x%lx not in RAM", (unsigned long)phys);

	/* TODO add assertion that this page frame is pinned to boot mapping,
	 * the above check won't be sufficient with demand paging
	 */

	return (void *)Z_MEM_VIRT_ADDR(phys);
}
207
208#ifdef __cplusplus
209extern "C" {
210#endif
211
253void z_phys_map(uint8_t **virt_ptr, uintptr_t phys, size_t size,
255
283void z_phys_unmap(uint8_t *virt, size_t size);
284
/*
 * k_mem_map() control flags
 */

/** The mapped region is not guaranteed to be zero-initialized */
#define K_MEM_MAP_UNINIT BIT(16)

/** Region will be pinned in memory and never paged out */
#define K_MEM_MAP_LOCK BIT(17)
308
320size_t k_mem_free_get(void);
321
361void *k_mem_map(size_t size, uint32_t flags);
362
376void k_mem_unmap(void *addr, size_t size);
377
391size_t k_mem_region_align(uintptr_t *aligned_addr, size_t *aligned_size,
392 uintptr_t addr, size_t size, size_t align);
393
427int k_mem_page_out(void *addr, size_t size);
428
442void k_mem_page_in(void *addr, size_t size);
443
457void k_mem_pin(void *addr, size_t size);
458
469void k_mem_unpin(void *addr, size_t size);
470
479__syscall void k_mem_paging_stats_get(struct k_mem_paging_stats_t *stats);
480
481struct k_thread;
491__syscall
493 struct k_mem_paging_stats_t *stats);
494
504 struct k_mem_paging_histogram_t *hist);
505
515 struct k_mem_paging_histogram_t *hist);
516
526 struct k_mem_paging_histogram_t *hist);
527
528#include <syscalls/mem_manage.h>
529
554struct z_page_frame *k_mem_paging_eviction_select(bool *dirty);
555
564
610int k_mem_paging_backing_store_location_get(struct z_page_frame *pf,
611 uintptr_t *location,
612 bool page_fault);
613
625
638
651
672void k_mem_paging_backing_store_page_finalize(struct z_page_frame *pf,
673 uintptr_t location);
674
689
692#ifdef __cplusplus
693}
694#endif
695
696#endif /* !_ASMLANGUAGE */
697#endif /* ZEPHYR_INCLUDE_SYS_MEM_MANAGE_H */
static struct k_thread thread[2]
Definition atomic.c:26
void k_mem_paging_backing_store_page_finalize(struct z_page_frame *pf, uintptr_t location)
Update internal accounting after a page-in.
void k_mem_paging_backing_store_page_out(uintptr_t location)
Copy a data page from Z_SCRATCH_PAGE to the specified location.
void k_mem_paging_backing_store_location_free(uintptr_t location)
Free a backing store location.
void k_mem_paging_backing_store_init(void)
Backing store initialization function.
void k_mem_paging_backing_store_page_in(uintptr_t location)
Copy a data page from the provided location to Z_SCRATCH_PAGE.
int k_mem_paging_backing_store_location_get(struct z_page_frame *pf, uintptr_t *location, bool page_fault)
Reserve or fetch a storage location for a data page loaded into a page frame.
struct z_page_frame * k_mem_paging_eviction_select(bool *dirty)
Select a page frame for eviction.
void k_mem_paging_eviction_init(void)
Initialization function.
int k_mem_page_out(void *addr, size_t size)
Evict a page-aligned virtual memory region to the backing store.
void k_mem_paging_histogram_backing_store_page_in_get(struct k_mem_paging_histogram_t *hist)
Get the backing store page-in timing histogram.
void k_mem_unpin(void *addr, size_t size)
Un-pin an aligned virtual data region.
void k_mem_paging_stats_get(struct k_mem_paging_stats_t *stats)
Get the paging statistics since system startup.
void k_mem_pin(void *addr, size_t size)
Pin an aligned virtual data region, paging in as necessary.
void k_mem_page_in(void *addr, size_t size)
Load a virtual data region into memory.
void k_mem_paging_histogram_backing_store_page_out_get(struct k_mem_paging_histogram_t *hist)
Get the backing store page-out timing histogram.
void k_mem_paging_histogram_eviction_get(struct k_mem_paging_histogram_t *hist)
Get the eviction timing histogram.
void k_mem_paging_thread_stats_get(struct k_thread *thread, struct k_mem_paging_stats_t *stats)
Get the paging statistics since system startup for a thread.
size_t k_mem_free_get(void)
Return the amount of free memory available.
size_t k_mem_region_align(uintptr_t *aligned_addr, size_t *aligned_size, uintptr_t addr, size_t size, size_t align)
Given an arbitrary region, provide an aligned region that covers it.
void k_mem_unmap(void *addr, size_t size)
Un-map mapped memory.
void * k_mem_map(size_t size, uint32_t flags)
Map anonymous memory into Zephyr's address space.
flags
Definition parser.h:96
__UINT32_TYPE__ uint32_t
Definition stdint.h:90
__UINT8_TYPE__ uint8_t
Definition stdint.h:88
__UINTPTR_TYPE__ uintptr_t
Definition stdint.h:105
Definition mem_manage.h:134
Definition mem_manage.h:106
Thread Structure.
Definition thread.h:250
Macros to abstract toolchain specific capabilities.
Misc utilities.