Zephyr Project API 3.7.0
A Scalable Open Source RTOS
main.c File Reference
#include <zephyr/ztest.h>
#include <zephyr/toolchain.h>
#include <mmu.h>
#include <zephyr/linker/sections.h>

Macros

#define BASE_FLAGS   (K_MEM_CACHE_WB)
 
#define BUF_SIZE   (CONFIG_MMU_PAGE_SIZE + 907)
 
#define BUF_OFFSET   1238
 
#define TEST_PAGE_SZ   ROUND_UP(BUF_OFFSET + BUF_SIZE, CONFIG_MMU_PAGE_SIZE)
 
#define USER_STACKSIZE   (128)
 

Functions

void k_sys_fatal_error_handler (unsigned int reason, const struct arch_esf *pEsf)
 Fatal error policy handler.
 
 ZTEST (mem_map, test_k_mem_map_phys_bare_rw)
 Show that mapping an irregularly sized buffer works and that the RW flag is respected.
 
 ZTEST (mem_map, test_k_mem_map_phys_bare_exec)
 Show that mapping with/without K_MEM_PERM_EXEC works as expected.
 
 ZTEST (mem_map, test_k_mem_map_phys_bare_side_effect)
 Show that memory mapping doesn't have unintended side effects.
 
 ZTEST (mem_map, test_k_mem_unmap_phys_bare)
 Test that k_mem_unmap_phys_bare() unmaps the memory so that it is no longer accessible afterwards.
 
 ZTEST (mem_map, test_k_mem_map_phys_bare_unmap_reclaim_addr)
 Show that k_mem_unmap_phys_bare() can reclaim the virtual region correctly.
 
 ZTEST (mem_map_api, test_k_mem_map_unmap)
 Basic k_mem_map() and k_mem_unmap() functionality.
 
 ZTEST (mem_map_api, test_k_mem_map_guard_before)
 Test that the "before" guard page is in place for k_mem_map().
 
 ZTEST (mem_map_api, test_k_mem_map_guard_after)
 Test that the "after" guard page is in place for k_mem_map().
 
 ZTEST (mem_map_api, test_k_mem_map_exhaustion)
 
 K_THREAD_STACK_DEFINE (user_stack,(128))
 
 K_APPMEM_PARTITION_DEFINE (default_part)
 
 K_APP_DMEM (default_part)
 
 ZTEST (mem_map_api, test_k_mem_map_user)
 Test that the allocated region is accessible to userspace only when K_MEM_PERM_USER is used.
 
void * mem_map_env_setup (void)
 
 ZTEST_SUITE (mem_map, NULL, NULL, NULL, NULL, NULL)
 
 ZTEST_SUITE (mem_map_api, NULL, mem_map_env_setup, NULL, NULL, NULL)
 

Variables

volatile bool expect_fault
 
static __pinned_noinit uint8_t test_page [ROUND_UP(1238+(CONFIG_MMU_PAGE_SIZE+907), CONFIG_MMU_PAGE_SIZE)]
 
struct k_thread user_thread
 

Macro Definition Documentation

◆ BASE_FLAGS

#define BASE_FLAGS   (K_MEM_CACHE_WB)

◆ BUF_OFFSET

#define BUF_OFFSET   1238

◆ BUF_SIZE

#define BUF_SIZE   (CONFIG_MMU_PAGE_SIZE + 907)

◆ TEST_PAGE_SZ

#define TEST_PAGE_SZ   ROUND_UP(BUF_OFFSET + BUF_SIZE, CONFIG_MMU_PAGE_SIZE)
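
A worked example of the rounding, assuming a typical CONFIG_MMU_PAGE_SIZE of 4096 (other page sizes change the result accordingly):

/* BUF_SIZE              = 4096 + 907           = 5003
 * BUF_OFFSET + BUF_SIZE = 1238 + 5003          = 6241
 * TEST_PAGE_SZ          = ROUND_UP(6241, 4096) = 8192   -> two full pages
 */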

◆ USER_STACKSIZE

#define USER_STACKSIZE   (128)

Function Documentation

◆ K_APP_DMEM()

K_APP_DMEM(default_part)

◆ K_APPMEM_PARTITION_DEFINE()

K_APPMEM_PARTITION_DEFINE(default_part)

◆ K_THREAD_STACK_DEFINE()

K_THREAD_STACK_DEFINE(user_stack, (128))

◆ mem_map_env_setup()

void *mem_map_env_setup(void)

◆ ZTEST() [1/10]

ZTEST(mem_map, test_k_mem_map_phys_bare_exec)

Show that mapping with/without K_MEM_PERM_EXEC works as expected.
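
A minimal, purely illustrative sketch of the pattern behind this test (the helper names are not in this file): map the page holding a trivial function with K_MEM_PERM_EXEC and call it through the new virtual address. It assumes k_mem_phys_addr() is available for the virtual-to-physical translation and glosses over architecture details (e.g. instruction-set mode bits) that a real test has to handle.

static void trivial_function(void)
{
	/* Body intentionally empty; returning cleanly is the whole point. */
}

static void exec_mapping_sketch(void)
{
	uint8_t *mapped_exec;
	void (*func)(void);

	/* Map the page containing the function with execute permission. */
	k_mem_map_phys_bare(&mapped_exec,
			    k_mem_phys_addr((void *)&trivial_function),
			    CONFIG_MMU_PAGE_SIZE,
			    BASE_FLAGS | K_MEM_PERM_EXEC);

	/* Calling through the executable mapping must succeed ... */
	func = (void (*)(void))(uintptr_t)mapped_exec;
	func();

	/* ... whereas a second mapping made without K_MEM_PERM_EXEC would be
	 * expected to fault when called, with expect_fault set beforehand.
	 */
	k_mem_unmap_phys_bare(mapped_exec, CONFIG_MMU_PAGE_SIZE);
}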

◆ ZTEST() [2/10]

ZTEST(mem_map, test_k_mem_map_phys_bare_rw)

Show that mapping an irregularly sized buffer works and that the RW flag is respected.
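
A minimal sketch of the read/write pattern (illustrative only; assumes k_mem_phys_addr() is available for the translation). The real test also relies on the irregular BUF_OFFSET placement inside test_page.

static void rw_mapping_sketch(void)
{
	uint8_t *mapped_rw, *mapped_ro;
	uint8_t *buf = test_page + BUF_OFFSET;

	/* Map the buffer read/write and fill it through the new mapping. */
	k_mem_map_phys_bare(&mapped_rw, k_mem_phys_addr(buf), BUF_SIZE,
			    BASE_FLAGS | K_MEM_PERM_RW);
	for (size_t i = 0; i < BUF_SIZE; i++) {
		mapped_rw[i] = (uint8_t)(i & 0xff);
	}

	/* Map the same buffer read-only: reads must see the pattern ... */
	k_mem_map_phys_bare(&mapped_ro, k_mem_phys_addr(buf), BUF_SIZE,
			    BASE_FLAGS);
	zassert_equal(mapped_ro[0], mapped_rw[0], "read-only view differs");

	/* ... and a write through it is expected to fault. */
	expect_fault = true;
	compiler_barrier();
	mapped_ro[0] = 42;	/* should land in k_sys_fatal_error_handler() */
}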

◆ ZTEST() [3/10]

ZTEST(mem_map, test_k_mem_map_phys_bare_side_effect)

Show that memory mapping doesn't have unintended side effects.
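
An illustrative sketch of the idea: creating a mapping must not disturb the contents of the memory being mapped (same k_mem_phys_addr() assumption as above).

static void side_effect_sketch(void)
{
	uint8_t *mapped;

	/* Pre-fill the buffer with a known pattern. */
	for (size_t i = 0; i < sizeof(test_page); i++) {
		test_page[i] = 0xAA;
	}

	k_mem_map_phys_bare(&mapped, k_mem_phys_addr(test_page),
			    sizeof(test_page), BASE_FLAGS);

	/* Both the original and the new view must still hold the pattern. */
	for (size_t i = 0; i < sizeof(test_page); i++) {
		zassert_equal(test_page[i], 0xAA, "original view changed");
		zassert_equal(mapped[i], 0xAA, "mapped view differs");
	}

	k_mem_unmap_phys_bare(mapped, sizeof(test_page));
}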

◆ ZTEST() [4/10]

ZTEST(mem_map, test_k_mem_map_phys_bare_unmap_reclaim_addr)

Show that k_mem_unmap_phys_bare() can reclaim the virtual region correctly.
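
An illustrative sketch of the reclaim check: unmapping should return the virtual region to the allocator, so an identical follow-up mapping can receive the same address (same assumptions as above).

static void reclaim_addr_sketch(void)
{
	uint8_t *first, *second;

	k_mem_map_phys_bare(&first, k_mem_phys_addr(test_page),
			    sizeof(test_page), BASE_FLAGS);
	k_mem_unmap_phys_bare(first, sizeof(test_page));

	/* The freed virtual region should be handed out again. */
	k_mem_map_phys_bare(&second, k_mem_phys_addr(test_page),
			    sizeof(test_page), BASE_FLAGS);
	zassert_equal_ptr(second, first, "virtual region was not reclaimed");

	k_mem_unmap_phys_bare(second, sizeof(test_page));
}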

◆ ZTEST() [5/10]

ZTEST(mem_map, test_k_mem_unmap_phys_bare)

Test that k_mem_unmap_phys_bare() unmaps the memory so that it is no longer accessible afterwards.
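
An illustrative sketch: once unmapped, touching the stale virtual address should fault and be caught by k_sys_fatal_error_handler() via expect_fault (same assumptions as above).

static void unmap_faults_sketch(void)
{
	uint8_t *mapped;

	k_mem_map_phys_bare(&mapped, k_mem_phys_addr(test_page),
			    sizeof(test_page), BASE_FLAGS | K_MEM_PERM_RW);
	mapped[0] = 42;	/* accessible while mapped */

	k_mem_unmap_phys_bare(mapped, sizeof(test_page));

	/* A read through the stale mapping is now expected to fault. */
	expect_fault = true;
	compiler_barrier();
	(void)*(volatile uint8_t *)mapped;
}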

◆ ZTEST() [6/10]

ZTEST(mem_map_api, test_k_mem_map_exhaustion)
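
This test carries no brief description. Judging from its name, a plausible (purely illustrative) sketch is to keep requesting one-page anonymous mappings until k_mem_map() fails, then release everything that was mapped; the file's actual strategy may differ.

static void exhaustion_sketch(void)
{
	static void *pages[1024];	/* arbitrary bookkeeping bound for the sketch */
	size_t n = 0;

	while (n < ARRAY_SIZE(pages)) {
		void *p = k_mem_map(CONFIG_MMU_PAGE_SIZE, K_MEM_PERM_RW);

		if (p == NULL) {
			break;	/* virtual space or page frames exhausted */
		}
		pages[n++] = p;
	}

	zassert_true(n < ARRAY_SIZE(pages), "k_mem_map() never failed");

	while (n > 0) {
		k_mem_unmap(pages[--n], CONFIG_MMU_PAGE_SIZE);
	}
}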

◆ ZTEST() [7/10]

ZTEST(mem_map_api, test_k_mem_map_guard_after)

Test that the "after" guard page is in place for k_mem_map().

◆ ZTEST() [8/10]

ZTEST(mem_map_api, test_k_mem_map_guard_before)

Test that the "before" guard page is in place for k_mem_map().

◆ ZTEST() [9/10]

ZTEST(mem_map_api, test_k_mem_map_unmap)

Basic k_mem_map() and k_mem_unmap() functionality.

Does not exercise the K_MEM_MAP_* control flags, just the default behavior.
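
An illustrative sketch of the basic round trip with default flags: anonymous memory is zero-filled, writable when mapped with K_MEM_PERM_RW, and released with k_mem_unmap().

static void map_unmap_sketch(void)
{
	uint8_t *mem = k_mem_map(CONFIG_MMU_PAGE_SIZE, K_MEM_PERM_RW);

	zassert_not_null(mem, "k_mem_map() failed");

	/* Default behavior: freshly mapped anonymous memory is zeroed. */
	zassert_equal(mem[0], 0, "mapping not zero-filled");

	mem[0] = 42;
	zassert_equal(mem[0], 42, "mapping not writable");

	k_mem_unmap(mem, CONFIG_MMU_PAGE_SIZE);
}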

◆ ZTEST() [10/10]

ZTEST(mem_map_api, test_k_mem_map_user)

Test that the allocated region is accessible to userspace only when K_MEM_PERM_USER is used.
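
An illustrative sketch of the userspace check on a CONFIG_USERSPACE build, reusing the user_thread and user_stack objects defined in this file: memory mapped with K_MEM_PERM_USER should be writable from a user-mode thread, while without that flag the same access would be expected to fault.

static void user_touch(void *p1, void *p2, void *p3)
{
	ARG_UNUSED(p2);
	ARG_UNUSED(p3);

	((uint8_t *)p1)[0] = 42;	/* executed in user mode */
}

static void user_mapping_sketch(void)
{
	uint8_t *mem = k_mem_map(CONFIG_MMU_PAGE_SIZE,
				 K_MEM_PERM_RW | K_MEM_PERM_USER);

	zassert_not_null(mem, "k_mem_map() failed");

	k_thread_create(&user_thread, user_stack,
			K_THREAD_STACK_SIZEOF(user_stack),
			user_touch, mem, NULL, NULL,
			K_PRIO_PREEMPT(1), K_USER | K_INHERIT_PERMS, K_NO_WAIT);
	k_thread_join(&user_thread, K_FOREVER);

	zassert_equal(mem[0], 42, "user thread could not write the mapping");

	k_mem_unmap(mem, CONFIG_MMU_PAGE_SIZE);
}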

◆ ZTEST_SUITE() [1/2]

ZTEST_SUITE(mem_map, NULL, NULL, NULL, NULL, NULL)

◆ ZTEST_SUITE() [2/2]

ZTEST_SUITE(mem_map_api, NULL, mem_map_env_setup, NULL, NULL, NULL)
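
The mem_map_api suite registers mem_map_env_setup() as its setup hook. A plausible, purely illustrative body would grant user threads access to default_part (which holds data such as expect_fault) before any test runs; the file's actual implementation may differ.

void *mem_map_env_setup(void)
{
	/* Hypothetical body: make the K_APP_DMEM(default_part) data visible
	 * to user threads by adding the partition to the default domain.
	 */
	zassert_ok(k_mem_domain_add_partition(&k_mem_domain_default,
					      &default_part),
		   "failed to add default_part");

	return NULL;	/* no per-suite fixture object is needed */
}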

Variable Documentation

◆ expect_fault

volatile bool expect_fault
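
expect_fault ties the tests to the custom fatal error handler: a test arms it immediately before performing an access that is supposed to fault. A typical handler shape for this pattern is sketched below; it is illustrative and not necessarily this file's exact body.

void k_sys_fatal_error_handler(unsigned int reason, const struct arch_esf *pEsf)
{
	ARG_UNUSED(pEsf);

	printk("Caught system error -- reason %u\n", reason);

	if (expect_fault) {
		expect_fault = false;	/* the fault was intentional */
		ztest_test_pass();
	} else {
		printk("Unexpected fault during test\n");
		ztest_test_fail();
	}
}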

◆ test_page

static __pinned_noinit uint8_t test_page[ROUND_UP(1238+(CONFIG_MMU_PAGE_SIZE+907), CONFIG_MMU_PAGE_SIZE)]

◆ user_thread

struct k_thread user_thread