kernel: mm: rename k_mem_un/map_impl to k_mem_*_phys_guard

The internal functions k_mem_map_impl() and k_mem_unmap_impl()
are renamed to k_mem_map_phys_guard() and
k_mem_unmap_phys_guard(), respectively, to better clarify
their usage.

Signed-off-by: Daniel Leung <daniel.leung@intel.com>
Authored by Daniel Leung on 2024-06-05 16:42:38 -07:00; committed by Anas Nashif
parent 98b26c6ca8
commit 9f9dd264d8
3 changed files with 45 additions and 12 deletions

@@ -209,7 +209,37 @@ void z_phys_unmap(uint8_t *virt, size_t size);
* Map memory into virtual address space with guard pages.
*
* This maps memory into virtual address space with a preceding and
- * a succeeding guard pages.
+ * a succeeding guard page. The memory mapped via this function must be
+ * unmapped using k_mem_unmap_phys_guard().
+ *
+ * This function maps a contiguous physical memory region into the kernel's
+ * virtual address space with a preceding and a succeeding guard page.
+ * Given a physical address and a size, return a linear address representing
+ * the base of where the physical region is mapped in the virtual address
+ * space for the Zephyr kernel.
+ *
+ * This function alters the active page tables in the area reserved
+ * for the kernel. This function will choose the virtual address
+ * and return it to the caller.
+ *
+ * If user thread access control needs to be managed in any way, do not enable
+ * K_MEM_PERM_USER flags here; instead manage the region's permissions
+ * with memory domain APIs after the mapping has been established. Setting
+ * K_MEM_PERM_USER here will allow all user threads to access this memory,
+ * which is usually undesirable.
+ *
+ * Unless K_MEM_MAP_UNINIT is used, the returned memory will be zeroed.
+ *
+ * The returned virtual memory pointer will be page-aligned. The size
+ * parameter, and any base address for re-mapping purposes, must be page-
+ * aligned.
+ *
+ * Note that the allocation includes two guard pages immediately before
+ * and after the requested region. The total size of the allocation will be
+ * the requested size plus the size of these two guard pages.
+ *
+ * Many K_MEM_MAP_* flags have been implemented to alter the behavior of this
+ * function, with details in the documentation for these flags.
*
* @see k_mem_map() for additional information if called via that.
*
@@ -225,26 +255,29 @@ void z_phys_unmap(uint8_t *virt, size_t size);
* space, insufficient physical memory to establish the mapping,
* or insufficient memory for paging structures.
*/
-void *k_mem_map_impl(uintptr_t phys, size_t size, uint32_t flags, bool is_anon);
+void *k_mem_map_phys_guard(uintptr_t phys, size_t size, uint32_t flags, bool is_anon);
/**
- * Un-map mapped memory
+ * Un-map memory mapped via k_mem_map_phys_guard().
*
* This removes the memory mappings for the provided page-aligned region,
* and the two guard pages surrounding the region.
*
* This function alters the active page tables in the area reserved
* for the kernel.
*
* @see k_mem_unmap() for additional information if called via that.
*
+ * @see k_mem_phys_unmap() for additional information if called via that.
+ *
- * @note Calling this function on a region which was not mapped to begin
- * with is undefined behavior.
+ * @note Calling this function on a region which was not mapped via
+ * k_mem_map_phys_guard() to begin with is undefined behavior.
*
* @param addr Page-aligned memory region base virtual address
* @param size Page-aligned memory region size
* @param is_anon True if the mapped memory is from anonymous memory.
*/
-void k_mem_unmap_impl(void *addr, size_t size, bool is_anon);
+void k_mem_unmap_phys_guard(void *addr, size_t size, bool is_anon);
#ifdef __cplusplus
}

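As a reading aid: the doc comment added above pins down the address-space contract of k_mem_map_phys_guard(). Below is a minimal sketch of that layout arithmetic, assuming a 4 KiB page; the struct and helper names are hypothetical, chosen for illustration, and are not part of this commit.

#include <stdint.h>
#include <stddef.h>

#define PAGE_SIZE 4096u /* illustrative stand-in for the MMU page size */

/* Hypothetical model of the reservation described in the doc comment:
 * one guard page, the usable region, then another guard page. */
struct guarded_region {
	uintptr_t base;  /* start of the reservation (leading guard page) */
	uintptr_t mem;   /* page-aligned address handed back to the caller */
	size_t total;    /* requested size plus the two guard pages */
};

static struct guarded_region guard_layout(uintptr_t base, size_t size)
{
	struct guarded_region r;

	r.base = base;                   /* leading guard page is left unmapped */
	r.mem = base + PAGE_SIZE;        /* usable region starts right after it */
	r.total = size + 2u * PAGE_SIZE; /* trailing guard page closes it out */
	return r;
}

So for a one-page request, three pages of virtual address space are accounted for, while only the middle page is ever backed by memory.
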
@@ -170,7 +170,7 @@ size_t k_mem_free_get(void);
*/
static inline void *k_mem_map(size_t size, uint32_t flags)
{
-	return k_mem_map_impl((uintptr_t)NULL, size, flags, true);
+	return k_mem_map_phys_guard((uintptr_t)NULL, size, flags, true);
}
/**
@@ -214,7 +214,7 @@ static inline void *k_mem_map(size_t size, uint32_t flags)
*/
static inline void *k_mem_phys_map(uintptr_t phys, size_t size, uint32_t flags)
{
-	return k_mem_map_impl(phys, size, flags, false);
+	return k_mem_map_phys_guard(phys, size, flags, false);
}
/**
@@ -232,7 +232,7 @@ static inline void *k_mem_phys_map(uintptr_t phys, size_t size, uint32_t flags)
*/
static inline void k_mem_unmap(void *addr, size_t size)
{
-	k_mem_unmap_impl(addr, size, true);
+	k_mem_unmap_phys_guard(addr, size, true);
}
/**
@@ -255,7 +255,7 @@ static inline void k_mem_unmap(void *addr, size_t size)
*/
static inline void k_mem_phys_unmap(void *addr, size_t size)
{
-	k_mem_unmap_impl(addr, size, false);
+	k_mem_unmap_phys_guard(addr, size, false);
}
/**

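The wrappers above show that the rename is internal only: the public k_mem_map(), k_mem_phys_map(), k_mem_unmap() and k_mem_phys_unmap() signatures are unchanged and merely forward to the renamed functions with is_anon set accordingly. A minimal usage sketch of the anonymous path follows, assuming an MMU-enabled target; K_MEM_PERM_RW and CONFIG_MMU_PAGE_SIZE are pre-existing Zephyr symbols, not introduced by this patch.

#include <zephyr/kernel.h>
#include <zephyr/kernel/mm.h>

void anon_map_example(void)
{
	/* One page of anonymous memory; k_mem_map_phys_guard() reserves
	 * an extra guard page on each side of it. */
	void *mem = k_mem_map(CONFIG_MMU_PAGE_SIZE, K_MEM_PERM_RW);

	if (mem == NULL) {
		return; /* out of virtual address space or page frames */
	}

	/* mem is page-aligned and zeroed (no K_MEM_MAP_UNINIT given);
	 * touching the byte just before or just after the page would
	 * fault in a guard page. */

	k_mem_unmap(mem, CONFIG_MMU_PAGE_SIZE);
}
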
@@ -561,7 +561,7 @@ static int map_anon_page(void *addr, uint32_t flags)
	return 0;
}
-void *k_mem_map_impl(uintptr_t phys, size_t size, uint32_t flags, bool is_anon)
+void *k_mem_map_phys_guard(uintptr_t phys, size_t size, uint32_t flags, bool is_anon)
{
	uint8_t *dst;
	size_t total_size;
@@ -645,7 +645,7 @@ out:
return dst;
}
-void k_mem_unmap_impl(void *addr, size_t size, bool is_anon)
+void k_mem_unmap_phys_guard(void *addr, size_t size, bool is_anon)
{
	uintptr_t phys;
	uint8_t *pos;
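
On the implementation side, the same two functions serve both call paths: the is_anon argument selects whether the kernel allocates backing page frames itself (anonymous memory) or maps the caller-supplied physical region. A sketch of the non-anonymous path through the public wrapper, assuming an MMU-enabled target; the physical address and size below are made-up placeholders, not a real device.

#include <zephyr/kernel.h>
#include <zephyr/kernel/mm.h>

#define EXAMPLE_REGS_PHYS 0xf0000000UL /* hypothetical MMIO base */
#define EXAMPLE_REGS_SIZE 0x1000UL     /* one page of registers */

void mmio_map_example(void)
{
	/* Non-anonymous mapping: forwards to
	 * k_mem_map_phys_guard(phys, size, flags, false). */
	uint8_t *regs = k_mem_phys_map(EXAMPLE_REGS_PHYS, EXAMPLE_REGS_SIZE,
				       K_MEM_PERM_RW | K_MEM_CACHE_NONE);

	if (regs == NULL) {
		return;
	}

	/* ... device register access through regs ... */

	/* Unmap with the matching wrapper, which forwards to
	 * k_mem_unmap_phys_guard(addr, size, false). */
	k_mem_phys_unmap(regs, EXAMPLE_REGS_SIZE);
}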