kernel: mm: remove k_mem_phys_un/map()

These functions were introduced alongside the memory-mapped stack
feature and are currently used only there. To avoid potential
confusion with k_mem_un/map(), remove them and use
k_mem_map/unmap_phys_guard() directly instead.

Signed-off-by: Daniel Leung <daniel.leung@intel.com>
Authored by Daniel Leung on 2024-06-05 16:25:39 -07:00; committed by Anas Nashif
parent 9f9dd264d8
commit 295254a96b
3 changed files with 5 additions and 75 deletions
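
For out-of-tree callers of the removed wrappers, the migration mirrors what
this commit does to the thread code below: keep the same arguments and append
is_anon = false, since a physical base address is being supplied. A minimal
sketch under that assumption (the header path, DEV_PHYS, and DEV_SIZE are
illustrative, not from this commit):

    #include <zephyr/kernel.h>  /* assumed to pull in the kernel memory-mapping API */

    #define DEV_PHYS 0x40000000UL  /* hypothetical page-aligned physical base */
    #define DEV_SIZE 4096UL        /* must be page-aligned */

    void migrate_example(void)
    {
            /* Before: k_mem_phys_map(DEV_PHYS, DEV_SIZE,
             *                        K_MEM_PERM_RW | K_MEM_CACHE_WB);
             */
            void *virt = k_mem_map_phys_guard(DEV_PHYS, DEV_SIZE,
                                              K_MEM_PERM_RW | K_MEM_CACHE_WB,
                                              false);
            if (virt == NULL) {
                    return; /* no virtual address space or paging structures */
            }

            /* ... use the mapping ... */

            /* Before: k_mem_phys_unmap(virt, DEV_SIZE); */
            k_mem_unmap_phys_guard(virt, DEV_SIZE, false);
    }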


@@ -243,8 +243,6 @@ void z_phys_unmap(uint8_t *virt, size_t size);
  *
  * @see k_mem_map() for additional information if called via that.
  *
- * @see k_mem_phys_map() for additional information if called via that.
- *
  * @param phys Physical address base of the memory region if not requesting
  *             anonymous memory. Must be page-aligned.
  * @param size Size of the memory mapping. This must be page-aligned.
@@ -268,8 +266,6 @@ void *k_mem_map_phys_guard(uintptr_t phys, size_t size, uint32_t flags, bool is_
  *
  * @see k_mem_unmap() for additional information if called via that.
  *
- * @see k_mem_phys_unmap() for additional information if called via that.
- *
  * @note Calling this function on a region which was not mapped via
  *       k_mem_map_phys_guard() to begin with is undefined behavior.
  *
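
Both @param entries above require page alignment, so a caller holding an
arbitrary physical range has to widen it to page boundaries first. A hedged
sketch of that bookkeeping (the helper name is hypothetical; ROUND_DOWN,
ROUND_UP, and CONFIG_MMU_PAGE_SIZE are the usual Zephyr utilities for this,
assumed available here):

    #include <zephyr/kernel.h>
    #include <zephyr/sys/util.h>  /* ROUND_DOWN / ROUND_UP */

    /* Hypothetical helper: map an arbitrary physical range read-write by
     * aligning it outward to page boundaries first.
     */
    static void *map_unaligned(uintptr_t phys, size_t size)
    {
            uintptr_t base = ROUND_DOWN(phys, CONFIG_MMU_PAGE_SIZE);
            size_t span = ROUND_UP(phys + size, CONFIG_MMU_PAGE_SIZE) - base;
            uint8_t *virt = k_mem_map_phys_guard(base, span, K_MEM_PERM_RW, false);

            if (virt == NULL) {
                    return NULL;
            }
            /* Skip past the alignment slack to the caller's first byte. */
            return virt + (phys - base);
    }

Note that the matching unmap would have to use the aligned base and span, not
the pointer returned to the caller.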


@@ -173,50 +173,6 @@ static inline void *k_mem_map(size_t size, uint32_t flags)
         return k_mem_map_phys_guard((uintptr_t)NULL, size, flags, true);
 }
 
-/**
- * Map a physical memory region into kernel's virtual address space with guard pages.
- *
- * This function maps a contiguous physical memory region into kernel's
- * virtual address space. Given a physical address and a size, return a
- * linear address representing the base of where the physical region is mapped
- * in the virtual address space for the Zephyr kernel.
- *
- * This function alters the active page tables in the area reserved
- * for the kernel. This function will choose the virtual address
- * and return it to the caller.
- *
- * If user thread access control needs to be managed in any way, do not enable
- * K_MEM_PERM_USER flags here; instead manage the region's permissions
- * with memory domain APIs after the mapping has been established. Setting
- * K_MEM_PERM_USER here will allow all user threads to access this memory
- * which is usually undesirable.
- *
- * Unless K_MEM_MAP_UNINIT is used, the returned memory will be zeroed.
- *
- * The returned virtual memory pointer will be page-aligned. The size
- * parameter, and any base address for re-mapping purposes must be page-
- * aligned.
- *
- * Note that the allocation includes two guard pages immediately before
- * and after the requested region. The total size of the allocation will be
- * the requested size plus the size of these two guard pages.
- *
- * Many K_MEM_MAP_* flags have been implemented to alter the behavior of this
- * function, with details in the documentation for these flags.
- *
- * @param phys Physical address base of the memory region.
- *             This must be page-aligned.
- * @param size Size of the memory mapping. This must be page-aligned.
- * @param flags K_MEM_PERM_*, K_MEM_MAP_* control flags.
- *
- * @return The mapped memory location, or NULL if insufficient virtual address
- *         space or insufficient memory for paging structures.
- */
-static inline void *k_mem_phys_map(uintptr_t phys, size_t size, uint32_t flags)
-{
-        return k_mem_map_phys_guard(phys, size, flags, false);
-}
-
 /**
  * Un-map mapped memory
  *
@@ -235,29 +191,6 @@ static inline void k_mem_unmap(void *addr, size_t size)
         k_mem_unmap_phys_guard(addr, size, true);
 }
 
-/**
- * Un-map memory mapped via k_mem_phys_map().
- *
- * This unmaps a virtual memory region from kernel's virtual address space.
- *
- * This function alters the active page tables in the area reserved
- * for the kernel.
- *
- * This removes a memory mapping for the provided page-aligned region
- * and the guard pages. The kernel may re-use the associated virtual address
- * region later.
- *
- * @note Calling this function on a region which was not mapped via
- *       k_mem_phys_map() to begin with is undefined behavior.
- *
- * @param addr Page-aligned memory region base virtual address
- * @param size Page-aligned memory region size
- */
-static inline void k_mem_phys_unmap(void *addr, size_t size)
-{
-        k_mem_unmap_phys_guard(addr, size, false);
-}
-
 /**
  * Given an arbitrary region, provide a aligned region that covers it
  *
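
With the wrappers gone, this header's public surface is k_mem_map() and
k_mem_unmap(); direct users of the guard functions take on one obligation the
wrappers used to encode, namely passing the same is_anon value at unmap time
as at map time. A sketch of both pairings, assuming only the signatures shown
in this diff (helper names are illustrative):

    #include <zephyr/kernel.h>

    /* Anonymous mapping: kernel picks the backing, phys is NULL, is_anon is
     * true. This is exactly what k_mem_map()/k_mem_unmap() do above.
     */
    static void *anon_scratch(size_t size)
    {
            return k_mem_map_phys_guard((uintptr_t)NULL, size, K_MEM_PERM_RW, true);
    }

    static void anon_scratch_free(void *addr, size_t size)
    {
            k_mem_unmap_phys_guard(addr, size, true);
    }

    /* Physical mapping: caller supplies the frames, is_anon is false. This is
     * the pattern the removed k_mem_phys_map()/k_mem_phys_unmap() expressed.
     */
    static void *phys_window(uintptr_t phys, size_t size)
    {
            return k_mem_map_phys_guard(phys, size,
                                        K_MEM_PERM_RW | K_MEM_CACHE_WB, false);
    }

    static void phys_window_close(void *addr, size_t size)
    {
            k_mem_unmap_phys_guard(addr, size, false);
    }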


@@ -429,8 +429,9 @@ static char *setup_thread_stack(struct k_thread *new_thread,
          * stack. If CONFIG_INIT_STACKS is enabled, the stack will be
          * cleared below.
          */
-        void *stack_mapped = k_mem_phys_map((uintptr_t)stack, stack_obj_size,
-                                            K_MEM_PERM_RW | K_MEM_CACHE_WB | K_MEM_MAP_UNINIT);
+        void *stack_mapped = k_mem_map_phys_guard((uintptr_t)stack, stack_obj_size,
+                                                  K_MEM_PERM_RW | K_MEM_CACHE_WB | K_MEM_MAP_UNINIT,
+                                                  false);
 
         __ASSERT_NO_MSG((uintptr_t)stack_mapped != 0);
@@ -1051,8 +1052,8 @@ void do_thread_cleanup(struct k_thread *thread)
 #ifdef CONFIG_THREAD_STACK_MEM_MAPPED
         if (thread_cleanup_stack_addr != NULL) {
-                k_mem_phys_unmap(thread_cleanup_stack_addr,
-                                 thread_cleanup_stack_sz);
+                k_mem_unmap_phys_guard(thread_cleanup_stack_addr,
+                                       thread_cleanup_stack_sz, false);
                 thread_cleanup_stack_addr = NULL;
         }
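
The call site keeps K_MEM_MAP_UNINIT because, per the removed wrapper
documentation, a mapping is zero-filled unless that flag is given; the thread
code instead clears the stack itself only when CONFIG_INIT_STACKS is enabled,
which avoids zeroing the region twice. The same map-uninitialized-then-fill
pattern, sketched with hypothetical BUF_PHYS/BUF_SIZE values not taken from
this commit:

    #include <zephyr/kernel.h>
    #include <string.h>

    #define BUF_PHYS 0x80000000UL  /* hypothetical page-aligned region */
    #define BUF_SIZE 8192UL

    /* Map without the implicit zero-fill, then initialize once ourselves. */
    static void *map_and_init(void)
    {
            void *virt = k_mem_map_phys_guard(BUF_PHYS, BUF_SIZE,
                                              K_MEM_PERM_RW | K_MEM_CACHE_WB |
                                              K_MEM_MAP_UNINIT,
                                              false);

            if (virt != NULL) {
                    memset(virt, 0xAA, BUF_SIZE); /* single initialization pass */
            }
            return virt;
    }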