kernel: mm: rename Z_VIRT_RAM_* to K_MEM_VIRT_*

Renames:
  Z_VIRT_RAM_START to K_MEM_VIRT_RAM_START
  Z_VIRT_RAM_SIZE to K_MEM_VIRT_RAM_SIZE
  Z_VIRT_RAM_END to K_MEM_VIRT_RAM_END

This is part of a series to move memory management related
code out of the Z_ namespace into its own namespace.

Signed-off-by: Daniel Leung <daniel.leung@intel.com>
Author:    Daniel Leung
Committed: Anas Nashif
Date:      2024-06-07 09:37:48 -07:00
Commit:    03eded1ed6
Parent:    3fd66de508

3 changed files with 36 additions and 32 deletions

View file

@@ -73,7 +73,7 @@ below.
.. code-block:: none
:emphasize-lines: 1, 3, 9, 22, 24
- +--------------+ <- Z_VIRT_RAM_START
+ +--------------+ <- K_MEM_VIRT_RAM_START
| Undefined VM | <- architecture specific reserved area
+--------------+ <- Z_KERNEL_VIRT_START
| Mapping for |
@@ -96,17 +96,17 @@ below.
| Mapping |
+--------------+ <- memory mappings start here
| Reserved | <- special purpose virtual page(s) of size Z_VM_RESERVED
- +--------------+ <- Z_VIRT_RAM_END
+ +--------------+ <- K_MEM_VIRT_RAM_END
- * ``Z_VIRT_RAM_START`` is the beginning of the virtual memory address space.
+ * ``K_MEM_VIRT_RAM_START`` is the beginning of the virtual memory address space.
This needs to be page aligned. Currently, it is the same as
:kconfig:option:`CONFIG_KERNEL_VM_BASE`.
- * ``Z_VIRT_RAM_SIZE`` is the size of the virtual memory address space.
+ * ``K_MEM_VIRT_RAM_SIZE`` is the size of the virtual memory address space.
This needs to be page aligned. Currently, it is the same as
:kconfig:option:`CONFIG_KERNEL_VM_SIZE`.
- * ``Z_VIRT_RAM_END`` is simply (``Z_VIRT_RAM_START`` + ``Z_VIRT_RAM_SIZE``).
+ * ``K_MEM_VIRT_RAM_END`` is simply (``K_MEM_VIRT_RAM_START`` + ``K_MEM_VIRT_RAM_SIZE``).
* ``Z_KERNEL_VIRT_START`` is the same as ``z_mapped_start`` specified in the linker
script. This is the virtual address of the beginning of the kernel image at
@@ -176,7 +176,7 @@ mappings must be aligned on page size and have finer access control.
* The requested size must be multiple of page size.
* The address returned is inside the virtual address space between
- ``Z_FREE_VM_START`` and ``Z_VIRT_RAM_END``.
+ ``Z_FREE_VM_START`` and ``K_MEM_VIRT_RAM_END``.
* The mapped region is not guaranteed to be physically contiguous in memory.
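To make the guarantees above concrete: a minimal sketch, not part of this commit, assuming a kernel-internal context where the internal Z_FREE_VM_START and K_MEM_VIRT_RAM_END macros are visible. The function name and region size are made up; k_mem_map(), k_mem_unmap() and K_MEM_PERM_RW are the public Zephyr memory-mapping APIs.

#include <zephyr/kernel.h>
#include <zephyr/kernel/mm.h>
#include <zephyr/sys/__assert.h>

/* Illustrative only: map a page-multiple anonymous region and check
 * that a successful mapping lands inside the documented window.
 */
static void map_demo(void)
{
	void *anon = k_mem_map(4 * CONFIG_MMU_PAGE_SIZE, K_MEM_PERM_RW);

	if (anon == NULL) {
		return; /* no free virtual region or page frames left */
	}

	__ASSERT((uint8_t *)anon >= Z_FREE_VM_START &&
		 (uint8_t *)anon < K_MEM_VIRT_RAM_END,
		 "k_mem_map() result %p outside the managed region", anon);

	k_mem_unmap(anon, 4 * CONFIG_MMU_PAGE_SIZE);
}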

View file

@@ -37,10 +37,14 @@
#define Z_NUM_PAGE_FRAMES (K_MEM_PHYS_RAM_SIZE / (size_t)CONFIG_MMU_PAGE_SIZE)
- /** End virtual address of virtual address space */
- #define Z_VIRT_RAM_START ((uint8_t *)CONFIG_KERNEL_VM_BASE)
- #define Z_VIRT_RAM_SIZE ((size_t)CONFIG_KERNEL_VM_SIZE)
- #define Z_VIRT_RAM_END (Z_VIRT_RAM_START + Z_VIRT_RAM_SIZE)
+ /** Start address of virtual memory. */
+ #define K_MEM_VIRT_RAM_START ((uint8_t *)CONFIG_KERNEL_VM_BASE)
+ /** Size of virtual memory. */
+ #define K_MEM_VIRT_RAM_SIZE ((size_t)CONFIG_KERNEL_VM_SIZE)
+ /** End address (exclusive) of virtual memory. */
+ #define K_MEM_VIRT_RAM_END (K_MEM_VIRT_RAM_START + K_MEM_VIRT_RAM_SIZE)
/* Boot-time virtual location of the kernel image. */
#define Z_KERNEL_VIRT_START ((uint8_t *)&z_mapped_start[0])
@@ -270,11 +274,11 @@ static inline void z_mem_assert_virtual_region(uint8_t *addr, size_t size)
__ASSERT(!Z_DETECT_POINTER_OVERFLOW(addr, size),
"region %p size %zu zero or wraps around", addr, size);
__ASSERT(IN_RANGE((uintptr_t)addr,
- (uintptr_t)Z_VIRT_RAM_START,
- ((uintptr_t)Z_VIRT_RAM_END - 1)) &&
+ (uintptr_t)K_MEM_VIRT_RAM_START,
+ ((uintptr_t)K_MEM_VIRT_RAM_END - 1)) &&
IN_RANGE(((uintptr_t)addr + size - 1),
- (uintptr_t)Z_VIRT_RAM_START,
- ((uintptr_t)Z_VIRT_RAM_END - 1)),
+ (uintptr_t)K_MEM_VIRT_RAM_START,
+ ((uintptr_t)K_MEM_VIRT_RAM_END - 1)),
"invalid virtual address region %p (%zu)", addr, size);
}
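For reference, the same bounds check spelled out long-hand; this is a sketch, not code from this commit, and the helper name is invented. It shows why both IN_RANGE() calls above compare against K_MEM_VIRT_RAM_END - 1: the end address is exclusive.

/* Hypothetical equivalent of the assertion: both the first and the
 * last byte of the region must lie inside
 * [K_MEM_VIRT_RAM_START, K_MEM_VIRT_RAM_END).
 */
static inline bool virt_region_is_valid(uint8_t *addr, size_t size)
{
	uintptr_t start = (uintptr_t)addr;
	uintptr_t last = start + size - 1;

	return (size > 0) && (last >= start) &&
	       (start >= (uintptr_t)K_MEM_VIRT_RAM_START) &&
	       (last < (uintptr_t)K_MEM_VIRT_RAM_END);
}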

View file

@@ -157,9 +157,9 @@ void z_page_frames_dump(void)
* the only guarantee is that such RAM mapping outside of the Zephyr image
* won't be disturbed by subsequent memory mapping calls.
*
- * +--------------+ <- Z_VIRT_RAM_START
+ * +--------------+ <- K_MEM_VIRT_RAM_START
* | Undefined VM | <- May contain ancillary regions like x86_64's locore
- * +--------------+ <- Z_KERNEL_VIRT_START (often == Z_VIRT_RAM_START)
+ * +--------------+ <- Z_KERNEL_VIRT_START (often == K_MEM_VIRT_RAM_START)
* | Mapping for |
* | main kernel |
* | image |
@@ -180,7 +180,7 @@ void z_page_frames_dump(void)
* | Mapping |
* +--------------+ <- mappings start here
* | Reserved | <- special purpose virtual page(s) of size Z_VM_RESERVED
- * +--------------+ <- Z_VIRT_RAM_END
+ * +--------------+ <- K_MEM_VIRT_RAM_END
*/
/* Bitmap of virtual addresses where one bit corresponds to one page.
@@ -196,17 +196,17 @@ SYS_BITARRAY_DEFINE_STATIC(virt_region_bitmap,
static bool virt_region_inited;
#define Z_VIRT_REGION_START_ADDR Z_FREE_VM_START
- #define Z_VIRT_REGION_END_ADDR (Z_VIRT_RAM_END - Z_VM_RESERVED)
+ #define Z_VIRT_REGION_END_ADDR (K_MEM_VIRT_RAM_END - Z_VM_RESERVED)
static inline uintptr_t virt_from_bitmap_offset(size_t offset, size_t size)
{
- return POINTER_TO_UINT(Z_VIRT_RAM_END)
+ return POINTER_TO_UINT(K_MEM_VIRT_RAM_END)
- (offset * CONFIG_MMU_PAGE_SIZE) - size;
}
static inline size_t virt_to_bitmap_offset(void *vaddr, size_t size)
{
- return (POINTER_TO_UINT(Z_VIRT_RAM_END)
+ return (POINTER_TO_UINT(K_MEM_VIRT_RAM_END)
- POINTER_TO_UINT(vaddr) - size) / CONFIG_MMU_PAGE_SIZE;
}
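A worked example of the offset arithmetic above, with purely illustrative values (the real bounds come from CONFIG_KERNEL_VM_BASE and CONFIG_KERNEL_VM_SIZE): the bitmap is indexed downward from the top of the virtual address space.

/* Assume K_MEM_VIRT_RAM_END == 0xC0000000 and 4 KiB pages. A one-page
 * region at bitmap offset 3 maps to
 *   virt_from_bitmap_offset(3, 0x1000)
 *     = 0xC0000000 - (3 * 0x1000) - 0x1000 = 0xBFFFC000
 * and the reverse conversion recovers the same offset:
 *   virt_to_bitmap_offset((void *)0xBFFFC000, 0x1000)
 *     = (0xC0000000 - 0xBFFFC000 - 0x1000) / 0x1000 = 3
 */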
@@ -228,8 +228,8 @@ static void virt_region_init(void)
/* Mark all bits up to Z_FREE_VM_START as allocated */
num_bits = POINTER_TO_UINT(Z_FREE_VM_START)
- - POINTER_TO_UINT(Z_VIRT_RAM_START);
- offset = virt_to_bitmap_offset(Z_VIRT_RAM_START, num_bits);
+ - POINTER_TO_UINT(K_MEM_VIRT_RAM_START);
+ offset = virt_to_bitmap_offset(K_MEM_VIRT_RAM_START, num_bits);
num_bits /= CONFIG_MMU_PAGE_SIZE;
(void)sys_bitarray_set_region(&virt_region_bitmap,
num_bits, offset);
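Continuing the same illustrative values, this is what the initialization above works out to; note that virt_to_bitmap_offset() is given the size in bytes, and num_bits is only converted to pages afterwards.

/* Assume K_MEM_VIRT_RAM_START == 0x80000000, K_MEM_VIRT_RAM_END ==
 * 0xC0000000, Z_FREE_VM_START == 0x80100000 (a 1 MiB kernel image),
 * and 4 KiB pages:
 *   num_bits = 0x80100000 - 0x80000000                      = 0x100000 bytes
 *   offset   = (0xC0000000 - 0x80000000 - 0x100000) / 0x1000 = 0x3ff00
 *   num_bits = 0x100000 / 0x1000                             = 0x100 pages
 * so the 0x100 bits at offset 0x3ff00 (the bottom of the address
 * space, since the bitmap counts from the top) are marked allocated.
 */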
@@ -319,9 +319,9 @@ static void *virt_region_alloc(size_t size, size_t align)
/* Here is the memory organization when trying to get an aligned
* virtual address:
*
- * +--------------+ <- Z_VIRT_RAM_START
+ * +--------------+ <- K_MEM_VIRT_RAM_START
* | Undefined VM |
- * +--------------+ <- Z_KERNEL_VIRT_START (often == Z_VIRT_RAM_START)
+ * +--------------+ <- Z_KERNEL_VIRT_START (often == K_MEM_VIRT_RAM_START)
* | Mapping for |
* | main kernel |
* | image |
@@ -338,13 +338,13 @@ static void *virt_region_alloc(size_t size, size_t align)
* | |
* |..............| <- aligned_dest_addr + size
* | Unused |
- * +==============+ <- offset from Z_VIRT_RAM_END == dest_addr + alloc_size
+ * +==============+ <- offset from K_MEM_VIRT_RAM_END == dest_addr + alloc_size
* | ... |
* +--------------+
* | Mapping |
* +--------------+
* | Reserved |
- * +--------------+ <- Z_VIRT_RAM_END
+ * +--------------+ <- K_MEM_VIRT_RAM_END
*/
/* Free the two unused regions */
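The diagram reduces to a small piece of pointer arithmetic. The helper below is a sketch rather than the actual virt_region_alloc() code (the function name is made up; ROUND_UP() is the existing Zephyr macro from <zephyr/sys/util.h>); it computes the aligned block plus the head and tail leftovers that get released back to the bitmap.

#include <stddef.h>
#include <stdint.h>
#include <zephyr/sys/util.h>

/* Carve an aligned block of `size` bytes out of an over-sized
 * reservation of `alloc_size` bytes starting at dest_addr, reporting
 * the unused head and tail that are freed again afterwards.
 */
static uint8_t *carve_aligned(uint8_t *dest_addr, size_t alloc_size,
			      size_t size, size_t align,
			      size_t *head_unused, size_t *tail_unused)
{
	uint8_t *aligned_dest_addr = (uint8_t *)ROUND_UP((uintptr_t)dest_addr, align);

	*head_unused = aligned_dest_addr - dest_addr;
	*tail_unused = (dest_addr + alloc_size) - (aligned_dest_addr + size);

	return aligned_dest_addr;
}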
@@ -827,14 +827,14 @@ void k_mem_map_phys_bare(uint8_t **virt_ptr, uintptr_t phys, size_t size, uint32
*/
if (IN_RANGE(aligned_phys,
- (uintptr_t)Z_VIRT_RAM_START,
- (uintptr_t)(Z_VIRT_RAM_END - 1)) ||
+ (uintptr_t)K_MEM_VIRT_RAM_START,
+ (uintptr_t)(K_MEM_VIRT_RAM_END - 1)) ||
IN_RANGE(aligned_phys + aligned_size - 1,
- (uintptr_t)Z_VIRT_RAM_START,
- (uintptr_t)(Z_VIRT_RAM_END - 1))) {
- uint8_t *adjusted_start = MAX(dest_addr, Z_VIRT_RAM_START);
+ (uintptr_t)K_MEM_VIRT_RAM_START,
+ (uintptr_t)(K_MEM_VIRT_RAM_END - 1))) {
+ uint8_t *adjusted_start = MAX(dest_addr, K_MEM_VIRT_RAM_START);
uint8_t *adjusted_end = MIN(dest_addr + aligned_size,
- Z_VIRT_RAM_END);
+ K_MEM_VIRT_RAM_END);
size_t adjusted_sz = adjusted_end - adjusted_start;
num_bits = adjusted_sz / CONFIG_MMU_PAGE_SIZE;
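Finally, a worked example of the clamping above with purely illustrative addresses, assuming K_MEM_VIRT_RAM_START == 0x80000000, K_MEM_VIRT_RAM_END == 0xC0000000, 4 KiB pages, and a direct mapping where dest_addr equals aligned_phys.

/* aligned_phys = 0x7ffff000, aligned_size = 0x3000: the mapping spans
 * 0x7ffff000..0x80001fff, so only its tail overlaps virtual RAM.
 *   adjusted_start = MAX(0x7ffff000, 0x80000000)          = 0x80000000
 *   adjusted_end   = MIN(0x7ffff000 + 0x3000, 0xc0000000) = 0x80002000
 *   adjusted_sz    = 0x2000
 *   num_bits       = 0x2000 / 0x1000                      = 2
 * i.e. only the two pages that actually fall inside
 * [K_MEM_VIRT_RAM_START, K_MEM_VIRT_RAM_END) are reserved in the
 * virtual region bitmap.
 */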