kernel: mm: rename z_page_frame_* to k_mem_page_frame_*
Demand paging and page frame related bits are also renamed. This is part of a series to move memory management related code out of the Z_ namespace into its own namespace.

Signed-off-by: Daniel Leung <daniel.leung@intel.com>
parent 7715aa3341
commit 54af5dda84

14 changed files with 214 additions and 202 deletions
@@ -792,7 +792,7 @@ config ARCH_MAPS_ALL_RAM
     virtual addresses elsewhere, this is limited to only management of the
     virtual address space. The kernel's page frame ontology will not consider
     this mapping at all; non-kernel pages will be considered free (unless marked
-    as reserved) and Z_PAGE_FRAME_MAPPED will not be set.
+    as reserved) and K_MEM_PAGE_FRAME_MAPPED will not be set.

 config DCLS
     bool "Processor is configured in DCLS mode"

@@ -2004,11 +2004,12 @@ static void mark_addr_page_reserved(uintptr_t addr, size_t len)
     uintptr_t end = ROUND_UP(addr + len, CONFIG_MMU_PAGE_SIZE);

     for (; pos < end; pos += CONFIG_MMU_PAGE_SIZE) {
-        if (!z_is_page_frame(pos)) {
+        if (!k_mem_is_page_frame(pos)) {
             continue;
         }

-        z_page_frame_set(z_phys_to_page_frame(pos), Z_PAGE_FRAME_RESERVED);
+        k_mem_page_frame_set(k_mem_phys_to_page_frame(pos),
+                             K_MEM_PAGE_FRAME_RESERVED);
     }
 }

@@ -353,7 +353,7 @@ __weak void arch_reserved_pages_update(void)
     for (page = CONFIG_SRAM_BASE_ADDRESS, idx = 0;
          page < (uintptr_t)z_mapped_start;
          page += CONFIG_MMU_PAGE_SIZE, idx++) {
-        z_page_frame_set(&z_page_frames[idx], Z_PAGE_FRAME_RESERVED);
+        k_mem_page_frame_set(&k_mem_page_frames[idx], K_MEM_PAGE_FRAME_RESERVED);
     }
 }
 #endif /* CONFIG_ARCH_HAS_RESERVED_PAGE_FRAMES */

@@ -50,26 +50,26 @@ Page Frame
 A page frame is a page-sized physical memory region in RAM. It is a
 container where a data page may be placed. It is always referred to by
 physical address. Zephyr has a convention of using ``uintptr_t`` for physical
-addresses. For every page frame, a ``struct z_page_frame`` is instantiated to
+addresses. For every page frame, a ``struct k_mem_page_frame`` is instantiated to
 store metadata. Flags for each page frame:

-* ``Z_PAGE_FRAME_FREE`` indicates a page frame is unused and on the list of
+* ``K_MEM_PAGE_FRAME_FREE`` indicates a page frame is unused and on the list of
   free page frames. When this flag is set, none of the other flags are
   meaningful and they must not be modified.

-* ``Z_PAGE_FRAME_PINNED`` indicates a page frame is pinned in memory
+* ``K_MEM_PAGE_FRAME_PINNED`` indicates a page frame is pinned in memory
   and should never be paged out.

-* ``Z_PAGE_FRAME_RESERVED`` indicates a physical page reserved by hardware
+* ``K_MEM_PAGE_FRAME_RESERVED`` indicates a physical page reserved by hardware
   and should not be used at all.

-* ``Z_PAGE_FRAME_MAPPED`` is set when a physical page is mapped to
+* ``K_MEM_PAGE_FRAME_MAPPED`` is set when a physical page is mapped to
   virtual memory address.

-* ``Z_PAGE_FRAME_BUSY`` indicates a page frame is currently involved in
+* ``K_MEM_PAGE_FRAME_BUSY`` indicates a page frame is currently involved in
   a page-in/out operation.

-* ``Z_PAGE_FRAME_BACKED`` indicates a page frame has a clean copy
+* ``K_MEM_PAGE_FRAME_BACKED`` indicates a page frame has a clean copy
   in the backing store.

 K_MEM_SCRATCH_PAGE
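These flags are not manipulated directly by applications; the kernel-internal header touched later in this change (kernel/include/mmu.h) provides inline accessors for them. As a rough, purely illustrative sketch of how the renamed helpers fit together (the function name and the physical address are invented for the example, and it assumes a kernel-internal build context):

    /* Sketch only: classify one page frame using the renamed internal helpers.
     * "phys" is an arbitrary page-aligned physical address.
     */
    void classify_frame(uintptr_t phys)
    {
        if (!k_mem_is_page_frame(phys)) {
            return; /* outside the managed RAM range */
        }

        struct k_mem_page_frame *pf = k_mem_phys_to_page_frame(phys);

        if (k_mem_page_frame_is_reserved(pf)) {
            printk("0x%lx: reserved by hardware\n", phys);
        } else if (k_mem_page_frame_is_pinned(pf)) {
            printk("0x%lx: pinned, never paged out\n", phys);
        } else if (k_mem_page_frame_is_mapped(pf)) {
            printk("0x%lx: mapped at %p\n", phys, k_mem_page_frame_to_virt(pf));
        } else {
            printk("0x%lx: free or otherwise available\n", phys);
        }
    }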
@@ -229,7 +229,7 @@ __syscall void k_mem_paging_histogram_backing_store_page_out_get(
 * @param [out] dirty Whether the page to evict is dirty
 * @return The page frame to evict
 */
-struct z_page_frame *k_mem_paging_eviction_select(bool *dirty);
+struct k_mem_page_frame *k_mem_paging_eviction_select(bool *dirty);

 /**
 * Initialization function
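For out-of-tree eviction algorithms, the only change this rename requires is the type in the prototype above. A minimal, purely illustrative sketch of the renamed hook follows; a real algorithm, such as the NRU implementation further down in this diff, weighs accessed/dirty state rather than taking the first candidate:

    /* Sketch: return the first evictable frame and report whether it is dirty.
     * All identifiers used here are the renamed internal APIs from this commit.
     */
    struct k_mem_page_frame *k_mem_paging_eviction_select(bool *dirty)
    {
        uintptr_t phys;
        struct k_mem_page_frame *pf;

        K_MEM_PAGE_FRAME_FOREACH(phys, pf) {
            if (!k_mem_page_frame_is_evictable(pf)) {
                continue;
            }

            uintptr_t flags = arch_page_info_get(k_mem_page_frame_to_virt(pf),
                                                 NULL, false);

            *dirty = (flags & ARCH_DATA_PAGE_DIRTY) != 0UL;
            return pf;
        }

        return NULL; /* nothing evictable right now */
    }

    void k_mem_paging_eviction_init(void)
    {
        /* no state to set up for this trivial policy */
    }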
@@ -258,16 +258,16 @@ void k_mem_paging_eviction_init(void);
 * contents for later retrieval. The location value must be page-aligned.
 *
 * This function may be called multiple times on the same data page. If its
- * page frame has its Z_PAGE_FRAME_BACKED bit set, it is expected to return
+ * page frame has its K_MEM_PAGE_FRAME_BACKED bit set, it is expected to return
 * the previous backing store location for the data page containing a cached
 * clean copy. This clean copy may be updated on page-out, or used to
 * discard clean pages without needing to write out their contents.
 *
 * If the backing store is full, some other backing store location which caches
 * a loaded data page may be selected, in which case its associated page frame
- * will have the Z_PAGE_FRAME_BACKED bit cleared (as it is no longer cached).
+ * will have the K_MEM_PAGE_FRAME_BACKED bit cleared (as it is no longer cached).
 *
- * z_page_frame_to_virt(pf) will indicate the virtual address the page is
+ * k_mem_page_frame_to_virt(pf) will indicate the virtual address the page is
 * currently mapped to. Large, sparse backing stores which can contain the
 * entire address space may simply generate location tokens purely as a
 * function of that virtual address with no other management necessary.

@@ -285,7 +285,7 @@ void k_mem_paging_eviction_init(void);
 * @return 0 Success
 * @return -ENOMEM Backing store is full
 */
-int k_mem_paging_backing_store_location_get(struct z_page_frame *pf,
+int k_mem_paging_backing_store_location_get(struct k_mem_page_frame *pf,
                                             uintptr_t *location,
                                             bool page_fault);
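The simplest shape of this hook appears near the end of this diff, in the RAM-backed demo store: it derives the location token directly from the page's virtual address, so it never runs out of space. Reproduced here as a sketch with the renamed type:

    int k_mem_paging_backing_store_location_get(struct k_mem_page_frame *pf,
                                                uintptr_t *location,
                                                bool page_fault)
    {
        /* The token is simply the data page's virtual address, so -ENOMEM
         * can never be returned.
         */
        *location = POINTER_TO_UINT(k_mem_page_frame_to_virt(pf));

        return 0;
    }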
@@ -331,7 +331,7 @@ void k_mem_paging_backing_store_page_in(uintptr_t location);
 * Update internal accounting after a page-in
 *
 * This is invoked after k_mem_paging_backing_store_page_in() and interrupts
- * have been* re-locked, making it safe to access the z_page_frame data.
+ * have been* re-locked, making it safe to access the k_mem_page_frame data.
 * The location value will be the same passed to
 * k_mem_paging_backing_store_page_in().
 *

@@ -340,14 +340,14 @@ void k_mem_paging_backing_store_page_in(uintptr_t location);
 * if it is paged out again. This may be a no-op in some implementations.
 *
 * If the backing store caches paged-in data pages, this is the appropriate
- * time to set the Z_PAGE_FRAME_BACKED bit. The kernel only skips paging
+ * time to set the K_MEM_PAGE_FRAME_BACKED bit. The kernel only skips paging
 * out clean data pages if they are noted as clean in the page tables and the
- * Z_PAGE_FRAME_BACKED bit is set in their associated page frame.
+ * K_MEM_PAGE_FRAME_BACKED bit is set in their associated page frame.
 *
 * @param pf Page frame that was loaded in
 * @param location Location of where the loaded data page was retrieved
 */
-void k_mem_paging_backing_store_page_finalize(struct z_page_frame *pf,
+void k_mem_paging_backing_store_page_finalize(struct k_mem_page_frame *pf,
                                               uintptr_t location);

 /**
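A caching backing store would set K_MEM_PAGE_FRAME_BACKED in this finalize hook, as the comment above describes. A hedged sketch of that pattern follows; the per-frame location array is hypothetical and not part of this commit, merely following the suggestion in kernel/include/mmu.h to keep custom data in a parallel array indexed like k_mem_page_frames[]:

    /* Hypothetical bookkeeping array, one slot per page frame. */
    static uintptr_t cached_location[K_MEM_NUM_PAGE_FRAMES];

    void k_mem_paging_backing_store_page_finalize(struct k_mem_page_frame *pf,
                                                  uintptr_t location)
    {
        /* Remember where the clean copy lives and mark the frame as backed,
         * so clean pages can later be dropped without a write-out.
         */
        cached_location[pf - k_mem_page_frames] = location;
        k_mem_page_frame_set(pf, K_MEM_PAGE_FRAME_BACKED);
    }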
@@ -360,7 +360,7 @@ void k_mem_paging_backing_store_page_finalize(struct z_page_frame *pf,
 * - Initialize any internal data structures and accounting for the backing
 *   store.
 * - If the backing store already contains all or some loaded kernel data pages
- *   at boot time, Z_PAGE_FRAME_BACKED should be appropriately set for their
+ *   at boot time, K_MEM_PAGE_FRAME_BACKED should be appropriately set for their
 *   associated page frames, and any internal accounting set up appropriately.
 */
 void k_mem_paging_backing_store_init(void);

@@ -292,7 +292,7 @@ extern char lnkr_boot_noinit_size[];
 /* lnkr_pinned_start[] and lnkr_pinned_end[] must encapsulate
 * all the pinned sections as these are used by
 * the MMU code to mark the physical page frames with
- * Z_PAGE_FRAME_PINNED.
+ * K_MEM_PAGE_FRAME_PINNED.
 */
 extern char lnkr_pinned_start[];
 extern char lnkr_pinned_end[];

@@ -341,7 +341,7 @@ int arch_page_phys_get(void *virt, uintptr_t *phys);
 * example of this is reserved regions in the first megabyte on PC-like systems.
 *
 * Implementations of this function should mark all relevant entries in
- * z_page_frames with K_PAGE_FRAME_RESERVED. This function is called at
+ * k_mem_page_frames with K_PAGE_FRAME_RESERVED. This function is called at
 * early system initialization with mm_lock held.
 */
 void arch_reserved_pages_update(void);

@@ -15,17 +15,6 @@
 #include <zephyr/kernel/mm.h>
 #include <zephyr/linker/linker-defs.h>

-/*
- * At present, page frame management is only done for main system RAM,
- * and we generate paging structures based on CONFIG_SRAM_BASE_ADDRESS
- * and CONFIG_SRAM_SIZE.
- *
- * If we have other RAM regions (DCCM, etc) these typically have special
- * properties and shouldn't be used generically for demand paging or
- * anonymous mappings. We don't currently maintain an ontology of these in the
- * core kernel.
- */
-
 /** Start address of physical memory. */
 #define K_MEM_PHYS_RAM_START ((uintptr_t)CONFIG_SRAM_BASE_ADDRESS)

@@ -35,8 +24,6 @@
 /** End address (exclusive) of physical memory. */
 #define K_MEM_PHYS_RAM_END (K_MEM_PHYS_RAM_START + K_MEM_PHYS_RAM_SIZE)

-#define Z_NUM_PAGE_FRAMES (K_MEM_PHYS_RAM_SIZE / (size_t)CONFIG_MMU_PAGE_SIZE)
-
 /** Start address of virtual memory. */
 #define K_MEM_VIRT_RAM_START ((uint8_t *)CONFIG_KERNEL_VM_BASE)

@@ -112,28 +99,46 @@
 #define K_MEM_VM_FREE_START K_MEM_KERNEL_VIRT_END
 #endif /* CONFIG_ARCH_MAPS_ALL_RAM */

-/*
+/**
+ * @defgroup kernel_mm_page_frame_apis Kernel Memory Page Frame Management APIs
+ * @ingroup kernel_mm_internal_apis
+ * @{
+ *
 * Macros and data structures for physical page frame accounting,
 * APIs for use by eviction and backing store algorithms. This code
 * is otherwise not application-facing.
 */

+/**
+ * @brief Number of page frames.
+ *
+ * At present, page frame management is only done for main system RAM,
+ * and we generate paging structures based on CONFIG_SRAM_BASE_ADDRESS
+ * and CONFIG_SRAM_SIZE.
+ *
+ * If we have other RAM regions (DCCM, etc) these typically have special
+ * properties and shouldn't be used generically for demand paging or
+ * anonymous mappings. We don't currently maintain an ontology of these in the
+ * core kernel.
+ */
+#define K_MEM_NUM_PAGE_FRAMES (K_MEM_PHYS_RAM_SIZE / (size_t)CONFIG_MMU_PAGE_SIZE)
+
 /*
- * z_page_frame flags bits
+ * k_mem_page_frame flags bits
 *
 * Requirements:
- * - Z_PAGE_FRAME_FREE must be one of the possible sfnode flag bits
+ * - K_MEM_PAGE_FRAME_FREE must be one of the possible sfnode flag bits
 * - All bit values must be lower than CONFIG_MMU_PAGE_SIZE
 */

 /** This physical page is free and part of the free list */
-#define Z_PAGE_FRAME_FREE BIT(0)
+#define K_MEM_PAGE_FRAME_FREE BIT(0)

 /** This physical page is reserved by hardware; we will never use it */
-#define Z_PAGE_FRAME_RESERVED BIT(1)
+#define K_MEM_PAGE_FRAME_RESERVED BIT(1)

 /** This page contains critical kernel data and will never be swapped */
-#define Z_PAGE_FRAME_PINNED BIT(2)
+#define K_MEM_PAGE_FRAME_PINNED BIT(2)

 /**
 * This physical page is mapped to some virtual memory address
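For a sense of scale (illustrative numbers, not taken from this commit): with a 4 KiB MMU page size and 4 MiB of system RAM, K_MEM_NUM_PAGE_FRAMES evaluates to 4 MiB / 4 KiB = 1024, so the k_mem_page_frames[] array declared further below carries 1024 entries — which is why the flag bits above are packed into each frame's va_and_flags word rather than stored separately.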
@@ -141,17 +146,17 @@
 * Currently, we just support one mapping per page frame. If a page frame
 * is mapped to multiple virtual pages then it must be pinned.
 */
-#define Z_PAGE_FRAME_MAPPED BIT(3)
+#define K_MEM_PAGE_FRAME_MAPPED BIT(3)

 /**
 * This page frame is currently involved in a page-in/out operation
 */
-#define Z_PAGE_FRAME_BUSY BIT(4)
+#define K_MEM_PAGE_FRAME_BUSY BIT(4)

 /**
 * This page frame has a clean copy in the backing store
 */
-#define Z_PAGE_FRAME_BACKED BIT(5)
+#define K_MEM_PAGE_FRAME_BACKED BIT(5)

 /**
 * Data structure for physical page frames
@@ -159,17 +164,17 @@
 * An array of these is instantiated, one element per physical RAM page.
 * Hence it's necessary to constrain its size as much as possible.
 */
-struct z_page_frame {
+struct k_mem_page_frame {
     union {
         /*
-         * If mapped, Z_PAGE_FRAME_* flags and virtual address
+         * If mapped, K_MEM_PAGE_FRAME_* flags and virtual address
          * this page is mapped to.
          */
         uintptr_t va_and_flags;

         /*
         * If unmapped and available, free pages list membership
-         * with the Z_PAGE_FRAME_FREE flag.
+         * with the K_MEM_PAGE_FRAME_FREE flag.
         */
         sys_sfnode_t node;
     };
@@ -177,68 +182,68 @@ struct z_page_frame {
     /* Backing store and eviction algorithms may both need to
      * require additional per-frame custom data for accounting purposes.
      * They should declare their own array with indices matching
-     * z_page_frames[] ones whenever possible.
+     * k_mem_page_frames[] ones whenever possible.
      * They may also want additional flags bits that could be stored here
      * and they shouldn't clobber each other. At all costs the total
-     * size of struct z_page_frame must be minimized.
+     * size of struct k_mem_page_frame must be minimized.
      */
 };

 /* Note: this must be false for the other flag bits to be valid */
-static inline bool z_page_frame_is_free(struct z_page_frame *pf)
+static inline bool k_mem_page_frame_is_free(struct k_mem_page_frame *pf)
 {
-    return (pf->va_and_flags & Z_PAGE_FRAME_FREE) != 0U;
+    return (pf->va_and_flags & K_MEM_PAGE_FRAME_FREE) != 0U;
 }

-static inline bool z_page_frame_is_pinned(struct z_page_frame *pf)
+static inline bool k_mem_page_frame_is_pinned(struct k_mem_page_frame *pf)
 {
-    return (pf->va_and_flags & Z_PAGE_FRAME_PINNED) != 0U;
+    return (pf->va_and_flags & K_MEM_PAGE_FRAME_PINNED) != 0U;
 }

-static inline bool z_page_frame_is_reserved(struct z_page_frame *pf)
+static inline bool k_mem_page_frame_is_reserved(struct k_mem_page_frame *pf)
 {
-    return (pf->va_and_flags & Z_PAGE_FRAME_RESERVED) != 0U;
+    return (pf->va_and_flags & K_MEM_PAGE_FRAME_RESERVED) != 0U;
 }

-static inline bool z_page_frame_is_mapped(struct z_page_frame *pf)
+static inline bool k_mem_page_frame_is_mapped(struct k_mem_page_frame *pf)
 {
-    return (pf->va_and_flags & Z_PAGE_FRAME_MAPPED) != 0U;
+    return (pf->va_and_flags & K_MEM_PAGE_FRAME_MAPPED) != 0U;
 }

-static inline bool z_page_frame_is_busy(struct z_page_frame *pf)
+static inline bool k_mem_page_frame_is_busy(struct k_mem_page_frame *pf)
 {
-    return (pf->va_and_flags & Z_PAGE_FRAME_BUSY) != 0U;
+    return (pf->va_and_flags & K_MEM_PAGE_FRAME_BUSY) != 0U;
 }

-static inline bool z_page_frame_is_backed(struct z_page_frame *pf)
+static inline bool k_mem_page_frame_is_backed(struct k_mem_page_frame *pf)
 {
-    return (pf->va_and_flags & Z_PAGE_FRAME_BACKED) != 0U;
+    return (pf->va_and_flags & K_MEM_PAGE_FRAME_BACKED) != 0U;
 }

-static inline bool z_page_frame_is_evictable(struct z_page_frame *pf)
+static inline bool k_mem_page_frame_is_evictable(struct k_mem_page_frame *pf)
 {
-    return (!z_page_frame_is_free(pf) &&
-            !z_page_frame_is_reserved(pf) &&
-            z_page_frame_is_mapped(pf) &&
-            !z_page_frame_is_pinned(pf) &&
-            !z_page_frame_is_busy(pf));
+    return (!k_mem_page_frame_is_free(pf) &&
+            !k_mem_page_frame_is_reserved(pf) &&
+            k_mem_page_frame_is_mapped(pf) &&
+            !k_mem_page_frame_is_pinned(pf) &&
+            !k_mem_page_frame_is_busy(pf));
 }

 /* If true, page is not being used for anything, is not reserved, is not
 * a member of some free pages list, isn't busy, and is ready to be mapped
 * in memory
 */
-static inline bool z_page_frame_is_available(struct z_page_frame *page)
+static inline bool k_mem_page_frame_is_available(struct k_mem_page_frame *page)
 {
     return page->va_and_flags == 0U;
 }

-static inline void z_page_frame_set(struct z_page_frame *pf, uint8_t flags)
+static inline void k_mem_page_frame_set(struct k_mem_page_frame *pf, uint8_t flags)
 {
     pf->va_and_flags |= flags;
 }

-static inline void z_page_frame_clear(struct z_page_frame *pf, uint8_t flags)
+static inline void k_mem_page_frame_clear(struct k_mem_page_frame *pf, uint8_t flags)
 {
     /* ensure bit inversion to follow is done on the proper type width */
     uintptr_t wide_flags = flags;
@@ -246,46 +251,46 @@ static inline void z_page_frame_clear(struct z_page_frame *pf, uint8_t flags)
     pf->va_and_flags &= ~wide_flags;
 }

-static inline void z_assert_phys_aligned(uintptr_t phys)
+static inline void k_mem_assert_phys_aligned(uintptr_t phys)
 {
     __ASSERT(phys % CONFIG_MMU_PAGE_SIZE == 0U,
              "physical address 0x%lx is not page-aligned", phys);
     (void)phys;
 }

-extern struct z_page_frame z_page_frames[Z_NUM_PAGE_FRAMES];
+extern struct k_mem_page_frame k_mem_page_frames[K_MEM_NUM_PAGE_FRAMES];

-static inline uintptr_t z_page_frame_to_phys(struct z_page_frame *pf)
+static inline uintptr_t k_mem_page_frame_to_phys(struct k_mem_page_frame *pf)
 {
-    return (uintptr_t)((pf - z_page_frames) * CONFIG_MMU_PAGE_SIZE) +
+    return (uintptr_t)((pf - k_mem_page_frames) * CONFIG_MMU_PAGE_SIZE) +
            K_MEM_PHYS_RAM_START;
 }

 /* Presumes there is but one mapping in the virtual address space */
-static inline void *z_page_frame_to_virt(struct z_page_frame *pf)
+static inline void *k_mem_page_frame_to_virt(struct k_mem_page_frame *pf)
 {
     uintptr_t flags_mask = CONFIG_MMU_PAGE_SIZE - 1;

     return (void *)(pf->va_and_flags & ~flags_mask);
 }

-static inline bool z_is_page_frame(uintptr_t phys)
+static inline bool k_mem_is_page_frame(uintptr_t phys)
 {
-    z_assert_phys_aligned(phys);
+    k_mem_assert_phys_aligned(phys);
     return IN_RANGE(phys, (uintptr_t)K_MEM_PHYS_RAM_START,
                     (uintptr_t)(K_MEM_PHYS_RAM_END - 1));
 }

-static inline struct z_page_frame *z_phys_to_page_frame(uintptr_t phys)
+static inline struct k_mem_page_frame *k_mem_phys_to_page_frame(uintptr_t phys)
 {
-    __ASSERT(z_is_page_frame(phys),
+    __ASSERT(k_mem_is_page_frame(phys),
              "0x%lx not an SRAM physical address", phys);

-    return &z_page_frames[(phys - K_MEM_PHYS_RAM_START) /
-                          CONFIG_MMU_PAGE_SIZE];
+    return &k_mem_page_frames[(phys - K_MEM_PHYS_RAM_START) /
+                              CONFIG_MMU_PAGE_SIZE];
 }

-static inline void z_mem_assert_virtual_region(uint8_t *addr, size_t size)
+static inline void k_mem_assert_virtual_region(uint8_t *addr, size_t size)
 {
     __ASSERT((uintptr_t)addr % CONFIG_MMU_PAGE_SIZE == 0U,
              "unaligned addr %p", addr);
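A quick self-check of the conversion helpers defined above, using only symbols from this header (illustrative only; such an assertion loop is not part of this commit):

    /* Frame -> phys -> frame must be the identity for every managed frame. */
    static void check_page_frame_roundtrip(void)
    {
        for (size_t i = 0; i < K_MEM_NUM_PAGE_FRAMES; i++) {
            struct k_mem_page_frame *pf = &k_mem_page_frames[i];
            uintptr_t phys = k_mem_page_frame_to_phys(pf);

            __ASSERT(k_mem_is_page_frame(phys), "0x%lx out of range", phys);
            __ASSERT(k_mem_phys_to_page_frame(phys) == pf, "round trip failed");
        }
    }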
@@ -302,17 +307,22 @@ static inline void z_mem_assert_virtual_region(uint8_t *addr, size_t size)
              "invalid virtual address region %p (%zu)", addr, size);
 }

-/* Debug function, pretty-print page frame information for all frames
+/**
+ * @brief Pretty-print page frame information for all page frames.
+ *
+ * Debug function, pretty-print page frame information for all frames
 * concisely to printk.
 */
-void z_page_frames_dump(void);
+void k_mem_page_frames_dump(void);

 /* Convenience macro for iterating over all page frames */
-#define Z_PAGE_FRAME_FOREACH(_phys, _pageframe) \
-    for ((_phys) = K_MEM_PHYS_RAM_START, (_pageframe) = z_page_frames; \
+#define K_MEM_PAGE_FRAME_FOREACH(_phys, _pageframe) \
+    for ((_phys) = K_MEM_PHYS_RAM_START, (_pageframe) = k_mem_page_frames; \
          (_phys) < K_MEM_PHYS_RAM_END; \
          (_phys) += CONFIG_MMU_PAGE_SIZE, (_pageframe)++)

+/** @} */
+
 /**
 * @def K_MEM_VM_RESERVED
 * @brief Reserve space at the end of virtual memory.
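The renamed iterator is used exactly like its predecessor; the kernel's own users appear in kernel/mmu.c below. As a small illustrative sketch (not part of this commit), a diagnostic counter over all frames might look like this:

    /* Sketch: count how many page frames currently have a virtual mapping. */
    static size_t count_mapped_frames(void)
    {
        uintptr_t phys;
        struct k_mem_page_frame *pf;
        size_t n = 0;

        K_MEM_PAGE_FRAME_FOREACH(phys, pf) {
            if (k_mem_page_frame_is_mapped(pf)) {
                n++;
            }
        }

        return n;
    }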
@@ -365,7 +375,7 @@ unsigned long z_num_pagefaults_get(void);
 * @retval 0 Success
 * @retval -ENOMEM Insufficient backing store space
 */
-int z_page_frame_evict(uintptr_t phys);
+int k_mem_page_frame_evict(uintptr_t phys);

 /**
 * Handle a page fault for a virtual data page

kernel/mmu.c (180 lines changed)
@@ -31,7 +31,7 @@ LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);
 * - A page frame is a page-sized physical memory region in RAM. It is a
 * container where a data page may be placed. It is always referred to by
 * physical address. We have a convention of using uintptr_t for physical
- * addresses. We instantiate a struct z_page_frame to store metadata for
+ * addresses. We instantiate a struct k_mem_page_frame to store metadata for
 * every page frame.
 *
 * - A data page is a page-sized region of data. It may exist in a page frame,

@@ -51,10 +51,10 @@ struct k_spinlock z_mm_lock;
 */

 /* Database of all RAM page frames */
-struct z_page_frame z_page_frames[Z_NUM_PAGE_FRAMES];
+struct k_mem_page_frame k_mem_page_frames[K_MEM_NUM_PAGE_FRAMES];

 #if __ASSERT_ON
-/* Indicator that z_page_frames has been initialized, many of these APIs do
+/* Indicator that k_mem_page_frames has been initialized, many of these APIs do
 * not work before POST_KERNEL
 */
 static bool page_frames_initialized;

@@ -79,24 +79,24 @@ static bool page_frames_initialized;
 #endif /* COLOR_PAGE_FRAMES */

 /* LCOV_EXCL_START */
-static void page_frame_dump(struct z_page_frame *pf)
+static void page_frame_dump(struct k_mem_page_frame *pf)
 {
-    if (z_page_frame_is_free(pf)) {
+    if (k_mem_page_frame_is_free(pf)) {
         COLOR(GREY);
         printk("-");
-    } else if (z_page_frame_is_reserved(pf)) {
+    } else if (k_mem_page_frame_is_reserved(pf)) {
         COLOR(CYAN);
         printk("R");
-    } else if (z_page_frame_is_busy(pf)) {
+    } else if (k_mem_page_frame_is_busy(pf)) {
         COLOR(MAGENTA);
         printk("B");
-    } else if (z_page_frame_is_pinned(pf)) {
+    } else if (k_mem_page_frame_is_pinned(pf)) {
         COLOR(YELLOW);
         printk("P");
-    } else if (z_page_frame_is_available(pf)) {
+    } else if (k_mem_page_frame_is_available(pf)) {
         COLOR(GREY);
         printk(".");
-    } else if (z_page_frame_is_mapped(pf)) {
+    } else if (k_mem_page_frame_is_mapped(pf)) {
         COLOR(DEFAULT);
         printk("M");
     } else {

@@ -105,7 +105,7 @@ static void page_frame_dump(struct z_page_frame *pf)
     }
 }

-void z_page_frames_dump(void)
+void k_mem_page_frames_dump(void)
 {
     int column = 0;

@@ -113,8 +113,8 @@ void z_page_frames_dump(void)
     printk("Physical memory from 0x%lx to 0x%lx\n",
            K_MEM_PHYS_RAM_START, K_MEM_PHYS_RAM_END);

-    for (int i = 0; i < Z_NUM_PAGE_FRAMES; i++) {
-        struct z_page_frame *pf = &z_page_frames[i];
+    for (int i = 0; i < K_MEM_NUM_PAGE_FRAMES; i++) {
+        struct k_mem_page_frame *pf = &k_mem_page_frames[i];

         page_frame_dump(pf);

@@ -392,20 +392,20 @@ static sys_sflist_t free_page_frame_list;
 static size_t z_free_page_count;

 #define PF_ASSERT(pf, expr, fmt, ...) \
-    __ASSERT(expr, "page frame 0x%lx: " fmt, z_page_frame_to_phys(pf), \
+    __ASSERT(expr, "page frame 0x%lx: " fmt, k_mem_page_frame_to_phys(pf), \
             ##__VA_ARGS__)

 /* Get an unused page frame. don't care which one, or NULL if there are none */
-static struct z_page_frame *free_page_frame_list_get(void)
+static struct k_mem_page_frame *free_page_frame_list_get(void)
 {
     sys_sfnode_t *node;
-    struct z_page_frame *pf = NULL;
+    struct k_mem_page_frame *pf = NULL;

     node = sys_sflist_get(&free_page_frame_list);
     if (node != NULL) {
         z_free_page_count--;
-        pf = CONTAINER_OF(node, struct z_page_frame, node);
-        PF_ASSERT(pf, z_page_frame_is_free(pf),
+        pf = CONTAINER_OF(node, struct k_mem_page_frame, node);
+        PF_ASSERT(pf, k_mem_page_frame_is_free(pf),
                   "on free list but not free");
         pf->va_and_flags = 0;
     }

@@ -414,12 +414,12 @@ static struct z_page_frame *free_page_frame_list_get(void)
 }

 /* Release a page frame back into the list of free pages */
-static void free_page_frame_list_put(struct z_page_frame *pf)
+static void free_page_frame_list_put(struct k_mem_page_frame *pf)
 {
-    PF_ASSERT(pf, z_page_frame_is_available(pf),
+    PF_ASSERT(pf, k_mem_page_frame_is_available(pf),
               "unavailable page put on free list");

-    sys_sfnode_init(&pf->node, Z_PAGE_FRAME_FREE);
+    sys_sfnode_init(&pf->node, K_MEM_PAGE_FRAME_FREE);
     sys_sflist_append(&free_page_frame_list, &pf->node);
     z_free_page_count++;
 }

@@ -429,7 +429,7 @@ static void free_page_frame_list_init(void)
     sys_sflist_init(&free_page_frame_list);
 }

-static void page_frame_free_locked(struct z_page_frame *pf)
+static void page_frame_free_locked(struct k_mem_page_frame *pf)
 {
     pf->va_and_flags = 0;
     free_page_frame_list_put(pf);

@@ -442,11 +442,11 @@ static void page_frame_free_locked(struct z_page_frame *pf)
 /* Called after the frame is mapped in the arch layer, to update our
 * local ontology (and do some assertions while we're at it)
 */
-static void frame_mapped_set(struct z_page_frame *pf, void *addr)
+static void frame_mapped_set(struct k_mem_page_frame *pf, void *addr)
 {
-    PF_ASSERT(pf, !z_page_frame_is_free(pf),
+    PF_ASSERT(pf, !k_mem_page_frame_is_free(pf),
               "attempted to map a page frame on the free list");
-    PF_ASSERT(pf, !z_page_frame_is_reserved(pf),
+    PF_ASSERT(pf, !k_mem_page_frame_is_reserved(pf),
               "attempted to map a reserved page frame");

     /* We do allow multiple mappings for pinned page frames

@@ -454,15 +454,15 @@ static void frame_mapped_set(struct z_page_frame *pf, void *addr)
      * This is uncommon, use-cases are for things like the
      * Zephyr equivalent of VSDOs
      */
-    PF_ASSERT(pf, !z_page_frame_is_mapped(pf) || z_page_frame_is_pinned(pf),
+    PF_ASSERT(pf, !k_mem_page_frame_is_mapped(pf) || k_mem_page_frame_is_pinned(pf),
               "non-pinned and already mapped to %p",
-              z_page_frame_to_virt(pf));
+              k_mem_page_frame_to_virt(pf));

     uintptr_t flags_mask = CONFIG_MMU_PAGE_SIZE - 1;
     uintptr_t va = (uintptr_t)addr & ~flags_mask;

     pf->va_and_flags &= flags_mask;
-    pf->va_and_flags |= va | Z_PAGE_FRAME_MAPPED;
+    pf->va_and_flags |= va | K_MEM_PAGE_FRAME_MAPPED;
 }

 /* LCOV_EXCL_START */

@@ -479,15 +479,15 @@ static void frame_mapped_set(struct z_page_frame *pf, void *addr)
 static int virt_to_page_frame(void *virt, uintptr_t *phys)
 {
     uintptr_t paddr;
-    struct z_page_frame *pf;
+    struct k_mem_page_frame *pf;
     int ret = -EFAULT;

-    Z_PAGE_FRAME_FOREACH(paddr, pf) {
-        if (z_page_frame_is_mapped(pf)) {
-            if (virt == z_page_frame_to_virt(pf)) {
+    K_MEM_PAGE_FRAME_FOREACH(paddr, pf) {
+        if (k_mem_page_frame_is_mapped(pf)) {
+            if (virt == k_mem_page_frame_to_virt(pf)) {
                 ret = 0;
                 if (phys != NULL) {
-                    *phys = z_page_frame_to_phys(pf);
+                    *phys = k_mem_page_frame_to_phys(pf);
                 }
                 break;
             }

@@ -501,7 +501,7 @@ static int virt_to_page_frame(void *virt, uintptr_t *phys)
 __weak FUNC_ALIAS(virt_to_page_frame, arch_page_phys_get, int);

 #ifdef CONFIG_DEMAND_PAGING
-static int page_frame_prepare_locked(struct z_page_frame *pf, bool *dirty_ptr,
+static int page_frame_prepare_locked(struct k_mem_page_frame *pf, bool *dirty_ptr,
                                      bool page_in, uintptr_t *location_ptr);

 static inline void do_backing_store_page_in(uintptr_t location);

@@ -519,7 +519,7 @@ static inline void do_backing_store_page_out(uintptr_t location);
 */
 static int map_anon_page(void *addr, uint32_t flags)
 {
-    struct z_page_frame *pf;
+    struct k_mem_page_frame *pf;
     uintptr_t phys;
     bool lock = (flags & K_MEM_MAP_LOCK) != 0U;

@@ -533,8 +533,8 @@ static int map_anon_page(void *addr, uint32_t flags)
         pf = k_mem_paging_eviction_select(&dirty);
         __ASSERT(pf != NULL, "failed to get a page frame");
         LOG_DBG("evicting %p at 0x%lx",
-                z_page_frame_to_virt(pf),
-                z_page_frame_to_phys(pf));
+                k_mem_page_frame_to_virt(pf),
+                k_mem_page_frame_to_phys(pf));
         ret = page_frame_prepare_locked(pf, &dirty, false, &location);
         if (ret != 0) {
             return -ENOMEM;

@@ -548,11 +548,11 @@ static int map_anon_page(void *addr, uint32_t flags)
 #endif /* CONFIG_DEMAND_PAGING */
     }

-    phys = z_page_frame_to_phys(pf);
+    phys = k_mem_page_frame_to_phys(pf);
     arch_mem_map(addr, phys, CONFIG_MMU_PAGE_SIZE, flags | K_MEM_CACHE_WB);

     if (lock) {
-        z_page_frame_set(pf, Z_PAGE_FRAME_PINNED);
+        k_mem_page_frame_set(pf, K_MEM_PAGE_FRAME_PINNED);
     }
     frame_mapped_set(pf, addr);

@@ -649,7 +649,7 @@ void k_mem_unmap_phys_guard(void *addr, size_t size, bool is_anon)
 {
     uintptr_t phys;
     uint8_t *pos;
-    struct z_page_frame *pf;
+    struct k_mem_page_frame *pf;
     k_spinlock_key_t key;
     size_t total_size;
     int ret;

@@ -661,7 +661,7 @@ void k_mem_unmap_phys_guard(void *addr, size_t size, bool is_anon)
     * for two guard pages.
     */
    pos = (uint8_t *)addr - CONFIG_MMU_PAGE_SIZE;
-    z_mem_assert_virtual_region(pos, size + (CONFIG_MMU_PAGE_SIZE * 2));
+    k_mem_assert_virtual_region(pos, size + (CONFIG_MMU_PAGE_SIZE * 2));

    key = k_spin_lock(&z_mm_lock);

@@ -699,9 +699,9 @@ void k_mem_unmap_phys_guard(void *addr, size_t size, bool is_anon)
         goto out;
     }

-    __ASSERT(z_is_page_frame(phys),
+    __ASSERT(k_mem_is_page_frame(phys),
              "%s: 0x%lx is not a page frame", __func__, phys);
-    if (!z_is_page_frame(phys)) {
+    if (!k_mem_is_page_frame(phys)) {
         /* Physical address has no corresponding page frame
          * description in the page frame array.
          * This should not happen. Do not continue.

@@ -710,11 +710,11 @@ void k_mem_unmap_phys_guard(void *addr, size_t size, bool is_anon)
     }

     /* Grab the corresponding page frame from physical address */
-    pf = z_phys_to_page_frame(phys);
+    pf = k_mem_phys_to_page_frame(phys);

-    __ASSERT(z_page_frame_is_mapped(pf),
+    __ASSERT(k_mem_page_frame_is_mapped(pf),
              "%s: 0x%lx is not a mapped page frame", __func__, phys);
-    if (!z_page_frame_is_mapped(pf)) {
+    if (!k_mem_page_frame_is_mapped(pf)) {
         /* Page frame is not marked mapped.
          * This should not happen. Do not continue.
          */

@@ -925,7 +925,7 @@ size_t k_mem_region_align(uintptr_t *aligned_addr, size_t *aligned_size,
 static void mark_linker_section_pinned(void *start_addr, void *end_addr,
                                        bool pin)
 {
-    struct z_page_frame *pf;
+    struct k_mem_page_frame *pf;
     uint8_t *addr;

     uintptr_t pinned_start = ROUND_DOWN(POINTER_TO_UINT(start_addr),

@@ -936,13 +936,13 @@ static void mark_linker_section_pinned(void *start_addr, void *end_addr,

     VIRT_FOREACH(UINT_TO_POINTER(pinned_start), pinned_size, addr)
     {
-        pf = z_phys_to_page_frame(K_MEM_BOOT_VIRT_TO_PHYS(addr));
+        pf = k_mem_phys_to_page_frame(K_MEM_BOOT_VIRT_TO_PHYS(addr));
         frame_mapped_set(pf, addr);

         if (pin) {
-            z_page_frame_set(pf, Z_PAGE_FRAME_PINNED);
+            k_mem_page_frame_set(pf, K_MEM_PAGE_FRAME_PINNED);
         } else {
-            z_page_frame_clear(pf, Z_PAGE_FRAME_PINNED);
+            k_mem_page_frame_clear(pf, K_MEM_PAGE_FRAME_PINNED);
         }
     }
 }

@@ -952,7 +952,7 @@ void z_mem_manage_init(void)
 {
     uintptr_t phys;
     uint8_t *addr;
-    struct z_page_frame *pf;
+    struct k_mem_page_frame *pf;
     k_spinlock_key_t key = k_spin_lock(&z_mm_lock);

     free_page_frame_list_init();

@@ -961,7 +961,7 @@ void z_mem_manage_init(void)

 #ifdef CONFIG_ARCH_HAS_RESERVED_PAGE_FRAMES
     /* If some page frames are unavailable for use as memory, arch
-     * code will mark Z_PAGE_FRAME_RESERVED in their flags
+     * code will mark K_MEM_PAGE_FRAME_RESERVED in their flags
     */
     arch_reserved_pages_update();
 #endif /* CONFIG_ARCH_HAS_RESERVED_PAGE_FRAMES */

@@ -972,7 +972,7 @@ void z_mem_manage_init(void)
     */
    VIRT_FOREACH(K_MEM_KERNEL_VIRT_START, K_MEM_KERNEL_VIRT_SIZE, addr)
    {
-        pf = z_phys_to_page_frame(K_MEM_BOOT_VIRT_TO_PHYS(addr));
+        pf = k_mem_phys_to_page_frame(K_MEM_BOOT_VIRT_TO_PHYS(addr));
         frame_mapped_set(pf, addr);

         /* TODO: for now we pin the whole Zephyr image. Demand paging

@@ -985,7 +985,7 @@ void z_mem_manage_init(void)
          * structures, and any code used to perform page fault
          * handling, page-ins, etc.
          */
-        z_page_frame_set(pf, Z_PAGE_FRAME_PINNED);
+        k_mem_page_frame_set(pf, K_MEM_PAGE_FRAME_PINNED);
     }
 #endif /* CONFIG_LINKER_GENERIC_SECTIONS_PRESENT_AT_BOOT */

@@ -1004,8 +1004,8 @@ void z_mem_manage_init(void)
     /* Any remaining pages that aren't mapped, reserved, or pinned get
      * added to the free pages list
     */
-    Z_PAGE_FRAME_FOREACH(phys, pf) {
-        if (z_page_frame_is_available(pf)) {
+    K_MEM_PAGE_FRAME_FOREACH(phys, pf) {
+        if (k_mem_page_frame_is_available(pf)) {
             free_page_frame_list_put(pf);
         }
     }

@@ -1127,7 +1127,7 @@ BUILD_ASSERT(!IS_ENABLED(CONFIG_SMP));
 static void virt_region_foreach(void *addr, size_t size,
                                 void (*func)(void *))
 {
-    z_mem_assert_virtual_region(addr, size);
+    k_mem_assert_virtual_region(addr, size);

     for (size_t offset = 0; offset < size; offset += CONFIG_MMU_PAGE_SIZE) {
         func((uint8_t *)addr + offset);
@@ -1150,15 +1150,15 @@ static void virt_region_foreach(void *addr, size_t size,
 *
 * Returns -ENOMEM if the backing store is full
 */
-static int page_frame_prepare_locked(struct z_page_frame *pf, bool *dirty_ptr,
+static int page_frame_prepare_locked(struct k_mem_page_frame *pf, bool *dirty_ptr,
                                      bool page_fault, uintptr_t *location_ptr)
 {
     uintptr_t phys;
     int ret;
     bool dirty = *dirty_ptr;

-    phys = z_page_frame_to_phys(pf);
-    __ASSERT(!z_page_frame_is_pinned(pf), "page frame 0x%lx is pinned",
+    phys = k_mem_page_frame_to_phys(pf);
+    __ASSERT(!k_mem_page_frame_is_pinned(pf), "page frame 0x%lx is pinned",
              phys);

     /* If the backing store doesn't have a copy of the page, even if it

@@ -1172,31 +1172,31 @@ static int page_frame_prepare_locked(struct z_page_frame *pf, bool *dirty_ptr,
      * evicted from the backing store to make room for other evicted
      * pages.
      */
-    if (z_page_frame_is_mapped(pf)) {
-        dirty = dirty || !z_page_frame_is_backed(pf);
+    if (k_mem_page_frame_is_mapped(pf)) {
+        dirty = dirty || !k_mem_page_frame_is_backed(pf);
     }

     if (dirty || page_fault) {
         arch_mem_scratch(phys);
     }

-    if (z_page_frame_is_mapped(pf)) {
+    if (k_mem_page_frame_is_mapped(pf)) {
         ret = k_mem_paging_backing_store_location_get(pf, location_ptr,
                                                       page_fault);
         if (ret != 0) {
             LOG_ERR("out of backing store memory");
             return -ENOMEM;
         }
-        arch_mem_page_out(z_page_frame_to_virt(pf), *location_ptr);
+        arch_mem_page_out(k_mem_page_frame_to_virt(pf), *location_ptr);
     } else {
         /* Shouldn't happen unless this function is mis-used */
         __ASSERT(!dirty, "un-mapped page determined to be dirty");
     }
 #ifdef CONFIG_DEMAND_PAGING_ALLOW_IRQ
-    /* Mark as busy so that z_page_frame_is_evictable() returns false */
-    __ASSERT(!z_page_frame_is_busy(pf), "page frame 0x%lx is already busy",
+    /* Mark as busy so that k_mem_page_frame_is_evictable() returns false */
+    __ASSERT(!k_mem_page_frame_is_busy(pf), "page frame 0x%lx is already busy",
              phys);
-    z_page_frame_set(pf, Z_PAGE_FRAME_BUSY);
+    k_mem_page_frame_set(pf, K_MEM_PAGE_FRAME_BUSY);
 #endif /* CONFIG_DEMAND_PAGING_ALLOW_IRQ */
     /* Update dirty parameter, since we set to true if it wasn't backed
      * even if otherwise clean

@@ -1209,7 +1209,7 @@ static int page_frame_prepare_locked(struct z_page_frame *pf, bool *dirty_ptr,
 static int do_mem_evict(void *addr)
 {
     bool dirty;
-    struct z_page_frame *pf;
+    struct k_mem_page_frame *pf;
     uintptr_t location;
     int key, ret;
     uintptr_t flags, phys;

@@ -1231,8 +1231,8 @@ static int do_mem_evict(void *addr)
     }

     dirty = (flags & ARCH_DATA_PAGE_DIRTY) != 0;
-    pf = z_phys_to_page_frame(phys);
-    __ASSERT(z_page_frame_to_virt(pf) == addr, "page frame address mismatch");
+    pf = k_mem_phys_to_page_frame(phys);
+    __ASSERT(k_mem_page_frame_to_virt(pf) == addr, "page frame address mismatch");
     ret = page_frame_prepare_locked(pf, &dirty, false, &location);
     if (ret != 0) {
         goto out;

@@ -1261,7 +1261,7 @@ int k_mem_page_out(void *addr, size_t size)
 {
     __ASSERT(page_frames_initialized, "%s called on %p too early", __func__,
              addr);
-    z_mem_assert_virtual_region(addr, size);
+    k_mem_assert_virtual_region(addr, size);

     for (size_t offset = 0; offset < size; offset += CONFIG_MMU_PAGE_SIZE) {
         void *pos = (uint8_t *)addr + offset;

@@ -1276,10 +1276,10 @@ int k_mem_page_out(void *addr, size_t size)
     return 0;
 }

-int z_page_frame_evict(uintptr_t phys)
+int k_mem_page_frame_evict(uintptr_t phys)
 {
     int key, ret;
-    struct z_page_frame *pf;
+    struct k_mem_page_frame *pf;
     bool dirty;
     uintptr_t flags;
     uintptr_t location;

@@ -1298,13 +1298,13 @@ int z_page_frame_evict(uintptr_t phys)
     k_sched_lock();
 #endif /* CONFIG_DEMAND_PAGING_ALLOW_IRQ */
     key = irq_lock();
-    pf = z_phys_to_page_frame(phys);
-    if (!z_page_frame_is_mapped(pf)) {
+    pf = k_mem_phys_to_page_frame(phys);
+    if (!k_mem_page_frame_is_mapped(pf)) {
         /* Nothing to do, free page */
         ret = 0;
         goto out;
     }
-    flags = arch_page_info_get(z_page_frame_to_virt(pf), NULL, false);
+    flags = arch_page_info_get(k_mem_page_frame_to_virt(pf), NULL, false);
     /* Shouldn't ever happen */
     __ASSERT((flags & ARCH_DATA_PAGE_LOADED) != 0, "data page not loaded");
     dirty = (flags & ARCH_DATA_PAGE_DIRTY) != 0;

@@ -1390,9 +1390,9 @@ static inline void paging_stats_eviction_inc(struct k_thread *faulting_thread,
 #endif /* CONFIG_DEMAND_PAGING_STATS */
 }

-static inline struct z_page_frame *do_eviction_select(bool *dirty)
+static inline struct k_mem_page_frame *do_eviction_select(bool *dirty)
 {
-    struct z_page_frame *pf;
+    struct k_mem_page_frame *pf;

 #ifdef CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM
     uint32_t time_diff;

@@ -1426,7 +1426,7 @@ static inline struct z_page_frame *do_eviction_select(bool *dirty)

 static bool do_page_fault(void *addr, bool pin)
 {
-    struct z_page_frame *pf;
+    struct k_mem_page_frame *pf;
     int key, ret;
     uintptr_t page_in_location, page_out_location;
     enum arch_page_location status;

@@ -1489,8 +1489,8 @@ static bool do_page_fault(void *addr, bool pin)
             /* It's a physical memory address */
             uintptr_t phys = page_in_location;

-            pf = z_phys_to_page_frame(phys);
-            z_page_frame_set(pf, Z_PAGE_FRAME_PINNED);
+            pf = k_mem_phys_to_page_frame(phys);
+            k_mem_page_frame_set(pf, K_MEM_PAGE_FRAME_PINNED);
         }

         /* This if-block is to pin the page if it is

@@ -1511,8 +1511,8 @@ static bool do_page_fault(void *addr, bool pin)
         pf = do_eviction_select(&dirty);
         __ASSERT(pf != NULL, "failed to get a page frame");
         LOG_DBG("evicting %p at 0x%lx",
-                z_page_frame_to_virt(pf),
-                z_page_frame_to_phys(pf));
+                k_mem_page_frame_to_virt(pf),
+                k_mem_page_frame_to_phys(pf));

         paging_stats_eviction_inc(faulting_thread, dirty);
     }

@@ -1533,15 +1533,15 @@ static bool do_page_fault(void *addr, bool pin)

 #ifdef CONFIG_DEMAND_PAGING_ALLOW_IRQ
     key = irq_lock();
-    z_page_frame_clear(pf, Z_PAGE_FRAME_BUSY);
+    k_mem_page_frame_clear(pf, K_MEM_PAGE_FRAME_BUSY);
 #endif /* CONFIG_DEMAND_PAGING_ALLOW_IRQ */
-    z_page_frame_clear(pf, Z_PAGE_FRAME_MAPPED);
+    k_mem_page_frame_clear(pf, K_MEM_PAGE_FRAME_MAPPED);
     frame_mapped_set(pf, addr);
     if (pin) {
-        z_page_frame_set(pf, Z_PAGE_FRAME_PINNED);
+        k_mem_page_frame_set(pf, K_MEM_PAGE_FRAME_PINNED);
     }

-    arch_mem_page_in(addr, z_page_frame_to_phys(pf));
+    arch_mem_page_in(addr, k_mem_page_frame_to_phys(pf));
     k_mem_paging_backing_store_page_finalize(pf, page_in_location);
 out:
     irq_unlock(key);

@@ -1593,7 +1593,7 @@ bool z_page_fault(void *addr)

 static void do_mem_unpin(void *addr)
 {
-    struct z_page_frame *pf;
+    struct k_mem_page_frame *pf;
     unsigned int key;
     uintptr_t flags, phys;

@@ -1602,8 +1602,8 @@ static void do_mem_unpin(void *addr)
     __ASSERT((flags & ARCH_DATA_PAGE_NOT_MAPPED) == 0,
              "invalid data page at %p", addr);
     if ((flags & ARCH_DATA_PAGE_LOADED) != 0) {
-        pf = z_phys_to_page_frame(phys);
-        z_page_frame_clear(pf, Z_PAGE_FRAME_PINNED);
+        pf = k_mem_phys_to_page_frame(phys);
+        k_mem_page_frame_clear(pf, K_MEM_PAGE_FRAME_PINNED);
     }
     irq_unlock(key);
 }
@@ -26,12 +26,12 @@ void arch_reserved_pages_update(void)
     uintptr_t end = ROUND_UP(addr + len, CONFIG_MMU_PAGE_SIZE);

     for (; pos < end; pos += CONFIG_MMU_PAGE_SIZE) {
-        if (!z_is_page_frame(pos)) {
+        if (!k_mem_is_page_frame(pos)) {
             continue;
         }
-        struct z_page_frame *pf = z_phys_to_page_frame(pos);
+        struct k_mem_page_frame *pf = k_mem_phys_to_page_frame(pos);

-        z_page_frame_set(pf, Z_PAGE_FRAME_RESERVED);
+        k_mem_page_frame_set(pf, K_MEM_PAGE_FRAME_RESERVED);
     }
 }

@@ -40,12 +40,12 @@ void *location_to_flash(uintptr_t location)
     return UINT_TO_POINTER(ptr);
 }

-int k_mem_paging_backing_store_location_get(struct z_page_frame *pf,
+int k_mem_paging_backing_store_location_get(struct k_mem_page_frame *pf,
                                             uintptr_t *location,
                                             bool page_fault)
 {
     /* Simply returns the virtual address */
-    *location = POINTER_TO_UINT(z_page_frame_to_virt(pf));
+    *location = POINTER_TO_UINT(k_mem_page_frame_to_virt(pf));

     return 0;
 }

@@ -67,7 +67,7 @@ void k_mem_paging_backing_store_page_in(uintptr_t location)
                CONFIG_MMU_PAGE_SIZE);
 }

-void k_mem_paging_backing_store_page_finalize(struct z_page_frame *pf,
+void k_mem_paging_backing_store_page_finalize(struct k_mem_page_frame *pf,
                                               uintptr_t location)
 {
     /* Nothing to do */
@@ -30,24 +30,24 @@
 * are freed as soon as pages are paged in, in
 * k_mem_paging_backing_store_page_finalize().
 * This implies that all data pages are treated as dirty as
- * Z_PAGE_FRAME_BACKED is never set, even if the data page was paged out before
+ * K_MEM_PAGE_FRAME_BACKED is never set, even if the data page was paged out before
 * and not modified since then.
 *
 * An optimization a real backing store will want is have
 * k_mem_paging_backing_store_page_finalize() note the storage location of
- * a paged-in data page in a custom field of its associated z_page_frame, and
- * set the Z_PAGE_FRAME_BACKED bit. Invocations of
+ * a paged-in data page in a custom field of its associated k_mem_page_frame, and
+ * set the K_MEM_PAGE_FRAME_BACKED bit. Invocations of
 * k_mem_paging_backing_store_location_get() will have logic to return
 * the previous clean page location instead of allocating
- * a new one if Z_PAGE_FRAME_BACKED is set.
+ * a new one if K_MEM_PAGE_FRAME_BACKED is set.
 *
 * This will, however, require the implementation of a clean page
 * eviction algorithm, to free backing store locations for loaded data pages
- * as the backing store fills up, and clear the Z_PAGE_FRAME_BACKED bit
+ * as the backing store fills up, and clear the K_MEM_PAGE_FRAME_BACKED bit
 * appropriately.
 *
 * All of this logic is local to the backing store implementation; from the
- * core kernel's perspective the only change is that Z_PAGE_FRAME_BACKED
+ * core kernel's perspective the only change is that K_MEM_PAGE_FRAME_BACKED
 * starts getting set for certain page frames after a page-in (and possibly
 * cleared at a later time).
 */

@@ -82,7 +82,7 @@ static uintptr_t slab_to_location(void *slab)
     return offset;
 }

-int k_mem_paging_backing_store_location_get(struct z_page_frame *pf,
+int k_mem_paging_backing_store_location_get(struct k_mem_page_frame *pf,
                                             uintptr_t *location,
                                             bool page_fault)
 {

@@ -122,7 +122,7 @@ void k_mem_paging_backing_store_page_in(uintptr_t location)
                CONFIG_MMU_PAGE_SIZE);
 }

-void k_mem_paging_backing_store_page_finalize(struct z_page_frame *pf,
+void k_mem_paging_backing_store_page_finalize(struct k_mem_page_frame *pf,
                                               uintptr_t location)
 {
     k_mem_paging_backing_store_location_free(location);
@@ -27,38 +27,39 @@
 static void nru_periodic_update(struct k_timer *timer)
 {
     uintptr_t phys;
-    struct z_page_frame *pf;
+    struct k_mem_page_frame *pf;
     unsigned int key = irq_lock();

-    Z_PAGE_FRAME_FOREACH(phys, pf) {
-        if (!z_page_frame_is_evictable(pf)) {
+    K_MEM_PAGE_FRAME_FOREACH(phys, pf) {
+        if (!k_mem_page_frame_is_evictable(pf)) {
             continue;
         }

         /* Clear accessed bit in page tables */
-        (void)arch_page_info_get(z_page_frame_to_virt(pf), NULL, true);
+        (void)arch_page_info_get(k_mem_page_frame_to_virt(pf),
+                                 NULL, true);
     }

     irq_unlock(key);
 }

-struct z_page_frame *k_mem_paging_eviction_select(bool *dirty_ptr)
+struct k_mem_page_frame *k_mem_paging_eviction_select(bool *dirty_ptr)
 {
     unsigned int last_prec = 4U;
-    struct z_page_frame *last_pf = NULL, *pf;
+    struct k_mem_page_frame *last_pf = NULL, *pf;
     bool accessed;
     bool last_dirty = false;
     bool dirty = false;
     uintptr_t flags, phys;

-    Z_PAGE_FRAME_FOREACH(phys, pf) {
+    K_MEM_PAGE_FRAME_FOREACH(phys, pf) {
         unsigned int prec;

-        if (!z_page_frame_is_evictable(pf)) {
+        if (!k_mem_page_frame_is_evictable(pf)) {
             continue;
         }

-        flags = arch_page_info_get(z_page_frame_to_virt(pf), NULL, false);
+        flags = arch_page_info_get(k_mem_page_frame_to_virt(pf), NULL, false);
         accessed = (flags & ARCH_DATA_PAGE_ACCESSED) != 0UL;
         dirty = (flags & ARCH_DATA_PAGE_DIRTY) != 0UL;

@@ -93,7 +93,7 @@ ZTEST(demand_paging, test_map_anon_pages)
     zassert_not_null(arena, "failed to map anonymous memory arena size %zu",
                      arena_size);
     printk("Anonymous memory arena %p size %zu\n", arena, arena_size);
-    z_page_frames_dump();
+    k_mem_page_frames_dump();
 }

 static void print_paging_stats(struct k_mem_paging_stats_t *stats, const char *scope)