kernel: mm: rename z_phys_un/map to k_mem_*_phys_bare
This renames z_phys_map() and z_phys_unmap() to k_mem_map_phys_bare() and
k_mem_unmap_phys_bare(), respectively. This is part of the series to move
memory management functions away from the z_ namespace.

Signed-off-by: Daniel Leung <daniel.leung@intel.com>
parent: 295254a96b
commit: 552e29790d
15 changed files with 81 additions and 77 deletions
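For reference, a minimal usage sketch of the renamed API, not part of the diff below. The physical address, size, and flag choice are hypothetical placeholders, and the include path assumes the conventional location of the declarations described in this change (include/kernel/mm.h under the zephyr include prefix).

#include <zephyr/kernel.h>
#include <zephyr/kernel/mm.h>

/* Hypothetical device register block; replace with a real physical region. */
#define MY_REGS_PHYS 0xfe000000UL
#define MY_REGS_SIZE 0x1000

static void map_my_regs(void)
{
	uint8_t *virt;

	/* Previously: z_phys_map(&virt, MY_REGS_PHYS, MY_REGS_SIZE, ...); */
	k_mem_map_phys_bare(&virt, MY_REGS_PHYS, MY_REGS_SIZE,
			    K_MEM_PERM_RW | K_MEM_CACHE_NONE);

	/* ... access the registers through 'virt' ... */

	/* Previously: z_phys_unmap(virt, MY_REGS_SIZE); */
	k_mem_unmap_phys_bare(virt, MY_REGS_SIZE);
}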
@@ -33,8 +33,8 @@ void efi_init(struct efi_boot_arg *efi_arg)
 		return;
 	}
 
-	z_phys_map((uint8_t **)&efi, (uintptr_t)efi_arg,
-		   sizeof(struct efi_boot_arg), 0);
+	k_mem_map_phys_bare((uint8_t **)&efi, (uintptr_t)efi_arg,
+			    sizeof(struct efi_boot_arg), 0);
 }
 
 /* EFI thunk. Not a lot of code, but lots of context:
@@ -17,18 +17,18 @@ static uintptr_t bios_search_rsdp_buff(uintptr_t search_phy_add, uint32_t search_length)
 {
 	uint64_t *search_buff;
 
-	z_phys_map((uint8_t **)&search_buff, search_phy_add, search_length, 0);
+	k_mem_map_phys_bare((uint8_t **)&search_buff, search_phy_add, search_length, 0);
 	if (!search_buff) {
 		return 0;
 	}
 
 	for (int i = 0; i < search_length / 8u; i++) {
 		if (search_buff[i] == RSDP_SIGNATURE) {
-			z_phys_unmap((uint8_t *)search_buff, search_length);
+			k_mem_unmap_phys_bare((uint8_t *)search_buff, search_length);
 			return (search_phy_add + (i * 8u));
 		}
 	}
-	z_phys_unmap((uint8_t *)search_buff, search_length);
+	k_mem_unmap_phys_bare((uint8_t *)search_buff, search_length);
 
 	return 0;
 }
@@ -38,10 +38,10 @@ void *bios_acpi_rsdp_get(void)
 	uint8_t *bios_ext_data, *zero_page_base;
 	uintptr_t search_phy_add, rsdp_phy_add;
 
-	z_phys_map(&zero_page_base, 0, DATA_SIZE_K(4u), 0);
+	k_mem_map_phys_bare(&zero_page_base, 0, DATA_SIZE_K(4u), 0);
 	bios_ext_data = EBDA_ADD + zero_page_base;
 	search_phy_add = (uintptr_t)((*(uint16_t *)bios_ext_data) << 4u);
-	z_phys_unmap(zero_page_base, DATA_SIZE_K(4u));
+	k_mem_unmap_phys_bare(zero_page_base, DATA_SIZE_K(4u));
 
 	if ((search_phy_add >= BIOS_EXT_DATA_LOW) && (search_phy_add < BIOS_EXT_DATA_HIGH)) {
 		rsdp_phy_add = bios_search_rsdp_buff(search_phy_add, DATA_SIZE_K(1u));
@@ -41,8 +41,8 @@ void z_multiboot_init(struct multiboot_info *info_pa)
 	 */
 	info = info_pa;
 #else
-	z_phys_map((uint8_t **)&info, POINTER_TO_UINT(info_pa),
-		   sizeof(*info_pa), K_MEM_CACHE_NONE);
+	k_mem_map_phys_bare((uint8_t **)&info, POINTER_TO_UINT(info_pa),
+			    sizeof(*info_pa), K_MEM_CACHE_NONE);
 #endif /* CONFIG_ARCH_MAPS_ALL_RAM */
 
 	if (info == NULL) {
@@ -70,8 +70,8 @@ void z_multiboot_init(struct multiboot_info *info_pa)
 #else
 	uint8_t *address_va;
 
-	z_phys_map(&address_va, info->mmap_addr, info->mmap_length,
-		   K_MEM_CACHE_NONE);
+	k_mem_map_phys_bare(&address_va, info->mmap_addr, info->mmap_length,
+			    K_MEM_CACHE_NONE);
 
 	address = POINTER_TO_UINT(address_va);
 #endif /* CONFIG_ARCH_MAPS_ALL_RAM */
@@ -50,9 +50,9 @@ void dwmac_platform_init(struct dwmac_priv *p)
 	desc_phys_addr = z_mem_phys_addr(dwmac_tx_rx_descriptors);
 
 	/* remap descriptor rings uncached */
-	z_phys_map(&desc_uncached_addr, desc_phys_addr,
-		   sizeof(dwmac_tx_rx_descriptors),
-		   K_MEM_PERM_RW | K_MEM_CACHE_NONE);
+	k_mem_map_phys_bare(&desc_uncached_addr, desc_phys_addr,
+			    sizeof(dwmac_tx_rx_descriptors),
+			    K_MEM_PERM_RW | K_MEM_CACHE_NONE);
 
 	LOG_DBG("desc virt %p uncached %p phys 0x%lx",
 		dwmac_tx_rx_descriptors, desc_uncached_addr, desc_phys_addr);
@@ -96,9 +96,9 @@ static bool map_msix_table_entries(pcie_bdf_t bdf,
 		return false;
 	}
 
-	z_phys_map((uint8_t **)&mapped_table,
-		   bar.phys_addr + table_offset,
-		   n_vector * PCIE_MSIR_TABLE_ENTRY_SIZE, K_MEM_PERM_RW);
+	k_mem_map_phys_bare((uint8_t **)&mapped_table,
+			    bar.phys_addr + table_offset,
+			    n_vector * PCIE_MSIR_TABLE_ENTRY_SIZE, K_MEM_PERM_RW);
 
 	for (i = 0; i < n_vector; i++) {
 		vectors[i].msix_vector = (struct msix_vector *)
@@ -2167,7 +2167,7 @@ exit_disable_clk:
 
 exit_unmap:
 #if defined(DEVICE_MMIO_IS_IN_RAM) && defined(CONFIG_MMU)
-	z_phys_unmap((uint8_t *)DEVICE_MMIO_GET(dev), DEVICE_MMIO_ROM_PTR(dev)->size);
+	k_mem_unmap_phys_bare((uint8_t *)DEVICE_MMIO_GET(dev), DEVICE_MMIO_ROM_PTR(dev)->size);
 #endif
 	return ret;
 }
@@ -219,9 +219,9 @@ static bool ivshmem_configure(const struct device *dev)
 			LOG_ERR("Invalid state table size %zu", state_table_size);
 			return false;
 		}
-		z_phys_map((uint8_t **)&data->state_table_shmem,
-			   shmem_phys_addr, state_table_size,
-			   K_MEM_CACHE_WB | K_MEM_PERM_USER);
+		k_mem_map_phys_bare((uint8_t **)&data->state_table_shmem,
+				    shmem_phys_addr, state_table_size,
+				    K_MEM_CACHE_WB | K_MEM_PERM_USER);
 
 		/* R/W section (optional) */
 		cap_pos = vendor_cap + IVSHMEM_CFG_RW_SECTION_SZ / 4;
@@ -229,9 +229,10 @@ static bool ivshmem_configure(const struct device *dev)
 		size_t rw_section_offset = state_table_size;
 		LOG_INF("RW section size 0x%zX", data->rw_section_size);
 		if (data->rw_section_size > 0) {
-			z_phys_map((uint8_t **)&data->rw_section_shmem,
-				   shmem_phys_addr + rw_section_offset, data->rw_section_size,
-				   K_MEM_CACHE_WB | K_MEM_PERM_RW | K_MEM_PERM_USER);
+			k_mem_map_phys_bare((uint8_t **)&data->rw_section_shmem,
+					    shmem_phys_addr + rw_section_offset,
+					    data->rw_section_size,
+					    K_MEM_CACHE_WB | K_MEM_PERM_RW | K_MEM_PERM_USER);
 		}
 
 		/* Output sections */
@@ -249,8 +250,8 @@ static bool ivshmem_configure(const struct device *dev)
 			if (i == regs->id) {
 				flags |= K_MEM_PERM_RW;
 			}
-			z_phys_map((uint8_t **)&data->output_section_shmem[i],
-				   phys_addr, data->output_section_size, flags);
+			k_mem_map_phys_bare((uint8_t **)&data->output_section_shmem[i],
+					    phys_addr, data->output_section_size, flags);
 		}
 
 		data->size = output_section_offset +
@@ -273,9 +274,9 @@ static bool ivshmem_configure(const struct device *dev)
 
 		data->size = mbar_shmem.size;
 
-		z_phys_map((uint8_t **)&data->shmem,
-			   shmem_phys_addr, data->size,
-			   K_MEM_CACHE_WB | K_MEM_PERM_RW | K_MEM_PERM_USER);
+		k_mem_map_phys_bare((uint8_t **)&data->shmem,
+				    shmem_phys_addr, data->size,
+				    K_MEM_CACHE_WB | K_MEM_PERM_RW | K_MEM_PERM_USER);
 	}
 
 	if (msi_x_bar_present) {
@@ -7,7 +7,7 @@
 #define ZEPHYR_INCLUDE_ARCH_ARM64_ARM_MEM_H_
 
 /*
- * Define ARM specific memory flags used by z_phys_map()
+ * Define ARM specific memory flags used by k_mem_map_phys_bare()
  * followed public definitions in include/kernel/mm.h.
  */
 /* For ARM64, K_MEM_CACHE_NONE is nGnRnE. */
@@ -140,6 +140,9 @@ extern "C" {
  * linear address representing the base of where the physical region is mapped
  * in the virtual address space for the Zephyr kernel.
  *
+ * The memory mapped via this function must be unmapped using
+ * k_mem_unmap_phys_bare().
+ *
  * This function alters the active page tables in the area reserved
  * for the kernel. This function will choose the virtual address
  * and return it to the caller.
@@ -173,8 +176,8 @@ extern "C" {
  * @param[in] size Size of the memory region
  * @param[in] flags Caching mode and access flags, see K_MAP_* macros
  */
-void z_phys_map(uint8_t **virt_ptr, uintptr_t phys, size_t size,
-		uint32_t flags);
+void k_mem_map_phys_bare(uint8_t **virt_ptr, uintptr_t phys, size_t size,
+			 uint32_t flags);
 
 /**
  * Unmap a virtual memory region from kernel's virtual address space.
@@ -188,7 +191,7 @@ void z_phys_map(uint8_t **virt_ptr, uintptr_t phys, size_t size,
  *
  * This will align the input parameters to page boundaries so that
  * this can be used with the virtual address as returned by
- * z_phys_map().
+ * k_mem_map_phys_bare().
  *
  * This API is only available if CONFIG_MMU is enabled.
  *
@@ -203,7 +206,7 @@ void z_phys_map(uint8_t **virt_ptr, uintptr_t phys, size_t size,
  * @param virt Starting address of the virtual address region to be unmapped.
  * @param size Size of the virtual address region
  */
-void z_phys_unmap(uint8_t *virt, size_t size);
+void k_mem_unmap_phys_bare(uint8_t *virt, size_t size);
 
 /**
  * Map memory into virtual address space with guard pages.
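A short sketch of the contract documented above, with a hypothetical address and size that are not from this diff: the kernel chooses the virtual address, the returned pointer carries the sub-page offset of the physical address, and the same pointer/size pair is later handed back to k_mem_unmap_phys_bare(), which re-aligns to page boundaries internally.

#include <zephyr/kernel/mm.h>

static void map_one_buffer(void)
{
	uint8_t *virt;
	uintptr_t phys = 0x80002345UL;	/* hypothetical, not page aligned */
	size_t size = 300;		/* hypothetical, not a page multiple */

	k_mem_map_phys_bare(&virt, phys, size, K_MEM_CACHE_WB | K_MEM_PERM_RW);

	/* 'virt' points at the byte backing 'phys'; use virt[0..size-1]. */

	/* Same pointer and size are given back; alignment is handled internally. */
	k_mem_unmap_phys_bare(virt, size);
}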
@@ -101,8 +101,8 @@ static inline void device_map(mm_reg_t *virt_addr, uintptr_t phys_addr,
 	/* Pass along flags and add that we want supervisor mode
 	 * read-write access.
 	 */
-	z_phys_map((uint8_t **)virt_addr, phys_addr, size,
-		   flags | K_MEM_PERM_RW);
+	k_mem_map_phys_bare((uint8_t **)virt_addr, phys_addr, size,
+			    flags | K_MEM_PERM_RW);
 #else
 	ARG_UNUSED(size);
 	ARG_UNUSED(flags);
@@ -417,8 +417,8 @@ static void bg_thread_main(void *unused1, void *unused2, void *unused3)
 #ifdef CONFIG_MMU
 	/* Invoked here such that backing store or eviction algorithms may
 	 * initialize kernel objects, and that all POST_KERNEL and later tasks
-	 * may perform memory management tasks (except for z_phys_map() which
-	 * is allowed at any time)
+	 * may perform memory management tasks (except for
+	 * k_mem_map_phys_bare() which is allowed at any time)
 	 */
 	z_mem_manage_init();
 #endif /* CONFIG_MMU */
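As a usage note for the comment above: because the bare physical mapping does not depend on z_mem_manage_init(), a driver could in principle map its registers during early init. A minimal sketch under that assumption; the device address, size, and init hook below are hypothetical and not part of this change.

#include <zephyr/init.h>
#include <zephyr/kernel/mm.h>

#define EARLY_UART_PHYS 0xfe201000UL	/* hypothetical */
#define EARLY_UART_SIZE 0x1000		/* hypothetical */

static uint8_t *early_uart_base;

static int early_uart_map(void)
{
	/* Permitted even before the memory-management init runs in bg_thread_main(). */
	k_mem_map_phys_bare(&early_uart_base, EARLY_UART_PHYS, EARLY_UART_SIZE,
			    K_MEM_PERM_RW | K_MEM_CACHE_NONE);
	return 0;
}

SYS_INIT(early_uart_map, PRE_KERNEL_1, CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);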
@@ -215,7 +215,7 @@ static void virt_region_init(void)
 	size_t offset, num_bits;
 
 	/* There are regions where we should never map via
-	 * k_mem_map() and z_phys_map(). Mark them as
+	 * k_mem_map() and k_mem_map_phys_bare(). Mark them as
 	 * already allocated so they will never be used.
 	 */
 
@@ -791,7 +791,7 @@ __weak FUNC_ALIAS(virt_region_align, arch_virt_region_align, size_t);
  * Data will be copied and BSS zeroed, but this must not rely on any
  * initialization functions being called prior to work correctly.
  */
-void z_phys_map(uint8_t **virt_ptr, uintptr_t phys, size_t size, uint32_t flags)
+void k_mem_map_phys_bare(uint8_t **virt_ptr, uintptr_t phys, size_t size, uint32_t flags)
 {
 	uintptr_t aligned_phys, addr_offset;
 	size_t aligned_size, align_boundary;
@@ -878,7 +878,7 @@ fail:
 	k_panic();
 }
 
-void z_phys_unmap(uint8_t *virt, size_t size)
+void k_mem_unmap_phys_bare(uint8_t *virt, size_t size)
 {
 	uintptr_t aligned_virt, addr_offset;
 	size_t aligned_size;
@@ -21,7 +21,7 @@
 #define BASE_FLAGS	(K_MEM_CACHE_WB)
 volatile bool expect_fault;
 
-/* z_phys_map() doesn't have alignment requirements, any oddly-sized buffer
+/* k_mem_map_phys_bare() doesn't have alignment requirements, any oddly-sized buffer
  * can get mapped. BUF_SIZE has a odd size to make sure the mapped buffer
  * spans multiple pages.
  */
@@ -52,7 +52,7 @@ void k_sys_fatal_error_handler(unsigned int reason, const struct arch_esf *pEsf)
  *
  * @ingroup kernel_memprotect_tests
  */
-ZTEST(mem_map, test_z_phys_map_rw)
+ZTEST(mem_map, test_k_mem_map_phys_bare_rw)
 {
 	uint8_t *mapped_rw, *mapped_ro;
 	uint8_t *buf = test_page + BUF_OFFSET;
@@ -60,12 +60,12 @@ ZTEST(mem_map, test_z_phys_map_rw)
 	expect_fault = false;
 
 	/* Map in a page that allows writes */
-	z_phys_map(&mapped_rw, z_mem_phys_addr(buf),
-		   BUF_SIZE, BASE_FLAGS | K_MEM_PERM_RW);
+	k_mem_map_phys_bare(&mapped_rw, z_mem_phys_addr(buf),
+			    BUF_SIZE, BASE_FLAGS | K_MEM_PERM_RW);
 
 	/* Map again this time only allowing reads */
-	z_phys_map(&mapped_ro, z_mem_phys_addr(buf),
-		   BUF_SIZE, BASE_FLAGS);
+	k_mem_map_phys_bare(&mapped_ro, z_mem_phys_addr(buf),
+			    BUF_SIZE, BASE_FLAGS);
 
 	/* Initialize read-write buf with some bytes */
 	for (int i = 0; i < BUF_SIZE; i++) {
@@ -122,7 +122,7 @@ static void transplanted_function(bool *executed)
  *
  * @ingroup kernel_memprotect_tests
  */
-ZTEST(mem_map, test_z_phys_map_exec)
+ZTEST(mem_map, test_k_mem_map_phys_bare_exec)
 {
 #ifndef SKIP_EXECUTE_TESTS
 	uint8_t *mapped_exec, *mapped_ro;
@@ -138,17 +138,18 @@ ZTEST(mem_map, test_z_phys_map_exec)
 	func = transplanted_function;
 
 	/* Now map with execution enabled and try to run the copied fn */
-	z_phys_map(&mapped_exec, z_mem_phys_addr(__test_mem_map_start),
-		   (uintptr_t)(__test_mem_map_end - __test_mem_map_start),
-		   BASE_FLAGS | K_MEM_PERM_EXEC);
+	k_mem_map_phys_bare(&mapped_exec, z_mem_phys_addr(__test_mem_map_start),
+			    (uintptr_t)(__test_mem_map_end - __test_mem_map_start),
+			    BASE_FLAGS | K_MEM_PERM_EXEC);
 
 	func = (void (*)(bool *executed))mapped_exec;
 	func(&executed);
 	zassert_true(executed, "function did not execute");
 
 	/* Now map without execution and execution should now fail */
-	z_phys_map(&mapped_ro, z_mem_phys_addr(__test_mem_map_start),
-		   (uintptr_t)(__test_mem_map_end - __test_mem_map_start), BASE_FLAGS);
+	k_mem_map_phys_bare(&mapped_ro, z_mem_phys_addr(__test_mem_map_start),
+			    (uintptr_t)(__test_mem_map_end - __test_mem_map_start),
+			    BASE_FLAGS);
 
 	func = (void (*)(bool *executed))mapped_ro;
 	expect_fault = true;
@@ -166,18 +167,18 @@ ZTEST(mem_map, test_z_phys_map_exec)
  *
  * @ingroup kernel_memprotect_tests
  */
-ZTEST(mem_map, test_z_phys_map_side_effect)
+ZTEST(mem_map, test_k_mem_map_phys_bare_side_effect)
 {
 	uint8_t *mapped;
 
 	expect_fault = false;
 
-	/* z_phys_map() is supposed to always create fresh mappings.
+	/* k_mem_map_phys_bare() is supposed to always create fresh mappings.
 	 * Show that by mapping test_page to an RO region, we can still
 	 * modify test_page.
 	 */
-	z_phys_map(&mapped, z_mem_phys_addr(test_page),
-		   sizeof(test_page), BASE_FLAGS);
+	k_mem_map_phys_bare(&mapped, z_mem_phys_addr(test_page),
+			    sizeof(test_page), BASE_FLAGS);
 
 	/* Should NOT fault */
 	test_page[0] = 42;
@@ -190,26 +191,26 @@ ZTEST(mem_map, test_z_phys_map_side_effect)
 }
 
 /**
- * Test that z_phys_unmap() unmaps the memory and it is no longer
+ * Test that k_mem_unmap_phys_bare() unmaps the memory and it is no longer
  * accessible afterwards.
  *
 * @ingroup kernel_memprotect_tests
 */
-ZTEST(mem_map, test_z_phys_unmap)
+ZTEST(mem_map, test_k_mem_unmap_phys_bare)
 {
 	uint8_t *mapped;
 
 	expect_fault = false;
 
 	/* Map in a page that allows writes */
-	z_phys_map(&mapped, z_mem_phys_addr(test_page),
-		   sizeof(test_page), BASE_FLAGS | K_MEM_PERM_RW);
+	k_mem_map_phys_bare(&mapped, z_mem_phys_addr(test_page),
+			    sizeof(test_page), BASE_FLAGS | K_MEM_PERM_RW);
 
 	/* Should NOT fault */
 	mapped[0] = 42;
 
 	/* Unmap the memory */
-	z_phys_unmap(mapped, sizeof(test_page));
+	k_mem_unmap_phys_bare(mapped, sizeof(test_page));
 
 	/* Should fault since test_page is no longer accessible */
 	expect_fault = true;
@@ -219,18 +220,18 @@ ZTEST(mem_map, test_z_phys_unmap)
 }
 
 /**
- * Show that z_phys_unmap() can reclaim the virtual region correctly.
+ * Show that k_mem_unmap_phys_bare() can reclaim the virtual region correctly.
 *
 * @ingroup kernel_memprotect_tests
 */
-ZTEST(mem_map, test_z_phys_map_unmap_reclaim_addr)
+ZTEST(mem_map, test_k_mem_map_phys_bare_unmap_reclaim_addr)
 {
 	uint8_t *mapped, *mapped_old;
 	uint8_t *buf = test_page + BUF_OFFSET;
 
 	/* Map the buffer the first time. */
-	z_phys_map(&mapped, z_mem_phys_addr(buf),
-		   BUF_SIZE, BASE_FLAGS);
+	k_mem_map_phys_bare(&mapped, z_mem_phys_addr(buf),
+			    BUF_SIZE, BASE_FLAGS);
 
 	printk("Mapped (1st time): %p\n", mapped);
 
@@ -240,18 +241,17 @@ ZTEST(mem_map, test_z_phys_map_unmap_reclaim_addr)
 	/*
 	 * Unmap the buffer.
 	 * This should reclaim the bits in virtual region tracking,
-	 * so that the next time z_phys_map() is called with
+	 * so that the next time k_mem_map_phys_bare() is called with
 	 * the same arguments, it will return the same address.
 	 */
-	z_phys_unmap(mapped, BUF_SIZE);
+	k_mem_unmap_phys_bare(mapped, BUF_SIZE);
 
 	/*
 	 * Map again the same buffer using same parameters.
 	 * It should give us back the same virtual address
 	 * as above when it is mapped the first time.
 	 */
-	z_phys_map(&mapped, z_mem_phys_addr(buf),
-		   BUF_SIZE, BASE_FLAGS);
+	k_mem_map_phys_bare(&mapped, z_mem_phys_addr(buf), BUF_SIZE, BASE_FLAGS);
 
 	printk("Mapped (2nd time): %p\n", mapped);
 
@@ -508,8 +508,8 @@ ZTEST(mem_map_api, test_k_mem_map_user)
 	 */
 	expect_fault = false;
 
-	z_phys_map(&mapped, z_mem_phys_addr(test_page), sizeof(test_page),
-		   BASE_FLAGS | K_MEM_PERM_RW | K_MEM_PERM_USER);
+	k_mem_map_phys_bare(&mapped, z_mem_phys_addr(test_page), sizeof(test_page),
+			    BASE_FLAGS | K_MEM_PERM_RW | K_MEM_PERM_USER);
 
 	printk("mapped a page: %p - %p (with K_MEM_PERM_USER)\n", mapped,
 	       mapped + CONFIG_MMU_PAGE_SIZE);
@@ -521,7 +521,7 @@ ZTEST(mem_map_api, test_k_mem_map_user)
 	k_thread_join(&user_thread, K_FOREVER);
 
 	/* Unmap the memory */
-	z_phys_unmap(mapped, sizeof(test_page));
+	k_mem_unmap_phys_bare(mapped, sizeof(test_page));
 
 	/*
 	 * Map the region without using K_MEM_PERM_USER and try to access it
@@ -529,8 +529,8 @@ ZTEST(mem_map_api, test_k_mem_map_user)
 	 */
 	expect_fault = true;
 
-	z_phys_map(&mapped, z_mem_phys_addr(test_page), sizeof(test_page),
-		   BASE_FLAGS | K_MEM_PERM_RW);
+	k_mem_map_phys_bare(&mapped, z_mem_phys_addr(test_page), sizeof(test_page),
+			    BASE_FLAGS | K_MEM_PERM_RW);
 
 	printk("mapped a page: %p - %p (without K_MEM_PERM_USER)\n", mapped,
 	       mapped + CONFIG_MMU_PAGE_SIZE);
@@ -46,7 +46,7 @@ static void smh_reg_map(struct shared_multi_heap_region *region)
 	mem_attr = (region->attr == SMH_REG_ATTR_CACHEABLE) ? K_MEM_CACHE_WB : K_MEM_CACHE_NONE;
 	mem_attr |= K_MEM_PERM_RW;
 
-	z_phys_map(&v_addr, region->addr, region->size, mem_attr);
+	k_mem_map_phys_bare(&v_addr, region->addr, region->size, mem_attr);
 
 	region->addr = (uintptr_t) v_addr;
 }
west.yml
@@ -30,7 +30,7 @@ manifest:
   # Please add items below based on alphabetical order
   projects:
     - name: acpica
-      revision: da5f2721e1c7f188fe04aa50af76f4b94f3c3ea3
+      revision: 8d24867bc9c9d81c81eeac59391cda59333affd4
       path: modules/lib/acpica
     - name: bsim
       repo-path: babblesim-manifest