kernel: mm: rename z_phys_un/map to k_mem_*_phys_bare

This renames z_phys_map() and z_phys_unmap() to
k_mem_map_phys_bare() and k_mem_unmap_phys_bare()
respectively. This is part of a series to move memory
management functions out of the z_ namespace.

Signed-off-by: Daniel Leung <daniel.leung@intel.com>
Daniel Leung authored on 2024-06-06 09:26:10 -07:00; committed by Anas Nashif
commit 552e29790d (parent 295254a96b)
15 changed files with 81 additions and 77 deletions
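The rename is mechanical: every call site swaps the z_-prefixed name for the k_mem_*_phys_bare one, with arguments and semantics unchanged. A minimal before/after sketch (the helper, addresses, and header path below are illustrative assumptions, not taken from this diff):

#include <zephyr/kernel.h>
#include <zephyr/kernel/internal/mm.h>	/* assumed header for these declarations */

/* Hypothetical helper showing one map/use/unmap cycle. */
static void map_cycle_example(uintptr_t phys, size_t size)
{
	uint8_t *virt;

	/* Before this commit: z_phys_map(&virt, phys, size, K_MEM_PERM_RW); */
	k_mem_map_phys_bare(&virt, phys, size, K_MEM_PERM_RW);

	virt[0] = 0xA5;		/* the region is now accessible through virt */

	/* Before this commit: z_phys_unmap(virt, size); */
	k_mem_unmap_phys_bare(virt, size);
}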

@@ -33,8 +33,8 @@ void efi_init(struct efi_boot_arg *efi_arg)
 		return;
 	}
 
-	z_phys_map((uint8_t **)&efi, (uintptr_t)efi_arg,
+	k_mem_map_phys_bare((uint8_t **)&efi, (uintptr_t)efi_arg,
 		   sizeof(struct efi_boot_arg), 0);
 }
 
 /* EFI thunk. Not a lot of code, but lots of context:

@@ -17,18 +17,18 @@ static uintptr_t bios_search_rsdp_buff(uintptr_t search_phy_add, uint32_t search_length)
 {
 	uint64_t *search_buff;
 
-	z_phys_map((uint8_t **)&search_buff, search_phy_add, search_length, 0);
+	k_mem_map_phys_bare((uint8_t **)&search_buff, search_phy_add, search_length, 0);
 	if (!search_buff) {
 		return 0;
 	}
 
 	for (int i = 0; i < search_length / 8u; i++) {
 		if (search_buff[i] == RSDP_SIGNATURE) {
-			z_phys_unmap((uint8_t *)search_buff, search_length);
+			k_mem_unmap_phys_bare((uint8_t *)search_buff, search_length);
 			return (search_phy_add + (i * 8u));
 		}
 	}
 
-	z_phys_unmap((uint8_t *)search_buff, search_length);
+	k_mem_unmap_phys_bare((uint8_t *)search_buff, search_length);
 	return 0;
 }
@@ -38,10 +38,10 @@ void *bios_acpi_rsdp_get(void)
 	uint8_t *bios_ext_data, *zero_page_base;
 	uintptr_t search_phy_add, rsdp_phy_add;
 
-	z_phys_map(&zero_page_base, 0, DATA_SIZE_K(4u), 0);
+	k_mem_map_phys_bare(&zero_page_base, 0, DATA_SIZE_K(4u), 0);
 	bios_ext_data = EBDA_ADD + zero_page_base;
 	search_phy_add = (uintptr_t)((*(uint16_t *)bios_ext_data) << 4u);
-	z_phys_unmap(zero_page_base, DATA_SIZE_K(4u));
+	k_mem_unmap_phys_bare(zero_page_base, DATA_SIZE_K(4u));
 
 	if ((search_phy_add >= BIOS_EXT_DATA_LOW) && (search_phy_add < BIOS_EXT_DATA_HIGH)) {
 		rsdp_phy_add = bios_search_rsdp_buff(search_phy_add, DATA_SIZE_K(1u));

@@ -41,8 +41,8 @@ void z_multiboot_init(struct multiboot_info *info_pa)
 	 */
 	info = info_pa;
 #else
-	z_phys_map((uint8_t **)&info, POINTER_TO_UINT(info_pa),
+	k_mem_map_phys_bare((uint8_t **)&info, POINTER_TO_UINT(info_pa),
 		   sizeof(*info_pa), K_MEM_CACHE_NONE);
 #endif /* CONFIG_ARCH_MAPS_ALL_RAM */
 
 	if (info == NULL) {
@@ -70,8 +70,8 @@ void z_multiboot_init(struct multiboot_info *info_pa)
 #else
 	uint8_t *address_va;
 
-	z_phys_map(&address_va, info->mmap_addr, info->mmap_length,
+	k_mem_map_phys_bare(&address_va, info->mmap_addr, info->mmap_length,
 		   K_MEM_CACHE_NONE);
 	address = POINTER_TO_UINT(address_va);
 #endif /* CONFIG_ARCH_MAPS_ALL_RAM */

@@ -50,9 +50,9 @@ void dwmac_platform_init(struct dwmac_priv *p)
 	desc_phys_addr = z_mem_phys_addr(dwmac_tx_rx_descriptors);
 
 	/* remap descriptor rings uncached */
-	z_phys_map(&desc_uncached_addr, desc_phys_addr,
+	k_mem_map_phys_bare(&desc_uncached_addr, desc_phys_addr,
 		   sizeof(dwmac_tx_rx_descriptors),
 		   K_MEM_PERM_RW | K_MEM_CACHE_NONE);
 
 	LOG_DBG("desc virt %p uncached %p phys 0x%lx",
 		dwmac_tx_rx_descriptors, desc_uncached_addr, desc_phys_addr);

@@ -96,9 +96,9 @@ static bool map_msix_table_entries(pcie_bdf_t bdf,
 		return false;
 	}
 
-	z_phys_map((uint8_t **)&mapped_table,
+	k_mem_map_phys_bare((uint8_t **)&mapped_table,
 		   bar.phys_addr + table_offset,
 		   n_vector * PCIE_MSIR_TABLE_ENTRY_SIZE, K_MEM_PERM_RW);
 
 	for (i = 0; i < n_vector; i++) {
 		vectors[i].msix_vector = (struct msix_vector *)

@@ -2167,7 +2167,7 @@ exit_disable_clk:
 exit_unmap:
 #if defined(DEVICE_MMIO_IS_IN_RAM) && defined(CONFIG_MMU)
-	z_phys_unmap((uint8_t *)DEVICE_MMIO_GET(dev), DEVICE_MMIO_ROM_PTR(dev)->size);
+	k_mem_unmap_phys_bare((uint8_t *)DEVICE_MMIO_GET(dev), DEVICE_MMIO_ROM_PTR(dev)->size);
 #endif
 
 	return ret;
 }
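The hunk above is an error-exit path: a probe routine that mapped its MMIO region must unmap it again before failing. A condensed sketch of the pattern (the driver and my_hw_init() are hypothetical; the exit_unmap block mirrors the diff):

static int my_driver_init(const struct device *dev)
{
	int ret;

	/* Under CONFIG_MMU this maps the register space via device_map(). */
	DEVICE_MMIO_MAP(dev, K_MEM_CACHE_NONE);

	ret = my_hw_init(dev);	/* hypothetical hardware bring-up */
	if (ret < 0) {
		goto exit_unmap;
	}

	return 0;

exit_unmap:
#if defined(DEVICE_MMIO_IS_IN_RAM) && defined(CONFIG_MMU)
	k_mem_unmap_phys_bare((uint8_t *)DEVICE_MMIO_GET(dev),
			      DEVICE_MMIO_ROM_PTR(dev)->size);
#endif
	return ret;
}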

@@ -219,9 +219,9 @@ static bool ivshmem_configure(const struct device *dev)
 			LOG_ERR("Invalid state table size %zu", state_table_size);
 			return false;
 		}
 
-		z_phys_map((uint8_t **)&data->state_table_shmem,
+		k_mem_map_phys_bare((uint8_t **)&data->state_table_shmem,
 			   shmem_phys_addr, state_table_size,
 			   K_MEM_CACHE_WB | K_MEM_PERM_USER);
 
 		/* R/W section (optional) */
 		cap_pos = vendor_cap + IVSHMEM_CFG_RW_SECTION_SZ / 4;
@@ -229,9 +229,10 @@ static bool ivshmem_configure(const struct device *dev)
 		size_t rw_section_offset = state_table_size;
 
 		LOG_INF("RW section size 0x%zX", data->rw_section_size);
 		if (data->rw_section_size > 0) {
-			z_phys_map((uint8_t **)&data->rw_section_shmem,
-				   shmem_phys_addr + rw_section_offset, data->rw_section_size,
-				   K_MEM_CACHE_WB | K_MEM_PERM_RW | K_MEM_PERM_USER);
+			k_mem_map_phys_bare((uint8_t **)&data->rw_section_shmem,
+					    shmem_phys_addr + rw_section_offset,
+					    data->rw_section_size,
+					    K_MEM_CACHE_WB | K_MEM_PERM_RW | K_MEM_PERM_USER);
 		}
 
 		/* Output sections */
@@ -249,8 +250,8 @@ static bool ivshmem_configure(const struct device *dev)
 			if (i == regs->id) {
 				flags |= K_MEM_PERM_RW;
 			}
-			z_phys_map((uint8_t **)&data->output_section_shmem[i],
+			k_mem_map_phys_bare((uint8_t **)&data->output_section_shmem[i],
 				   phys_addr, data->output_section_size, flags);
 		}
 
 		data->size = output_section_offset +
@@ -273,9 +274,9 @@ static bool ivshmem_configure(const struct device *dev)
 		data->size = mbar_shmem.size;
 
-		z_phys_map((uint8_t **)&data->shmem,
+		k_mem_map_phys_bare((uint8_t **)&data->shmem,
 			   shmem_phys_addr, data->size,
 			   K_MEM_CACHE_WB | K_MEM_PERM_RW | K_MEM_PERM_USER);
 	}
 
 	if (msi_x_bar_present) {

@@ -7,7 +7,7 @@
 #define ZEPHYR_INCLUDE_ARCH_ARM64_ARM_MEM_H_
 
 /*
- * Define ARM specific memory flags used by z_phys_map()
+ * Define ARM specific memory flags used by k_mem_map_phys_bare()
  * followed public definitions in include/kernel/mm.h.
  */
 
 /* For ARM64, K_MEM_CACHE_NONE is nGnRnE. */

@@ -140,6 +140,9 @@ extern "C" {
  * linear address representing the base of where the physical region is mapped
  * in the virtual address space for the Zephyr kernel.
  *
+ * The memory mapped via this function must be unmapped using
+ * k_mem_unmap_phys_bare().
+ *
  * This function alters the active page tables in the area reserved
  * for the kernel. This function will choose the virtual address
  * and return it to the caller.
@@ -173,8 +176,8 @@ extern "C" {
  * @param[in] size Size of the memory region
  * @param[in] flags Caching mode and access flags, see K_MAP_* macros
  */
-void z_phys_map(uint8_t **virt_ptr, uintptr_t phys, size_t size,
+void k_mem_map_phys_bare(uint8_t **virt_ptr, uintptr_t phys, size_t size,
 		uint32_t flags);
 
 /**
  * Unmap a virtual memory region from kernel's virtual address space.
@@ -188,7 +191,7 @@ void z_phys_map(uint8_t **virt_ptr, uintptr_t phys, size_t size,
  *
  * This will align the input parameters to page boundaries so that
  * this can be used with the virtual address as returned by
- * z_phys_map().
+ * k_mem_map_phys_bare().
  *
  * This API is only available if CONFIG_MMU is enabled.
 *
@@ -203,7 +206,7 @@ void z_phys_map(uint8_t **virt_ptr, uintptr_t phys, size_t size,
 * @param virt Starting address of the virtual address region to be unmapped.
 * @param size Size of the virtual address region
 */
-void z_phys_unmap(uint8_t *virt, size_t size);
+void k_mem_unmap_phys_bare(uint8_t *virt, size_t size);
 
 /**
 * Map memory into virtual address space with guard pages.
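The doc text above says both functions align their inputs to page boundaries internally, so callers may pass unaligned physical addresses and sizes. One caller-visible consequence, sketched assuming 4 KiB pages (addresses illustrative): the returned virtual pointer keeps the sub-page offset of the physical address.

uint8_t *virt;

/* Map 16 bytes starting 0x123 bytes into a page: the containing page is
 * mapped, and virt carries the same 0x123 offset into it.
 */
k_mem_map_phys_bare(&virt, 0x10000123UL, 16, K_MEM_PERM_RW);
__ASSERT_NO_MSG(((uintptr_t)virt % CONFIG_MMU_PAGE_SIZE) == 0x123);

/* Unmapping accepts the same unaligned pointer/size pair. */
k_mem_unmap_phys_bare(virt, 16);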

@@ -101,8 +101,8 @@ static inline void device_map(mm_reg_t *virt_addr, uintptr_t phys_addr,
 	/* Pass along flags and add that we want supervisor mode
 	 * read-write access.
 	 */
-	z_phys_map((uint8_t **)virt_addr, phys_addr, size,
+	k_mem_map_phys_bare((uint8_t **)virt_addr, phys_addr, size,
 		   flags | K_MEM_PERM_RW);
 #else
 	ARG_UNUSED(size);
 	ARG_UNUSED(flags);

@@ -417,8 +417,8 @@ static void bg_thread_main(void *unused1, void *unused2, void *unused3)
 #ifdef CONFIG_MMU
 	/* Invoked here such that backing store or eviction algorithms may
 	 * initialize kernel objects, and that all POST_KERNEL and later tasks
-	 * may perform memory management tasks (except for z_phys_map() which
-	 * is allowed at any time)
+	 * may perform memory management tasks (except for
+	 * k_mem_map_phys_bare() which is allowed at any time)
 	 */
 	z_mem_manage_init();
 #endif /* CONFIG_MMU */

@@ -215,7 +215,7 @@ static void virt_region_init(void)
 	size_t offset, num_bits;
 
 	/* There are regions where we should never map via
-	 * k_mem_map() and z_phys_map(). Mark them as
+	 * k_mem_map() and k_mem_map_phys_bare(). Mark them as
 	 * already allocated so they will never be used.
 	 */
@@ -791,7 +791,7 @@ __weak FUNC_ALIAS(virt_region_align, arch_virt_region_align, size_t);
  * Data will be copied and BSS zeroed, but this must not rely on any
  * initialization functions being called prior to work correctly.
  */
-void z_phys_map(uint8_t **virt_ptr, uintptr_t phys, size_t size, uint32_t flags)
+void k_mem_map_phys_bare(uint8_t **virt_ptr, uintptr_t phys, size_t size, uint32_t flags)
 {
 	uintptr_t aligned_phys, addr_offset;
 	size_t aligned_size, align_boundary;
@@ -878,7 +878,7 @@ fail:
 	k_panic();
 }
 
-void z_phys_unmap(uint8_t *virt, size_t size)
+void k_mem_unmap_phys_bare(uint8_t *virt, size_t size)
 {
 	uintptr_t aligned_virt, addr_offset;
 	size_t aligned_size;
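The locals visible in this hunk (aligned_phys, addr_offset, aligned_size) implement the alignment the API documentation promises. A paraphrase of that arithmetic using the sys/util.h rounding helpers (a sketch, not the exact function body; dest_virt stands in for the reserved virtual region):

aligned_phys = ROUND_DOWN(phys, CONFIG_MMU_PAGE_SIZE);
addr_offset = phys - aligned_phys;
aligned_size = ROUND_UP(size + addr_offset, CONFIG_MMU_PAGE_SIZE);

/* ... page tables are updated for [aligned_phys, aligned_phys + aligned_size),
 * then the caller gets back the offset-adjusted pointer:
 * *virt_ptr = dest_virt + addr_offset;
 */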

@@ -21,7 +21,7 @@
 #define BASE_FLAGS	(K_MEM_CACHE_WB)
 volatile bool expect_fault;
 
-/* z_phys_map() doesn't have alignment requirements, any oddly-sized buffer
+/* k_mem_map_phys_bare() doesn't have alignment requirements, any oddly-sized buffer
  * can get mapped. BUF_SIZE has a odd size to make sure the mapped buffer
  * spans multiple pages.
  */
@@ -52,7 +52,7 @@ void k_sys_fatal_error_handler(unsigned int reason, const struct arch_esf *pEsf)
  *
  * @ingroup kernel_memprotect_tests
  */
-ZTEST(mem_map, test_z_phys_map_rw)
+ZTEST(mem_map, test_k_mem_map_phys_bare_rw)
 {
 	uint8_t *mapped_rw, *mapped_ro;
 	uint8_t *buf = test_page + BUF_OFFSET;
@@ -60,12 +60,12 @@ ZTEST(mem_map, test_z_phys_map_rw)
 	expect_fault = false;
 
 	/* Map in a page that allows writes */
-	z_phys_map(&mapped_rw, z_mem_phys_addr(buf),
+	k_mem_map_phys_bare(&mapped_rw, z_mem_phys_addr(buf),
 		   BUF_SIZE, BASE_FLAGS | K_MEM_PERM_RW);
 
 	/* Map again this time only allowing reads */
-	z_phys_map(&mapped_ro, z_mem_phys_addr(buf),
+	k_mem_map_phys_bare(&mapped_ro, z_mem_phys_addr(buf),
 		   BUF_SIZE, BASE_FLAGS);
 
 	/* Initialize read-write buf with some bytes */
 	for (int i = 0; i < BUF_SIZE; i++) {
@@ -122,7 +122,7 @@ static void transplanted_function(bool *executed)
 *
 * @ingroup kernel_memprotect_tests
 */
-ZTEST(mem_map, test_z_phys_map_exec)
+ZTEST(mem_map, test_k_mem_map_phys_bare_exec)
 {
 #ifndef SKIP_EXECUTE_TESTS
 	uint8_t *mapped_exec, *mapped_ro;
@@ -138,17 +138,18 @@ ZTEST(mem_map, test_z_phys_map_exec)
 	func = transplanted_function;
 
 	/* Now map with execution enabled and try to run the copied fn */
-	z_phys_map(&mapped_exec, z_mem_phys_addr(__test_mem_map_start),
+	k_mem_map_phys_bare(&mapped_exec, z_mem_phys_addr(__test_mem_map_start),
 		   (uintptr_t)(__test_mem_map_end - __test_mem_map_start),
 		   BASE_FLAGS | K_MEM_PERM_EXEC);
 
 	func = (void (*)(bool *executed))mapped_exec;
 	func(&executed);
 	zassert_true(executed, "function did not execute");
 
 	/* Now map without execution and execution should now fail */
-	z_phys_map(&mapped_ro, z_mem_phys_addr(__test_mem_map_start),
-		   (uintptr_t)(__test_mem_map_end - __test_mem_map_start), BASE_FLAGS);
+	k_mem_map_phys_bare(&mapped_ro, z_mem_phys_addr(__test_mem_map_start),
+			    (uintptr_t)(__test_mem_map_end - __test_mem_map_start),
+			    BASE_FLAGS);
 
 	func = (void (*)(bool *executed))mapped_ro;
 	expect_fault = true;
@@ -166,18 +167,18 @@ ZTEST(mem_map, test_z_phys_map_exec)
 *
 * @ingroup kernel_memprotect_tests
 */
-ZTEST(mem_map, test_z_phys_map_side_effect)
+ZTEST(mem_map, test_k_mem_map_phys_bare_side_effect)
 {
 	uint8_t *mapped;
 
 	expect_fault = false;
 
-	/* z_phys_map() is supposed to always create fresh mappings.
+	/* k_mem_map_phys_bare() is supposed to always create fresh mappings.
 	 * Show that by mapping test_page to an RO region, we can still
 	 * modify test_page.
 	 */
-	z_phys_map(&mapped, z_mem_phys_addr(test_page),
+	k_mem_map_phys_bare(&mapped, z_mem_phys_addr(test_page),
 		   sizeof(test_page), BASE_FLAGS);
 
 	/* Should NOT fault */
 	test_page[0] = 42;
@@ -190,26 +191,26 @@ ZTEST(mem_map, test_z_phys_map_side_effect)
 }
 
 /**
- * Test that z_phys_unmap() unmaps the memory and it is no longer
+ * Test that k_mem_unmap_phys_bare() unmaps the memory and it is no longer
  * accessible afterwards.
  *
  * @ingroup kernel_memprotect_tests
  */
-ZTEST(mem_map, test_z_phys_unmap)
+ZTEST(mem_map, test_k_mem_unmap_phys_bare)
 {
 	uint8_t *mapped;
 
 	expect_fault = false;
 
 	/* Map in a page that allows writes */
-	z_phys_map(&mapped, z_mem_phys_addr(test_page),
+	k_mem_map_phys_bare(&mapped, z_mem_phys_addr(test_page),
 		   sizeof(test_page), BASE_FLAGS | K_MEM_PERM_RW);
 
 	/* Should NOT fault */
 	mapped[0] = 42;
 
 	/* Unmap the memory */
-	z_phys_unmap(mapped, sizeof(test_page));
+	k_mem_unmap_phys_bare(mapped, sizeof(test_page));
 
 	/* Should fault since test_page is no longer accessible */
 	expect_fault = true;
@@ -219,18 +220,18 @@ ZTEST(mem_map, test_z_phys_unmap)
 }
 
 /**
- * Show that z_phys_unmap() can reclaim the virtual region correctly.
+ * Show that k_mem_unmap_phys_bare() can reclaim the virtual region correctly.
  *
  * @ingroup kernel_memprotect_tests
 */
-ZTEST(mem_map, test_z_phys_map_unmap_reclaim_addr)
+ZTEST(mem_map, test_k_mem_map_phys_bare_unmap_reclaim_addr)
 {
 	uint8_t *mapped, *mapped_old;
 	uint8_t *buf = test_page + BUF_OFFSET;
 
 	/* Map the buffer the first time. */
-	z_phys_map(&mapped, z_mem_phys_addr(buf),
+	k_mem_map_phys_bare(&mapped, z_mem_phys_addr(buf),
 		   BUF_SIZE, BASE_FLAGS);
 
 	printk("Mapped (1st time): %p\n", mapped);
@@ -240,18 +241,17 @@ ZTEST(mem_map, test_z_phys_map_unmap_reclaim_addr)
 	/*
 	 * Unmap the buffer.
 	 * This should reclaim the bits in virtual region tracking,
-	 * so that the next time z_phys_map() is called with
+	 * so that the next time k_mem_map_phys_bare() is called with
 	 * the same arguments, it will return the same address.
 	 */
-	z_phys_unmap(mapped, BUF_SIZE);
+	k_mem_unmap_phys_bare(mapped, BUF_SIZE);
 
 	/*
 	 * Map again the same buffer using same parameters.
 	 * It should give us back the same virtual address
 	 * as above when it is mapped the first time.
 	 */
-	z_phys_map(&mapped, z_mem_phys_addr(buf),
-		   BUF_SIZE, BASE_FLAGS);
+	k_mem_map_phys_bare(&mapped, z_mem_phys_addr(buf), BUF_SIZE, BASE_FLAGS);
 
 	printk("Mapped (2nd time): %p\n", mapped);
@@ -508,8 +508,8 @@ ZTEST(mem_map_api, test_k_mem_map_user)
 	 */
 	expect_fault = false;
 
-	z_phys_map(&mapped, z_mem_phys_addr(test_page), sizeof(test_page),
+	k_mem_map_phys_bare(&mapped, z_mem_phys_addr(test_page), sizeof(test_page),
 		   BASE_FLAGS | K_MEM_PERM_RW | K_MEM_PERM_USER);
 
 	printk("mapped a page: %p - %p (with K_MEM_PERM_USER)\n", mapped,
 	       mapped + CONFIG_MMU_PAGE_SIZE);
@@ -521,7 +521,7 @@ ZTEST(mem_map_api, test_k_mem_map_user)
 	k_thread_join(&user_thread, K_FOREVER);
 
 	/* Unmap the memory */
-	z_phys_unmap(mapped, sizeof(test_page));
+	k_mem_unmap_phys_bare(mapped, sizeof(test_page));
 
 	/*
 	 * Map the region without using K_MEM_PERM_USER and try to access it
@@ -529,8 +529,8 @@ ZTEST(mem_map_api, test_k_mem_map_user)
 	 */
 	expect_fault = true;
 
-	z_phys_map(&mapped, z_mem_phys_addr(test_page), sizeof(test_page),
+	k_mem_map_phys_bare(&mapped, z_mem_phys_addr(test_page), sizeof(test_page),
 		   BASE_FLAGS | K_MEM_PERM_RW);
 
 	printk("mapped a page: %p - %p (without K_MEM_PERM_USER)\n", mapped,
 	       mapped + CONFIG_MMU_PAGE_SIZE);
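The expect_fault flag threaded through these tests is the usual ztest fault-interception idiom: a custom fatal-error handler consumes anticipated faults (such as the write to an RO mapping above) and fails the test on any other. A simplified sketch of that handler (the real one in this file may differ in detail):

volatile bool expect_fault;

void k_sys_fatal_error_handler(unsigned int reason, const struct arch_esf *esf)
{
	if (expect_fault) {
		expect_fault = false;
		ztest_test_pass();	/* fault was anticipated by the test */
	} else {
		printk("Unexpected fault (reason %u)\n", reason);
		ztest_test_fail();
	}
}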

@@ -46,7 +46,7 @@ static void smh_reg_map(struct shared_multi_heap_region *region)
 	mem_attr = (region->attr == SMH_REG_ATTR_CACHEABLE) ? K_MEM_CACHE_WB : K_MEM_CACHE_NONE;
 	mem_attr |= K_MEM_PERM_RW;
 
-	z_phys_map(&v_addr, region->addr, region->size, mem_attr);
+	k_mem_map_phys_bare(&v_addr, region->addr, region->size, mem_attr);
 
 	region->addr = (uintptr_t) v_addr;
 }

@@ -30,7 +30,7 @@ manifest:
   # Please add items below based on alphabetical order
   projects:
     - name: acpica
-      revision: da5f2721e1c7f188fe04aa50af76f4b94f3c3ea3
+      revision: 8d24867bc9c9d81c81eeac59391cda59333affd4
       path: modules/lib/acpica
     - name: bsim
       repo-path: babblesim-manifest