We need to do a few things differently if we are to support a virtual memory
map, i.e. CONFIG_MMU where CONFIG_KERNEL_VM_BASE is not the same as
CONFIG_SRAM_BASE_ADDRESS:

- All sections must be specified with a VMA and LMA, where the VMA is the
  virtual address and the LMA is the physical memory location.
- All sections must be specified with ALIGN_WITH_INPUT to keep VMAs and LMAs
  synchronized.

To do this, the existing linker macros need some adjustment (see the sketch
after this message):

- GROUP_LINK_IN is undefined when CONFIG_KERNEL_VM_BASE is not the same as
  CONFIG_SRAM_BASE_ADDRESS.
- New macro GROUP_ROM_LINK_IN for text/rodata sections.
- New macro GROUP_NOLOAD_LINK_IN for bss/noinit sections.
- Implicit ALIGN_WITH_INPUT for all sections.

GROUP_FOLLOWS_AT has been unused anywhere in the kernel for years and has
been removed.

Signed-off-by: Andrew Boie <andrew.p.boie@intel.com>
Signed-off-by: Daniel Leung <daniel.leung@intel.com>
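For illustration, here is a minimal sketch of how the region-selection macros
named above might be defined in a linker-defs style header. This is an
assumption for readability, not the actual Zephyr include/linker/linker-defs.h:
the guard condition and macro bodies are hypothetical, but the GNU ld
"> vregion AT > lregion" form is the standard way to give a section a separate
VMA and LMA.

/*
 * Hypothetical sketch only. When the kernel is linked at a virtual base that
 * differs from physical SRAM, loadable sections need both a VMA (vregion)
 * and an LMA (lregion); NOLOAD sections only need a VMA. An output section
 * header would then look roughly like:
 *
 *     gcov_bss (NOLOAD) : ALIGN_WITH_INPUT { ... } > RAMABLE_REGION
 *
 * with loadable sections additionally appending "AT > ROMABLE_REGION".
 * The XIP case is ignored here for brevity.
 */
#if defined(CONFIG_MMU) && (CONFIG_KERNEL_VM_BASE != CONFIG_SRAM_BASE_ADDRESS)
/* GROUP_LINK_IN is intentionally left undefined so any remaining user fails
 * the link and must pick one of the explicit macros below.
 */
#define GROUP_ROM_LINK_IN(vregion, lregion)	> vregion AT > lregion
#define GROUP_DATA_LINK_IN(vregion, lregion)	> vregion AT > lregion
#define GROUP_NOLOAD_LINK_IN(vregion, lregion)	> vregion
#else
/* Physical and virtual bases match: a single region suffices. */
#define GROUP_LINK_IN(region)			> region
#define GROUP_ROM_LINK_IN(vregion, lregion)	> vregion
#define GROUP_DATA_LINK_IN(vregion, lregion)	> vregion
#define GROUP_NOLOAD_LINK_IN(vregion, lregion)	> vregion
#endif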
/*
 * Copyright (c) 2019 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/* Copied from linker.ld */

#ifdef CONFIG_ARM
	SECTION_DATA_PROLOGUE(_GCOV_BSS_SECTION_NAME,(NOLOAD),)
	{
#ifdef CONFIG_USERSPACE
		MPU_ALIGN(__gcov_bss_end - __gcov_bss_start);
#else /* CONFIG_USERSPACE */
		. = ALIGN(_region_min_align);
#endif /* CONFIG_USERSPACE */

		__gcov_bss_start = .;
		KEEP(*(".bss.__gcov0.*"));

#ifdef CONFIG_USERSPACE
		MPU_ALIGN(__gcov_bss_end - __gcov_bss_start);
#else /* CONFIG_USERSPACE */
		. = ALIGN(_region_min_align);
#endif /* CONFIG_USERSPACE */

		__gcov_bss_end = .;
	} GROUP_NOLOAD_LINK_IN(RAMABLE_REGION, ROMABLE_REGION)

	__gcov_bss_num_words = ((__gcov_bss_end - __gcov_bss_start) >> 2);
	__gcov_bss_size = __gcov_bss_end - __gcov_bss_start;
#endif

#ifdef CONFIG_X86_64
	SECTION_PROLOGUE(_GCOV_BSS_SECTION_NAME, (NOLOAD), ALIGN(16))
	{
		MMU_PAGE_ALIGN
		__gcov_bss_start = .;
		*(".bss.__gcov0.*");
		. = ALIGN(8);
		MMU_PAGE_ALIGN
		__gcov_bss_end = .;
	} GROUP_DATA_LINK_IN(RAMABLE_REGION, RAMABLE_REGION)

	__gcov_bss_num_words = ((__gcov_bss_end - __gcov_bss_start) >> 2);
	__gcov_bss_size = __gcov_bss_end - __gcov_bss_start;

#elif CONFIG_X86
	SECTION_PROLOGUE(_GCOV_BSS_SECTION_NAME, (NOLOAD),)
	{
		MMU_PAGE_ALIGN
		__gcov_bss_start = .;
		*(".bss.__gcov0.*");
		. = ALIGN(4);
		MMU_PAGE_ALIGN
		__gcov_bss_end = .;
	} GROUP_NOLOAD_LINK_IN(RAMABLE_REGION, ROMABLE_REGION)

	__gcov_bss_num_words = ((__gcov_bss_end - __gcov_bss_start) >> 2);
	__gcov_bss_size = __gcov_bss_end - __gcov_bss_start;
#endif

#ifdef CONFIG_ARC
	SECTION_PROLOGUE(_GCOV_BSS_SECTION_NAME, (NOLOAD),)
	{
		MPU_MIN_SIZE_ALIGN
		__gcov_bss_start = .;
		*(".bss.__gcov0.*");
#ifdef CONFIG_USERSPACE
		. = ALIGN(1 << LOG2CEIL(. - __gcov_bss_start));
#else
		MPU_MIN_SIZE_ALIGN
#endif
		__gcov_bss_end = .;
	} GROUP_NOLOAD_LINK_IN(RAMABLE_REGION, ROMABLE_REGION)

	__gcov_bss_num_words = ((__gcov_bss_end - __gcov_bss_start) >> 2);
	__gcov_bss_size = __gcov_bss_end - __gcov_bss_start;
#endif
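The __gcov_bss_start, __gcov_bss_end and __gcov_bss_size symbols defined above
exist so C code can locate and clear the gcov counter region, which is
deliberately kept out of the ordinary bss. Below is a minimal consumption
sketch, assuming nothing beyond standard linker-symbol access from C; the
helper name and its call site are illustrative, not the actual Zephyr coverage
code.

#include <string.h>

/* Provided by the gcov linker fragment above; only the addresses of these
 * symbols are meaningful, never their contents as ordinary variables.
 */
extern char __gcov_bss_start[];
extern char __gcov_bss_end[];

/* Illustrative helper: clear the coverage counters once at early boot, since
 * this region is excluded from the regular bss zeroing.
 */
static void gcov_bss_zero(void)
{
	(void)memset(__gcov_bss_start, 0,
		     (size_t)(__gcov_bss_end - __gcov_bss_start));
}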