Compare commits

...

17 commits

Author SHA1 Message Date
6ecac2d0bc Use "uintptr_t" for 64-bit targets
On 64-bit targets, casting from pointer to unsigned can lose data.
Instead, cast to uintptr_t. This causes no code change on 32-bit
platforms, since the types have the same width, but preserves all bits
on systems where pointers are 64 bits.
2025-02-10 08:26:09 -06:00
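A minimal standalone sketch (not from this repository) of the hazard described above, assuming an LP64 target where unsigned is 32 bits wide and pointers are 64:

    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
        char buf[4];
        char *p = buf;

        /* On LP64 targets, unsigned int is 32 bits while pointers are 64,
         * so this can silently drop the upper half of the address.
         * (Compilers typically warn; the intermediate cast silences it here.) */
        unsigned truncated = (unsigned)(uintptr_t)p;

        /* uintptr_t is guaranteed wide enough to hold any object pointer,
         * so every bit of the address survives the round trip. */
        uintptr_t preserved = (uintptr_t)p;

        printf("truncated: 0x%x, preserved: 0x%" PRIxPTR "\n", truncated, preserved);
        return 0;
    }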
Guillaume Souchere
ea82911f4c Merge branch 'fix/block_allocation_size' into 'idf'
Fix block allocation size

See merge request espressif/tlsf!12
2024-08-09 15:56:53 +08:00
Scott Shawcroft
c499512eed Fix adding pool with non-zeroed memory 2024-08-08 13:13:31 +02:00
Scott Shawcroft
0c8ce8f470 Fix assertion error due to zero length allocation
The original code was missing a * and therefore checked the size
pointer rather than the value of size for non-zero. This corrects
that error and adds an additional check after the first alignment.
2024-08-08 13:13:08 +02:00
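A hypothetical reduction of the bug (the names below are illustrative, not the repository's): when the size is passed by pointer so the callee can adjust it in place, testing the pointer instead of the value is always true, letting zero-length requests through:

    #include <stddef.h>

    /* As in block_locate_free(), the size is passed by pointer so the
     * callee can round it up in place. */
    static void *locate_free(size_t *size)
    {
        /* The old guard read `if (size)`, which only tests the pointer
         * itself (always non-NULL here), so zero-length requests slipped
         * through and later tripped an assertion. Dereferencing tests the
         * actual value: */
        if (*size)
        {
            /* ... search the free lists and return a block ... */
        }
        return NULL;  /* zero-length request: nothing to allocate */
    }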
Guillaume Souchere
36e304a5d4 Merge branch 'feature/add-repository-integrations' into 'idf'
Add repository integrations (by Create-project-tools)

See merge request espressif/tlsf!11
2024-07-31 18:08:01 +08:00
Guillaume Souchere
3862836df0 ci(repo-integrations): Update component name to heap 2024-07-31 12:06:35 +02:00
Espressif BOT
be2197443c ci(repo-integrations): Add repository integrations (by Create-project-tools) 2024-07-31 15:16:27 +08:00
Guillaume Souchere
8fc595fe22 Merge branch 'fix/tlsf_walk_pool' into 'idf'
fix(tlsf): Fix while loop in walker

See merge request espressif/tlsf!10
2024-03-20 13:17:05 +08:00
Guillaume Souchere
817349ad39 fix(tlsf): Fix while loop in walker
Exit the loop before getting the next block if the
pool_walker returns false.
2024-03-19 13:51:43 +01:00
Guillaume Souchere
e282f0be5a Merge branch 'feat/add-return-value-to-walker' into 'idf'
change(tlsf): Add return value to tlsf_walker

See merge request espressif/tlsf!9
2024-03-19 15:35:55 +08:00
Guillaume Souchere
53a42a2c1a Merge branch 'feat/add-malloc-with-address' into 'idf'
feat(tlsf): Add a function to malloc at a given address

See merge request espressif/tlsf!8
2024-03-06 16:52:33 +08:00
Guillaume Souchere
b2f44c4bf3 feat(tlsf): Add a function to malloc at a given address
The function finds a suitable free block in the memory pool that can
contain the malloc size at the given address, splits the free block
to create the block that will contain the allocated memory, updates
the status of the newly created blocks (free / used), and returns
the pointer to the memory allocated at the given address.

If no such block is found (the state of the heap doesn't allow for an
allocation at the given address), the function returns NULL.
2024-03-06 07:33:08 +01:00
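Assuming the signature this merge adds to tlsf.h (visible in the diff further down), a caller might wrap the new function like this; the wrapper name and its parameters are illustrative:

    #include <stddef.h>
    #include "tlsf.h"

    /* Reserve `size` bytes covering `target` inside an existing TLSF pool.
     * `tlsf` comes from the pool setup (e.g. tlsf_create_with_pool()); the
     * target address must fall inside a free block for this to succeed. */
    static void *reserve_at(tlsf_t tlsf, void *target, size_t size)
    {
        void *p = tlsf_malloc_addr(tlsf, size, target);
        if (p == NULL) {
            /* the heap state does not allow an allocation at that address */
        }
        return p;
    }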
Guillaume Souchere
84048e02ba change(tlsf): Add return value to tlsf_walker
This commit adds a boolean return value to the tlsf_walker
function, allowing users to interrupt the traversal of the
pool by returning false from any call to the tlsf_walker.
2024-02-27 12:27:38 +01:00
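A small sketch of a walker written against the new bool-returning typedef (see the tlsf.h hunk at the end of this diff); the callback name and its use of the user argument are illustrative:

    #include <stdbool.h>
    #include <stddef.h>
    #include "tlsf.h"

    /* Record the first used block and stop: returning false makes
     * tlsf_walk_pool() abandon the traversal instead of visiting the rest. */
    static bool find_first_used(void *ptr, size_t size, int used, void *user)
    {
        (void)size;
        if (used) {
            *(void **)user = ptr;  /* hand the block back to the caller */
            return false;          /* interrupt the walk */
        }
        return true;               /* keep walking */
    }

    /* usage: void *hit = NULL; tlsf_walk_pool(pool, find_first_used, &hit); */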
Guillaume Souchere
ddfa052fbf Merge branch 'fix/tlsf_fit_size_for_small_size' into 'idf'
fix(tlsf): tlsf_fit_size GoodFit should not be applied for small block sizes

See merge request espressif/tlsf!7
2024-01-16 17:11:06 +08:00
Guillaume Souchere
32701a163d fix(tlsf): tlsf_fit_size GoodFit
The good-fit mechanism should not be applied to small blocks.
If the size parameter is smaller than control->small_block_size,
then the function tlsf_fit_size should return the size directly.
2024-01-16 07:42:58 +01:00
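A worked example of the rounding that this fix now bypasses for small sizes, assuming an illustrative sl_index_count of 32 (the real value lives in the control structure):

    #include <stdio.h>

    /* Standalone reproduction of the GoodFit rounding for a large request. */
    int main(void)
    {
        unsigned size = 1000;          /* falls in the 512..1023 range */
        unsigned sl_index_count = 32;
        unsigned sl_interval = (1u << (32 - __builtin_clz(size) - 1)) / sl_index_count;
        /* 512 / 32 = 16, so 1000 is rounded down to 992 */
        printf("fit(%u) = %u\n", size, size & ~(sl_interval - 1u));
        return 0;
    }

After the fix, a size below control->small_block_size skips this rounding entirely and is only adjusted for alignment.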
Guillaume Souchere
d2e28f8724 Merge branch 'fix/call_check_hook_for_all_blocks' into 'idf'
tlsf: call the tlsf_check_hook from integrity_walker to check all blocks

See merge request espressif/tlsf!6
2023-10-16 16:18:01 +08:00
Guillaume Souchere
262d11fcfa tlsf: move the call to tlsf_check_hook from tlsf_check to the integrity_walker function
Previously the hook was called from the tlsf_check function, so only
the free blocks were checked and the used blocks were left unchecked
by the hook. By moving the hook call to the integrity_walker
function, both free and used blocks are now passed to the hook.
2023-09-27 13:22:02 +02:00
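A sketch of what such a hook could look like; the signature is inferred from the call site in the integrity_walker hunk below, and the fill byte is an assumption, not the repository's:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Check hook with the shape used by integrity_walker:
     * (block start, usable size, is_free). After this change the hook
     * sees used blocks as well, not only the free ones. */
    static bool my_check_hook(void *start, size_t size, bool is_free)
    {
        if (is_free) {
            /* e.g. verify that freed memory still carries a fill pattern
             * (0xFE is an illustrative poison byte) */
            const uint8_t *p = start;
            for (size_t i = 0; i < size; i++) {
                if (p[i] != 0xFE) {
                    return false;  /* corruption detected */
                }
            }
        }
        /* used blocks: application-specific validation could go here */
        return true;
    }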
5 changed files with 193 additions and 36 deletions

.github/workflows/issue_comment.yml (new file, +21 lines)

@@ -0,0 +1,21 @@
name: Sync issue comments to JIRA

# This workflow will be triggered when a new issue comment is created (including PR comments)
on: issue_comment

jobs:
  sync_issue_comments_to_jira:
    name: Sync Issue Comments to Jira
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: Sync issue comments to JIRA
        uses: espressif/sync-jira-actions@v1
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          JIRA_PASS: ${{ secrets.JIRA_PASS }}
          JIRA_PROJECT: IDFGH
          JIRA_COMPONENT: heap
          JIRA_URL: ${{ secrets.JIRA_URL }}
          JIRA_USER: ${{ secrets.JIRA_USER }}

.github/workflows/new_issues.yml (new file, +21 lines)

@@ -0,0 +1,21 @@
name: Sync issues to Jira

# This workflow will be triggered when a new issue is opened
on: issues

jobs:
  sync_issues_to_jira:
    name: Sync issues to Jira
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: Sync GitHub issues to Jira project
        uses: espressif/sync-jira-actions@v1
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          JIRA_PASS: ${{ secrets.JIRA_PASS }}
          JIRA_PROJECT: IDFGH
          JIRA_COMPONENT: heap
          JIRA_URL: ${{ secrets.JIRA_URL }}
          JIRA_USER: ${{ secrets.JIRA_USER }}

.github/workflows/new_prs.yml (new file, +26 lines)

@@ -0,0 +1,26 @@
name: Sync remaining PRs to Jira

# This workflow will be triggered every hour, to sync remaining PRs (i.e. PRs with zero comments) to the Jira project
# Note that PRs also get synced when a new PR comment is created
on:
  schedule:
    - cron: "0 * * * *"

jobs:
  sync_prs_to_jira:
    name: Sync PRs to Jira
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: Sync PRs to Jira project
        uses: espressif/sync-jira-actions@v1
        with:
          cron_job: true
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          JIRA_PASS: ${{ secrets.JIRA_PASS }}
          JIRA_PROJECT: IDFGH
          JIRA_COMPONENT: heap
          JIRA_URL: ${{ secrets.JIRA_URL }}
          JIRA_USER: ${{ secrets.JIRA_USER }}

tlsf.c (158 lines changed)

@@ -7,6 +7,7 @@
#include <string.h>
#include <limits.h>
#include <stdio.h>
#include <stdint.h>
#include "tlsf.h"
#include "tlsf_common.h"
#include "tlsf_block_functions.h"
@@ -284,8 +285,8 @@ static inline __attribute__((always_inline)) void mapping_search(control_t* cont
{
if (*size >= control->small_block_size)
{
const size_t round = (1 << (tlsf_fls_sizet(*size) - control->sl_index_count_log2)) - 1;
*size = (*size + round) & ~round;
const size_t round = (1 << (tlsf_fls_sizet(*size) - control->sl_index_count_log2));
*size = align_up(*size, round);
}
mapping_insert(control, *size, fli, sli);
}
@@ -545,7 +546,7 @@ static inline __attribute__((always_inline)) block_header_t* block_locate_free(c
int fl = 0, sl = 0;
block_header_t* block = 0;
if (size)
if (*size)
{
mapping_search(control, size, &fl, &sl);
@@ -659,7 +660,7 @@ typedef struct integrity_t
#define tlsf_insist(x) { if (!(x)) { status--; } }
static void integrity_walker(void* ptr, size_t size, int used, void* user)
static bool integrity_walker(void* ptr, size_t size, int used, void* user)
{
block_header_t* block = block_from_ptr(ptr);
integrity_t* integ = tlsf_cast(integrity_t*, user);
@@ -668,14 +669,34 @@ static void integrity_walker(void* ptr, size_t size, int used, void* user)
const size_t this_block_size = block_size(block);
int status = 0;
(void)used;
tlsf_insist(integ->prev_status == this_prev_status && "prev status incorrect");
tlsf_insist(size == this_block_size && "block size incorrect");
if (tlsf_check_hook != NULL)
{
/* block_size(block) returns the size of the usable memory when the block is allocated.
* As the block under test is free, we need to subtract to the block size the next_free
* and prev_free fields of the block header as they are not a part of the usable memory
* when the block is free. In addition, we also need to subtract the size of prev_phys_block
* as this field is in fact part of the current free block and not part of the next (allocated)
* block. Check the comments in block_split function for more details.
*/
const size_t actual_free_block_size = used ? this_block_size :
this_block_size - offsetof(block_header_t, next_free)- block_header_overhead;
void* ptr_block = used ? (void*)block + block_start_offset :
(void*)block + sizeof(block_header_t);
tlsf_insist(tlsf_check_hook(ptr_block, actual_free_block_size, !used));
}
integ->prev_status = this_status;
integ->status += status;
return true;
}
int tlsf_check(tlsf_t tlsf)
{
int i, j;
@@ -722,23 +743,6 @@ int tlsf_check(tlsf_t tlsf)
mapping_insert(control, block_size(block), &fli, &sli);
tlsf_insist(fli == i && sli == j && "block size indexed in wrong list");
if (tlsf_check_hook != NULL)
{
/* block_size(block) returns the size of the usable memory when the block is allocated.
* As the block under test is free, we need to subtract to the block size the next_free
* and prev_free fields of the block header as they are not a part of the usable memory
* when the block is free. In addition, we also need to subtract the size of prev_phys_block
* as this field is in fact part of the current free block and not part of the next (allocated)
* block. Check the comments in block_split function for more details.
*/
const size_t actual_free_block_size = block_size(block)
- offsetof(block_header_t, next_free)
- block_header_overhead;
tlsf_insist(tlsf_check_hook((void*)block + sizeof(block_header_t),
actual_free_block_size, is_block_free));
}
block = block->next_free;
}
}
@@ -749,10 +753,11 @@ int tlsf_check(tlsf_t tlsf)
#undef tlsf_insist
static void default_walker(void* ptr, size_t size, int used, void* user)
static bool default_walker(void* ptr, size_t size, int used, void* user)
{
(void)user;
printf("\t%p %s size: %x (%p)\n", ptr, used ? "used" : "free", (unsigned int)size, block_from_ptr(ptr));
return true;
}
void tlsf_walk_pool(pool_t pool, tlsf_walker walker, void* user)
@@ -761,14 +766,18 @@ void tlsf_walk_pool(pool_t pool, tlsf_walker walker, void* user)
block_header_t* block =
offset_to_block(pool, -(int)block_header_overhead);
while (block && !block_is_last(block))
bool ret_val = true;
while (block && !block_is_last(block) && ret_val == true)
{
pool_walker(
ret_val = pool_walker(
block_to_ptr(block),
block_size(block),
!block_is_free(block),
user);
block = block_next(block);
if (ret_val == true) {
block = block_next(block);
}
}
}
@@ -794,17 +803,20 @@ int tlsf_check_pool(pool_t pool)
size_t tlsf_fit_size(tlsf_t tlsf, size_t size)
{
/* because it's GoodFit, allocable size is one range lower */
if (size && tlsf != NULL)
{
size_t sl_interval;
control_t* control = tlsf_cast(control_t*, tlsf);
sl_interval = (1 << (32 - __builtin_clz(size) - 1)) / control->sl_index_count;
return size & ~(sl_interval - 1);
if (size == 0 || tlsf == NULL) {
return 0;
}
return 0;
}
control_t* control = tlsf_cast(control_t*, tlsf);
if (size < control->small_block_size) {
return adjust_request_size(tlsf, size, ALIGN_SIZE);
}
/* because it's GoodFit, allocable size is one range lower */
size_t sl_interval;
sl_interval = (1 << (32 - __builtin_clz(size) - 1)) / control->sl_index_count;
return size & ~(sl_interval - 1);
}
/*
** Size of the TLSF structures in a given memory block passed to
@@ -1001,11 +1013,87 @@ void* tlsf_malloc(tlsf_t tlsf, size_t size)
{
control_t* control = tlsf_cast(control_t*, tlsf);
size_t adjust = adjust_request_size(tlsf, size, ALIGN_SIZE);
// Returned size is 0 when the requested size is larger than the max block
// size.
if (adjust == 0) {
return NULL;
}
// block_locate_free() may adjust our allocated size further.
block_header_t* block = block_locate_free(control, &adjust);
return block_prepare_used(control, block, adjust);
}
/**
* @brief Allocate memory of at least `size` bytes at a given address in the pool.
*
* @param tlsf TLSF structure to allocate memory from.
* @param size Minimum size, in bytes, of the memory to allocate
* @param address address at which the allocation must be done
*
* @return pointer to free memory or NULL in case of incapacity to perform the malloc
*/
void* tlsf_malloc_addr(tlsf_t tlsf, size_t size, void *address)
{
control_t* control = tlsf_cast(control_t*, tlsf);
/* adjust the address to be ALIGN_SIZE bytes aligned. */
const uintptr_t addr_adjusted = align_down(tlsf_cast(uintptr_t, address), ALIGN_SIZE);
/* adjust the size to be ALIGN_SIZE bytes aligned. Add to the size the difference
* between the requested address and the address_adjusted. */
size_t size_adjusted = align_up(size + (tlsf_cast(uintptr_t, address) - addr_adjusted), ALIGN_SIZE);
/* find the free block that starts before the address in the pool and is big enough
* to support the size of allocation at the given address */
block_header_t* block = offset_to_block(tlsf_get_pool(tlsf), -(int)block_header_overhead);
const char *alloc_start = tlsf_cast(char*, addr_adjusted);
const char *alloc_end = alloc_start + size_adjusted;
bool block_found = false;
do {
const char *block_start = tlsf_cast(char*, block_to_ptr(block));
const char *block_end = tlsf_cast(char*, block_to_ptr(block)) + block_size(block);
if (block_start <= alloc_start && block_end > alloc_start) {
/* A: block_end >= alloc_end. B: block is free */
if (block_end < alloc_end || !block_is_free(block)) {
/* not(A) || not(B)
* We won't find another suitable block from this point on
* so we can break and return NULL */
break;
}
/* A && B
* The block can fit the alloc and is located at a position allowing for the alloc
* to be placed at the given address. We can return from the while */
block_found = true;
} else if (!block_is_last(block)) {
/* the block doesn't match the expected criteria, continue with the next block */
block = block_next(block);
}
} while (!block_is_last(block) && block_found == false);
if (!block_found) {
return NULL;
}
/* remove block from the free list since a part of it will be used */
block_remove(control, block);
/* trim any leading space or add the leading space to the overall requested size
* if the leading space is not big enough to store a block of minimum size */
const size_t space_before_addr_adjusted = addr_adjusted - tlsf_cast(uintptr_t, block_to_ptr(block));
block_header_t *return_block = block;
if (space_before_addr_adjusted >= block_size_min) {
return_block = block_trim_free_leading(control, block, space_before_addr_adjusted);
}
else {
size_adjusted += space_before_addr_adjusted;
}
/* trim trailing space if any and return a pointer to the first usable byte allocated */
return block_prepare_used(control, return_block, size_adjusted);
}
/**
* @brief Allocate memory of at least `size` bytes where byte at `data_offset` will be aligned to `alignment`.
*

tlsf.h (3 lines changed)

@@ -33,6 +33,7 @@ void tlsf_remove_pool(tlsf_t tlsf, pool_t pool);
void* tlsf_malloc(tlsf_t tlsf, size_t size);
void* tlsf_memalign(tlsf_t tlsf, size_t align, size_t size);
void* tlsf_memalign_offs(tlsf_t tlsf, size_t align, size_t size, size_t offset);
void* tlsf_malloc_addr(tlsf_t tlsf, size_t size, void *address);
void* tlsf_realloc(tlsf_t tlsf, void* ptr, size_t size);
void tlsf_free(tlsf_t tlsf, void* ptr);
@@ -58,7 +59,7 @@ size_t tlsf_alloc_overhead(void);
size_t tlsf_fit_size(tlsf_t tlsf, size_t size);
/* Debugging. */
typedef void (*tlsf_walker)(void* ptr, size_t size, int used, void* user);
typedef bool (*tlsf_walker)(void* ptr, size_t size, int used, void* user);
void tlsf_walk_pool(pool_t pool, tlsf_walker walker, void* user);
/* Returns nonzero if any internal consistency check fails. */
int tlsf_check(tlsf_t tlsf);