Compare commits
17 commits
circuitpyt...sixtyfour
| Author | SHA1 | Date |
|---|---|---|
| | 6ecac2d0bc | |
| | ea82911f4c | |
| | c499512eed | |
| | 0c8ce8f470 | |
| | 36e304a5d4 | |
| | 3862836df0 | |
| | be2197443c | |
| | 8fc595fe22 | |
| | 817349ad39 | |
| | e282f0be5a | |
| | 53a42a2c1a | |
| | b2f44c4bf3 | |
| | 84048e02ba | |
| | ddfa052fbf | |
| | 32701a163d | |
| | d2e28f8724 | |
| | 262d11fcfa | |
5 changed files with 193 additions and 36 deletions
.github/workflows/issue_comment.yml (vendored, new file, 21 additions)

```diff
@@ -0,0 +1,21 @@
+name: Sync issue comments to JIRA
+
+# This workflow will be triggered when new issue comment is created (including PR comments)
+on: issue_comment
+
+jobs:
+  sync_issue_comments_to_jira:
+    name: Sync Issue Comments to Jira
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: Sync issue comments to JIRA
+        uses: espressif/sync-jira-actions@v1
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+          JIRA_PASS: ${{ secrets.JIRA_PASS }}
+          JIRA_PROJECT: IDFGH
+          JIRA_COMPONENT: heap
+          JIRA_URL: ${{ secrets.JIRA_URL }}
+          JIRA_USER: ${{ secrets.JIRA_USER }}
```
.github/workflows/new_issues.yml (vendored, new file, 21 additions)

```diff
@@ -0,0 +1,21 @@
+name: Sync issues to Jira
+
+# This workflow will be triggered when a new issue is opened
+on: issues
+
+jobs:
+  sync_issues_to_jira:
+    name: Sync issues to Jira
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: Sync GitHub issues to Jira project
+        uses: espressif/sync-jira-actions@v1
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+          JIRA_PASS: ${{ secrets.JIRA_PASS }}
+          JIRA_PROJECT: IDFGH
+          JIRA_COMPONENT: heap
+          JIRA_URL: ${{ secrets.JIRA_URL }}
+          JIRA_USER: ${{ secrets.JIRA_USER }}
```
.github/workflows/new_prs.yml (vendored, new file, 26 additions)

```diff
@@ -0,0 +1,26 @@
+name: Sync remain PRs to Jira
+
+# This workflow will be triggered every hour, to sync remaining PRs (i.e. PRs with zero comment) to Jira project
+# Note that, PRs can also get synced when new PR comment is created
+on:
+  schedule:
+    - cron: "0 * * * *"
+
+jobs:
+  sync_prs_to_jira:
+    name: Sync PRs to Jira
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: Sync PRs to Jira project
+        uses: espressif/sync-jira-actions@v1
+        with:
+          cron_job: true
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+          JIRA_PASS: ${{ secrets.JIRA_PASS }}
+          JIRA_PROJECT: IDFGH
+          JIRA_COMPONENT: heap
+          JIRA_URL: ${{ secrets.JIRA_URL }}
+          JIRA_USER: ${{ secrets.JIRA_USER }}
```
tlsf.c (158 changed lines)

```diff
@@ -7,6 +7,7 @@
 #include <string.h>
 #include <limits.h>
 #include <stdio.h>
+#include <stdint.h>
 #include "tlsf.h"
 #include "tlsf_common.h"
 #include "tlsf_block_functions.h"
@@ -284,8 +285,8 @@ static inline __attribute__((always_inline)) void mapping_search(control_t* cont
 {
     if (*size >= control->small_block_size)
     {
-        const size_t round = (1 << (tlsf_fls_sizet(*size) - control->sl_index_count_log2)) - 1;
-        *size = (*size + round) & ~round;
+        const size_t round = (1 << (tlsf_fls_sizet(*size) - control->sl_index_count_log2));
+        *size = align_up(*size, round);
     }
     mapping_insert(control, *size, fli, sli);
 }
```
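The second hunk replaces the open-coded mask rounding in mapping_search() with align_up() over a power-of-two granularity. A minimal standalone sketch of that granularity, assuming sl_index_count_log2 = 5 (a configuration value not stated in this diff):

```c
#include <stddef.h>
#include <stdio.h>

int main(void)
{
    /* For a 1000-byte request: fls(1000) = 9, so the second-level
     * granularity is 1 << (9 - 5) = 16 and the request is rounded up
     * to the next multiple of 16. */
    size_t size = 1000;
    size_t fls = 31 - __builtin_clz((unsigned)size);       /* = 9 */
    size_t round = (size_t)1 << (fls - 5);                  /* = 16, with sl_index_count_log2 = 5 assumed */
    size_t rounded = (size + (round - 1)) & ~(round - 1);   /* same effect as align_up(size, round) */
    printf("%zu -> %zu\n", size, rounded);                  /* prints: 1000 -> 1008 */
    return 0;
}
```

Assuming align_up() is the usual mask-based round-up, the old arithmetic and the new call land on the same boundary for in-range sizes; the new form simply states the intent directly.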
```diff
@@ -545,7 +546,7 @@ static inline __attribute__((always_inline)) block_header_t* block_locate_free(c
     int fl = 0, sl = 0;
     block_header_t* block = 0;
 
-    if (size)
+    if (*size)
     {
         mapping_search(control, size, &fl, &sl);
 
@@ -659,7 +660,7 @@ typedef struct integrity_t
 
 #define tlsf_insist(x) { if (!(x)) { status--; } }
 
-static void integrity_walker(void* ptr, size_t size, int used, void* user)
+static bool integrity_walker(void* ptr, size_t size, int used, void* user)
 {
     block_header_t* block = block_from_ptr(ptr);
     integrity_t* integ = tlsf_cast(integrity_t*, user);
@@ -668,14 +669,34 @@ static void integrity_walker(void* ptr, size_t size, int used, void* user)
     const size_t this_block_size = block_size(block);
 
     int status = 0;
     (void)used;
     tlsf_insist(integ->prev_status == this_prev_status && "prev status incorrect");
     tlsf_insist(size == this_block_size && "block size incorrect");
 
+    if (tlsf_check_hook != NULL)
+    {
+        /* block_size(block) returns the size of the usable memory when the block is allocated.
+         * As the block under test is free, we need to subtract to the block size the next_free
+         * and prev_free fields of the block header as they are not a part of the usable memory
+         * when the block is free. In addition, we also need to subtract the size of prev_phys_block
+         * as this field is in fact part of the current free block and not part of the next (allocated)
+         * block. Check the comments in block_split function for more details.
+         */
+        const size_t actual_free_block_size = used ? this_block_size :
+                                                     this_block_size - offsetof(block_header_t, next_free) - block_header_overhead;
+
+        void* ptr_block = used ? (void*)block + block_start_offset :
+                                 (void*)block + sizeof(block_header_t);
+
+        tlsf_insist(tlsf_check_hook(ptr_block, actual_free_block_size, !used));
+    }
+
     integ->prev_status = this_status;
     integ->status += status;
 
+    return true;
 }
 
 
 int tlsf_check(tlsf_t tlsf)
 {
     int i, j;
@@ -722,23 +743,6 @@ int tlsf_check(tlsf_t tlsf)
                 mapping_insert(control, block_size(block), &fli, &sli);
                 tlsf_insist(fli == i && sli == j && "block size indexed in wrong list");
 
-                if (tlsf_check_hook != NULL)
-                {
-                    /* block_size(block) returns the size of the usable memory when the block is allocated.
-                     * As the block under test is free, we need to subtract to the block size the next_free
-                     * and prev_free fields of the block header as they are not a part of the usable memory
-                     * when the block is free. In addition, we also need to subtract the size of prev_phys_block
-                     * as this field is in fact part of the current free block and not part of the next (allocated)
-                     * block. Check the comments in block_split function for more details.
-                     */
-                    const size_t actual_free_block_size = block_size(block)
-                                                          - offsetof(block_header_t, next_free)
-                                                          - block_header_overhead;
-
-                    tlsf_insist(tlsf_check_hook((void*)block + sizeof(block_header_t),
-                                actual_free_block_size, is_block_free));
-                }
-
                 block = block->next_free;
             }
         }
@@ -749,10 +753,11 @@ int tlsf_check(tlsf_t tlsf)
 
 #undef tlsf_insist
 
-static void default_walker(void* ptr, size_t size, int used, void* user)
+static bool default_walker(void* ptr, size_t size, int used, void* user)
 {
     (void)user;
     printf("\t%p %s size: %x (%p)\n", ptr, used ? "used" : "free", (unsigned int)size, block_from_ptr(ptr));
+    return true;
 }
 
 void tlsf_walk_pool(pool_t pool, tlsf_walker walker, void* user)
@@ -761,14 +766,18 @@ void tlsf_walk_pool(pool_t pool, tlsf_walker walker, void* user)
     block_header_t* block =
         offset_to_block(pool, -(int)block_header_overhead);
 
-    while (block && !block_is_last(block))
+    bool ret_val = true;
+    while (block && !block_is_last(block) && ret_val == true)
     {
-        pool_walker(
+        ret_val = pool_walker(
             block_to_ptr(block),
             block_size(block),
             !block_is_free(block),
             user);
-        block = block_next(block);
+
+        if (ret_val == true) {
+            block = block_next(block);
+        }
     }
 }
 
```
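The walker callback now returns bool, and tlsf_walk_pool() stops as soon as a walker returns false. A minimal sketch of a walker that uses this to stop at the first free block; the function name and the way the result is handed back through `user` are illustrative, not part of this change:

```c
#include <stdbool.h>
#include <stddef.h>
#include "tlsf.h"

/* Illustrative walker: records the first free block and stops the walk.
 * Returning false makes tlsf_walk_pool() exit its loop early. */
static bool find_first_free(void* ptr, size_t size, int used, void* user)
{
    (void)size;
    if (!used) {
        *(void**)user = ptr;  /* remember where the free block starts */
        return false;         /* stop walking */
    }
    return true;              /* keep walking */
}

/* Usage sketch, assuming `tlsf` is an already initialised TLSF instance:
 *
 *     void* first_free = NULL;
 *     tlsf_walk_pool(tlsf_get_pool(tlsf), find_first_free, &first_free);
 */
```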
```diff
@@ -794,17 +803,20 @@ int tlsf_check_pool(pool_t pool)
 
 size_t tlsf_fit_size(tlsf_t tlsf, size_t size)
 {
-    /* because it's GoodFit, allocable size is one range lower */
-    if (size && tlsf != NULL)
-    {
-        size_t sl_interval;
-        control_t* control = tlsf_cast(control_t*, tlsf);
-        sl_interval = (1 << (32 - __builtin_clz(size) - 1)) / control->sl_index_count;
-        return size & ~(sl_interval - 1);
+    if (size == 0 || tlsf == NULL) {
+        return 0;
     }
 
-    return 0;
+    control_t* control = tlsf_cast(control_t*, tlsf);
+    if (size < control->small_block_size) {
+        return adjust_request_size(tlsf, size, ALIGN_SIZE);
+    }
+
+    /* because it's GoodFit, allocable size is one range lower */
+    size_t sl_interval;
+    sl_interval = (1 << (32 - __builtin_clz(size) - 1)) / control->sl_index_count;
+    return size & ~(sl_interval - 1);
 }
 
 /*
 ** Size of the TLSF structures in a given memory block passed to
```
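For sizes at or above small_block_size, tlsf_fit_size() still rounds the request down to the start of its second-level range. A worked example, assuming sl_index_count = 32 (a value not stated in this diff):

```c
#include <stdio.h>

int main(void)
{
    /* Assumed configuration: sl_index_count = 32. For a 1000-byte request,
     * the second-level interval is (1 << (32 - clz(1000) - 1)) / 32
     * = 512 / 32 = 16, and the fit size rounds down to 992 because
     * GoodFit guarantees one range lower. */
    unsigned size = 1000;
    unsigned sl_index_count = 32;                 /* assumption */
    unsigned sl_interval = (1u << (32 - __builtin_clz(size) - 1)) / sl_index_count;
    printf("sl_interval=%u fit=%u\n", sl_interval, size & ~(sl_interval - 1u));
    /* prints: sl_interval=16 fit=992 */
    return 0;
}
```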
```diff
@@ -1001,11 +1013,87 @@ void* tlsf_malloc(tlsf_t tlsf, size_t size)
 {
     control_t* control = tlsf_cast(control_t*, tlsf);
     size_t adjust = adjust_request_size(tlsf, size, ALIGN_SIZE);
+    // Returned size is 0 when the requested size is larger than the max block
+    // size.
+    if (adjust == 0) {
+        return NULL;
+    }
     // block_locate_free() may adjust our allocated size further.
     block_header_t* block = block_locate_free(control, &adjust);
     return block_prepare_used(control, block, adjust);
 }
 
+/**
+ * @brief Allocate memory of at least `size` bytes at a given address in the pool.
+ *
+ * @param tlsf TLSF structure to allocate memory from.
+ * @param size Minimum size, in bytes, of the memory to allocate
+ * @param address address at which the allocation must be done
+ *
+ * @return pointer to free memory or NULL in case of incapacity to perform the malloc
+ */
+void* tlsf_malloc_addr(tlsf_t tlsf, size_t size, void *address)
+{
+    control_t* control = tlsf_cast(control_t*, tlsf);
+
+    /* adjust the address to be ALIGN_SIZE bytes aligned. */
+    const uintptr_t addr_adjusted = align_down(tlsf_cast(uintptr_t, address), ALIGN_SIZE);
+
+    /* adjust the size to be ALIGN_SIZE bytes aligned. Add to the size the difference
+     * between the requested address and the address_adjusted. */
+    size_t size_adjusted = align_up(size + (tlsf_cast(uintptr_t, address) - addr_adjusted), ALIGN_SIZE);
+
+    /* find the free block that starts before the address in the pool and is big enough
+     * to support the size of allocation at the given address */
+    block_header_t* block = offset_to_block(tlsf_get_pool(tlsf), -(int)block_header_overhead);
+
+    const char *alloc_start = tlsf_cast(char*, addr_adjusted);
+    const char *alloc_end = alloc_start + size_adjusted;
+    bool block_found = false;
+    do {
+        const char *block_start = tlsf_cast(char*, block_to_ptr(block));
+        const char *block_end = tlsf_cast(char*, block_to_ptr(block)) + block_size(block);
+        if (block_start <= alloc_start && block_end > alloc_start) {
+            /* A: block_end >= alloc_end. B: block is free */
+            if (block_end < alloc_end || !block_is_free(block)) {
+                /* not(A) || not(B)
+                 * We won't find another suitable block from this point on
+                 * so we can break and return NULL */
+                break;
+            }
+            /* A && B
+             * The block can fit the alloc and is located at a position allowing for the alloc
+             * to be placed at the given address. We can return from the while */
+            block_found = true;
+        } else if (!block_is_last(block)) {
+            /* the block doesn't match the expected criteria, continue with the next block */
+            block = block_next(block);
+        }
+
+    } while (!block_is_last(block) && block_found == false);
+
+    if (!block_found) {
+        return NULL;
+    }
+
+    /* remove block from the free list since a part of it will be used */
+    block_remove(control, block);
+
+    /* trim any leading space or add the leading space to the overall requested size
+     * if the leading space is not big enough to store a block of minimum size */
+    const size_t space_before_addr_adjusted = addr_adjusted - tlsf_cast(uintptr_t, block_to_ptr(block));
+    block_header_t *return_block = block;
+    if (space_before_addr_adjusted >= block_size_min) {
+        return_block = block_trim_free_leading(control, block, space_before_addr_adjusted);
+    }
+    else {
+        size_adjusted += space_before_addr_adjusted;
+    }
+
+    /* trim trailing space if any and return a pointer to the first usable byte allocated */
+    return block_prepare_used(control, return_block, size_adjusted);
+}
+
 /**
  * @brief Allocate memory of at least `size` bytes where byte at `data_offset` will be aligned to `alignment`.
  *
```
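A hedged usage sketch for the new tlsf_malloc_addr(); the wrapper name, the 64-byte size and the 256-byte offset are made up for illustration, and `tlsf` is assumed to be an already initialised instance whose pool covers the target address:

```c
#include <stddef.h>
#include "tlsf.h"

/* Illustrative only: reserve 64 bytes starting at a fixed offset into the
 * pool, assuming that region is still free at this point. */
static void* reserve_fixed_region(tlsf_t tlsf)
{
    void* target = (char*)tlsf_get_pool(tlsf) + 256;  /* hypothetical address */
    void* p = tlsf_malloc_addr(tlsf, 64, target);
    if (p == NULL) {
        /* no free block covers [target, target + 64), or it is already in use */
    }
    return p;
}
```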
tlsf.h (3 changed lines)

```diff
@@ -33,6 +33,7 @@ void tlsf_remove_pool(tlsf_t tlsf, pool_t pool);
 void* tlsf_malloc(tlsf_t tlsf, size_t size);
 void* tlsf_memalign(tlsf_t tlsf, size_t align, size_t size);
 void* tlsf_memalign_offs(tlsf_t tlsf, size_t align, size_t size, size_t offset);
+void* tlsf_malloc_addr(tlsf_t tlsf, size_t size, void *address);
 void* tlsf_realloc(tlsf_t tlsf, void* ptr, size_t size);
 void tlsf_free(tlsf_t tlsf, void* ptr);
 
@@ -58,7 +59,7 @@ size_t tlsf_alloc_overhead(void);
 size_t tlsf_fit_size(tlsf_t tlsf, size_t size);
 
 /* Debugging. */
-typedef void (*tlsf_walker)(void* ptr, size_t size, int used, void* user);
+typedef bool (*tlsf_walker)(void* ptr, size_t size, int used, void* user);
 void tlsf_walk_pool(pool_t pool, tlsf_walker walker, void* user);
 /* Returns nonzero if any internal consistency check fails. */
 int tlsf_check(tlsf_t tlsf);
```