net: pkt: Collect net_pkt allocation statistics

If CONFIG_NET_PKT_ALLOC_STATS is enabled, the "net mem" command
can show net_pkt allocation statistics such as successful/failed
allocation counts, average allocation sizes, and allocation times.

Signed-off-by: Jukka Rissanen <jukka.rissanen@nordicsemi.no>
Author:     Jukka Rissanen <jukka.rissanen@nordicsemi.no>
Date:       2024-07-03 15:51:43 +03:00
Committed:  Henrik Brix Andersen
Parent:     f7ef64df28
Commit:     67be3a166b

5 changed files with 157 additions and 11 deletions

--- a/include/zephyr/net/net_pkt.h
+++ b/include/zephyr/net/net_pkt.h

@@ -50,6 +50,28 @@ struct net_context;
 /** @cond INTERNAL_HIDDEN */
 
+#if defined(CONFIG_NET_PKT_ALLOC_STATS)
+/* Running totals for one allocation outcome (success or failure) */
+struct net_pkt_alloc_stats {
+	uint64_t alloc_sum; /* sum of requested allocation sizes */
+	uint64_t time_sum;  /* sum of allocation times, in cycles */
+	uint32_t count;     /* number of allocations */
+};
+
+/* Per-slab statistics, one entry per net_pkt slab */
+struct net_pkt_alloc_stats_slab {
+	struct net_pkt_alloc_stats ok;
+	struct net_pkt_alloc_stats fail;
+	struct k_mem_slab *slab;
+};
+
+#define NET_PKT_ALLOC_STATS_DEFINE(alloc_name, slab_name)		\
+	STRUCT_SECTION_ITERABLE(net_pkt_alloc_stats_slab, alloc_name) = { \
+		.slab = &slab_name,					\
+	}
+#else
+#define NET_PKT_ALLOC_STATS_DEFINE(name, slab)
+#endif /* CONFIG_NET_PKT_ALLOC_STATS */
+
 /* buffer cursor used in net_pkt */
 struct net_pkt_cursor {
 	/** Current net_buf pointer by the cursor */
@@ -145,6 +167,10 @@ struct net_pkt {
 	};
 #endif /* CONFIG_NET_PKT_RXTIME_STATS || CONFIG_NET_PKT_TXTIME_STATS */
 
+#if defined(CONFIG_NET_PKT_ALLOC_STATS)
+	struct net_pkt_alloc_stats_slab *alloc_stats;
+#endif /* CONFIG_NET_PKT_ALLOC_STATS */
+
 	/** Reference counter */
 	atomic_t atomic_ref;
@@ -1452,7 +1478,8 @@ static inline void net_pkt_set_remote_address(struct net_pkt *pkt,
  * @param count Number of net_pkt in this slab.
  */
 #define NET_PKT_SLAB_DEFINE(name, count)				\
-	K_MEM_SLAB_DEFINE(name, sizeof(struct net_pkt), count, 4)
+	K_MEM_SLAB_DEFINE(name, sizeof(struct net_pkt), count, 4);	\
+	NET_PKT_ALLOC_STATS_DEFINE(pkt_alloc_stats_##name, name)
 
 /** @cond INTERNAL_HIDDEN */
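For illustration, with CONFIG_NET_PKT_ALLOC_STATS enabled a slab defined
as NET_PKT_SLAB_DEFINE(my_pkts, 8) -- where my_pkts is a hypothetical
name, not part of this commit -- expands to roughly:

    /* The slab itself, one struct net_pkt per block */
    K_MEM_SLAB_DEFINE(my_pkts, sizeof(struct net_pkt), 8, 4);

    /* A stats entry placed into the net_pkt_alloc_stats_slab iterable
     * section, pointing back at the slab so it can be looked up later
     * by find_alloc_stats() in net_pkt.c.
     */
    STRUCT_SECTION_ITERABLE(net_pkt_alloc_stats_slab, pkt_alloc_stats_my_pkts) = {
            .slab = &my_pkts,
    };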

--- a/subsys/net/ip/CMakeLists.txt
+++ b/subsys/net/ip/CMakeLists.txt

@@ -59,6 +59,12 @@ if(CONFIG_NET_TCP_ISN_RFC6528 OR CONFIG_NET_IPV6_PE)
   endif()
 endif()
 
+if(CONFIG_NET_PKT_ALLOC_STATS)
+  zephyr_linker_sources(DATA_SECTIONS iterables_net_pkt_alloc_stats.ld)
+  zephyr_iterable_section(NAME net_pkt_alloc_stats_slab GROUP DATA_REGION
+                          ${XIP_ALIGN_WITH_INPUT} SUBALIGN ${CONFIG_LINKER_ITERABLE_SUBALIGN})
+endif()
+
 # To get private includes like net_shell.h
 zephyr_library_include_directories(. ${ZEPHYR_BASE}/subsys/net/lib)

--- a/subsys/net/ip/Kconfig
+++ b/subsys/net/ip/Kconfig

@@ -1011,6 +1011,15 @@ config NET_PKT_TXTIME_STATS_DETAIL
	  The extra statistics can be seen in net-shell using "net stats"
	  command.
 
+config NET_PKT_ALLOC_STATS
+	bool "Get net_pkt allocation statistics"
+	help
+	  Collect net_pkt allocation statistics, like number of allocations,
+	  average allocation size, average allocation time in usec, for both
+	  successful and failed allocations.
+	  The extra statistics can be seen in net-shell using "net mem"
+	  command.
+
 config NET_PROMISCUOUS_MODE
	bool "Promiscuous mode support"
	select NET_MGMT

--- /dev/null
+++ b/subsys/net/ip/iterables_net_pkt_alloc_stats.ld

@@ -0,0 +1,9 @@
+/*
+ * Copyright (c) 2024 Nordic Semiconductor ASA
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+#include <zephyr/linker/iterable_sections.h>
+
+ITERABLE_SECTION_RAM(net_pkt_alloc_stats_slab, Z_LINK_ITERABLE_SUBALIGN)
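This new linker fragment reserves a RAM section that collects every
STRUCT_SECTION_ITERABLE(net_pkt_alloc_stats_slab, ...) instance, so a
consumer such as the "net mem" shell command can walk all registered
slabs. A minimal sketch of that pattern; print_alloc_stats() is a
hypothetical helper and the shell's actual implementation is not part
of this diff:

    #include <zephyr/kernel.h>
    #include <zephyr/sys/iterable_sections.h>
    #include <zephyr/net/net_pkt.h>

    /* Walk every registered net_pkt slab and print average
     * allocation size (bytes) and average allocation time (usec).
     */
    static void print_alloc_stats(void)
    {
            STRUCT_SECTION_FOREACH(net_pkt_alloc_stats_slab, entry) {
                    if (entry->ok.count > 0) {
                            printk("slab %p: %u allocs, avg %llu bytes, avg %llu us\n",
                                   (void *)entry->slab, entry->ok.count,
                                   entry->ok.alloc_sum / entry->ok.count,
                                   k_cyc_to_us_floor64(entry->ok.time_sum /
                                                       entry->ok.count));
                    }

                    if (entry->fail.count > 0) {
                            printk("slab %p: %u failed allocs\n",
                                   (void *)entry->slab, entry->fail.count);
                    }
            }
    }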

--- a/subsys/net/ip/net_pkt.c
+++ b/subsys/net/ip/net_pkt.c

@@ -128,8 +128,8 @@ BUILD_ASSERT(CONFIG_NET_BUF_DATA_SIZE >= 96);
 #error "Minimum value for CONFIG_NET_BUF_TX_COUNT is 1"
 #endif
 
-K_MEM_SLAB_DEFINE(rx_pkts, sizeof(struct net_pkt), CONFIG_NET_PKT_RX_COUNT, 4);
-K_MEM_SLAB_DEFINE(tx_pkts, sizeof(struct net_pkt), CONFIG_NET_PKT_TX_COUNT, 4);
+NET_PKT_SLAB_DEFINE(rx_pkts, CONFIG_NET_PKT_RX_COUNT);
+NET_PKT_SLAB_DEFINE(tx_pkts, CONFIG_NET_PKT_TX_COUNT);
 
 #if defined(CONFIG_NET_BUF_FIXED_DATA_SIZE)
@@ -866,17 +866,79 @@ void net_pkt_print(void)
 
 /* New allocator and API starts here */
 
+#if defined(CONFIG_NET_PKT_ALLOC_STATS)
+static struct net_pkt_alloc_stats_slab *find_alloc_stats(struct k_mem_slab *slab)
+{
+	STRUCT_SECTION_FOREACH(net_pkt_alloc_stats_slab, tmp) {
+		if (tmp->slab == slab) {
+			return tmp;
+		}
+	}
+
+	NET_ASSERT(false, "slab not found");
+
+	/* This will force a crash which is intended in this case as the
+	 * slab should always have a valid value.
+	 */
+	return NULL;
+}
+
+/* Account a successful allocation and yield the updated count so the
+ * caller can detect a rollover. When the 32-bit count wraps to zero,
+ * the sums are restarted to keep the averages consistent.
+ */
+#define NET_PKT_ALLOC_STATS_UPDATE(pkt, alloc_size, start) ({		\
+	if (pkt->alloc_stats == NULL) {					\
+		pkt->alloc_stats = find_alloc_stats(pkt->slab);		\
+	}								\
+	pkt->alloc_stats->ok.count++;					\
+	if (pkt->alloc_stats->ok.count == 0) {				\
+		pkt->alloc_stats->ok.alloc_sum = 0ULL;			\
+		pkt->alloc_stats->ok.time_sum = 0ULL;			\
+	} else {							\
+		pkt->alloc_stats->ok.alloc_sum += (uint64_t)alloc_size;	\
+		pkt->alloc_stats->ok.time_sum +=			\
+			(uint64_t)(k_cycle_get_32() - start);		\
+	}								\
+									\
+	pkt->alloc_stats->ok.count;					\
+})
+
+/* Same bookkeeping for failed allocations */
+#define NET_PKT_ALLOC_STATS_FAIL(pkt, alloc_size, start) ({		\
+	if (pkt->alloc_stats == NULL) {					\
+		pkt->alloc_stats = find_alloc_stats(pkt->slab);		\
+	}								\
+	pkt->alloc_stats->fail.count++;					\
+	if (pkt->alloc_stats->fail.count == 0) {			\
+		pkt->alloc_stats->fail.alloc_sum = 0ULL;		\
+		pkt->alloc_stats->fail.time_sum = 0ULL;			\
+	} else {							\
+		pkt->alloc_stats->fail.alloc_sum += (uint64_t)alloc_size; \
+		pkt->alloc_stats->fail.time_sum +=			\
+			(uint64_t)(k_cycle_get_32() - start);		\
+	}								\
+									\
+	pkt->alloc_stats->fail.count;					\
+})
+#else
+#define NET_PKT_ALLOC_STATS_UPDATE(pkt, alloc_size, start) ({ 0; })
+#define NET_PKT_ALLOC_STATS_FAIL(pkt, alloc_size, start) ({ 0; })
+#endif /* CONFIG_NET_PKT_ALLOC_STATS */
 
 #if defined(CONFIG_NET_BUF_FIXED_DATA_SIZE)
 
 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
-static struct net_buf *pkt_alloc_buffer(struct net_buf_pool *pool,
+static struct net_buf *pkt_alloc_buffer(struct net_pkt *pkt,
+					struct net_buf_pool *pool,
 					size_t size, k_timeout_t timeout,
 					const char *caller, int line)
 #else
-static struct net_buf *pkt_alloc_buffer(struct net_buf_pool *pool,
+static struct net_buf *pkt_alloc_buffer(struct net_pkt *pkt,
+					struct net_buf_pool *pool,
 					size_t size, k_timeout_t timeout)
 #endif
 {
+#if defined(CONFIG_NET_PKT_ALLOC_STATS)
+	/* Snapshot the cycle counter and the requested total size so
+	 * the allocation can be accounted when it completes.
+	 */
+	uint32_t start_time = k_cycle_get_32();
+	size_t total_size = size;
+#else
+	ARG_UNUSED(pkt);
+#endif
+
 	k_timepoint_t end = sys_timepoint_calc(timeout);
 	struct net_buf *first = NULL;
 	struct net_buf *current = NULL;
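The NET_PKT_ALLOC_STATS_UPDATE/FAIL macros above rely on the GCC/Clang
statement-expression extension: the last expression inside ({ ... })
becomes the value of the whole macro, so the caller receives the
post-increment count and can detect the wrap to zero. A minimal
standalone illustration of that rollover pattern; the COUNT_UP macro
is hypothetical and not part of this commit:

    #include <stdint.h>
    #include <stdio.h>

    /* Increment a counter and yield its new value, mirroring how the
     * stats macros report the updated count to their caller.
     */
    #define COUNT_UP(cnt) ({ \
            (cnt)++;         \
            (cnt);           /* value of the whole ({ ... }) block */ \
    })

    int main(void)
    {
            uint32_t c = UINT32_MAX;

            if (COUNT_UP(c) == 0) {
                    /* 0xFFFFFFFF + 1 wraps to 0 for uint32_t */
                    puts("counter rolled over");
            }

            return 0;
    }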
@@ -915,28 +977,49 @@ static struct net_buf *pkt_alloc_buffer(struct net_buf_pool *pool,
 #endif
 	} while (size);
 
+#if defined(CONFIG_NET_PKT_ALLOC_STATS)
+	if (NET_PKT_ALLOC_STATS_UPDATE(pkt, total_size, start_time) == 0) {
+		NET_DBG("pkt %p %s stats rollover", pkt, "ok");
+	}
+#endif
+
 	return first;
 
 error:
 	if (first) {
 		net_buf_unref(first);
 	}
 
+#if defined(CONFIG_NET_PKT_ALLOC_STATS)
+	if (NET_PKT_ALLOC_STATS_FAIL(pkt, total_size, start_time) == 0) {
+		NET_DBG("pkt %p %s stats rollover", pkt, "fail");
+	}
+#endif
+
 	return NULL;
 }
 
 #else /* !CONFIG_NET_BUF_FIXED_DATA_SIZE */
 
 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
-static struct net_buf *pkt_alloc_buffer(struct net_buf_pool *pool,
+static struct net_buf *pkt_alloc_buffer(struct net_pkt *pkt,
+					struct net_buf_pool *pool,
 					size_t size, k_timeout_t timeout,
 					const char *caller, int line)
 #else
-static struct net_buf *pkt_alloc_buffer(struct net_buf_pool *pool,
+static struct net_buf *pkt_alloc_buffer(struct net_pkt *pkt,
+					struct net_buf_pool *pool,
 					size_t size, k_timeout_t timeout)
 #endif
 {
 	struct net_buf *buf;
+#if defined(CONFIG_NET_PKT_ALLOC_STATS)
+	uint32_t start_time = k_cycle_get_32();
+	size_t total_size = size;
+#else
+	ARG_UNUSED(pkt);
+#endif
 
 	buf = net_buf_alloc_len(pool, size, timeout);
 
 #if CONFIG_NET_PKT_LOG_LEVEL >= LOG_LEVEL_DBG
@@ -949,6 +1032,18 @@ static struct net_buf *pkt_alloc_buffer(struct net_buf_pool *pool,
 		buf, buf->ref, caller, line);
 #endif
 
+#if defined(CONFIG_NET_PKT_ALLOC_STATS)
+	if (buf) {
+		if (NET_PKT_ALLOC_STATS_UPDATE(pkt, total_size, start_time) == 0) {
+			NET_DBG("pkt %p %s stats rollover", pkt, "ok");
+		}
+	} else {
+		if (NET_PKT_ALLOC_STATS_FAIL(pkt, total_size, start_time) == 0) {
+			NET_DBG("pkt %p %s stats rollover", pkt, "fail");
+		}
+	}
+#endif /* CONFIG_NET_PKT_ALLOC_STATS */
+
 	return buf;
 }
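Both allocator variants time the allocation the same way: read the
32-bit cycle counter before the work and accumulate the unsigned
difference afterwards, which stays correct across a single counter
wrap. A standalone sketch of the pattern; measure_slab_alloc() is a
hypothetical helper, not part of this commit:

    #include <zephyr/kernel.h>

    /* Time one k_mem_slab allocation in hardware cycles. Unsigned
     * 32-bit subtraction yields the right delta even if the cycle
     * counter wraps once between the two reads.
     */
    static uint32_t measure_slab_alloc(struct k_mem_slab *slab, void **mem)
    {
            uint32_t start = k_cycle_get_32();
            int ret = k_mem_slab_alloc(slab, mem, K_NO_WAIT);
            uint32_t delta = k_cycle_get_32() - start;

            return (ret == 0) ? delta : 0U;
    }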
@@ -1188,9 +1283,9 @@ int net_pkt_alloc_buffer(struct net_pkt *pkt,
 	}
 
 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
-	buf = pkt_alloc_buffer(pool, alloc_len, timeout, caller, line);
+	buf = pkt_alloc_buffer(pkt, pool, alloc_len, timeout, caller, line);
 #else
-	buf = pkt_alloc_buffer(pool, alloc_len, timeout);
+	buf = pkt_alloc_buffer(pkt, pool, alloc_len, timeout);
 #endif
 
 	if (!buf) {
@@ -1240,9 +1335,9 @@ int net_pkt_alloc_buffer_raw(struct net_pkt *pkt, size_t size,
 	}
 
 #if NET_LOG_LEVEL >= LOG_LEVEL_DBG
-	buf = pkt_alloc_buffer(pool, size, timeout, caller, line);
+	buf = pkt_alloc_buffer(pkt, pool, size, timeout, caller, line);
 #else
-	buf = pkt_alloc_buffer(pool, size, timeout);
+	buf = pkt_alloc_buffer(pkt, pool, size, timeout);
 #endif
 
 	if (!buf) {
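
To exercise the new accounting it is enough to build with
CONFIG_NET_PKT_ALLOC_STATS=y and allocate packets as usual; every
allocation is attributed to its slab's "ok" or "fail" bucket. A small
usage sketch; send_probe() and its parameters are illustrative, not
part of this commit:

    #include <errno.h>
    #include <zephyr/kernel.h>
    #include <zephyr/net/net_if.h>
    #include <zephyr/net/net_pkt.h>

    static int send_probe(struct net_if *iface)
    {
            struct net_pkt *pkt;

            /* Size and elapsed time are recorded in the TX slab's
             * "ok" bucket on success, "fail" bucket on timeout.
             */
            pkt = net_pkt_alloc_with_buffer(iface, 64, AF_INET,
                                            IPPROTO_UDP, K_MSEC(100));
            if (pkt == NULL) {
                    return -ENOMEM;
            }

            net_pkt_unref(pkt);
            return 0;
    }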