pm: policy: split policy API implementations

policy.c has grown organically and now contains many independent pieces of
code. Split each logical unit into its own C file, making the code easier
to browse.
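
The policy code now lives under subsys/pm/policy/ (as reflected by the new
CMakeLists.txt below):

  CMakeLists.txt
  Kconfig
  policy_default.c       (default policy, CONFIG_PM_POLICY_DEFAULT)
  policy_device_lock.c   (device power locks, CONFIG_PM_POLICY_DEVICE_CONSTRAINTS)
  policy_events.c        (policy events)
  policy_latency.c       (latency requests and subscriptions)
  policy_state_lock.c    (state/substate locks)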

Signed-off-by: Gerard Marull-Paretas <gerard@teslabs.com>
Gerard Marull-Paretas 2024-10-22 16:00:54 +02:00 committed by Carles Cufí
parent a5e3a33b39
commit 6f4bb118a8
10 changed files with 546 additions and 486 deletions

subsys/pm/CMakeLists.txt
View file

@@ -1,10 +1,12 @@
# SPDX-License-Identifier: Apache-2.0
if(CONFIG_PM)
zephyr_sources(pm.c policy.c state.c)
zephyr_sources(pm.c state.c)
zephyr_sources_ifdef(CONFIG_PM_STATS pm_stats.c)
endif()
add_subdirectory(policy)
zephyr_sources_ifdef(CONFIG_PM_DEVICE device.c)
zephyr_sources_ifdef(CONFIG_PM_DEVICE_RUNTIME device_runtime.c)
zephyr_sources_ifdef(CONFIG_PM_DEVICE_SHELL pm_shell.c)

subsys/pm/Kconfig
View file

@@ -19,6 +19,8 @@ config PM
power management subsystem of the number of ticks until the next kernel
timer is due to expire.
rsource "policy/Kconfig"
if PM
module = PM
@@ -51,35 +53,6 @@ config PM_NEED_ALL_DEVICES_IDLE
When this option is enabled, check that no devices are busy before
entering into system low power mode.
choice PM_POLICY
prompt "Idle State Power Management Policy"
default PM_POLICY_DEFAULT
help
Select the idle state power management policy.
config PM_POLICY_DEFAULT
bool "Default PM policy"
help
This option selects the default PM policy. Default policy is based
on CPU residency times and other constraints imposed by the drivers or
application.
config PM_POLICY_CUSTOM
bool "Custom PM Policy"
help
This option allows applications to override the default policy with
a custom implementation.
endchoice
config PM_POLICY_DEVICE_CONSTRAINTS
bool "Power state constraints per device"
help
This option allows a device to declare the list of power states
that cause power loss in the device when the system transitions into them.
It is used to set and release power state constraints when
the device requires it.
endif # PM
config PM_DEVICE

subsys/pm/policy.c
View file

@@ -1,456 +0,0 @@
/*
* Copyright (c) 2018 Intel Corporation.
* Copyright (c) 2022 Nordic Semiconductor ASA
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <zephyr/kernel.h>
#include <zephyr/pm/pm.h>
#include <zephyr/pm/policy.h>
#include <zephyr/pm/state.h>
#include <zephyr/spinlock.h>
#include <zephyr/sys_clock.h>
#include <zephyr/sys/__assert.h>
#include <zephyr/sys/time_units.h>
#include <zephyr/sys/atomic.h>
#include <zephyr/sys/util_macro.h>
#include <zephyr/toolchain.h>
#include <zephyr/pm/device.h>
#if DT_HAS_COMPAT_STATUS_OKAY(zephyr_power_state)
#define DT_SUB_LOCK_INIT(node_id) \
{ .state = PM_STATE_DT_INIT(node_id), \
.substate_id = DT_PROP_OR(node_id, substate_id, 0), \
.lock = ATOMIC_INIT(0), \
},
/**
* State and substate lock structure.
*
* This struct associates a reference count with each <state,substate>
* pair, to be used with the pm_policy_substate_lock_* functions.
*
* Operations on this array are O(n) in the number of power states,
* mostly due to the random nature of the substate value (which can be
* anything from a small integer to a bitmask). We could probably do
* better with a hashmap.
*/
static struct {
enum pm_state state;
uint8_t substate_id;
atomic_t lock;
} substate_lock_t[] = {
DT_FOREACH_STATUS_OKAY(zephyr_power_state, DT_SUB_LOCK_INIT)
};
#endif
#if defined(CONFIG_PM_POLICY_DEVICE_CONSTRAINTS)
struct pm_state_device_constraint {
const struct device *const dev;
size_t pm_constraints_size;
struct pm_state_constraint *constraints;
};
/**
* @brief Synthesize the name of the object that holds a device pm constraint.
*
* @param dev_id Device identifier.
*/
#define PM_CONSTRAINTS_NAME(node_id) _CONCAT(__devicepmconstraints_, node_id)
/**
* @brief initialize a device pm constraint with information from devicetree.
*
* @param node_id Node identifier.
*/
#define PM_STATE_CONSTRAINT_INIT(node_id) \
{ \
.state = PM_STATE_DT_INIT(node_id), \
.substate_id = DT_PROP_OR(node_id, substate_id, 0), \
}
/**
* @brief Helper macro to define a single device pm constraint.
*/
#define PM_STATE_CONSTRAINT_DEFINE(i, node_id) \
COND_CODE_1(DT_NODE_HAS_STATUS_OKAY(DT_PHANDLE_BY_IDX(node_id, \
zephyr_disabling_power_states, i)), \
(PM_STATE_CONSTRAINT_INIT(DT_PHANDLE_BY_IDX(node_id, \
zephyr_disabling_power_states, i)),), ())
/**
* @brief Helper macro to generate a list of device pm constraints.
*/
#define PM_STATE_CONSTRAINTS_DEFINE(node_id) \
{ \
LISTIFY(DT_PROP_LEN_OR(node_id, zephyr_disabling_power_states, 0), \
PM_STATE_CONSTRAINT_DEFINE, (), node_id) \
}
/**
* @brief Helper macro to define an array of device pm constraints.
*/
#define CONSTRAINTS_DEFINE(node_id) \
Z_DECL_ALIGN(struct pm_state_constraint) \
PM_CONSTRAINTS_NAME(node_id)[] = \
PM_STATE_CONSTRAINTS_DEFINE(node_id);
#define DEVICE_CONSTRAINTS_DEFINE(node_id) \
COND_CODE_0(DT_NODE_HAS_PROP(node_id, zephyr_disabling_power_states), (), \
(CONSTRAINTS_DEFINE(node_id)))
DT_FOREACH_STATUS_OKAY_NODE(DEVICE_CONSTRAINTS_DEFINE)
/**
* @brief Helper macro to initialize a pm state device constraint
*/
#define PM_STATE_DEVICE_CONSTRAINT_INIT(node_id) \
{ \
.dev = DEVICE_DT_GET(node_id), \
.pm_constraints_size = DT_PROP_LEN(node_id, zephyr_disabling_power_states), \
.constraints = PM_CONSTRAINTS_NAME(node_id), \
},
/**
* @brief Helper macro to initialize a pm state device constraint, if the device defines disabling power states.
*/
#define PM_STATE_DEVICE_CONSTRAINT_DEFINE(node_id) \
COND_CODE_0(DT_NODE_HAS_PROP(node_id, zephyr_disabling_power_states), (), \
(PM_STATE_DEVICE_CONSTRAINT_INIT(node_id)))
static struct pm_state_device_constraint _devices_constraints[] = {
DT_FOREACH_STATUS_OKAY_NODE(PM_STATE_DEVICE_CONSTRAINT_DEFINE)
};
#endif /* CONFIG_PM_POLICY_DEVICE_CONSTRAINTS */
/** Lock to synchronize access to the latency request list. */
static struct k_spinlock latency_lock;
/** List of maximum latency requests. */
static sys_slist_t latency_reqs;
/** Maximum CPU latency in us */
static int32_t max_latency_us = SYS_FOREVER_US;
/** Maximum CPU latency in cycles */
static int32_t max_latency_cyc = -1;
/** List of latency change subscribers. */
static sys_slist_t latency_subs;
/** Lock to synchronize access to the events list. */
static struct k_spinlock events_lock;
/** List of events. */
static sys_slist_t events_list;
/** Pointer to Next Event. */
static struct pm_policy_event *next_event;
/** @brief Update maximum allowed latency. */
static void update_max_latency(void)
{
int32_t new_max_latency_us = SYS_FOREVER_US;
struct pm_policy_latency_request *req;
SYS_SLIST_FOR_EACH_CONTAINER(&latency_reqs, req, node) {
if ((new_max_latency_us == SYS_FOREVER_US) ||
((int32_t)req->value_us < new_max_latency_us)) {
new_max_latency_us = (int32_t)req->value_us;
}
}
if (max_latency_us != new_max_latency_us) {
struct pm_policy_latency_subscription *sreq;
int32_t new_max_latency_cyc = -1;
SYS_SLIST_FOR_EACH_CONTAINER(&latency_subs, sreq, node) {
sreq->cb(new_max_latency_us);
}
if (new_max_latency_us != SYS_FOREVER_US) {
new_max_latency_cyc = (int32_t)k_us_to_cyc_ceil32(new_max_latency_us);
}
max_latency_us = new_max_latency_us;
max_latency_cyc = new_max_latency_cyc;
}
}
/** @brief Update next event. */
static void update_next_event(uint32_t cyc)
{
int64_t new_next_event_cyc = -1;
struct pm_policy_event *evt;
/* unset the next event pointer */
next_event = NULL;
SYS_SLIST_FOR_EACH_CONTAINER(&events_list, evt, node) {
uint64_t cyc_evt = evt->value_cyc;
/*
* cyc value is a 32-bit rolling counter:
*
* |---------------->-----------------------|
* 0 cyc UINT32_MAX
*
* Values from [0, cyc) are events happening later than
* [cyc, UINT32_MAX], so pad [0, cyc) with UINT32_MAX + 1 to do
* the comparison.
*/
if (cyc_evt < cyc) {
cyc_evt += (uint64_t)UINT32_MAX + 1U;
}
if ((new_next_event_cyc < 0) || (cyc_evt < new_next_event_cyc)) {
new_next_event_cyc = cyc_evt;
next_event = evt;
}
}
}
int32_t pm_policy_next_event_ticks(void)
{
int32_t cyc_evt = -1;
if ((next_event) && (next_event->value_cyc > 0)) {
cyc_evt = next_event->value_cyc - k_cycle_get_32();
cyc_evt = MAX(0, cyc_evt);
BUILD_ASSERT(CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC >= CONFIG_SYS_CLOCK_TICKS_PER_SEC,
"HW Cycles per sec should be greater that ticks per sec");
return k_cyc_to_ticks_floor32(cyc_evt);
}
return -1;
}
#ifdef CONFIG_PM_POLICY_DEFAULT
const struct pm_state_info *pm_policy_next_state(uint8_t cpu, int32_t ticks)
{
int64_t cyc = -1;
uint8_t num_cpu_states;
const struct pm_state_info *cpu_states;
#ifdef CONFIG_PM_NEED_ALL_DEVICES_IDLE
if (pm_device_is_any_busy()) {
return NULL;
}
#endif
if (ticks != K_TICKS_FOREVER) {
cyc = k_ticks_to_cyc_ceil32(ticks);
}
num_cpu_states = pm_state_cpu_get_all(cpu, &cpu_states);
if ((next_event) && (next_event->value_cyc >= 0)) {
uint32_t cyc_curr = k_cycle_get_32();
int64_t cyc_evt = next_event->value_cyc - cyc_curr;
/* event happening after cycle counter max value, pad */
if (next_event->value_cyc <= cyc_curr) {
cyc_evt += UINT32_MAX;
}
if (cyc_evt > 0) {
/* if there is no system wakeup event, the policy event wins;
 * otherwise, whichever comes earlier wins
 */
if (cyc < 0) {
cyc = cyc_evt;
} else {
cyc = MIN(cyc, cyc_evt);
}
}
}
for (int16_t i = (int16_t)num_cpu_states - 1; i >= 0; i--) {
const struct pm_state_info *state = &cpu_states[i];
uint32_t min_residency_cyc, exit_latency_cyc;
/* check if there is a lock on state + substate */
if (pm_policy_state_lock_is_active(state->state, state->substate_id)) {
continue;
}
min_residency_cyc = k_us_to_cyc_ceil32(state->min_residency_us);
exit_latency_cyc = k_us_to_cyc_ceil32(state->exit_latency_us);
/* skip state if it brings too much latency */
if ((max_latency_cyc >= 0) &&
(exit_latency_cyc >= max_latency_cyc)) {
continue;
}
if ((cyc < 0) ||
(cyc >= (min_residency_cyc + exit_latency_cyc))) {
return state;
}
}
return NULL;
}
#endif
void pm_policy_state_lock_get(enum pm_state state, uint8_t substate_id)
{
#if DT_HAS_COMPAT_STATUS_OKAY(zephyr_power_state)
for (size_t i = 0; i < ARRAY_SIZE(substate_lock_t); i++) {
if (substate_lock_t[i].state == state &&
(substate_lock_t[i].substate_id == substate_id ||
substate_id == PM_ALL_SUBSTATES)) {
atomic_inc(&substate_lock_t[i].lock);
}
}
#endif
}
void pm_policy_state_lock_put(enum pm_state state, uint8_t substate_id)
{
#if DT_HAS_COMPAT_STATUS_OKAY(zephyr_power_state)
for (size_t i = 0; i < ARRAY_SIZE(substate_lock_t); i++) {
if (substate_lock_t[i].state == state &&
(substate_lock_t[i].substate_id == substate_id ||
substate_id == PM_ALL_SUBSTATES)) {
atomic_t cnt = atomic_dec(&substate_lock_t[i].lock);
ARG_UNUSED(cnt);
__ASSERT(cnt >= 1, "Unbalanced state lock get/put");
}
}
#endif
}
bool pm_policy_state_lock_is_active(enum pm_state state, uint8_t substate_id)
{
#if DT_HAS_COMPAT_STATUS_OKAY(zephyr_power_state)
for (size_t i = 0; i < ARRAY_SIZE(substate_lock_t); i++) {
if (substate_lock_t[i].state == state &&
(substate_lock_t[i].substate_id == substate_id ||
substate_id == PM_ALL_SUBSTATES)) {
return (atomic_get(&substate_lock_t[i].lock) != 0);
}
}
#endif
return false;
}
void pm_policy_latency_request_add(struct pm_policy_latency_request *req,
uint32_t value_us)
{
req->value_us = value_us;
k_spinlock_key_t key = k_spin_lock(&latency_lock);
sys_slist_append(&latency_reqs, &req->node);
update_max_latency();
k_spin_unlock(&latency_lock, key);
}
void pm_policy_latency_request_update(struct pm_policy_latency_request *req,
uint32_t value_us)
{
k_spinlock_key_t key = k_spin_lock(&latency_lock);
req->value_us = value_us;
update_max_latency();
k_spin_unlock(&latency_lock, key);
}
void pm_policy_latency_request_remove(struct pm_policy_latency_request *req)
{
k_spinlock_key_t key = k_spin_lock(&latency_lock);
(void)sys_slist_find_and_remove(&latency_reqs, &req->node);
update_max_latency();
k_spin_unlock(&latency_lock, key);
}
void pm_policy_latency_changed_subscribe(struct pm_policy_latency_subscription *req,
pm_policy_latency_changed_cb_t cb)
{
k_spinlock_key_t key = k_spin_lock(&latency_lock);
req->cb = cb;
sys_slist_append(&latency_subs, &req->node);
k_spin_unlock(&latency_lock, key);
}
void pm_policy_latency_changed_unsubscribe(struct pm_policy_latency_subscription *req)
{
k_spinlock_key_t key = k_spin_lock(&latency_lock);
(void)sys_slist_find_and_remove(&latency_subs, &req->node);
k_spin_unlock(&latency_lock, key);
}
void pm_policy_event_register(struct pm_policy_event *evt, uint32_t time_us)
{
k_spinlock_key_t key = k_spin_lock(&events_lock);
uint32_t cyc = k_cycle_get_32();
evt->value_cyc = cyc + k_us_to_cyc_ceil32(time_us);
sys_slist_append(&events_list, &evt->node);
update_next_event(cyc);
k_spin_unlock(&events_lock, key);
}
void pm_policy_event_update(struct pm_policy_event *evt, uint32_t cycle)
{
k_spinlock_key_t key = k_spin_lock(&events_lock);
evt->value_cyc = cycle;
update_next_event(k_cycle_get_32());
k_spin_unlock(&events_lock, key);
}
void pm_policy_event_unregister(struct pm_policy_event *evt)
{
k_spinlock_key_t key = k_spin_lock(&events_lock);
(void)sys_slist_find_and_remove(&events_list, &evt->node);
update_next_event(k_cycle_get_32());
k_spin_unlock(&events_lock, key);
}
void pm_policy_device_power_lock_get(const struct device *dev)
{
#if DT_HAS_COMPAT_STATUS_OKAY(zephyr_power_state) && defined(CONFIG_PM_POLICY_DEVICE_CONSTRAINTS)
for (size_t i = 0; i < ARRAY_SIZE(_devices_constraints); i++) {
if (_devices_constraints[i].dev == dev) {
for (size_t j = 0; j < _devices_constraints[i].pm_constraints_size; j++) {
pm_policy_state_lock_get(
_devices_constraints[i].constraints[j].state,
_devices_constraints[i].constraints[j].substate_id);
}
break;
}
}
#endif
}
void pm_policy_device_power_lock_put(const struct device *dev)
{
#if DT_HAS_COMPAT_STATUS_OKAY(zephyr_power_state) && defined(CONFIG_PM_POLICY_DEVICE_CONSTRAINTS)
for (size_t i = 0; i < ARRAY_SIZE(_devices_constraints); i++) {
if (_devices_constraints[i].dev == dev) {
for (size_t j = 0; j < _devices_constraints[i].pm_constraints_size; j++) {
pm_policy_state_lock_put(
_devices_constraints[i].constraints[j].state,
_devices_constraints[i].constraints[j].substate_id);
}
break;
}
}
#endif
}

14
subsys/pm/policy/CMakeLists.txt Normal file
View file

@@ -0,0 +1,14 @@
# Copyright (c) 2024 Nordic Semiconductor ASA
# SPDX-License-Identifier: Apache-2.0
if(CONFIG_PM)
zephyr_library_sources(policy_events.c policy_latency.c policy_state_lock.c)
if(CONFIG_PM_POLICY_DEVICE_CONSTRAINTS)
zephyr_library_sources(policy_device_lock.c)
endif()
if(CONFIG_PM_POLICY_DEFAULT)
zephyr_library_sources(policy_default.c)
endif()
endif()

34
subsys/pm/policy/Kconfig Normal file
View file

@@ -0,0 +1,34 @@
# Copyright (c) 2024 Nordic Semiconductor ASA
# SPDX-License-Identifier: Apache-2.0
if PM
choice PM_POLICY
prompt "Idle State Power Management Policy"
default PM_POLICY_DEFAULT
help
Select the idle state power management policy.
config PM_POLICY_DEFAULT
bool "Default PM policy"
help
This option selects the default PM policy. Default policy is based
on CPU residency times and other constraints imposed by the drivers or
application.
config PM_POLICY_CUSTOM
bool "Custom PM Policy"
help
This option allows applications to override the default policy with
a custom implementation.
endchoice
config PM_POLICY_DEVICE_CONSTRAINTS
bool "Power state constraints per device"
help
This option allows a device to declare the list of power states
that cause power loss in the device when the system transitions into them.
It is used to set and release power state constraints when
the device requires it.
endif # PM
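
As a point of reference, a minimal sketch of a custom policy an application
could provide when PM_POLICY_CUSTOM is selected (the always-active behavior
shown here is only illustrative, not part of this commit):

/*
 * Trivial custom policy: keep the CPU in the active state. With
 * CONFIG_PM_POLICY_CUSTOM=y the application supplies pm_policy_next_state()
 * itself, replacing the implementation in policy_default.c.
 */
#include <stdint.h>
#include <zephyr/pm/policy.h>
#include <zephyr/pm/state.h>
#include <zephyr/toolchain.h>

const struct pm_state_info *pm_policy_next_state(uint8_t cpu, int32_t ticks)
{
	ARG_UNUSED(cpu);
	ARG_UNUSED(ticks);

	/* Returning NULL keeps the CPU in the active state. */
	return NULL;
}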

79
subsys/pm/policy/policy_default.c Normal file
View file

@@ -0,0 +1,79 @@
/*
* Copyright (c) 2018 Intel Corporation.
* Copyright (c) 2022 Nordic Semiconductor ASA
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <zephyr/pm/policy.h>
#include <zephyr/sys_clock.h>
#include <zephyr/pm/device.h>
extern struct pm_policy_event *next_event;
extern int32_t max_latency_cyc;
const struct pm_state_info *pm_policy_next_state(uint8_t cpu, int32_t ticks)
{
int64_t cyc = -1;
uint8_t num_cpu_states;
const struct pm_state_info *cpu_states;
#ifdef CONFIG_PM_NEED_ALL_DEVICES_IDLE
if (pm_device_is_any_busy()) {
return NULL;
}
#endif
if (ticks != K_TICKS_FOREVER) {
cyc = k_ticks_to_cyc_ceil32(ticks);
}
num_cpu_states = pm_state_cpu_get_all(cpu, &cpu_states);
if ((next_event) && (next_event->value_cyc >= 0)) {
uint32_t cyc_curr = k_cycle_get_32();
int64_t cyc_evt = next_event->value_cyc - cyc_curr;
/* event happening after cycle counter max value, pad */
if (next_event->value_cyc <= cyc_curr) {
cyc_evt += UINT32_MAX;
}
if (cyc_evt > 0) {
/* if there is no system wakeup event, the policy event wins;
 * otherwise, whichever comes earlier wins
 */
if (cyc < 0) {
cyc = cyc_evt;
} else {
cyc = MIN(cyc, cyc_evt);
}
}
}
for (int16_t i = (int16_t)num_cpu_states - 1; i >= 0; i--) {
const struct pm_state_info *state = &cpu_states[i];
uint32_t min_residency_cyc, exit_latency_cyc;
/* check if there is a lock on state + substate */
if (pm_policy_state_lock_is_active(state->state, state->substate_id)) {
continue;
}
min_residency_cyc = k_us_to_cyc_ceil32(state->min_residency_us);
exit_latency_cyc = k_us_to_cyc_ceil32(state->exit_latency_us);
/* skip state if it brings too much latency */
if ((max_latency_cyc >= 0) &&
(exit_latency_cyc >= max_latency_cyc)) {
continue;
}
if ((cyc < 0) ||
(cyc >= (min_residency_cyc + exit_latency_cyc))) {
return state;
}
}
return NULL;
}
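
As a concrete reading of the loop above: the CPU states are walked from the
last array entry to the first (deepest to shallowest in the usual devicetree
ordering), and a state is selected only when the expected idle time covers
both its minimum residency and its exit latency. For example, with roughly
2000 us of idle time available, a state with min_residency_us = 1000 and
exit_latency_us = 500 qualifies (1500 us <= 2000 us), while one with
min_residency_us = 3000 does not, so the search continues with a shallower
state.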

120
subsys/pm/policy/policy_device_lock.c Normal file
View file

@@ -0,0 +1,120 @@
/*
* Copyright (c) 2018 Intel Corporation.
* Copyright (c) 2022 Nordic Semiconductor ASA
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <zephyr/pm/policy.h>
#include <zephyr/pm/state.h>
#include <zephyr/sys/util_macro.h>
#include <zephyr/pm/device.h>
struct pm_state_device_constraint {
const struct device *const dev;
size_t pm_constraints_size;
struct pm_state_constraint *constraints;
};
/**
* @brief Synthesize the name of the object that holds a device pm constraint.
*
* @param node_id Node identifier.
*/
#define PM_CONSTRAINTS_NAME(node_id) _CONCAT(__devicepmconstraints_, node_id)
/**
* @brief initialize a device pm constraint with information from devicetree.
*
* @param node_id Node identifier.
*/
#define PM_STATE_CONSTRAINT_INIT(node_id) \
{ \
.state = PM_STATE_DT_INIT(node_id), \
.substate_id = DT_PROP_OR(node_id, substate_id, 0), \
}
/**
* @brief Helper macro to define a single device pm constraint.
*/
#define PM_STATE_CONSTRAINT_DEFINE(i, node_id) \
COND_CODE_1(DT_NODE_HAS_STATUS_OKAY(DT_PHANDLE_BY_IDX(node_id, \
zephyr_disabling_power_states, i)), \
(PM_STATE_CONSTRAINT_INIT(DT_PHANDLE_BY_IDX(node_id, \
zephyr_disabling_power_states, i)),), ())
/**
* @brief Helper macro to generate a list of device pm constraints.
*/
#define PM_STATE_CONSTRAINTS_DEFINE(node_id) \
{ \
LISTIFY(DT_PROP_LEN_OR(node_id, zephyr_disabling_power_states, 0), \
PM_STATE_CONSTRAINT_DEFINE, (), node_id) \
}
/**
* @brief Helper macro to define an array of device pm constraints.
*/
#define CONSTRAINTS_DEFINE(node_id) \
Z_DECL_ALIGN(struct pm_state_constraint) \
PM_CONSTRAINTS_NAME(node_id)[] = \
PM_STATE_CONSTRAINTS_DEFINE(node_id);
#define DEVICE_CONSTRAINTS_DEFINE(node_id) \
COND_CODE_0(DT_NODE_HAS_PROP(node_id, zephyr_disabling_power_states), (), \
(CONSTRAINTS_DEFINE(node_id)))
DT_FOREACH_STATUS_OKAY_NODE(DEVICE_CONSTRAINTS_DEFINE)
/**
* @brief Helper macro to initialize a pm state device constraint
*/
#define PM_STATE_DEVICE_CONSTRAINT_INIT(node_id) \
{ \
.dev = DEVICE_DT_GET(node_id), \
.pm_constraints_size = DT_PROP_LEN(node_id, zephyr_disabling_power_states), \
.constraints = PM_CONSTRAINTS_NAME(node_id), \
},
/**
* @brief Helper macro to initialize a pm state device constraint, if the device defines disabling power states.
*/
#define PM_STATE_DEVICE_CONSTRAINT_DEFINE(node_id) \
COND_CODE_0(DT_NODE_HAS_PROP(node_id, zephyr_disabling_power_states), (), \
(PM_STATE_DEVICE_CONSTRAINT_INIT(node_id)))
static struct pm_state_device_constraint _devices_constraints[] = {
DT_FOREACH_STATUS_OKAY_NODE(PM_STATE_DEVICE_CONSTRAINT_DEFINE)
};
void pm_policy_device_power_lock_get(const struct device *dev)
{
#if DT_HAS_COMPAT_STATUS_OKAY(zephyr_power_state)
for (size_t i = 0; i < ARRAY_SIZE(_devices_constraints); i++) {
if (_devices_constraints[i].dev == dev) {
for (size_t j = 0; j < _devices_constraints[i].pm_constraints_size; j++) {
pm_policy_state_lock_get(
_devices_constraints[i].constraints[j].state,
_devices_constraints[i].constraints[j].substate_id);
}
break;
}
}
#endif
}
void pm_policy_device_power_lock_put(const struct device *dev)
{
#if DT_HAS_COMPAT_STATUS_OKAY(zephyr_power_state)
for (size_t i = 0; i < ARRAY_SIZE(_devices_constraints); i++) {
if (_devices_constraints[i].dev == dev) {
for (size_t j = 0; j < _devices_constraints[i].pm_constraints_size; j++) {
pm_policy_state_lock_put(
_devices_constraints[i].constraints[j].state,
_devices_constraints[i].constraints[j].substate_id);
}
break;
}
}
#endif
}
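
For context, a hedged sketch of how a driver might use this API; the device
and property values are illustrative only:

/*
 * Illustrative sketch: a driver holding off power states that would reset it.
 * Assumes the device's devicetree node carries something like
 *   zephyr,disabling-power-states = <&suspend_to_ram>;
 * (node and state names here are only examples).
 */
#include <zephyr/device.h>
#include <zephyr/pm/policy.h>

static void start_transfer(const struct device *dev)
{
	/* Lock out every power state listed as disabling for this device. */
	pm_policy_device_power_lock_get(dev);

	/* ... perform the transfer ... */

	pm_policy_device_power_lock_put(dev);
}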

101
subsys/pm/policy/policy_events.c Normal file
View file

@@ -0,0 +1,101 @@
/*
* Copyright (c) 2018 Intel Corporation.
* Copyright (c) 2022 Nordic Semiconductor ASA
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <stdint.h>
#include <zephyr/kernel.h>
#include <zephyr/pm/policy.h>
#include <zephyr/spinlock.h>
#include <zephyr/sys_clock.h>
#include <zephyr/sys/time_units.h>
/** Lock to synchronize access to the events list. */
static struct k_spinlock events_lock;
/** List of events. */
static sys_slist_t events_list;
/** Pointer to Next Event. */
struct pm_policy_event *next_event;
/** @brief Update next event. */
static void update_next_event(uint32_t cyc)
{
int64_t new_next_event_cyc = -1;
struct pm_policy_event *evt;
/* unset the next event pointer */
next_event = NULL;
SYS_SLIST_FOR_EACH_CONTAINER(&events_list, evt, node) {
uint64_t cyc_evt = evt->value_cyc;
/*
* cyc value is a 32-bit rolling counter:
*
* |---------------->-----------------------|
* 0 cyc UINT32_MAX
*
* Values from [0, cyc) are events happening later than
* [cyc, UINT32_MAX], so pad [0, cyc) with UINT32_MAX + 1 to do
* the comparison.
*/
if (cyc_evt < cyc) {
cyc_evt += (uint64_t)UINT32_MAX + 1U;
}
if ((new_next_event_cyc < 0) || (cyc_evt < new_next_event_cyc)) {
new_next_event_cyc = cyc_evt;
next_event = evt;
}
}
}
int32_t pm_policy_next_event_ticks(void)
{
int32_t cyc_evt = -1;
if ((next_event) && (next_event->value_cyc > 0)) {
cyc_evt = next_event->value_cyc - k_cycle_get_32();
cyc_evt = MAX(0, cyc_evt);
BUILD_ASSERT(CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC >= CONFIG_SYS_CLOCK_TICKS_PER_SEC,
"HW Cycles per sec should be greater that ticks per sec");
return k_cyc_to_ticks_floor32(cyc_evt);
}
return -1;
}
void pm_policy_event_register(struct pm_policy_event *evt, uint32_t time_us)
{
k_spinlock_key_t key = k_spin_lock(&events_lock);
uint32_t cyc = k_cycle_get_32();
evt->value_cyc = cyc + k_us_to_cyc_ceil32(time_us);
sys_slist_append(&events_list, &evt->node);
update_next_event(cyc);
k_spin_unlock(&events_lock, key);
}
void pm_policy_event_update(struct pm_policy_event *evt, uint32_t cycle)
{
k_spinlock_key_t key = k_spin_lock(&events_lock);
evt->value_cyc = cycle;
update_next_event(k_cycle_get_32());
k_spin_unlock(&events_lock, key);
}
void pm_policy_event_unregister(struct pm_policy_event *evt)
{
k_spinlock_key_t key = k_spin_lock(&events_lock);
(void)sys_slist_find_and_remove(&events_list, &evt->node);
update_next_event(k_cycle_get_32());
k_spin_unlock(&events_lock, key);
}
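
For context, a hedged sketch of the event API; the 500 us figure and the
function names are illustrative only:

/*
 * Announce an expected wakeup so the policy can take it into account when
 * picking the next power state.
 */
#include <zephyr/pm/policy.h>

static struct pm_policy_event rx_evt;

void rx_expected_soon(void)
{
	/* A wakeup is expected roughly 500 us from now. */
	pm_policy_event_register(&rx_evt, 500);
}

void rx_done(void)
{
	pm_policy_event_unregister(&rx_evt);
}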

107
subsys/pm/policy/policy_latency.c Normal file
View file

@@ -0,0 +1,107 @@
/*
* Copyright (c) 2018 Intel Corporation.
* Copyright (c) 2022 Nordic Semiconductor ASA
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <stdint.h>
#include <zephyr/pm/policy.h>
#include <zephyr/spinlock.h>
#include <zephyr/sys/time_units.h>
/** Lock to synchronize access to the latency request list. */
static struct k_spinlock latency_lock;
/** List of maximum latency requests. */
static sys_slist_t latency_reqs;
/** Maximum CPU latency in us */
static int32_t max_latency_us = SYS_FOREVER_US;
/** Maximum CPU latency in cycles */
int32_t max_latency_cyc = -1;
/** List of latency change subscribers. */
static sys_slist_t latency_subs;
/** @brief Update maximum allowed latency. */
static void update_max_latency(void)
{
int32_t new_max_latency_us = SYS_FOREVER_US;
struct pm_policy_latency_request *req;
SYS_SLIST_FOR_EACH_CONTAINER(&latency_reqs, req, node) {
if ((new_max_latency_us == SYS_FOREVER_US) ||
((int32_t)req->value_us < new_max_latency_us)) {
new_max_latency_us = (int32_t)req->value_us;
}
}
if (max_latency_us != new_max_latency_us) {
struct pm_policy_latency_subscription *sreq;
int32_t new_max_latency_cyc = -1;
SYS_SLIST_FOR_EACH_CONTAINER(&latency_subs, sreq, node) {
sreq->cb(new_max_latency_us);
}
if (new_max_latency_us != SYS_FOREVER_US) {
new_max_latency_cyc = (int32_t)k_us_to_cyc_ceil32(new_max_latency_us);
}
max_latency_us = new_max_latency_us;
max_latency_cyc = new_max_latency_cyc;
}
}
void pm_policy_latency_request_add(struct pm_policy_latency_request *req,
uint32_t value_us)
{
req->value_us = value_us;
k_spinlock_key_t key = k_spin_lock(&latency_lock);
sys_slist_append(&latency_reqs, &req->node);
update_max_latency();
k_spin_unlock(&latency_lock, key);
}
void pm_policy_latency_request_update(struct pm_policy_latency_request *req,
uint32_t value_us)
{
k_spinlock_key_t key = k_spin_lock(&latency_lock);
req->value_us = value_us;
update_max_latency();
k_spin_unlock(&latency_lock, key);
}
void pm_policy_latency_request_remove(struct pm_policy_latency_request *req)
{
k_spinlock_key_t key = k_spin_lock(&latency_lock);
(void)sys_slist_find_and_remove(&latency_reqs, &req->node);
update_max_latency();
k_spin_unlock(&latency_lock, key);
}
void pm_policy_latency_changed_subscribe(struct pm_policy_latency_subscription *req,
pm_policy_latency_changed_cb_t cb)
{
k_spinlock_key_t key = k_spin_lock(&latency_lock);
req->cb = cb;
sys_slist_append(&latency_subs, &req->node);
k_spin_unlock(&latency_lock, key);
}
void pm_policy_latency_changed_unsubscribe(struct pm_policy_latency_subscription *req)
{
k_spinlock_key_t key = k_spin_lock(&latency_lock);
(void)sys_slist_find_and_remove(&latency_subs, &req->node);
k_spin_unlock(&latency_lock, key);
}
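
For context, a hedged sketch of the latency request API; the 50 us bound,
the callback body, and the function names are illustrative only:

/*
 * Bound wakeup latency while streaming and track changes to the aggregate
 * maximum latency.
 */
#include <stdint.h>
#include <zephyr/pm/policy.h>

static struct pm_policy_latency_request lat_req;
static struct pm_policy_latency_subscription lat_sub;

static void on_max_latency_changed(int32_t latency_us)
{
	/* React to the new aggregate maximum latency, e.g. retune a FIFO. */
}

void stream_start(void)
{
	/* Request that wakeup (exit) latency stay below 50 us. */
	pm_policy_latency_request_add(&lat_req, 50);
	pm_policy_latency_changed_subscribe(&lat_sub, on_max_latency_changed);
}

void stream_stop(void)
{
	pm_policy_latency_changed_unsubscribe(&lat_sub);
	pm_policy_latency_request_remove(&lat_req);
}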

86
subsys/pm/policy/policy_state_lock.c Normal file
View file

@@ -0,0 +1,86 @@
/*
* Copyright (c) 2018 Intel Corporation.
* Copyright (c) 2022 Nordic Semiconductor ASA
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <zephyr/pm/policy.h>
#include <zephyr/pm/state.h>
#include <zephyr/sys/__assert.h>
#include <zephyr/sys/atomic.h>
#include <zephyr/toolchain.h>
#if DT_HAS_COMPAT_STATUS_OKAY(zephyr_power_state)
#define DT_SUB_LOCK_INIT(node_id) \
{ .state = PM_STATE_DT_INIT(node_id), \
.substate_id = DT_PROP_OR(node_id, substate_id, 0), \
.lock = ATOMIC_INIT(0), \
},
/**
* State and substate lock structure.
*
* This struct associates a reference count with each <state,substate>
* pair, to be used with the pm_policy_substate_lock_* functions.
*
* Operations on this array are O(n) in the number of power states,
* mostly due to the random nature of the substate value (which can be
* anything from a small integer to a bitmask). We could probably do
* better with a hashmap.
*/
static struct {
enum pm_state state;
uint8_t substate_id;
atomic_t lock;
} substate_lock_t[] = {
DT_FOREACH_STATUS_OKAY(zephyr_power_state, DT_SUB_LOCK_INIT)
};
#endif
void pm_policy_state_lock_get(enum pm_state state, uint8_t substate_id)
{
#if DT_HAS_COMPAT_STATUS_OKAY(zephyr_power_state)
for (size_t i = 0; i < ARRAY_SIZE(substate_lock_t); i++) {
if (substate_lock_t[i].state == state &&
(substate_lock_t[i].substate_id == substate_id ||
substate_id == PM_ALL_SUBSTATES)) {
atomic_inc(&substate_lock_t[i].lock);
}
}
#endif
}
void pm_policy_state_lock_put(enum pm_state state, uint8_t substate_id)
{
#if DT_HAS_COMPAT_STATUS_OKAY(zephyr_power_state)
for (size_t i = 0; i < ARRAY_SIZE(substate_lock_t); i++) {
if (substate_lock_t[i].state == state &&
(substate_lock_t[i].substate_id == substate_id ||
substate_id == PM_ALL_SUBSTATES)) {
atomic_t cnt = atomic_dec(&substate_lock_t[i].lock);
ARG_UNUSED(cnt);
__ASSERT(cnt >= 1, "Unbalanced state lock get/put");
}
}
#endif
}
bool pm_policy_state_lock_is_active(enum pm_state state, uint8_t substate_id)
{
#if DT_HAS_COMPAT_STATUS_OKAY(zephyr_power_state)
for (size_t i = 0; i < ARRAY_SIZE(substate_lock_t); i++) {
if (substate_lock_t[i].state == state &&
(substate_lock_t[i].substate_id == substate_id ||
substate_id == PM_ALL_SUBSTATES)) {
return (atomic_get(&substate_lock_t[i].lock) != 0);
}
}
#endif
return false;
}
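
For context, a hedged sketch of the state lock API; PM_STATE_SUSPEND_TO_RAM
and the surrounding function names are only examples:

/*
 * Forbid a specific power state around a section that must not be
 * interrupted by it.
 */
#include <zephyr/pm/policy.h>
#include <zephyr/pm/state.h>

void dma_chain_start(void)
{
	/* Block suspend-to-RAM (all substates) until the matching put(). */
	pm_policy_state_lock_get(PM_STATE_SUSPEND_TO_RAM, PM_ALL_SUBSTATES);
}

void dma_chain_done(void)
{
	pm_policy_state_lock_put(PM_STATE_SUSPEND_TO_RAM, PM_ALL_SUBSTATES);
}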