arch: riscv: smp: allow other IPI implementations

The current IPI implementation assumes that a CLINT exists in the
system. That is not always the case: IPIs can also be implemented with
a PLIC that supports software-triggered interrupts, such as the Andes
NCEPLIC100.

Refactor the CLINT-based IPI implementation into `ipi_clint.c` and add
a Kconfig choice that selects the CLINT implementation when an enabled
`sifive,clint0` node exists in the devicetree, and otherwise defaults
to `RISCV_SMP_IPI_CUSTOM`, which allows an out-of-tree (OOT)
implementation. This also paves the way for upstreaming non-CLINT IPI
implementations later.

Signed-off-by: Yong Cong Sin <ycsin@meta.com>
Yong Cong Sin 2024-11-12 13:15:24 +08:00 committed by Anas Nashif
parent bb7319e7f2
commit 9c3482b1d5
5 changed files with 142 additions and 97 deletions

arch/riscv/Kconfig
@@ -37,6 +37,31 @@ config RISCV_ALWAYS_SWITCH_THROUGH_ECALL
	  and most people should say n here to minimize context switching
	  overhead.

choice RISCV_SMP_IPI_IMPL
	prompt "RISC-V SMP IPI implementation"
	depends on SMP
	default RISCV_SMP_IPI_CLINT if DT_HAS_SIFIVE_CLINT0_ENABLED
	default RISCV_SMP_IPI_CUSTOM

config RISCV_SMP_IPI_CLINT
	bool "CLINT-based IPI"
	depends on DT_HAS_SIFIVE_CLINT0_ENABLED
	help
	  Use CLINT-based IPI implementation.

config RISCV_SMP_IPI_CUSTOM
	bool "Custom IPI implementation"
	help
	  Allow custom IPI implementation.
	  When this is selected, the following functions must be provided:
	  - arch_sched_directed_ipi()
	  - arch_flush_fpu_ipi() if CONFIG_FPU_SHARING
	  - arch_spin_relax() if CONFIG_FPU_SHARING
	  - arch_smp_init()

endchoice # RISCV_SMP_IPI_IMPL

menu "RISCV Processor Options"

config INCLUDE_RESET_VECTOR
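
To make the `RISCV_SMP_IPI_CUSTOM` contract above concrete, here is a minimal sketch of what an out-of-tree backend could look like. It is illustrative only: `plic_sw_raise()`, `plic_sw_clear()` and `MY_PLIC_SW_IRQ` are hypothetical stand-ins for whatever software-trigger mechanism the target interrupt controller (e.g. an NCEPLIC100-class PLIC) provides; only the hook names and the use of `<ipi.h>`, `IRQ_CONNECT()` and `z_sched_ipi()` mirror the in-tree CLINT backend.

```c
/* Hypothetical out-of-tree IPI backend selected via RISCV_SMP_IPI_CUSTOM. */
#include <ipi.h>
#include <ksched.h>
#include <zephyr/kernel.h>

static void custom_ipi_handler(const void *unused)
{
	ARG_UNUSED(unused);

	plic_sw_clear(_current_cpu->id);	/* ack this CPU's software IRQ */
	z_sched_ipi();				/* let the scheduler re-evaluate */
}

void arch_sched_directed_ipi(uint32_t cpu_bitmap)
{
	unsigned int num_cpus = arch_num_cpus();

	for (unsigned int i = 0; i < num_cpus; i++) {
		if ((i != _current_cpu->id) && ((cpu_bitmap & BIT(i)) != 0)) {
			plic_sw_raise(i);	/* raise a software IRQ on CPU i */
		}
	}
}

#ifdef CONFIG_FPU_SHARING
void arch_flush_fpu_ipi(unsigned int cpu)
{
	/* A real backend would also track an IPI_FPU_FLUSH-style pending bit. */
	plic_sw_raise(cpu);
}

void arch_spin_relax(void)
{
	/*
	 * Drain any pending FPU-flush request here to avoid the deadlock
	 * described in ipi_clint.c's arch_spin_relax().
	 */
}
#endif /* CONFIG_FPU_SHARING */

int arch_smp_init(void)
{
	IRQ_CONNECT(MY_PLIC_SW_IRQ, 0, custom_ipi_handler, NULL, 0);
	irq_enable(MY_PLIC_SW_IRQ);

	return 0;
}
```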

arch/riscv/core/CMakeLists.txt
@@ -17,6 +17,12 @@ if ((CONFIG_MP_MAX_NUM_CPUS GREATER 1) OR (CONFIG_SMP))
  zephyr_library_sources(smp.c)
endif ()

if (CONFIG_SMP)
  zephyr_library_sources(ipi.c)
  zephyr_library_sources_ifdef(CONFIG_RISCV_SMP_IPI_CLINT ipi_clint.c)
endif()

zephyr_library_sources_ifdef(CONFIG_FPU_SHARING fpu.c fpu.S)
zephyr_library_sources_ifdef(CONFIG_DEBUG_COREDUMP coredump.c)
zephyr_library_sources_ifdef(CONFIG_IRQ_OFFLOAD irq_offload.c)

arch/riscv/core/ipi.c (new file)
@@ -0,0 +1,14 @@
/*
 * Copyright (c) 2021 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <ipi.h>

#include <zephyr/kernel.h>

void arch_sched_broadcast_ipi(void)
{
	arch_sched_directed_ipi(IPI_ALL_CPUS_MASK);
}
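
With the broadcast helper factored out into the generic `ipi.c`, a backend only needs to supply the directed variant. Note that the bitmap passed to `arch_sched_directed_ipi()` is indexed by Zephyr CPU number, not by hart ID; the CLINT backend below translates via `_kernel.cpus[i].arch.hartid`. A hypothetical caller, for illustration only (not part of the commit; `BIT()` and `IPI_ALL_CPUS_MASK` come from Zephyr headers):

```c
#include <ipi.h>
#include <zephyr/kernel.h>

/* Illustrative caller, not part of the commit. */
static void kick_cpus(unsigned int target_cpu)
{
	/* IPI exactly one CPU: bit positions are CPU indices, not hart IDs. */
	arch_sched_directed_ipi(BIT(target_cpu));

	/* IPI every CPU except the calling one (the backend skips itself). */
	arch_sched_broadcast_ipi();
}
```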

arch/riscv/core/ipi_clint.c (new file)
@@ -0,0 +1,97 @@
/*
 * Copyright (c) 2021 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <ipi.h>
#include <ksched.h>

#include <zephyr/kernel.h>

#define MSIP_BASE 0x2000000UL
#define MSIP(hartid) ((volatile uint32_t *)MSIP_BASE)[hartid]

static atomic_val_t cpu_pending_ipi[CONFIG_MP_MAX_NUM_CPUS];
#define IPI_SCHED 0
#define IPI_FPU_FLUSH 1

void arch_sched_directed_ipi(uint32_t cpu_bitmap)
{
	unsigned int key = arch_irq_lock();
	unsigned int id = _current_cpu->id;
	unsigned int num_cpus = arch_num_cpus();

	for (unsigned int i = 0; i < num_cpus; i++) {
		if ((i != id) && _kernel.cpus[i].arch.online && ((cpu_bitmap & BIT(i)) != 0)) {
			atomic_set_bit(&cpu_pending_ipi[i], IPI_SCHED);
			MSIP(_kernel.cpus[i].arch.hartid) = 1;
		}
	}

	arch_irq_unlock(key);
}

#ifdef CONFIG_FPU_SHARING
void arch_flush_fpu_ipi(unsigned int cpu)
{
	atomic_set_bit(&cpu_pending_ipi[cpu], IPI_FPU_FLUSH);
	MSIP(_kernel.cpus[cpu].arch.hartid) = 1;
}
#endif /* CONFIG_FPU_SHARING */

static void sched_ipi_handler(const void *unused)
{
	ARG_UNUSED(unused);

	MSIP(csr_read(mhartid)) = 0;

	atomic_val_t pending_ipi = atomic_clear(&cpu_pending_ipi[_current_cpu->id]);

	if (pending_ipi & ATOMIC_MASK(IPI_SCHED)) {
		z_sched_ipi();
	}
#ifdef CONFIG_FPU_SHARING
	if (pending_ipi & ATOMIC_MASK(IPI_FPU_FLUSH)) {
		/* disable IRQs */
		csr_clear(mstatus, MSTATUS_IEN);
		/* perform the flush */
		arch_flush_local_fpu();
		/*
		 * No need to re-enable IRQs here as long as
		 * this remains the last case.
		 */
	}
#endif /* CONFIG_FPU_SHARING */
}

#ifdef CONFIG_FPU_SHARING
/*
 * Make sure there is no pending FPU flush request for this CPU while
 * waiting for a contended spinlock to become available. This prevents
 * a deadlock when the lock we need is already taken by another CPU
 * that also wants its FPU content to be reinstated while such content
 * is still live in this CPU's FPU.
 */
void arch_spin_relax(void)
{
	atomic_val_t *pending_ipi = &cpu_pending_ipi[_current_cpu->id];

	if (atomic_test_and_clear_bit(pending_ipi, IPI_FPU_FLUSH)) {
		/*
		 * We may not be in IRQ context here hence cannot use
		 * arch_flush_local_fpu() directly.
		 */
		arch_float_disable(_current_cpu->arch.fpu_owner);
	}
}
#endif /* CONFIG_FPU_SHARING */

int arch_smp_init(void)
{
	IRQ_CONNECT(RISCV_IRQ_MSOFT, 0, sched_ipi_handler, NULL, 0);
	irq_enable(RISCV_IRQ_MSOFT);

	return 0;
}
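
For context on the hardware side: the CLINT exposes one 32-bit MSIP word per hart in an array starting at `MSIP_BASE` (0x2000000 in the SiFive memory map assumed by this code). Writing 1 pends a machine software interrupt (`RISCV_IRQ_MSOFT`) on that hart; the receiving hart writes 0 to clear it, as `sched_ipi_handler()` does. A minimal sketch equivalent to the `MSIP()` macro above, with illustrative `clint_*` helper names that are not part of the commit:

```c
#include <stdint.h>

#define MSIP_BASE 0x2000000UL	/* CLINT software-interrupt array, as in ipi_clint.c */

/* Address of hart N's MSIP register: one uint32_t per hart. */
static inline volatile uint32_t *clint_msip(uint32_t hartid)
{
	return (volatile uint32_t *)MSIP_BASE + hartid;
}

static inline void clint_raise_ipi(uint32_t hartid)
{
	*clint_msip(hartid) = 1;	/* pend a machine software interrupt */
}

static inline void clint_clear_ipi(uint32_t hartid)
{
	*clint_msip(hartid) = 0;	/* done by the receiving hart in its handler */
}
```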

arch/riscv/core/smp.c
@@ -7,7 +7,6 @@
#include <zephyr/init.h>
#include <zephyr/kernel.h>
#include <ksched.h>
#include <ipi.h>
#include <zephyr/irq.h>
#include <zephyr/sys/atomic.h>
#include <zephyr/arch/riscv/irq.h>
@@ -81,99 +80,3 @@ void arch_secondary_cpu_init(int hartid)
#endif /* CONFIG_PLIC_IRQ_AFFINITY */
	riscv_cpu_init[cpu_num].fn(riscv_cpu_init[cpu_num].arg);
}

#ifdef CONFIG_SMP
#define MSIP_BASE 0x2000000UL
#define MSIP(hartid) ((volatile uint32_t *)MSIP_BASE)[hartid]

static atomic_val_t cpu_pending_ipi[CONFIG_MP_MAX_NUM_CPUS];
#define IPI_SCHED 0
#define IPI_FPU_FLUSH 1

void arch_sched_directed_ipi(uint32_t cpu_bitmap)
{
	unsigned int key = arch_irq_lock();
	unsigned int id = _current_cpu->id;
	unsigned int num_cpus = arch_num_cpus();

	for (unsigned int i = 0; i < num_cpus; i++) {
		if ((i != id) && _kernel.cpus[i].arch.online &&
		    ((cpu_bitmap & BIT(i)) != 0)) {
			atomic_set_bit(&cpu_pending_ipi[i], IPI_SCHED);
			MSIP(_kernel.cpus[i].arch.hartid) = 1;
		}
	}

	arch_irq_unlock(key);
}

void arch_sched_broadcast_ipi(void)
{
	arch_sched_directed_ipi(IPI_ALL_CPUS_MASK);
}

#ifdef CONFIG_FPU_SHARING
void arch_flush_fpu_ipi(unsigned int cpu)
{
	atomic_set_bit(&cpu_pending_ipi[cpu], IPI_FPU_FLUSH);
	MSIP(_kernel.cpus[cpu].arch.hartid) = 1;
}
#endif

static void sched_ipi_handler(const void *unused)
{
	ARG_UNUSED(unused);

	MSIP(csr_read(mhartid)) = 0;

	atomic_val_t pending_ipi = atomic_clear(&cpu_pending_ipi[_current_cpu->id]);

	if (pending_ipi & ATOMIC_MASK(IPI_SCHED)) {
		z_sched_ipi();
	}
#ifdef CONFIG_FPU_SHARING
	if (pending_ipi & ATOMIC_MASK(IPI_FPU_FLUSH)) {
		/* disable IRQs */
		csr_clear(mstatus, MSTATUS_IEN);
		/* perform the flush */
		arch_flush_local_fpu();
		/*
		 * No need to re-enable IRQs here as long as
		 * this remains the last case.
		 */
	}
#endif
}

#ifdef CONFIG_FPU_SHARING
/*
 * Make sure there is no pending FPU flush request for this CPU while
 * waiting for a contended spinlock to become available. This prevents
 * a deadlock when the lock we need is already taken by another CPU
 * that also wants its FPU content to be reinstated while such content
 * is still live in this CPU's FPU.
 */
void arch_spin_relax(void)
{
	atomic_val_t *pending_ipi = &cpu_pending_ipi[_current_cpu->id];

	if (atomic_test_and_clear_bit(pending_ipi, IPI_FPU_FLUSH)) {
		/*
		 * We may not be in IRQ context here hence cannot use
		 * arch_flush_local_fpu() directly.
		 */
		arch_float_disable(_current_cpu->arch.fpu_owner);
	}
}
#endif

int arch_smp_init(void)
{
	IRQ_CONNECT(RISCV_IRQ_MSOFT, 0, sched_ipi_handler, NULL, 0);
	irq_enable(RISCV_IRQ_MSOFT);

	return 0;
}
#endif /* CONFIG_SMP */