Commit e64a712de9: adds riscv_vp platform (2025-08-14 16:28:34 +02:00)
121 changed files with 1562 additions and 4379 deletions

View File

@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: BSD-2-Clause
+# SPDX-License-Identifier: GPL-2.0-only
 # See here for more information about the format and editor support:
 # https://editorconfig.org/

View File

@@ -151,12 +151,6 @@ endif
 # Guess the compiler's XLEN
 OPENSBI_CC_XLEN := $(shell TMP=`$(CC) $(CLANG_TARGET) -dumpmachine | sed 's/riscv\([0-9][0-9]\).*/\1/'`; echo $${TMP})
-# If guessing XLEN fails, default to 64
-ifneq ($(OPENSBI_CC_XLEN),32)
-ifneq ($(OPENSBI_CC_XLEN),64)
-OPENSBI_CC_XLEN = 64
-endif
-endif
 # Guess the compiler's ABI and ISA
 ifneq ($(CC_IS_CLANG),y)
@@ -380,7 +374,6 @@ GENFLAGS += $(firmware-genflags-y)
 CFLAGS = -g -Wall -Werror -ffreestanding -nostdlib -fno-stack-protector -fno-strict-aliasing -ffunction-sections -fdata-sections
 CFLAGS += -fno-omit-frame-pointer -fno-optimize-sibling-calls
 CFLAGS += -fno-asynchronous-unwind-tables -fno-unwind-tables
-CFLAGS += -std=gnu11
 CFLAGS += $(REPRODUCIBLE_FLAGS)
 # Optionally supported flags
 ifeq ($(CC_SUPPORT_VECTOR),y)
@@ -451,14 +444,11 @@ DTSCPPFLAGS = $(CPPFLAGS) -nostdinc -nostdlib -fno-builtin -D__DTS__ -x assemble
 ifneq ($(DEBUG),)
 CFLAGS += -O0
+ELFFLAGS += -Wl,--print-gc-sections
 else
 CFLAGS += -O2
 endif
-ifeq ($(V), 1)
-ELFFLAGS += -Wl,--print-gc-sections
-endif
 # Setup functions for compilation
 define dynamic_flags
 -I$(shell dirname $(2)) -D__OBJNAME__=$(subst -,_,$(shell basename $(1) .o))

View File

@@ -13,7 +13,7 @@ The FPGA SoC currently contains the following peripherals:
 - Bootrom containing zero stage bootloader and device tree.
 To build platform specific library and firmwares, provide the
-*PLATFORM=generic* parameter to the top level `make` command.
+*PLATFORM=fpga/ariane* parameter to the top level `make` command.
 Platform Options
 ----------------
@@ -26,7 +26,7 @@ Building Ariane FPGA Platform
 **Linux Kernel Payload**
 ```
-make PLATFORM=generic FW_PAYLOAD_PATH=<linux_build_directory>/arch/riscv/boot/Image
+make PLATFORM=fpga/ariane FW_PAYLOAD_PATH=<linux_build_directory>/arch/riscv/boot/Image
 ```
 Booting Ariane FPGA Platform

View File

@@ -7,8 +7,8 @@ processor from ETH Zurich. To this end, Ariane has been equipped with a
 different L1 cache subsystem that follows a write-through protocol and that has
 support for cache invalidations and atomics.
-To build platform specific library and firmwares, provide the *PLATFORM=generic*
-parameter to the top level `make` command.
+To build platform specific library and firmwares, provide the
+*PLATFORM=fpga/openpiton* parameter to the top level `make` command.
 Platform Options
 ----------------
@@ -21,7 +21,7 @@ Building Ariane FPGA Platform
 **Linux Kernel Payload**
 ```
-make PLATFORM=generic FW_PAYLOAD_PATH=<linux_build_directory>/arch/riscv/boot/Image
+make PLATFORM=fpga/openpiton FW_PAYLOAD_PATH=<linux_build_directory>/arch/riscv/boot/Image
 ```
 Booting Ariane FPGA Platform

View File

@@ -47,8 +47,6 @@ RISC-V Platforms Using Generic Platform
 * **SiFive HiFive Unleashed** (*[sifive_fu540.md]*)
 * **Spike** (*[spike.md]*)
 * **T-HEAD C9xx series Processors** (*[thead-c9xx.md]*)
-* **OpenPiton FPGA SoC** (*[fpga-openpiton.md]*)
-* **Ariane FPGA SoC** (*[fpga-ariane.md]*)
 [andes-ae350.md]: andes-ae350.md
 [qemu_virt.md]: qemu_virt.md
@@ -57,5 +55,3 @@ RISC-V Platforms Using Generic Platform
 [sifive_fu540.md]: sifive_fu540.md
 [spike.md]: spike.md
 [thead-c9xx.md]: thead-c9xx.md
-[fpga-openpiton.md]: fpga-openpiton.md
-[fpga-ariane.md]: fpga-ariane.md

View File

@@ -21,12 +21,20 @@ OpenSBI currently supports the following virtual and hardware platforms:
 * **Kendryte K210 SoC**: Platform support for the Kendryte K210 SoC used on
   boards such as the Kendryte KD233 or the Sipeed MAIX Dock.
+* **Ariane FPGA SoC**: Platform support for the Ariane FPGA SoC used on
+  Genesys 2 board. More details on this platform can be found in the file
+  *[fpga-ariane.md]*.
 * **Andes AE350 SoC**: Platform support for the Andes's SoC (AE350). More
   details on this platform can be found in the file *[andes-ae350.md]*.
 * **Spike**: Platform support for the Spike emulator. More
   details on this platform can be found in the file *[spike.md]*.
+* **OpenPiton FPGA SoC**: Platform support OpenPiton research platform based
+  on ariane core. More details on this platform can be found in the file
+  *[fpga-openpiton.md]*.
 * **Shakti C-class SoC Platform**: Platform support for Shakti C-class
   processor based SOCs. More details on this platform can be found in the
   file *[shakti_cclass.md]*.
@@ -44,8 +52,10 @@ comments to facilitate the implementation.
 [generic.md]: generic.md
 [qemu_virt.md]: qemu_virt.md
 [sifive_fu540.md]: sifive_fu540.md
+[fpga-ariane.md]: fpga-ariane.md
 [andes-ae350.md]: andes-ae350.md
 [thead-c910.md]: thead-c910.md
 [spike.md]: spike.md
+[fpga-openpiton.md]: fpga-openpiton.md
 [shakti_cclass.md]: shakti_cclass.md
 [renesas-rzfive.md]: renesas-rzfive.md

View File

@@ -122,50 +122,6 @@ enum {
 	RV_DBTR_DECLARE_BIT_MASK(MC, TYPE, 4),
 };
-/* ICOUNT - Match Control Type Register */
-enum {
-	RV_DBTR_DECLARE_BIT(ICOUNT, ACTION, 0),
-	RV_DBTR_DECLARE_BIT(ICOUNT, U, 6),
-	RV_DBTR_DECLARE_BIT(ICOUNT, S, 7),
-	RV_DBTR_DECLARE_BIT(ICOUNT, PENDING, 8),
-	RV_DBTR_DECLARE_BIT(ICOUNT, M, 9),
-	RV_DBTR_DECLARE_BIT(ICOUNT, COUNT, 10),
-	RV_DBTR_DECLARE_BIT(ICOUNT, HIT, 24),
-	RV_DBTR_DECLARE_BIT(ICOUNT, VU, 25),
-	RV_DBTR_DECLARE_BIT(ICOUNT, VS, 26),
-#if __riscv_xlen == 64
-	RV_DBTR_DECLARE_BIT(ICOUNT, DMODE, 59),
-	RV_DBTR_DECLARE_BIT(ICOUNT, TYPE, 60),
-#elif __riscv_xlen == 32
-	RV_DBTR_DECLARE_BIT(ICOUNT, DMODE, 27),
-	RV_DBTR_DECLARE_BIT(ICOUNT, TYPE, 28),
-#else
-#error "Unknown __riscv_xlen"
-#endif
-};
-enum {
-	RV_DBTR_DECLARE_BIT_MASK(ICOUNT, ACTION, 6),
-	RV_DBTR_DECLARE_BIT_MASK(ICOUNT, U, 1),
-	RV_DBTR_DECLARE_BIT_MASK(ICOUNT, S, 1),
-	RV_DBTR_DECLARE_BIT_MASK(ICOUNT, PENDING, 1),
-	RV_DBTR_DECLARE_BIT_MASK(ICOUNT, M, 1),
-	RV_DBTR_DECLARE_BIT_MASK(ICOUNT, COUNT, 14),
-	RV_DBTR_DECLARE_BIT_MASK(ICOUNT, HIT, 1),
-	RV_DBTR_DECLARE_BIT_MASK(ICOUNT, VU, 1),
-	RV_DBTR_DECLARE_BIT_MASK(ICOUNT, VS, 1),
-#if __riscv_xlen == 64
-	RV_DBTR_DECLARE_BIT_MASK(ICOUNT, DMODE, 1),
-	RV_DBTR_DECLARE_BIT_MASK(ICOUNT, TYPE, 4),
-#elif __riscv_xlen == 32
-	RV_DBTR_DECLARE_BIT_MASK(ICOUNT, DMODE, 1),
-	RV_DBTR_DECLARE_BIT_MASK(ICOUNT, TYPE, 4),
-#else
-#error "Unknown __riscv_xlen"
-#endif
-};
 /* MC6 - Match Control 6 Type Register */
 enum {
 	RV_DBTR_DECLARE_BIT(MC6, LOAD, 0),

View File

@@ -189,7 +189,7 @@
 #define TOPI_IID_SHIFT 16
 #define TOPI_IID_MASK 0xfff
 #define TOPI_IPRIO_MASK 0xff
 #if __riscv_xlen == 64
 #define MHPMEVENT_OF (_UL(1) << 63)
@@ -378,9 +378,6 @@
 #define CSR_SSTATEEN2 0x10E
 #define CSR_SSTATEEN3 0x10F
-/* Supervisor Resource Management Configuration CSRs */
-#define CSR_SRMCFG 0x181
 /* Machine-Level Control transfer records CSRs */
 #define CSR_MCTRCTL 0x34e
@@ -786,40 +783,6 @@
 #define CSR_VTYPE 0xc21
 #define CSR_VLENB 0xc22
-/* Custom CSR ranges */
-#define CSR_CUSTOM0_U_RW_BASE 0x800
-#define CSR_CUSTOM0_U_RW_COUNT 0x100
-#define CSR_CUSTOM1_U_RO_BASE 0xCC0
-#define CSR_CUSTOM1_U_RO_COUNT 0x040
-#define CSR_CUSTOM2_S_RW_BASE 0x5C0
-#define CSR_CUSTOM2_S_RW_COUNT 0x040
-#define CSR_CUSTOM3_S_RW_BASE 0x9C0
-#define CSR_CUSTOM3_S_RW_COUNT 0x040
-#define CSR_CUSTOM4_S_RO_BASE 0xDC0
-#define CSR_CUSTOM4_S_RO_COUNT 0x040
-#define CSR_CUSTOM5_HS_RW_BASE 0x6C0
-#define CSR_CUSTOM5_HS_RW_COUNT 0x040
-#define CSR_CUSTOM6_HS_RW_BASE 0xAC0
-#define CSR_CUSTOM6_HS_RW_COUNT 0x040
-#define CSR_CUSTOM7_HS_RO_BASE 0xEC0
-#define CSR_CUSTOM7_HS_RO_COUNT 0x040
-#define CSR_CUSTOM8_M_RW_BASE 0x7C0
-#define CSR_CUSTOM8_M_RW_COUNT 0x040
-#define CSR_CUSTOM9_M_RW_BASE 0xBC0
-#define CSR_CUSTOM9_M_RW_COUNT 0x040
-#define CSR_CUSTOM10_M_RO_BASE 0xFC0
-#define CSR_CUSTOM10_M_RO_COUNT 0x040
 /* ===== Trap/Exception Causes ===== */
 #define CAUSE_MISALIGNED_FETCH 0x0
@@ -852,8 +815,6 @@
 #define SMSTATEEN0_FCSR (_ULL(1) << SMSTATEEN0_FCSR_SHIFT)
 #define SMSTATEEN0_CTR_SHIFT 54
 #define SMSTATEEN0_CTR (_ULL(1) << SMSTATEEN0_CTR_SHIFT)
-#define SMSTATEEN0_SRMCFG_SHIFT 55
-#define SMSTATEEN0_SRMCFG (_ULL(1) << SMSTATEEN0_SRMCFG_SHIFT)
 #define SMSTATEEN0_CONTEXT_SHIFT 57
 #define SMSTATEEN0_CONTEXT (_ULL(1) << SMSTATEEN0_CONTEXT_SHIFT)
 #define SMSTATEEN0_IMSIC_SHIFT 58
@@ -940,16 +901,16 @@
 #define INSN_MASK_C_FSWSP 0xe003
 #define INSN_MATCH_C_LHU 0x8400
 #define INSN_MASK_C_LHU 0xfc43
 #define INSN_MATCH_C_LH 0x8440
 #define INSN_MASK_C_LH 0xfc43
 #define INSN_MATCH_C_SH 0x8c00
 #define INSN_MASK_C_SH 0xfc43
 #define INSN_MASK_WFI 0xffffff00
 #define INSN_MATCH_WFI 0x10500000
-#define INSN_MASK_FENCE_TSO 0xfff0707f
+#define INSN_MASK_FENCE_TSO 0xffffffff
 #define INSN_MATCH_FENCE_TSO 0x8330000f
 #define INSN_MASK_VECTOR_UNIT_STRIDE 0xfdf0707f
@@ -1312,7 +1273,7 @@
 /* 64-bit read for VS-stage address translation (RV64) */
 #define INSN_PSEUDO_VS_LOAD 0x00003000
 /* 64-bit write for VS-stage address translation (RV64) */
 #define INSN_PSEUDO_VS_STORE 0x00003020
 #elif __riscv_xlen == 32
@@ -1320,7 +1281,7 @@
 #define INSN_PSEUDO_VS_LOAD 0x00002000
 /* 32-bit write for VS-stage address translation (RV32) */
 #define INSN_PSEUDO_VS_STORE 0x00002020
 #else
 #error "Unexpected __riscv_xlen"
@@ -1340,11 +1301,11 @@
 #define SHIFT_AQRL 25
 #define VM_MASK 0x1
 #define VIEW_MASK 0x3
 #define VSEW_MASK 0x3
 #define VLMUL_MASK 0x7
 #define VD_MASK 0x1f
 #define VS2_MASK 0x1f
 #define INSN_16BIT_MASK 0x3
 #define INSN_32BIT_MASK 0x1c
@@ -1356,8 +1317,8 @@
 #define INSN_LEN(insn) (INSN_IS_16BIT(insn) ? 2 : 4)
 #define SH_VSEW 3
 #define SH_VIEW 12
 #define SH_VD 7
 #define SH_VS2 20
 #define SH_VM 25
@@ -1405,17 +1366,17 @@
 #define IMM_S(insn) (((s32)(insn) >> 25 << 5) | \
 		(s32)(((insn) >> 7) & 0x1f))
 #define IS_MASKED(insn) (((insn >> SH_VM) & VM_MASK) == 0)
 #define GET_VD(insn) ((insn >> SH_VD) & VD_MASK)
 #define GET_VS2(insn) ((insn >> SH_VS2) & VS2_MASK)
 #define GET_VIEW(insn) (((insn) >> SH_VIEW) & VIEW_MASK)
 #define GET_MEW(insn) (((insn) >> SH_MEW) & 1)
 #define GET_VSEW(vtype) (((vtype) >> SH_VSEW) & VSEW_MASK)
 #define GET_VLMUL(vtype) ((vtype) & VLMUL_MASK)
 #define GET_LEN(view) (1UL << (view))
 #define GET_NF(insn) (1 + ((insn >> 29) & 7))
 #define GET_VEMUL(vlmul, view, vsew) ((vlmul + view - vsew) & 7)
 #define GET_EMUL(vemul) (1UL << ((vemul) >= 4 ? 0 : (vemul)))
 #define CSRRW 1
 #define CSRRS 2

View File

@@ -121,9 +121,6 @@ struct sbi_domain_memregion {
 	((__flags & SBI_DOMAIN_MEMREGION_SU_ACCESS_MASK) && \
 	 !(__flags & SBI_DOMAIN_MEMREGION_M_ACCESS_MASK))
-#define SBI_DOMAIN_MEMREGION_IS_FIRMWARE(__flags) \
-	((__flags & SBI_DOMAIN_MEMREGION_FW) ? true : false) \
 /** Bit to control if permissions are enforced on all modes */
 #define SBI_DOMAIN_MEMREGION_ENF_PERMISSIONS (1UL << 6)
@@ -160,7 +157,6 @@ struct sbi_domain_memregion {
 	 SBI_DOMAIN_MEMREGION_M_EXECUTABLE)
 #define SBI_DOMAIN_MEMREGION_MMIO (1UL << 31)
-#define SBI_DOMAIN_MEMREGION_FW (1UL << 30)
 	unsigned long flags;
 };
@@ -253,13 +249,6 @@ void sbi_domain_memregion_init(unsigned long addr,
 				unsigned long flags,
 				struct sbi_domain_memregion *reg);
-/**
- * Return the Smepmp pmpcfg LRWX encoding for the flags in @reg.
- *
- * @param reg pointer to memory region; its flags field encodes permissions.
- */
-unsigned int sbi_domain_get_smepmp_flags(struct sbi_domain_memregion *reg);
 /**
  * Check whether we can access specified address for given mode and
  * memory region flags under a domain

View File

@@ -291,15 +291,6 @@ struct sbi_pmu_event_info {
 #define SBI_PMU_CFG_FLAG_SET_UINH (1 << 5)
 #define SBI_PMU_CFG_FLAG_SET_SINH (1 << 6)
 #define SBI_PMU_CFG_FLAG_SET_MINH (1 << 7)
-/* Event configuration mask */
-#define SBI_PMU_CFG_EVENT_MASK \
-	( \
-	SBI_PMU_CFG_FLAG_SET_VUINH | \
-	SBI_PMU_CFG_FLAG_SET_VSINH | \
-	SBI_PMU_CFG_FLAG_SET_UINH | \
-	SBI_PMU_CFG_FLAG_SET_SINH | \
-	SBI_PMU_CFG_FLAG_SET_MINH \
-	)
 /* Flags defined for counter start function */
 #define SBI_PMU_START_FLAG_SET_INIT_VALUE (1 << 0)

View File

@@ -79,14 +79,8 @@ enum sbi_hart_extensions {
 	SBI_HART_EXT_SMCTR,
 	/** HART has CTR S-mode CSRs */
 	SBI_HART_EXT_SSCTR,
-	/** Hart has Ssqosid extension */
-	SBI_HART_EXT_SSQOSID,
 	/** HART has Ssstateen extension **/
 	SBI_HART_EXT_SSSTATEEN,
-	/** Hart has Xsfcflushdlone extension */
-	SBI_HART_EXT_XSIFIVE_CFLUSH_D_L1,
-	/** Hart has Xsfcease extension */
-	SBI_HART_EXT_XSIFIVE_CEASE,
 	/** Maximum index of Hart extension */
 	SBI_HART_EXT_MAX,
@@ -107,6 +101,21 @@ enum sbi_hart_csrs {
 	SBI_HART_CSR_MAX,
 };
+/*
+ * Smepmp enforces access boundaries between M-mode and
+ * S/U-mode. When it is enabled, the PMPs are programmed
+ * such that M-mode doesn't have access to S/U-mode memory.
+ *
+ * To give M-mode R/W access to the shared memory between M and
+ * S/U-mode, first entry is reserved. It is disabled at boot.
+ * When shared memory access is required, the physical address
+ * should be programmed into the first PMP entry with R/W
+ * permissions to the M-mode. Once the work is done, it should be
+ * unmapped. sbi_hart_map_saddr/sbi_hart_unmap_saddr function
+ * pair should be used to map/unmap the shared memory.
+ */
+#define SBI_SMEPMP_RESV_ENTRY 0
 struct sbi_hart_features {
 	bool detected;
 	int priv_version;
@@ -119,9 +128,6 @@ struct sbi_hart_features {
 	unsigned int mhpm_bits;
 };
-extern unsigned long hart_features_offset;
-#define sbi_hart_features_ptr(__s) sbi_scratch_offset_ptr(__s, hart_features_offset)
 struct sbi_scratch;
 int sbi_hart_reinit(struct sbi_scratch *scratch);
@@ -132,7 +138,13 @@ extern void (*sbi_hart_expected_trap)(void);
 unsigned int sbi_hart_mhpm_mask(struct sbi_scratch *scratch);
 void sbi_hart_delegation_dump(struct sbi_scratch *scratch,
 			      const char *prefix, const char *suffix);
+unsigned int sbi_hart_pmp_count(struct sbi_scratch *scratch);
+unsigned int sbi_hart_pmp_log2gran(struct sbi_scratch *scratch);
+unsigned int sbi_hart_pmp_addrbits(struct sbi_scratch *scratch);
 unsigned int sbi_hart_mhpm_bits(struct sbi_scratch *scratch);
+int sbi_hart_pmp_configure(struct sbi_scratch *scratch);
+int sbi_hart_map_saddr(unsigned long base, unsigned long size);
+int sbi_hart_unmap_saddr(void);
 int sbi_hart_priv_version(struct sbi_scratch *scratch);
 void sbi_hart_get_priv_version_str(struct sbi_scratch *scratch,
 				   char *version_str, int nvstr);
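
A note on the API this hunk restores: under Smepmp, M-mode firmware cannot touch S/U-mode memory by default, so SBI calls that read or write caller-supplied shared memory must bracket the access with the map/unmap pair that programs the reserved PMP slot. A minimal usage sketch, with a hypothetical copy_from_smode() helper (sbi_memcpy() is OpenSBI's string routine):

```c
/* Sketch: temporary M-mode window onto an S/U-mode buffer under Smepmp.
 * sbi_hart_map_saddr() programs SBI_SMEPMP_RESV_ENTRY with M-mode R/W
 * permissions for [saddr, saddr + size); unmap disables the entry again. */
static int copy_from_smode(void *dst, unsigned long saddr, unsigned long size)
{
	int rc = sbi_hart_map_saddr(saddr, size);

	if (rc)
		return rc;
	sbi_memcpy(dst, (const void *)saddr, size);
	return sbi_hart_unmap_saddr();
}
```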

View File

@@ -1,20 +0,0 @@
/*
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2025 Ventana Micro Systems Inc.
*/
#ifndef __SBI_HART_PMP_H__
#define __SBI_HART_PMP_H__
#include <sbi/sbi_types.h>
struct sbi_scratch;
unsigned int sbi_hart_pmp_count(struct sbi_scratch *scratch);
unsigned int sbi_hart_pmp_log2gran(struct sbi_scratch *scratch);
unsigned int sbi_hart_pmp_addrbits(struct sbi_scratch *scratch);
bool sbi_hart_smepmp_is_fw_region(unsigned int pmp_idx);
int sbi_hart_pmp_init(struct sbi_scratch *scratch);
#endif

View File

@@ -1,100 +0,0 @@
/*
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2025 Ventana Micro Systems Inc.
*/
#ifndef __SBI_HART_PROTECTION_H__
#define __SBI_HART_PROTECTION_H__
#include <sbi/sbi_types.h>
#include <sbi/sbi_list.h>
struct sbi_scratch;
/** Representation of hart protection mechanism */
struct sbi_hart_protection {
/** List head */
struct sbi_dlist head;
/** Name of the hart protection mechanism */
char name[32];
/** Ratings of the hart protection mechanism (higher is better) */
unsigned long rating;
/** Configure protection for current HART (Mandatory) */
int (*configure)(struct sbi_scratch *scratch);
/** Unconfigure protection for current HART (Mandatory) */
void (*unconfigure)(struct sbi_scratch *scratch);
/** Create temporary mapping to access address range on current HART (Optional) */
int (*map_range)(struct sbi_scratch *scratch,
unsigned long base, unsigned long size);
/** Destroy temporary mapping on current HART (Optional) */
int (*unmap_range)(struct sbi_scratch *scratch,
unsigned long base, unsigned long size);
};
/**
* Get the best hart protection mechanism
*
* @return pointer to best hart protection mechanism
*/
struct sbi_hart_protection *sbi_hart_protection_best(void);
/**
* Register a hart protection mechanism
*
* @param hprot pointer to hart protection mechanism
*
* @return 0 on success and negative error code on failure
*/
int sbi_hart_protection_register(struct sbi_hart_protection *hprot);
/**
* Unregister a hart protection mechanism
*
* @param hprot pointer to hart protection mechanism
*/
void sbi_hart_protection_unregister(struct sbi_hart_protection *hprot);
/**
* Configure protection for current HART
*
* @param scratch pointer to scratch space of current HART
*
* @return 0 on success and negative error code on failure
*/
int sbi_hart_protection_configure(struct sbi_scratch *scratch);
/**
* Unconfigure protection for current HART
*
* @param scratch pointer to scratch space of current HART
*/
void sbi_hart_protection_unconfigure(struct sbi_scratch *scratch);
/**
* Create temporary mapping to access address range on current HART
*
* @param base base address of the temporary mapping
* @param size size of the temporary mapping
*
* @return 0 on success and negative error code on failure
*/
int sbi_hart_protection_map_range(unsigned long base, unsigned long size);
/**
* Destroy temporary mapping to access address range on current HART
*
* @param base base address of the temporary mapping
* @param size size of the temporary mapping
*
* @return 0 on success and negative error code on failure
*/
int sbi_hart_protection_unmap_range(unsigned long base, unsigned long size);
#endif /* __SBI_HART_PROTECTION_H__ */
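
For context, the header deleted above defined a pluggable isolation framework: providers register with a rating and the framework picks the best one per hart. A hypothetical provider registration, sketched only to show the shape of the removed interface (the smmtt name and rating value are invented):

```c
/* Hypothetical provider for the removed sbi_hart_protection framework. */
static int smmtt_configure(struct sbi_scratch *scratch)
{
	/* program isolation tables for the calling hart */
	return 0;
}

static void smmtt_unconfigure(struct sbi_scratch *scratch)
{
	/* tear the isolation down again */
}

static struct sbi_hart_protection smmtt_protection = {
	.name = "smmtt",
	.rating = 100,	/* higher rating wins over a plain PMP provider */
	.configure = smmtt_configure,
	.unconfigure = smmtt_unconfigure,
	/* map_range/unmap_range are optional and omitted here */
};

/* During cold boot: sbi_hart_protection_register(&smmtt_protection);
 * each hart then runs sbi_hart_protection_configure(scratch), which
 * dispatches to the highest-rated registered mechanism. */
```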

View File

@@ -23,9 +23,6 @@ struct sbi_ipi_device {
 	/** Name of the IPI device */
 	char name[32];
-	/** Ratings of the IPI device (higher is better) */
-	unsigned long rating;
 	/** Send IPI to a target HART index */
 	void (*ipi_send)(u32 hart_index);
@@ -88,13 +85,13 @@ int sbi_ipi_send_halt(ulong hmask, ulong hbase);
 void sbi_ipi_process(void);
-int sbi_ipi_raw_send(u32 hartindex, bool all_devices);
-void sbi_ipi_raw_clear(bool all_devices);
+int sbi_ipi_raw_send(u32 hartindex);
+void sbi_ipi_raw_clear(void);
 const struct sbi_ipi_device *sbi_ipi_get_device(void);
-void sbi_ipi_add_device(const struct sbi_ipi_device *dev);
+void sbi_ipi_set_device(const struct sbi_ipi_device *dev);
 int sbi_ipi_init(struct sbi_scratch *scratch, bool cold_boot);

View File

@@ -173,15 +173,4 @@ static inline void sbi_list_del_init(struct sbi_dlist *entry)
 	     &pos->member != (head); \
 	     pos = n, n = sbi_list_entry(pos->member.next, typeof(*pos), member))
-/**
- * Iterate over list of given type in reverse order
- * @param pos the type * to use as a loop cursor.
- * @param head the head for your list.
- * @param member the name of the list_struct within the struct.
- */
-#define sbi_list_for_each_entry_reverse(pos, head, member) \
-	for (pos = sbi_list_entry((head)->prev, typeof(*pos), member); \
-	     &pos->member != (head); \
-	     pos = sbi_list_entry(pos->member.prev, typeof(*pos), member))
 #endif

View File

@@ -116,6 +116,9 @@ struct sbi_platform_operations {
 	/** Initialize the platform interrupt controller during cold boot */
 	int (*irqchip_init)(void);
+	/** Initialize IPI during cold boot */
+	int (*ipi_init)(void);
 	/** Get tlb flush limit value **/
 	u64 (*get_tlbr_flush_limit)(void);
@@ -525,6 +528,20 @@ static inline int sbi_platform_irqchip_init(const struct sbi_platform *plat)
 	return 0;
 }
+/**
+ * Initialize the platform IPI support during cold boot
+ *
+ * @param plat pointer to struct sbi_platform
+ *
+ * @return 0 on success and negative error code on failure
+ */
+static inline int sbi_platform_ipi_init(const struct sbi_platform *plat)
+{
+	if (plat && sbi_platform_ops(plat)->ipi_init)
+		return sbi_platform_ops(plat)->ipi_init();
+	return 0;
+}
 /**
  * Initialize the platform timer during cold boot
  *

View File

@@ -69,18 +69,11 @@ struct sbi_system_suspend_device {
 	 * return from system_suspend() may ignore this parameter.
 	 */
 	int (*system_suspend)(u32 sleep_type, unsigned long mmode_resume_addr);
-	/**
-	 * Resume the system from system suspend
-	 */
-	void (*system_resume)(void);
 };
 const struct sbi_system_suspend_device *sbi_system_suspend_get_device(void);
 void sbi_system_suspend_set_device(struct sbi_system_suspend_device *dev);
 void sbi_system_suspend_test_enable(void);
-void sbi_system_resume(void);
-bool sbi_system_is_suspended(void);
 bool sbi_system_suspend_supported(u32 sleep_type);
 int sbi_system_suspend(u32 sleep_type, ulong resume_addr, ulong opaque);

View File

@@ -54,8 +54,6 @@ do { \
 #define SBI_TLB_INFO_SIZE sizeof(struct sbi_tlb_info)
-void __sbi_sfence_vma_all();
 int sbi_tlb_request(ulong hmask, ulong hbase, struct sbi_tlb_info *tinfo);
 int sbi_tlb_init(struct sbi_scratch *scratch, bool cold_boot);

View File

@@ -14,7 +14,7 @@
 /* clang-format off */
-typedef signed char s8;
+typedef char s8;
 typedef unsigned char u8;
 typedef unsigned char uint8_t;

View File

@@ -1,69 +0,0 @@
/*
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2025 SiFive Inc.
*/
#ifndef __CACHE_H__
#define __CACHE_H__
#include <sbi/sbi_list.h>
#include <sbi/sbi_types.h>
#define CACHE_NAME_LEN 32
struct cache_device;
struct cache_ops {
/** Warm init **/
int (*warm_init)(struct cache_device *dev);
/** Flush entire cache **/
int (*cache_flush_all)(struct cache_device *dev);
};
struct cache_device {
/** Name of the device **/
char name[CACHE_NAME_LEN];
/** List node for search **/
struct sbi_dlist node;
/** Point to the next level cache **/
struct cache_device *next;
/** Cache Management Operations **/
struct cache_ops *ops;
/** CPU private cache **/
bool cpu_private;
/** The unique id of this cache device **/
u32 id;
};
/**
* Find a registered cache device
*
* @param id unique ID of the cache device
*
* @return the cache device or NULL
*/
struct cache_device *cache_find(u32 id);
/**
* Register a cache device
*
* cache_device->id must be initialized already and must not change during the life
* of the cache_device object.
*
* @param dev the cache device to register
*
* @return 0 on success, or a negative error code on failure
*/
int cache_add(struct cache_device *dev);
/**
* Flush the entire cache
*
* @param dev the cache to flush
*
* @return 0 on success, or a negative error code on failure
*/
int cache_flush_all(struct cache_device *dev);
#endif
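
The header removed above described a registry of cache devices keyed by a stable id, with flush callbacks and a next pointer for multi-level hierarchies. Roughly how a driver would have used it, with an invented MMIO flush register and id value:

```c
/* Hypothetical driver for the removed cache_device registry. */
#define L2_FLUSH_REG	((volatile u32 *)0x2010200)	/* invented address */

static int l2_flush_all(struct cache_device *dev)
{
	*L2_FLUSH_REG = 1;	/* device-specific full-cache flush */
	return 0;
}

static struct cache_ops l2_ops = {
	.cache_flush_all = l2_flush_all,
};

static struct cache_device l2 = {
	.name = "l2-cache",
	.ops = &l2_ops,
	.cpu_private = false,	/* shared last-level cache */
	.id = 0x42,		/* must stay unique and constant, per cache_add() */
};

/* At init: cache_add(&l2); later: cache_flush_all(cache_find(0x42)); */
```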

View File

@@ -1,34 +0,0 @@
/*
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2025 SiFive Inc.
*/
#ifndef __FDT_CACHE_H__
#define __FDT_CACHE_H__
#include <sbi_utils/cache/cache.h>
/**
* Register a cache device using information from the DT
*
* @param fdt devicetree blob
* @param noff offset of a node in the devicetree blob
* @param dev cache device to register for this devicetree node
*
* @return 0 on success, or a negative error code on failure
*/
int fdt_cache_add(const void *fdt, int noff, struct cache_device *dev);
/**
* Get the cache device referencd by the "next-level-cache" property of a DT node
*
* @param fdt devicetree blob
* @param noff offset of a node in the devicetree blob
* @param out_dev location to return the cache device
*
* @return 0 on success, or a negative error code on failure
*/
int fdt_next_cache_get(const void *fdt, int noff, struct cache_device **out_dev);
#endif

View File

@@ -1,40 +0,0 @@
/*
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2025 SiFive Inc.
*/
#ifndef __FDT_CMO_HELPER_H__
#define __FDT_CMO_HELPER_H__
#ifdef CONFIG_FDT_CACHE
/**
* Flush the private first level cache of the current hart
*
* @return 0 on success, or a negative error code on failure
*/
int fdt_cmo_private_flc_flush_all(void);
/**
* Flush the last level cache of the current hart
*
* @return 0 on success, or a negative error code on failure
*/
int fdt_cmo_llc_flush_all(void);
/**
* Initialize the cache devices for each hart
*
* @param fdt devicetree blob
* @param cold_boot cold init or warm init
*
* @return 0 on success, or a negative error code on failure
*/
int fdt_cmo_init(bool cold_boot);
#else
static inline int fdt_cmo_init(bool cold_boot) { return 0; }
#endif /* CONFIG_FDT_CACHE */
#endif /* __FDT_CMO_HELPER_H__ */

View File

@@ -1,20 +0,0 @@
/*
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2025 SiFive Inc.
*/
#ifndef __FDT_HSM_SIFIVE_INST_H__
#define __FDT_HSM_SIFIVE_INST_H__
static inline void sifive_cease(void)
{
__asm__ __volatile__(".insn 0x30500073" ::: "memory");
}
static inline void sifive_cflush(void)
{
__asm__ __volatile__(".insn 0xfc000073" ::: "memory");
}
#endif
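
The two `.insn` encodings above are SiFive vendor instructions: 0x30500073 is CEASE, which permanently halts the hart, and 0xfc000073 is CFLUSH.D.L1 with rs1=x0, which writes back and invalidates the whole L1 data cache. A power-down path would flush before ceasing so dirty lines are not lost; a sketch (the function name is illustrative):

```c
/* Sketch of a SiFive hart power-down sequence using the wrappers above. */
static void sifive_hart_stop(void)
{
	sifive_cflush();	/* write back + invalidate the L1 D-cache */
	sifive_cease();		/* hart stops fetching; only reset brings it back */
	__builtin_unreachable();
}
```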

View File

@@ -1,14 +0,0 @@
/*
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2025 SiFive Inc.
*/
#ifndef __FDT_HSM_SIFIVE_TMC0_H__
#define __FDT_HSM_SIFIVE_TMC0_H__
int sifive_tmc0_set_wakemask_enareq(u32 hartid);
void sifive_tmc0_set_wakemask_disreq(u32 hartid);
bool sifive_tmc0_is_pg(u32 hartid);
#endif

View File

@@ -0,0 +1,26 @@
/*
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2020 Western Digital Corporation or its affiliates.
*
* Authors:
* Anup Patel <anup.patel@wdc.com>
*/
#ifndef __FDT_IPI_H__
#define __FDT_IPI_H__
#include <sbi/sbi_types.h>
#include <sbi_utils/fdt/fdt_driver.h>
#ifdef CONFIG_FDT_IPI
int fdt_ipi_init(void);
#else
static inline int fdt_ipi_init(void) { return 0; }
#endif
#endif

View File

@@ -33,7 +33,6 @@ struct aplic_delegate_data {
 struct aplic_data {
 	/* Private members */
 	struct sbi_irqchip_device irqchip;
-	struct sbi_dlist node;
 	/* Public members */
 	unsigned long addr;
 	unsigned long size;
@@ -49,6 +48,4 @@ struct aplic_data {
 int aplic_cold_irqchip_init(struct aplic_data *aplic);
-void aplic_reinit_all(void);
 #endif

View File

@@ -216,10 +216,7 @@ enum rpmi_servicegroup_id {
 	RPMI_SRVGRP_SYSTEM_SUSPEND = 0x0004,
 	RPMI_SRVGRP_HSM = 0x0005,
 	RPMI_SRVGRP_CPPC = 0x0006,
-	RPMI_SRVGRP_VOLTAGE = 0x00007,
 	RPMI_SRVGRP_CLOCK = 0x0008,
-	RPMI_SRVGRP_DEVICE_POWER = 0x0009,
-	RPMI_SRVGRP_PERFORMANCE = 0x0000A,
 	RPMI_SRVGRP_ID_MAX_COUNT,
 	/* Reserved range for service groups */
@@ -614,86 +611,6 @@ struct rpmi_cppc_hart_list_resp {
 	u32 hartid[(RPMI_MSG_DATA_SIZE(RPMI_SLOT_SIZE_MIN) - (sizeof(u32) * 3)) / sizeof(u32)];
 };
-/** RPMI Voltage ServiceGroup Service IDs */
-enum rpmi_voltage_service_id {
-	RPMI_VOLTAGE_SRV_ENABLE_NOTIFICATION = 0x01,
-	RPMI_VOLTAGE_SRV_GET_NUM_DOMAINS = 0x02,
-	RPMI_VOLTAGE_SRV_GET_ATTRIBUTES = 0x03,
-	RPMI_VOLTAGE_SRV_GET_SUPPORTED_LEVELS = 0x04,
-	RPMI_VOLTAGE_SRV_SET_CONFIG = 0x05,
-	RPMI_VOLTAGE_SRV_GET_CONFIG = 0x06,
-	RPMI_VOLTAGE_SRV_SET_LEVEL = 0x07,
-	RPMI_VOLTAGE_SRV_GET_LEVEL = 0x08,
-	RPMI_VOLTAGE_SRV_MAX_COUNT,
-};
-struct rpmi_voltage_get_num_domains_resp {
-	s32 status;
-	u32 num_domains;
-};
-struct rpmi_voltage_get_attributes_req {
-	u32 domain_id;
-};
-struct rpmi_voltage_get_attributes_resp {
-	s32 status;
-	u32 flags;
-	u32 num_levels;
-	u32 transition_latency;
-	u8 name[16];
-};
-struct rpmi_voltage_get_supported_rate_req {
-	u32 domain_id;
-	u32 index;
-};
-struct rpmi_voltage_get_supported_rate_resp {
-	s32 status;
-	u32 flags;
-	u32 remaining;
-	u32 returned;
-	u32 level[0];
-};
-struct rpmi_voltage_set_config_req {
-	u32 domain_id;
-#define RPMI_CLOCK_CONFIG_ENABLE (1U << 0)
-	u32 config;
-};
-struct rpmi_voltage_set_config_resp {
-	s32 status;
-};
-struct rpmi_voltage_get_config_req {
-	u32 domain_id;
-};
-struct rpmi_voltage_get_config_resp {
-	s32 status;
-	u32 config;
-};
-struct rpmi_voltage_set_level_req {
-	u32 domain_id;
-	s32 level;
-};
-struct rpmi_voltage_set_level_resp {
-	s32 status;
-};
-struct rpmi_voltage_get_level_req {
-	u32 domain_id;
-};
-struct rpmi_voltage_get_level_resp {
-	s32 status;
-	s32 level;
-};
 /** RPMI Clock ServiceGroup Service IDs */
 enum rpmi_clock_service_id {
 	RPMI_CLOCK_SRV_ENABLE_NOTIFICATION = 0x01,
@@ -786,165 +703,4 @@ struct rpmi_clock_get_rate_resp {
 	u32 clock_rate_high;
 };
-/** RPMI Device Power ServiceGroup Service IDs */
-enum rpmi_dpwr_service_id {
-	RPMI_DPWR_SRV_ENABLE_NOTIFICATION = 0x01,
-	RPMI_DPWR_SRV_GET_NUM_DOMAINS = 0x02,
-	RPMI_DPWR_SRV_GET_ATTRIBUTES = 0x03,
-	RPMI_DPWR_SRV_SET_STATE = 0x04,
-	RPMI_DPWR_SRV_GET_STATE = 0x05,
-	RPMI_DPWR_SRV_MAX_COUNT,
-};
-struct rpmi_dpwr_get_num_domain_resp {
-	s32 status;
-	u32 num_domain;
-};
-struct rpmi_dpwr_get_attrs_req {
-	u32 domain_id;
-};
-struct rpmi_dpwr_get_attrs_resp {
-	s32 status;
-	u32 flags;
-	u32 transition_latency;
-	u8 name[16];
-};
-struct rpmi_dpwr_set_state_req {
-	u32 domain_id;
-	u32 state;
-};
-struct rpmi_dpwr_set_state_resp {
-	s32 status;
-};
-struct rpmi_dpwr_get_state_req {
-	u32 domain_id;
-};
-struct rpmi_dpwr_get_state_resp {
-	s32 status;
-	u32 state;
-};
-/** RPMI Performance ServiceGroup Service IDs */
-enum rpmi_performance_service_id {
-	RPMI_PERF_SRV_ENABLE_NOTIFICATION = 0x01,
-	RPMI_PERF_SRV_GET_NUM_DOMAINS = 0x02,
-	RPMI_PERF_SRV_GET_ATTRIBUTES = 0x03,
-	RPMI_PERF_SRV_GET_SUPPORTED_LEVELS = 0x04,
-	RPMI_PERF_SRV_GET_LEVEL = 0x05,
-	RPMI_PERF_SRV_SET_LEVEL = 0x06,
-	RPMI_PERF_SRV_GET_LIMIT = 0x07,
-	RPMI_PERF_SRV_SET_LIMIT = 0x08,
-	RPMI_PERF_SRV_GET_FAST_CHANNEL_REGION = 0x09,
-	RPMI_PERF_SRV_GET_FAST_CHANNEL_ATTRIBUTES = 0x0A,
-	RPMI_PERF_SRV_MAX_COUNT,
-};
-struct rpmi_perf_get_num_domain_resp {
-	s32 status;
-	u32 num_domains;
-};
-struct rpmi_perf_get_attrs_req {
-	u32 domain_id;
-};
-struct rpmi_perf_get_attrs_resp {
-	s32 status;
-	u32 flags;
-	u32 num_level;
-	u32 latency;
-	u8 name[16];
-};
-struct rpmi_perf_get_supported_level_req {
-	u32 domain_id;
-	u32 perf_level_index;
-};
-struct rpmi_perf_domain_level {
-	u32 level_index;
-	u32 opp_level;
-	u32 power_cost_uw;
-	u32 transition_latency_us;
-};
-struct rpmi_perf_get_supported_level_resp {
-	s32 status;
-	u32 reserve;
-	u32 remaining;
-	u32 returned;
-	struct rpmi_perf_domain_level level[0];
-};
-struct rpmi_perf_get_level_req {
-	u32 domain_id;
-};
-struct rpmi_perf_get_level_resp {
-	s32 status;
-	u32 level_index;
-};
-struct rpmi_perf_set_level_req {
-	u32 domain_id;
-	u32 level_index;
-};
-struct rpmi_perf_set_level_resp {
-	s32 status;
-};
-struct rpmi_perf_get_limit_req {
-	u32 domain_id;
-};
-struct rpmi_perf_get_limit_resp {
-	s32 status;
-	u32 level_index_max;
-	u32 level_index_min;
-};
-struct rpmi_perf_set_limit_req {
-	u32 domain_id;
-	u32 level_index_max;
-	u32 level_index_min;
-};
-struct rpmi_perf_set_limit_resp {
-	s32 status;
-};
-struct rpmi_perf_get_fast_chn_region_resp {
-	s32 status;
-	u32 region_phy_addr_low;
-	u32 region_phy_addr_high;
-	u32 region_size_low;
-	u32 region_size_high;
-};
-struct rpmi_perf_get_fast_chn_attr_req {
-	u32 domain_id;
-	u32 service_id;
-};
-struct rpmi_perf_get_fast_chn_attr_resp {
-	s32 status;
-	u32 flags;
-	u32 region_offset_low;
-	u32 region_offset_high;
-	u32 region_size;
-	u32 db_addr_low;
-	u32 db_addr_high;
-	u32 db_id_low;
-	u32 db_id_high;
-	u32 db_perserved_low;
-	u32 db_perserved_high;
-};
 #endif /* !__RPMI_MSGPROT_H__ */

View File

@@ -42,11 +42,6 @@ struct aclint_mtimer_data {
 	void (*time_wr)(bool timecmp, u64 value, volatile u64 *addr);
 };
-struct aclint_mtimer_data *aclint_get_mtimer_data(void);
-void aclint_mtimer_update(struct aclint_mtimer_data *mt,
-			  struct aclint_mtimer_data *ref);
 void aclint_mtimer_sync(struct aclint_mtimer_data *mt);
 void aclint_mtimer_set_reference(struct aclint_mtimer_data *mt,

View File

@@ -75,8 +75,6 @@ libsbi-objs-y += sbi_emulate_csr.o
 libsbi-objs-y += sbi_fifo.o
 libsbi-objs-y += sbi_fwft.o
 libsbi-objs-y += sbi_hart.o
-libsbi-objs-y += sbi_hart_pmp.o
-libsbi-objs-y += sbi_hart_protection.o
 libsbi-objs-y += sbi_heap.o
 libsbi-objs-y += sbi_math.o
 libsbi-objs-y += sbi_hfence.o

View File

@@ -93,91 +93,77 @@ void misa_string(int xlen, char *out, unsigned int out_sz)
 unsigned long csr_read_num(int csr_num)
 {
-#define switchcase_csr_read(__csr_num)		\
-	case __csr_num:				\
-		return csr_read(__csr_num);
-#define switchcase_csr_read_2(__csr_num)	\
-	switchcase_csr_read(__csr_num + 0)	\
-	switchcase_csr_read(__csr_num + 1)
-#define switchcase_csr_read_4(__csr_num)	\
-	switchcase_csr_read_2(__csr_num + 0)	\
-	switchcase_csr_read_2(__csr_num + 2)
-#define switchcase_csr_read_8(__csr_num)	\
-	switchcase_csr_read_4(__csr_num + 0)	\
-	switchcase_csr_read_4(__csr_num + 4)
-#define switchcase_csr_read_16(__csr_num)	\
-	switchcase_csr_read_8(__csr_num + 0)	\
-	switchcase_csr_read_8(__csr_num + 8)
-#define switchcase_csr_read_32(__csr_num)	\
-	switchcase_csr_read_16(__csr_num + 0)	\
-	switchcase_csr_read_16(__csr_num + 16)
-#define switchcase_csr_read_64(__csr_num)	\
-	switchcase_csr_read_32(__csr_num + 0)	\
-	switchcase_csr_read_32(__csr_num + 32)
-#define switchcase_csr_read_128(__csr_num)	\
-	switchcase_csr_read_64(__csr_num + 0)	\
-	switchcase_csr_read_64(__csr_num + 64)
-#define switchcase_csr_read_256(__csr_num)	\
-	switchcase_csr_read_128(__csr_num + 0)	\
-	switchcase_csr_read_128(__csr_num + 128)
+#define switchcase_csr_read(__csr_num, __val)	\
+	case __csr_num:				\
+		__val = csr_read(__csr_num);	\
+		break;
+#define switchcase_csr_read_2(__csr_num, __val)	\
+	switchcase_csr_read(__csr_num + 0, __val)	\
+	switchcase_csr_read(__csr_num + 1, __val)
+#define switchcase_csr_read_4(__csr_num, __val)	\
+	switchcase_csr_read_2(__csr_num + 0, __val)	\
+	switchcase_csr_read_2(__csr_num + 2, __val)
+#define switchcase_csr_read_8(__csr_num, __val)	\
+	switchcase_csr_read_4(__csr_num + 0, __val)	\
+	switchcase_csr_read_4(__csr_num + 4, __val)
+#define switchcase_csr_read_16(__csr_num, __val)	\
+	switchcase_csr_read_8(__csr_num + 0, __val)	\
+	switchcase_csr_read_8(__csr_num + 8, __val)
+#define switchcase_csr_read_32(__csr_num, __val)	\
+	switchcase_csr_read_16(__csr_num + 0, __val)	\
+	switchcase_csr_read_16(__csr_num + 16, __val)
+#define switchcase_csr_read_64(__csr_num, __val)	\
+	switchcase_csr_read_32(__csr_num + 0, __val)	\
+	switchcase_csr_read_32(__csr_num + 32, __val)
+	unsigned long ret = 0;
 	switch (csr_num) {
-	switchcase_csr_read_16(CSR_PMPCFG0)
-	switchcase_csr_read_64(CSR_PMPADDR0)
-	switchcase_csr_read(CSR_MCYCLE)
-	switchcase_csr_read(CSR_MINSTRET)
-	switchcase_csr_read(CSR_MHPMCOUNTER3)
-	switchcase_csr_read_4(CSR_MHPMCOUNTER4)
-	switchcase_csr_read_8(CSR_MHPMCOUNTER8)
-	switchcase_csr_read_16(CSR_MHPMCOUNTER16)
-	switchcase_csr_read(CSR_MCOUNTINHIBIT)
-	switchcase_csr_read(CSR_MCYCLECFG)
-	switchcase_csr_read(CSR_MINSTRETCFG)
-	switchcase_csr_read(CSR_MHPMEVENT3)
-	switchcase_csr_read_4(CSR_MHPMEVENT4)
-	switchcase_csr_read_8(CSR_MHPMEVENT8)
-	switchcase_csr_read_16(CSR_MHPMEVENT16)
+	switchcase_csr_read_16(CSR_PMPCFG0, ret)
+	switchcase_csr_read_64(CSR_PMPADDR0, ret)
+	switchcase_csr_read(CSR_MCYCLE, ret)
+	switchcase_csr_read(CSR_MINSTRET, ret)
+	switchcase_csr_read(CSR_MHPMCOUNTER3, ret)
+	switchcase_csr_read_4(CSR_MHPMCOUNTER4, ret)
+	switchcase_csr_read_8(CSR_MHPMCOUNTER8, ret)
+	switchcase_csr_read_16(CSR_MHPMCOUNTER16, ret)
+	switchcase_csr_read(CSR_MCOUNTINHIBIT, ret)
+	switchcase_csr_read(CSR_MCYCLECFG, ret)
+	switchcase_csr_read(CSR_MINSTRETCFG, ret)
+	switchcase_csr_read(CSR_MHPMEVENT3, ret)
+	switchcase_csr_read_4(CSR_MHPMEVENT4, ret)
+	switchcase_csr_read_8(CSR_MHPMEVENT8, ret)
+	switchcase_csr_read_16(CSR_MHPMEVENT16, ret)
 #if __riscv_xlen == 32
-	switchcase_csr_read(CSR_MCYCLEH)
-	switchcase_csr_read(CSR_MINSTRETH)
-	switchcase_csr_read(CSR_MHPMCOUNTER3H)
-	switchcase_csr_read_4(CSR_MHPMCOUNTER4H)
-	switchcase_csr_read_8(CSR_MHPMCOUNTER8H)
-	switchcase_csr_read_16(CSR_MHPMCOUNTER16H)
+	switchcase_csr_read(CSR_MCYCLEH, ret)
+	switchcase_csr_read(CSR_MINSTRETH, ret)
+	switchcase_csr_read(CSR_MHPMCOUNTER3H, ret)
+	switchcase_csr_read_4(CSR_MHPMCOUNTER4H, ret)
+	switchcase_csr_read_8(CSR_MHPMCOUNTER8H, ret)
+	switchcase_csr_read_16(CSR_MHPMCOUNTER16H, ret)
 	/**
 	 * The CSR range M[CYCLE, INSTRET]CFGH are available only if smcntrpmf
 	 * extension is present. The caller must ensure that.
 	 */
-	switchcase_csr_read(CSR_MCYCLECFGH)
-	switchcase_csr_read(CSR_MINSTRETCFGH)
+	switchcase_csr_read(CSR_MCYCLECFGH, ret)
+	switchcase_csr_read(CSR_MINSTRETCFGH, ret)
 	/**
 	 * The CSR range MHPMEVENT[3-16]H are available only if sscofpmf
 	 * extension is present. The caller must ensure that.
 	 */
-	switchcase_csr_read(CSR_MHPMEVENT3H)
-	switchcase_csr_read_4(CSR_MHPMEVENT4H)
-	switchcase_csr_read_8(CSR_MHPMEVENT8H)
-	switchcase_csr_read_16(CSR_MHPMEVENT16H)
+	switchcase_csr_read(CSR_MHPMEVENT3H, ret)
+	switchcase_csr_read_4(CSR_MHPMEVENT4H, ret)
+	switchcase_csr_read_8(CSR_MHPMEVENT8H, ret)
+	switchcase_csr_read_16(CSR_MHPMEVENT16H, ret)
 #endif
-	switchcase_csr_read_256(CSR_CUSTOM0_U_RW_BASE)
-	switchcase_csr_read_64(CSR_CUSTOM1_U_RO_BASE)
-	switchcase_csr_read_64(CSR_CUSTOM2_S_RW_BASE)
-	switchcase_csr_read_64(CSR_CUSTOM3_S_RW_BASE)
-	switchcase_csr_read_64(CSR_CUSTOM4_S_RO_BASE)
-	switchcase_csr_read_64(CSR_CUSTOM5_HS_RW_BASE)
-	switchcase_csr_read_64(CSR_CUSTOM6_HS_RW_BASE)
-	switchcase_csr_read_64(CSR_CUSTOM7_HS_RO_BASE)
-	switchcase_csr_read_64(CSR_CUSTOM8_M_RW_BASE)
-	switchcase_csr_read_64(CSR_CUSTOM9_M_RW_BASE)
-	switchcase_csr_read_64(CSR_CUSTOM10_M_RO_BASE)
 	default:
 		sbi_panic("%s: Unknown CSR %#x", __func__, csr_num);
-		return 0;
+		break;
 	}
+	return ret;
-#undef switchcase_csr_read_256
-#undef switchcase_csr_read_128
 #undef switchcase_csr_read_64
 #undef switchcase_csr_read_32
 #undef switchcase_csr_read_16
@@ -211,12 +197,6 @@ void csr_write_num(int csr_num, unsigned long val)
 #define switchcase_csr_write_64(__csr_num, __val)	\
 	switchcase_csr_write_32(__csr_num + 0, __val)	\
 	switchcase_csr_write_32(__csr_num + 32, __val)
-#define switchcase_csr_write_128(__csr_num, __val)	\
-	switchcase_csr_write_64(__csr_num + 0, __val)	\
-	switchcase_csr_write_64(__csr_num + 64, __val)
-#define switchcase_csr_write_256(__csr_num, __val)	\
-	switchcase_csr_write_128(__csr_num + 0, __val)	\
-	switchcase_csr_write_128(__csr_num + 128, __val)
 	switch (csr_num) {
 	switchcase_csr_write_16(CSR_PMPCFG0, val)
switchcase_csr_write_16(CSR_PMPCFG0, val) switchcase_csr_write_16(CSR_PMPCFG0, val)
@@ -248,21 +228,12 @@ void csr_write_num(int csr_num, unsigned long val)
 	switchcase_csr_write_4(CSR_MHPMEVENT4, val)
 	switchcase_csr_write_8(CSR_MHPMEVENT8, val)
 	switchcase_csr_write_16(CSR_MHPMEVENT16, val)
-	switchcase_csr_write_256(CSR_CUSTOM0_U_RW_BASE, val)
-	switchcase_csr_write_64(CSR_CUSTOM2_S_RW_BASE, val)
-	switchcase_csr_write_64(CSR_CUSTOM3_S_RW_BASE, val)
-	switchcase_csr_write_64(CSR_CUSTOM5_HS_RW_BASE, val)
-	switchcase_csr_write_64(CSR_CUSTOM6_HS_RW_BASE, val)
-	switchcase_csr_write_64(CSR_CUSTOM8_M_RW_BASE, val)
-	switchcase_csr_write_64(CSR_CUSTOM9_M_RW_BASE, val)
 	default:
 		sbi_panic("%s: Unknown CSR %#x", __func__, csr_num);
 		break;
 	}
-#undef switchcase_csr_write_256
-#undef switchcase_csr_write_128
 #undef switchcase_csr_write_64
 #undef switchcase_csr_write_32
 #undef switchcase_csr_write_16
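
The rework above swaps the return-style case macros for ones that assign into a caller-provided variable and break, giving csr_read_num() a single exit; the _128/_256 helpers go away along with the custom CSR ranges they served. For illustration, switchcase_csr_read_2(CSR_MCYCLE, ret) in the new scheme expands to roughly the following (this sits inside the switch (csr_num) body):

```c
/* Approximate expansion of switchcase_csr_read_2(CSR_MCYCLE, ret). */
case CSR_MCYCLE + 0:
	ret = csr_read(CSR_MCYCLE + 0);
	break;
case CSR_MCYCLE + 1:
	ret = csr_read(CSR_MCYCLE + 1);
	break;
```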

View File

@@ -16,7 +16,6 @@
 #include <sbi/sbi_trap.h>
 #include <sbi/sbi_dbtr.h>
 #include <sbi/sbi_heap.h>
-#include <sbi/sbi_hart_protection.h>
 #include <sbi/riscv_encoding.h>
 #include <sbi/riscv_asm.h>
@@ -337,19 +336,6 @@ static void dbtr_trigger_setup(struct sbi_dbtr_trigger *trig,
 		if (__test_bit(RV_DBTR_BIT(MC6, VS), &tdata1))
 			__set_bit(RV_DBTR_BIT(TS, VS), &trig->state);
 		break;
-	case RISCV_DBTR_TRIG_ICOUNT:
-		if (__test_bit(RV_DBTR_BIT(ICOUNT, U), &tdata1))
-			__set_bit(RV_DBTR_BIT(TS, U), &trig->state);
-		if (__test_bit(RV_DBTR_BIT(ICOUNT, S), &tdata1))
-			__set_bit(RV_DBTR_BIT(TS, S), &trig->state);
-		if (__test_bit(RV_DBTR_BIT(ICOUNT, VU), &tdata1))
-			__set_bit(RV_DBTR_BIT(TS, VU), &trig->state);
-		if (__test_bit(RV_DBTR_BIT(ICOUNT, VS), &tdata1))
-			__set_bit(RV_DBTR_BIT(TS, VS), &trig->state);
-		break;
 	default:
 		sbi_dprintf("%s: Unknown type (tdata1: 0x%lx Type: %ld)\n",
 			    __func__, tdata1, TDATA1_GET_TYPE(tdata1));
@@ -393,16 +379,6 @@ static void dbtr_trigger_enable(struct sbi_dbtr_trigger *trig)
 		update_bit(state & RV_DBTR_BIT_MASK(TS, S),
 			   RV_DBTR_BIT(MC6, S), &trig->tdata1);
 		break;
-	case RISCV_DBTR_TRIG_ICOUNT:
-		update_bit(state & RV_DBTR_BIT_MASK(TS, VU),
-			   RV_DBTR_BIT(ICOUNT, VU), &trig->tdata1);
-		update_bit(state & RV_DBTR_BIT_MASK(TS, VS),
-			   RV_DBTR_BIT(ICOUNT, VS), &trig->tdata1);
-		update_bit(state & RV_DBTR_BIT_MASK(TS, U),
-			   RV_DBTR_BIT(ICOUNT, U), &trig->tdata1);
-		update_bit(state & RV_DBTR_BIT_MASK(TS, S),
-			   RV_DBTR_BIT(ICOUNT, S), &trig->tdata1);
-		break;
 	default:
 		break;
 	}
@@ -442,12 +418,6 @@ static void dbtr_trigger_disable(struct sbi_dbtr_trigger *trig)
 		__clear_bit(RV_DBTR_BIT(MC6, U), &trig->tdata1);
 		__clear_bit(RV_DBTR_BIT(MC6, S), &trig->tdata1);
 		break;
-	case RISCV_DBTR_TRIG_ICOUNT:
-		__clear_bit(RV_DBTR_BIT(ICOUNT, VU), &trig->tdata1);
-		__clear_bit(RV_DBTR_BIT(ICOUNT, VS), &trig->tdata1);
-		__clear_bit(RV_DBTR_BIT(ICOUNT, U), &trig->tdata1);
-		__clear_bit(RV_DBTR_BIT(ICOUNT, S), &trig->tdata1);
-		break;
 	default:
 		break;
 	}
@@ -471,7 +441,6 @@ static int dbtr_trigger_supported(unsigned long type)
 	switch (type) {
 	case RISCV_DBTR_TRIG_MCONTROL:
 	case RISCV_DBTR_TRIG_MCONTROL6:
-	case RISCV_DBTR_TRIG_ICOUNT:
 		return 1;
 	default:
 		break;
@@ -493,11 +462,6 @@ static int dbtr_trigger_valid(unsigned long type, unsigned long tdata)
 		    !(tdata & RV_DBTR_BIT_MASK(MC6, M)))
 			return 1;
 		break;
-	case RISCV_DBTR_TRIG_ICOUNT:
-		if (!(tdata & RV_DBTR_BIT_MASK(ICOUNT, DMODE)) &&
-		    !(tdata & RV_DBTR_BIT_MASK(ICOUNT, M)))
-			return 1;
-		break;
 	default:
 		break;
 	}
@@ -559,22 +523,17 @@ int sbi_dbtr_read_trig(unsigned long smode,
 	shmem_base = hart_shmem_base(hs);
-	sbi_hart_protection_map_range((unsigned long)shmem_base,
-				      trig_count * sizeof(*entry));
+	sbi_hart_map_saddr((unsigned long)shmem_base,
+			   trig_count * sizeof(*entry));
 	for_each_trig_entry(shmem_base, trig_count, typeof(*entry), entry) {
 		xmit = &entry->data;
 		trig = INDEX_TO_TRIGGER((_idx + trig_idx_base));
-		csr_write(CSR_TSELECT, trig->index);
-		trig->tdata1 = csr_read(CSR_TDATA1);
-		trig->tdata2 = csr_read(CSR_TDATA2);
-		trig->tdata3 = csr_read(CSR_TDATA3);
 		xmit->tstate = cpu_to_lle(trig->state);
 		xmit->tdata1 = cpu_to_lle(trig->tdata1);
 		xmit->tdata2 = cpu_to_lle(trig->tdata2);
 		xmit->tdata3 = cpu_to_lle(trig->tdata3);
 	}
-	sbi_hart_protection_unmap_range((unsigned long)shmem_base,
-					trig_count * sizeof(*entry));
+	sbi_hart_unmap_saddr();
 	return SBI_SUCCESS;
 }
@@ -598,8 +557,8 @@ int sbi_dbtr_install_trig(unsigned long smode,
 		return SBI_ERR_NO_SHMEM;
 	shmem_base = hart_shmem_base(hs);
-	sbi_hart_protection_map_range((unsigned long)shmem_base,
-				      trig_count * sizeof(*entry));
+	sbi_hart_map_saddr((unsigned long)shmem_base,
+			   trig_count * sizeof(*entry));
 	/* Check requested triggers configuration */
 	for_each_trig_entry(shmem_base, trig_count, typeof(*entry), entry) {
@@ -608,23 +567,20 @@ int sbi_dbtr_install_trig(unsigned long smode,
 		if (!dbtr_trigger_supported(TDATA1_GET_TYPE(ctrl))) {
 			*out = _idx;
-			sbi_hart_protection_unmap_range((unsigned long)shmem_base,
-							trig_count * sizeof(*entry));
+			sbi_hart_unmap_saddr();
 			return SBI_ERR_FAILED;
 		}
 		if (!dbtr_trigger_valid(TDATA1_GET_TYPE(ctrl), ctrl)) {
 			*out = _idx;
-			sbi_hart_protection_unmap_range((unsigned long)shmem_base,
-							trig_count * sizeof(*entry));
+			sbi_hart_unmap_saddr();
 			return SBI_ERR_FAILED;
 		}
 	}
 	if (hs->available_trigs < trig_count) {
 		*out = hs->available_trigs;
-		sbi_hart_protection_unmap_range((unsigned long)shmem_base,
-						trig_count * sizeof(*entry));
+		sbi_hart_unmap_saddr();
 		return SBI_ERR_FAILED;
 	}
@@ -644,9 +600,7 @@ int sbi_dbtr_install_trig(unsigned long smode,
 		xmit->idx = cpu_to_lle(trig->index);
 	}
-	sbi_hart_protection_unmap_range((unsigned long)shmem_base,
-					trig_count * sizeof(*entry));
+	sbi_hart_unmap_saddr();
 	return SBI_SUCCESS;
 }
@@ -719,23 +673,23 @@ int sbi_dbtr_update_trig(unsigned long smode,
 		return SBI_ERR_BAD_RANGE;
 	for_each_trig_entry(shmem_base, trig_count, typeof(*entry), entry) {
-		sbi_hart_protection_map_range((unsigned long)entry, sizeof(*entry));
+		sbi_hart_map_saddr((unsigned long)entry, sizeof(*entry));
 		trig_idx = entry->id.idx;
 		if (trig_idx >= hs->total_trigs) {
-			sbi_hart_protection_unmap_range((unsigned long)entry, sizeof(*entry));
+			sbi_hart_unmap_saddr();
 			return SBI_ERR_INVALID_PARAM;
 		}
 		trig = INDEX_TO_TRIGGER(trig_idx);
 		if (!(trig->state & RV_DBTR_BIT_MASK(TS, MAPPED))) {
-			sbi_hart_protection_unmap_range((unsigned long)entry, sizeof(*entry));
+			sbi_hart_unmap_saddr();
 			return SBI_ERR_FAILED;
 		}
 		dbtr_trigger_setup(trig, &entry->data);
-		sbi_hart_protection_unmap_range((unsigned long)entry, sizeof(*entry));
+		sbi_hart_unmap_saddr();
 		dbtr_trigger_enable(trig);
 	}

View File

@@ -25,6 +25,7 @@ static u32 domain_count = 0;
 static bool domain_finalized = false;
 #define ROOT_REGION_MAX 32
+static u32 root_memregs_count = 0;
 struct sbi_domain root = {
 	.name = "root",
@@ -121,80 +122,6 @@ void sbi_domain_memregion_init(unsigned long addr,
} }
} }
unsigned int sbi_domain_get_smepmp_flags(struct sbi_domain_memregion *reg)
{
unsigned int pmp_flags = 0;
unsigned long rstart, rend;
if ((reg->flags & SBI_DOMAIN_MEMREGION_ACCESS_MASK) == 0) {
/*
* Region is inaccessible in all privilege modes.
*
* SmePMP allows two encodings for an inaccessible region:
* - pmpcfg.LRWX = 0000 (Inaccessible region)
* - pmpcfg.LRWX = 1000 (Locked inaccessible region)
* We use the first encoding here.
*/
return 0;
} else if (SBI_DOMAIN_MEMREGION_IS_SHARED(reg->flags)) {
/* Read only for both M and SU modes */
if (SBI_DOMAIN_MEMREGION_IS_SUR_MR(reg->flags))
pmp_flags = (PMP_L | PMP_R | PMP_W | PMP_X);
/* Execute for SU but Read/Execute for M mode */
else if (SBI_DOMAIN_MEMREGION_IS_SUX_MRX(reg->flags))
/* locked region */
pmp_flags = (PMP_L | PMP_W | PMP_X);
/* Execute only for both M and SU modes */
else if (SBI_DOMAIN_MEMREGION_IS_SUX_MX(reg->flags))
pmp_flags = (PMP_L | PMP_W);
/* Read/Write for both M and SU modes */
else if (SBI_DOMAIN_MEMREGION_IS_SURW_MRW(reg->flags))
pmp_flags = (PMP_W | PMP_X);
/* Read only for SU mode but Read/Write for M mode */
else if (SBI_DOMAIN_MEMREGION_IS_SUR_MRW(reg->flags))
pmp_flags = (PMP_W);
} else if (SBI_DOMAIN_MEMREGION_M_ONLY_ACCESS(reg->flags)) {
/*
* When smepmp is supported and used, M region cannot have RWX
* permissions on any region.
*/
if ((reg->flags & SBI_DOMAIN_MEMREGION_M_ACCESS_MASK)
== SBI_DOMAIN_MEMREGION_M_RWX) {
sbi_printf("%s: M-mode only regions cannot have"
"RWX permissions\n", __func__);
return 0;
}
/* M-mode only access regions are always locked */
pmp_flags |= PMP_L;
if (reg->flags & SBI_DOMAIN_MEMREGION_M_READABLE)
pmp_flags |= PMP_R;
if (reg->flags & SBI_DOMAIN_MEMREGION_M_WRITABLE)
pmp_flags |= PMP_W;
if (reg->flags & SBI_DOMAIN_MEMREGION_M_EXECUTABLE)
pmp_flags |= PMP_X;
} else if (SBI_DOMAIN_MEMREGION_SU_ONLY_ACCESS(reg->flags)) {
if (reg->flags & SBI_DOMAIN_MEMREGION_SU_READABLE)
pmp_flags |= PMP_R;
if (reg->flags & SBI_DOMAIN_MEMREGION_SU_WRITABLE)
pmp_flags |= PMP_W;
if (reg->flags & SBI_DOMAIN_MEMREGION_SU_EXECUTABLE)
pmp_flags |= PMP_X;
} else {
rstart = reg->base;
rend = (reg->order < __riscv_xlen) ? rstart + ((1UL << reg->order) - 1) : -1UL;
sbi_printf("%s: Unsupported Smepmp permissions on region 0x%"PRILX"-0x%"PRILX"\n",
__func__, rstart, rend);
}
return pmp_flags;
}
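Reading one row of the table concretely: under Smepmp's MML encoding, pmpcfg.L clear together with PMP_W set selects the shared-region encodings, so a region shared read/write between both modes deliberately comes back as PMP_W|PMP_X rather than a literal RWX triple. A small sketch, with the region values assumed for illustration:

```
/* Sketch: expected encoding for a shared R/W region (values illustrative). */
struct sbi_domain_memregion shared_reg = {
	.base  = 0x80200000,
	.order = 12,
	.flags = SBI_DOMAIN_MEMREGION_SHARED_SURW_MRW,
};

static void check_encoding(void)
{
	unsigned int f = sbi_domain_get_smepmp_flags(&shared_reg);

	/* Shared R/W for M and S/U mode encodes as W|X with L clear */
	if (f != (PMP_W | PMP_X))
		sbi_printf("unexpected Smepmp encoding 0x%x\n", f);
}
```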
bool sbi_domain_check_addr(const struct sbi_domain *dom, bool sbi_domain_check_addr(const struct sbi_domain *dom,
unsigned long addr, unsigned long mode, unsigned long addr, unsigned long mode,
unsigned long access_flags) unsigned long access_flags)
@@ -235,11 +162,7 @@ bool sbi_domain_check_addr(const struct sbi_domain *dom,
rstart + ((1UL << reg->order) - 1) : -1UL; rstart + ((1UL << reg->order) - 1) : -1UL;
if (rstart <= addr && addr <= rend) { if (rstart <= addr && addr <= rend) {
rmmio = (rflags & SBI_DOMAIN_MEMREGION_MMIO) ? true : false; rmmio = (rflags & SBI_DOMAIN_MEMREGION_MMIO) ? true : false;
/* if (mmio != rmmio)
* MMIO devices may appear in regions without the flag set (such as the
* default region), but MMIO device regions should not be used as memory.
*/
if (!mmio && rmmio)
return false; return false;
return ((rrwx & rwx) == rwx) ? true : false; return ((rrwx & rwx) == rwx) ? true : false;
} }
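The relaxed check rejects exactly one of the four combinations, which the comment hints at; spelled out as a truth table:

```
/* Truth table for the MMIO check above:
 *
 *   mmio | rmmio | old: mmio != rmmio | new: !mmio && rmmio
 *   -----+-------+--------------------+--------------------
 *     0  |   0   |       allow        |       allow
 *     0  |   1   |       reject       |       reject
 *     1  |   0   |       reject       |       allow
 *     1  |   1   |       allow        |       allow
 *
 * MMIO accesses may now land in regions lacking the MMIO flag (such as
 * the default region), but ordinary memory accesses still cannot
 * target an MMIO region.
 */
```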
@@ -295,19 +218,6 @@ static bool is_region_compatible(const struct sbi_domain_memregion *regA,
static bool is_region_before(const struct sbi_domain_memregion *regA, static bool is_region_before(const struct sbi_domain_memregion *regA,
const struct sbi_domain_memregion *regB) const struct sbi_domain_memregion *regB)
{ {
/*
* Enforce firmware region ordering for memory access
* under SmePMP.
* Place firmware regions first to ensure consistent
* PMP entries during domain context switches.
*/
if (SBI_DOMAIN_MEMREGION_IS_FIRMWARE(regA->flags) &&
!SBI_DOMAIN_MEMREGION_IS_FIRMWARE(regB->flags))
return true;
if (!SBI_DOMAIN_MEMREGION_IS_FIRMWARE(regA->flags) &&
SBI_DOMAIN_MEMREGION_IS_FIRMWARE(regB->flags))
return false;
if (regA->order < regB->order) if (regA->order < regB->order)
return true; return true;
@@ -371,17 +281,6 @@ static void clear_region(struct sbi_domain_memregion* reg)
sbi_memset(reg, 0x0, sizeof(*reg)); sbi_memset(reg, 0x0, sizeof(*reg));
} }
static int sbi_domain_used_memregions(const struct sbi_domain *dom)
{
int count = 0;
struct sbi_domain_memregion *reg;
sbi_domain_for_each_memregion(dom, reg)
count++;
return count;
}
static int sanitize_domain(struct sbi_domain *dom) static int sanitize_domain(struct sbi_domain *dom)
{ {
u32 i, j, count; u32 i, j, count;
@@ -420,7 +319,9 @@ static int sanitize_domain(struct sbi_domain *dom)
} }
/* Count memory regions */ /* Count memory regions */
count = sbi_domain_used_memregions(dom); count = 0;
sbi_domain_for_each_memregion(dom, reg)
count++;
/* Check presence of firmware regions */ /* Check presence of firmware regions */
if (!dom->fw_region_inited) { if (!dom->fw_region_inited) {
@@ -443,7 +344,7 @@ static int sanitize_domain(struct sbi_domain *dom)
} }
/* Remove covered regions */ /* Remove covered regions */
for (i = 0; i < (count - 1);) { while(i < (count - 1)) {
is_covered = false; is_covered = false;
reg = &dom->regions[i]; reg = &dom->regions[i];
@@ -563,8 +464,6 @@ void sbi_domain_dump(const struct sbi_domain *dom, const char *suffix)
sbi_printf("M: "); sbi_printf("M: ");
if (reg->flags & SBI_DOMAIN_MEMREGION_MMIO) if (reg->flags & SBI_DOMAIN_MEMREGION_MMIO)
sbi_printf("%cI", (k++) ? ',' : '('); sbi_printf("%cI", (k++) ? ',' : '(');
if (reg->flags & SBI_DOMAIN_MEMREGION_FW)
sbi_printf("%cF", (k++) ? ',' : '(');
if (reg->flags & SBI_DOMAIN_MEMREGION_M_READABLE) if (reg->flags & SBI_DOMAIN_MEMREGION_M_READABLE)
sbi_printf("%cR", (k++) ? ',' : '('); sbi_printf("%cR", (k++) ? ',' : '(');
if (reg->flags & SBI_DOMAIN_MEMREGION_M_WRITABLE) if (reg->flags & SBI_DOMAIN_MEMREGION_M_WRITABLE)
@@ -704,7 +603,6 @@ static int root_add_memregion(const struct sbi_domain_memregion *reg)
int rc; int rc;
bool reg_merged; bool reg_merged;
struct sbi_domain_memregion *nreg, *nreg1, *nreg2; struct sbi_domain_memregion *nreg, *nreg1, *nreg2;
int root_memregs_count = sbi_domain_used_memregions(&root);
/* Sanity checks */ /* Sanity checks */
if (!reg || domain_finalized || !root.regions || if (!reg || domain_finalized || !root.regions ||
@@ -875,7 +773,6 @@ int sbi_domain_init(struct sbi_scratch *scratch, u32 cold_hartid)
int rc; int rc;
struct sbi_hartmask *root_hmask; struct sbi_hartmask *root_hmask;
struct sbi_domain_memregion *root_memregs; struct sbi_domain_memregion *root_memregs;
int root_memregs_count = 0;
SBI_INIT_LIST_HEAD(&domain_list); SBI_INIT_LIST_HEAD(&domain_list);
@@ -920,15 +817,13 @@ int sbi_domain_init(struct sbi_scratch *scratch, u32 cold_hartid)
/* Root domain firmware memory region */ /* Root domain firmware memory region */
sbi_domain_memregion_init(scratch->fw_start, scratch->fw_rw_offset, sbi_domain_memregion_init(scratch->fw_start, scratch->fw_rw_offset,
(SBI_DOMAIN_MEMREGION_M_READABLE | (SBI_DOMAIN_MEMREGION_M_READABLE |
SBI_DOMAIN_MEMREGION_M_EXECUTABLE | SBI_DOMAIN_MEMREGION_M_EXECUTABLE),
SBI_DOMAIN_MEMREGION_FW),
&root_memregs[root_memregs_count++]); &root_memregs[root_memregs_count++]);
sbi_domain_memregion_init((scratch->fw_start + scratch->fw_rw_offset), sbi_domain_memregion_init((scratch->fw_start + scratch->fw_rw_offset),
(scratch->fw_size - scratch->fw_rw_offset), (scratch->fw_size - scratch->fw_rw_offset),
(SBI_DOMAIN_MEMREGION_M_READABLE | (SBI_DOMAIN_MEMREGION_M_READABLE |
SBI_DOMAIN_MEMREGION_M_WRITABLE | SBI_DOMAIN_MEMREGION_M_WRITABLE),
SBI_DOMAIN_MEMREGION_FW),
&root_memregs[root_memregs_count++]); &root_memregs[root_memregs_count++]);
root.fw_region_inited = true; root.fw_region_inited = true;
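The two firmware regions split the image at fw_rw_offset so that neither part needs all of R, W and X. A sketch of the resulting layout, with the addresses assumed purely for illustration:

```
/* Firmware region split at fw_rw_offset (addresses illustrative):
 *
 *   fw_start               fw_start + fw_rw_offset        fw_start + fw_size
 *   |-------- M: R + X --------|---------- M: R + W ----------|
 *       .text / .rodata              .data / .bss / heap
 *
 * e.g. fw_start = 0x80000000, fw_rw_offset = 0x20000, fw_size = 0x200000
 * yields an R+X region [0x80000000, 0x80020000) and an R+W region
 * [0x80020000, 0x80200000).
 */
```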


@@ -10,7 +10,6 @@
#include <sbi/sbi_console.h> #include <sbi/sbi_console.h>
#include <sbi/sbi_hsm.h> #include <sbi/sbi_hsm.h>
#include <sbi/sbi_hart.h> #include <sbi/sbi_hart.h>
#include <sbi/sbi_hart_protection.h>
#include <sbi/sbi_heap.h> #include <sbi/sbi_heap.h>
#include <sbi/sbi_scratch.h> #include <sbi/sbi_scratch.h>
#include <sbi/sbi_string.h> #include <sbi/sbi_string.h>
@@ -46,8 +45,6 @@ struct hart_context {
unsigned long scounteren; unsigned long scounteren;
/** Supervisor environment configuration register */ /** Supervisor environment configuration register */
unsigned long senvcfg; unsigned long senvcfg;
/** Supervisor resource management configuration register */
unsigned long srmcfg;
/** Reference to the owning domain */ /** Reference to the owning domain */
struct sbi_domain *dom; struct sbi_domain *dom;
@@ -95,22 +92,17 @@ static void hart_context_set(struct sbi_domain *dom, u32 hartindex,
* *
* @param ctx pointer to the current HART context * @param ctx pointer to the current HART context
* @param dom_ctx pointer to the target domain context * @param dom_ctx pointer to the target domain context
*
* @return 0 on success and negative error code on failure
*/ */
static int switch_to_next_domain_context(struct hart_context *ctx, static void switch_to_next_domain_context(struct hart_context *ctx,
struct hart_context *dom_ctx) struct hart_context *dom_ctx)
{ {
u32 hartindex = current_hartindex(); u32 hartindex = current_hartindex();
struct sbi_trap_context *trap_ctx; struct sbi_trap_context *trap_ctx;
struct sbi_domain *current_dom, *target_dom; struct sbi_domain *current_dom = ctx->dom;
struct sbi_domain *target_dom = dom_ctx->dom;
struct sbi_scratch *scratch = sbi_scratch_thishart_ptr(); struct sbi_scratch *scratch = sbi_scratch_thishart_ptr();
unsigned int pmp_count = sbi_hart_pmp_count(scratch);
if (!ctx || !dom_ctx || ctx == dom_ctx)
return SBI_EINVAL;
current_dom = ctx->dom;
target_dom = dom_ctx->dom;
/* Assign current hart to target domain */ /* Assign current hart to target domain */
spin_lock(&current_dom->assigned_harts_lock); spin_lock(&current_dom->assigned_harts_lock);
sbi_hartmask_clear_hartindex(hartindex, &current_dom->assigned_harts); sbi_hartmask_clear_hartindex(hartindex, &current_dom->assigned_harts);
@@ -123,8 +115,11 @@ static int switch_to_next_domain_context(struct hart_context *ctx,
spin_unlock(&target_dom->assigned_harts_lock); spin_unlock(&target_dom->assigned_harts_lock);
/* Reconfigure PMP settings for the new domain */ /* Reconfigure PMP settings for the new domain */
sbi_hart_protection_unconfigure(scratch); for (int i = 0; i < pmp_count; i++) {
sbi_hart_protection_configure(scratch); sbi_platform_pmp_disable(sbi_platform_thishart_ptr(), i);
pmp_disable(i);
}
sbi_hart_pmp_configure(scratch);
/* Save current CSR context and restore target domain's CSR context */ /* Save current CSR context and restore target domain's CSR context */
ctx->sstatus = csr_swap(CSR_SSTATUS, dom_ctx->sstatus); ctx->sstatus = csr_swap(CSR_SSTATUS, dom_ctx->sstatus);
@@ -140,8 +135,6 @@ static int switch_to_next_domain_context(struct hart_context *ctx,
ctx->scounteren = csr_swap(CSR_SCOUNTEREN, dom_ctx->scounteren); ctx->scounteren = csr_swap(CSR_SCOUNTEREN, dom_ctx->scounteren);
if (sbi_hart_priv_version(scratch) >= SBI_HART_PRIV_VER_1_12) if (sbi_hart_priv_version(scratch) >= SBI_HART_PRIV_VER_1_12)
ctx->senvcfg = csr_swap(CSR_SENVCFG, dom_ctx->senvcfg); ctx->senvcfg = csr_swap(CSR_SENVCFG, dom_ctx->senvcfg);
if (sbi_hart_has_extension(scratch, SBI_HART_EXT_SSQOSID))
ctx->srmcfg = csr_swap(CSR_SRMCFG, dom_ctx->srmcfg);
/* Save current trap state and restore target domain's trap state */ /* Save current trap state and restore target domain's trap state */
trap_ctx = sbi_trap_get_context(scratch); trap_ctx = sbi_trap_get_context(scratch);
@@ -163,57 +156,13 @@ static int switch_to_next_domain_context(struct hart_context *ctx,
else else
sbi_hsm_hart_stop(scratch, true); sbi_hsm_hart_stop(scratch, true);
} }
return 0;
}
static int hart_context_init(u32 hartindex)
{
struct hart_context *ctx;
struct sbi_domain *dom;
sbi_domain_for_each(dom) {
if (!sbi_hartmask_test_hartindex(hartindex,
dom->possible_harts))
continue;
ctx = sbi_zalloc(sizeof(struct hart_context));
if (!ctx)
return SBI_ENOMEM;
/* Bind context and domain */
ctx->dom = dom;
hart_context_set(dom, hartindex, ctx);
}
return 0;
} }
int sbi_domain_context_enter(struct sbi_domain *dom) int sbi_domain_context_enter(struct sbi_domain *dom)
{ {
int rc;
struct hart_context *dom_ctx;
struct hart_context *ctx = hart_context_thishart_get(); struct hart_context *ctx = hart_context_thishart_get();
struct hart_context *dom_ctx = hart_context_get(dom, current_hartindex());
/* Target domain must not be same as the current domain */
if (!dom || dom == sbi_domain_thishart_ptr())
return SBI_EINVAL;
/*
* If this is the first call to `enter` on the current hart, no
* context has been allocated yet. Allocate a context for each
* valid domain on the current hart.
*/
if (!ctx) {
rc = hart_context_init(current_hartindex());
if (rc)
return rc;
ctx = hart_context_thishart_get();
if (!ctx)
return SBI_EINVAL;
}
dom_ctx = hart_context_get(dom, current_hartindex());
/* Validate the domain context existence */ /* Validate the domain context existence */
if (!dom_ctx) if (!dom_ctx)
return SBI_EINVAL; return SBI_EINVAL;
@@ -221,12 +170,13 @@ int sbi_domain_context_enter(struct sbi_domain *dom)
/* Update target context's previous context to indicate the caller */ /* Update target context's previous context to indicate the caller */
dom_ctx->prev_ctx = ctx; dom_ctx->prev_ctx = ctx;
return switch_to_next_domain_context(ctx, dom_ctx); switch_to_next_domain_context(ctx, dom_ctx);
return 0;
} }
int sbi_domain_context_exit(void) int sbi_domain_context_exit(void)
{ {
int rc;
u32 hartindex = current_hartindex(); u32 hartindex = current_hartindex();
struct sbi_domain *dom; struct sbi_domain *dom;
struct hart_context *ctx = hart_context_thishart_get(); struct hart_context *ctx = hart_context_thishart_get();
@@ -238,13 +188,21 @@ int sbi_domain_context_exit(void)
* its context on the current hart if valid. * its context on the current hart if valid.
*/ */
if (!ctx) { if (!ctx) {
rc = hart_context_init(current_hartindex()); sbi_domain_for_each(dom) {
if (rc) if (!sbi_hartmask_test_hartindex(hartindex,
return rc; dom->possible_harts))
continue;
dom_ctx = sbi_zalloc(sizeof(struct hart_context));
if (!dom_ctx)
return SBI_ENOMEM;
/* Bind context and domain */
dom_ctx->dom = dom;
hart_context_set(dom, hartindex, dom_ctx);
}
ctx = hart_context_thishart_get(); ctx = hart_context_thishart_get();
if (!ctx)
return SBI_EINVAL;
} }
dom_ctx = ctx->prev_ctx; dom_ctx = ctx->prev_ctx;
@@ -268,7 +226,9 @@ int sbi_domain_context_exit(void)
if (!dom_ctx) if (!dom_ctx)
dom_ctx = hart_context_get(&root, hartindex); dom_ctx = hart_context_get(&root, hartindex);
return switch_to_next_domain_context(ctx, dom_ctx); switch_to_next_domain_context(ctx, dom_ctx);
return 0;
} }
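A round-trip through the two calls above, sketched under the assumption that dom came from the usual domain iteration helpers:

```
/* Sketch: run the calling hart inside another domain, then return. */
static int run_in_domain(struct sbi_domain *dom)
{
	/* Installs dom's PMP configuration and S-mode CSR context */
	int rc = sbi_domain_context_enter(dom);

	if (rc)
		return rc;

	/* ... M-mode work on behalf of dom ... */

	/* Restores the previous context recorded in prev_ctx */
	return sbi_domain_context_exit();
}
```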
int sbi_domain_context_init(void) int sbi_domain_context_init(void)


@@ -14,7 +14,7 @@
#include <sbi/sbi_ecall_interface.h> #include <sbi/sbi_ecall_interface.h>
#include <sbi/sbi_trap.h> #include <sbi/sbi_trap.h>
#include <sbi/riscv_asm.h> #include <sbi/riscv_asm.h>
#include <sbi/sbi_hart_protection.h> #include <sbi/sbi_hart.h>
static int sbi_ecall_dbcn_handler(unsigned long extid, unsigned long funcid, static int sbi_ecall_dbcn_handler(unsigned long extid, unsigned long funcid,
struct sbi_trap_regs *regs, struct sbi_trap_regs *regs,
@@ -46,12 +46,12 @@ static int sbi_ecall_dbcn_handler(unsigned long extid, unsigned long funcid,
regs->a1, regs->a0, smode, regs->a1, regs->a0, smode,
SBI_DOMAIN_READ|SBI_DOMAIN_WRITE)) SBI_DOMAIN_READ|SBI_DOMAIN_WRITE))
return SBI_ERR_INVALID_PARAM; return SBI_ERR_INVALID_PARAM;
sbi_hart_protection_map_range(regs->a1, regs->a0); sbi_hart_map_saddr(regs->a1, regs->a0);
if (funcid == SBI_EXT_DBCN_CONSOLE_WRITE) if (funcid == SBI_EXT_DBCN_CONSOLE_WRITE)
out->value = sbi_nputs((const char *)regs->a1, regs->a0); out->value = sbi_nputs((const char *)regs->a1, regs->a0);
else else
out->value = sbi_ngets((char *)regs->a1, regs->a0); out->value = sbi_ngets((char *)regs->a1, regs->a0);
sbi_hart_protection_unmap_range(regs->a1, regs->a0); sbi_hart_unmap_saddr();
return 0; return 0;
case SBI_EXT_DBCN_CONSOLE_WRITE_BYTE: case SBI_EXT_DBCN_CONSOLE_WRITE_BYTE:
sbi_putc(regs->a0); sbi_putc(regs->a0);


@@ -13,10 +13,8 @@
#include <sbi/sbi_error.h> #include <sbi/sbi_error.h>
#include <sbi/sbi_hart.h> #include <sbi/sbi_hart.h>
#include <sbi/sbi_heap.h> #include <sbi/sbi_heap.h>
#include <sbi/sbi_hfence.h>
#include <sbi/sbi_scratch.h> #include <sbi/sbi_scratch.h>
#include <sbi/sbi_string.h> #include <sbi/sbi_string.h>
#include <sbi/sbi_tlb.h>
#include <sbi/sbi_types.h> #include <sbi/sbi_types.h>
#include <sbi/riscv_asm.h> #include <sbi/riscv_asm.h>
@@ -169,16 +167,7 @@ static int fwft_adue_supported(struct fwft_config *conf)
static int fwft_set_adue(struct fwft_config *conf, unsigned long value) static int fwft_set_adue(struct fwft_config *conf, unsigned long value)
{ {
int res = fwft_menvcfg_set_bit(value, ENVCFG_ADUE_SHIFT); return fwft_menvcfg_set_bit(value, ENVCFG_ADUE_SHIFT);
if (res == SBI_OK) {
__sbi_sfence_vma_all();
if (misa_extension('H'))
__sbi_hfence_gvma_all();
}
return res;
} }
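The flush dropped here existed because menvcfg.ADUE changes how page-table A/D bits are updated, and stale cached translations could otherwise keep the old behaviour until the next fence. A sketch of the guarded toggle as it stood, using the helpers named in this file:

```
/* Sketch: toggle menvcfg.ADUE and invalidate cached translations so
 * the new A/D-update behaviour takes effect immediately. */
static int set_adue_with_fence(unsigned long value)
{
	int res = fwft_menvcfg_set_bit(value, ENVCFG_ADUE_SHIFT);

	if (res == SBI_OK) {
		__sbi_sfence_vma_all();          /* flush S-mode translations */
		if (misa_extension('H'))
			__sbi_hfence_gvma_all(); /* ...and guest translations */
	}
	return res;
}
```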
static int fwft_get_adue(struct fwft_config *conf, unsigned long *value) static int fwft_get_adue(struct fwft_config *conf, unsigned long *value)


@@ -13,21 +13,23 @@
#include <sbi/riscv_fp.h> #include <sbi/riscv_fp.h>
#include <sbi/sbi_bitops.h> #include <sbi/sbi_bitops.h>
#include <sbi/sbi_console.h> #include <sbi/sbi_console.h>
#include <sbi/sbi_domain.h>
#include <sbi/sbi_csr_detect.h> #include <sbi/sbi_csr_detect.h>
#include <sbi/sbi_error.h> #include <sbi/sbi_error.h>
#include <sbi/sbi_hart.h> #include <sbi/sbi_hart.h>
#include <sbi/sbi_hart_pmp.h> #include <sbi/sbi_math.h>
#include <sbi/sbi_platform.h> #include <sbi/sbi_platform.h>
#include <sbi/sbi_pmu.h> #include <sbi/sbi_pmu.h>
#include <sbi/sbi_string.h> #include <sbi/sbi_string.h>
#include <sbi/sbi_trap.h> #include <sbi/sbi_trap.h>
#include <sbi/sbi_hfence.h>
extern void __sbi_expected_trap(void); extern void __sbi_expected_trap(void);
extern void __sbi_expected_trap_hext(void); extern void __sbi_expected_trap_hext(void);
void (*sbi_hart_expected_trap)(void) = &__sbi_expected_trap; void (*sbi_hart_expected_trap)(void) = &__sbi_expected_trap;
unsigned long hart_features_offset; static unsigned long hart_features_offset;
static void mstatus_init(struct sbi_scratch *scratch) static void mstatus_init(struct sbi_scratch *scratch)
{ {
@@ -108,11 +110,6 @@ static void mstatus_init(struct sbi_scratch *scratch)
else else
mstateen_val &= ~SMSTATEEN0_CTR; mstateen_val &= ~SMSTATEEN0_CTR;
if (sbi_hart_has_extension(scratch, SBI_HART_EXT_SSQOSID))
mstateen_val |= SMSTATEEN0_SRMCFG;
else
mstateen_val &= ~SMSTATEEN0_SRMCFG;
csr_write64(CSR_MSTATEEN0, mstateen_val); csr_write64(CSR_MSTATEEN0, mstateen_val);
csr_write64(CSR_MSTATEEN1, SMSTATEEN_STATEN); csr_write64(CSR_MSTATEEN1, SMSTATEEN_STATEN);
csr_write64(CSR_MSTATEEN2, SMSTATEEN_STATEN); csr_write64(CSR_MSTATEEN2, SMSTATEEN_STATEN);
@@ -272,6 +269,30 @@ unsigned int sbi_hart_mhpm_mask(struct sbi_scratch *scratch)
return hfeatures->mhpm_mask; return hfeatures->mhpm_mask;
} }
unsigned int sbi_hart_pmp_count(struct sbi_scratch *scratch)
{
struct sbi_hart_features *hfeatures =
sbi_scratch_offset_ptr(scratch, hart_features_offset);
return hfeatures->pmp_count;
}
unsigned int sbi_hart_pmp_log2gran(struct sbi_scratch *scratch)
{
struct sbi_hart_features *hfeatures =
sbi_scratch_offset_ptr(scratch, hart_features_offset);
return hfeatures->pmp_log2gran;
}
unsigned int sbi_hart_pmp_addrbits(struct sbi_scratch *scratch)
{
struct sbi_hart_features *hfeatures =
sbi_scratch_offset_ptr(scratch, hart_features_offset);
return hfeatures->pmp_addr_bits;
}
unsigned int sbi_hart_mhpm_bits(struct sbi_scratch *scratch) unsigned int sbi_hart_mhpm_bits(struct sbi_scratch *scratch)
{ {
struct sbi_hart_features *hfeatures = struct sbi_hart_features *hfeatures =
@@ -280,6 +301,307 @@ unsigned int sbi_hart_mhpm_bits(struct sbi_scratch *scratch)
return hfeatures->mhpm_bits; return hfeatures->mhpm_bits;
} }
/*
* Returns Smepmp flags for a given domain and region based on permissions.
*/
static unsigned int sbi_hart_get_smepmp_flags(struct sbi_scratch *scratch,
struct sbi_domain *dom,
struct sbi_domain_memregion *reg)
{
unsigned int pmp_flags = 0;
if (SBI_DOMAIN_MEMREGION_IS_SHARED(reg->flags)) {
/* Read only for both M and SU modes */
if (SBI_DOMAIN_MEMREGION_IS_SUR_MR(reg->flags))
pmp_flags = (PMP_L | PMP_R | PMP_W | PMP_X);
/* Execute for SU but Read/Execute for M mode */
else if (SBI_DOMAIN_MEMREGION_IS_SUX_MRX(reg->flags))
/* locked region */
pmp_flags = (PMP_L | PMP_W | PMP_X);
/* Execute only for both M and SU modes */
else if (SBI_DOMAIN_MEMREGION_IS_SUX_MX(reg->flags))
pmp_flags = (PMP_L | PMP_W);
/* Read/Write for both M and SU modes */
else if (SBI_DOMAIN_MEMREGION_IS_SURW_MRW(reg->flags))
pmp_flags = (PMP_W | PMP_X);
/* Read only for SU mode but Read/Write for M mode */
else if (SBI_DOMAIN_MEMREGION_IS_SUR_MRW(reg->flags))
pmp_flags = (PMP_W);
} else if (SBI_DOMAIN_MEMREGION_M_ONLY_ACCESS(reg->flags)) {
/*
* When Smepmp is supported and used, an M-mode-only region cannot
* have RWX permissions.
*/
if ((reg->flags & SBI_DOMAIN_MEMREGION_M_ACCESS_MASK)
== SBI_DOMAIN_MEMREGION_M_RWX) {
sbi_printf("%s: M-mode only regions cannot have "
	   "RWX permissions\n", __func__);
return 0;
}
/* M-mode only access regions are always locked */
pmp_flags |= PMP_L;
if (reg->flags & SBI_DOMAIN_MEMREGION_M_READABLE)
pmp_flags |= PMP_R;
if (reg->flags & SBI_DOMAIN_MEMREGION_M_WRITABLE)
pmp_flags |= PMP_W;
if (reg->flags & SBI_DOMAIN_MEMREGION_M_EXECUTABLE)
pmp_flags |= PMP_X;
} else if (SBI_DOMAIN_MEMREGION_SU_ONLY_ACCESS(reg->flags)) {
if (reg->flags & SBI_DOMAIN_MEMREGION_SU_READABLE)
pmp_flags |= PMP_R;
if (reg->flags & SBI_DOMAIN_MEMREGION_SU_WRITABLE)
pmp_flags |= PMP_W;
if (reg->flags & SBI_DOMAIN_MEMREGION_SU_EXECUTABLE)
pmp_flags |= PMP_X;
}
return pmp_flags;
}
static void sbi_hart_smepmp_set(struct sbi_scratch *scratch,
struct sbi_domain *dom,
struct sbi_domain_memregion *reg,
unsigned int pmp_idx,
unsigned int pmp_flags,
unsigned int pmp_log2gran,
unsigned long pmp_addr_max)
{
unsigned long pmp_addr = reg->base >> PMP_SHIFT;
if (pmp_log2gran <= reg->order && pmp_addr < pmp_addr_max) {
sbi_platform_pmp_set(sbi_platform_ptr(scratch),
pmp_idx, reg->flags, pmp_flags,
reg->base, reg->order);
pmp_set(pmp_idx, pmp_flags, reg->base, reg->order);
} else {
sbi_printf("Can not configure pmp for domain %s because"
" memory region address 0x%lx or size 0x%lx "
"is not in range.\n", dom->name, reg->base,
reg->order);
}
}
static int sbi_hart_smepmp_configure(struct sbi_scratch *scratch,
unsigned int pmp_count,
unsigned int pmp_log2gran,
unsigned long pmp_addr_max)
{
struct sbi_domain_memregion *reg;
struct sbi_domain *dom = sbi_domain_thishart_ptr();
unsigned int pmp_idx, pmp_flags;
/*
* Set the RLB so that we can write to PMP entries without
* enforcement even if some entries are locked.
*/
csr_set(CSR_MSECCFG, MSECCFG_RLB);
/* Disable the reserved entry */
pmp_disable(SBI_SMEPMP_RESV_ENTRY);
/* Program M-only regions when MML is not set. */
pmp_idx = 0;
sbi_domain_for_each_memregion(dom, reg) {
/* Skip reserved entry */
if (pmp_idx == SBI_SMEPMP_RESV_ENTRY)
pmp_idx++;
if (pmp_count <= pmp_idx)
break;
/* Skip shared and SU-only regions */
if (!SBI_DOMAIN_MEMREGION_M_ONLY_ACCESS(reg->flags)) {
pmp_idx++;
continue;
}
pmp_flags = sbi_hart_get_smepmp_flags(scratch, dom, reg);
if (!pmp_flags)
return 0;
sbi_hart_smepmp_set(scratch, dom, reg, pmp_idx++, pmp_flags,
pmp_log2gran, pmp_addr_max);
}
/* Set the MML to enforce new encoding */
csr_set(CSR_MSECCFG, MSECCFG_MML);
/* Program shared and SU-only regions */
pmp_idx = 0;
sbi_domain_for_each_memregion(dom, reg) {
/* Skip reserved entry */
if (pmp_idx == SBI_SMEPMP_RESV_ENTRY)
pmp_idx++;
if (pmp_count <= pmp_idx)
break;
/* Skip M-only regions */
if (SBI_DOMAIN_MEMREGION_M_ONLY_ACCESS(reg->flags)) {
pmp_idx++;
continue;
}
pmp_flags = sbi_hart_get_smepmp_flags(scratch, dom, reg);
if (!pmp_flags)
return 0;
sbi_hart_smepmp_set(scratch, dom, reg, pmp_idx++, pmp_flags,
pmp_log2gran, pmp_addr_max);
}
/*
* All entries are programmed.
* Keep the RLB bit so that dynamic mappings can be done.
*/
return 0;
}
static int sbi_hart_oldpmp_configure(struct sbi_scratch *scratch,
unsigned int pmp_count,
unsigned int pmp_log2gran,
unsigned long pmp_addr_max)
{
struct sbi_domain_memregion *reg;
struct sbi_domain *dom = sbi_domain_thishart_ptr();
unsigned int pmp_idx = 0;
unsigned int pmp_flags;
unsigned long pmp_addr;
sbi_domain_for_each_memregion(dom, reg) {
if (pmp_count <= pmp_idx)
break;
pmp_flags = 0;
/*
* If permissions are to be enforced for all modes on
* this region, the lock bit should be set.
*/
if (reg->flags & SBI_DOMAIN_MEMREGION_ENF_PERMISSIONS)
pmp_flags |= PMP_L;
if (reg->flags & SBI_DOMAIN_MEMREGION_SU_READABLE)
pmp_flags |= PMP_R;
if (reg->flags & SBI_DOMAIN_MEMREGION_SU_WRITABLE)
pmp_flags |= PMP_W;
if (reg->flags & SBI_DOMAIN_MEMREGION_SU_EXECUTABLE)
pmp_flags |= PMP_X;
pmp_addr = reg->base >> PMP_SHIFT;
if (pmp_log2gran <= reg->order && pmp_addr < pmp_addr_max) {
sbi_platform_pmp_set(sbi_platform_ptr(scratch),
pmp_idx, reg->flags, pmp_flags,
reg->base, reg->order);
pmp_set(pmp_idx++, pmp_flags, reg->base, reg->order);
} else {
sbi_printf("Can not configure pmp for domain %s because"
" memory region address 0x%lx or size 0x%lx "
"is not in range.\n", dom->name, reg->base,
reg->order);
}
}
return 0;
}
int sbi_hart_map_saddr(unsigned long addr, unsigned long size)
{
/* shared R/W access for M and S/U mode */
unsigned int pmp_flags = (PMP_W | PMP_X);
unsigned long order, base = 0;
struct sbi_scratch *scratch = sbi_scratch_thishart_ptr();
/* If Smepmp is not supported, no special mapping is required */
if (!sbi_hart_has_extension(scratch, SBI_HART_EXT_SMEPMP))
return SBI_OK;
if (is_pmp_entry_mapped(SBI_SMEPMP_RESV_ENTRY))
return SBI_ENOSPC;
for (order = MAX(sbi_hart_pmp_log2gran(scratch), log2roundup(size));
order <= __riscv_xlen; order++) {
if (order < __riscv_xlen) {
base = addr & ~((1UL << order) - 1UL);
if ((base <= addr) &&
(addr < (base + (1UL << order))) &&
(base <= (addr + size - 1UL)) &&
((addr + size - 1UL) < (base + (1UL << order))))
break;
} else {
return SBI_EFAIL;
}
}
sbi_platform_pmp_set(sbi_platform_ptr(scratch), SBI_SMEPMP_RESV_ENTRY,
SBI_DOMAIN_MEMREGION_SHARED_SURW_MRW,
pmp_flags, base, order);
pmp_set(SBI_SMEPMP_RESV_ENTRY, pmp_flags, base, order);
return SBI_OK;
}
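The loop searches for the smallest naturally aligned power-of-two (NAPOT) window covering [addr, addr + size). A worked example, assuming the PMP granule is small enough not to dominate:

```
/* Worked example for the NAPOT search above (numbers illustrative):
 *   addr = 0x80001200, size = 0x300, log2roundup(0x300) = 10
 *
 *   order = 10: base = addr & ~0x3ff = 0x80001000
 *               addr + size - 1 = 0x800014ff >= base + 0x400  -> no fit
 *   order = 11: base = addr & ~0x7ff = 0x80001000
 *               0x800014ff < base + 0x800 = 0x80001800        -> fits
 *
 * so the reserved entry maps the 2 KiB window [0x80001000, 0x80001800).
 */
```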
int sbi_hart_unmap_saddr(void)
{
struct sbi_scratch *scratch = sbi_scratch_thishart_ptr();
if (!sbi_hart_has_extension(scratch, SBI_HART_EXT_SMEPMP))
return SBI_OK;
sbi_platform_pmp_disable(sbi_platform_ptr(scratch), SBI_SMEPMP_RESV_ENTRY);
return pmp_disable(SBI_SMEPMP_RESV_ENTRY);
}
int sbi_hart_pmp_configure(struct sbi_scratch *scratch)
{
int rc;
unsigned int pmp_bits, pmp_log2gran;
unsigned int pmp_count = sbi_hart_pmp_count(scratch);
unsigned long pmp_addr_max;
if (!pmp_count)
return 0;
pmp_log2gran = sbi_hart_pmp_log2gran(scratch);
pmp_bits = sbi_hart_pmp_addrbits(scratch) - 1;
pmp_addr_max = (1UL << pmp_bits) | ((1UL << pmp_bits) - 1);
if (sbi_hart_has_extension(scratch, SBI_HART_EXT_SMEPMP))
rc = sbi_hart_smepmp_configure(scratch, pmp_count,
pmp_log2gran, pmp_addr_max);
else
rc = sbi_hart_oldpmp_configure(scratch, pmp_count,
pmp_log2gran, pmp_addr_max);
/*
* As per section 3.7.2 of privileged specification v1.12,
* virtual address translations can be speculatively performed
* (even before actual access). These, along with PMP translations,
* can be cached. This can pose a problem with CPU hotplug
* and non-retentive suspend scenarios because PMP states are
* not preserved.
* It is advisable to flush the caching structures under such
* conditions.
*/
if (misa_extension('S')) {
__asm__ __volatile__("sfence.vma");
/*
* If hypervisor mode is supported, flush caching
* structures in guest mode too.
*/
if (misa_extension('H'))
__sbi_hfence_gvma_all();
}
return rc;
}
int sbi_hart_priv_version(struct sbi_scratch *scratch) int sbi_hart_priv_version(struct sbi_scratch *scratch)
{ {
struct sbi_hart_features *hfeatures = struct sbi_hart_features *hfeatures =
@@ -392,10 +714,7 @@ const struct sbi_hart_ext_data sbi_hart_ext[] = {
__SBI_HART_EXT_DATA(ssdbltrp, SBI_HART_EXT_SSDBLTRP), __SBI_HART_EXT_DATA(ssdbltrp, SBI_HART_EXT_SSDBLTRP),
__SBI_HART_EXT_DATA(smctr, SBI_HART_EXT_SMCTR), __SBI_HART_EXT_DATA(smctr, SBI_HART_EXT_SMCTR),
__SBI_HART_EXT_DATA(ssctr, SBI_HART_EXT_SSCTR), __SBI_HART_EXT_DATA(ssctr, SBI_HART_EXT_SSCTR),
__SBI_HART_EXT_DATA(ssqosid, SBI_HART_EXT_SSQOSID),
__SBI_HART_EXT_DATA(ssstateen, SBI_HART_EXT_SSSTATEEN), __SBI_HART_EXT_DATA(ssstateen, SBI_HART_EXT_SSSTATEEN),
__SBI_HART_EXT_DATA(xsfcflushdlone, SBI_HART_EXT_XSIFIVE_CFLUSH_D_L1),
__SBI_HART_EXT_DATA(xsfcease, SBI_HART_EXT_XSIFIVE_CEASE),
}; };
_Static_assert(SBI_HART_EXT_MAX == array_size(sbi_hart_ext), _Static_assert(SBI_HART_EXT_MAX == array_size(sbi_hart_ext),
@@ -718,6 +1037,10 @@ int sbi_hart_reinit(struct sbi_scratch *scratch)
if (rc) if (rc)
return rc; return rc;
rc = delegate_traps(scratch);
if (rc)
return rc;
return 0; return 0;
} }
@@ -745,16 +1068,6 @@ int sbi_hart_init(struct sbi_scratch *scratch, bool cold_boot)
if (rc) if (rc)
return rc; return rc;
if (cold_boot) {
rc = sbi_hart_pmp_init(scratch);
if (rc)
return rc;
}
rc = delegate_traps(scratch);
if (rc)
return rc;
return sbi_hart_reinit(scratch); return sbi_hart_reinit(scratch);
} }


@@ -1,356 +0,0 @@
/*
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2025 Ventana Micro Systems Inc.
*/
#include <sbi/sbi_bitmap.h>
#include <sbi/sbi_console.h>
#include <sbi/sbi_domain.h>
#include <sbi/sbi_hart.h>
#include <sbi/sbi_hart_protection.h>
#include <sbi/sbi_hfence.h>
#include <sbi/sbi_math.h>
#include <sbi/sbi_platform.h>
#include <sbi/sbi_tlb.h>
#include <sbi/riscv_asm.h>
/*
* Smepmp enforces access boundaries between M-mode and
* S/U-mode. When it is enabled, the PMPs are programmed
* such that M-mode doesn't have access to S/U-mode memory.
*
* To give M-mode R/W access to the shared memory between M and
* S/U-mode, the first entry is reserved. It is disabled at boot.
* When shared memory access is required, the physical address
* should be programmed into the first PMP entry with R/W
* permissions to the M-mode. Once the work is done, it should be
* unmapped. sbi_hart_protection_map_range/sbi_hart_protection_unmap_range
* function pair should be used to map/unmap the shared memory.
*/
#define SBI_SMEPMP_RESV_ENTRY 0
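Condensing the comment above into the reserved entry's lifecycle:

```
/* Lifecycle of PMP entry 0 under this scheme:
 *
 *   boot:   pmp_disable(SBI_SMEPMP_RESV_ENTRY);        entry idle
 *   map:    pmp_set(SBI_SMEPMP_RESV_ENTRY, W|X, ...);  M-mode R/W window open
 *   unmap:  pmp_disable(SBI_SMEPMP_RESV_ENTRY);        window closed
 *
 * Only one window can be open at a time; map_range() returns SBI_ENOSPC
 * if the entry is already mapped.
 */
```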
static DECLARE_BITMAP(fw_smepmp_ids, PMP_COUNT);
static bool fw_smepmp_ids_inited;
unsigned int sbi_hart_pmp_count(struct sbi_scratch *scratch)
{
struct sbi_hart_features *hfeatures = sbi_hart_features_ptr(scratch);
return hfeatures->pmp_count;
}
unsigned int sbi_hart_pmp_log2gran(struct sbi_scratch *scratch)
{
struct sbi_hart_features *hfeatures = sbi_hart_features_ptr(scratch);
return hfeatures->pmp_log2gran;
}
unsigned int sbi_hart_pmp_addrbits(struct sbi_scratch *scratch)
{
struct sbi_hart_features *hfeatures = sbi_hart_features_ptr(scratch);
return hfeatures->pmp_addr_bits;
}
bool sbi_hart_smepmp_is_fw_region(unsigned int pmp_idx)
{
if (!fw_smepmp_ids_inited)
return false;
return bitmap_test(fw_smepmp_ids, pmp_idx) ? true : false;
}
static void sbi_hart_pmp_fence(void)
{
/*
* As per section 3.7.2 of privileged specification v1.12,
* virtual address translations can be speculatively performed
* (even before actual access). These, along with PMP translations,
* can be cached. This can pose a problem with CPU hotplug
* and non-retentive suspend scenarios because PMP states are
* not preserved.
* It is advisable to flush the caching structures under such
* conditions.
*/
if (misa_extension('S')) {
__sbi_sfence_vma_all();
/*
* If hypervisor mode is supported, flush caching
* structures in guest mode too.
*/
if (misa_extension('H'))
__sbi_hfence_gvma_all();
}
}
static void sbi_hart_smepmp_set(struct sbi_scratch *scratch,
struct sbi_domain *dom,
struct sbi_domain_memregion *reg,
unsigned int pmp_idx,
unsigned int pmp_flags,
unsigned int pmp_log2gran,
unsigned long pmp_addr_max)
{
unsigned long pmp_addr = reg->base >> PMP_SHIFT;
if (pmp_log2gran <= reg->order && pmp_addr < pmp_addr_max) {
sbi_platform_pmp_set(sbi_platform_ptr(scratch),
pmp_idx, reg->flags, pmp_flags,
reg->base, reg->order);
pmp_set(pmp_idx, pmp_flags, reg->base, reg->order);
} else {
sbi_printf("Can not configure pmp for domain %s because"
" memory region address 0x%lx or size 0x%lx "
"is not in range.\n", dom->name, reg->base,
reg->order);
}
}
static bool is_valid_pmp_idx(unsigned int pmp_count, unsigned int pmp_idx)
{
if (pmp_count > pmp_idx)
return true;
sbi_printf("error: insufficient PMP entries\n");
return false;
}
static int sbi_hart_smepmp_configure(struct sbi_scratch *scratch)
{
struct sbi_domain_memregion *reg;
struct sbi_domain *dom = sbi_domain_thishart_ptr();
unsigned int pmp_log2gran, pmp_bits;
unsigned int pmp_idx, pmp_count;
unsigned long pmp_addr_max;
unsigned int pmp_flags;
pmp_count = sbi_hart_pmp_count(scratch);
pmp_log2gran = sbi_hart_pmp_log2gran(scratch);
pmp_bits = sbi_hart_pmp_addrbits(scratch) - 1;
pmp_addr_max = (1UL << pmp_bits) | ((1UL << pmp_bits) - 1);
/*
* Set the RLB so that we can write to PMP entries without
* enforcement even if some entries are locked.
*/
csr_set(CSR_MSECCFG, MSECCFG_RLB);
/* Disable the reserved entry */
pmp_disable(SBI_SMEPMP_RESV_ENTRY);
/* Program M-only regions when MML is not set. */
pmp_idx = 0;
sbi_domain_for_each_memregion(dom, reg) {
/* Skip reserved entry */
if (pmp_idx == SBI_SMEPMP_RESV_ENTRY)
pmp_idx++;
if (!is_valid_pmp_idx(pmp_count, pmp_idx))
return SBI_EFAIL;
/* Skip shared and SU-only regions */
if (!SBI_DOMAIN_MEMREGION_M_ONLY_ACCESS(reg->flags)) {
pmp_idx++;
continue;
}
/*
* Track firmware PMP entries to preserve them during
* domain switches. Under SmePMP, M-mode requires
* explicit PMP entries to access firmware code/data.
* These entries must remain enabled across domain
* context switches to prevent M-mode access faults.
*/
if (SBI_DOMAIN_MEMREGION_IS_FIRMWARE(reg->flags)) {
if (fw_smepmp_ids_inited) {
/* Check inconsistent firmware region */
if (!sbi_hart_smepmp_is_fw_region(pmp_idx))
return SBI_EINVAL;
} else {
bitmap_set(fw_smepmp_ids, pmp_idx, 1);
}
}
pmp_flags = sbi_domain_get_smepmp_flags(reg);
sbi_hart_smepmp_set(scratch, dom, reg, pmp_idx++, pmp_flags,
pmp_log2gran, pmp_addr_max);
}
fw_smepmp_ids_inited = true;
/* Set the MML to enforce new encoding */
csr_set(CSR_MSECCFG, MSECCFG_MML);
/* Program shared and SU-only regions */
pmp_idx = 0;
sbi_domain_for_each_memregion(dom, reg) {
/* Skip reserved entry */
if (pmp_idx == SBI_SMEPMP_RESV_ENTRY)
pmp_idx++;
if (!is_valid_pmp_idx(pmp_count, pmp_idx))
return SBI_EFAIL;
/* Skip M-only regions */
if (SBI_DOMAIN_MEMREGION_M_ONLY_ACCESS(reg->flags)) {
pmp_idx++;
continue;
}
pmp_flags = sbi_domain_get_smepmp_flags(reg);
sbi_hart_smepmp_set(scratch, dom, reg, pmp_idx++, pmp_flags,
pmp_log2gran, pmp_addr_max);
}
/*
* All entries are programmed.
* Keep the RLB bit so that dynamic mappings can be done.
*/
sbi_hart_pmp_fence();
return 0;
}
static int sbi_hart_smepmp_map_range(struct sbi_scratch *scratch,
unsigned long addr, unsigned long size)
{
/* shared R/W access for M and S/U mode */
unsigned int pmp_flags = (PMP_W | PMP_X);
unsigned long order, base = 0;
if (is_pmp_entry_mapped(SBI_SMEPMP_RESV_ENTRY))
return SBI_ENOSPC;
for (order = MAX(sbi_hart_pmp_log2gran(scratch), log2roundup(size));
order <= __riscv_xlen; order++) {
if (order < __riscv_xlen) {
base = addr & ~((1UL << order) - 1UL);
if ((base <= addr) &&
(addr < (base + (1UL << order))) &&
(base <= (addr + size - 1UL)) &&
((addr + size - 1UL) < (base + (1UL << order))))
break;
} else {
return SBI_EFAIL;
}
}
sbi_platform_pmp_set(sbi_platform_ptr(scratch), SBI_SMEPMP_RESV_ENTRY,
SBI_DOMAIN_MEMREGION_SHARED_SURW_MRW,
pmp_flags, base, order);
pmp_set(SBI_SMEPMP_RESV_ENTRY, pmp_flags, base, order);
return SBI_OK;
}
static int sbi_hart_smepmp_unmap_range(struct sbi_scratch *scratch,
unsigned long addr, unsigned long size)
{
sbi_platform_pmp_disable(sbi_platform_ptr(scratch), SBI_SMEPMP_RESV_ENTRY);
return pmp_disable(SBI_SMEPMP_RESV_ENTRY);
}
static int sbi_hart_oldpmp_configure(struct sbi_scratch *scratch)
{
struct sbi_domain_memregion *reg;
struct sbi_domain *dom = sbi_domain_thishart_ptr();
unsigned long pmp_addr, pmp_addr_max;
unsigned int pmp_log2gran, pmp_bits;
unsigned int pmp_idx, pmp_count;
unsigned int pmp_flags;
pmp_count = sbi_hart_pmp_count(scratch);
pmp_log2gran = sbi_hart_pmp_log2gran(scratch);
pmp_bits = sbi_hart_pmp_addrbits(scratch) - 1;
pmp_addr_max = (1UL << pmp_bits) | ((1UL << pmp_bits) - 1);
pmp_idx = 0;
sbi_domain_for_each_memregion(dom, reg) {
if (!is_valid_pmp_idx(pmp_count, pmp_idx))
return SBI_EFAIL;
pmp_flags = 0;
/*
* If permissions are to be enforced for all modes on
* this region, the lock bit should be set.
*/
if (reg->flags & SBI_DOMAIN_MEMREGION_ENF_PERMISSIONS)
pmp_flags |= PMP_L;
if (reg->flags & SBI_DOMAIN_MEMREGION_SU_READABLE)
pmp_flags |= PMP_R;
if (reg->flags & SBI_DOMAIN_MEMREGION_SU_WRITABLE)
pmp_flags |= PMP_W;
if (reg->flags & SBI_DOMAIN_MEMREGION_SU_EXECUTABLE)
pmp_flags |= PMP_X;
pmp_addr = reg->base >> PMP_SHIFT;
if (pmp_log2gran <= reg->order && pmp_addr < pmp_addr_max) {
sbi_platform_pmp_set(sbi_platform_ptr(scratch),
pmp_idx, reg->flags, pmp_flags,
reg->base, reg->order);
pmp_set(pmp_idx++, pmp_flags, reg->base, reg->order);
} else {
sbi_printf("Can not configure pmp for domain %s because"
" memory region address 0x%lx or size 0x%lx "
"is not in range.\n", dom->name, reg->base,
reg->order);
}
}
sbi_hart_pmp_fence();
return 0;
}
static void sbi_hart_pmp_unconfigure(struct sbi_scratch *scratch)
{
int i, pmp_count = sbi_hart_pmp_count(scratch);
for (i = 0; i < pmp_count; i++) {
/* Don't revoke firmware access permissions */
if (sbi_hart_smepmp_is_fw_region(i))
continue;
sbi_platform_pmp_disable(sbi_platform_ptr(scratch), i);
pmp_disable(i);
}
}
static struct sbi_hart_protection pmp_protection = {
.name = "pmp",
.rating = 100,
.configure = sbi_hart_oldpmp_configure,
.unconfigure = sbi_hart_pmp_unconfigure,
};
static struct sbi_hart_protection epmp_protection = {
.name = "epmp",
.rating = 200,
.configure = sbi_hart_smepmp_configure,
.unconfigure = sbi_hart_pmp_unconfigure,
.map_range = sbi_hart_smepmp_map_range,
.unmap_range = sbi_hart_smepmp_unmap_range,
};
int sbi_hart_pmp_init(struct sbi_scratch *scratch)
{
int rc;
if (sbi_hart_pmp_count(scratch)) {
if (sbi_hart_has_extension(scratch, SBI_HART_EXT_SMEPMP)) {
rc = sbi_hart_protection_register(&epmp_protection);
if (rc)
return rc;
} else {
rc = sbi_hart_protection_register(&pmp_protection);
if (rc)
return rc;
}
}
return 0;
}
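Both drivers go through the same rating-ordered registration, so a platform could in principle slot its own isolation mechanism in ahead of them. A sketch, assuming the sbi_hart_protection fields shown in the listing that follows (the driver itself is hypothetical):

```
/* Sketch: a hypothetical platform isolation driver that outranks the
 * built-in pmp (100) and epmp (200) drivers. */
static int my_iso_configure(struct sbi_scratch *scratch)
{
	/* program platform-specific isolation hardware here */
	return 0;
}

static void my_iso_unconfigure(struct sbi_scratch *scratch)
{
	/* tear it down on domain context switch */
}

static struct sbi_hart_protection my_iso = {
	.name        = "my-isolation",
	.rating      = 300,	/* selected ahead of pmp/epmp */
	.configure   = my_iso_configure,
	.unconfigure = my_iso_unconfigure,
};

/* e.g. from platform early init */
static int register_my_iso(void)
{
	return sbi_hart_protection_register(&my_iso);
}
```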


@@ -1,96 +0,0 @@
/*
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2025 Ventana Micro Systems Inc.
*/
#include <sbi/sbi_error.h>
#include <sbi/sbi_hart_protection.h>
#include <sbi/sbi_scratch.h>
static SBI_LIST_HEAD(hart_protection_list);
struct sbi_hart_protection *sbi_hart_protection_best(void)
{
if (sbi_list_empty(&hart_protection_list))
return NULL;
return sbi_list_first_entry(&hart_protection_list, struct sbi_hart_protection, head);
}
int sbi_hart_protection_register(struct sbi_hart_protection *hprot)
{
struct sbi_hart_protection *pos = NULL;
bool found_pos = false;
if (!hprot)
return SBI_EINVAL;
sbi_list_for_each_entry(pos, &hart_protection_list, head) {
if (hprot->rating > pos->rating) {
found_pos = true;
break;
}
}
if (found_pos)
sbi_list_add_tail(&hprot->head, &pos->head);
else
sbi_list_add_tail(&hprot->head, &hart_protection_list);
return 0;
}
void sbi_hart_protection_unregister(struct sbi_hart_protection *hprot)
{
if (!hprot)
return;
sbi_list_del(&hprot->head);
}
int sbi_hart_protection_configure(struct sbi_scratch *scratch)
{
struct sbi_hart_protection *hprot = sbi_hart_protection_best();
if (!hprot)
return SBI_EINVAL;
if (!hprot->configure)
return SBI_ENOSYS;
return hprot->configure(scratch);
}
void sbi_hart_protection_unconfigure(struct sbi_scratch *scratch)
{
struct sbi_hart_protection *hprot = sbi_hart_protection_best();
if (!hprot || !hprot->unconfigure)
return;
hprot->unconfigure(scratch);
}
int sbi_hart_protection_map_range(unsigned long base, unsigned long size)
{
struct sbi_hart_protection *hprot = sbi_hart_protection_best();
if (!hprot)
return SBI_EINVAL;
if (!hprot->map_range)
return 0;
return hprot->map_range(sbi_scratch_thishart_ptr(), base, size);
}
int sbi_hart_protection_unmap_range(unsigned long base, unsigned long size)
{
struct sbi_hart_protection *hprot = sbi_hart_protection_best();
if (!hprot)
return SBI_EINVAL;
if (!hprot->unmap_range)
return 0;
return hprot->unmap_range(sbi_scratch_thishart_ptr(), base, size);
}


@@ -16,9 +16,7 @@
/* Minimum size and alignment of heap allocations */ /* Minimum size and alignment of heap allocations */
#define HEAP_ALLOC_ALIGN 64 #define HEAP_ALLOC_ALIGN 64
#define HEAP_HOUSEKEEPING_FACTOR 16
/* Number of heap nodes to allocate at once */
#define HEAP_NODE_BATCH_SIZE 8
struct heap_node { struct heap_node {
struct sbi_dlist head; struct sbi_dlist head;
@@ -30,50 +28,20 @@ struct sbi_heap_control {
spinlock_t lock; spinlock_t lock;
unsigned long base; unsigned long base;
unsigned long size; unsigned long size;
unsigned long resv; unsigned long hkbase;
unsigned long hksize;
struct sbi_dlist free_node_list; struct sbi_dlist free_node_list;
struct sbi_dlist free_space_list; struct sbi_dlist free_space_list;
struct sbi_dlist used_space_list; struct sbi_dlist used_space_list;
struct heap_node init_free_space_node;
}; };
struct sbi_heap_control global_hpctrl; struct sbi_heap_control global_hpctrl;
static bool alloc_nodes(struct sbi_heap_control *hpctrl)
{
size_t size = HEAP_NODE_BATCH_SIZE * sizeof(struct heap_node);
struct heap_node *n, *new = NULL;
/* alloc_with_align() requires at most two free nodes */
if (hpctrl->free_node_list.next != hpctrl->free_node_list.prev)
return true;
sbi_list_for_each_entry_reverse(n, &hpctrl->free_space_list, head) {
if (n->size >= size) {
n->size -= size;
if (!n->size) {
sbi_list_del(&n->head);
sbi_list_add_tail(&n->head, &hpctrl->free_node_list);
}
new = (void *)(n->addr + n->size);
break;
}
}
if (!new)
return false;
for (size_t i = 0; i < HEAP_NODE_BATCH_SIZE; i++)
sbi_list_add_tail(&new[i].head, &hpctrl->free_node_list);
hpctrl->resv += size;
return true;
}
static void *alloc_with_align(struct sbi_heap_control *hpctrl, static void *alloc_with_align(struct sbi_heap_control *hpctrl,
size_t align, size_t size) size_t align, size_t size)
{ {
void *ret = NULL; void *ret = NULL;
struct heap_node *n, *np; struct heap_node *n, *np, *rem;
unsigned long lowest_aligned; unsigned long lowest_aligned;
size_t pad; size_t pad;
@@ -85,10 +53,6 @@ static void *alloc_with_align(struct sbi_heap_control *hpctrl,
spin_lock(&hpctrl->lock); spin_lock(&hpctrl->lock);
/* Ensure at least two free nodes are available for use below */
if (!alloc_nodes(hpctrl))
goto out;
np = NULL; np = NULL;
sbi_list_for_each_entry(n, &hpctrl->free_space_list, head) { sbi_list_for_each_entry(n, &hpctrl->free_space_list, head) {
lowest_aligned = ROUNDUP(n->addr, align); lowest_aligned = ROUNDUP(n->addr, align);
@@ -103,34 +67,55 @@ static void *alloc_with_align(struct sbi_heap_control *hpctrl,
goto out; goto out;
if (pad) { if (pad) {
if (sbi_list_empty(&hpctrl->free_node_list)) {
goto out;
}
n = sbi_list_first_entry(&hpctrl->free_node_list, n = sbi_list_first_entry(&hpctrl->free_node_list,
struct heap_node, head); struct heap_node, head);
sbi_list_del(&n->head); sbi_list_del(&n->head);
n->addr = np->addr; if ((size + pad < np->size) &&
n->size = pad; !sbi_list_empty(&hpctrl->free_node_list)) {
sbi_list_add_tail(&n->head, &np->head); rem = sbi_list_first_entry(&hpctrl->free_node_list,
struct heap_node, head);
sbi_list_del(&rem->head);
rem->addr = np->addr + (size + pad);
rem->size = np->size - (size + pad);
sbi_list_add_tail(&rem->head,
&hpctrl->free_space_list);
} else if (size + pad != np->size) {
/* Can't allocate, return n */
sbi_list_add(&n->head, &hpctrl->free_node_list);
ret = NULL;
goto out;
}
np->addr += pad; n->addr = lowest_aligned;
np->size -= pad; n->size = size;
sbi_list_add_tail(&n->head, &hpctrl->used_space_list);
np->size = pad;
ret = (void *)n->addr;
} else {
if ((size < np->size) &&
!sbi_list_empty(&hpctrl->free_node_list)) {
n = sbi_list_first_entry(&hpctrl->free_node_list,
struct heap_node, head);
sbi_list_del(&n->head);
n->addr = np->addr;
n->size = size;
np->addr += size;
np->size -= size;
sbi_list_add_tail(&n->head, &hpctrl->used_space_list);
ret = (void *)n->addr;
} else if (size == np->size) {
sbi_list_del(&np->head);
sbi_list_add_tail(&np->head, &hpctrl->used_space_list);
ret = (void *)np->addr;
}
} }
if (size < np->size) {
n = sbi_list_first_entry(&hpctrl->free_node_list,
struct heap_node, head);
sbi_list_del(&n->head);
n->addr = np->addr + size;
n->size = np->size - size;
sbi_list_add(&n->head, &np->head);
np->size = size;
}
sbi_list_del(&np->head);
sbi_list_add_tail(&np->head, &hpctrl->used_space_list);
ret = (void *)np->addr;
out: out:
spin_unlock(&hpctrl->lock); spin_unlock(&hpctrl->lock);
@@ -231,32 +216,44 @@ unsigned long sbi_heap_free_space_from(struct sbi_heap_control *hpctrl)
unsigned long sbi_heap_used_space_from(struct sbi_heap_control *hpctrl) unsigned long sbi_heap_used_space_from(struct sbi_heap_control *hpctrl)
{ {
return hpctrl->size - hpctrl->resv - sbi_heap_free_space(); return hpctrl->size - hpctrl->hksize - sbi_heap_free_space();
} }
unsigned long sbi_heap_reserved_space_from(struct sbi_heap_control *hpctrl) unsigned long sbi_heap_reserved_space_from(struct sbi_heap_control *hpctrl)
{ {
return hpctrl->resv; return hpctrl->hksize;
} }
int sbi_heap_init_new(struct sbi_heap_control *hpctrl, unsigned long base, int sbi_heap_init_new(struct sbi_heap_control *hpctrl, unsigned long base,
unsigned long size) unsigned long size)
{ {
unsigned long i;
struct heap_node *n; struct heap_node *n;
/* Initialize heap control */ /* Initialize heap control */
SPIN_LOCK_INIT(hpctrl->lock); SPIN_LOCK_INIT(hpctrl->lock);
hpctrl->base = base; hpctrl->base = base;
hpctrl->size = size; hpctrl->size = size;
hpctrl->resv = 0; hpctrl->hkbase = hpctrl->base;
hpctrl->hksize = hpctrl->size / HEAP_HOUSEKEEPING_FACTOR;
hpctrl->hksize &= ~((unsigned long)HEAP_BASE_ALIGN - 1);
SBI_INIT_LIST_HEAD(&hpctrl->free_node_list); SBI_INIT_LIST_HEAD(&hpctrl->free_node_list);
SBI_INIT_LIST_HEAD(&hpctrl->free_space_list); SBI_INIT_LIST_HEAD(&hpctrl->free_space_list);
SBI_INIT_LIST_HEAD(&hpctrl->used_space_list); SBI_INIT_LIST_HEAD(&hpctrl->used_space_list);
/* Prepare free node list */
for (i = 0; i < (hpctrl->hksize / sizeof(*n)); i++) {
n = (struct heap_node *)(hpctrl->hkbase + (sizeof(*n) * i));
n->addr = n->size = 0;
sbi_list_add_tail(&n->head, &hpctrl->free_node_list);
}
/* Prepare free space list */ /* Prepare free space list */
n = &hpctrl->init_free_space_node; n = sbi_list_first_entry(&hpctrl->free_node_list,
n->addr = base; struct heap_node, head);
n->size = size; sbi_list_del(&n->head);
n->addr = hpctrl->hkbase + hpctrl->hksize;
n->size = hpctrl->size - hpctrl->hksize;
sbi_list_add_tail(&n->head, &hpctrl->free_space_list); sbi_list_add_tail(&n->head, &hpctrl->free_space_list);
return 0; return 0;
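Concretely, the housekeeping carve-out is size / HEAP_HOUSEKEEPING_FACTOR rounded down to HEAP_BASE_ALIGN, and every node inside it becomes a preallocated list element. With illustrative numbers (the node count depends on sizeof(struct heap_node)):

```
/* Worked example for the housekeeping split (numbers illustrative):
 *   base = 0x80040000, size = 0x100000 (1 MiB)
 *   hksize = 0x100000 / 16 = 0x10000 (64 KiB), already aligned
 *
 *   [0x80040000, 0x80050000)  heap_node pool:
 *                             0x10000 / sizeof(struct heap_node) free nodes
 *   [0x80050000, 0x80140000)  single initial free-space region
 */
```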


@@ -47,8 +47,10 @@ struct sbi_hsm_data {
unsigned long saved_mie; unsigned long saved_mie;
unsigned long saved_mip; unsigned long saved_mip;
unsigned long saved_medeleg; unsigned long saved_medeleg;
unsigned long saved_mideleg; unsigned long saved_menvcfg;
u64 saved_menvcfg; #if __riscv_xlen == 32
unsigned long saved_menvcfgh;
#endif
atomic_t start_ticket; atomic_t start_ticket;
}; };
@@ -364,7 +366,7 @@ int sbi_hsm_hart_start(struct sbi_scratch *scratch,
(hsm_device_has_hart_secondary_boot() && !init_count)) { (hsm_device_has_hart_secondary_boot() && !init_count)) {
rc = hsm_device_hart_start(hartid, scratch->warmboot_addr); rc = hsm_device_hart_start(hartid, scratch->warmboot_addr);
} else { } else {
rc = sbi_ipi_raw_send(hartindex, true); rc = sbi_ipi_raw_send(hartindex);
} }
if (!rc) if (!rc)
@@ -427,9 +429,12 @@ void __sbi_hsm_suspend_non_ret_save(struct sbi_scratch *scratch)
hdata->saved_mie = csr_read(CSR_MIE); hdata->saved_mie = csr_read(CSR_MIE);
hdata->saved_mip = csr_read(CSR_MIP) & (MIP_SSIP | MIP_STIP); hdata->saved_mip = csr_read(CSR_MIP) & (MIP_SSIP | MIP_STIP);
hdata->saved_medeleg = csr_read(CSR_MEDELEG); hdata->saved_medeleg = csr_read(CSR_MEDELEG);
hdata->saved_mideleg = csr_read(CSR_MIDELEG); if (sbi_hart_priv_version(scratch) >= SBI_HART_PRIV_VER_1_12) {
if (sbi_hart_priv_version(scratch) >= SBI_HART_PRIV_VER_1_12) #if __riscv_xlen == 32
hdata->saved_menvcfg = csr_read64(CSR_MENVCFG); hdata->saved_menvcfgh = csr_read(CSR_MENVCFGH);
#endif
hdata->saved_menvcfg = csr_read(CSR_MENVCFG);
}
} }
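On RV32, menvcfg is a 64-bit CSR split across MENVCFG/MENVCFGH, which is why this variant needs the __riscv_xlen == 32 branch while the other side can use a single csr_read64(). A sketch of the manual composition:

```
/* Sketch: reading the 64-bit menvcfg value on either XLEN. */
static u64 read_menvcfg64(void)
{
#if __riscv_xlen == 32
	/* Two 32-bit halves on RV32 */
	return ((u64)csr_read(CSR_MENVCFGH) << 32) | csr_read(CSR_MENVCFG);
#else
	/* Single CSR on RV64 */
	return csr_read(CSR_MENVCFG);
#endif
}
```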
static void __sbi_hsm_suspend_non_ret_restore(struct sbi_scratch *scratch) static void __sbi_hsm_suspend_non_ret_restore(struct sbi_scratch *scratch)
@@ -437,9 +442,12 @@ static void __sbi_hsm_suspend_non_ret_restore(struct sbi_scratch *scratch)
struct sbi_hsm_data *hdata = sbi_scratch_offset_ptr(scratch, struct sbi_hsm_data *hdata = sbi_scratch_offset_ptr(scratch,
hart_data_offset); hart_data_offset);
if (sbi_hart_priv_version(scratch) >= SBI_HART_PRIV_VER_1_12) if (sbi_hart_priv_version(scratch) >= SBI_HART_PRIV_VER_1_12) {
csr_write64(CSR_MENVCFG, hdata->saved_menvcfg); csr_write(CSR_MENVCFG, hdata->saved_menvcfg);
csr_write(CSR_MIDELEG, hdata->saved_mideleg); #if __riscv_xlen == 32
csr_write(CSR_MENVCFGH, hdata->saved_menvcfgh);
#endif
}
csr_write(CSR_MEDELEG, hdata->saved_medeleg); csr_write(CSR_MEDELEG, hdata->saved_medeleg);
csr_write(CSR_MIE, hdata->saved_mie); csr_write(CSR_MIE, hdata->saved_mie);
csr_set(CSR_MIP, (hdata->saved_mip & (MIP_SSIP | MIP_STIP))); csr_set(CSR_MIP, (hdata->saved_mip & (MIP_SSIP | MIP_STIP)));
@@ -455,10 +463,7 @@ void sbi_hsm_hart_resume_start(struct sbi_scratch *scratch)
SBI_HSM_STATE_RESUME_PENDING)) SBI_HSM_STATE_RESUME_PENDING))
sbi_hart_hang(); sbi_hart_hang();
if (sbi_system_is_suspended()) hsm_device_hart_resume();
sbi_system_resume();
else
hsm_device_hart_resume();
} }
void __noreturn sbi_hsm_hart_resume_finish(struct sbi_scratch *scratch, void __noreturn sbi_hsm_hart_resume_finish(struct sbi_scratch *scratch,


@@ -18,8 +18,6 @@
#include <sbi/sbi_fwft.h> #include <sbi/sbi_fwft.h>
#include <sbi/sbi_hart.h> #include <sbi/sbi_hart.h>
#include <sbi/sbi_hartmask.h> #include <sbi/sbi_hartmask.h>
#include <sbi/sbi_hart_pmp.h>
#include <sbi/sbi_hart_protection.h>
#include <sbi/sbi_heap.h> #include <sbi/sbi_heap.h>
#include <sbi/sbi_hsm.h> #include <sbi/sbi_hsm.h>
#include <sbi/sbi_ipi.h> #include <sbi/sbi_ipi.h>
@@ -76,7 +74,6 @@ static void sbi_boot_print_general(struct sbi_scratch *scratch)
const struct sbi_hsm_device *hdev; const struct sbi_hsm_device *hdev;
const struct sbi_ipi_device *idev; const struct sbi_ipi_device *idev;
const struct sbi_timer_device *tdev; const struct sbi_timer_device *tdev;
const struct sbi_hart_protection *hprot;
const struct sbi_console_device *cdev; const struct sbi_console_device *cdev;
const struct sbi_system_reset_device *srdev; const struct sbi_system_reset_device *srdev;
const struct sbi_system_suspend_device *susp_dev; const struct sbi_system_suspend_device *susp_dev;
@@ -93,9 +90,6 @@ static void sbi_boot_print_general(struct sbi_scratch *scratch)
sbi_printf("Platform Features : %s\n", str); sbi_printf("Platform Features : %s\n", str);
sbi_printf("Platform HART Count : %u\n", sbi_printf("Platform HART Count : %u\n",
sbi_platform_hart_count(plat)); sbi_platform_hart_count(plat));
hprot = sbi_hart_protection_best();
sbi_printf("Platform HART Protection : %s\n",
(hprot) ? hprot->name : "---");
idev = sbi_ipi_get_device(); idev = sbi_ipi_get_device();
sbi_printf("Platform IPI Device : %s\n", sbi_printf("Platform IPI Device : %s\n",
(idev) ? idev->name : "---"); (idev) ? idev->name : "---");
@@ -390,12 +384,12 @@ static void __noreturn init_coldboot(struct sbi_scratch *scratch, u32 hartid)
} }
/* /*
* Configure hart isolation at last because if SMEPMP is * Configure PMP at last because if SMEPMP is detected,
* detected, M-mode access to the S/U space will be rescinded. * M-mode access to the S/U space will be rescinded.
*/ */
rc = sbi_hart_protection_configure(scratch); rc = sbi_hart_pmp_configure(scratch);
if (rc) { if (rc) {
sbi_printf("%s: hart isolation configure failed (error %d)\n", sbi_printf("%s: PMP configure failed (error %d)\n",
__func__, rc); __func__, rc);
sbi_hart_hang(); sbi_hart_hang();
} }
@@ -469,10 +463,10 @@ static void __noreturn init_warm_startup(struct sbi_scratch *scratch,
sbi_hart_hang(); sbi_hart_hang();
/* /*
* Configure hart isolation at last because if SMEPMP is * Configure PMP at last because if SMEPMP is detected,
* detected, M-mode access to the S/U space will be rescinded. * M-mode access to the S/U space will be rescinded.
*/ */
rc = sbi_hart_protection_configure(scratch); rc = sbi_hart_pmp_configure(scratch);
if (rc) if (rc)
sbi_hart_hang(); sbi_hart_hang();
@@ -493,7 +487,7 @@ static void __noreturn init_warm_resume(struct sbi_scratch *scratch,
if (rc) if (rc)
sbi_hart_hang(); sbi_hart_hang();
rc = sbi_hart_protection_configure(scratch); rc = sbi_hart_pmp_configure(scratch);
if (rc) if (rc)
sbi_hart_hang(); sbi_hart_hang();
@@ -513,7 +507,7 @@ static void __noreturn init_warmboot(struct sbi_scratch *scratch, u32 hartid)
if (hstate == SBI_HSM_STATE_SUSPENDED) { if (hstate == SBI_HSM_STATE_SUSPENDED) {
init_warm_resume(scratch, hartid); init_warm_resume(scratch, hartid);
} else { } else {
sbi_ipi_raw_clear(true); sbi_ipi_raw_clear();
init_warm_startup(scratch, hartid); init_warm_startup(scratch, hartid);
} }
} }


@@ -15,11 +15,9 @@
#include <sbi/sbi_domain.h> #include <sbi/sbi_domain.h>
#include <sbi/sbi_error.h> #include <sbi/sbi_error.h>
#include <sbi/sbi_hart.h> #include <sbi/sbi_hart.h>
#include <sbi/sbi_heap.h>
#include <sbi/sbi_hsm.h> #include <sbi/sbi_hsm.h>
#include <sbi/sbi_init.h> #include <sbi/sbi_init.h>
#include <sbi/sbi_ipi.h> #include <sbi/sbi_ipi.h>
#include <sbi/sbi_list.h>
#include <sbi/sbi_platform.h> #include <sbi/sbi_platform.h>
#include <sbi/sbi_pmu.h> #include <sbi/sbi_pmu.h>
#include <sbi/sbi_string.h> #include <sbi/sbi_string.h>
@@ -34,14 +32,8 @@ _Static_assert(
"type of sbi_ipi_data.ipi_type has changed, please redefine SBI_IPI_EVENT_MAX" "type of sbi_ipi_data.ipi_type has changed, please redefine SBI_IPI_EVENT_MAX"
); );
struct sbi_ipi_device_node {
struct sbi_dlist head;
const struct sbi_ipi_device *dev;
};
static unsigned long ipi_data_off; static unsigned long ipi_data_off;
static const struct sbi_ipi_device *ipi_dev = NULL; static const struct sbi_ipi_device *ipi_dev = NULL;
static SBI_LIST_HEAD(ipi_dev_node_list);
static const struct sbi_ipi_event_ops *ipi_ops_array[SBI_IPI_EVENT_MAX]; static const struct sbi_ipi_event_ops *ipi_ops_array[SBI_IPI_EVENT_MAX];
static int sbi_ipi_send(struct sbi_scratch *scratch, u32 remote_hartindex, static int sbi_ipi_send(struct sbi_scratch *scratch, u32 remote_hartindex,
@@ -88,7 +80,7 @@ static int sbi_ipi_send(struct sbi_scratch *scratch, u32 remote_hartindex,
*/ */
if (!__atomic_fetch_or(&ipi_data->ipi_type, if (!__atomic_fetch_or(&ipi_data->ipi_type,
BIT(event), __ATOMIC_RELAXED)) BIT(event), __ATOMIC_RELAXED))
ret = sbi_ipi_raw_send(remote_hartindex, false); ret = sbi_ipi_raw_send(remote_hartindex);
sbi_pmu_ctr_incr_fw(SBI_PMU_FW_IPI_SENT); sbi_pmu_ctr_incr_fw(SBI_PMU_FW_IPI_SENT);
@@ -256,7 +248,7 @@ void sbi_ipi_process(void)
sbi_scratch_offset_ptr(scratch, ipi_data_off); sbi_scratch_offset_ptr(scratch, ipi_data_off);
sbi_pmu_ctr_incr_fw(SBI_PMU_FW_IPI_RECVD); sbi_pmu_ctr_incr_fw(SBI_PMU_FW_IPI_RECVD);
sbi_ipi_raw_clear(false); sbi_ipi_raw_clear();
ipi_type = atomic_raw_xchg_ulong(&ipi_data->ipi_type, 0); ipi_type = atomic_raw_xchg_ulong(&ipi_data->ipi_type, 0);
ipi_event = 0; ipi_event = 0;
@@ -271,10 +263,8 @@ void sbi_ipi_process(void)
} }
} }
int sbi_ipi_raw_send(u32 hartindex, bool all_devices) int sbi_ipi_raw_send(u32 hartindex)
{ {
struct sbi_ipi_device_node *entry;
if (!ipi_dev || !ipi_dev->ipi_send) if (!ipi_dev || !ipi_dev->ipi_send)
return SBI_EINVAL; return SBI_EINVAL;
@@ -289,31 +279,14 @@ int sbi_ipi_raw_send(u32 hartindex, bool all_devices)
*/ */
wmb(); wmb();
if (all_devices) { ipi_dev->ipi_send(hartindex);
sbi_list_for_each_entry(entry, &ipi_dev_node_list, head) {
if (entry->dev->ipi_send)
entry->dev->ipi_send(hartindex);
}
} else {
ipi_dev->ipi_send(hartindex);
}
return 0; return 0;
} }
void sbi_ipi_raw_clear(bool all_devices) void sbi_ipi_raw_clear(void)
{ {
struct sbi_ipi_device_node *entry; if (ipi_dev && ipi_dev->ipi_clear)
ipi_dev->ipi_clear();
if (all_devices) {
sbi_list_for_each_entry(entry, &ipi_dev_node_list, head) {
if (entry->dev->ipi_clear)
entry->dev->ipi_clear();
}
} else {
if (ipi_dev && ipi_dev->ipi_clear)
ipi_dev->ipi_clear();
}
/* /*
* Ensure that memory or MMIO writes after this * Ensure that memory or MMIO writes after this
@@ -332,22 +305,12 @@ const struct sbi_ipi_device *sbi_ipi_get_device(void)
return ipi_dev; return ipi_dev;
} }
void sbi_ipi_add_device(const struct sbi_ipi_device *dev) void sbi_ipi_set_device(const struct sbi_ipi_device *dev)
{ {
struct sbi_ipi_device_node *entry; if (!dev || ipi_dev)
if (!dev)
return; return;
entry = sbi_zalloc(sizeof(*entry)); ipi_dev = dev;
if (!entry)
return;
SBI_INIT_LIST_HEAD(&entry->head);
entry->dev = dev;
sbi_list_add_tail(&entry->head, &ipi_dev_node_list);
if (!ipi_dev || ipi_dev->rating < dev->rating)
ipi_dev = dev;
} }
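With sbi_ipi_add_device(), several IPI devices can coexist: every registration lands on the list, ipi_dev tracks the highest-rated one for ordinary sends, and the all_devices path (used by sbi_hsm_hart_start() above) broadcasts through the whole list. A sketch of registering two devices, with names and ratings assumed for illustration:

```
/* Sketch: two IPI devices; the higher rating becomes the default. */
static void mswi_send(u32 hartindex)  { /* MMIO write to MSWI */ }
static void mswi_clear(void)          { /* MMIO clear */ }
static void imsic_send(u32 hartindex) { /* MSI write to IMSIC */ }

static const struct sbi_ipi_device mswi_ipi = {
	.name      = "aclint-mswi",
	.rating    = 100,
	.ipi_send  = mswi_send,
	.ipi_clear = mswi_clear,
};

static const struct sbi_ipi_device imsic_ipi = {
	.name     = "aia-imsic",
	.rating   = 200,	/* preferred over aclint-mswi */
	.ipi_send = imsic_send,
};

static void setup_ipi_devices(void)
{
	sbi_ipi_add_device(&mswi_ipi);
	sbi_ipi_add_device(&imsic_ipi);	/* ipi_dev now points here */
}
```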
int sbi_ipi_init(struct sbi_scratch *scratch, bool cold_boot) int sbi_ipi_init(struct sbi_scratch *scratch, bool cold_boot)
@@ -367,6 +330,11 @@ int sbi_ipi_init(struct sbi_scratch *scratch, bool cold_boot)
if (ret < 0) if (ret < 0)
return ret; return ret;
ipi_halt_event = ret; ipi_halt_event = ret;
/* Initialize platform IPI support */
ret = sbi_platform_ipi_init(sbi_platform_ptr(scratch));
if (ret)
return ret;
} else { } else {
if (!ipi_data_off) if (!ipi_data_off)
return SBI_ENOMEM; return SBI_ENOMEM;
@@ -379,7 +347,7 @@ int sbi_ipi_init(struct sbi_scratch *scratch, bool cold_boot)
ipi_data->ipi_type = 0x00; ipi_data->ipi_type = 0x00;
/* Clear any pending IPIs for the current hart */ /* Clear any pending IPIs for the current hart */
sbi_ipi_raw_clear(true); sbi_ipi_raw_clear();
/* Enable software interrupts */ /* Enable software interrupts */
csr_set(CSR_MIE, MIP_MSIP); csr_set(CSR_MIE, MIP_MSIP);


@@ -11,7 +11,6 @@
#include <sbi/sbi_domain.h> #include <sbi/sbi_domain.h>
#include <sbi/sbi_error.h> #include <sbi/sbi_error.h>
#include <sbi/sbi_hart.h> #include <sbi/sbi_hart.h>
#include <sbi/sbi_hart_protection.h>
#include <sbi/sbi_heap.h> #include <sbi/sbi_heap.h>
#include <sbi/sbi_platform.h> #include <sbi/sbi_platform.h>
#include <sbi/sbi_mpxy.h> #include <sbi/sbi_mpxy.h>
@@ -376,10 +375,10 @@ int sbi_mpxy_set_shmem(unsigned long shmem_phys_lo,
if (flags == SBI_EXT_MPXY_SHMEM_FLAG_OVERWRITE_RETURN) { if (flags == SBI_EXT_MPXY_SHMEM_FLAG_OVERWRITE_RETURN) {
ret_buf = (unsigned long *)(ulong)SHMEM_PHYS_ADDR(shmem_phys_hi, ret_buf = (unsigned long *)(ulong)SHMEM_PHYS_ADDR(shmem_phys_hi,
shmem_phys_lo); shmem_phys_lo);
sbi_hart_protection_map_range((unsigned long)ret_buf, mpxy_shmem_size); sbi_hart_map_saddr((unsigned long)ret_buf, mpxy_shmem_size);
ret_buf[0] = cpu_to_lle(ms->shmem.shmem_addr_lo); ret_buf[0] = cpu_to_lle(ms->shmem.shmem_addr_lo);
ret_buf[1] = cpu_to_lle(ms->shmem.shmem_addr_hi); ret_buf[1] = cpu_to_lle(ms->shmem.shmem_addr_hi);
sbi_hart_protection_unmap_range((unsigned long)ret_buf, mpxy_shmem_size); sbi_hart_unmap_saddr();
} }
/** Setup the new shared memory */ /** Setup the new shared memory */
@@ -408,7 +407,7 @@ int sbi_mpxy_get_channel_ids(u32 start_index)
return SBI_ERR_INVALID_PARAM; return SBI_ERR_INVALID_PARAM;
shmem_base = hart_shmem_base(ms); shmem_base = hart_shmem_base(ms);
sbi_hart_protection_map_range((unsigned long)hart_shmem_base(ms), mpxy_shmem_size); sbi_hart_map_saddr((unsigned long)hart_shmem_base(ms), mpxy_shmem_size);
/** number of channel ids which can be stored in shmem adjusting /** number of channel ids which can be stored in shmem adjusting
* for remaining and returned fields */ * for remaining and returned fields */
@@ -435,7 +434,7 @@ int sbi_mpxy_get_channel_ids(u32 start_index)
shmem_base[0] = cpu_to_le32(remaining); shmem_base[0] = cpu_to_le32(remaining);
shmem_base[1] = cpu_to_le32(returned); shmem_base[1] = cpu_to_le32(returned);
sbi_hart_protection_unmap_range((unsigned long)hart_shmem_base(ms), mpxy_shmem_size); sbi_hart_unmap_saddr();
return SBI_SUCCESS; return SBI_SUCCESS;
} }
@@ -466,7 +465,7 @@ int sbi_mpxy_read_attrs(u32 channel_id, u32 base_attr_id, u32 attr_count)
shmem_base = hart_shmem_base(ms); shmem_base = hart_shmem_base(ms);
end_id = base_attr_id + attr_count - 1; end_id = base_attr_id + attr_count - 1;
sbi_hart_protection_map_range((unsigned long)hart_shmem_base(ms), mpxy_shmem_size); sbi_hart_map_saddr((unsigned long)hart_shmem_base(ms), mpxy_shmem_size);
/* Standard attributes range check */ /* Standard attributes range check */
if (mpxy_is_std_attr(base_attr_id)) { if (mpxy_is_std_attr(base_attr_id)) {
@@ -505,7 +504,7 @@ int sbi_mpxy_read_attrs(u32 channel_id, u32 base_attr_id, u32 attr_count)
base_attr_id, attr_count); base_attr_id, attr_count);
} }
out: out:
sbi_hart_protection_unmap_range((unsigned long)hart_shmem_base(ms), mpxy_shmem_size); sbi_hart_unmap_saddr();
return ret; return ret;
} }
@@ -617,7 +616,7 @@ int sbi_mpxy_write_attrs(u32 channel_id, u32 base_attr_id, u32 attr_count)
shmem_base = hart_shmem_base(ms); shmem_base = hart_shmem_base(ms);
end_id = base_attr_id + attr_count - 1; end_id = base_attr_id + attr_count - 1;
sbi_hart_protection_map_range((unsigned long)shmem_base, mpxy_shmem_size); sbi_hart_map_saddr((unsigned long)shmem_base, mpxy_shmem_size);
mem_ptr = (u32 *)shmem_base; mem_ptr = (u32 *)shmem_base;
@@ -674,7 +673,7 @@ int sbi_mpxy_write_attrs(u32 channel_id, u32 base_attr_id, u32 attr_count)
base_attr_id, attr_count); base_attr_id, attr_count);
} }
out: out:
sbi_hart_protection_unmap_range((unsigned long)shmem_base, mpxy_shmem_size); sbi_hart_unmap_saddr();
return ret; return ret;
} }
@@ -706,7 +705,7 @@ int sbi_mpxy_send_message(u32 channel_id, u8 msg_id,
return SBI_ERR_INVALID_PARAM; return SBI_ERR_INVALID_PARAM;
shmem_base = hart_shmem_base(ms); shmem_base = hart_shmem_base(ms);
sbi_hart_protection_map_range((unsigned long)shmem_base, mpxy_shmem_size); sbi_hart_map_saddr((unsigned long)shmem_base, mpxy_shmem_size);
if (resp_data_len) { if (resp_data_len) {
resp_buf = shmem_base; resp_buf = shmem_base;
@@ -723,7 +722,7 @@ int sbi_mpxy_send_message(u32 channel_id, u8 msg_id,
msg_data_len); msg_data_len);
} }
sbi_hart_protection_unmap_range((unsigned long)shmem_base, mpxy_shmem_size); sbi_hart_unmap_saddr();
if (ret == SBI_ERR_TIMEOUT || ret == SBI_ERR_IO) if (ret == SBI_ERR_TIMEOUT || ret == SBI_ERR_IO)
return ret; return ret;
@@ -753,12 +752,12 @@ int sbi_mpxy_get_notification_events(u32 channel_id, unsigned long *events_len)
return SBI_ERR_NOT_SUPPORTED; return SBI_ERR_NOT_SUPPORTED;
shmem_base = hart_shmem_base(ms); shmem_base = hart_shmem_base(ms);
sbi_hart_protection_map_range((unsigned long)shmem_base, mpxy_shmem_size); sbi_hart_map_saddr((unsigned long)shmem_base, mpxy_shmem_size);
eventsbuf = shmem_base; eventsbuf = shmem_base;
ret = channel->get_notification_events(channel, eventsbuf, ret = channel->get_notification_events(channel, eventsbuf,
mpxy_shmem_size, mpxy_shmem_size,
events_len); events_len);
sbi_hart_protection_unmap_range((unsigned long)shmem_base, mpxy_shmem_size); sbi_hart_unmap_saddr();
if (ret) if (ret)
return ret; return ret;

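The renamed calls keep the same bracketing discipline: every access to the supervisor's shared memory is wrapped in a map/unmap pair. A sketch of the idiom, with hypothetical `map_shmem()`/`unmap_shmem()` stand-ins for the functions renamed above:

```c
/* Illustration only: map_shmem()/unmap_shmem() are hypothetical stand-ins
 * for the map/unmap calls renamed in the hunks above. */
static void map_shmem(unsigned long addr, unsigned long size)   { /* grant M-mode access */ }
static void unmap_shmem(unsigned long addr, unsigned long size) { /* revoke it again */ }

static void copy_reply(unsigned long shmem, unsigned long size,
		       const unsigned long *reply, unsigned long count)
{
	unsigned long *buf = (unsigned long *)shmem;

	map_shmem(shmem, size);		/* open the window ... */
	for (unsigned long i = 0; i < count; i++)
		buf[i] = reply[i];	/* ... touch the supervisor buffer ... */
	unmap_shmem(shmem, size);	/* ... and close it before returning */
}
```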

@@ -13,7 +13,6 @@
#include <sbi/sbi_domain.h> #include <sbi/sbi_domain.h>
#include <sbi/sbi_ecall_interface.h> #include <sbi/sbi_ecall_interface.h>
#include <sbi/sbi_hart.h> #include <sbi/sbi_hart.h>
#include <sbi/sbi_hart_protection.h>
#include <sbi/sbi_heap.h> #include <sbi/sbi_heap.h>
#include <sbi/sbi_platform.h> #include <sbi/sbi_platform.h>
#include <sbi/sbi_pmu.h> #include <sbi/sbi_pmu.h>
@@ -57,14 +56,6 @@ union sbi_pmu_ctr_info {
#error "Can't handle firmware counters beyond BITS_PER_LONG" #error "Can't handle firmware counters beyond BITS_PER_LONG"
#endif #endif
/** HW event configuration parameters */
struct sbi_pmu_hw_event_config {
/* event_data value from sbi_pmu_ctr_cfg_match() */
uint64_t event_data;
/* HW events flags from sbi_pmu_ctr_cfg_match() */
uint64_t flags;
};
/** Per-HART state of the PMU counters */ /** Per-HART state of the PMU counters */
struct sbi_pmu_hart_state { struct sbi_pmu_hart_state {
/* HART to which this state belongs */ /* HART to which this state belongs */
@@ -81,12 +72,6 @@ struct sbi_pmu_hart_state {
* and hence can optimally share the same memory. * and hence can optimally share the same memory.
*/ */
uint64_t fw_counters_data[SBI_PMU_FW_CTR_MAX]; uint64_t fw_counters_data[SBI_PMU_FW_CTR_MAX];
/* HW event configuration parameters from the
 * sbi_pmu_ctr_cfg_match() call, used to restore
 * RAW hardware events after CPU suspend.
*/
struct sbi_pmu_hw_event_config hw_counters_cfg[SBI_PMU_HW_CTR_MAX];
}; };
/** Offset of pointer to PMU HART state in scratch space */ /** Offset of pointer to PMU HART state in scratch space */
@@ -458,7 +443,7 @@ static int pmu_ctr_start_fw(struct sbi_pmu_hart_state *phs,
!pmu_dev->fw_counter_write_value || !pmu_dev->fw_counter_write_value ||
!pmu_dev->fw_counter_start) { !pmu_dev->fw_counter_start) {
return SBI_EINVAL; return SBI_EINVAL;
} }
if (ival_update) if (ival_update)
pmu_dev->fw_counter_write_value(phs->hartid, pmu_dev->fw_counter_write_value(phs->hartid,
@@ -478,61 +463,6 @@ static int pmu_ctr_start_fw(struct sbi_pmu_hart_state *phs,
return 0; return 0;
} }
static void pmu_update_inhibit_flags(unsigned long flags, uint64_t *mhpmevent_val)
{
if (flags & SBI_PMU_CFG_FLAG_SET_VUINH)
*mhpmevent_val |= MHPMEVENT_VUINH;
if (flags & SBI_PMU_CFG_FLAG_SET_VSINH)
*mhpmevent_val |= MHPMEVENT_VSINH;
if (flags & SBI_PMU_CFG_FLAG_SET_UINH)
*mhpmevent_val |= MHPMEVENT_UINH;
if (flags & SBI_PMU_CFG_FLAG_SET_SINH)
*mhpmevent_val |= MHPMEVENT_SINH;
}
static int pmu_update_hw_mhpmevent(struct sbi_pmu_hw_event *hw_evt, int ctr_idx,
unsigned long flags, unsigned long eindex,
uint64_t data)
{
struct sbi_scratch *scratch = sbi_scratch_thishart_ptr();
const struct sbi_platform *plat = sbi_platform_ptr(scratch);
uint64_t mhpmevent_val;
/* Get the final mhpmevent value to be written from platform */
mhpmevent_val = sbi_platform_pmu_xlate_to_mhpmevent(plat, eindex, data);
if (!mhpmevent_val || ctr_idx < 3 || ctr_idx >= SBI_PMU_HW_CTR_MAX)
return SBI_EFAIL;
/**
* Always set the OVF bit (disable interrupts) and inhibit counting of
* events in M-mode. The OVF bit should be enabled during the start call.
*/
if (sbi_hart_has_extension(scratch, SBI_HART_EXT_SSCOFPMF))
mhpmevent_val = (mhpmevent_val & ~MHPMEVENT_SSCOF_MASK) |
MHPMEVENT_MINH | MHPMEVENT_OF;
if (pmu_dev && pmu_dev->hw_counter_disable_irq)
pmu_dev->hw_counter_disable_irq(ctr_idx);
/* Update the inhibit flags based on inhibit flags received from supervisor */
if (sbi_hart_has_extension(scratch, SBI_HART_EXT_SSCOFPMF))
pmu_update_inhibit_flags(flags, &mhpmevent_val);
if (pmu_dev && pmu_dev->hw_counter_filter_mode)
pmu_dev->hw_counter_filter_mode(flags, ctr_idx);
#if __riscv_xlen == 32
csr_write_num(CSR_MHPMEVENT3 + ctr_idx - 3, mhpmevent_val & 0xFFFFFFFF);
if (sbi_hart_has_extension(scratch, SBI_HART_EXT_SSCOFPMF))
csr_write_num(CSR_MHPMEVENT3H + ctr_idx - 3,
mhpmevent_val >> BITS_PER_LONG);
#else
csr_write_num(CSR_MHPMEVENT3 + ctr_idx - 3, mhpmevent_val);
#endif
return 0;
}
int sbi_pmu_ctr_start(unsigned long cbase, unsigned long cmask, int sbi_pmu_ctr_start(unsigned long cbase, unsigned long cmask,
unsigned long flags, uint64_t ival) unsigned long flags, uint64_t ival)
{ {
@@ -569,20 +499,9 @@ int sbi_pmu_ctr_start(unsigned long cbase, unsigned long cmask,
: 0x0; : 0x0;
ret = pmu_ctr_start_fw(phs, cidx, event_code, edata, ret = pmu_ctr_start_fw(phs, cidx, event_code, edata,
ival, bUpdate); ival, bUpdate);
} else {
if (cidx >= 3) {
struct sbi_pmu_hw_event_config *ev_cfg =
&phs->hw_counters_cfg[cidx];
ret = pmu_update_hw_mhpmevent(&hw_event_map[cidx], cidx,
ev_cfg->flags,
phs->active_events[cidx],
ev_cfg->event_data);
if (ret)
return ret;
}
ret = pmu_ctr_start_hw(cidx, ival, bUpdate);
} }
else
ret = pmu_ctr_start_hw(cidx, ival, bUpdate);
} }
return ret; return ret;
@@ -695,6 +614,61 @@ int sbi_pmu_ctr_stop(unsigned long cbase, unsigned long cmask,
return ret; return ret;
} }
static void pmu_update_inhibit_flags(unsigned long flags, uint64_t *mhpmevent_val)
{
if (flags & SBI_PMU_CFG_FLAG_SET_VUINH)
*mhpmevent_val |= MHPMEVENT_VUINH;
if (flags & SBI_PMU_CFG_FLAG_SET_VSINH)
*mhpmevent_val |= MHPMEVENT_VSINH;
if (flags & SBI_PMU_CFG_FLAG_SET_UINH)
*mhpmevent_val |= MHPMEVENT_UINH;
if (flags & SBI_PMU_CFG_FLAG_SET_SINH)
*mhpmevent_val |= MHPMEVENT_SINH;
}
static int pmu_update_hw_mhpmevent(struct sbi_pmu_hw_event *hw_evt, int ctr_idx,
unsigned long flags, unsigned long eindex,
uint64_t data)
{
struct sbi_scratch *scratch = sbi_scratch_thishart_ptr();
const struct sbi_platform *plat = sbi_platform_ptr(scratch);
uint64_t mhpmevent_val;
/* Get the final mhpmevent value to be written from platform */
mhpmevent_val = sbi_platform_pmu_xlate_to_mhpmevent(plat, eindex, data);
if (!mhpmevent_val || ctr_idx < 3 || ctr_idx >= SBI_PMU_HW_CTR_MAX)
return SBI_EFAIL;
/**
* Always set the OVF bit (disable interrupts) and inhibit counting of
* events in M-mode. The OVF bit should be enabled during the start call.
*/
if (sbi_hart_has_extension(scratch, SBI_HART_EXT_SSCOFPMF))
mhpmevent_val = (mhpmevent_val & ~MHPMEVENT_SSCOF_MASK) |
MHPMEVENT_MINH | MHPMEVENT_OF;
if (pmu_dev && pmu_dev->hw_counter_disable_irq)
pmu_dev->hw_counter_disable_irq(ctr_idx);
/* Update the inhibit flags based on inhibit flags received from supervisor */
if (sbi_hart_has_extension(scratch, SBI_HART_EXT_SSCOFPMF))
pmu_update_inhibit_flags(flags, &mhpmevent_val);
if (pmu_dev && pmu_dev->hw_counter_filter_mode)
pmu_dev->hw_counter_filter_mode(flags, ctr_idx);
#if __riscv_xlen == 32
csr_write_num(CSR_MHPMEVENT3 + ctr_idx - 3, mhpmevent_val & 0xFFFFFFFF);
if (sbi_hart_has_extension(scratch, SBI_HART_EXT_SSCOFPMF))
csr_write_num(CSR_MHPMEVENT3H + ctr_idx - 3,
mhpmevent_val >> BITS_PER_LONG);
#else
csr_write_num(CSR_MHPMEVENT3 + ctr_idx - 3, mhpmevent_val);
#endif
return 0;
}
static int pmu_fixed_ctr_update_inhibit_bits(int fixed_ctr, unsigned long flags) static int pmu_fixed_ctr_update_inhibit_bits(int fixed_ctr, unsigned long flags)
{ {
struct sbi_scratch *scratch = sbi_scratch_thishart_ptr(); struct sbi_scratch *scratch = sbi_scratch_thishart_ptr();
@@ -806,7 +780,6 @@ static int pmu_ctr_find_hw(struct sbi_pmu_hart_state *phs,
continue; continue;
/* We found a valid counter that is not started yet */ /* We found a valid counter that is not started yet */
ctr_idx = cbase; ctr_idx = cbase;
break;
} }
} }
@@ -844,7 +817,7 @@ static int pmu_ctr_find_fw(struct sbi_pmu_hart_state *phs,
cidx = i + cbase; cidx = i + cbase;
if (cidx < num_hw_ctrs || total_ctrs <= cidx) if (cidx < num_hw_ctrs || total_ctrs <= cidx)
continue; continue;
if (phs->active_events[cidx] != SBI_PMU_EVENT_IDX_INVALID) if (phs->active_events[i] != SBI_PMU_EVENT_IDX_INVALID)
continue; continue;
if (SBI_PMU_FW_PLATFORM == event_code && if (SBI_PMU_FW_PLATFORM == event_code &&
pmu_dev && pmu_dev->fw_counter_match_encoding) { pmu_dev && pmu_dev->fw_counter_match_encoding) {
@@ -854,7 +827,7 @@ static int pmu_ctr_find_fw(struct sbi_pmu_hart_state *phs,
continue; continue;
} }
return cidx; return i;
} }
return SBI_ENOTSUPP; return SBI_ENOTSUPP;
@@ -900,20 +873,12 @@ int sbi_pmu_ctr_cfg_match(unsigned long cidx_base, unsigned long cidx_mask,
/* Any firmware counter can be used track any firmware event */ /* Any firmware counter can be used track any firmware event */
ctr_idx = pmu_ctr_find_fw(phs, cidx_base, cidx_mask, ctr_idx = pmu_ctr_find_fw(phs, cidx_base, cidx_mask,
event_code, event_data); event_code, event_data);
if ((event_code == SBI_PMU_FW_PLATFORM) && (ctr_idx >= num_hw_ctrs)) if (event_code == SBI_PMU_FW_PLATFORM)
phs->fw_counters_data[ctr_idx - num_hw_ctrs] = phs->fw_counters_data[ctr_idx - num_hw_ctrs] =
event_data; event_data;
} else { } else {
ctr_idx = pmu_ctr_find_hw(phs, cidx_base, cidx_mask, flags, ctr_idx = pmu_ctr_find_hw(phs, cidx_base, cidx_mask, flags,
event_idx, event_data); event_idx, event_data);
if (ctr_idx >= 0) {
struct sbi_pmu_hw_event_config *ev_cfg =
&phs->hw_counters_cfg[ctr_idx];
ev_cfg->event_data = event_data;
/* Remove flags that are used in match call only */
ev_cfg->flags = flags & SBI_PMU_CFG_EVENT_MASK;
}
} }
if (ctr_idx < 0) if (ctr_idx < 0)
@@ -1054,7 +1019,7 @@ int sbi_pmu_event_get_info(unsigned long shmem_phys_lo, unsigned long shmem_phys
SBI_DOMAIN_READ | SBI_DOMAIN_WRITE)) SBI_DOMAIN_READ | SBI_DOMAIN_WRITE))
return SBI_ERR_INVALID_ADDRESS; return SBI_ERR_INVALID_ADDRESS;
sbi_hart_protection_map_range(shmem_phys_lo, shmem_size); sbi_hart_map_saddr(shmem_phys_lo, shmem_size);
einfo = (struct sbi_pmu_event_info *)(shmem_phys_lo); einfo = (struct sbi_pmu_event_info *)(shmem_phys_lo);
for (i = 0; i < num_events; i++) { for (i = 0; i < num_events; i++) {
@@ -1088,7 +1053,7 @@ int sbi_pmu_event_get_info(unsigned long shmem_phys_lo, unsigned long shmem_phys
} }
} }
sbi_hart_protection_unmap_range(shmem_phys_lo, shmem_size); sbi_hart_unmap_saddr();
return 0; return 0;
} }

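pmu_update_hw_mhpmevent() (it appears twice above because the function only moves within the file) must split the 64-bit event value across two CSRs on RV32. A sketch of just that split, reusing `csr_write_num()` and the CSR_MHPMEVENT3/CSR_MHPMEVENT3H names from the diff; the Sscofpmf guard on the high-half write is omitted here:

```c
#include <sbi/riscv_asm.h>	/* csr_write_num(), as used in the diff */
#include <sbi/riscv_encoding.h>	/* CSR_MHPMEVENT3 / CSR_MHPMEVENT3H */

/* Sketch of the XLEN-dependent write at the end of pmu_update_hw_mhpmevent().
 * The real code also guards the high-half write with an Sscofpmf check. */
static void write_mhpmevent(int ctr_idx, uint64_t val)
{
#if __riscv_xlen == 32
	csr_write_num(CSR_MHPMEVENT3 + ctr_idx - 3, val & 0xFFFFFFFF);
	csr_write_num(CSR_MHPMEVENT3H + ctr_idx - 3, val >> 32);
#else
	csr_write_num(CSR_MHPMEVENT3 + ctr_idx - 3, val);
#endif
}
```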

@@ -15,7 +15,6 @@
#include <sbi/sbi_error.h> #include <sbi/sbi_error.h>
#include <sbi/sbi_fifo.h> #include <sbi/sbi_fifo.h>
#include <sbi/sbi_hart.h> #include <sbi/sbi_hart.h>
#include <sbi/sbi_hart_protection.h>
#include <sbi/sbi_heap.h> #include <sbi/sbi_heap.h>
#include <sbi/sbi_hsm.h> #include <sbi/sbi_hsm.h>
#include <sbi/sbi_ipi.h> #include <sbi/sbi_ipi.h>
@@ -1037,7 +1036,7 @@ int sbi_sse_read_attrs(uint32_t event_id, uint32_t base_attr_id,
if (ret) if (ret)
return ret; return ret;
sbi_hart_protection_map_range(output_phys_lo, sizeof(unsigned long) * attr_count); sbi_hart_map_saddr(output_phys_lo, sizeof(unsigned long) * attr_count);
/* /*
* Copy all attributes at once since struct sse_event_attrs is matching * Copy all attributes at once since struct sse_event_attrs is matching
@@ -1050,7 +1049,7 @@ int sbi_sse_read_attrs(uint32_t event_id, uint32_t base_attr_id,
attrs = (unsigned long *)output_phys_lo; attrs = (unsigned long *)output_phys_lo;
copy_attrs(attrs, &e_attrs[base_attr_id], attr_count); copy_attrs(attrs, &e_attrs[base_attr_id], attr_count);
sbi_hart_protection_unmap_range(output_phys_lo, sizeof(unsigned long) * attr_count); sbi_hart_unmap_saddr();
sse_event_put(e); sse_event_put(e);
@@ -1065,7 +1064,7 @@ static int sse_write_attrs(struct sbi_sse_event *e, uint32_t base_attr_id,
uint32_t id, end_id = base_attr_id + attr_count; uint32_t id, end_id = base_attr_id + attr_count;
unsigned long *attrs = (unsigned long *)input_phys; unsigned long *attrs = (unsigned long *)input_phys;
sbi_hart_protection_map_range(input_phys, sizeof(unsigned long) * attr_count); sbi_hart_map_saddr(input_phys, sizeof(unsigned long) * attr_count);
for (id = base_attr_id; id < end_id; id++) { for (id = base_attr_id; id < end_id; id++) {
val = attrs[attr++]; val = attrs[attr++];
@@ -1081,7 +1080,7 @@ static int sse_write_attrs(struct sbi_sse_event *e, uint32_t base_attr_id,
} }
out: out:
sbi_hart_protection_unmap_range(input_phys, sizeof(unsigned long) * attr_count); sbi_hart_unmap_saddr();
return ret; return ret;
} }


@@ -87,7 +87,6 @@ void __noreturn sbi_system_reset(u32 reset_type, u32 reset_reason)
} }
static const struct sbi_system_suspend_device *suspend_dev = NULL; static const struct sbi_system_suspend_device *suspend_dev = NULL;
static bool system_suspended;
const struct sbi_system_suspend_device *sbi_system_suspend_get_device(void) const struct sbi_system_suspend_device *sbi_system_suspend_get_device(void)
{ {
@@ -138,19 +137,6 @@ bool sbi_system_suspend_supported(u32 sleep_type)
suspend_dev->system_suspend_check(sleep_type) == 0; suspend_dev->system_suspend_check(sleep_type) == 0;
} }
bool sbi_system_is_suspended(void)
{
return system_suspended;
}
void sbi_system_resume(void)
{
if (suspend_dev && suspend_dev->system_resume)
suspend_dev->system_resume();
system_suspended = false;
}
int sbi_system_suspend(u32 sleep_type, ulong resume_addr, ulong opaque) int sbi_system_suspend(u32 sleep_type, ulong resume_addr, ulong opaque)
{ {
struct sbi_domain *dom = sbi_domain_thishart_ptr(); struct sbi_domain *dom = sbi_domain_thishart_ptr();
@@ -203,14 +189,11 @@ int sbi_system_suspend(u32 sleep_type, ulong resume_addr, ulong opaque)
__sbi_hsm_suspend_non_ret_save(scratch); __sbi_hsm_suspend_non_ret_save(scratch);
/* Suspend */ /* Suspend */
system_suspended = true;
ret = suspend_dev->system_suspend(sleep_type, scratch->warmboot_addr); ret = suspend_dev->system_suspend(sleep_type, scratch->warmboot_addr);
if (ret != SBI_OK) { if (ret != SBI_OK) {
if (!sbi_hsm_hart_change_state(scratch, SBI_HSM_STATE_SUSPENDED, if (!sbi_hsm_hart_change_state(scratch, SBI_HSM_STATE_SUSPENDED,
SBI_HSM_STATE_STARTED)) SBI_HSM_STATE_STARTED))
sbi_hart_hang(); sbi_hart_hang();
system_suspended = false;
return ret; return ret;
} }

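The deleted `system_suspended` flag is a plain in-flight marker around the blocking suspend call. A sketch of the pattern in isolation, with a hypothetical `enter()` callback:

```c
#include <stdbool.h>

/* Sketch of the bookkeeping removed above; enter() is a hypothetical
 * stand-in for the device's blocking system_suspend callback. */
static bool system_suspended;

static int do_system_suspend(int (*enter)(void))
{
	int ret;

	system_suspended = true;
	ret = enter();			/* blocks; on success the resume path
					 * (cf. sbi_system_resume() above)
					 * clears the flag */
	if (ret)
		system_suspended = false;	/* failed: we never slept */
	return ret;
}

static bool is_suspended(void) { return system_suspended; }
```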

@@ -29,7 +29,7 @@ static unsigned long tlb_fifo_off;
static unsigned long tlb_fifo_mem_off; static unsigned long tlb_fifo_mem_off;
static unsigned long tlb_range_flush_limit; static unsigned long tlb_range_flush_limit;
void __sbi_sfence_vma_all(void) static void tlb_flush_all(void)
{ {
__asm__ __volatile("sfence.vma"); __asm__ __volatile("sfence.vma");
} }
@@ -86,7 +86,7 @@ static void sbi_tlb_local_sfence_vma(struct sbi_tlb_info *tinfo)
sbi_pmu_ctr_incr_fw(SBI_PMU_FW_SFENCE_VMA_RCVD); sbi_pmu_ctr_incr_fw(SBI_PMU_FW_SFENCE_VMA_RCVD);
if ((start == 0 && size == 0) || (size == SBI_TLB_FLUSH_ALL)) { if ((start == 0 && size == 0) || (size == SBI_TLB_FLUSH_ALL)) {
__sbi_sfence_vma_all(); tlb_flush_all();
return; return;
} }

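The helper above issues a full `sfence.vma` when the range degenerates (zero start and size, or the SBI_TLB_FLUSH_ALL sentinel); per-page flushes elsewhere in this file pass an address operand instead. A sketch of the two forms, with conventional RISC-V inline-asm constraints:

```c
/* Sketch: the two sfence.vma forms used for TLB maintenance. */
static inline void local_flush_all(void)
{
	__asm__ __volatile__("sfence.vma" : : : "memory");
}

static inline void local_flush_page(unsigned long va)
{
	__asm__ __volatile__("sfence.vma %0" : : "r"(va) : "memory");
}
```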

@@ -2,8 +2,6 @@
menu "Utils and Drivers Support" menu "Utils and Drivers Support"
source "$(OPENSBI_SRC_DIR)/lib/utils/cache/Kconfig"
source "$(OPENSBI_SRC_DIR)/lib/utils/cppc/Kconfig" source "$(OPENSBI_SRC_DIR)/lib/utils/cppc/Kconfig"
source "$(OPENSBI_SRC_DIR)/lib/utils/fdt/Kconfig" source "$(OPENSBI_SRC_DIR)/lib/utils/fdt/Kconfig"


@@ -1,31 +0,0 @@
# SPDX-License-Identifier: BSD-2-Clause
menu "Cache Support"
config FDT_CACHE
bool "FDT based cache drivers"
depends on FDT
select CACHE
default n
if FDT_CACHE
config FDT_CACHE_SIFIVE_CCACHE
bool "SiFive CCACHE FDT cache driver"
default n
config FDT_CACHE_SIFIVE_EC
bool "SiFive EC FDT cache driver"
default n
config FDT_CACHE_SIFIVE_PL2
bool "SiFive PL2 FDT cache driver"
default n
endif
config CACHE
bool "Cache support"
default n
endmenu


@@ -1,46 +0,0 @@
/*
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2025 SiFive Inc.
*/
#include <sbi/sbi_error.h>
#include <sbi_utils/cache/cache.h>
static SBI_LIST_HEAD(cache_list);
struct cache_device *cache_find(u32 id)
{
struct cache_device *dev;
sbi_list_for_each_entry(dev, &cache_list, node) {
if (dev->id == id)
return dev;
}
return NULL;
}
int cache_add(struct cache_device *dev)
{
if (!dev)
return SBI_ENODEV;
if (cache_find(dev->id))
return SBI_EALREADY;
sbi_list_add(&dev->node, &cache_list);
return SBI_OK;
}
int cache_flush_all(struct cache_device *dev)
{
if (!dev)
return SBI_ENODEV;
if (!dev->ops || !dev->ops->cache_flush_all)
return SBI_ENOTSUPP;
return dev->ops->cache_flush_all(dev);
}

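For context, this is roughly how the registry deleted above was consumed; the id value, ops table and function names below are invented for the example, and the include is the header removed by this change:

```c
#include <sbi_utils/cache/cache.h>	/* header removed by this change */

/* Hypothetical driver hook and device for illustration only. */
static int my_flush_all(struct cache_device *dev)
{
	/* issue the hardware flush for this device */
	return 0;
}

static struct cache_ops my_ops = {
	.cache_flush_all = my_flush_all,
};

static struct cache_device my_llc = {
	.id	= 42,		/* normally the FDT node offset */
	.ops	= &my_ops,
};

static int register_and_flush(void)
{
	int rc = cache_add(&my_llc);

	if (rc)
		return rc;
	return cache_flush_all(cache_find(42));
}
```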

@@ -1,89 +0,0 @@
/*
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2025 SiFive Inc.
*/
#include <libfdt.h>
#include <sbi/sbi_console.h>
#include <sbi/sbi_error.h>
#include <sbi/sbi_heap.h>
#include <sbi_utils/cache/fdt_cache.h>
#include <sbi_utils/fdt/fdt_driver.h>
/* List of FDT cache drivers generated at compile time */
extern const struct fdt_driver *const fdt_cache_drivers[];
int fdt_cache_add(const void *fdt, int noff, struct cache_device *dev)
{
int rc;
dev->id = noff;
sbi_strncpy(dev->name, fdt_get_name(fdt, noff, NULL), sizeof(dev->name) - 1);
sbi_dprintf("%s: %s\n", __func__, dev->name);
rc = fdt_next_cache_get(fdt, noff, &dev->next);
if (rc == SBI_ENOENT)
dev->next = NULL;
else if (rc)
return rc;
return cache_add(dev);
}
static int fdt_cache_add_generic(const void *fdt, int noff)
{
struct cache_device *dev;
int rc;
dev = sbi_zalloc(sizeof(*dev));
if (!dev)
return SBI_ENOMEM;
rc = fdt_cache_add(fdt, noff, dev);
if (rc) {
sbi_free(dev);
return rc;
}
return 0;
}
static int fdt_cache_find(const void *fdt, int noff, struct cache_device **out_dev)
{
struct cache_device *dev = cache_find(noff);
int rc;
if (!dev) {
rc = fdt_driver_init_by_offset(fdt, noff, fdt_cache_drivers);
if (rc == SBI_ENODEV)
rc = fdt_cache_add_generic(fdt, noff);
if (rc)
return rc;
dev = cache_find(noff);
if (!dev)
return SBI_EFAIL;
}
if (out_dev)
*out_dev = dev;
return SBI_OK;
}
int fdt_next_cache_get(const void *fdt, int noff, struct cache_device **out_dev)
{
const fdt32_t *val;
int len;
val = fdt_getprop(fdt, noff, "next-level-cache", &len);
if (!val || len < sizeof(*val))
return SBI_ENOENT;
noff = fdt_node_offset_by_phandle(fdt, fdt32_to_cpu(val[0]));
if (noff < 0)
return noff;
return fdt_cache_find(fdt, noff, out_dev);
}

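fdt_next_cache_get() above encodes the device-tree convention that each CPU or cache node points at its parent cache through a `next-level-cache` phandle. A sketch of the lookup step alone, using the libfdt calls from the deleted code:

```c
#include <libfdt.h>

/* Sketch of the phandle hop at the heart of fdt_next_cache_get():
 * read the node's "next-level-cache" property and resolve it to the
 * node offset of the parent cache. Returns a negative libfdt error
 * code or the resolved node offset. */
static int next_cache_node(const void *fdt, int noff)
{
	const fdt32_t *val;
	int len;

	val = fdt_getprop(fdt, noff, "next-level-cache", &len);
	if (!val || len < sizeof(*val))
		return -FDT_ERR_NOTFOUND;

	return fdt_node_offset_by_phandle(fdt, fdt32_to_cpu(val[0]));
}
```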

@@ -1,3 +0,0 @@
HEADER: sbi_utils/cache/fdt_cache.h
TYPE: const struct fdt_driver
NAME: fdt_cache_drivers


@@ -1,114 +0,0 @@
/*
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2025 SiFive Inc.
*/
#include <libfdt.h>
#include <sbi/sbi_error.h>
#include <sbi/sbi_scratch.h>
#include <sbi_utils/cache/fdt_cache.h>
#include <sbi_utils/cache/fdt_cmo_helper.h>
#include <sbi_utils/fdt/fdt_helper.h>
static unsigned long flc_offset;
#define get_hart_flc(_s) \
sbi_scratch_read_type(_s, struct cache_device *, flc_offset)
#define set_hart_flc(_s, _p) \
sbi_scratch_write_type(_s, struct cache_device *, flc_offset, _p)
int fdt_cmo_private_flc_flush_all(void)
{
struct cache_device *flc = get_hart_flc(sbi_scratch_thishart_ptr());
if (!flc || !flc->cpu_private)
return SBI_ENODEV;
return cache_flush_all(flc);
}
int fdt_cmo_llc_flush_all(void)
{
struct cache_device *llc = get_hart_flc(sbi_scratch_thishart_ptr());
if (!llc)
return SBI_ENODEV;
while (llc->next)
llc = llc->next;
return cache_flush_all(llc);
}
static int fdt_cmo_cold_init(const void *fdt)
{
struct sbi_scratch *scratch;
struct cache_device *dev;
int cpu_offset, cpus_offset, rc;
u32 hartid;
cpus_offset = fdt_path_offset(fdt, "/cpus");
if (cpus_offset < 0)
return SBI_EINVAL;
fdt_for_each_subnode(cpu_offset, fdt, cpus_offset) {
rc = fdt_parse_hart_id(fdt, cpu_offset, &hartid);
if (rc)
continue;
scratch = sbi_hartid_to_scratch(hartid);
if (!scratch)
continue;
rc = fdt_next_cache_get(fdt, cpu_offset, &dev);
if (rc && rc != SBI_ENOENT)
return rc;
if (rc == SBI_ENOENT)
dev = NULL;
set_hart_flc(scratch, dev);
}
return SBI_OK;
}
static int fdt_cmo_warm_init(void)
{
struct cache_device *cur = get_hart_flc(sbi_scratch_thishart_ptr());
int rc;
while (cur) {
if (cur->ops && cur->ops->warm_init) {
rc = cur->ops->warm_init(cur);
if (rc)
return rc;
}
cur = cur->next;
}
return SBI_OK;
}
int fdt_cmo_init(bool cold_boot)
{
const void *fdt = fdt_get_address();
int rc;
if (cold_boot) {
flc_offset = sbi_scratch_alloc_type_offset(struct cache_device *);
if (!flc_offset)
return SBI_ENOMEM;
rc = fdt_cmo_cold_init(fdt);
if (rc)
return rc;
}
rc = fdt_cmo_warm_init();
if (rc)
return rc;
return SBI_OK;
}

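The deleted helper builds on OpenSBI's per-hart scratch slots: allocate a typed offset once at cold boot, then let each hart read and write its own pointer through it. A condensed sketch of that idiom, restated outside the driver:

```c
#include <sbi/sbi_error.h>
#include <sbi/sbi_scratch.h>

struct cache_device;	/* opaque here; the payload type is arbitrary */

static unsigned long flc_offset;

#define get_hart_flc(s)		\
	sbi_scratch_read_type((s), struct cache_device *, flc_offset)
#define set_hart_flc(s, p)	\
	sbi_scratch_write_type((s), struct cache_device *, flc_offset, (p))

/* Allocate the per-hart slot exactly once, at cold boot. */
static int cmo_cold_init(void)
{
	flc_offset = sbi_scratch_alloc_type_offset(struct cache_device *);
	return flc_offset ? 0 : SBI_ENOMEM;
}
```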

@@ -1,175 +0,0 @@
/*
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2025 SiFive Inc.
*/
#include <libfdt.h>
#include <sbi/riscv_barrier.h>
#include <sbi/riscv_io.h>
#include <sbi/sbi_error.h>
#include <sbi/sbi_heap.h>
#include <sbi_utils/cache/fdt_cache.h>
#include <sbi_utils/fdt/fdt_driver.h>
#define CCACHE_CFG_CSR 0
#define CCACHE_CMD_CSR 0x280
#define CCACHE_STATUS_CSR 0x288
#define CFG_CSR_BANK_MASK 0xff
#define CFG_CSR_WAY_MASK 0xff00
#define CFG_CSR_WAY_OFFSET 8
#define CFG_CSR_SET_MASK 0xff0000
#define CFG_CSR_SET_OFFSET 16
#define CMD_CSR_CMD_OFFSET 56
#define CMD_CSR_BANK_OFFSET 6
#define CMD_OPCODE_SETWAY 0x1ULL
#define CMD_OPCODE_OFFSET 0x2ULL
#define CFLUSH_SETWAY_CLEANINV ((CMD_OPCODE_SETWAY << CMD_OPCODE_OFFSET) | 0x3)
#define CCACHE_CMD_QLEN 0xff
#define ccache_mb_b() RISCV_FENCE(rw, o)
#define ccache_mb_a() RISCV_FENCE(o, rw)
#define CCACHE_ALL_OP_REQ_BATCH_NUM 0x10
#define CCACHE_ALL_OP_REQ_BATCH_MASK (CCACHE_CMD_QLEN + 1 - CCACHE_ALL_OP_REQ_BATCH_NUM)
struct sifive_ccache {
struct cache_device dev;
void *addr;
u64 total_lines;
};
#define to_ccache(_dev) container_of(_dev, struct sifive_ccache, dev)
static inline unsigned int sifive_ccache_read_status(void *status_addr)
{
return readl_relaxed(status_addr);
}
static inline void sifive_ccache_write_cmd(u64 cmd, void *cmd_csr_addr)
{
#if __riscv_xlen != 32
writeq_relaxed(cmd, cmd_csr_addr);
#else
/*
* The cache maintenance request is only generated when the "command"
* field (part of the high word) is written.
*/
writel_relaxed(cmd, cmd_csr_addr);
writel(cmd >> 32, cmd_csr_addr + 4);
#endif
}
static int sifive_ccache_flush_all(struct cache_device *dev)
{
struct sifive_ccache *ccache = to_ccache(dev);
void *status_addr = (char *)ccache->addr + CCACHE_STATUS_CSR;
void *cmd_csr_addr = (char *)ccache->addr + CCACHE_CMD_CSR;
u64 total_cnt = ccache->total_lines;
u64 cmd = CFLUSH_SETWAY_CLEANINV << CMD_CSR_CMD_OFFSET;
int loop_cnt = CCACHE_CMD_QLEN & CCACHE_ALL_OP_REQ_BATCH_MASK;
ccache_mb_b();
send_cmd:
total_cnt -= loop_cnt;
while (loop_cnt > 0) {
sifive_ccache_write_cmd(cmd + (0 << CMD_CSR_BANK_OFFSET), cmd_csr_addr);
sifive_ccache_write_cmd(cmd + (1 << CMD_CSR_BANK_OFFSET), cmd_csr_addr);
sifive_ccache_write_cmd(cmd + (2 << CMD_CSR_BANK_OFFSET), cmd_csr_addr);
sifive_ccache_write_cmd(cmd + (3 << CMD_CSR_BANK_OFFSET), cmd_csr_addr);
sifive_ccache_write_cmd(cmd + (4 << CMD_CSR_BANK_OFFSET), cmd_csr_addr);
sifive_ccache_write_cmd(cmd + (5 << CMD_CSR_BANK_OFFSET), cmd_csr_addr);
sifive_ccache_write_cmd(cmd + (6 << CMD_CSR_BANK_OFFSET), cmd_csr_addr);
sifive_ccache_write_cmd(cmd + (7 << CMD_CSR_BANK_OFFSET), cmd_csr_addr);
sifive_ccache_write_cmd(cmd + (8 << CMD_CSR_BANK_OFFSET), cmd_csr_addr);
sifive_ccache_write_cmd(cmd + (9 << CMD_CSR_BANK_OFFSET), cmd_csr_addr);
sifive_ccache_write_cmd(cmd + (10 << CMD_CSR_BANK_OFFSET), cmd_csr_addr);
sifive_ccache_write_cmd(cmd + (11 << CMD_CSR_BANK_OFFSET), cmd_csr_addr);
sifive_ccache_write_cmd(cmd + (12 << CMD_CSR_BANK_OFFSET), cmd_csr_addr);
sifive_ccache_write_cmd(cmd + (13 << CMD_CSR_BANK_OFFSET), cmd_csr_addr);
sifive_ccache_write_cmd(cmd + (14 << CMD_CSR_BANK_OFFSET), cmd_csr_addr);
sifive_ccache_write_cmd(cmd + (15 << CMD_CSR_BANK_OFFSET), cmd_csr_addr);
cmd += CCACHE_ALL_OP_REQ_BATCH_NUM << CMD_CSR_BANK_OFFSET;
loop_cnt -= CCACHE_ALL_OP_REQ_BATCH_NUM;
}
if (!total_cnt)
goto done;
/* Ensure the ccache is able to receive more than 16 requests */
do {
loop_cnt = (CCACHE_CMD_QLEN - sifive_ccache_read_status(status_addr));
} while (loop_cnt < CCACHE_ALL_OP_REQ_BATCH_NUM);
loop_cnt &= CCACHE_ALL_OP_REQ_BATCH_MASK;
if (total_cnt < loop_cnt) {
loop_cnt = (total_cnt + CCACHE_ALL_OP_REQ_BATCH_NUM) & CCACHE_ALL_OP_REQ_BATCH_MASK;
cmd -= ((loop_cnt - total_cnt) << CMD_CSR_BANK_OFFSET);
total_cnt = loop_cnt;
}
goto send_cmd;
done:
do {} while (sifive_ccache_read_status(status_addr));
ccache_mb_a();
return 0;
}
static struct cache_ops sifive_ccache_ops = {
.cache_flush_all = sifive_ccache_flush_all,
};
static int sifive_ccache_cold_init(const void *fdt, int nodeoff, const struct fdt_match *match)
{
struct sifive_ccache *ccache;
struct cache_device *dev;
u64 reg_addr = 0;
u32 config_csr, banks, sets, ways;
int rc;
/* find the ccache base control address */
rc = fdt_get_node_addr_size(fdt, nodeoff, 0, &reg_addr, NULL);
if (rc < 0 && reg_addr)
return SBI_ENODEV;
ccache = sbi_zalloc(sizeof(*ccache));
if (!ccache)
return SBI_ENOMEM;
dev = &ccache->dev;
dev->ops = &sifive_ccache_ops;
rc = fdt_cache_add(fdt, nodeoff, dev);
if (rc) {
sbi_free(ccache);
return rc;
}
ccache->addr = (void *)(uintptr_t)reg_addr;
/* get the info of ccache from config CSR */
config_csr = readl(ccache->addr + CCACHE_CFG_CSR);
banks = config_csr & CFG_CSR_BANK_MASK;
sets = (config_csr & CFG_CSR_SET_MASK) >> CFG_CSR_SET_OFFSET;
sets = (1 << sets);
ways = (config_csr & CFG_CSR_WAY_MASK) >> CFG_CSR_WAY_OFFSET;
ccache->total_lines = sets * ways * banks;
return SBI_OK;
}
static const struct fdt_match sifive_ccache_match[] = {
{ .compatible = "sifive,ccache2" },
{},
};
const struct fdt_driver fdt_sifive_ccache = {
.match_table = sifive_ccache_match,
.init = sifive_ccache_cold_init,
};

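The batching constants above are easier to follow with numbers: the mask is 0x100 - 0x10 = 0xf0, so the first burst queues 0xff & 0xf0 = 240 set/way commands (15 batches of 16) before the status CSR is polled. A standalone check:

```c
#include <stdio.h>

/* Values copied from the deleted driver above. */
#define CCACHE_CMD_QLEN			0xff
#define CCACHE_ALL_OP_REQ_BATCH_NUM	0x10
#define CCACHE_ALL_OP_REQ_BATCH_MASK	(CCACHE_CMD_QLEN + 1 - CCACHE_ALL_OP_REQ_BATCH_NUM)

int main(void)
{
	/* 0x100 - 0x10 = 0xf0: rounds a count down to a multiple of 16 */
	printf("mask        = 0x%x\n", CCACHE_ALL_OP_REQ_BATCH_MASK);
	/* 0xff & 0xf0 = 0xf0 = 240 commands queued before the first poll */
	printf("first burst = %d\n", CCACHE_CMD_QLEN & CCACHE_ALL_OP_REQ_BATCH_MASK);
	return 0;
}
```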

@@ -1,195 +0,0 @@
/*
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2025 SiFive Inc.
*/
#include <libfdt.h>
#include <sbi/riscv_io.h>
#include <sbi/sbi_heap.h>
#include <sbi/sbi_platform.h>
#include <sbi_utils/cache/fdt_cache.h>
#include <sbi_utils/fdt/fdt_driver.h>
#define SIFIVE_EC_FEATURE_DISABLE_OFF 0x100UL
#define SIFIVE_EC_FLUSH_CMD_OFF 0x800UL
#define SIFIVE_EC_FLUSH_STATUS_OFF 0x808UL
#define SIFIVE_EC_FLUSH_ADDR_OFF 0x810UL
#define SIFIVE_EC_MODE_CTRL 0xa00UL
#define SIFIVE_EC_FLUSH_COMPLETION_MASK BIT(0)
#define SIFIVE_EC_CLEANINV_ALL_CMD 0x3
#define SIFIVE_EC_FEATURE_DISABLE_VAL 0
struct sifive_ec_quirks {
bool two_mode;
char *reg_name;
};
struct sifive_ec_slice {
void *addr;
bool last_slice;
};
struct sifive_ec {
struct cache_device dev;
struct sifive_ec_slice *slices;
};
#define to_ec(_dev) container_of(_dev, struct sifive_ec, dev)
static int sifive_ec_flush_all(struct cache_device *dev)
{
struct sifive_ec *ec_dev = to_ec(dev);
struct sifive_ec_slice *slices = ec_dev->slices;
u32 cmd = SIFIVE_EC_CLEANINV_ALL_CMD, i = 0;
void *addr;
do {
addr = slices[i].addr;
writel((int)-1, addr + SIFIVE_EC_FLUSH_ADDR_OFF);
writel((int)-1, addr + SIFIVE_EC_FLUSH_ADDR_OFF + sizeof(u32));
writel(cmd, addr + SIFIVE_EC_FLUSH_CMD_OFF);
} while (!slices[i++].last_slice);
i = 0;
do {
addr = slices[i].addr;
do {} while (!(readl(addr + SIFIVE_EC_FLUSH_STATUS_OFF) &
SIFIVE_EC_FLUSH_COMPLETION_MASK));
} while (!slices[i++].last_slice);
return 0;
}
int sifive_ec_warm_init(struct cache_device *dev)
{
struct sifive_ec *ec_dev = to_ec(dev);
struct sifive_ec_slice *slices = ec_dev->slices;
struct sbi_domain *dom = sbi_domain_thishart_ptr();
int i = 0;
if (dom->boot_hartid == current_hartid()) {
do {
writel(SIFIVE_EC_FEATURE_DISABLE_VAL,
slices[i].addr + SIFIVE_EC_FEATURE_DISABLE_OFF);
} while (!slices[i++].last_slice);
}
return SBI_OK;
}
static struct cache_ops sifive_ec_ops = {
.warm_init = sifive_ec_warm_init,
.cache_flush_all = sifive_ec_flush_all,
};
static int sifive_ec_slices_cold_init(const void *fdt, int nodeoff,
struct sifive_ec_slice *slices,
const struct sifive_ec_quirks *quirks)
{
int rc, subnode, slice_idx = -1;
u64 reg_addr, size, start_addr = -1, end_addr = 0;
fdt_for_each_subnode(subnode, fdt, nodeoff) {
rc = fdt_get_node_addr_size_by_name(fdt, subnode, quirks->reg_name, &reg_addr,
&size);
if (rc < 0)
return SBI_ENODEV;
if (reg_addr < start_addr)
start_addr = reg_addr;
if (reg_addr + size > end_addr)
end_addr = reg_addr + size;
slices[++slice_idx].addr = (void *)(uintptr_t)reg_addr;
}
slices[slice_idx].last_slice = true;
/* Only enable the PMP to protect the EC M-mode region when it supports two modes */
if (quirks->two_mode) {
rc = sbi_domain_root_add_memrange((unsigned long)start_addr,
(unsigned long)(end_addr - start_addr),
BIT(12),
(SBI_DOMAIN_MEMREGION_MMIO |
SBI_DOMAIN_MEMREGION_M_READABLE |
SBI_DOMAIN_MEMREGION_M_WRITABLE));
if (rc)
return rc;
}
return SBI_OK;
}
static int sifive_ec_cold_init(const void *fdt, int nodeoff, const struct fdt_match *match)
{
const struct sifive_ec_quirks *quirks = match->data;
struct sifive_ec_slice *slices;
struct sifive_ec *ec_dev;
struct cache_device *dev;
int subnode, rc = SBI_ENOMEM;
u32 slice_count = 0;
/* Count the number of slices */
fdt_for_each_subnode(subnode, fdt, nodeoff)
slice_count++;
/* Need at least one slice */
if (!slice_count)
return SBI_EINVAL;
ec_dev = sbi_zalloc(sizeof(*ec_dev));
if (!ec_dev)
return SBI_ENOMEM;
slices = sbi_zalloc(slice_count * sizeof(*slices));
if (!slices)
goto free_ec;
rc = sifive_ec_slices_cold_init(fdt, nodeoff, slices, quirks);
if (rc)
goto free_slice;
dev = &ec_dev->dev;
dev->ops = &sifive_ec_ops;
rc = fdt_cache_add(fdt, nodeoff, dev);
if (rc)
goto free_slice;
ec_dev->slices = slices;
return SBI_OK;
free_slice:
sbi_free(slices);
free_ec:
sbi_free(ec_dev);
return rc;
}
static const struct sifive_ec_quirks sifive_extensiblecache0_quirks = {
.two_mode = false,
.reg_name = "control",
};
static const struct sifive_ec_quirks sifive_extensiblecache4_quirks = {
.two_mode = true,
.reg_name = "m_mode",
};
static const struct fdt_match sifive_ec_match[] = {
{ .compatible = "sifive,extensiblecache4", .data = &sifive_extensiblecache4_quirks },
{ .compatible = "sifive,extensiblecache3", .data = &sifive_extensiblecache0_quirks },
{ .compatible = "sifive,extensiblecache2", .data = &sifive_extensiblecache0_quirks },
{ .compatible = "sifive,extensiblecache0", .data = &sifive_extensiblecache0_quirks },
{},
};
struct fdt_driver fdt_sifive_ec = {
.match_table = sifive_ec_match,
.init = sifive_ec_cold_init,
};

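The deleted driver walks its slices using a sentinel flag rather than a count. The idiom in isolation; note the post-increment in the loop condition, which ensures the slice carrying the flag is itself visited before the loop exits:

```c
#include <stdbool.h>

struct slice {
	int payload;
	bool last_slice;	/* set on the final array element */
};

/* Visits every slice, including the one flagged as last. */
static int sum_slices(const struct slice *s)
{
	int i = 0, sum = 0;

	do {
		sum += s[i].payload;
	} while (!s[i++].last_slice);

	return sum;
}
```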

@@ -1,139 +0,0 @@
/*
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2025 SiFive Inc.
*/
#include <libfdt.h>
#include <sbi/riscv_io.h>
#include <sbi/sbi_error.h>
#include <sbi/sbi_heap.h>
#include <sbi_utils/cache/fdt_cache.h>
#include <sbi_utils/fdt/fdt_driver.h>
#define FLUSH64_CMD_TARGET_ALL (0x2 << 3)
#define FLUSH64_CMD_TYPE_FLUSH 0x3ULL
#define SIFIVE_PL2CACHE_CMD_QLEN 0xff
#define SIFIVE_PL2CACHE_FLUSH64_OFF 0x200ULL
#define SIFIVE_PL2CACHE_STATUS_OFF 0x208ULL
#define SIFIVE_PL2CACHE_CONFIG1_OFF 0x1000ULL
#define SIFIVE_PL2CACHE_CONFIG0_OFF 0x1008ULL
#define FLUSH64_CMD_POS 56
#define REGIONCLOCKDISABLE_MASK BIT(3)
#define CONFIG0_ACCEPT_DIRTY_DATA_ENABLE BIT(24)
struct sifive_pl2_quirks {
bool no_dirty_fill;
};
struct sifive_pl2 {
struct cache_device dev;
void *addr;
bool no_dirty_fill;
};
#define to_pl2(_dev) container_of(_dev, struct sifive_pl2, dev)
static int sifive_pl2_flush_all(struct cache_device *dev)
{
struct sifive_pl2 *pl2_dev = to_pl2(dev);
char *addr = pl2_dev->addr;
u64 cmd = (FLUSH64_CMD_TARGET_ALL | FLUSH64_CMD_TYPE_FLUSH) << FLUSH64_CMD_POS;
u32 config0;
/*
* While flushing the PL2 cache, a speculative load might pull a dirty line
* into PL2, which makes the SiFive SMC0 refuse to enter power gating.
* Disable the ACCEPT_DIRTY_DATA_ENABLE to avoid the issue.
*/
if (pl2_dev->no_dirty_fill) {
config0 = readl((void *)addr + SIFIVE_PL2CACHE_CONFIG0_OFF);
config0 &= ~CONFIG0_ACCEPT_DIRTY_DATA_ENABLE;
writel(config0, (void *)addr + SIFIVE_PL2CACHE_CONFIG0_OFF);
}
#if __riscv_xlen != 32
writeq(cmd, addr + SIFIVE_PL2CACHE_FLUSH64_OFF);
#else
writel((u32)cmd, addr + SIFIVE_PL2CACHE_FLUSH64_OFF);
writel((u32)(cmd >> 32), addr + SIFIVE_PL2CACHE_FLUSH64_OFF + sizeof(u32));
#endif
do {} while (readl(addr + SIFIVE_PL2CACHE_STATUS_OFF) & SIFIVE_PL2CACHE_CMD_QLEN);
return 0;
}
static int sifive_pl2_warm_init(struct cache_device *dev)
{
struct sifive_pl2 *pl2_dev = to_pl2(dev);
char *addr = pl2_dev->addr;
u32 val;
/* Enable clock gating */
val = readl(addr + SIFIVE_PL2CACHE_CONFIG1_OFF);
val &= (~REGIONCLOCKDISABLE_MASK);
writel(val, addr + SIFIVE_PL2CACHE_CONFIG1_OFF);
return 0;
}
static struct cache_ops sifive_pl2_ops = {
.warm_init = sifive_pl2_warm_init,
.cache_flush_all = sifive_pl2_flush_all,
};
static int sifive_pl2_cold_init(const void *fdt, int nodeoff, const struct fdt_match *match)
{
const struct sifive_pl2_quirks *quirk = match->data;
struct sifive_pl2 *pl2_dev;
struct cache_device *dev;
u64 reg_addr;
int rc;
/* find the pl2 control base address */
rc = fdt_get_node_addr_size(fdt, nodeoff, 0, &reg_addr, NULL);
if (rc < 0 && reg_addr)
return SBI_ENODEV;
pl2_dev = sbi_zalloc(sizeof(*pl2_dev));
if (!pl2_dev)
return SBI_ENOMEM;
dev = &pl2_dev->dev;
dev->ops = &sifive_pl2_ops;
dev->cpu_private = true;
rc = fdt_cache_add(fdt, nodeoff, dev);
if (rc)
return rc;
pl2_dev->addr = (void *)(uintptr_t)reg_addr;
if (quirk)
pl2_dev->no_dirty_fill = quirk->no_dirty_fill;
return 0;
}
static const struct sifive_pl2_quirks pl2cache2_quirks = {
.no_dirty_fill = true,
};
static const struct sifive_pl2_quirks pl2cache0_quirks = {
.no_dirty_fill = false,
};
static const struct fdt_match sifive_pl2_match[] = {
{ .compatible = "sifive,pl2cache2", .data = &pl2cache2_quirks },
{ .compatible = "sifive,pl2cache1", .data = &pl2cache0_quirks },
{ .compatible = "sifive,pl2cache0", .data = &pl2cache0_quirks },
{},
};
struct fdt_driver fdt_sifive_pl2 = {
.match_table = sifive_pl2_match,
.init = sifive_pl2_cold_init,
};

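Both deleted SiFive cache drivers write 64-bit command registers with the same XLEN split: a single `writeq()` on RV64, two `writel()`s on RV32 with the low word first (at least the CCACHE only launches the request once the high word, which holds the command field, is written). A sketch:

```c
#include <sbi/riscv_io.h>	/* writel()/writeq(), as used in the diff */

/* Sketch of the XLEN-dependent 64-bit MMIO write; memory-ordering
 * variants (writeq_relaxed etc.) are elided for brevity. */
static inline void mmio_write64(u64 val, void *addr)
{
#if __riscv_xlen != 32
	writeq(val, addr);
#else
	writel((u32)val, addr);			/* low word first ... */
	writel((u32)(val >> 32), (char *)addr + 4); /* ... high word fires the request */
#endif
}
```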

@@ -1,20 +0,0 @@
#
# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2025 SiFive
#
libsbiutils-objs-$(CONFIG_FDT_CACHE) += cache/fdt_cache.o
libsbiutils-objs-$(CONFIG_FDT_CACHE) += cache/fdt_cache_drivers.carray.o
libsbiutils-objs-$(CONFIG_FDT_CACHE) += cache/fdt_cmo_helper.o
carray-fdt_cache_drivers-$(CONFIG_FDT_CACHE_SIFIVE_CCACHE) += fdt_sifive_ccache
libsbiutils-objs-$(CONFIG_FDT_CACHE_SIFIVE_CCACHE) += cache/fdt_sifive_ccache.o
carray-fdt_cache_drivers-$(CONFIG_FDT_CACHE_SIFIVE_PL2) += fdt_sifive_pl2
libsbiutils-objs-$(CONFIG_FDT_CACHE_SIFIVE_PL2) += cache/fdt_sifive_pl2.o
carray-fdt_cache_drivers-$(CONFIG_FDT_CACHE_SIFIVE_EC) += fdt_sifive_ec
libsbiutils-objs-$(CONFIG_FDT_CACHE_SIFIVE_EC) += cache/fdt_sifive_ec.o
libsbiutils-objs-$(CONFIG_CACHE) += cache/cache.o


@@ -185,7 +185,7 @@ static void fdt_domain_based_fixup_one(void *fdt, int nodeoff)
return; return;
if (!sbi_domain_check_addr(dom, reg_addr, dom->next_mode, if (!sbi_domain_check_addr(dom, reg_addr, dom->next_mode,
SBI_DOMAIN_READ | SBI_DOMAIN_WRITE | SBI_DOMAIN_MMIO)) { SBI_DOMAIN_READ | SBI_DOMAIN_WRITE)) {
rc = fdt_open_into(fdt, fdt, fdt_totalsize(fdt) + 32); rc = fdt_open_into(fdt, fdt, fdt_totalsize(fdt) + 32);
if (rc < 0) if (rc < 0)
return; return;


@@ -14,15 +14,6 @@ config FDT_HSM_RPMI
depends on FDT_MAILBOX && RPMI_MAILBOX depends on FDT_MAILBOX && RPMI_MAILBOX
default n default n
config FDT_HSM_SIFIVE_TMC0
bool "FDT SiFive TMC v0 driver"
depends on FDT_CACHE
default n
config FDT_HSM_SPACEMIT
bool "FDT SPACEMIT HSM driver"
default n
endif endif
endmenu endmenu


@@ -1,367 +0,0 @@
/*
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2025 SiFive
*/
#include <libfdt.h>
#include <sbi/riscv_asm.h>
#include <sbi/riscv_io.h>
#include <sbi/sbi_bitops.h>
#include <sbi/sbi_console.h>
#include <sbi/sbi_error.h>
#include <sbi/sbi_hart.h>
#include <sbi/sbi_heap.h>
#include <sbi/sbi_hsm.h>
#include <sbi/sbi_ipi.h>
#include <sbi_utils/cache/fdt_cmo_helper.h>
#include <sbi_utils/fdt/fdt_driver.h>
#include <sbi_utils/fdt/fdt_helper.h>
#include <sbi_utils/hsm/fdt_hsm_sifive_inst.h>
#include <sbi_utils/hsm/fdt_hsm_sifive_tmc0.h>
struct sifive_tmc0 {
unsigned long reg;
struct sbi_dlist node;
u32 id;
};
static SBI_LIST_HEAD(tmc0_list);
static unsigned long tmc0_offset;
#define tmc0_ptr_get(__scratch) \
sbi_scratch_read_type((__scratch), struct sifive_tmc0 *, tmc0_offset)
#define tmc0_ptr_set(__scratch, __tmc0) \
sbi_scratch_write_type((__scratch), struct sifive_tmc0 *, tmc0_offset, (__tmc0))
/* TMC.PGPREP */
#define SIFIVE_TMC_PGPREP_OFF 0x0
#define SIFIVE_TMC_PGPREP_ENA_REQ BIT(31)
#define SIFIVE_TMC_PGPREP_ENA_ACK BIT(30)
#define SIFIVE_TMC_PGPREP_DIS_REQ BIT(29)
#define SIFIVE_TMC_PGPREP_DIS_ACK BIT(28)
#define SIFIVE_TMC_PGPREP_CLFPNOTQ BIT(18)
#define SIFIVE_TMC_PGPREP_PMCENAERR BIT(17)
#define SIFIVE_TMC_PGPREP_PMCDENY BIT(16)
#define SIFIVE_TMC_PGPREP_BUSERR BIT(15)
#define SIFIVE_TMC_PGPREP_WAKE_DETECT BIT(12)
#define SIFIVE_TMC_PGPREP_INTERNAL_ABORT BIT(2)
#define SIFIVE_TMC_PGPREP_ENARSP (SIFIVE_TMC_PGPREP_CLFPNOTQ | \
SIFIVE_TMC_PGPREP_PMCENAERR | \
SIFIVE_TMC_PGPREP_PMCDENY | \
SIFIVE_TMC_PGPREP_BUSERR | \
SIFIVE_TMC_PGPREP_WAKE_DETECT)
/* TMC.PG */
#define SIFIVE_TMC_PG_OFF 0x4
#define SIFIVE_TMC_PG_ENA_REQ BIT(31)
#define SIFIVE_TMC_PG_ENA_ACK BIT(30)
#define SIFIVE_TMC_PG_DIS_REQ BIT(29)
#define SIFIVE_TMC_PG_DIS_ACK BIT(28)
#define SIFIVE_TMC_PG_PMC_ENA_ERR BIT(17)
#define SIFIVE_TMC_PG_PMC_DENY BIT(16)
#define SIFIVE_TMC_PG_BUS_ERR BIT(15)
#define SIFIVE_TMC_PG_MASTNOTQ BIT(14)
#define SIFIVE_TMC_PG_WARM_RESET BIT(1)
#define SIFIVE_TMC_PG_ENARSP (SIFIVE_TMC_PG_PMC_ENA_ERR | \
SIFIVE_TMC_PG_PMC_DENY | \
SIFIVE_TMC_PG_BUS_ERR | \
SIFIVE_TMC_PG_MASTNOTQ)
/* TMC.RESUMEPC */
#define SIFIVE_TMC_RESUMEPC_LO 0x10
#define SIFIVE_TMC_RESUMEPC_HI 0x14
/* TMC.WAKEMASK */
#define SIFIVE_TMC_WAKE_MASK_OFF 0x20
#define SIFIVE_TMC_WAKE_MASK_WREQ BIT(31)
#define SIFIVE_TMC_WAKE_MASK_ACK BIT(30)
int sifive_tmc0_set_wakemask_enareq(u32 hartid)
{
struct sbi_scratch *scratch = sbi_hartid_to_scratch(hartid);
struct sifive_tmc0 *tmc0 = tmc0_ptr_get(scratch);
unsigned long addr;
u32 v;
if (!tmc0)
return SBI_ENODEV;
addr = tmc0->reg + SIFIVE_TMC_WAKE_MASK_OFF;
v = readl((void *)addr);
writel(v | SIFIVE_TMC_WAKE_MASK_WREQ, (void *)addr);
while (!(readl((void *)addr) & SIFIVE_TMC_WAKE_MASK_ACK));
return SBI_OK;
}
void sifive_tmc0_set_wakemask_disreq(u32 hartid)
{
struct sbi_scratch *scratch = sbi_hartid_to_scratch(hartid);
struct sifive_tmc0 *tmc0 = tmc0_ptr_get(scratch);
unsigned long addr;
u32 v;
if (!tmc0)
return;
addr = tmc0->reg + SIFIVE_TMC_WAKE_MASK_OFF;
v = readl((void *)addr);
writel(v & ~SIFIVE_TMC_WAKE_MASK_WREQ, (void *)addr);
while (readl((void *)addr) & SIFIVE_TMC_WAKE_MASK_ACK);
}
bool sifive_tmc0_is_pg(u32 hartid)
{
struct sbi_scratch *scratch = sbi_hartid_to_scratch(hartid);
struct sifive_tmc0 *tmc0 = tmc0_ptr_get(scratch);
unsigned long addr;
u32 v;
if (!tmc0)
return false;
addr = tmc0->reg + SIFIVE_TMC_PG_OFF;
v = readl((void *)addr);
if (!(v & SIFIVE_TMC_PG_ENA_ACK) ||
(v & SIFIVE_TMC_PG_ENARSP) ||
(v & SIFIVE_TMC_PG_DIS_REQ))
return false;
return true;
}
static void sifive_tmc0_set_resumepc(physical_addr_t addr)
{
struct sifive_tmc0 *tmc0 = tmc0_ptr_get(sbi_scratch_thishart_ptr());
writel((u32)addr, (void *)(tmc0->reg + SIFIVE_TMC_RESUMEPC_LO));
#if __riscv_xlen > 32
writel((u32)(addr >> 32), (void *)(tmc0->reg + SIFIVE_TMC_RESUMEPC_HI));
#endif
}
static u32 sifive_tmc0_set_pgprep_enareq(void)
{
struct sifive_tmc0 *tmc0 = tmc0_ptr_get(sbi_scratch_thishart_ptr());
unsigned long reg = tmc0->reg + SIFIVE_TMC_PGPREP_OFF;
u32 v = readl((void *)reg);
writel(v | SIFIVE_TMC_PGPREP_ENA_REQ, (void *)reg);
while (!(readl((void *)reg) & SIFIVE_TMC_PGPREP_ENA_ACK));
v = readl((void *)reg);
return v & SIFIVE_TMC_PGPREP_INTERNAL_ABORT;
}
static void sifive_tmc0_set_pgprep_disreq(void)
{
struct sifive_tmc0 *tmc0 = tmc0_ptr_get(sbi_scratch_thishart_ptr());
unsigned long reg = tmc0->reg + SIFIVE_TMC_PGPREP_OFF;
u32 v = readl((void *)reg);
writel(v | SIFIVE_TMC_PGPREP_DIS_REQ, (void *)reg);
while (!(readl((void *)reg) & SIFIVE_TMC_PGPREP_DIS_ACK));
}
static u32 sifive_tmc0_get_pgprep_enarsp(void)
{
struct sifive_tmc0 *tmc0 = tmc0_ptr_get(sbi_scratch_thishart_ptr());
unsigned long reg = tmc0->reg + SIFIVE_TMC_PGPREP_OFF;
u32 v = readl((void *)reg);
return v & SIFIVE_TMC_PGPREP_ENARSP;
}
static void sifive_tmc0_set_pg_enareq(void)
{
struct sifive_tmc0 *tmc0 = tmc0_ptr_get(sbi_scratch_thishart_ptr());
unsigned long reg = tmc0->reg + SIFIVE_TMC_PG_OFF;
u32 v = readl((void *)reg);
writel(v | SIFIVE_TMC_PG_ENA_REQ, (void *)reg);
}
static int sifive_tmc0_prep(void)
{
struct sbi_scratch *scratch = sbi_scratch_thishart_ptr();
u32 rc;
if (!tmc0_ptr_get(scratch))
return SBI_ENODEV;
rc = sifive_tmc0_set_pgprep_enareq();
if (rc) {
sbi_printf("TMC0 error: Internal Abort (Wake detect)\n");
goto fail;
}
rc = sifive_tmc0_get_pgprep_enarsp();
if (rc) {
sifive_tmc0_set_pgprep_disreq();
sbi_printf("TMC0 error: error response code: 0x%x\n", rc);
goto fail;
}
sifive_tmc0_set_resumepc(scratch->warmboot_addr);
return SBI_OK;
fail:
return SBI_EFAIL;
}
static int sifive_tmc0_enter(void)
{
struct sbi_scratch *scratch = sbi_scratch_thishart_ptr();
u32 rc;
/* Flush cache and check if there is wake detect or bus error */
if (fdt_cmo_private_flc_flush_all() &&
sbi_hart_has_extension(scratch, SBI_HART_EXT_XSIFIVE_CFLUSH_D_L1))
sifive_cflush();
rc = sifive_tmc0_get_pgprep_enarsp();
if (rc) {
sbi_printf("TMC0 error: error response code: 0x%x\n", rc);
goto fail;
}
if (sbi_hart_has_extension(scratch, SBI_HART_EXT_XSIFIVE_CEASE)) {
sifive_tmc0_set_pg_enareq();
while (1)
sifive_cease();
}
rc = SBI_ENOTSUPP;
fail:
sifive_tmc0_set_pgprep_disreq();
return rc;
}
static int sifive_tmc0_tile_pg(void)
{
int rc;
rc = sifive_tmc0_prep();
if (rc)
return rc;
return sifive_tmc0_enter();
}
static int sifive_tmc0_start(u32 hartid, ulong saddr)
{
/*
* In system suspend, the IMSIC is reset on SiFive platforms, so
* we use the CLINT IPI as the wake event.
*/
sbi_ipi_raw_send(sbi_hartid_to_hartindex(hartid), true);
return SBI_OK;
}
static int sifive_tmc0_stop(void)
{
unsigned long mie = csr_read(CSR_MIE);
int rc;
/* Set IPI as wake up source */
csr_set(CSR_MIE, MIP_MEIP | MIP_MSIP);
rc = sifive_tmc0_tile_pg();
if (rc) {
csr_write(CSR_MIE, mie);
return rc;
}
return SBI_OK;
}
static struct sbi_hsm_device tmc0_hsm_dev = {
.name = "SiFive TMC0",
.hart_start = sifive_tmc0_start,
.hart_stop = sifive_tmc0_stop,
};
static int sifive_tmc0_bind_cpu(struct sifive_tmc0 *tmc0)
{
const void *fdt = fdt_get_address();
struct sbi_scratch *scratch;
int cpus_off, cpu_off, rc;
const fdt32_t *val;
u32 hartid;
cpus_off = fdt_path_offset(fdt, "/cpus");
if (cpus_off < 0)
return SBI_ENOENT;
fdt_for_each_subnode(cpu_off, fdt, cpus_off) {
rc = fdt_parse_hart_id(fdt, cpu_off, &hartid);
if (rc)
continue;
scratch = sbi_hartid_to_scratch(hartid);
if (!scratch)
continue;
val = fdt_getprop(fdt, cpu_off, "power-domains", NULL);
if (!val)
return SBI_ENOENT;
if (tmc0->id == fdt32_to_cpu(val[0])) {
tmc0_ptr_set(scratch, tmc0);
return SBI_OK;
}
}
return SBI_ENODEV;
}
static int sifive_tmc0_probe(const void *fdt, int nodeoff, const struct fdt_match *match)
{
struct sifive_tmc0 *tmc0;
u64 addr;
int rc;
if (!tmc0_offset) {
tmc0_offset = sbi_scratch_alloc_type_offset(struct sifive_tmc0 *);
if (!tmc0_offset)
return SBI_ENOMEM;
sbi_hsm_set_device(&tmc0_hsm_dev);
}
tmc0 = sbi_zalloc(sizeof(*tmc0));
if (!tmc0)
return SBI_ENOMEM;
rc = fdt_get_node_addr_size(fdt, nodeoff, 0, &addr, NULL);
if (rc)
goto free_tmc0;
tmc0->reg = (unsigned long)addr;
tmc0->id = fdt_get_phandle(fdt_get_address(), nodeoff);
rc = sifive_tmc0_bind_cpu(tmc0);
if (rc)
goto free_tmc0;
return SBI_OK;
free_tmc0:
sbi_free(tmc0);
return rc;
}
static const struct fdt_match sifive_tmc0_match[] = {
{ .compatible = "sifive,tmc0" },
{ },
};
const struct fdt_driver fdt_hsm_sifive_tmc0 = {
.match_table = sifive_tmc0_match,
.init = sifive_tmc0_probe,
};

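Most of the deleted TMC0 register programming is one handshake repeated for the PGPREP, PG and WAKEMASK registers: set a request bit, then spin until hardware raises the matching acknowledge bit. The idiom with stand-in parameters:

```c
#include <sbi/riscv_io.h>

/* The request/acknowledge handshake from the deleted driver, with
 * stand-in register and bit parameters instead of the TMC0 names. */
static void reqack_set(void *reg, u32 req_bit, u32 ack_bit)
{
	u32 v = readl(reg);

	writel(v | req_bit, reg);
	while (!(readl(reg) & ack_bit))
		;	/* hardware acks once the transition completes */
}
```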

@@ -1,140 +0,0 @@
/*
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2025 SpacemiT
* Authors:
* Xianbin Zhu <xianbin.zhu@linux.spacemit.com>
* Troy Mitchell <troy.mitchell@linux.spacemit.com>
*/
#include <platform_override.h>
#include <sbi/riscv_io.h>
#include <sbi/sbi_hsm.h>
#include <spacemit/k1.h>
static const u64 cpu_wakeup_reg[] = {
PMU_AP_CORE0_WAKEUP,
PMU_AP_CORE1_WAKEUP,
PMU_AP_CORE2_WAKEUP,
PMU_AP_CORE3_WAKEUP,
PMU_AP_CORE4_WAKEUP,
PMU_AP_CORE5_WAKEUP,
PMU_AP_CORE6_WAKEUP,
PMU_AP_CORE7_WAKEUP,
};
static const u64 cpu_idle_reg[] = {
PMU_AP_CORE0_IDLE_CFG,
PMU_AP_CORE1_IDLE_CFG,
PMU_AP_CORE2_IDLE_CFG,
PMU_AP_CORE3_IDLE_CFG,
PMU_AP_CORE4_IDLE_CFG,
PMU_AP_CORE5_IDLE_CFG,
PMU_AP_CORE6_IDLE_CFG,
PMU_AP_CORE7_IDLE_CFG,
};
static inline void spacemit_set_cpu_power(u32 hartid, bool enable)
{
unsigned int value;
unsigned int *cpu_idle_base = (unsigned int *)(unsigned long)cpu_idle_reg[hartid];
value = readl(cpu_idle_base);
if (enable)
value &= ~PMU_AP_IDLE_PWRDOWN_MASK;
else
value |= PMU_AP_IDLE_PWRDOWN_MASK;
writel(value, cpu_idle_base);
}
static void spacemit_wakeup_cpu(u32 mpidr)
{
unsigned int *cpu_reset_base;
unsigned int cur_hartid = current_hartid();
cpu_reset_base = (unsigned int *)(unsigned long)cpu_wakeup_reg[cur_hartid];
writel(1 << mpidr, cpu_reset_base);
}
static void spacemit_assert_cpu(void)
{
spacemit_set_cpu_power(current_hartid(), false);
}
static void spacemit_deassert_cpu(unsigned int hartid)
{
spacemit_set_cpu_power(hartid, true);
}
/* Start (or power-up) the given hart */
static int spacemit_hart_start(unsigned int hartid, unsigned long saddr)
{
spacemit_deassert_cpu(hartid);
spacemit_wakeup_cpu(hartid);
return 0;
}
/*
* Stop (or power-down) the current hart from running. This call
* doesn't expect to return if success.
*/
static int spacemit_hart_stop(void)
{
csr_write(CSR_STIMECMP, GENMASK_ULL(63, 0));
csr_clear(CSR_MIE, MIP_SSIP | MIP_MSIP | MIP_STIP | MIP_MTIP | MIP_SEIP | MIP_MEIP);
/* disable data prefetch */
csr_clear(CSR_MSETUP, MSETUP_PFE);
asm volatile ("fence iorw, iorw");
/* flush local dcache */
csr_write(CSR_MRAOP, MRAOP_ICACHE_INVALID);
asm volatile ("fence iorw, iorw");
/* disable dcache */
csr_clear(CSR_MSETUP, MSETUP_DE);
asm volatile ("fence iorw, iorw");
/*
* Core4-7 do not have dedicated bits in ML2SETUP;
* instead, they reuse the same bits as core0-3.
*
* Therefore, use modulo with PLATFORM_MAX_CPUS_PER_CLUSTER
* to select the proper bit.
*/
csr_clear(CSR_ML2SETUP, 1 << (current_hartid() % PLATFORM_MAX_CPUS_PER_CLUSTER));
asm volatile ("fence iorw, iorw");
spacemit_assert_cpu();
wfi();
return SBI_ENOTSUPP;
}
static const struct sbi_hsm_device spacemit_hsm_ops = {
.name = "spacemit-hsm",
.hart_start = spacemit_hart_start,
.hart_stop = spacemit_hart_stop,
};
static int spacemit_hsm_probe(const void *fdt, int nodeoff, const struct fdt_match *match)
{
sbi_hsm_set_device(&spacemit_hsm_ops);
return 0;
}
static const struct fdt_match spacemit_hsm_match[] = {
{ .compatible = "spacemit,k1" },
{ },
};
const struct fdt_driver fdt_hsm_spacemit = {
.match_table = spacemit_hsm_match,
.init = spacemit_hsm_probe,
};


@@ -9,9 +9,3 @@
carray-fdt_early_drivers-$(CONFIG_FDT_HSM_RPMI) += fdt_hsm_rpmi carray-fdt_early_drivers-$(CONFIG_FDT_HSM_RPMI) += fdt_hsm_rpmi
libsbiutils-objs-$(CONFIG_FDT_HSM_RPMI) += hsm/fdt_hsm_rpmi.o libsbiutils-objs-$(CONFIG_FDT_HSM_RPMI) += hsm/fdt_hsm_rpmi.o
carray-fdt_early_drivers-$(CONFIG_FDT_HSM_SPACEMIT) += fdt_hsm_spacemit
libsbiutils-objs-$(CONFIG_FDT_HSM_SPACEMIT) += hsm/fdt_hsm_spacemit.o
carray-fdt_early_drivers-$(CONFIG_FDT_HSM_SIFIVE_TMC0) += fdt_hsm_sifive_tmc0
libsbiutils-objs-$(CONFIG_FDT_HSM_SIFIVE_TMC0) += hsm/fdt_hsm_sifive_tmc0.o


@@ -62,7 +62,6 @@ static void mswi_ipi_clear(void)
static struct sbi_ipi_device aclint_mswi = { static struct sbi_ipi_device aclint_mswi = {
.name = "aclint-mswi", .name = "aclint-mswi",
.rating = 100,
.ipi_send = mswi_ipi_send, .ipi_send = mswi_ipi_send,
.ipi_clear = mswi_ipi_clear .ipi_clear = mswi_ipi_clear
}; };
@@ -107,7 +106,7 @@ int aclint_mswi_cold_init(struct aclint_mswi_data *mswi)
if (rc) if (rc)
return rc; return rc;
sbi_ipi_add_device(&aclint_mswi); sbi_ipi_set_device(&aclint_mswi);
return 0; return 0;
} }


@@ -61,7 +61,6 @@ static void plicsw_ipi_clear(void)
static struct sbi_ipi_device plicsw_ipi = { static struct sbi_ipi_device plicsw_ipi = {
.name = "andes_plicsw", .name = "andes_plicsw",
.rating = 200,
.ipi_send = plicsw_ipi_send, .ipi_send = plicsw_ipi_send,
.ipi_clear = plicsw_ipi_clear .ipi_clear = plicsw_ipi_clear
}; };
@@ -100,7 +99,7 @@ int plicsw_cold_ipi_init(struct plicsw_data *plicsw)
if (rc) if (rc)
return rc; return rc;
sbi_ipi_add_device(&plicsw_ipi); sbi_ipi_set_device(&plicsw_ipi);
return 0; return 0;
} }

lib/utils/ipi/fdt_ipi.c (new file)

@@ -0,0 +1,22 @@
/*
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2020 Western Digital Corporation or its affiliates.
*
* Authors:
* Anup Patel <anup.patel@wdc.com>
*/
#include <sbi_utils/ipi/fdt_ipi.h>
/* List of FDT ipi drivers generated at compile time */
extern const struct fdt_driver *const fdt_ipi_drivers[];
int fdt_ipi_init(void)
{
/*
* On some single-hart systems there is no need for IPIs,
* so do not return a failure if no device is found.
*/
return fdt_driver_init_all(fdt_get_address(), fdt_ipi_drivers);
}


@@ -0,0 +1,3 @@
HEADER: sbi_utils/ipi/fdt_ipi.h
TYPE: const struct fdt_driver
NAME: fdt_ipi_drivers

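This descriptor feeds OpenSBI's carray generator (scripts/carray.sh); combined with the carray-fdt_ipi_drivers-$(CONFIG_...) lines in the objects.mk hunk below, it expands to roughly the following (a sketch, not the generator's literal output):

```c
/* Approximate generated content of fdt_ipi_drivers.carray.c; the entry
 * names come from the Makefile carray lines and the array is assumed
 * NULL-terminated, matching how fdt_driver_init_all() iterates it. */
#include <sbi_utils/ipi/fdt_ipi.h>

extern const struct fdt_driver fdt_ipi_mswi;
extern const struct fdt_driver fdt_ipi_plicsw;

const struct fdt_driver *const fdt_ipi_drivers[] = {
	&fdt_ipi_mswi,
	&fdt_ipi_plicsw,
	NULL,
};
```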

@@ -9,8 +9,8 @@
#include <sbi/sbi_error.h> #include <sbi/sbi_error.h>
#include <sbi/sbi_heap.h> #include <sbi/sbi_heap.h>
#include <sbi_utils/fdt/fdt_driver.h>
#include <sbi_utils/fdt/fdt_helper.h> #include <sbi_utils/fdt/fdt_helper.h>
#include <sbi_utils/ipi/fdt_ipi.h>
#include <sbi_utils/ipi/aclint_mswi.h> #include <sbi_utils/ipi/aclint_mswi.h>
static int ipi_mswi_cold_init(const void *fdt, int nodeoff, static int ipi_mswi_cold_init(const void *fdt, int nodeoff,
@@ -57,7 +57,6 @@ static const struct fdt_match ipi_mswi_match[] = {
{ .compatible = "sifive,clint0", .data = &clint_offset }, { .compatible = "sifive,clint0", .data = &clint_offset },
{ .compatible = "thead,c900-clint", .data = &clint_offset }, { .compatible = "thead,c900-clint", .data = &clint_offset },
{ .compatible = "thead,c900-aclint-mswi" }, { .compatible = "thead,c900-aclint-mswi" },
{ .compatible = "mips,p8700-aclint-mswi" },
{ .compatible = "riscv,aclint-mswi" }, { .compatible = "riscv,aclint-mswi" },
{ }, { },
}; };


@@ -11,8 +11,8 @@
*/ */
#include <sbi/riscv_io.h> #include <sbi/riscv_io.h>
#include <sbi_utils/fdt/fdt_driver.h>
#include <sbi_utils/fdt/fdt_helper.h> #include <sbi_utils/fdt/fdt_helper.h>
#include <sbi_utils/ipi/fdt_ipi.h>
#include <sbi_utils/ipi/andes_plicsw.h> #include <sbi_utils/ipi/andes_plicsw.h>
extern struct plicsw_data plicsw; extern struct plicsw_data plicsw;


@@ -10,8 +10,11 @@
libsbiutils-objs-$(CONFIG_IPI_MSWI) += ipi/aclint_mswi.o libsbiutils-objs-$(CONFIG_IPI_MSWI) += ipi/aclint_mswi.o
libsbiutils-objs-$(CONFIG_IPI_PLICSW) += ipi/andes_plicsw.o libsbiutils-objs-$(CONFIG_IPI_PLICSW) += ipi/andes_plicsw.o
carray-fdt_early_drivers-$(CONFIG_FDT_IPI_MSWI) += fdt_ipi_mswi libsbiutils-objs-$(CONFIG_FDT_IPI) += ipi/fdt_ipi.o
libsbiutils-objs-$(CONFIG_FDT_IPI) += ipi/fdt_ipi_drivers.carray.o
carray-fdt_ipi_drivers-$(CONFIG_FDT_IPI_MSWI) += fdt_ipi_mswi
libsbiutils-objs-$(CONFIG_FDT_IPI_MSWI) += ipi/fdt_ipi_mswi.o libsbiutils-objs-$(CONFIG_FDT_IPI_MSWI) += ipi/fdt_ipi_mswi.o
carray-fdt_early_drivers-$(CONFIG_FDT_IPI_PLICSW) += fdt_ipi_plicsw carray-fdt_ipi_drivers-$(CONFIG_FDT_IPI_PLICSW) += fdt_ipi_plicsw
libsbiutils-objs-$(CONFIG_FDT_IPI_PLICSW) += ipi/fdt_ipi_plicsw.o libsbiutils-objs-$(CONFIG_FDT_IPI_PLICSW) += ipi/fdt_ipi_plicsw.o


@@ -115,96 +115,16 @@
#define APLIC_DISABLE_ITHRESHOLD 1 #define APLIC_DISABLE_ITHRESHOLD 1
#define APLIC_ENABLE_ITHRESHOLD 0 #define APLIC_ENABLE_ITHRESHOLD 0
static SBI_LIST_HEAD(aplic_list);
static void aplic_writel_msicfg(struct aplic_msicfg_data *msicfg,
void *msicfgaddr, void *msicfgaddrH);
static void aplic_init(struct aplic_data *aplic)
{
struct aplic_delegate_data *deleg;
u32 i, j, tmp;
int locked;
/* Set domain configuration to 0 */
writel(0, (void *)(aplic->addr + APLIC_DOMAINCFG));
/* Disable all interrupts */
for (i = 0; i <= aplic->num_source; i += 32)
writel(-1U, (void *)(aplic->addr + APLIC_CLRIE_BASE +
(i / 32) * sizeof(u32)));
/* Set interrupt type and priority for all interrupts */
for (i = 1; i <= aplic->num_source; i++) {
/* Set IRQ source configuration to 0 */
writel(0, (void *)(aplic->addr + APLIC_SOURCECFG_BASE +
(i - 1) * sizeof(u32)));
/* Set IRQ target hart index and priority to 1 */
writel(APLIC_DEFAULT_PRIORITY, (void *)(aplic->addr +
APLIC_TARGET_BASE +
(i - 1) * sizeof(u32)));
}
/* Configure IRQ delegation */
for (i = 0; i < APLIC_MAX_DELEGATE; i++) {
deleg = &aplic->delegate[i];
if (!deleg->first_irq || !deleg->last_irq)
continue;
if (aplic->num_source < deleg->first_irq ||
aplic->num_source < deleg->last_irq)
continue;
if (deleg->child_index > APLIC_SOURCECFG_CHILDIDX_MASK)
continue;
if (deleg->first_irq > deleg->last_irq) {
tmp = deleg->first_irq;
deleg->first_irq = deleg->last_irq;
deleg->last_irq = tmp;
}
for (j = deleg->first_irq; j <= deleg->last_irq; j++)
writel(APLIC_SOURCECFG_D | deleg->child_index,
(void *)(aplic->addr + APLIC_SOURCECFG_BASE +
(j - 1) * sizeof(u32)));
}
/* Default initialization of IDC structures */
for (i = 0; i < aplic->num_idc; i++) {
writel(0, (void *)(aplic->addr + APLIC_IDC_BASE +
i * APLIC_IDC_SIZE + APLIC_IDC_IDELIVERY));
writel(0, (void *)(aplic->addr + APLIC_IDC_BASE +
i * APLIC_IDC_SIZE + APLIC_IDC_IFORCE));
writel(APLIC_DISABLE_ITHRESHOLD, (void *)(aplic->addr +
APLIC_IDC_BASE +
(i * APLIC_IDC_SIZE) +
APLIC_IDC_ITHRESHOLD));
}
/* MSI configuration */
locked = readl((void *)(aplic->addr + APLIC_MMSICFGADDRH)) & APLIC_xMSICFGADDRH_L;
if (aplic->targets_mmode && aplic->has_msicfg_mmode && !locked) {
aplic_writel_msicfg(&aplic->msicfg_mmode,
(void *)(aplic->addr + APLIC_MMSICFGADDR),
(void *)(aplic->addr + APLIC_MMSICFGADDRH));
}
if (aplic->targets_mmode && aplic->has_msicfg_smode && !locked) {
aplic_writel_msicfg(&aplic->msicfg_smode,
(void *)(aplic->addr + APLIC_SMSICFGADDR),
(void *)(aplic->addr + APLIC_SMSICFGADDRH));
}
}
void aplic_reinit_all(void)
{
struct aplic_data *aplic;
sbi_list_for_each_entry(aplic, &aplic_list, node)
aplic_init(aplic);
}
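The aplic_list / aplic_reinit_all() pair added above uses OpenSBI's intrusive doubly-linked list helpers so that every probed APLIC can be re-programmed on the resume path (the SiFive SMC0 suspend driver further down calls aplic_reinit_all() from its system_resume hook). The list pattern in isolation (a sketch; sbi/sbi_list.h provides the macros):

#include <sbi/sbi_list.h>

struct mydev {
	/* ... device state ... */
	struct sbi_dlist node;	/* intrusive hook embedded in the device */
};

static SBI_LIST_HEAD(mydev_list);

static void mydev_register(struct mydev *dev)
{
	sbi_list_add_tail(&dev->node, &mydev_list);
}

static void mydev_reinit_all(void (*reinit)(struct mydev *))
{
	struct mydev *dev;

	/* Iterates via the embedded 'node' member, Linux-list style */
	sbi_list_for_each_entry(dev, &mydev_list, node)
		reinit(dev);
}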
static void aplic_writel_msicfg(struct aplic_msicfg_data *msicfg, static void aplic_writel_msicfg(struct aplic_msicfg_data *msicfg,
void *msicfgaddr, void *msicfgaddrH) void *msicfgaddr, void *msicfgaddrH)
{ {
u32 val; u32 val;
unsigned long base_ppn; unsigned long base_ppn;
/* Check if MSI config is already locked */
if (readl(msicfgaddrH) & APLIC_xMSICFGADDRH_L)
return;
/* Compute the MSI base PPN */ /* Compute the MSI base PPN */
base_ppn = msicfg->base_addr >> APLIC_xMSICFGADDR_PPN_SHIFT; base_ppn = msicfg->base_addr >> APLIC_xMSICFGADDR_PPN_SHIFT;
base_ppn &= ~APLIC_xMSICFGADDR_PPN_HART(msicfg->lhxs); base_ppn &= ~APLIC_xMSICFGADDR_PPN_HART(msicfg->lhxs);
@@ -248,8 +168,9 @@ static int aplic_check_msicfg(struct aplic_msicfg_data *msicfg)
int aplic_cold_irqchip_init(struct aplic_data *aplic) int aplic_cold_irqchip_init(struct aplic_data *aplic)
{ {
int rc; int rc;
u32 i, j, tmp;
struct aplic_delegate_data *deleg; struct aplic_delegate_data *deleg;
u32 first_deleg_irq, last_deleg_irq, i; u32 first_deleg_irq, last_deleg_irq;
/* Sanity checks */ /* Sanity checks */
if (!aplic || if (!aplic ||
@@ -267,24 +188,81 @@ int aplic_cold_irqchip_init(struct aplic_data *aplic)
return rc; return rc;
} }
/* Init the APLIC registers */ /* Set domain configuration to 0 */
aplic_init(aplic); writel(0, (void *)(aplic->addr + APLIC_DOMAINCFG));
/* Disable all interrupts */
for (i = 0; i <= aplic->num_source; i += 32)
writel(-1U, (void *)(aplic->addr + APLIC_CLRIE_BASE +
(i / 32) * sizeof(u32)));
/* Set interrupt type and priority for all interrupts */
for (i = 1; i <= aplic->num_source; i++) {
/* Set IRQ source configuration to 0 */
writel(0, (void *)(aplic->addr + APLIC_SOURCECFG_BASE +
(i - 1) * sizeof(u32)));
/* Set IRQ target hart index and priority to 1 */
writel(APLIC_DEFAULT_PRIORITY, (void *)(aplic->addr +
APLIC_TARGET_BASE +
(i - 1) * sizeof(u32)));
}
/* Configure IRQ delegation */
first_deleg_irq = -1U;
last_deleg_irq = 0;
for (i = 0; i < APLIC_MAX_DELEGATE; i++) {
deleg = &aplic->delegate[i];
if (!deleg->first_irq || !deleg->last_irq)
continue;
if (aplic->num_source < deleg->first_irq ||
aplic->num_source < deleg->last_irq)
continue;
if (APLIC_SOURCECFG_CHILDIDX_MASK < deleg->child_index)
continue;
if (deleg->first_irq > deleg->last_irq) {
tmp = deleg->first_irq;
deleg->first_irq = deleg->last_irq;
deleg->last_irq = tmp;
}
if (deleg->first_irq < first_deleg_irq)
first_deleg_irq = deleg->first_irq;
if (last_deleg_irq < deleg->last_irq)
last_deleg_irq = deleg->last_irq;
for (j = deleg->first_irq; j <= deleg->last_irq; j++)
writel(APLIC_SOURCECFG_D | deleg->child_index,
(void *)(aplic->addr + APLIC_SOURCECFG_BASE +
(j - 1) * sizeof(u32)));
}
/* Default initialization of IDC structures */
for (i = 0; i < aplic->num_idc; i++) {
writel(0, (void *)(aplic->addr + APLIC_IDC_BASE +
i * APLIC_IDC_SIZE + APLIC_IDC_IDELIVERY));
writel(0, (void *)(aplic->addr + APLIC_IDC_BASE +
i * APLIC_IDC_SIZE + APLIC_IDC_IFORCE));
writel(APLIC_DISABLE_ITHRESHOLD, (void *)(aplic->addr +
APLIC_IDC_BASE +
(i * APLIC_IDC_SIZE) +
APLIC_IDC_ITHRESHOLD));
}
/* MSI configuration */
if (aplic->targets_mmode && aplic->has_msicfg_mmode) {
aplic_writel_msicfg(&aplic->msicfg_mmode,
(void *)(aplic->addr + APLIC_MMSICFGADDR),
(void *)(aplic->addr + APLIC_MMSICFGADDRH));
}
if (aplic->targets_mmode && aplic->has_msicfg_smode) {
aplic_writel_msicfg(&aplic->msicfg_smode,
(void *)(aplic->addr + APLIC_SMSICFGADDR),
(void *)(aplic->addr + APLIC_SMSICFGADDRH));
}
/* /*
* Add APLIC region to the root domain if: * Add APLIC region to the root domain if:
* 1) It targets M-mode of any HART directly or via MSIs * 1) It targets M-mode of any HART directly or via MSIs
* 2) All interrupts are delegated to some child APLIC * 2) All interrupts are delegated to some child APLIC
*/ */
first_deleg_irq = -1U;
last_deleg_irq = 0;
for (i = 0; i < APLIC_MAX_DELEGATE; i++) {
deleg = &aplic->delegate[i];
if (deleg->first_irq < first_deleg_irq)
first_deleg_irq = deleg->first_irq;
if (last_deleg_irq < deleg->last_irq)
last_deleg_irq = deleg->last_irq;
}
if (aplic->targets_mmode || if (aplic->targets_mmode ||
((first_deleg_irq < last_deleg_irq) && ((first_deleg_irq < last_deleg_irq) &&
(last_deleg_irq == aplic->num_source) && (last_deleg_irq == aplic->num_source) &&
@@ -300,8 +278,5 @@ int aplic_cold_irqchip_init(struct aplic_data *aplic)
/* Register irqchip device */ /* Register irqchip device */
sbi_irqchip_add_device(&aplic->irqchip); sbi_irqchip_add_device(&aplic->irqchip);
/* Attach to the aplic list */
sbi_list_add_tail(&aplic->node, &aplic_list);
return 0; return 0;
} }

View File

@@ -199,7 +199,6 @@ static void imsic_ipi_send(u32 hart_index)
static struct sbi_ipi_device imsic_ipi_device = { static struct sbi_ipi_device imsic_ipi_device = {
.name = "aia-imsic", .name = "aia-imsic",
.rating = 300,
.ipi_send = imsic_ipi_send .ipi_send = imsic_ipi_send
}; };
@@ -394,7 +393,7 @@ int imsic_cold_irqchip_init(struct imsic_data *imsic)
sbi_irqchip_add_device(&imsic_device); sbi_irqchip_add_device(&imsic_device);
/* Register IPI device */ /* Register IPI device */
sbi_ipi_add_device(&imsic_ipi_device); sbi_ipi_set_device(&imsic_ipi_device);
return 0; return 0;
} }

View File

@@ -136,7 +136,7 @@ void plic_suspend(void)
return; return;
sbi_for_each_hartindex(h) { sbi_for_each_hartindex(h) {
s16 context_id = plic->context_map[h][PLIC_S_CONTEXT]; u32 context_id = plic->context_map[h][PLIC_S_CONTEXT];
if (context_id < 0) if (context_id < 0)
continue; continue;
@@ -167,7 +167,7 @@ void plic_resume(void)
return; return;
sbi_for_each_hartindex(h) { sbi_for_each_hartindex(h) {
s16 context_id = plic->context_map[h][PLIC_S_CONTEXT]; u32 context_id = plic->context_map[h][PLIC_S_CONTEXT];
if (context_id < 0) if (context_id < 0)
continue; continue;

View File

@@ -22,18 +22,6 @@ config FDT_MPXY_RPMI_SYSMSI
bool "MPXY driver for RPMI system MSI service group" bool "MPXY driver for RPMI system MSI service group"
default n default n
config FDT_MPXY_RPMI_VOLTAGE
bool "MPXY driver for RPMI voltage service group"
default n
config FDT_MPXY_RPMI_DEVICE_POWER
bool "MPXY driver for RPMI device power service group"
default n
config FDT_MPXY_RPMI_PERFORMANCE
bool "MPXY driver for RPMI performance service group"
default n
endif endif
endmenu endmenu

View File

@@ -1,56 +0,0 @@
#include <sbi_utils/mpxy/fdt_mpxy_rpmi_mbox.h>
static struct mpxy_rpmi_service_data dpwr_services[] = {
{
.id = RPMI_DPWR_SRV_ENABLE_NOTIFICATION,
.min_tx_len = sizeof(struct rpmi_enable_notification_req),
.max_tx_len = sizeof(struct rpmi_enable_notification_req),
.min_rx_len = sizeof(struct rpmi_enable_notification_resp),
.max_rx_len = sizeof(struct rpmi_enable_notification_resp),
},
{
.id = RPMI_DPWR_SRV_GET_NUM_DOMAINS,
.min_tx_len = 0,
.max_tx_len = 0,
.min_rx_len = sizeof(struct rpmi_dpwr_get_num_domain_resp),
.max_rx_len = sizeof(struct rpmi_dpwr_get_num_domain_resp),
},
{
.id = RPMI_DPWR_SRV_GET_ATTRIBUTES,
.min_tx_len = sizeof(struct rpmi_dpwr_get_attrs_req),
.max_tx_len = sizeof(struct rpmi_dpwr_get_attrs_req),
.min_rx_len = sizeof(struct rpmi_dpwr_get_attrs_resp),
.max_rx_len = sizeof(struct rpmi_dpwr_get_attrs_resp),
},
{
.id = RPMI_DPWR_SRV_SET_STATE,
.min_tx_len = sizeof(struct rpmi_dpwr_set_state_req),
.max_tx_len = sizeof(struct rpmi_dpwr_set_state_req),
.min_rx_len = sizeof(struct rpmi_dpwr_set_state_resp),
.max_rx_len = sizeof(struct rpmi_dpwr_set_state_resp),
},
{
.id = RPMI_DPWR_SRV_GET_STATE,
.min_tx_len = sizeof(struct rpmi_dpwr_get_state_req),
.max_tx_len = sizeof(struct rpmi_dpwr_get_state_req),
.min_rx_len = sizeof(struct rpmi_dpwr_get_state_resp),
.max_rx_len = sizeof(struct rpmi_dpwr_get_state_resp),
},
};
static const struct mpxy_rpmi_mbox_data dpwr_data = {
.servicegrp_id = RPMI_SRVGRP_DEVICE_POWER,
.num_services = RPMI_DPWR_SRV_MAX_COUNT,
.service_data = dpwr_services,
};
static const struct fdt_match dpwr_match[] = {
{ .compatible = "riscv,rpmi-mpxy-device-power", .data = &dpwr_data },
{ },
};
const struct fdt_driver fdt_mpxy_rpmi_device_power = {
.experimental = true,
.match_table = dpwr_match,
.init = mpxy_rpmi_mbox_init,
};
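The min/max lengths in these tables let the shared RPMI mailbox glue reject malformed MPXY requests before they reach the service group. A plausible shape for that check (a sketch under the assumption that validation keys off the table fields; the real code lives in fdt_mpxy_rpmi_mbox.c):

static bool rpmi_tx_len_ok(const struct mpxy_rpmi_mbox_data *mbox,
			   u32 service_id, u32 tx_len)
{
	const struct mpxy_rpmi_service_data *srv = mbox->service_data;
	u32 i;

	for (i = 0; i < mbox->num_services; i++, srv++) {
		if (srv->id != service_id)
			continue;
		/* A max of -1U (as used for variable-length replies) in
		 * effect disables the upper bound. */
		return srv->min_tx_len <= tx_len && tx_len <= srv->max_tx_len;
	}
	return false;	/* unknown service id */
}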

View File

@@ -1,91 +0,0 @@
#include <sbi_utils/mpxy/fdt_mpxy_rpmi_mbox.h>
static struct mpxy_rpmi_service_data performance_services[] = {
{
.id = RPMI_PERF_SRV_ENABLE_NOTIFICATION,
.min_tx_len = sizeof(struct rpmi_enable_notification_req),
.max_tx_len = sizeof(struct rpmi_enable_notification_req),
.min_rx_len = sizeof(struct rpmi_enable_notification_resp),
.max_rx_len = sizeof(struct rpmi_enable_notification_resp),
},
{
.id = RPMI_PERF_SRV_GET_NUM_DOMAINS,
.min_tx_len = 0,
.max_tx_len = 0,
.min_rx_len = sizeof(struct rpmi_perf_get_num_domain_resp),
.max_rx_len = sizeof(struct rpmi_perf_get_num_domain_resp),
},
{
.id = RPMI_PERF_SRV_GET_ATTRIBUTES,
.min_tx_len = sizeof(struct rpmi_perf_get_attrs_req),
.max_tx_len = sizeof(struct rpmi_perf_get_attrs_req),
.min_rx_len = sizeof(struct rpmi_perf_get_attrs_resp),
.max_rx_len = sizeof(struct rpmi_perf_get_attrs_resp),
},
{
.id = RPMI_PERF_SRV_GET_SUPPORTED_LEVELS,
.min_tx_len = sizeof(struct rpmi_perf_get_supported_level_req),
.max_tx_len = sizeof(struct rpmi_perf_get_supported_level_req),
.min_rx_len = sizeof(struct rpmi_perf_get_supported_level_resp),
.max_rx_len = -1U,
},
{
.id = RPMI_PERF_SRV_GET_LEVEL,
.min_tx_len = sizeof(struct rpmi_perf_get_level_req),
.max_tx_len = sizeof(struct rpmi_perf_get_level_req),
.min_rx_len = sizeof(struct rpmi_perf_get_level_resp),
.max_rx_len = sizeof(struct rpmi_perf_get_level_resp),
},
{
.id = RPMI_PERF_SRV_SET_LEVEL,
.min_tx_len = sizeof(struct rpmi_perf_set_level_req),
.max_tx_len = sizeof(struct rpmi_perf_set_level_req),
.min_rx_len = sizeof(struct rpmi_perf_set_level_resp),
.max_rx_len = sizeof(struct rpmi_perf_set_level_resp),
},
{
.id = RPMI_PERF_SRV_GET_LIMIT,
.min_tx_len = sizeof(struct rpmi_perf_get_limit_req),
.max_tx_len = sizeof(struct rpmi_perf_get_limit_req),
.min_rx_len = sizeof(struct rpmi_perf_get_limit_resp),
.max_rx_len = sizeof(struct rpmi_perf_get_limit_resp),
},
{
.id = RPMI_PERF_SRV_SET_LIMIT,
.min_tx_len = sizeof(struct rpmi_perf_set_limit_req),
.max_tx_len = sizeof(struct rpmi_perf_set_limit_req),
.min_rx_len = sizeof(struct rpmi_perf_set_limit_resp),
.max_rx_len = sizeof(struct rpmi_perf_set_limit_resp),
},
{
.id = RPMI_PERF_SRV_GET_FAST_CHANNEL_REGION,
.min_tx_len = 0,
.max_tx_len = 0,
.min_rx_len = sizeof(struct rpmi_perf_get_fast_chn_region_resp),
.max_rx_len = sizeof(struct rpmi_perf_get_fast_chn_region_resp),
},
{
.id = RPMI_PERF_SRV_GET_FAST_CHANNEL_ATTRIBUTES,
.min_tx_len = sizeof(struct rpmi_perf_get_fast_chn_attr_req),
.max_tx_len = sizeof(struct rpmi_perf_get_fast_chn_attr_req),
.min_rx_len = sizeof(struct rpmi_perf_get_fast_chn_attr_resp),
.max_rx_len = sizeof(struct rpmi_perf_get_fast_chn_attr_resp),
},
};
static const struct mpxy_rpmi_mbox_data performance_data = {
.servicegrp_id = RPMI_SRVGRP_PERFORMANCE,
.num_services = RPMI_PERF_SRV_MAX_COUNT,
.service_data = performance_services,
};
static const struct fdt_match performance_match[] = {
{ .compatible = "riscv,rpmi-mpxy-performance", .data = &performance_data },
{ },
};
const struct fdt_driver fdt_mpxy_rpmi_performance = {
.experimental = true,
.match_table = performance_match,
.init = mpxy_rpmi_mbox_init,
};

View File

@@ -57,8 +57,7 @@ static int mpxy_rpmi_sysmis_xfer(void *context, struct mbox_chan *chan,
sys_msi_address |= ((u64)le32_to_cpu(((u32 *)xfer->tx)[2])) << 32; sys_msi_address |= ((u64)le32_to_cpu(((u32 *)xfer->tx)[2])) << 32;
if (!sbi_domain_check_addr_range(sbi_domain_thishart_ptr(), if (!sbi_domain_check_addr_range(sbi_domain_thishart_ptr(),
sys_msi_address, 0x4, PRV_S, sys_msi_address, 0x4, PRV_S,
SBI_DOMAIN_READ | SBI_DOMAIN_WRITE | SBI_DOMAIN_READ | SBI_DOMAIN_WRITE)) {
SBI_DOMAIN_MMIO)) {
((u32 *)xfer->rx)[0] = cpu_to_le32(RPMI_ERR_INVALID_ADDR); ((u32 *)xfer->rx)[0] = cpu_to_le32(RPMI_ERR_INVALID_ADDR);
args->rx_data_len = sizeof(u32); args->rx_data_len = sizeof(u32);
break; break;

View File

@@ -1,77 +0,0 @@
#include <sbi_utils/mpxy/fdt_mpxy_rpmi_mbox.h>
static struct mpxy_rpmi_service_data voltage_services[] = {
{
.id = RPMI_VOLTAGE_SRV_ENABLE_NOTIFICATION,
.min_tx_len = sizeof(struct rpmi_enable_notification_req),
.max_tx_len = sizeof(struct rpmi_enable_notification_req),
.min_rx_len = sizeof(struct rpmi_enable_notification_resp),
.max_rx_len = sizeof(struct rpmi_enable_notification_resp),
},
{
.id = RPMI_VOLTAGE_SRV_GET_NUM_DOMAINS,
.min_tx_len = 0,
.max_tx_len = 0,
.min_rx_len = sizeof(struct rpmi_voltage_get_num_domains_resp),
.max_rx_len = sizeof(struct rpmi_voltage_get_num_domains_resp),
},
{
.id = RPMI_VOLTAGE_SRV_GET_ATTRIBUTES,
.min_tx_len = sizeof(struct rpmi_voltage_get_attributes_req),
.max_tx_len = sizeof(struct rpmi_voltage_get_attributes_req),
.min_rx_len = sizeof(struct rpmi_voltage_get_attributes_resp),
.max_rx_len = sizeof(struct rpmi_voltage_get_attributes_resp),
},
{
.id = RPMI_VOLTAGE_SRV_GET_SUPPORTED_LEVELS,
.min_tx_len = sizeof(struct rpmi_voltage_get_supported_rate_req),
.max_tx_len = sizeof(struct rpmi_voltage_get_supported_rate_req),
.min_rx_len = sizeof(struct rpmi_voltage_get_supported_rate_resp),
.max_rx_len = -1U,
},
{
.id = RPMI_VOLTAGE_SRV_SET_CONFIG,
.min_tx_len = sizeof(struct rpmi_voltage_set_config_req),
.max_tx_len = sizeof(struct rpmi_voltage_set_config_req),
.min_rx_len = sizeof(struct rpmi_voltage_set_config_resp),
.max_rx_len = sizeof(struct rpmi_voltage_set_config_resp),
},
{
.id = RPMI_VOLTAGE_SRV_GET_CONFIG,
.min_tx_len = sizeof(struct rpmi_voltage_get_config_req),
.max_tx_len = sizeof(struct rpmi_voltage_get_config_req),
.min_rx_len = sizeof(struct rpmi_voltage_get_config_resp),
.max_rx_len = sizeof(struct rpmi_voltage_get_config_resp),
},
{
.id = RPMI_VOLTAGE_SRV_SET_LEVEL,
.min_tx_len = sizeof(struct rpmi_voltage_set_level_req),
.max_tx_len = sizeof(struct rpmi_voltage_set_level_req),
.min_rx_len = sizeof(struct rpmi_voltage_set_level_resp),
.max_rx_len = sizeof(struct rpmi_voltage_set_level_resp),
},
{
.id = RPMI_VOLTAGE_SRV_GET_LEVEL,
.min_tx_len = sizeof(struct rpmi_voltage_get_level_req),
.max_tx_len = sizeof(struct rpmi_voltage_get_level_req),
.min_rx_len = sizeof(struct rpmi_voltage_get_level_resp),
.max_rx_len = sizeof(struct rpmi_voltage_get_level_resp),
},
};
static const struct mpxy_rpmi_mbox_data voltage_data = {
.servicegrp_id = RPMI_SRVGRP_VOLTAGE,
.num_services = RPMI_VOLTAGE_SRV_MAX_COUNT,
.service_data = voltage_services,
};
static const struct fdt_match voltage_match[] = {
{ .compatible = "riscv,rpmi-mpxy-voltage", .data = &voltage_data },
{ },
};
const struct fdt_driver fdt_mpxy_rpmi_voltage = {
.experimental = true,
.match_table = voltage_match,
.init = mpxy_rpmi_mbox_init,
};

View File

@@ -15,14 +15,5 @@ libsbiutils-objs-$(CONFIG_FDT_MPXY_RPMI_MBOX) += mpxy/fdt_mpxy_rpmi_mbox.o
carray-fdt_mpxy_drivers-$(CONFIG_FDT_MPXY_RPMI_CLOCK) += fdt_mpxy_rpmi_clock carray-fdt_mpxy_drivers-$(CONFIG_FDT_MPXY_RPMI_CLOCK) += fdt_mpxy_rpmi_clock
libsbiutils-objs-$(CONFIG_FDT_MPXY_RPMI_CLOCK) += mpxy/fdt_mpxy_rpmi_clock.o libsbiutils-objs-$(CONFIG_FDT_MPXY_RPMI_CLOCK) += mpxy/fdt_mpxy_rpmi_clock.o
carray-fdt_mpxy_drivers-$(CONFIG_FDT_MPXY_RPMI_PERFORMANCE) += fdt_mpxy_rpmi_performance
libsbiutils-objs-$(CONFIG_FDT_MPXY_RPMI_PERFORMANCE) += mpxy/fdt_mpxy_rpmi_performance.o
carray-fdt_mpxy_drivers-$(CONFIG_FDT_MPXY_RPMI_SYSMSI) += fdt_mpxy_rpmi_sysmsi carray-fdt_mpxy_drivers-$(CONFIG_FDT_MPXY_RPMI_SYSMSI) += fdt_mpxy_rpmi_sysmsi
libsbiutils-objs-$(CONFIG_FDT_MPXY_RPMI_SYSMSI) += mpxy/fdt_mpxy_rpmi_sysmsi.o libsbiutils-objs-$(CONFIG_FDT_MPXY_RPMI_SYSMSI) += mpxy/fdt_mpxy_rpmi_sysmsi.o
carray-fdt_mpxy_drivers-$(CONFIG_FDT_MPXY_RPMI_VOLTAGE) += fdt_mpxy_rpmi_voltage
libsbiutils-objs-$(CONFIG_FDT_MPXY_RPMI_VOLTAGE) += mpxy/fdt_mpxy_rpmi_voltage.o
carray-fdt_mpxy_drivers-$(CONFIG_FDT_MPXY_RPMI_DEVICE_POWER) += fdt_mpxy_rpmi_device_power
libsbiutils-objs-$(CONFIG_FDT_MPXY_RPMI_DEVICE_POWER) += mpxy/fdt_mpxy_rpmi_device_power.o

View File

@@ -7,7 +7,6 @@
* Rahul Pathak <rpathak@ventanamicro.com> * Rahul Pathak <rpathak@ventanamicro.com>
*/ */
#include <sbi/sbi_hart.h>
#include <sbi/sbi_error.h> #include <sbi/sbi_error.h>
#include <sbi/sbi_system.h> #include <sbi/sbi_system.h>
#include <sbi/sbi_console.h> #include <sbi/sbi_console.h>
@@ -57,8 +56,6 @@ static void rpmi_do_system_reset(u32 reset_type)
if (ret) if (ret)
sbi_printf("system reset failed [type: %d]: ret: %d\n", sbi_printf("system reset failed [type: %d]: ret: %d\n",
reset_type, ret); reset_type, ret);
sbi_hart_hang();
} }
/** /**

View File

@@ -9,7 +9,6 @@
#include <sbi/riscv_io.h> #include <sbi/riscv_io.h>
#include <sbi/sbi_console.h> #include <sbi/sbi_console.h>
#include <sbi/sbi_domain.h>
#include <sbi_utils/serial/sifive-uart.h> #include <sbi_utils/serial/sifive-uart.h>
/* clang-format off */ /* clang-format off */
@@ -112,7 +111,5 @@ int sifive_uart_init(unsigned long base, u32 in_freq, u32 baudrate)
sbi_console_set_device(&sifive_console); sbi_console_set_device(&sifive_console);
return sbi_domain_root_add_memrange(base, PAGE_SIZE, PAGE_SIZE, return 0;
(SBI_DOMAIN_MEMREGION_MMIO |
SBI_DOMAIN_MEMREGION_SHARED_SURW_MRW));
} }

View File

@@ -15,7 +15,7 @@
/* clang-format off */ /* clang-format off */
#define UART_RBR_OFFSET 0 /* In: Receive Buffer Register */ #define UART_RBR_OFFSET 0 /* In: Recieve Buffer Register */
#define UART_THR_OFFSET 0 /* Out: Transmitter Holding Register */ #define UART_THR_OFFSET 0 /* Out: Transmitter Holding Register */
#define UART_DLL_OFFSET 0 /* Out: Divisor Latch Low */ #define UART_DLL_OFFSET 0 /* Out: Divisor Latch Low */
#define UART_IER_OFFSET 1 /* I/O: Interrupt Enable Register */ #define UART_IER_OFFSET 1 /* I/O: Interrupt Enable Register */

View File

@@ -14,10 +14,6 @@ config FDT_SUSPEND_RPMI
depends on FDT_MAILBOX && RPMI_MAILBOX depends on FDT_MAILBOX && RPMI_MAILBOX
default n default n
config FDT_SUSPEND_SIFIVE_SMC0
bool "FDT SIFIVE SMC0 suspend driver"
depends on FDT_HSM_SIFIVE_TMC0 && IRQCHIP_APLIC
default n
endif endif
endmenu endmenu

View File

@@ -1,318 +0,0 @@
/*
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2025 SiFive
*/
#include <libfdt.h>
#include <sbi/riscv_asm.h>
#include <sbi/riscv_io.h>
#include <sbi/sbi_console.h>
#include <sbi/sbi_domain.h>
#include <sbi/sbi_error.h>
#include <sbi/sbi_hart.h>
#include <sbi/sbi_hsm.h>
#include <sbi/sbi_system.h>
#include <sbi/sbi_timer.h>
#include <sbi_utils/cache/fdt_cmo_helper.h>
#include <sbi_utils/fdt/fdt_driver.h>
#include <sbi_utils/fdt/fdt_helper.h>
#include <sbi_utils/hsm/fdt_hsm_sifive_inst.h>
#include <sbi_utils/hsm/fdt_hsm_sifive_tmc0.h>
#include <sbi_utils/irqchip/aplic.h>
#include <sbi_utils/timer/aclint_mtimer.h>
#define SIFIVE_SMC_PGPREP_OFF 0x0
#define SIFIVE_SMC_PG_OFF 0x4
#define SIFIVE_SMC_CCTIMER_OFF 0xc
#define SIFIVE_SMC_RESUMEPC_LO_OFF 0x10
#define SIFIVE_SMC_RESUMEPC_HI_OFF 0x14
#define SIFIVE_SMC_SYNC_PMC_OFF 0x24
#define SIFIVE_SMC_CYCLECOUNT_LO_OFF 0x28
#define SIFIVE_SMC_CYCLECOUNT_HI_OFF 0x2c
#define SIFIVE_SMC_WFI_UNCORE_CG_OFF 0x50
#define SIFIVE_SMC_PGPREP_ENA_REQ BIT(31)
#define SIFIVE_SMC_PGPREP_ENA_ACK BIT(30)
#define SIFIVE_SMC_PGPREP_DIS_REQ BIT(29)
#define SIFIVE_SMC_PGPREP_DIS_ACK BIT(29)
#define SIFIVE_SMC_PGPREP_FRONTNOTQ BIT(19)
#define SIFIVE_SMC_PGPREP_CLFPNOTQ BIT(18)
#define SIFIVE_SMC_PGPREP_PMCENAERR BIT(17)
#define SIFIVE_SMC_PGPREP_WAKE_DETECT BIT(16)
#define SIFIVE_SMC_PGPREP_BUSERR BIT(15)
#define SIFIVE_SMC_PGPREP_EARLY_ABORT BIT(3)
#define SIFIVE_SMC_PGPREP_INTERNAL_ABORT BIT(2)
#define SIFIVE_SMC_PGPREP_ENARSP (SIFIVE_SMC_PGPREP_FRONTNOTQ | \
SIFIVE_SMC_PGPREP_CLFPNOTQ | \
SIFIVE_SMC_PGPREP_PMCENAERR | \
SIFIVE_SMC_PGPREP_WAKE_DETECT | \
SIFIVE_SMC_PGPREP_BUSERR)
#define SIFIVE_SMC_PGPREP_ABORT (SIFIVE_SMC_PGPREP_EARLY_ABORT | \
SIFIVE_SMC_PGPREP_INTERNAL_ABORT)
#define SIFIVE_SMC_PG_ENA_REQ BIT(31)
#define SIFIVE_SMC_PG_WARM_RESET BIT(1)
#define SIFIVE_SMC_SYNCPMC_SYNC_REQ BIT(31)
#define SIFIVE_SMC_SYNCPMC_SYNC_WREQ BIT(30)
#define SIFIVE_SMC_SYNCPMC_SYNC_ACK BIT(29)
static struct aclint_mtimer_data smc_sync_timer;
static unsigned long smc0_base;
static void sifive_smc0_set_pmcsync(char regid, bool write_mode)
{
unsigned long addr = smc0_base + SIFIVE_SMC_SYNC_PMC_OFF;
u32 v = regid | SIFIVE_SMC_SYNCPMC_SYNC_REQ;
if (write_mode)
v |= SIFIVE_SMC_SYNCPMC_SYNC_WREQ;
writel(v, (void *)addr);
while (!(readl((void *)addr) & SIFIVE_SMC_SYNCPMC_SYNC_ACK));
}
static u64 sifive_smc0_time_read(volatile u64 *addr)
{
u32 lo, hi;
do {
sifive_smc0_set_pmcsync(SIFIVE_SMC_CYCLECOUNT_LO_OFF, false);
sifive_smc0_set_pmcsync(SIFIVE_SMC_CYCLECOUNT_HI_OFF, false);
hi = readl_relaxed((u32 *)addr + 1);
lo = readl_relaxed((u32 *)addr);
} while (hi != readl_relaxed((u32 *)addr + 1));
return ((u64)hi << 32) | (u64)lo;
}
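sifive_smc0_time_read() is the standard torn-read guard for a 64-bit counter exposed as two 32-bit MMIO words: sample high, sample low, and retry if the high word changed in between (here with a PMC sync before each sample). The generic pattern, stripped of the SMC details (sketch):

#include <stdint.h>

static uint64_t read_split_counter(volatile uint32_t *lo, volatile uint32_t *hi)
{
	uint32_t h, l;

	do {
		h = *hi;	/* sample the high word first */
		l = *lo;
	} while (h != *hi);	/* high word rolled over mid-read: retry */

	return ((uint64_t)h << 32) | l;
}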
static void sifive_smc0_set_resumepc(physical_addr_t raddr)
{
/* Set resumepc_lo */
writel((u32)raddr, (void *)(smc0_base + SIFIVE_SMC_RESUMEPC_LO_OFF));
/* copy resumepc_lo from SMC to PMC */
sifive_smc0_set_pmcsync(SIFIVE_SMC_RESUMEPC_LO_OFF, true);
#if __riscv_xlen > 32
/* Set resumepc_hi */
writel((u32)(raddr >> 32), (void *)(smc0_base + SIFIVE_SMC_RESUMEPC_HI_OFF));
/* copy resumepc_hi from SMC to PMC */
sifive_smc0_set_pmcsync(SIFIVE_SMC_RESUMEPC_HI_OFF, true);
#endif
}
static u32 sifive_smc0_get_pgprep_enarsp(void)
{
u32 v = readl((void *)(smc0_base + SIFIVE_SMC_PGPREP_OFF));
return v & SIFIVE_SMC_PGPREP_ENARSP;
}
static void sifive_smc0_set_pgprep_disreq(void)
{
unsigned long addr = smc0_base + SIFIVE_SMC_PGPREP_OFF;
u32 v = readl((void *)addr);
writel(v | SIFIVE_SMC_PGPREP_DIS_REQ, (void *)addr);
while (!(readl((void *)addr) & SIFIVE_SMC_PGPREP_DIS_ACK));
}
static u32 sifive_smc0_set_pgprep_enareq(void)
{
unsigned long addr = smc0_base + SIFIVE_SMC_PGPREP_OFF;
u32 v = readl((void *)addr);
writel(v | SIFIVE_SMC_PGPREP_ENA_REQ, (void *)addr);
while (!(readl((void *)addr) & SIFIVE_SMC_PGPREP_ENA_ACK));
v = readl((void *)addr);
return v & SIFIVE_SMC_PGPREP_ABORT;
}
static void sifive_smc0_set_pg_enareq(void)
{
unsigned long addr = smc0_base + SIFIVE_SMC_PG_OFF;
u32 v = readl((void *)addr);
writel(v | SIFIVE_SMC_PG_ENA_REQ, (void *)addr);
}
static inline void sifive_smc0_set_cg(bool enable)
{
unsigned long addr = smc0_base + SIFIVE_SMC_WFI_UNCORE_CG_OFF;
if (enable)
writel(0, (void *)addr);
else
writel(1, (void *)addr);
}
static int sifive_smc0_prep(void)
{
const struct sbi_domain *dom = &root;
struct sbi_scratch *scratch = sbi_scratch_thishart_ptr();
unsigned long i;
int rc;
u32 target;
if (!smc0_base)
return SBI_ENODEV;
/* Prevent all secondary tiles from waking up from PG state */
sbi_hartmask_for_each_hartindex(i, dom->possible_harts) {
target = sbi_hartindex_to_hartid(i);
if (target != current_hartid()) {
rc = sifive_tmc0_set_wakemask_enareq(target);
if (rc) {
sbi_printf("Fail to enable wakemask for hart %d\n",
target);
goto fail;
}
}
}
/* Check if all secondary tiles enter PG state */
sbi_hartmask_for_each_hartindex(i, dom->possible_harts) {
target = sbi_hartindex_to_hartid(i);
if (target != current_hartid() &&
!sifive_tmc0_is_pg(target)) {
sbi_printf("Hart %d not in the PG state\n", target);
goto fail;
}
}
rc = sifive_smc0_set_pgprep_enareq();
if (rc) {
sbi_printf("SMC0 error: abort code: 0x%x\n", rc);
goto fail;
}
rc = sifive_smc0_get_pgprep_enarsp();
if (rc) {
sifive_smc0_set_pgprep_disreq();
sbi_printf("SMC0 error: error response code: 0x%x\n", rc);
goto fail;
}
sifive_smc0_set_resumepc(scratch->warmboot_addr);
return SBI_OK;
fail:
sbi_hartmask_for_each_hartindex(i, dom->possible_harts) {
target = sbi_hartindex_to_hartid(i);
if (target != current_hartid())
sifive_tmc0_set_wakemask_disreq(target);
}
return SBI_EFAIL;
}
static int sifive_smc0_enter(void)
{
const struct sbi_domain *dom = &root;
struct sbi_scratch *scratch = sbi_scratch_thishart_ptr();
unsigned long i;
u32 target, rc;
/* Flush cache and check if there is wake detect or bus error */
if (fdt_cmo_llc_flush_all() &&
sbi_hart_has_extension(scratch, SBI_HART_EXT_XSIFIVE_CFLUSH_D_L1))
sifive_cflush();
rc = sifive_smc0_get_pgprep_enarsp();
if (rc) {
sbi_printf("SMC0 error: error response code: 0x%x\n", rc);
rc = SBI_EFAIL;
goto fail;
}
if (sbi_hart_has_extension(scratch, SBI_HART_EXT_XSIFIVE_CEASE)) {
sifive_smc0_set_pg_enareq();
while (1)
sifive_cease();
}
rc = SBI_ENOTSUPP;
fail:
sifive_smc0_set_pgprep_disreq();
sbi_hartmask_for_each_hartindex(i, dom->possible_harts) {
target = sbi_hartindex_to_hartid(i);
if (target != current_hartid())
sifive_tmc0_set_wakemask_disreq(target);
}
return rc;
}
static int sifive_smc0_pg(void)
{
int rc;
rc = sifive_smc0_prep();
if (rc)
return rc;
return sifive_smc0_enter();
}
static void sifive_smc0_mtime_update(void)
{
struct aclint_mtimer_data *mt = aclint_get_mtimer_data();
aclint_mtimer_update(mt, &smc_sync_timer);
}
static int sifive_smc0_system_suspend_check(u32 sleep_type)
{
return sleep_type == SBI_SUSP_SLEEP_TYPE_SUSPEND ? SBI_OK : SBI_EINVAL;
}
static int sifive_smc0_system_suspend(u32 sleep_type, unsigned long addr)
{
/* Disable the timer interrupt */
sbi_timer_exit(sbi_scratch_thishart_ptr());
return sifive_smc0_pg();
}
static void sifive_smc0_system_resume(void)
{
aplic_reinit_all();
sifive_smc0_mtime_update();
}
static struct sbi_system_suspend_device smc0_sys_susp = {
.name = "Sifive SMC0",
.system_suspend_check = sifive_smc0_system_suspend_check,
.system_suspend = sifive_smc0_system_suspend,
.system_resume = sifive_smc0_system_resume,
};
static int sifive_smc0_probe(const void *fdt, int nodeoff, const struct fdt_match *match)
{
int rc;
u64 addr;
rc = fdt_get_node_addr_size(fdt, nodeoff, 0, &addr, NULL);
if (rc)
return rc;
smc0_base = (unsigned long)addr;
smc_sync_timer.time_rd = sifive_smc0_time_read;
smc_sync_timer.mtime_addr = smc0_base + SIFIVE_SMC_CYCLECOUNT_LO_OFF;
sbi_system_suspend_set_device(&smc0_sys_susp);
sifive_smc0_set_cg(true);
return SBI_OK;
}
static const struct fdt_match sifive_smc0_match[] = {
{ .compatible = "sifive,smc0" },
{ },
};
const struct fdt_driver fdt_suspend_sifive_smc0 = {
.match_table = sifive_smc0_match,
.init = sifive_smc0_probe,
};

View File

@@ -9,6 +9,3 @@
carray-fdt_early_drivers-$(CONFIG_FDT_SUSPEND_RPMI) += fdt_suspend_rpmi carray-fdt_early_drivers-$(CONFIG_FDT_SUSPEND_RPMI) += fdt_suspend_rpmi
libsbiutils-objs-$(CONFIG_FDT_SUSPEND_RPMI) += suspend/fdt_suspend_rpmi.o libsbiutils-objs-$(CONFIG_FDT_SUSPEND_RPMI) += suspend/fdt_suspend_rpmi.o
carray-fdt_early_drivers-$(CONFIG_FDT_SUSPEND_SIFIVE_SMC0) += fdt_suspend_sifive_smc0
libsbiutils-objs-$(CONFIG_FDT_SUSPEND_SIFIVE_SMC0) += suspend/fdt_suspend_sifive_smc0.o

View File

@@ -109,34 +109,10 @@ static struct sbi_timer_device mtimer = {
.timer_event_stop = mtimer_event_stop .timer_event_stop = mtimer_event_stop
}; };
struct aclint_mtimer_data *aclint_get_mtimer_data(void) void aclint_mtimer_sync(struct aclint_mtimer_data *mt)
{
return mtimer_get_hart_data_ptr(sbi_scratch_thishart_ptr());
}
void aclint_mtimer_update(struct aclint_mtimer_data *mt,
struct aclint_mtimer_data *ref)
{ {
u64 v1, v2, mv, delta; u64 v1, v2, mv, delta;
u64 *mt_time_val, *ref_time_val; u64 *mt_time_val, *ref_time_val;
if (!mt || !ref || !mt->time_rd || !mt->time_wr || !ref->time_rd)
return;
mt_time_val = (void *)mt->mtime_addr;
ref_time_val = (void *)ref->mtime_addr;
if (!atomic_raw_xchg_ulong(&mt->time_delta_computed, 1)) {
v1 = mt->time_rd(mt_time_val);
mv = ref->time_rd(ref_time_val);
v2 = mt->time_rd(mt_time_val);
delta = mv - ((v1 / 2) + (v2 / 2));
mt->time_wr(false, mt->time_rd(mt_time_val) + delta,
mt_time_val);
}
}
void aclint_mtimer_sync(struct aclint_mtimer_data *mt)
{
struct aclint_mtimer_data *reference; struct aclint_mtimer_data *reference;
/* Sync-up non-shared MTIME if reference is available */ /* Sync-up non-shared MTIME if reference is available */
@@ -144,7 +120,17 @@ void aclint_mtimer_sync(struct aclint_mtimer_data *mt)
return; return;
reference = mt->time_delta_reference; reference = mt->time_delta_reference;
aclint_mtimer_update(mt, reference); mt_time_val = (void *)mt->mtime_addr;
ref_time_val = (void *)reference->mtime_addr;
if (!atomic_raw_xchg_ulong(&mt->time_delta_computed, 1)) {
v1 = mt->time_rd(mt_time_val);
mv = reference->time_rd(ref_time_val);
v2 = mt->time_rd(mt_time_val);
delta = mv - ((v1 / 2) + (v2 / 2));
mt->time_wr(false, mt->time_rd(mt_time_val) + delta,
mt_time_val);
}
} }
void aclint_mtimer_set_reference(struct aclint_mtimer_data *mt, void aclint_mtimer_set_reference(struct aclint_mtimer_data *mt,
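The refactored aclint_mtimer_update() brackets one read of the reference MTIME between two reads of the local MTIME and anchors the offset at their midpoint; writing v1/2 + v2/2 rather than (v1 + v2)/2 also sidesteps u64 overflow. Worked through once with made-up numbers (sketch):

/* v1 = 1000  (local, before)
 * mv = 5030  (reference)
 * v2 = 1020  (local, after)
 * midpoint = 1000/2 + 1020/2 = 1010
 * delta    = 5030 - 1010     = 4020
 * local MTIME is then advanced by 4020 to line up with the reference. */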

View File

@@ -39,7 +39,6 @@ static int timer_mtimer_cold_init(const void *fdt, int nodeoff,
struct aclint_mtimer_data *mt; struct aclint_mtimer_data *mt;
const struct timer_mtimer_quirks *quirks = match->data; const struct timer_mtimer_quirks *quirks = match->data;
bool is_clint = quirks && quirks->is_clint; bool is_clint = quirks && quirks->is_clint;
bool is_ref = false;
mtn = sbi_zalloc(sizeof(*mtn)); mtn = sbi_zalloc(sizeof(*mtn));
if (!mtn) if (!mtn)
@@ -111,16 +110,13 @@ static int timer_mtimer_cold_init(const void *fdt, int nodeoff,
} }
/* /*
* If we have a DT property to indicate which MTIMER is the reference, * Select first MTIMER device with no associated HARTs as our
* select the first MTIMER device that has it. Otherwise, select the * reference MTIMER device. This is only a temporary strategy
* first MTIMER device with no associated HARTs as our reference. * of selecting reference MTIMER device. In future, we might
* define an optional DT property or some other mechanism to
* help us select the reference MTIMER device.
*/ */
if (fdt_getprop(fdt, nodeoff, "riscv,reference-mtimer", NULL)) if (!mt->hart_count && !mt_reference) {
is_ref = true;
else if (!mt->hart_count)
is_ref = true;
if (is_ref && !mt_reference) {
mt_reference = mt; mt_reference = mt;
/* /*
* Set reference for already probed MTIMER devices
@@ -157,10 +153,8 @@ static const struct timer_mtimer_quirks thead_aclint_quirks = {
}; };
static const struct fdt_match timer_mtimer_match[] = { static const struct fdt_match timer_mtimer_match[] = {
{ .compatible = "mips,p8700-aclint-mtimer" },
{ .compatible = "riscv,clint0", .data = &sifive_clint_quirks }, { .compatible = "riscv,clint0", .data = &sifive_clint_quirks },
{ .compatible = "sifive,clint0", .data = &sifive_clint_quirks }, { .compatible = "sifive,clint0", .data = &sifive_clint_quirks },
{ .compatible = "sifive,clint2", .data = &sifive_clint_quirks },
{ .compatible = "thead,c900-clint", .data = &thead_clint_quirks }, { .compatible = "thead,c900-clint", .data = &thead_clint_quirks },
{ .compatible = "thead,c900-aclint-mtimer", { .compatible = "thead,c900-aclint-mtimer",
.data = &thead_aclint_quirks }, .data = &thead_aclint_quirks },

View File

@@ -0,0 +1,10 @@
# SPDX-License-Identifier: BSD-2-Clause
config PLATFORM_ARIANE_FPGA
bool
select FDT
select IPI_MSWI
select IRQCHIP_PLIC
select SERIAL_UART8250
select TIMER_MTIMER
default y

View File

View File

@@ -0,0 +1,42 @@
#
# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (C) 2019 FORTH-ICS/CARV
# Panagiotis Peristerakis <perister@ics.forth.gr>
#
# Compiler flags
platform-cppflags-y =
platform-cflags-y =
platform-asflags-y =
platform-ldflags-y =
# Object to build
platform-objs-y += platform.o
PLATFORM_RISCV_XLEN = 64
# Blobs to build
FW_JUMP=n
ifeq ($(PLATFORM_RISCV_XLEN), 32)
# This needs to be 4MB aligned for 32-bit support
FW_JUMP_ADDR=0x80400000
else
# This needs to be 2MB aligned for 64-bit support
FW_JUMP_ADDR=0x80200000
endif
FW_JUMP_FDT_ADDR=0x82200000
# Firmware with payload configuration.
FW_PAYLOAD=y
ifeq ($(PLATFORM_RISCV_XLEN), 32)
# This needs to be 4MB aligned for 32-bit support
FW_PAYLOAD_OFFSET=0x400000
else
# This needs to be 2MB aligned for 64-bit support
FW_PAYLOAD_OFFSET=0x200000
endif
FW_PAYLOAD_FDT_ADDR=0x82200000
FW_PAYLOAD_ALIGN=0x1000

View File

@@ -4,13 +4,26 @@
* Panagiotis Peristerakis <perister@ics.forth.gr> * Panagiotis Peristerakis <perister@ics.forth.gr>
*/ */
#include <platform_override.h> #include <sbi/riscv_asm.h>
#include <sbi/riscv_encoding.h>
#include <sbi/riscv_io.h>
#include <sbi/sbi_const.h>
#include <sbi/sbi_hart.h>
#include <sbi/sbi_platform.h>
#include <sbi_utils/fdt/fdt_helper.h> #include <sbi_utils/fdt/fdt_helper.h>
#include <sbi_utils/fdt/fdt_fixup.h> #include <sbi_utils/fdt/fdt_fixup.h>
#include <sbi_utils/ipi/aclint_mswi.h> #include <sbi_utils/ipi/aclint_mswi.h>
#include <sbi_utils/irqchip/plic.h> #include <sbi_utils/irqchip/plic.h>
#include <sbi_utils/serial/uart8250.h>
#include <sbi_utils/timer/aclint_mtimer.h> #include <sbi_utils/timer/aclint_mtimer.h>
#define ARIANE_UART_ADDR 0x10000000
#define ARIANE_UART_FREQ 50000000
#define ARIANE_UART_BAUDRATE 115200
#define ARIANE_UART_REG_SHIFT 2
#define ARIANE_UART_REG_WIDTH 4
#define ARIANE_UART_REG_OFFSET 0
#define ARIANE_UART_CAPS 0
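For reference, the reg-shift/reg-width pair describes how the 8250's byte-indexed registers sit on the bus: register n lives at base + (n << reg_shift) and is accessed with a reg_width-sized load, so Ariane's shift of 2 and width of 4 mean 32-bit registers on 4-byte strides. A sketch of the accessor a uart8250 driver typically builds from these values (illustrative):

#include <stdint.h>

static uint32_t u8250_read(uintptr_t base, unsigned shift, unsigned width, unsigned reg)
{
	volatile void *addr = (void *)(base + ((uintptr_t)reg << shift));

	/* Widths other than 1 and 4 omitted in this sketch */
	return (width == 4) ? *(volatile uint32_t *)addr
			    : *(volatile uint8_t *)addr;
}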
#define ARIANE_PLIC_ADDR 0xc000000 #define ARIANE_PLIC_ADDR 0xc000000
#define ARIANE_PLIC_SIZE (0x200000 + \ #define ARIANE_PLIC_SIZE (0x200000 + \
(ARIANE_HART_COUNT * 0x1000)) (ARIANE_HART_COUNT * 0x1000))
@@ -58,39 +71,16 @@ static struct aclint_mtimer_data mtimer = {
*/ */
static int ariane_early_init(bool cold_boot) static int ariane_early_init(bool cold_boot)
{ {
const void *fdt;
struct plic_data plic_data = plic;
unsigned long aclint_freq;
uint64_t clint_addr;
int rc;
if (!cold_boot) if (!cold_boot)
return 0; return 0;
rc = generic_early_init(cold_boot); return uart8250_init(ARIANE_UART_ADDR,
if (rc) ARIANE_UART_FREQ,
return rc; ARIANE_UART_BAUDRATE,
ARIANE_UART_REG_SHIFT,
fdt = fdt_get_address(); ARIANE_UART_REG_WIDTH,
ARIANE_UART_REG_OFFSET,
rc = fdt_parse_timebase_frequency(fdt, &aclint_freq); ARIANE_UART_CAPS);
if (!rc)
mtimer.mtime_freq = aclint_freq;
rc = fdt_parse_compat_addr(fdt, &clint_addr, "riscv,clint0");
if (!rc) {
mswi.addr = clint_addr;
mtimer.mtime_addr = clint_addr + CLINT_MTIMER_OFFSET +
ACLINT_DEFAULT_MTIME_OFFSET;
mtimer.mtimecmp_addr = clint_addr + CLINT_MTIMER_OFFSET +
ACLINT_DEFAULT_MTIMECMP_OFFSET;
}
rc = fdt_parse_plic(fdt, &plic_data, "riscv,plic0");
if (!rc)
plic = plic_data;
return aclint_mswi_cold_init(&mswi);
} }
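As a sanity check on the offset arithmetic above, assuming OpenSBI's usual constants (CLINT_MTIMER_OFFSET = 0x4000, ACLINT_DEFAULT_MTIMECMP_OFFSET = 0, ACLINT_DEFAULT_MTIME_OFFSET = 0x7ff8) and a hypothetical CLINT base of 0x2000000, the computed map is the classic CLINT layout:

/* mswi.addr            = 0x2000000                    (msip words)
 * mtimer.mtimecmp_addr = 0x2000000 + 0x4000 + 0      = 0x2004000
 * mtimer.mtime_addr    = 0x2000000 + 0x4000 + 0x7ff8 = 0x200bff8 */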
/* /*
@@ -117,6 +107,14 @@ static int ariane_irqchip_init(void)
return plic_cold_irqchip_init(&plic); return plic_cold_irqchip_init(&plic);
} }
/*
* Initialize IPI during cold boot.
*/
static int ariane_ipi_init(void)
{
return aclint_mswi_cold_init(&mswi);
}
/* /*
* Initialize ariane timer during cold boot. * Initialize ariane timer during cold boot.
*/ */
@@ -125,22 +123,24 @@ static int ariane_timer_init(void)
return aclint_mtimer_cold_init(&mtimer, NULL); return aclint_mtimer_cold_init(&mtimer, NULL);
} }
static int openhwgroup_ariane_platform_init(const void *fdt, int nodeoff, const struct fdt_match *match) /*
{ * Platform descriptor.
generic_platform_ops.early_init = ariane_early_init; */
generic_platform_ops.timer_init = ariane_timer_init; const struct sbi_platform_operations platform_ops = {
generic_platform_ops.irqchip_init = ariane_irqchip_init; .early_init = ariane_early_init,
generic_platform_ops.final_init = ariane_final_init; .final_init = ariane_final_init,
.irqchip_init = ariane_irqchip_init,
return 0; .ipi_init = ariane_ipi_init,
} .timer_init = ariane_timer_init,
static const struct fdt_match openhwgroup_ariane_match[] = {
{ .compatible = "eth,ariane-bare-dev" },
{ },
}; };
const struct fdt_driver openhwgroup_ariane = { const struct sbi_platform platform = {
.match_table = openhwgroup_ariane_match, .opensbi_version = OPENSBI_VERSION,
.init = openhwgroup_ariane_platform_init, .platform_version = SBI_PLATFORM_VERSION(0x0, 0x01),
.name = "ARIANE RISC-V",
.features = SBI_PLATFORM_DEFAULT_FEATURES,
.hart_count = ARIANE_HART_COUNT,
.hart_stack_size = SBI_PLATFORM_DEFAULT_HART_STACK_SIZE,
.heap_size = SBI_PLATFORM_DEFAULT_HEAP_SIZE(ARIANE_HART_COUNT),
.platform_ops_addr = (unsigned long)&platform_ops
}; };

View File

@@ -0,0 +1,10 @@
# SPDX-License-Identifier: BSD-2-Clause
config PLATFORM_OPENPITON_FPGA
bool
select FDT
select IPI_MSWI
select IRQCHIP_PLIC
select SERIAL_UART8250
select TIMER_MTIMER
default y

View File

@@ -0,0 +1,41 @@
#
# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2020 Western Digital Corporation or its affiliates.
#
# Compiler flags
platform-cppflags-y =
platform-cflags-y =
platform-asflags-y =
platform-ldflags-y =
# Objects to build
platform-objs-y += platform.o
PLATFORM_RISCV_XLEN = 64
# Blobs to build
FW_JUMP=n
ifeq ($(PLATFORM_RISCV_XLEN), 32)
# This needs to be 4MB aligned for 32-bit support
FW_JUMP_ADDR=0x80400000
else
# This needs to be 2MB aligned for 64-bit support
FW_JUMP_ADDR=0x80200000
endif
FW_JUMP_FDT_ADDR=0x82200000
# Firmware with payload configuration.
FW_PAYLOAD=y
ifeq ($(PLATFORM_RISCV_XLEN), 32)
# This needs to be 4MB aligned for 32-bit support
FW_PAYLOAD_OFFSET=0x400000
else
# This needs to be 2MB aligned for 64-bit support
FW_PAYLOAD_OFFSET=0x200000
endif
FW_PAYLOAD_FDT_ADDR=0x82200000
FW_PAYLOAD_ALIGN=0x1000

View File

@@ -3,27 +3,45 @@
* Copyright (c) 2020 Western Digital Corporation or its affiliates. * Copyright (c) 2020 Western Digital Corporation or its affiliates.
*/ */
#include <platform_override.h> #include <sbi/riscv_asm.h>
#include <sbi/riscv_encoding.h>
#include <sbi/riscv_io.h>
#include <sbi/sbi_const.h>
#include <sbi/sbi_hart.h>
#include <sbi/sbi_platform.h>
#include <sbi_utils/fdt/fdt_helper.h> #include <sbi_utils/fdt/fdt_helper.h>
#include <sbi_utils/fdt/fdt_fixup.h> #include <sbi_utils/fdt/fdt_fixup.h>
#include <sbi_utils/ipi/aclint_mswi.h> #include <sbi_utils/ipi/aclint_mswi.h>
#include <sbi_utils/irqchip/plic.h> #include <sbi_utils/irqchip/plic.h>
#include <sbi_utils/serial/uart8250.h>
#include <sbi_utils/timer/aclint_mtimer.h> #include <sbi_utils/timer/aclint_mtimer.h>
#define OPENPITON_DEFAULT_PLIC_ADDR 0xfff1100000ULL #define OPENPITON_DEFAULT_UART_ADDR 0xfff0c2c000
#define OPENPITON_DEFAULT_UART_FREQ 60000000
#define OPENPITON_DEFAULT_UART_BAUDRATE 115200
#define OPENPITON_DEFAULT_UART_REG_SHIFT 0
#define OPENPITON_DEFAULT_UART_REG_WIDTH 1
#define OPENPITON_DEFAULT_UART_REG_OFFSET 0
#define OPENPITON_DEFAULT_UART_CAPS 0
#define OPENPITON_DEFAULT_PLIC_ADDR 0xfff1100000
#define OPENPITON_DEFAULT_PLIC_SIZE (0x200000 + \ #define OPENPITON_DEFAULT_PLIC_SIZE (0x200000 + \
(OPENPITON_DEFAULT_HART_COUNT * 0x1000)) (OPENPITON_DEFAULT_HART_COUNT * 0x1000))
#define OPENPITON_DEFAULT_PLIC_NUM_SOURCES 2 #define OPENPITON_DEFAULT_PLIC_NUM_SOURCES 2
#define OPENPITON_DEFAULT_HART_COUNT 3 #define OPENPITON_DEFAULT_HART_COUNT 3
#define OPENPITON_DEFAULT_CLINT_ADDR 0xfff1020000ULL #define OPENPITON_DEFAULT_CLINT_ADDR 0xfff1020000
#define OPENPITON_DEFAULT_ACLINT_MTIMER_FREQ 1000000 #define OPENPITON_DEFAULT_ACLINT_MTIMER_FREQ 1000000
#define OPENPITON_DEFAULT_ACLINT_MSWI_ADDR \ #define OPENPITON_DEFAULT_ACLINT_MSWI_ADDR \
(OPENPITON_DEFAULT_CLINT_ADDR + CLINT_MSWI_OFFSET) (OPENPITON_DEFAULT_CLINT_ADDR + CLINT_MSWI_OFFSET)
#define OPENPITON_DEFAULT_ACLINT_MTIMER_ADDR \ #define OPENPITON_DEFAULT_ACLINT_MTIMER_ADDR \
(OPENPITON_DEFAULT_CLINT_ADDR + CLINT_MTIMER_OFFSET) (OPENPITON_DEFAULT_CLINT_ADDR + CLINT_MTIMER_OFFSET)
static struct platform_uart_data uart = {
OPENPITON_DEFAULT_UART_ADDR,
OPENPITON_DEFAULT_UART_FREQ,
OPENPITON_DEFAULT_UART_BAUDRATE,
};
static struct plic_data plic = { static struct plic_data plic = {
.addr = (unsigned long)OPENPITON_DEFAULT_PLIC_ADDR, .addr = OPENPITON_DEFAULT_PLIC_ADDR,
.size = OPENPITON_DEFAULT_PLIC_SIZE, .size = OPENPITON_DEFAULT_PLIC_SIZE,
.num_src = OPENPITON_DEFAULT_PLIC_NUM_SOURCES, .num_src = OPENPITON_DEFAULT_PLIC_NUM_SOURCES,
.flags = PLIC_FLAG_ARIANE_BUG, .flags = PLIC_FLAG_ARIANE_BUG,
@@ -35,7 +53,7 @@ static struct plic_data plic = {
}; };
static struct aclint_mswi_data mswi = { static struct aclint_mswi_data mswi = {
.addr = (unsigned long)OPENPITON_DEFAULT_ACLINT_MSWI_ADDR, .addr = OPENPITON_DEFAULT_ACLINT_MSWI_ADDR,
.size = ACLINT_MSWI_SIZE, .size = ACLINT_MSWI_SIZE,
.first_hartid = 0, .first_hartid = 0,
.hart_count = OPENPITON_DEFAULT_HART_COUNT, .hart_count = OPENPITON_DEFAULT_HART_COUNT,
@@ -43,10 +61,10 @@ static struct aclint_mswi_data mswi = {
static struct aclint_mtimer_data mtimer = { static struct aclint_mtimer_data mtimer = {
.mtime_freq = OPENPITON_DEFAULT_ACLINT_MTIMER_FREQ, .mtime_freq = OPENPITON_DEFAULT_ACLINT_MTIMER_FREQ,
.mtime_addr = (unsigned long)OPENPITON_DEFAULT_ACLINT_MTIMER_ADDR + .mtime_addr = OPENPITON_DEFAULT_ACLINT_MTIMER_ADDR +
ACLINT_DEFAULT_MTIME_OFFSET, ACLINT_DEFAULT_MTIME_OFFSET,
.mtime_size = ACLINT_DEFAULT_MTIME_SIZE, .mtime_size = ACLINT_DEFAULT_MTIME_SIZE,
.mtimecmp_addr = (unsigned long)OPENPITON_DEFAULT_ACLINT_MTIMER_ADDR + .mtimecmp_addr = OPENPITON_DEFAULT_ACLINT_MTIMER_ADDR +
ACLINT_DEFAULT_MTIMECMP_OFFSET, ACLINT_DEFAULT_MTIMECMP_OFFSET,
.mtimecmp_size = ACLINT_DEFAULT_MTIMECMP_SIZE, .mtimecmp_size = ACLINT_DEFAULT_MTIMECMP_SIZE,
.first_hartid = 0, .first_hartid = 0,
@@ -60,6 +78,7 @@ static struct aclint_mtimer_data mtimer = {
static int openpiton_early_init(bool cold_boot) static int openpiton_early_init(bool cold_boot)
{ {
const void *fdt; const void *fdt;
struct platform_uart_data uart_data = { 0 };
struct plic_data plic_data = plic; struct plic_data plic_data = plic;
unsigned long aclint_freq; unsigned long aclint_freq;
uint64_t clint_addr; uint64_t clint_addr;
@@ -69,9 +88,9 @@ static int openpiton_early_init(bool cold_boot)
return 0; return 0;
fdt = fdt_get_address(); fdt = fdt_get_address();
rc = generic_early_init(cold_boot); rc = fdt_parse_uart8250(fdt, &uart_data, "ns16550");
if (rc) if (!rc)
return rc; uart = uart_data;
rc = fdt_parse_plic(fdt, &plic_data, "riscv,plic0"); rc = fdt_parse_plic(fdt, &plic_data, "riscv,plic0");
if (!rc) if (!rc)
@@ -90,10 +109,11 @@ static int openpiton_early_init(bool cold_boot)
ACLINT_DEFAULT_MTIMECMP_OFFSET; ACLINT_DEFAULT_MTIMECMP_OFFSET;
} }
if (rc) return uart8250_init(uart.addr, uart.freq, uart.baud,
return rc; OPENPITON_DEFAULT_UART_REG_SHIFT,
OPENPITON_DEFAULT_UART_REG_WIDTH,
return aclint_mswi_cold_init(&mswi); OPENPITON_DEFAULT_UART_REG_OFFSET,
OPENPITON_DEFAULT_UART_CAPS);
} }
/* /*
@@ -120,6 +140,14 @@ static int openpiton_irqchip_init(void)
return plic_cold_irqchip_init(&plic); return plic_cold_irqchip_init(&plic);
} }
/*
* Initialize IPI during cold boot.
*/
static int openpiton_ipi_init(void)
{
return aclint_mswi_cold_init(&mswi);
}
/* /*
* Initialize openpiton timer during cold boot. * Initialize openpiton timer during cold boot.
*/ */
@@ -128,22 +156,25 @@ static int openpiton_timer_init(void)
return aclint_mtimer_cold_init(&mtimer, NULL); return aclint_mtimer_cold_init(&mtimer, NULL);
} }
static int openhwgroup_openpiton_platform_init(const void *fdt, int nodeoff, const struct fdt_match *match) /*
{ * Platform descriptor.
generic_platform_ops.early_init = openpiton_early_init; */
generic_platform_ops.timer_init = openpiton_timer_init; const struct sbi_platform_operations platform_ops = {
generic_platform_ops.irqchip_init = openpiton_irqchip_init; .early_init = openpiton_early_init,
generic_platform_ops.final_init = openpiton_final_init; .final_init = openpiton_final_init,
.irqchip_init = openpiton_irqchip_init,
return 0; .ipi_init = openpiton_ipi_init,
} .timer_init = openpiton_timer_init,
static const struct fdt_match openhwgroup_openpiton_match[] = {
{ .compatible = "openpiton,cva6platform" },
{ },
}; };
const struct fdt_driver openhwgroup_openpiton = { const struct sbi_platform platform = {
.match_table = openhwgroup_openpiton_match, .opensbi_version = OPENSBI_VERSION,
.init = openhwgroup_openpiton_platform_init, .platform_version = SBI_PLATFORM_VERSION(0x0, 0x01),
.name = "OPENPITON RISC-V",
.features = SBI_PLATFORM_DEFAULT_FEATURES,
.hart_count = OPENPITON_DEFAULT_HART_COUNT,
.hart_stack_size = SBI_PLATFORM_DEFAULT_HART_STACK_SIZE,
.heap_size =
SBI_PLATFORM_DEFAULT_HEAP_SIZE(OPENPITON_DEFAULT_HART_COUNT),
.platform_ops_addr = (unsigned long)&platform_ops
}; };

View File

@@ -23,11 +23,6 @@ config PLATFORM_GENERIC_MINOR_VER
range 0 65535 range 0 65535
default 1 default 1
config PLATFORM_GENERIC_FDT_EMPTY_SPACE
int "Platform FDT empty space (KB)"
range 0 1024
default 4
config PLATFORM_ALLWINNER_D1 config PLATFORM_ALLWINNER_D1
bool "Allwinner D1 support" bool "Allwinner D1 support"
depends on FDT_IRQCHIP_PLIC depends on FDT_IRQCHIP_PLIC
@@ -41,21 +36,6 @@ config PLATFORM_ANDES_AE350
select ANDES_PMA select ANDES_PMA
default n default n
config PLATFORM_ANDES_QILAI
bool "Andes QiLai support"
select ANDES_PMU
select ANDES_PMA
select ANDES_SBI
default n
config PLATFORM_OPENHWGROUP_OPENPITON
bool "OpenHWGroup Openpiton support"
default n
config PLATFORM_OPENHWGROUP_ARIANE
bool "OpenHWGroup Ariane support"
default n
config PLATFORM_RENESAS_RZFIVE config PLATFORM_RENESAS_RZFIVE
bool "Renesas RZ/Five support" bool "Renesas RZ/Five support"
select ANDES_PMA select ANDES_PMA
@@ -92,11 +72,6 @@ config PLATFORM_MIPS_P8700
bool "MIPS P8700 support" bool "MIPS P8700 support"
default n default n
config PLATFORM_SPACEMIT_K1
bool "Spacemit K1 support"
select FDT_HSM_SPACEMIT
default n
source "$(OPENSBI_SRC_DIR)/platform/generic/andes/Kconfig" source "$(OPENSBI_SRC_DIR)/platform/generic/andes/Kconfig"
source "$(OPENSBI_SRC_DIR)/platform/generic/thead/Kconfig" source "$(OPENSBI_SRC_DIR)/platform/generic/thead/Kconfig"

View File

@@ -35,7 +35,7 @@ static int ae350_hart_start(u32 hartid, ulong saddr)
* 2) the target hart is non-sleepable 25-series hart0 * 2) the target hart is non-sleepable 25-series hart0
*/ */
if (!sbi_init_count(hartindex) || (is_andes(25) && hartid == 0)) if (!sbi_init_count(hartindex) || (is_andes(25) && hartid == 0))
return sbi_ipi_raw_send(hartindex, false); return sbi_ipi_raw_send(hartindex);
/* Write wakeup command to the sleep hart */ /* Write wakeup command to the sleep hart */
smu_set_command(&smu, WAKEUP_CMD, hartid); smu_set_command(&smu, WAKEUP_CMD, hartid);

View File

@@ -1,4 +1,4 @@
// SPDX-License-Identifier: BSD-2-Clause // SPDX-License-Identifier: GPL-2.0
/* /*
* Copyright (C) 2023 Renesas Electronics Corp. * Copyright (C) 2023 Renesas Electronics Corp.
* Copyright (c) 2024 Andes Technology Corporation * Copyright (c) 2024 Andes Technology Corporation
@@ -17,6 +17,78 @@
#include <sbi/sbi_error.h> #include <sbi/sbi_error.h>
#include <sbi_utils/fdt/fdt_helper.h> #include <sbi_utils/fdt/fdt_helper.h>
static unsigned long andes_pma_read_num(unsigned int csr_num)
{
#define switchcase_csr_read(__csr_num, __val) \
case __csr_num: \
__val = csr_read(__csr_num); \
break;
#define switchcase_csr_read_2(__csr_num, __val) \
switchcase_csr_read(__csr_num + 0, __val) \
switchcase_csr_read(__csr_num + 1, __val)
#define switchcase_csr_read_4(__csr_num, __val) \
switchcase_csr_read_2(__csr_num + 0, __val) \
switchcase_csr_read_2(__csr_num + 2, __val)
#define switchcase_csr_read_8(__csr_num, __val) \
switchcase_csr_read_4(__csr_num + 0, __val) \
switchcase_csr_read_4(__csr_num + 4, __val)
#define switchcase_csr_read_16(__csr_num, __val) \
switchcase_csr_read_8(__csr_num + 0, __val) \
switchcase_csr_read_8(__csr_num + 8, __val)
unsigned long ret = 0;
switch (csr_num) {
switchcase_csr_read_4(CSR_PMACFG0, ret)
switchcase_csr_read_16(CSR_PMAADDR0, ret)
default:
sbi_panic("%s: Unknown Andes PMA CSR %#x", __func__, csr_num);
break;
}
return ret;
#undef switchcase_csr_read_16
#undef switchcase_csr_read_8
#undef switchcase_csr_read_4
#undef switchcase_csr_read_2
#undef switchcase_csr_read
}
static void andes_pma_write_num(unsigned int csr_num, unsigned long val)
{
#define switchcase_csr_write(__csr_num, __val) \
case __csr_num: \
csr_write(__csr_num, __val); \
break;
#define switchcase_csr_write_2(__csr_num, __val) \
switchcase_csr_write(__csr_num + 0, __val) \
switchcase_csr_write(__csr_num + 1, __val)
#define switchcase_csr_write_4(__csr_num, __val) \
switchcase_csr_write_2(__csr_num + 0, __val) \
switchcase_csr_write_2(__csr_num + 2, __val)
#define switchcase_csr_write_8(__csr_num, __val) \
switchcase_csr_write_4(__csr_num + 0, __val) \
switchcase_csr_write_4(__csr_num + 4, __val)
#define switchcase_csr_write_16(__csr_num, __val) \
switchcase_csr_write_8(__csr_num + 0, __val) \
switchcase_csr_write_8(__csr_num + 8, __val)
switch (csr_num) {
switchcase_csr_write_4(CSR_PMACFG0, val)
switchcase_csr_write_16(CSR_PMAADDR0, val)
default:
sbi_panic("%s: Unknown Andes PMA CSR %#x", __func__, csr_num);
break;
}
#undef switchcase_csr_write_16
#undef switchcase_csr_write_8
#undef switchcase_csr_write_4
#undef switchcase_csr_write_2
#undef switchcase_csr_write
}
static inline bool not_napot(unsigned long addr, unsigned long size) static inline bool not_napot(unsigned long addr, unsigned long size)
{ {
return ((size & (size - 1)) || (addr & (size - 1))); return ((size & (size - 1)) || (addr & (size - 1)));
@@ -36,11 +108,11 @@ static char get_pmaxcfg(int entry_id)
#if __riscv_xlen == 64 #if __riscv_xlen == 64
pmacfg_addr = CSR_PMACFG0 + ((entry_id / 8) ? 2 : 0); pmacfg_addr = CSR_PMACFG0 + ((entry_id / 8) ? 2 : 0);
pmacfg_val = csr_read_num(pmacfg_addr); pmacfg_val = andes_pma_read_num(pmacfg_addr);
pmaxcfg = (char *)&pmacfg_val + (entry_id % 8); pmaxcfg = (char *)&pmacfg_val + (entry_id % 8);
#elif __riscv_xlen == 32 #elif __riscv_xlen == 32
pmacfg_addr = CSR_PMACFG0 + (entry_id / 4); pmacfg_addr = CSR_PMACFG0 + (entry_id / 4);
pmacfg_val = csr_read_num(pmacfg_addr); pmacfg_val = andes_pma_read_num(pmacfg_addr);
pmaxcfg = (char *)&pmacfg_val + (entry_id % 4); pmaxcfg = (char *)&pmacfg_val + (entry_id % 4);
#else #else
#error "Unexpected __riscv_xlen" #error "Unexpected __riscv_xlen"
@@ -56,17 +128,17 @@ static void set_pmaxcfg(int entry_id, char flags)
#if __riscv_xlen == 64 #if __riscv_xlen == 64
pmacfg_addr = CSR_PMACFG0 + ((entry_id / 8) ? 2 : 0); pmacfg_addr = CSR_PMACFG0 + ((entry_id / 8) ? 2 : 0);
pmacfg_val = csr_read_num(pmacfg_addr); pmacfg_val = andes_pma_read_num(pmacfg_addr);
pmaxcfg = (char *)&pmacfg_val + (entry_id % 8); pmaxcfg = (char *)&pmacfg_val + (entry_id % 8);
#elif __riscv_xlen == 32 #elif __riscv_xlen == 32
pmacfg_addr = CSR_PMACFG0 + (entry_id / 4); pmacfg_addr = CSR_PMACFG0 + (entry_id / 4);
pmacfg_val = csr_read_num(pmacfg_addr); pmacfg_val = andes_pma_read_num(pmacfg_addr);
pmaxcfg = (char *)&pmacfg_val + (entry_id % 4); pmaxcfg = (char *)&pmacfg_val + (entry_id % 4);
#else #else
#error "Unexpected __riscv_xlen" #error "Unexpected __riscv_xlen"
#endif #endif
*pmaxcfg = flags; *pmaxcfg = flags;
csr_write_num(pmacfg_addr, pmacfg_val); andes_pma_write_num(pmacfg_addr, pmacfg_val);
} }
static void decode_pmaaddrx(int entry_id, unsigned long *start, static void decode_pmaaddrx(int entry_id, unsigned long *start,
@@ -80,7 +152,7 @@ static void decode_pmaaddrx(int entry_id, unsigned long *start,
* size = 2 ^ (k + 3) * size = 2 ^ (k + 3)
* start = 4 * ($pmaaddr - (size / 8) + 1) * start = 4 * ($pmaaddr - (size / 8) + 1)
*/ */
pmaaddr = csr_read_num(CSR_PMAADDR0 + entry_id); pmaaddr = andes_pma_read_num(CSR_PMAADDR0 + entry_id);
k = sbi_ffz(pmaaddr); k = sbi_ffz(pmaaddr);
*size = 1 << (k + 3); *size = 1 << (k + 3);
*start = (pmaaddr - (1 << k) + 1) << 2; *start = (pmaaddr - (1 << k) + 1) << 2;
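Worked through once for concreteness, taking a hypothetical 4 KiB region at 0x80000000 (the encode side, pmaaddr = (addr >> 2) + (size >> 3) - 1, appears in andes_pma_setup just below):

/* encode: pmaaddr = (0x80000000 >> 2) + (0x1000 >> 3) - 1 = 0x200001ff
 * decode: k = ffz(0x200001ff) = 9	(bits 0..8 are all ones)
 *	   size  = 1 << (9 + 3)			 = 0x1000
 *	   start = (0x200001ff - 0x200 + 1) << 2 = 0x80000000 */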
@@ -127,9 +199,9 @@ static unsigned long andes_pma_setup(const struct andes_pma_region *pma_region,
pmaaddr = (addr >> 2) + (size >> 3) - 1; pmaaddr = (addr >> 2) + (size >> 3) - 1;
csr_write_num(CSR_PMAADDR0 + entry_id, pmaaddr); andes_pma_write_num(CSR_PMAADDR0 + entry_id, pmaaddr);
return csr_read_num(CSR_PMAADDR0 + entry_id) == pmaaddr ? return andes_pma_read_num(CSR_PMAADDR0 + entry_id) == pmaaddr ?
pmaaddr : SBI_EINVAL; pmaaddr : SBI_EINVAL;
} }
@@ -357,7 +429,7 @@ int andes_sbi_free_pma(unsigned long pa)
continue; continue;
set_pmaxcfg(i, ANDES_PMACFG_ETYP_OFF); set_pmaxcfg(i, ANDES_PMACFG_ETYP_OFF);
csr_write_num(CSR_PMAADDR0 + i, 0); andes_pma_write_num(CSR_PMAADDR0 + i, 0);
return SBI_SUCCESS; return SBI_SUCCESS;
} }

View File

@@ -5,9 +5,6 @@
carray-platform_override_modules-$(CONFIG_PLATFORM_ANDES_AE350) += andes_ae350 carray-platform_override_modules-$(CONFIG_PLATFORM_ANDES_AE350) += andes_ae350
platform-objs-$(CONFIG_PLATFORM_ANDES_AE350) += andes/ae350.o andes/sleep.o platform-objs-$(CONFIG_PLATFORM_ANDES_AE350) += andes/ae350.o andes/sleep.o
carray-platform_override_modules-$(CONFIG_PLATFORM_ANDES_QILAI) += andes_qilai
platform-objs-$(CONFIG_PLATFORM_ANDES_QILAI) += andes/qilai.o
platform-objs-$(CONFIG_ANDES_PMA) += andes/andes_pma.o platform-objs-$(CONFIG_ANDES_PMA) += andes/andes_pma.o
platform-objs-$(CONFIG_ANDES_SBI) += andes/andes_sbi.o platform-objs-$(CONFIG_ANDES_SBI) += andes/andes_sbi.o
platform-objs-$(CONFIG_ANDES_PMU) += andes/andes_pmu.o platform-objs-$(CONFIG_ANDES_PMU) += andes/andes_pmu.o

View File

@@ -1,66 +0,0 @@
/*
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2025 Andes Technology Corporation
*
*/
#include <andes/andes_pma.h>
#include <andes/andes_pmu.h>
#include <andes/andes_sbi.h>
#include <andes/qilai.h>
#include <platform_override.h>
#include <sbi/sbi_domain.h>
#include <sbi_utils/fdt/fdt_driver.h>
#include <sbi_utils/fdt/fdt_helper.h>
static int andes_qilai_final_init(bool cold_boot)
{
int rc;
/*
* Set the memory attribute for 3 PCIE endpoint regions,
* and they are all non-idempotent and non-bufferable.
*/
rc = andes_sbi_set_pma((unsigned long)PCIE0_BASE, (unsigned long)PCIE0_SIZE,
ANDES_PMACFG_ETYP_NAPOT |
ANDES_PMACFG_MTYP_DEV_NOBUF);
if (rc)
return rc;
rc = andes_sbi_set_pma((unsigned long)PCIE1_BASE, (unsigned long)PCIE1_SIZE,
ANDES_PMACFG_ETYP_NAPOT |
ANDES_PMACFG_MTYP_DEV_NOBUF);
if (rc)
return rc;
rc = andes_sbi_set_pma((unsigned long)PCIE2_BASE, (unsigned long)PCIE2_SIZE,
ANDES_PMACFG_ETYP_NAPOT |
ANDES_PMACFG_MTYP_DEV_NOBUF);
if (rc)
return rc;
return generic_final_init(cold_boot);
}
static int andes_qilai_platform_init(const void *fdt, int nodeoff,
const struct fdt_match *match)
{
generic_platform_ops.final_init = andes_qilai_final_init;
generic_platform_ops.extensions_init = andes_pmu_extensions_init;
generic_platform_ops.pmu_init = andes_pmu_init;
generic_platform_ops.vendor_ext_provider =
andes_sbi_vendor_ext_provider;
return 0;
}
static const struct fdt_match andes_qilai_match[] = {
{ .compatible = "andestech,qilai" },
{},
};
const struct fdt_driver andes_qilai = {
.match_table = andes_qilai_match,
.init = andes_qilai_platform_init,
};

View File

@@ -1,8 +1,5 @@
CONFIG_PLATFORM_ALLWINNER_D1=y CONFIG_PLATFORM_ALLWINNER_D1=y
CONFIG_PLATFORM_ANDES_AE350=y CONFIG_PLATFORM_ANDES_AE350=y
CONFIG_PLATFORM_ANDES_QILAI=y
CONFIG_PLATFORM_OPENHWGROUP_ARIANE=y
CONFIG_PLATFORM_OPENHWGROUP_OPENPITON=y
CONFIG_PLATFORM_RENESAS_RZFIVE=y CONFIG_PLATFORM_RENESAS_RZFIVE=y
CONFIG_PLATFORM_SIFIVE_FU540=y CONFIG_PLATFORM_SIFIVE_FU540=y
CONFIG_PLATFORM_SIFIVE_FU740=y CONFIG_PLATFORM_SIFIVE_FU740=y
@@ -10,11 +7,6 @@ CONFIG_PLATFORM_SOPHGO_SG2042=y
CONFIG_PLATFORM_STARFIVE_JH7110=y CONFIG_PLATFORM_STARFIVE_JH7110=y
CONFIG_PLATFORM_THEAD=y CONFIG_PLATFORM_THEAD=y
CONFIG_PLATFORM_MIPS_P8700=y CONFIG_PLATFORM_MIPS_P8700=y
CONFIG_PLATFORM_SPACEMIT_K1=y
CONFIG_FDT_CACHE=y
CONFIG_FDT_CACHE_SIFIVE_CCACHE=y
CONFIG_FDT_CACHE_SIFIVE_EC=y
CONFIG_FDT_CACHE_SIFIVE_PL2=y
CONFIG_FDT_CPPC=y CONFIG_FDT_CPPC=y
CONFIG_FDT_CPPC_RPMI=y CONFIG_FDT_CPPC_RPMI=y
CONFIG_FDT_GPIO=y CONFIG_FDT_GPIO=y
@@ -23,7 +15,6 @@ CONFIG_FDT_GPIO_SIFIVE=y
CONFIG_FDT_GPIO_STARFIVE=y CONFIG_FDT_GPIO_STARFIVE=y
CONFIG_FDT_HSM=y CONFIG_FDT_HSM=y
CONFIG_FDT_HSM_RPMI=y CONFIG_FDT_HSM_RPMI=y
CONFIG_FDT_HSM_SIFIVE_TMC0=y
CONFIG_FDT_I2C=y CONFIG_FDT_I2C=y
CONFIG_FDT_I2C_SIFIVE=y CONFIG_FDT_I2C_SIFIVE=y
CONFIG_FDT_I2C_DW=y CONFIG_FDT_I2C_DW=y
@@ -44,8 +35,8 @@ CONFIG_FDT_RESET_ATCWDT200=y
CONFIG_FDT_RESET_GPIO=y CONFIG_FDT_RESET_GPIO=y
CONFIG_FDT_RESET_HTIF=y CONFIG_FDT_RESET_HTIF=y
CONFIG_FDT_RESET_RPMI=y CONFIG_FDT_RESET_RPMI=y
CONFIG_FDT_RESET_SG2042_HWMON_MCU=y
CONFIG_FDT_RESET_SUNXI_WDT=y CONFIG_FDT_RESET_SUNXI_WDT=y
CONFIG_FDT_RESET_SG2042_HWMON_MCU=y
CONFIG_FDT_RESET_SYSCON=y CONFIG_FDT_RESET_SYSCON=y
CONFIG_FDT_SERIAL=y CONFIG_FDT_SERIAL=y
CONFIG_FDT_SERIAL_CADENCE=y CONFIG_FDT_SERIAL_CADENCE=y
@@ -60,14 +51,10 @@ CONFIG_FDT_SERIAL_XILINX_UARTLITE=y
CONFIG_SERIAL_SEMIHOSTING=y CONFIG_SERIAL_SEMIHOSTING=y
CONFIG_FDT_SUSPEND=y CONFIG_FDT_SUSPEND=y
CONFIG_FDT_SUSPEND_RPMI=y CONFIG_FDT_SUSPEND_RPMI=y
CONFIG_FDT_SUSPEND_SIFIVE_SMC0=y
CONFIG_FDT_TIMER=y CONFIG_FDT_TIMER=y
CONFIG_FDT_TIMER_MTIMER=y CONFIG_FDT_TIMER_MTIMER=y
CONFIG_FDT_TIMER_PLMT=y CONFIG_FDT_TIMER_PLMT=y
CONFIG_FDT_MPXY=y CONFIG_FDT_MPXY=y
CONFIG_FDT_MPXY_RPMI_MBOX=y CONFIG_FDT_MPXY_RPMI_MBOX=y
CONFIG_FDT_MPXY_RPMI_CLOCK=y CONFIG_FDT_MPXY_RPMI_CLOCK=y
CONFIG_FDT_MPXY_RPMI_VOLTAGE=y
CONFIG_FDT_MPXY_RPMI_DEVICE_POWER=y
CONFIG_FDT_MPXY_RPMI_PERFORMANCE=y
CONFIG_FDT_MPXY_RPMI_SYSMSI=y CONFIG_FDT_MPXY_RPMI_SYSMSI=y

Some files were not shown because too many files have changed in this diff.