forked from Mirrors/opensbi
Compare commits
101 Commits
| SHA1 |
|---|
| 111738090c |
| 843e916dca |
| 5eec86eec8 |
| 42139bb9b7 |
| b6da690ffb |
| 809df05c35 |
| 644a344226 |
| 4339e85794 |
| afc24152bb |
| dffa24b7f5 |
| 6a20872c91 |
| d65c1e95a7 |
| 51fe6a8bc9 |
| 1f9677582a |
| 126c9d34d2 |
| b8b26fe121 |
| f71bb323f4 |
| ec51e91eaa |
| 35aece218a |
| de376252f4 |
| 4997eb28da |
| 825d0e918a |
| d28e2fa9cc |
| c9f856e23f |
| da05980de6 |
| c75f468ad5 |
| fade4399d2 |
| 976a6a8612 |
| 2e9dc3b430 |
| 5de1d3240f |
| 38a6106b10 |
| e8dfa55f3d |
| 834d0d9f26 |
| a28e51016e |
| fa911ebe72 |
| 0250db4dad |
| b210376fe2 |
| 631efeeb49 |
| b34caeef81 |
| 34657b377f |
| 90c3b94094 |
| 667eed2266 |
| 32c1d38dcf |
| 37b72cb575 |
| ab23d8a392 |
| 8f8c393155 |
| 1514a32730 |
| 94f0f84656 |
| c2d2b9140a |
| 64904e5d5c |
| 8752c809b3 |
| ce4dc7649e |
| 8ea972838c |
| d6b684ec86 |
| 1207c7568f |
| ac16c6b604 |
| 63aacbd782 |
| 1db95da299 |
| 55296fd27c |
| 3990c8ee07 |
| ca380bcb10 |
| fb70fe8b98 |
| 1f84ec2ac2 |
| e3eb59a396 |
| 38c31ffb8f |
| f7d060c26a |
| 5de8c1d499 |
| 040f3100a9 |
| 8408845cc9 |
| 944db4eced |
| d9afef57b7 |
| f04ae48263 |
| 55135abcd5 |
| cb70dffa0a |
| 85f22b38c8 |
| ee92afa638 |
| 17b8d1900d |
| 153cdeea53 |
| 8dcd1448e7 |
| 64a38525e6 |
| 1ffbd063c4 |
| 6a1f53bc2d |
| 4b687e3669 |
| 6068efc7f5 |
| bbe9a23060 |
| 525ac970b3 |
| 3204d74486 |
| 84044ee83c |
| cc546e1a06 |
| 079bf6f0f9 |
| ffd3ed976d |
| 0b7c2e0d60 |
| e10a45752f |
| 4825a3f87f |
| 3876f8cd1e |
| 5b305e30a5 |
| 663b05a5f7 |
| edfbc1285d |
| ea5abd1f5e |
| 61083eb504 |
| b8f370aa37 |
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0-only
+# SPDX-License-Identifier: BSD-2-Clause
# See here for more information about the format and editor support:
# https://editorconfig.org/

Makefile
@@ -151,6 +151,12 @@ endif

# Guess the compiler's XLEN
OPENSBI_CC_XLEN := $(shell TMP=`$(CC) $(CLANG_TARGET) -dumpmachine | sed 's/riscv\([0-9][0-9]\).*/\1/'`; echo $${TMP})
+# If guessing XLEN fails, default to 64
+ifneq ($(OPENSBI_CC_XLEN),32)
+ifneq ($(OPENSBI_CC_XLEN),64)
+OPENSBI_CC_XLEN = 64
+endif
+endif

# Guess the compiler's ABI and ISA
ifneq ($(CC_IS_CLANG),y)
@@ -374,6 +380,7 @@ GENFLAGS += $(firmware-genflags-y)
CFLAGS = -g -Wall -Werror -ffreestanding -nostdlib -fno-stack-protector -fno-strict-aliasing -ffunction-sections -fdata-sections
CFLAGS += -fno-omit-frame-pointer -fno-optimize-sibling-calls
CFLAGS += -fno-asynchronous-unwind-tables -fno-unwind-tables
CFLAGS += -std=gnu11
CFLAGS += $(REPRODUCIBLE_FLAGS)
# Optionally supported flags
ifeq ($(CC_SUPPORT_VECTOR),y)
@@ -444,11 +451,14 @@ DTSCPPFLAGS = $(CPPFLAGS) -nostdinc -nostdlib -fno-builtin -D__DTS__ -x assemble

ifneq ($(DEBUG),)
CFLAGS += -O0
-ELFFLAGS += -Wl,--print-gc-sections
else
CFLAGS += -O2
endif

+ifeq ($(V), 1)
+ELFFLAGS += -Wl,--print-gc-sections
+endif
+
# Setup functions for compilation
define dynamic_flags
-I$(shell dirname $(2)) -D__OBJNAME__=$(subst -,_,$(shell basename $(1) .o))
@@ -13,7 +13,7 @@ The FPGA SoC currently contains the following peripherals:
- Bootrom containing zero stage bootloader and device tree.

To build platform specific library and firmwares, provide the
-*PLATFORM=fpga/ariane* parameter to the top level `make` command.
+*PLATFORM=generic* parameter to the top level `make` command.

Platform Options
----------------
@@ -26,7 +26,7 @@ Building Ariane FPGA Platform
**Linux Kernel Payload**

```
-make PLATFORM=fpga/ariane FW_PAYLOAD_PATH=<linux_build_directory>/arch/riscv/boot/Image
+make PLATFORM=generic FW_PAYLOAD_PATH=<linux_build_directory>/arch/riscv/boot/Image
```

Booting Ariane FPGA Platform
@@ -7,8 +7,8 @@ processor from ETH Zurich. To this end, Ariane has been equipped with a
different L1 cache subsystem that follows a write-through protocol and that has
support for cache invalidations and atomics.

-To build platform specific library and firmwares, provide the
-*PLATFORM=fpga/openpiton* parameter to the top level `make` command.
+To build platform specific library and firmwares, provide the *PLATFORM=generic*
+parameter to the top level `make` command.

Platform Options
----------------
@@ -21,7 +21,7 @@ Building Ariane FPGA Platform
**Linux Kernel Payload**

```
-make PLATFORM=fpga/openpiton FW_PAYLOAD_PATH=<linux_build_directory>/arch/riscv/boot/Image
+make PLATFORM=generic FW_PAYLOAD_PATH=<linux_build_directory>/arch/riscv/boot/Image
```

Booting Ariane FPGA Platform
@@ -47,6 +47,8 @@ RISC-V Platforms Using Generic Platform
* **SiFive HiFive Unleashed** (*[sifive_fu540.md]*)
* **Spike** (*[spike.md]*)
* **T-HEAD C9xx series Processors** (*[thead-c9xx.md]*)
+* **OpenPiton FPGA SoC** (*[fpga-openpiton.md]*)
+* **Ariane FPGA SoC** (*[fpga-ariane.md]*)

[andes-ae350.md]: andes-ae350.md
[qemu_virt.md]: qemu_virt.md
@@ -55,3 +57,5 @@ RISC-V Platforms Using Generic Platform
[sifive_fu540.md]: sifive_fu540.md
[spike.md]: spike.md
[thead-c9xx.md]: thead-c9xx.md
+[fpga-openpiton.md]: fpga-openpiton.md
+[fpga-ariane.md]: fpga-ariane.md
@@ -21,20 +21,12 @@ OpenSBI currently supports the following virtual and hardware platforms:
* **Kendryte K210 SoC**: Platform support for the Kendryte K210 SoC used on
  boards such as the Kendryte KD233 or the Sipeed MAIX Dock.

-* **Ariane FPGA SoC**: Platform support for the Ariane FPGA SoC used on
-  Genesys 2 board. More details on this platform can be found in the file
-  *[fpga-ariane.md]*.
-
* **Andes AE350 SoC**: Platform support for the Andes's SoC (AE350). More
  details on this platform can be found in the file *[andes-ae350.md]*.

* **Spike**: Platform support for the Spike emulator. More
  details on this platform can be found in the file *[spike.md]*.

-* **OpenPiton FPGA SoC**: Platform support OpenPiton research platform based
-  on ariane core. More details on this platform can be found in the file
-  *[fpga-openpiton.md]*.
-
* **Shakti C-class SoC Platform**: Platform support for Shakti C-class
  processor based SOCs. More details on this platform can be found in the
  file *[shakti_cclass.md]*.
@@ -52,10 +44,8 @@ comments to facilitate the implementation.
[generic.md]: generic.md
[qemu_virt.md]: qemu_virt.md
[sifive_fu540.md]: sifive_fu540.md
-[fpga-ariane.md]: fpga-ariane.md
[andes-ae350.md]: andes-ae350.md
[thead-c910.md]: thead-c910.md
[spike.md]: spike.md
-[fpga-openpiton.md]: fpga-openpiton.md
[shakti_cclass.md]: shakti_cclass.md
[renesas-rzfive.md]: renesas-rzfive.md
@@ -1 +1,28 @@
# SPDX-License-Identifier: BSD-2-Clause
+
+menu "Stack Protector Support"
+
+config STACK_PROTECTOR
+    bool "Stack Protector buffer overflow detection"
+    default n
+    help
+      This option turns on the "stack-protector" compiler feature.
+
+config STACK_PROTECTOR_STRONG
+    bool "Strong Stack Protector"
+    depends on STACK_PROTECTOR
+    default n
+    help
+      Turn on the "stack-protector" with "-fstack-protector-strong" option.
+      Like -fstack-protector but includes additional functions to be
+      protected.
+
+config STACK_PROTECTOR_ALL
+    bool "Almighty Stack Protector"
+    depends on STACK_PROTECTOR
+    default n
+    help
+      Turn on the "stack-protector" with "-fstack-protector-all" option.
+      Like -fstack-protector except that all functions are protected.
+
+endmenu
@@ -76,21 +76,21 @@ _sc_fail:
    li   t0, FW_TEXT_START    /* link start */
    lla  t1, _fw_start        /* load start */
    sub  t2, t1, t0           /* load offset */
-   lla  t0, __rel_dyn_start
-   lla  t1, __rel_dyn_end
+   lla  t0, __rela_dyn_start
+   lla  t1, __rela_dyn_end
    beq  t0, t1, _relocate_done
2:
-   REG_L t5, REGBYTES(t0)           /* t5 <-- relocation info:type */
+   REG_L t5, __SIZEOF_LONG__(t0)    /* t5 <-- relocation info:type */
    li   t3, R_RISCV_RELATIVE        /* reloc type R_RISCV_RELATIVE */
    bne  t5, t3, 3f
    REG_L t3, 0(t0)
-   REG_L t5, (REGBYTES * 2)(t0)         /* t5 <-- addend */
+   REG_L t5, (__SIZEOF_LONG__ * 2)(t0)  /* t5 <-- addend */
    add  t5, t5, t2
    add  t3, t3, t2
    REG_S t5, 0(t3)    /* store runtime address to the GOT entry */

3:
-   addi t0, t0, (REGBYTES * 3)
+   addi t0, t0, (__SIZEOF_LONG__ * 3)
    blt  t0, t1, 2b
_relocate_done:
    /* At this point we are running from link address */
@@ -736,6 +736,27 @@ _reset_regs:

    ret

+   .section .rodata
+.Lstack_corrupt_msg:
+   .string "stack smashing detected\n"
+
+   /* This will be called when the stack corruption is detected */
+   .section .text
+   .align 3
+   .globl __stack_chk_fail
+   .type __stack_chk_fail, %function
+__stack_chk_fail:
+   la   a0, .Lstack_corrupt_msg
+   call sbi_panic
+
+   /* Initial value of the stack guard variable */
+   .section .data
+   .align 3
+   .globl __stack_chk_guard
+   .type __stack_chk_guard, %object
+__stack_chk_guard:
+   RISCV_PTR 0x95B5FF5A
+
#ifdef FW_FDT_PATH
    .section .rodata
    .align 4
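The relocation walk in the hunk above maps directly onto the standard ELF `Rela` layout of three machine words per entry (offset, info, addend), which is why the stride is now `(__SIZEOF_LONG__ * 3)`. A minimal C sketch of the same loop, under that assumption; the names below are illustrative, not OpenSBI API:

```c
#define R_RISCV_RELATIVE 3 /* relocation type number from the RISC-V psABI */

/* One .rela.dyn entry: three machine words, matching the
 * (__SIZEOF_LONG__ * 3) stride used by the assembly above. */
struct rela_entry {
    unsigned long offset; /* link-time address of the slot to patch */
    unsigned long info;   /* relocation type */
    unsigned long addend; /* value relative to the link base */
};

/* Apply R_RISCV_RELATIVE relocations, where load_offset is the
 * difference between the load address and the link address. */
static void relocate(struct rela_entry *start, struct rela_entry *end,
                     unsigned long load_offset)
{
    for (struct rela_entry *r = start; r < end; r++) {
        if (r->info != R_RISCV_RELATIVE)
            continue;
        /* Store the runtime address into the patched slot. */
        *(unsigned long *)(r->offset + load_offset) =
            r->addend + load_offset;
    }
}
```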
@@ -47,9 +47,9 @@
    . = ALIGN(0x1000); /* Ensure next section is page aligned */

    .rela.dyn : {
-       PROVIDE(__rel_dyn_start = .);
+       PROVIDE(__rela_dyn_start = .);
        *(.rela*)
-       PROVIDE(__rel_dyn_end = .);
+       PROVIDE(__rela_dyn_end = .);
    }

    PROVIDE(_rodata_end = .);
@@ -66,3 +66,12 @@ endif
ifdef FW_OPTIONS
firmware-genflags-y += -DFW_OPTIONS=$(FW_OPTIONS)
endif

+ifeq ($(CONFIG_STACK_PROTECTOR),y)
+stack-protector-cflags-$(CONFIG_STACK_PROTECTOR) := -fstack-protector
+stack-protector-cflags-$(CONFIG_STACK_PROTECTOR_STRONG) := -fstack-protector-strong
+stack-protector-cflags-$(CONFIG_STACK_PROTECTOR_ALL) := -fstack-protector-all
+else
+stack-protector-cflags-y := -fno-stack-protector
+endif
+firmware-cflags-y += $(stack-protector-cflags-y)
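For context, `-fstack-protector*` makes the compiler reference exactly the two symbols the firmware now defines: it reads `__stack_chk_guard` in every protected prologue and calls `__stack_chk_fail()` when the on-stack copy no longer matches. A hand-written C sketch of what the generated instrumentation amounts to (illustrative only; the real checks are emitted by the compiler, not written by hand):

```c
extern unsigned long __stack_chk_guard; /* seeded with 0x95B5FF5A above */
extern void __stack_chk_fail(void);     /* firmware panics or hangs here */

void protected_function(void)
{
    /* Prologue: place a copy of the guard next to the saved registers. */
    volatile unsigned long canary = __stack_chk_guard;
    char buf[32];

    /* ... function body that might overflow buf ... */
    (void)buf;

    /* Epilogue: a changed canary means the frame was smashed. */
    if (canary != __stack_chk_guard)
        __stack_chk_fail();
}
```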
@@ -97,3 +97,18 @@ _boot_a0:
    RISCV_PTR 0
_boot_a1:
    RISCV_PTR 0
+
+   /* This will be called when the stack corruption is detected */
+   .section .text
+   .align 3
+   .globl __stack_chk_fail
+   .type __stack_chk_fail, %function
+   .equ __stack_chk_fail, _start_hang
+
+   /* Initial value of the stack guard variable */
+   .section .data
+   .align 3
+   .globl __stack_chk_guard
+   .type __stack_chk_guard, %object
+__stack_chk_guard:
+   RISCV_PTR 0x95B5FF5A
@@ -46,6 +46,13 @@ static inline void sbi_ecall_console_puts(const char *str)
              sbi_strlen(str), (unsigned long)str, 0, 0, 0, 0);
}

+static inline void sbi_ecall_shutdown(void)
+{
+   sbi_ecall(SBI_EXT_SRST, SBI_EXT_SRST_RESET,
+             SBI_SRST_RESET_TYPE_SHUTDOWN, SBI_SRST_RESET_REASON_NONE,
+             0, 0, 0, 0);
+}
+
#define wfi() \
do { \
    __asm__ __volatile__("wfi" ::: "memory"); \
@@ -54,7 +61,6 @@ static inline void sbi_ecall_console_puts(const char *str)
void test_main(unsigned long a0, unsigned long a1)
{
    sbi_ecall_console_puts("\nTest payload running\n");

-   while (1)
-       wfi();
+   sbi_ecall_shutdown();
+   sbi_ecall_console_puts("sbi_ecall_shutdown failed to execute.\n");
}
@@ -122,6 +122,50 @@ enum {
    RV_DBTR_DECLARE_BIT_MASK(MC, TYPE, 4),
};

+/* ICOUNT - Match Control Type Register */
+enum {
+   RV_DBTR_DECLARE_BIT(ICOUNT, ACTION, 0),
+   RV_DBTR_DECLARE_BIT(ICOUNT, U, 6),
+   RV_DBTR_DECLARE_BIT(ICOUNT, S, 7),
+   RV_DBTR_DECLARE_BIT(ICOUNT, PENDING, 8),
+   RV_DBTR_DECLARE_BIT(ICOUNT, M, 9),
+   RV_DBTR_DECLARE_BIT(ICOUNT, COUNT, 10),
+   RV_DBTR_DECLARE_BIT(ICOUNT, HIT, 24),
+   RV_DBTR_DECLARE_BIT(ICOUNT, VU, 25),
+   RV_DBTR_DECLARE_BIT(ICOUNT, VS, 26),
+#if __riscv_xlen == 64
+   RV_DBTR_DECLARE_BIT(ICOUNT, DMODE, 59),
+   RV_DBTR_DECLARE_BIT(ICOUNT, TYPE, 60),
+#elif __riscv_xlen == 32
+   RV_DBTR_DECLARE_BIT(ICOUNT, DMODE, 27),
+   RV_DBTR_DECLARE_BIT(ICOUNT, TYPE, 28),
+#else
+#error "Unknown __riscv_xlen"
+#endif
+};
+
+enum {
+   RV_DBTR_DECLARE_BIT_MASK(ICOUNT, ACTION, 6),
+   RV_DBTR_DECLARE_BIT_MASK(ICOUNT, U, 1),
+   RV_DBTR_DECLARE_BIT_MASK(ICOUNT, S, 1),
+   RV_DBTR_DECLARE_BIT_MASK(ICOUNT, PENDING, 1),
+   RV_DBTR_DECLARE_BIT_MASK(ICOUNT, M, 1),
+   RV_DBTR_DECLARE_BIT_MASK(ICOUNT, COUNT, 14),
+   RV_DBTR_DECLARE_BIT_MASK(ICOUNT, HIT, 1),
+   RV_DBTR_DECLARE_BIT_MASK(ICOUNT, VU, 1),
+   RV_DBTR_DECLARE_BIT_MASK(ICOUNT, VS, 1),
+#if __riscv_xlen == 64
+   RV_DBTR_DECLARE_BIT_MASK(ICOUNT, DMODE, 1),
+   RV_DBTR_DECLARE_BIT_MASK(ICOUNT, TYPE, 4),
+#elif __riscv_xlen == 32
+   RV_DBTR_DECLARE_BIT_MASK(ICOUNT, DMODE, 1),
+   RV_DBTR_DECLARE_BIT_MASK(ICOUNT, TYPE, 4),
+#else
+#error "Unknown __riscv_xlen"
+#endif
+};
+
/* MC6 - Match Control 6 Type Register */
enum {
    RV_DBTR_DECLARE_BIT(MC6, LOAD, 0),
@@ -378,6 +378,9 @@
#define CSR_SSTATEEN2 0x10E
#define CSR_SSTATEEN3 0x10F

+/* Supervisor Resource Management Configuration CSRs */
+#define CSR_SRMCFG 0x181
+
/* Machine-Level Control transfer records CSRs */
#define CSR_MCTRCTL 0x34e

@@ -783,6 +786,40 @@
#define CSR_VTYPE 0xc21
#define CSR_VLENB 0xc22

+/* Custom CSR ranges */
+#define CSR_CUSTOM0_U_RW_BASE 0x800
+#define CSR_CUSTOM0_U_RW_COUNT 0x100
+
+#define CSR_CUSTOM1_U_RO_BASE 0xCC0
+#define CSR_CUSTOM1_U_RO_COUNT 0x040
+
+#define CSR_CUSTOM2_S_RW_BASE 0x5C0
+#define CSR_CUSTOM2_S_RW_COUNT 0x040
+
+#define CSR_CUSTOM3_S_RW_BASE 0x9C0
+#define CSR_CUSTOM3_S_RW_COUNT 0x040
+
+#define CSR_CUSTOM4_S_RO_BASE 0xDC0
+#define CSR_CUSTOM4_S_RO_COUNT 0x040
+
+#define CSR_CUSTOM5_HS_RW_BASE 0x6C0
+#define CSR_CUSTOM5_HS_RW_COUNT 0x040
+
+#define CSR_CUSTOM6_HS_RW_BASE 0xAC0
+#define CSR_CUSTOM6_HS_RW_COUNT 0x040
+
+#define CSR_CUSTOM7_HS_RO_BASE 0xEC0
+#define CSR_CUSTOM7_HS_RO_COUNT 0x040
+
+#define CSR_CUSTOM8_M_RW_BASE 0x7C0
+#define CSR_CUSTOM8_M_RW_COUNT 0x040
+
+#define CSR_CUSTOM9_M_RW_BASE 0xBC0
+#define CSR_CUSTOM9_M_RW_COUNT 0x040
+
+#define CSR_CUSTOM10_M_RO_BASE 0xFC0
+#define CSR_CUSTOM10_M_RO_COUNT 0x040
+
/* ===== Trap/Exception Causes ===== */

#define CAUSE_MISALIGNED_FETCH 0x0
@@ -815,6 +852,8 @@
#define SMSTATEEN0_FCSR (_ULL(1) << SMSTATEEN0_FCSR_SHIFT)
#define SMSTATEEN0_CTR_SHIFT 54
#define SMSTATEEN0_CTR (_ULL(1) << SMSTATEEN0_CTR_SHIFT)
+#define SMSTATEEN0_SRMCFG_SHIFT 55
+#define SMSTATEEN0_SRMCFG (_ULL(1) << SMSTATEEN0_SRMCFG_SHIFT)
#define SMSTATEEN0_CONTEXT_SHIFT 57
#define SMSTATEEN0_CONTEXT (_ULL(1) << SMSTATEEN0_CONTEXT_SHIFT)
#define SMSTATEEN0_IMSIC_SHIFT 58
@@ -910,7 +949,7 @@
#define INSN_MASK_WFI 0xffffff00
#define INSN_MATCH_WFI 0x10500000

-#define INSN_MASK_FENCE_TSO 0xffffffff
+#define INSN_MASK_FENCE_TSO 0xfff0707f
#define INSN_MATCH_FENCE_TSO 0x8330000f

#define INSN_MASK_VECTOR_UNIT_STRIDE 0xfdf0707f
@@ -1291,6 +1330,8 @@
#define SHIFT_FUNCT3 12

#define MASK_RS1 0xf8000
+#define MASK_RS2 0x1f00000
+#define MASK_RD 0xf80

#define MASK_CSR 0xfff00000
#define SHIFT_CSR 20
@@ -1315,13 +1356,6 @@

#define INSN_LEN(insn) (INSN_IS_16BIT(insn) ? 2 : 4)

-#if __riscv_xlen == 64
-#define LOG_REGBYTES 3
-#else
-#define LOG_REGBYTES 2
-#endif
-#define REGBYTES (1 << LOG_REGBYTES)
-
#define SH_VSEW 3
#define SH_VIEW 12
#define SH_VD 7
@@ -1356,28 +1390,17 @@
#define SHIFT_RIGHT(x, y) \
    ((y) < 0 ? ((x) << -(y)) : ((x) >> (y)))

-#define REG_MASK \
-   ((1 << (5 + LOG_REGBYTES)) - (1 << LOG_REGBYTES))
-
-#define REG_OFFSET(insn, pos) \
-   (SHIFT_RIGHT((insn), (pos) - LOG_REGBYTES) & REG_MASK)
-
-#define REG_PTR(insn, pos, regs) \
-   (ulong *)((ulong)(regs) + REG_OFFSET(insn, pos))
-
#define GET_FUNC3(insn) ((insn & MASK_FUNCT3) >> SHIFT_FUNCT3)
#define GET_RM(insn) GET_FUNC3(insn)
-#define GET_RS1_NUM(insn) ((insn & MASK_RS1) >> 15)
+#define GET_RS1_NUM(insn) ((insn & MASK_RS1) >> SH_RS1)
+#define GET_RS2_NUM(insn) ((insn & MASK_RS2) >> SH_RS2)
#define GET_RS1S_NUM(insn) RVC_RS1S(insn)
#define GET_RS2S_NUM(insn) RVC_RS2S(insn)
#define GET_RS2C_NUM(insn) RVC_RS2(insn)
+#define GET_RD_NUM(insn) ((insn & MASK_RD) >> SH_RD)
#define GET_CSR_NUM(insn) ((insn & MASK_CSR) >> SHIFT_CSR)
#define GET_AQRL(insn) ((insn & MASK_AQRL) >> SHIFT_AQRL)

-#define GET_RS1(insn, regs) (*REG_PTR(insn, SH_RS1, regs))
-#define GET_RS2(insn, regs) (*REG_PTR(insn, SH_RS2, regs))
-#define GET_RS1S(insn, regs) (*REG_PTR(RVC_RS1S(insn), 0, regs))
-#define GET_RS2S(insn, regs) (*REG_PTR(RVC_RS2S(insn), 0, regs))
-#define GET_RS2C(insn, regs) (*REG_PTR(insn, SH_RS2C, regs))
-#define GET_SP(regs) (*REG_PTR(2, 0, regs))
-#define SET_RD(insn, regs, val) (*REG_PTR(insn, SH_RD, regs) = (val))
#define IMM_I(insn) ((s32)(insn) >> 20)
#define IMM_S(insn) (((s32)(insn) >> 25 << 5) | \
                     (s32)(((insn) >> 7) & 0x1f))
@@ -14,13 +14,13 @@
# define _conv_cast(type, val) ((type)(val))
#endif

-#define BSWAP16(x) ((((x) & 0x00ff) << 8) | \
+#define __BSWAP16(x) ((((x) & 0x00ff) << 8) | \
                    (((x) & 0xff00) >> 8))
-#define BSWAP32(x) ((((x) & 0x000000ff) << 24) | \
+#define __BSWAP32(x) ((((x) & 0x000000ff) << 24) | \
                    (((x) & 0x0000ff00) << 8) | \
                    (((x) & 0x00ff0000) >> 8) | \
                    (((x) & 0xff000000) >> 24))
-#define BSWAP64(x) ((((x) & 0x00000000000000ffULL) << 56) | \
+#define __BSWAP64(x) ((((x) & 0x00000000000000ffULL) << 56) | \
                    (((x) & 0x000000000000ff00ULL) << 40) | \
                    (((x) & 0x0000000000ff0000ULL) << 24) | \
                    (((x) & 0x00000000ff000000ULL) << 8) | \
@@ -29,6 +29,10 @@
                    (((x) & 0x00ff000000000000ULL) >> 40) | \
                    (((x) & 0xff00000000000000ULL) >> 56))

+#define BSWAP64(x) ({ uint64_t _sv = (x); __BSWAP64(_sv); })
+#define BSWAP32(x) ({ uint32_t _sv = (x); __BSWAP32(_sv); })
+#define BSWAP16(x) ({ uint16_t _sv = (x); __BSWAP16(_sv); })
+
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ /* CPU(little-endian) */
#define cpu_to_be16(x) _conv_cast(uint16_t, BSWAP16(x))
#define cpu_to_be32(x) _conv_cast(uint32_t, BSWAP32(x))
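The point of the new statement-expression wrappers is single evaluation: the bare `__BSWAP*` bodies expand their argument several times, so an argument with side effects misbehaves. A small self-contained illustration of the hazard and the fix, using local copies of the two macro styles:

```c
#include <stdint.h>
#include <stdio.h>

/* Expands x twice, like the bare __BSWAP16 body. */
#define BSWAP16_UNSAFE(x) ((((x) & 0x00ff) << 8) | (((x) & 0xff00) >> 8))

/* GNU statement expression: x is evaluated exactly once. */
#define BSWAP16_SAFE(x) ({ uint16_t _sv = (x); BSWAP16_UNSAFE(_sv); })

int main(void)
{
    uint16_t v[2] = { 0x1234, 0x5678 }, *p = v;

    /* With the unsafe macro, *p++ would advance p twice and mix
     * bytes from both array elements:
     *   uint16_t bad = BSWAP16_UNSAFE(*p++);
     */
    uint16_t good = BSWAP16_SAFE(*p++); /* p advances exactly once */
    printf("%#x\n", good);              /* prints 0x3412 */
    return 0;
}
```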
@@ -18,7 +18,7 @@
({ \
    register ulong tinfo asm("a3") = (ulong)trap; \
    register ulong ttmp asm("a4"); \
-   register ulong mtvec = sbi_hart_expected_trap_addr(); \
+   register ulong mtvec = (ulong)sbi_hart_expected_trap; \
    register ulong ret = 0; \
    ((struct sbi_trap_info *)(trap))->cause = 0; \
    asm volatile( \
@@ -37,7 +37,7 @@
({ \
    register ulong tinfo asm("a3") = (ulong)trap; \
    register ulong ttmp asm("a4"); \
-   register ulong mtvec = sbi_hart_expected_trap_addr(); \
+   register ulong mtvec = (ulong)sbi_hart_expected_trap; \
    ((struct sbi_trap_info *)(trap))->cause = 0; \
    asm volatile( \
        "add %[ttmp], %[tinfo], zero\n" \
@@ -121,6 +121,9 @@ struct sbi_domain_memregion {
    ((__flags & SBI_DOMAIN_MEMREGION_SU_ACCESS_MASK) && \
     !(__flags & SBI_DOMAIN_MEMREGION_M_ACCESS_MASK))

+#define SBI_DOMAIN_MEMREGION_IS_FIRMWARE(__flags) \
+   ((__flags & SBI_DOMAIN_MEMREGION_FW) ? true : false) \
+
/** Bit to control if permissions are enforced on all modes */
#define SBI_DOMAIN_MEMREGION_ENF_PERMISSIONS (1UL << 6)

@@ -157,6 +160,7 @@ struct sbi_domain_memregion {
            SBI_DOMAIN_MEMREGION_M_EXECUTABLE)

#define SBI_DOMAIN_MEMREGION_MMIO (1UL << 31)
+#define SBI_DOMAIN_MEMREGION_FW (1UL << 30)
    unsigned long flags;
};

@@ -249,6 +253,13 @@ void sbi_domain_memregion_init(unsigned long addr,
                               unsigned long flags,
                               struct sbi_domain_memregion *reg);

+/**
+ * Return the Smepmp pmpcfg LRWX encoding for the flags in @reg.
+ *
+ * @param reg pointer to memory region; its flags field encodes permissions.
+ */
+unsigned int sbi_domain_get_smepmp_flags(struct sbi_domain_memregion *reg);
+
/**
 * Check whether we can access specified address for given mode and
 * memory region flags under a domain
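A short sketch of how the new `SBI_DOMAIN_MEMREGION_FW` bit composes with the test macro above. The flag values are copied from the hunk; the decision logic is a stand-in for what the Smepmp configuration code does with the answer, not the actual OpenSBI implementation:

```c
#include <stdbool.h>

#define SBI_DOMAIN_MEMREGION_MMIO (1UL << 31)
#define SBI_DOMAIN_MEMREGION_FW   (1UL << 30)

#define SBI_DOMAIN_MEMREGION_IS_FIRMWARE(__flags) \
    ((__flags & SBI_DOMAIN_MEMREGION_FW) ? true : false)

/* Stand-in: pick a PMP treatment based on the region kind. */
static int pmp_encoding_for(unsigned long flags)
{
    if (SBI_DOMAIN_MEMREGION_IS_FIRMWARE(flags))
        return 1; /* e.g. a locked, M-mode-only encoding */
    return 0;     /* e.g. a shared or S/U-mode encoding */
}
```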
@@ -291,6 +291,15 @@ struct sbi_pmu_event_info {
#define SBI_PMU_CFG_FLAG_SET_UINH (1 << 5)
#define SBI_PMU_CFG_FLAG_SET_SINH (1 << 6)
#define SBI_PMU_CFG_FLAG_SET_MINH (1 << 7)
+/* Event configuration mask */
+#define SBI_PMU_CFG_EVENT_MASK \
+   ( \
+       SBI_PMU_CFG_FLAG_SET_VUINH | \
+       SBI_PMU_CFG_FLAG_SET_VSINH | \
+       SBI_PMU_CFG_FLAG_SET_UINH | \
+       SBI_PMU_CFG_FLAG_SET_SINH | \
+       SBI_PMU_CFG_FLAG_SET_MINH \
+   )

/* Flags defined for counter start function */
#define SBI_PMU_START_FLAG_SET_INIT_VALUE (1 << 0)
@@ -79,8 +79,14 @@ enum sbi_hart_extensions {
    SBI_HART_EXT_SMCTR,
    /** HART has CTR S-mode CSRs */
    SBI_HART_EXT_SSCTR,
    /** Hart has Ssqosid extension */
    SBI_HART_EXT_SSQOSID,
+   /** HART has Ssstateen extension **/
+   SBI_HART_EXT_SSSTATEEN,
+   /** Hart has Xsfcflushdlone extension */
+   SBI_HART_EXT_XSIFIVE_CFLUSH_D_L1,
+   /** Hart has Xsfcease extension */
+   SBI_HART_EXT_XSIFIVE_CEASE,

    /** Maximum index of Hart extension */
    SBI_HART_EXT_MAX,
@@ -101,21 +107,6 @@ enum sbi_hart_csrs {
    SBI_HART_CSR_MAX,
};

-/*
- * Smepmp enforces access boundaries between M-mode and
- * S/U-mode. When it is enabled, the PMPs are programmed
- * such that M-mode doesn't have access to S/U-mode memory.
- *
- * To give M-mode R/W access to the shared memory between M and
- * S/U-mode, first entry is reserved. It is disabled at boot.
- * When shared memory access is required, the physical address
- * should be programmed into the first PMP entry with R/W
- * permissions to the M-mode. Once the work is done, it should be
- * unmapped. sbi_hart_map_saddr/sbi_hart_unmap_saddr function
- * pair should be used to map/unmap the shared memory.
- */
-#define SBI_SMEPMP_RESV_ENTRY 0
-
struct sbi_hart_features {
    bool detected;
    int priv_version;
@@ -128,27 +119,20 @@ struct sbi_hart_features {
    unsigned int mhpm_bits;
};

+extern unsigned long hart_features_offset;
+#define sbi_hart_features_ptr(__s) sbi_scratch_offset_ptr(__s, hart_features_offset)
+
struct sbi_scratch;

int sbi_hart_reinit(struct sbi_scratch *scratch);
int sbi_hart_init(struct sbi_scratch *scratch, bool cold_boot);

extern void (*sbi_hart_expected_trap)(void);
-static inline ulong sbi_hart_expected_trap_addr(void)
-{
-   return (ulong)sbi_hart_expected_trap;
-}
-
unsigned int sbi_hart_mhpm_mask(struct sbi_scratch *scratch);
void sbi_hart_delegation_dump(struct sbi_scratch *scratch,
                              const char *prefix, const char *suffix);
-unsigned int sbi_hart_pmp_count(struct sbi_scratch *scratch);
-unsigned int sbi_hart_pmp_log2gran(struct sbi_scratch *scratch);
-unsigned int sbi_hart_pmp_addrbits(struct sbi_scratch *scratch);
unsigned int sbi_hart_mhpm_bits(struct sbi_scratch *scratch);
-int sbi_hart_pmp_configure(struct sbi_scratch *scratch);
-int sbi_hart_map_saddr(unsigned long base, unsigned long size);
-int sbi_hart_unmap_saddr(void);
int sbi_hart_priv_version(struct sbi_scratch *scratch);
void sbi_hart_get_priv_version_str(struct sbi_scratch *scratch,
                                   char *version_str, int nvstr);
include/sbi/sbi_hart_pmp.h (new file)
@@ -0,0 +1,20 @@
/*
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2025 Ventana Micro Systems Inc.
 */

#ifndef __SBI_HART_PMP_H__
#define __SBI_HART_PMP_H__

#include <sbi/sbi_types.h>

struct sbi_scratch;

unsigned int sbi_hart_pmp_count(struct sbi_scratch *scratch);
unsigned int sbi_hart_pmp_log2gran(struct sbi_scratch *scratch);
unsigned int sbi_hart_pmp_addrbits(struct sbi_scratch *scratch);
bool sbi_hart_smepmp_is_fw_region(unsigned int pmp_idx);
int sbi_hart_pmp_init(struct sbi_scratch *scratch);

#endif
include/sbi/sbi_hart_protection.h (new file)
@@ -0,0 +1,100 @@
/*
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2025 Ventana Micro Systems Inc.
 */

#ifndef __SBI_HART_PROTECTION_H__
#define __SBI_HART_PROTECTION_H__

#include <sbi/sbi_types.h>
#include <sbi/sbi_list.h>

struct sbi_scratch;

/** Representation of hart protection mechanism */
struct sbi_hart_protection {
    /** List head */
    struct sbi_dlist head;

    /** Name of the hart protection mechanism */
    char name[32];

    /** Ratings of the hart protection mechanism (higher is better) */
    unsigned long rating;

    /** Configure protection for current HART (Mandatory) */
    int (*configure)(struct sbi_scratch *scratch);

    /** Unconfigure protection for current HART (Mandatory) */
    void (*unconfigure)(struct sbi_scratch *scratch);

    /** Create temporary mapping to access address range on current HART (Optional) */
    int (*map_range)(struct sbi_scratch *scratch,
                     unsigned long base, unsigned long size);

    /** Destroy temporary mapping on current HART (Optional) */
    int (*unmap_range)(struct sbi_scratch *scratch,
                       unsigned long base, unsigned long size);
};

/**
 * Get the best hart protection mechanism
 *
 * @return pointer to best hart protection mechanism
 */
struct sbi_hart_protection *sbi_hart_protection_best(void);

/**
 * Register a hart protection mechanism
 *
 * @param hprot pointer to hart protection mechanism
 *
 * @return 0 on success and negative error code on failure
 */
int sbi_hart_protection_register(struct sbi_hart_protection *hprot);

/**
 * Unregister a hart protection mechanism
 *
 * @param hprot pointer to hart protection mechanism
 */
void sbi_hart_protection_unregister(struct sbi_hart_protection *hprot);

/**
 * Configure protection for current HART
 *
 * @param scratch pointer to scratch space of current HART
 *
 * @return 0 on success and negative error code on failure
 */
int sbi_hart_protection_configure(struct sbi_scratch *scratch);

/**
 * Unconfigure protection for current HART
 *
 * @param scratch pointer to scratch space of current HART
 */
void sbi_hart_protection_unconfigure(struct sbi_scratch *scratch);

/**
 * Create temporary mapping to access address range on current HART
 *
 * @param base base address of the temporary mapping
 * @param size size of the temporary mapping
 *
 * @return 0 on success and negative error code on failure
 */
int sbi_hart_protection_map_range(unsigned long base, unsigned long size);

/**
 * Destroy temporary mapping to access address range on current HART
 *
 * @param base base address of the temporary mapping
 * @param size size of the temporary mapping
 *
 * @return 0 on success and negative error code on failure
 */
int sbi_hart_protection_unmap_range(unsigned long base, unsigned long size);

#endif /* __SBI_HART_PROTECTION_H__ */
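A hedged usage sketch of the registration API declared above: a platform defines a mechanism, registers it, and the core later picks the highest-rated one via `sbi_hart_protection_best()`. The struct layout is from the header; the PMP-specific internals are stand-ins:

```c
#include <sbi/sbi_hart_protection.h>

static int pmp_configure(struct sbi_scratch *scratch)
{
    /* program PMP entries for this hart ... (stand-in) */
    return 0;
}

static void pmp_unconfigure(struct sbi_scratch *scratch)
{
    /* clear PMP entries for this hart ... (stand-in) */
}

static struct sbi_hart_protection pmp_protection = {
    .name = "pmp",
    .rating = 100, /* higher-rated mechanisms win in _best() */
    .configure = pmp_configure,
    .unconfigure = pmp_unconfigure,
    /* .map_range / .unmap_range are optional and omitted here */
};

/* Called once during platform init (illustrative entry point): */
int platform_protection_init(void)
{
    return sbi_hart_protection_register(&pmp_protection);
}
```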
@@ -23,6 +23,9 @@ struct sbi_ipi_device {
    /** Name of the IPI device */
    char name[32];

+   /** Ratings of the IPI device (higher is better) */
+   unsigned long rating;
+
    /** Send IPI to a target HART index */
    void (*ipi_send)(u32 hart_index);

@@ -85,13 +88,13 @@ int sbi_ipi_send_halt(ulong hmask, ulong hbase);

void sbi_ipi_process(void);

-int sbi_ipi_raw_send(u32 hartindex);
+int sbi_ipi_raw_send(u32 hartindex, bool all_devices);

-void sbi_ipi_raw_clear(void);
+void sbi_ipi_raw_clear(bool all_devices);

const struct sbi_ipi_device *sbi_ipi_get_device(void);

-void sbi_ipi_set_device(const struct sbi_ipi_device *dev);
+void sbi_ipi_add_device(const struct sbi_ipi_device *dev);

int sbi_ipi_init(struct sbi_scratch *scratch, bool cold_boot);
@@ -160,4 +160,28 @@ static inline void sbi_list_del_init(struct sbi_dlist *entry)
         &pos->member != (head); \
         pos = sbi_list_entry(pos->member.next, typeof(*pos), member))

+/**
+ * Iterate over list of given type safe against removal of list entry
+ * @param pos the type * to use as a loop cursor.
+ * @param n another type * to use as temporary storage.
+ * @param head the head for your list.
+ * @param member the name of the list_struct within the struct.
+ */
+#define sbi_list_for_each_entry_safe(pos, n, head, member) \
+   for (pos = sbi_list_entry((head)->next, typeof(*pos), member), \
+        n = sbi_list_entry(pos->member.next, typeof(*pos), member); \
+        &pos->member != (head); \
+        pos = n, n = sbi_list_entry(pos->member.next, typeof(*pos), member))
+
+/**
+ * Iterate over list of given type in reverse order
+ * @param pos the type * to use as a loop cursor.
+ * @param head the head for your list.
+ * @param member the name of the list_struct within the struct.
+ */
+#define sbi_list_for_each_entry_reverse(pos, head, member) \
+   for (pos = sbi_list_entry((head)->prev, typeof(*pos), member); \
+        &pos->member != (head); \
+        pos = sbi_list_entry(pos->member.prev, typeof(*pos), member))
+
#endif
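Why the `_safe` variant matters: the plain iterator reads `pos->member.next` after the loop body runs, so unlinking or freeing `pos` inside the body corrupts the walk; the `_safe` form caches the next entry in `n` before the body executes. A usage sketch with an assumed element type:

```c
#include <sbi/sbi_list.h>

struct my_device {
    int id;
    struct sbi_dlist node; /* linkage embedded in the element */
};

static void remove_all(struct sbi_dlist *devices)
{
    struct my_device *pos, *n;

    /* Safe: n already points at the next element before pos is
     * unlinked below. */
    sbi_list_for_each_entry_safe(pos, n, devices, node)
        sbi_list_del(&pos->node);
}
```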
@@ -116,9 +116,6 @@ struct sbi_platform_operations {
    /** Initialize the platform interrupt controller during cold boot */
    int (*irqchip_init)(void);

-   /** Initialize IPI during cold boot */
-   int (*ipi_init)(void);
-
    /** Get tlb flush limit value **/
    u64 (*get_tlbr_flush_limit)(void);

@@ -528,20 +525,6 @@ static inline int sbi_platform_irqchip_init(const struct sbi_platform *plat)
    return 0;
}

-/**
- * Initialize the platform IPI support during cold boot
- *
- * @param plat pointer to struct sbi_platform
- *
- * @return 0 on success and negative error code on failure
- */
-static inline int sbi_platform_ipi_init(const struct sbi_platform *plat)
-{
-   if (plat && sbi_platform_ops(plat)->ipi_init)
-       return sbi_platform_ops(plat)->ipi_init();
-   return 0;
-}
-
/**
 * Initialize the platform timer during cold boot
 *
@@ -69,11 +69,18 @@ struct sbi_system_suspend_device {
     * return from system_suspend() may ignore this parameter.
     */
    int (*system_suspend)(u32 sleep_type, unsigned long mmode_resume_addr);
+
+   /**
+    * Resume the system from system suspend
+    */
+   void (*system_resume)(void);
};

+const struct sbi_system_suspend_device *sbi_system_suspend_get_device(void);
void sbi_system_suspend_set_device(struct sbi_system_suspend_device *dev);
void sbi_system_suspend_test_enable(void);
+void sbi_system_resume(void);
+bool sbi_system_is_suspended(void);
bool sbi_system_suspend_supported(u32 sleep_type);
int sbi_system_suspend(u32 sleep_type, ulong resume_addr, ulong opaque);
@@ -54,6 +54,8 @@ do { \

#define SBI_TLB_INFO_SIZE sizeof(struct sbi_tlb_info)

+void __sbi_sfence_vma_all();
+
int sbi_tlb_request(ulong hmask, ulong hbase, struct sbi_tlb_info *tinfo);

int sbi_tlb_init(struct sbi_scratch *scratch, bool cold_boot);
@@ -127,6 +127,9 @@

/** Representation of register state at time of trap/interrupt */
struct sbi_trap_regs {
+   union {
+       unsigned long gprs[32];
+       struct {
    /** zero register state */
    unsigned long zero;
    /** ra register state */
@@ -191,6 +194,8 @@ struct sbi_trap_regs {
    unsigned long t5;
    /** t6 register state */
    unsigned long t6;
+       };
+   };
    /** mepc register state */
    unsigned long mepc;
    /** mstatus register state */
@@ -199,6 +204,21 @@ struct sbi_trap_regs {
    unsigned long mstatusH;
};

+_Static_assert(
+   sizeof(((struct sbi_trap_regs *)0)->gprs) ==
+       offsetof(struct sbi_trap_regs, t6) +
+       sizeof(((struct sbi_trap_regs *)0)->t6),
+   "struct sbi_trap_regs's layout differs between gprs and named members");
+
+#define REG_VAL(idx, regs) ((regs)->gprs[(idx)])
+
+#define GET_RS1(insn, regs) REG_VAL(GET_RS1_NUM(insn), regs)
+#define GET_RS2(insn, regs) REG_VAL(GET_RS2_NUM(insn), regs)
+#define GET_RS1S(insn, regs) REG_VAL(GET_RS1S_NUM(insn), regs)
+#define GET_RS2S(insn, regs) REG_VAL(GET_RS2S_NUM(insn), regs)
+#define GET_RS2C(insn, regs) REG_VAL(GET_RS2C_NUM(insn), regs)
+#define SET_RD(insn, regs, val) (REG_VAL(GET_RD_NUM(insn), regs) = (val))
+
/** Representation of trap details */
struct sbi_trap_info {
    /** cause Trap exception cause */
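The union overlay is what lets the new `REG_VAL()` index any GPR by number while existing code keeps using the named fields, and the `_Static_assert` guards that the two views cover exactly the same storage. A minimal stand-alone sketch of the same pattern:

```c
#include <stddef.h>

struct regs {
    union {
        unsigned long gprs[4];      /* indexed view */
        struct {
            unsigned long zero;     /* named view of the same storage */
            unsigned long ra;
            unsigned long sp;
            unsigned long gp;
        };
    };
};

#define REG_VAL(idx, regs) ((regs)->gprs[(idx)])

/* Catch any drift between the array view and the named members. */
_Static_assert(sizeof(((struct regs *)0)->gprs) ==
               offsetof(struct regs, gp) + sizeof(unsigned long),
               "named members must exactly cover the gprs array");

int main(void)
{
    struct regs r = { .gprs = { 0 } };
    REG_VAL(2, &r) = 0x80000000UL; /* writes r.sp through the array view */
    return r.sp == 0x80000000UL ? 0 : 1;
}
```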
@@ -14,7 +14,7 @@

/* clang-format off */

-typedef char s8;
+typedef signed char s8;
typedef unsigned char u8;
typedef unsigned char uint8_t;
@@ -7,10 +7,12 @@
#ifndef __SBI_VISIBILITY_H__
#define __SBI_VISIBILITY_H__

+#ifndef __DTS__
/*
 * Declare all global objects with hidden visibility so access is PC-relative
 * instead of going through the GOT.
 */
#pragma GCC visibility push(hidden)
+#endif

#endif
include/sbi_utils/cache/cache.h (vendored, new file)
@@ -0,0 +1,69 @@
/*
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2025 SiFive Inc.
 */

#ifndef __CACHE_H__
#define __CACHE_H__

#include <sbi/sbi_list.h>
#include <sbi/sbi_types.h>

#define CACHE_NAME_LEN 32

struct cache_device;

struct cache_ops {
    /** Warm init **/
    int (*warm_init)(struct cache_device *dev);
    /** Flush entire cache **/
    int (*cache_flush_all)(struct cache_device *dev);
};

struct cache_device {
    /** Name of the device **/
    char name[CACHE_NAME_LEN];
    /** List node for search **/
    struct sbi_dlist node;
    /** Point to the next level cache **/
    struct cache_device *next;
    /** Cache Management Operations **/
    struct cache_ops *ops;
    /** CPU private cache **/
    bool cpu_private;
    /** The unique id of this cache device **/
    u32 id;
};

/**
 * Find a registered cache device
 *
 * @param id unique ID of the cache device
 *
 * @return the cache device or NULL
 */
struct cache_device *cache_find(u32 id);

/**
 * Register a cache device
 *
 * cache_device->id must be initialized already and must not change during the life
 * of the cache_device object.
 *
 * @param dev the cache device to register
 *
 * @return 0 on success, or a negative error code on failure
 */
int cache_add(struct cache_device *dev);

/**
 * Flush the entire cache
 *
 * @param dev the cache to flush
 *
 * @return 0 on success, or a negative error code on failure
 */
int cache_flush_all(struct cache_device *dev);

#endif
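A hedged sketch of how a driver would use the API above: fill in a `cache_device`, register it with `cache_add()`, and let generic code flush it by ID or walk `next` pointers toward the last level. The flush callback body and the ID value are stand-ins:

```c
#include <sbi_utils/cache/cache.h>

static int l2_flush_all(struct cache_device *dev)
{
    /* issue the controller-specific flush sequence ... (stand-in) */
    return 0;
}

static struct cache_ops l2_ops = {
    .cache_flush_all = l2_flush_all,
    /* .warm_init omitted; nothing to redo on warm boot here */
};

static struct cache_device l2_cache = {
    .name = "l2-cache",
    .ops = &l2_ops,
    .cpu_private = false,
    .id = 42, /* must be unique and stable, per cache_add() */
};

int l2_register(void)
{
    int rc = cache_add(&l2_cache);
    if (rc)
        return rc;
    /* generic code can now flush this level of the hierarchy */
    return cache_flush_all(&l2_cache);
}
```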
include/sbi_utils/cache/fdt_cache.h (vendored, new file)
@@ -0,0 +1,34 @@
/*
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2025 SiFive Inc.
 */

#ifndef __FDT_CACHE_H__
#define __FDT_CACHE_H__

#include <sbi_utils/cache/cache.h>

/**
 * Register a cache device using information from the DT
 *
 * @param fdt devicetree blob
 * @param noff offset of a node in the devicetree blob
 * @param dev cache device to register for this devicetree node
 *
 * @return 0 on success, or a negative error code on failure
 */
int fdt_cache_add(const void *fdt, int noff, struct cache_device *dev);

/**
 * Get the cache device referenced by the "next-level-cache" property of a DT node
 *
 * @param fdt devicetree blob
 * @param noff offset of a node in the devicetree blob
 * @param out_dev location to return the cache device
 *
 * @return 0 on success, or a negative error code on failure
 */
int fdt_next_cache_get(const void *fdt, int noff, struct cache_device **out_dev);

#endif
include/sbi_utils/cache/fdt_cmo_helper.h (vendored, new file)
@@ -0,0 +1,40 @@
/*
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2025 SiFive Inc.
 */

#ifndef __FDT_CMO_HELPER_H__
#define __FDT_CMO_HELPER_H__

#ifdef CONFIG_FDT_CACHE
/**
 * Flush the private first level cache of the current hart
 *
 * @return 0 on success, or a negative error code on failure
 */
int fdt_cmo_private_flc_flush_all(void);

/**
 * Flush the last level cache of the current hart
 *
 * @return 0 on success, or a negative error code on failure
 */
int fdt_cmo_llc_flush_all(void);

/**
 * Initialize the cache devices for each hart
 *
 * @param cold_boot cold init or warm init
 *
 * @return 0 on success, or a negative error code on failure
 */
int fdt_cmo_init(bool cold_boot);

#else

static inline int fdt_cmo_init(bool cold_boot) { return 0; }

#endif /* CONFIG_FDT_CACHE */
#endif /* __FDT_CMO_HELPER_H__ */
include/sbi_utils/hsm/fdt_hsm_sifive_inst.h (new file)
@@ -0,0 +1,20 @@
/*
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2025 SiFive Inc.
 */

#ifndef __FDT_HSM_SIFIVE_INST_H__
#define __FDT_HSM_SIFIVE_INST_H__

static inline void sifive_cease(void)
{
    __asm__ __volatile__(".insn 0x30500073" ::: "memory");
}

static inline void sifive_cflush(void)
{
    __asm__ __volatile__(".insn 0xfc000073" ::: "memory");
}

#endif
include/sbi_utils/hsm/fdt_hsm_sifive_tmc0.h (new file)
@@ -0,0 +1,14 @@
/*
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2025 SiFive Inc.
 */

#ifndef __FDT_HSM_SIFIVE_TMC0_H__
#define __FDT_HSM_SIFIVE_TMC0_H__

int sifive_tmc0_set_wakemask_enareq(u32 hartid);
void sifive_tmc0_set_wakemask_disreq(u32 hartid);
bool sifive_tmc0_is_pg(u32 hartid);

#endif
@@ -1,26 +0,0 @@
-/*
- * SPDX-License-Identifier: BSD-2-Clause
- *
- * Copyright (c) 2020 Western Digital Corporation or its affiliates.
- *
- * Authors:
- *   Anup Patel <anup.patel@wdc.com>
- */
-
-#ifndef __FDT_IPI_H__
-#define __FDT_IPI_H__
-
-#include <sbi/sbi_types.h>
-#include <sbi_utils/fdt/fdt_driver.h>
-
-#ifdef CONFIG_FDT_IPI
-
-int fdt_ipi_init(void);
-
-#else
-
-static inline int fdt_ipi_init(void) { return 0; }
-
-#endif
-
-#endif
@@ -33,6 +33,7 @@ struct aplic_delegate_data {
struct aplic_data {
    /* Private members */
    struct sbi_irqchip_device irqchip;
+   struct sbi_dlist node;
    /* Public members */
    unsigned long addr;
    unsigned long size;
@@ -48,4 +49,6 @@ struct aplic_data {

int aplic_cold_irqchip_init(struct aplic_data *aplic);

+void aplic_reinit_all(void);
+
#endif
@@ -216,7 +216,10 @@ enum rpmi_servicegroup_id {
    RPMI_SRVGRP_SYSTEM_SUSPEND = 0x0004,
    RPMI_SRVGRP_HSM = 0x0005,
    RPMI_SRVGRP_CPPC = 0x0006,
+   RPMI_SRVGRP_VOLTAGE = 0x00007,
    RPMI_SRVGRP_CLOCK = 0x0008,
+   RPMI_SRVGRP_DEVICE_POWER = 0x0009,
+   RPMI_SRVGRP_PERFORMANCE = 0x0000A,
    RPMI_SRVGRP_ID_MAX_COUNT,

    /* Reserved range for service groups */
@@ -611,6 +614,86 @@ struct rpmi_cppc_hart_list_resp {
    u32 hartid[(RPMI_MSG_DATA_SIZE(RPMI_SLOT_SIZE_MIN) - (sizeof(u32) * 3)) / sizeof(u32)];
};

+/** RPMI Voltage ServiceGroup Service IDs */
+enum rpmi_voltage_service_id {
+   RPMI_VOLTAGE_SRV_ENABLE_NOTIFICATION = 0x01,
+   RPMI_VOLTAGE_SRV_GET_NUM_DOMAINS = 0x02,
+   RPMI_VOLTAGE_SRV_GET_ATTRIBUTES = 0x03,
+   RPMI_VOLTAGE_SRV_GET_SUPPORTED_LEVELS = 0x04,
+   RPMI_VOLTAGE_SRV_SET_CONFIG = 0x05,
+   RPMI_VOLTAGE_SRV_GET_CONFIG = 0x06,
+   RPMI_VOLTAGE_SRV_SET_LEVEL = 0x07,
+   RPMI_VOLTAGE_SRV_GET_LEVEL = 0x08,
+   RPMI_VOLTAGE_SRV_MAX_COUNT,
+};
+
+struct rpmi_voltage_get_num_domains_resp {
+   s32 status;
+   u32 num_domains;
+};
+
+struct rpmi_voltage_get_attributes_req {
+   u32 domain_id;
+};
+
+struct rpmi_voltage_get_attributes_resp {
+   s32 status;
+   u32 flags;
+   u32 num_levels;
+   u32 transition_latency;
+   u8 name[16];
+};
+
+struct rpmi_voltage_get_supported_rate_req {
+   u32 domain_id;
+   u32 index;
+};
+
+struct rpmi_voltage_get_supported_rate_resp {
+   s32 status;
+   u32 flags;
+   u32 remaining;
+   u32 returned;
+   u32 level[0];
+};
+
+struct rpmi_voltage_set_config_req {
+   u32 domain_id;
+#define RPMI_CLOCK_CONFIG_ENABLE (1U << 0)
+   u32 config;
+};
+
+struct rpmi_voltage_set_config_resp {
+   s32 status;
+};
+
+struct rpmi_voltage_get_config_req {
+   u32 domain_id;
+};
+
+struct rpmi_voltage_get_config_resp {
+   s32 status;
+   u32 config;
+};
+
+struct rpmi_voltage_set_level_req {
+   u32 domain_id;
+   s32 level;
+};
+
+struct rpmi_voltage_set_level_resp {
+   s32 status;
+};
+
+struct rpmi_voltage_get_level_req {
+   u32 domain_id;
+};
+
+struct rpmi_voltage_get_level_resp {
+   s32 status;
+   s32 level;
+};
+
/** RPMI Clock ServiceGroup Service IDs */
enum rpmi_clock_service_id {
    RPMI_CLOCK_SRV_ENABLE_NOTIFICATION = 0x01,
@@ -703,4 +786,165 @@ struct rpmi_clock_get_rate_resp {
    u32 clock_rate_high;
};

+/** RPMI Device Power ServiceGroup Service IDs */
+enum rpmi_dpwr_service_id {
+   RPMI_DPWR_SRV_ENABLE_NOTIFICATION = 0x01,
+   RPMI_DPWR_SRV_GET_NUM_DOMAINS = 0x02,
+   RPMI_DPWR_SRV_GET_ATTRIBUTES = 0x03,
+   RPMI_DPWR_SRV_SET_STATE = 0x04,
+   RPMI_DPWR_SRV_GET_STATE = 0x05,
+   RPMI_DPWR_SRV_MAX_COUNT,
+};
+
+struct rpmi_dpwr_get_num_domain_resp {
+   s32 status;
+   u32 num_domain;
+};
+
+struct rpmi_dpwr_get_attrs_req {
+   u32 domain_id;
+};
+
+struct rpmi_dpwr_get_attrs_resp {
+   s32 status;
+   u32 flags;
+   u32 transition_latency;
+   u8 name[16];
+};
+
+struct rpmi_dpwr_set_state_req {
+   u32 domain_id;
+   u32 state;
+};
+
+struct rpmi_dpwr_set_state_resp {
+   s32 status;
+};
+
+struct rpmi_dpwr_get_state_req {
+   u32 domain_id;
+};
+
+struct rpmi_dpwr_get_state_resp {
+   s32 status;
+   u32 state;
+};
+
+/** RPMI Performance ServiceGroup Service IDs */
+enum rpmi_performance_service_id {
+   RPMI_PERF_SRV_ENABLE_NOTIFICATION = 0x01,
+   RPMI_PERF_SRV_GET_NUM_DOMAINS = 0x02,
+   RPMI_PERF_SRV_GET_ATTRIBUTES = 0x03,
+   RPMI_PERF_SRV_GET_SUPPORTED_LEVELS = 0x04,
+   RPMI_PERF_SRV_GET_LEVEL = 0x05,
+   RPMI_PERF_SRV_SET_LEVEL = 0x06,
+   RPMI_PERF_SRV_GET_LIMIT = 0x07,
+   RPMI_PERF_SRV_SET_LIMIT = 0x08,
+   RPMI_PERF_SRV_GET_FAST_CHANNEL_REGION = 0x09,
+   RPMI_PERF_SRV_GET_FAST_CHANNEL_ATTRIBUTES = 0x0A,
+   RPMI_PERF_SRV_MAX_COUNT,
+};
+
+struct rpmi_perf_get_num_domain_resp {
+   s32 status;
+   u32 num_domains;
+};
+
+struct rpmi_perf_get_attrs_req {
+   u32 domain_id;
+};
+
+struct rpmi_perf_get_attrs_resp {
+   s32 status;
+   u32 flags;
+   u32 num_level;
+   u32 latency;
+   u8 name[16];
+};
+
+struct rpmi_perf_get_supported_level_req {
+   u32 domain_id;
+   u32 perf_level_index;
+};
+
+struct rpmi_perf_domain_level {
+   u32 level_index;
+   u32 opp_level;
+   u32 power_cost_uw;
+   u32 transition_latency_us;
+};
+
+struct rpmi_perf_get_supported_level_resp {
+   s32 status;
+   u32 reserve;
+   u32 remaining;
+   u32 returned;
+   struct rpmi_perf_domain_level level[0];
+};
+
+struct rpmi_perf_get_level_req {
+   u32 domain_id;
+};
+
+struct rpmi_perf_get_level_resp {
+   s32 status;
+   u32 level_index;
+};
+
+struct rpmi_perf_set_level_req {
+   u32 domain_id;
+   u32 level_index;
+};
+
+struct rpmi_perf_set_level_resp {
+   s32 status;
+};
+
+struct rpmi_perf_get_limit_req {
+   u32 domain_id;
+};
+
+struct rpmi_perf_get_limit_resp {
+   s32 status;
+   u32 level_index_max;
+   u32 level_index_min;
+};
+
+struct rpmi_perf_set_limit_req {
+   u32 domain_id;
+   u32 level_index_max;
+   u32 level_index_min;
+};
+
+struct rpmi_perf_set_limit_resp {
+   s32 status;
+};
+
+struct rpmi_perf_get_fast_chn_region_resp {
+   s32 status;
+   u32 region_phy_addr_low;
+   u32 region_phy_addr_high;
+   u32 region_size_low;
+   u32 region_size_high;
+};
+
+struct rpmi_perf_get_fast_chn_attr_req {
+   u32 domain_id;
+   u32 service_id;
+};
+
+struct rpmi_perf_get_fast_chn_attr_resp {
+   s32 status;
+   u32 flags;
+   u32 region_offset_low;
+   u32 region_offset_high;
+   u32 region_size;
+   u32 db_addr_low;
+   u32 db_addr_high;
+   u32 db_id_low;
+   u32 db_id_high;
+   u32 db_perserved_low;
+   u32 db_perserved_high;
+};
+
#endif /* !__RPMI_MSGPROT_H__ */
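The new service groups follow the existing RPMI convention: a request struct of little-endian `u32` fields and a response that leads with an `s32 status`. A hedged sketch of filling a voltage set-level request and checking the paired response; `rpmi_xfer()` is a stand-in for the real mailbox transport, not an OpenSBI function:

```c
#include <stddef.h>
#include <sbi/sbi_types.h> /* for u32/s32 */

/* Stand-in for the real mailbox transport (assumption, not OpenSBI API). */
extern int rpmi_xfer(u32 srvgrp, u32 srvid,
                     const void *req, size_t req_len,
                     void *resp, size_t resp_len);

int set_domain_voltage(u32 domain, s32 level_uv)
{
    struct rpmi_voltage_set_level_req req = {
        .domain_id = domain,
        .level = level_uv, /* signed, like the struct field above */
    };
    struct rpmi_voltage_set_level_resp resp = { 0 };
    int rc;

    rc = rpmi_xfer(RPMI_SRVGRP_VOLTAGE, RPMI_VOLTAGE_SRV_SET_LEVEL,
                   &req, sizeof(req), &resp, sizeof(resp));
    if (rc)
        return rc;
    return resp.status; /* 0 on success, negative RPMI error otherwise */
}
```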
@@ -42,6 +42,11 @@ struct aclint_mtimer_data {
    void (*time_wr)(bool timecmp, u64 value, volatile u64 *addr);
};

+struct aclint_mtimer_data *aclint_get_mtimer_data(void);
+
+void aclint_mtimer_update(struct aclint_mtimer_data *mt,
+                          struct aclint_mtimer_data *ref);
+
void aclint_mtimer_sync(struct aclint_mtimer_data *mt);

void aclint_mtimer_set_reference(struct aclint_mtimer_data *mt,
@@ -75,6 +75,8 @@ libsbi-objs-y += sbi_emulate_csr.o
libsbi-objs-y += sbi_fifo.o
libsbi-objs-y += sbi_fwft.o
libsbi-objs-y += sbi_hart.o
+libsbi-objs-y += sbi_hart_pmp.o
+libsbi-objs-y += sbi_hart_protection.o
libsbi-objs-y += sbi_heap.o
libsbi-objs-y += sbi_math.o
libsbi-objs-y += sbi_hfence.o
@@ -93,77 +93,91 @@ void misa_string(int xlen, char *out, unsigned int out_sz)
|
||||
|
||||
unsigned long csr_read_num(int csr_num)
|
||||
{
|
||||
#define switchcase_csr_read(__csr_num, __val) \
|
||||
#define switchcase_csr_read(__csr_num) \
|
||||
case __csr_num: \
|
||||
__val = csr_read(__csr_num); \
|
||||
break;
|
||||
#define switchcase_csr_read_2(__csr_num, __val) \
|
||||
switchcase_csr_read(__csr_num + 0, __val) \
|
||||
switchcase_csr_read(__csr_num + 1, __val)
|
||||
#define switchcase_csr_read_4(__csr_num, __val) \
|
||||
switchcase_csr_read_2(__csr_num + 0, __val) \
|
||||
switchcase_csr_read_2(__csr_num + 2, __val)
|
||||
#define switchcase_csr_read_8(__csr_num, __val) \
|
||||
switchcase_csr_read_4(__csr_num + 0, __val) \
|
||||
switchcase_csr_read_4(__csr_num + 4, __val)
|
||||
#define switchcase_csr_read_16(__csr_num, __val) \
|
||||
switchcase_csr_read_8(__csr_num + 0, __val) \
|
||||
switchcase_csr_read_8(__csr_num + 8, __val)
|
||||
#define switchcase_csr_read_32(__csr_num, __val) \
|
||||
switchcase_csr_read_16(__csr_num + 0, __val) \
|
||||
switchcase_csr_read_16(__csr_num + 16, __val)
|
||||
#define switchcase_csr_read_64(__csr_num, __val) \
|
||||
switchcase_csr_read_32(__csr_num + 0, __val) \
|
||||
switchcase_csr_read_32(__csr_num + 32, __val)
|
||||
|
||||
unsigned long ret = 0;
|
||||
return csr_read(__csr_num);
|
||||
#define switchcase_csr_read_2(__csr_num) \
|
||||
switchcase_csr_read(__csr_num + 0) \
|
||||
switchcase_csr_read(__csr_num + 1)
|
||||
#define switchcase_csr_read_4(__csr_num) \
|
||||
switchcase_csr_read_2(__csr_num + 0) \
|
||||
switchcase_csr_read_2(__csr_num + 2)
|
||||
#define switchcase_csr_read_8(__csr_num) \
|
||||
switchcase_csr_read_4(__csr_num + 0) \
|
||||
switchcase_csr_read_4(__csr_num + 4)
|
||||
#define switchcase_csr_read_16(__csr_num) \
|
||||
switchcase_csr_read_8(__csr_num + 0) \
|
||||
switchcase_csr_read_8(__csr_num + 8)
|
||||
#define switchcase_csr_read_32(__csr_num) \
|
||||
switchcase_csr_read_16(__csr_num + 0) \
|
||||
switchcase_csr_read_16(__csr_num + 16)
|
||||
#define switchcase_csr_read_64(__csr_num) \
|
||||
switchcase_csr_read_32(__csr_num + 0) \
|
||||
switchcase_csr_read_32(__csr_num + 32)
|
||||
#define switchcase_csr_read_128(__csr_num) \
|
||||
switchcase_csr_read_64(__csr_num + 0) \
|
||||
+	switchcase_csr_read_64(__csr_num + 64)
+#define switchcase_csr_read_256(__csr_num)			\
+	switchcase_csr_read_128(__csr_num + 0)			\
+	switchcase_csr_read_128(__csr_num + 128)

 	switch (csr_num) {
-	switchcase_csr_read_16(CSR_PMPCFG0, ret)
-	switchcase_csr_read_64(CSR_PMPADDR0, ret)
-	switchcase_csr_read(CSR_MCYCLE, ret)
-	switchcase_csr_read(CSR_MINSTRET, ret)
-	switchcase_csr_read(CSR_MHPMCOUNTER3, ret)
-	switchcase_csr_read_4(CSR_MHPMCOUNTER4, ret)
-	switchcase_csr_read_8(CSR_MHPMCOUNTER8, ret)
-	switchcase_csr_read_16(CSR_MHPMCOUNTER16, ret)
-	switchcase_csr_read(CSR_MCOUNTINHIBIT, ret)
-	switchcase_csr_read(CSR_MCYCLECFG, ret)
-	switchcase_csr_read(CSR_MINSTRETCFG, ret)
-	switchcase_csr_read(CSR_MHPMEVENT3, ret)
-	switchcase_csr_read_4(CSR_MHPMEVENT4, ret)
-	switchcase_csr_read_8(CSR_MHPMEVENT8, ret)
-	switchcase_csr_read_16(CSR_MHPMEVENT16, ret)
+	switchcase_csr_read_16(CSR_PMPCFG0)
+	switchcase_csr_read_64(CSR_PMPADDR0)
+	switchcase_csr_read(CSR_MCYCLE)
+	switchcase_csr_read(CSR_MINSTRET)
+	switchcase_csr_read(CSR_MHPMCOUNTER3)
+	switchcase_csr_read_4(CSR_MHPMCOUNTER4)
+	switchcase_csr_read_8(CSR_MHPMCOUNTER8)
+	switchcase_csr_read_16(CSR_MHPMCOUNTER16)
+	switchcase_csr_read(CSR_MCOUNTINHIBIT)
+	switchcase_csr_read(CSR_MCYCLECFG)
+	switchcase_csr_read(CSR_MINSTRETCFG)
+	switchcase_csr_read(CSR_MHPMEVENT3)
+	switchcase_csr_read_4(CSR_MHPMEVENT4)
+	switchcase_csr_read_8(CSR_MHPMEVENT8)
+	switchcase_csr_read_16(CSR_MHPMEVENT16)
 #if __riscv_xlen == 32
-	switchcase_csr_read(CSR_MCYCLEH, ret)
-	switchcase_csr_read(CSR_MINSTRETH, ret)
-	switchcase_csr_read(CSR_MHPMCOUNTER3H, ret)
-	switchcase_csr_read_4(CSR_MHPMCOUNTER4H, ret)
-	switchcase_csr_read_8(CSR_MHPMCOUNTER8H, ret)
-	switchcase_csr_read_16(CSR_MHPMCOUNTER16H, ret)
+	switchcase_csr_read(CSR_MCYCLEH)
+	switchcase_csr_read(CSR_MINSTRETH)
+	switchcase_csr_read(CSR_MHPMCOUNTER3H)
+	switchcase_csr_read_4(CSR_MHPMCOUNTER4H)
+	switchcase_csr_read_8(CSR_MHPMCOUNTER8H)
+	switchcase_csr_read_16(CSR_MHPMCOUNTER16H)
 	/**
 	 * The CSR range M[CYCLE, INSTRET]CFGH are available only if smcntrpmf
 	 * extension is present. The caller must ensure that.
 	 */
-	switchcase_csr_read(CSR_MCYCLECFGH, ret)
-	switchcase_csr_read(CSR_MINSTRETCFGH, ret)
+	switchcase_csr_read(CSR_MCYCLECFGH)
+	switchcase_csr_read(CSR_MINSTRETCFGH)
 	/**
 	 * The CSR range MHPMEVENT[3-16]H are available only if sscofpmf
 	 * extension is present. The caller must ensure that.
 	 */
-	switchcase_csr_read(CSR_MHPMEVENT3H, ret)
-	switchcase_csr_read_4(CSR_MHPMEVENT4H, ret)
-	switchcase_csr_read_8(CSR_MHPMEVENT8H, ret)
-	switchcase_csr_read_16(CSR_MHPMEVENT16H, ret)
+	switchcase_csr_read(CSR_MHPMEVENT3H)
+	switchcase_csr_read_4(CSR_MHPMEVENT4H)
+	switchcase_csr_read_8(CSR_MHPMEVENT8H)
+	switchcase_csr_read_16(CSR_MHPMEVENT16H)
 #endif
+	switchcase_csr_read_256(CSR_CUSTOM0_U_RW_BASE)
+	switchcase_csr_read_64(CSR_CUSTOM1_U_RO_BASE)
+	switchcase_csr_read_64(CSR_CUSTOM2_S_RW_BASE)
+	switchcase_csr_read_64(CSR_CUSTOM3_S_RW_BASE)
+	switchcase_csr_read_64(CSR_CUSTOM4_S_RO_BASE)
+	switchcase_csr_read_64(CSR_CUSTOM5_HS_RW_BASE)
+	switchcase_csr_read_64(CSR_CUSTOM6_HS_RW_BASE)
+	switchcase_csr_read_64(CSR_CUSTOM7_HS_RO_BASE)
+	switchcase_csr_read_64(CSR_CUSTOM8_M_RW_BASE)
+	switchcase_csr_read_64(CSR_CUSTOM9_M_RW_BASE)
+	switchcase_csr_read_64(CSR_CUSTOM10_M_RO_BASE)

 	default:
 		sbi_panic("%s: Unknown CSR %#x", __func__, csr_num);
-		break;
+		return 0;
 	}

 	return ret;

+#undef switchcase_csr_read_256
+#undef switchcase_csr_read_128
 #undef switchcase_csr_read_64
 #undef switchcase_csr_read_32
 #undef switchcase_csr_read_16
@@ -197,6 +211,12 @@ void csr_write_num(int csr_num, unsigned long val)
 #define switchcase_csr_write_64(__csr_num, __val)		\
 	switchcase_csr_write_32(__csr_num + 0, __val)		\
 	switchcase_csr_write_32(__csr_num + 32, __val)
+#define switchcase_csr_write_128(__csr_num, __val)		\
+	switchcase_csr_write_64(__csr_num + 0, __val)		\
+	switchcase_csr_write_64(__csr_num + 64, __val)
+#define switchcase_csr_write_256(__csr_num, __val)		\
+	switchcase_csr_write_128(__csr_num + 0, __val)		\
+	switchcase_csr_write_128(__csr_num + 128, __val)

 	switch (csr_num) {
 	switchcase_csr_write_16(CSR_PMPCFG0, val)
@@ -228,12 +248,21 @@ void csr_write_num(int csr_num, unsigned long val)
 	switchcase_csr_write_4(CSR_MHPMEVENT4, val)
 	switchcase_csr_write_8(CSR_MHPMEVENT8, val)
 	switchcase_csr_write_16(CSR_MHPMEVENT16, val)
+	switchcase_csr_write_256(CSR_CUSTOM0_U_RW_BASE, val)
+	switchcase_csr_write_64(CSR_CUSTOM2_S_RW_BASE, val)
+	switchcase_csr_write_64(CSR_CUSTOM3_S_RW_BASE, val)
+	switchcase_csr_write_64(CSR_CUSTOM5_HS_RW_BASE, val)
+	switchcase_csr_write_64(CSR_CUSTOM6_HS_RW_BASE, val)
+	switchcase_csr_write_64(CSR_CUSTOM8_M_RW_BASE, val)
+	switchcase_csr_write_64(CSR_CUSTOM9_M_RW_BASE, val)

 	default:
 		sbi_panic("%s: Unknown CSR %#x", __func__, csr_num);
 		break;
 	}

+#undef switchcase_csr_write_256
+#undef switchcase_csr_write_128
 #undef switchcase_csr_write_64
 #undef switchcase_csr_write_32
 #undef switchcase_csr_write_16
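/*
 * Editor's note (illustrative, not part of the patch): the
 * switchcase_csr_read_N/switchcase_csr_write_N helpers double the
 * covered CSR range at each level, so the new _256 variant expands to
 * 256 adjacent `case` labels, enough for the 256-entry custom CSR
 * window handled above. A minimal sketch of the pattern, assuming a
 * base macro that emits one `case` per CSR number:
 *
 *	#define switchcase_csr_write(__csr_num, __val)		\
 *		case __csr_num:					\
 *			csr_write(__csr_num, __val);		\
 *			break;
 *	#define switchcase_csr_write_2(__csr_num, __val)	\
 *		switchcase_csr_write(__csr_num + 0, __val)	\
 *		switchcase_csr_write(__csr_num + 1, __val)
 *
 * Each _2N macro pastes two _N expansions offset by N, so the number
 * being a compile-time constant keeps every csr_write() encodable.
 */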
@@ -16,6 +16,7 @@
 #include <sbi/sbi_trap.h>
 #include <sbi/sbi_dbtr.h>
 #include <sbi/sbi_heap.h>
+#include <sbi/sbi_hart_protection.h>
 #include <sbi/riscv_encoding.h>
 #include <sbi/riscv_asm.h>
@@ -336,6 +337,19 @@ static void dbtr_trigger_setup(struct sbi_dbtr_trigger *trig,
 		if (__test_bit(RV_DBTR_BIT(MC6, VS), &tdata1))
 			__set_bit(RV_DBTR_BIT(TS, VS), &trig->state);
 		break;
+	case RISCV_DBTR_TRIG_ICOUNT:
+		if (__test_bit(RV_DBTR_BIT(ICOUNT, U), &tdata1))
+			__set_bit(RV_DBTR_BIT(TS, U), &trig->state);
+
+		if (__test_bit(RV_DBTR_BIT(ICOUNT, S), &tdata1))
+			__set_bit(RV_DBTR_BIT(TS, S), &trig->state);
+
+		if (__test_bit(RV_DBTR_BIT(ICOUNT, VU), &tdata1))
+			__set_bit(RV_DBTR_BIT(TS, VU), &trig->state);
+
+		if (__test_bit(RV_DBTR_BIT(ICOUNT, VS), &tdata1))
+			__set_bit(RV_DBTR_BIT(TS, VS), &trig->state);
+		break;
 	default:
 		sbi_dprintf("%s: Unknown type (tdata1: 0x%lx Type: %ld)\n",
 			    __func__, tdata1, TDATA1_GET_TYPE(tdata1));
@@ -379,6 +393,16 @@ static void dbtr_trigger_enable(struct sbi_dbtr_trigger *trig)
 		update_bit(state & RV_DBTR_BIT_MASK(TS, S),
 			   RV_DBTR_BIT(MC6, S), &trig->tdata1);
 		break;
+	case RISCV_DBTR_TRIG_ICOUNT:
+		update_bit(state & RV_DBTR_BIT_MASK(TS, VU),
+			   RV_DBTR_BIT(ICOUNT, VU), &trig->tdata1);
+		update_bit(state & RV_DBTR_BIT_MASK(TS, VS),
+			   RV_DBTR_BIT(ICOUNT, VS), &trig->tdata1);
+		update_bit(state & RV_DBTR_BIT_MASK(TS, U),
+			   RV_DBTR_BIT(ICOUNT, U), &trig->tdata1);
+		update_bit(state & RV_DBTR_BIT_MASK(TS, S),
+			   RV_DBTR_BIT(ICOUNT, S), &trig->tdata1);
+		break;
 	default:
 		break;
 	}
@@ -418,6 +442,12 @@ static void dbtr_trigger_disable(struct sbi_dbtr_trigger *trig)
 		__clear_bit(RV_DBTR_BIT(MC6, U), &trig->tdata1);
 		__clear_bit(RV_DBTR_BIT(MC6, S), &trig->tdata1);
 		break;
+	case RISCV_DBTR_TRIG_ICOUNT:
+		__clear_bit(RV_DBTR_BIT(ICOUNT, VU), &trig->tdata1);
+		__clear_bit(RV_DBTR_BIT(ICOUNT, VS), &trig->tdata1);
+		__clear_bit(RV_DBTR_BIT(ICOUNT, U), &trig->tdata1);
+		__clear_bit(RV_DBTR_BIT(ICOUNT, S), &trig->tdata1);
+		break;
 	default:
 		break;
 	}
@@ -441,6 +471,7 @@ static int dbtr_trigger_supported(unsigned long type)
 	switch (type) {
 	case RISCV_DBTR_TRIG_MCONTROL:
 	case RISCV_DBTR_TRIG_MCONTROL6:
+	case RISCV_DBTR_TRIG_ICOUNT:
 		return 1;
 	default:
 		break;
@@ -462,6 +493,11 @@ static int dbtr_trigger_valid(unsigned long type, unsigned long tdata)
 	    !(tdata & RV_DBTR_BIT_MASK(MC6, M)))
 			return 1;
 		break;
+	case RISCV_DBTR_TRIG_ICOUNT:
+		if (!(tdata & RV_DBTR_BIT_MASK(ICOUNT, DMODE)) &&
+		    !(tdata & RV_DBTR_BIT_MASK(ICOUNT, M)))
+			return 1;
+		break;
 	default:
 		break;
 	}
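/*
 * Editor's note (hedged sketch, not from the patch): the update_bit()
 * helper used in dbtr_trigger_enable() above is assumed to simply
 * mirror a boolean condition into a bit position, roughly:
 *
 *	static void update_bit(unsigned long condition, int nr,
 *			       volatile unsigned long *addr)
 *	{
 *		if (condition)
 *			__set_bit(nr, addr);
 *		else
 *			__clear_bit(nr, addr);
 *	}
 *
 * so each trigger-state (TS) bit cached in trig->state is copied back
 * into the matching ICOUNT/MCONTROL6 mode-enable bit of tdata1.
 */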
@@ -523,17 +559,22 @@ int sbi_dbtr_read_trig(unsigned long smode,

 	shmem_base = hart_shmem_base(hs);

-	sbi_hart_map_saddr((unsigned long)shmem_base,
-			   trig_count * sizeof(*entry));
+	sbi_hart_protection_map_range((unsigned long)shmem_base,
+				      trig_count * sizeof(*entry));
 	for_each_trig_entry(shmem_base, trig_count, typeof(*entry), entry) {
 		xmit = &entry->data;
 		trig = INDEX_TO_TRIGGER((_idx + trig_idx_base));
 		csr_write(CSR_TSELECT, trig->index);
 		trig->tdata1 = csr_read(CSR_TDATA1);
 		trig->tdata2 = csr_read(CSR_TDATA2);
 		trig->tdata3 = csr_read(CSR_TDATA3);
 		xmit->tstate = cpu_to_lle(trig->state);
 		xmit->tdata1 = cpu_to_lle(trig->tdata1);
 		xmit->tdata2 = cpu_to_lle(trig->tdata2);
 		xmit->tdata3 = cpu_to_lle(trig->tdata3);
 	}
-	sbi_hart_unmap_saddr();
+	sbi_hart_protection_unmap_range((unsigned long)shmem_base,
+					trig_count * sizeof(*entry));

 	return SBI_SUCCESS;
 }
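/*
 * Editor's note (illustrative pattern): every M-mode access to the
 * S-mode shared memory is now bracketed by the generic hart
 * protection pair instead of the Smepmp-specific saddr helpers:
 *
 *	sbi_hart_protection_map_range((unsigned long)shmem, len);
 *	... read or write the shared memory ...
 *	sbi_hart_protection_unmap_range((unsigned long)shmem, len);
 *
 * Unlike sbi_hart_map_saddr()/sbi_hart_unmap_saddr(), the new pair
 * takes the range on both sides, which lets non-PMP protection
 * implementations track exactly which range was mapped.
 */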
@@ -557,7 +598,7 @@ int sbi_dbtr_install_trig(unsigned long smode,
 		return SBI_ERR_NO_SHMEM;

 	shmem_base = hart_shmem_base(hs);
-	sbi_hart_map_saddr((unsigned long)shmem_base,
-			   trig_count * sizeof(*entry));
+	sbi_hart_protection_map_range((unsigned long)shmem_base,
+				      trig_count * sizeof(*entry));

 	/* Check requested triggers configuration */
@@ -567,20 +608,23 @@ int sbi_dbtr_install_trig(unsigned long smode,

 		if (!dbtr_trigger_supported(TDATA1_GET_TYPE(ctrl))) {
 			*out = _idx;
-			sbi_hart_unmap_saddr();
+			sbi_hart_protection_unmap_range((unsigned long)shmem_base,
+							trig_count * sizeof(*entry));
 			return SBI_ERR_FAILED;
 		}

 		if (!dbtr_trigger_valid(TDATA1_GET_TYPE(ctrl), ctrl)) {
 			*out = _idx;
-			sbi_hart_unmap_saddr();
+			sbi_hart_protection_unmap_range((unsigned long)shmem_base,
+							trig_count * sizeof(*entry));
 			return SBI_ERR_FAILED;
 		}
 	}

 	if (hs->available_trigs < trig_count) {
 		*out = hs->available_trigs;
-		sbi_hart_unmap_saddr();
+		sbi_hart_protection_unmap_range((unsigned long)shmem_base,
+						trig_count * sizeof(*entry));
 		return SBI_ERR_FAILED;
 	}

@@ -600,7 +644,9 @@ int sbi_dbtr_install_trig(unsigned long smode,
 		xmit->idx = cpu_to_lle(trig->index);

 	}
-	sbi_hart_unmap_saddr();
+
+	sbi_hart_protection_unmap_range((unsigned long)shmem_base,
+					trig_count * sizeof(*entry));

 	return SBI_SUCCESS;
 }
@@ -673,23 +719,23 @@ int sbi_dbtr_update_trig(unsigned long smode,
 		return SBI_ERR_BAD_RANGE;

 	for_each_trig_entry(shmem_base, trig_count, typeof(*entry), entry) {
-		sbi_hart_map_saddr((unsigned long)entry, sizeof(*entry));
+		sbi_hart_protection_map_range((unsigned long)entry, sizeof(*entry));
 		trig_idx = entry->id.idx;

 		if (trig_idx >= hs->total_trigs) {
-			sbi_hart_unmap_saddr();
+			sbi_hart_protection_unmap_range((unsigned long)entry, sizeof(*entry));
 			return SBI_ERR_INVALID_PARAM;
 		}

 		trig = INDEX_TO_TRIGGER(trig_idx);

 		if (!(trig->state & RV_DBTR_BIT_MASK(TS, MAPPED))) {
-			sbi_hart_unmap_saddr();
+			sbi_hart_protection_unmap_range((unsigned long)entry, sizeof(*entry));
 			return SBI_ERR_FAILED;
 		}

 		dbtr_trigger_setup(trig, &entry->data);
-		sbi_hart_unmap_saddr();
+		sbi_hart_protection_unmap_range((unsigned long)entry, sizeof(*entry));
 		dbtr_trigger_enable(trig);
 	}
@@ -25,7 +25,6 @@ static u32 domain_count = 0;
 static bool domain_finalized = false;

 #define ROOT_REGION_MAX 32
-static u32 root_memregs_count = 0;

 struct sbi_domain root = {
 	.name = "root",
@@ -122,6 +121,80 @@ void sbi_domain_memregion_init(unsigned long addr,
 	}
 }

+unsigned int sbi_domain_get_smepmp_flags(struct sbi_domain_memregion *reg)
+{
+	unsigned int pmp_flags = 0;
+	unsigned long rstart, rend;
+
+	if ((reg->flags & SBI_DOMAIN_MEMREGION_ACCESS_MASK) == 0) {
+		/*
+		 * Region is inaccessible in all privilege modes.
+		 *
+		 * SmePMP allows two encodings for an inaccessible region:
+		 * - pmpcfg.LRWX = 0000 (Inaccessible region)
+		 * - pmpcfg.LRWX = 1000 (Locked inaccessible region)
+		 * We use the first encoding here.
+		 */
+		return 0;
+	} else if (SBI_DOMAIN_MEMREGION_IS_SHARED(reg->flags)) {
+		/* Read only for both M and SU modes */
+		if (SBI_DOMAIN_MEMREGION_IS_SUR_MR(reg->flags))
+			pmp_flags = (PMP_L | PMP_R | PMP_W | PMP_X);
+
+		/* Execute for SU but Read/Execute for M mode */
+		else if (SBI_DOMAIN_MEMREGION_IS_SUX_MRX(reg->flags))
+			/* locked region */
+			pmp_flags = (PMP_L | PMP_W | PMP_X);
+
+		/* Execute only for both M and SU modes */
+		else if (SBI_DOMAIN_MEMREGION_IS_SUX_MX(reg->flags))
+			pmp_flags = (PMP_L | PMP_W);
+
+		/* Read/Write for both M and SU modes */
+		else if (SBI_DOMAIN_MEMREGION_IS_SURW_MRW(reg->flags))
+			pmp_flags = (PMP_W | PMP_X);
+
+		/* Read only for SU mode but Read/Write for M mode */
+		else if (SBI_DOMAIN_MEMREGION_IS_SUR_MRW(reg->flags))
+			pmp_flags = (PMP_W);
+	} else if (SBI_DOMAIN_MEMREGION_M_ONLY_ACCESS(reg->flags)) {
+		/*
+		 * When smepmp is supported and used, an M-mode only region
+		 * cannot have RWX permissions.
+		 */
+		if ((reg->flags & SBI_DOMAIN_MEMREGION_M_ACCESS_MASK)
+		    == SBI_DOMAIN_MEMREGION_M_RWX) {
+			sbi_printf("%s: M-mode only regions cannot have "
+				   "RWX permissions\n", __func__);
+			return 0;
+		}
+
+		/* M-mode only access regions are always locked */
+		pmp_flags |= PMP_L;
+
+		if (reg->flags & SBI_DOMAIN_MEMREGION_M_READABLE)
+			pmp_flags |= PMP_R;
+		if (reg->flags & SBI_DOMAIN_MEMREGION_M_WRITABLE)
+			pmp_flags |= PMP_W;
+		if (reg->flags & SBI_DOMAIN_MEMREGION_M_EXECUTABLE)
+			pmp_flags |= PMP_X;
+	} else if (SBI_DOMAIN_MEMREGION_SU_ONLY_ACCESS(reg->flags)) {
+		if (reg->flags & SBI_DOMAIN_MEMREGION_SU_READABLE)
+			pmp_flags |= PMP_R;
+		if (reg->flags & SBI_DOMAIN_MEMREGION_SU_WRITABLE)
+			pmp_flags |= PMP_W;
+		if (reg->flags & SBI_DOMAIN_MEMREGION_SU_EXECUTABLE)
+			pmp_flags |= PMP_X;
+	} else {
+		rstart = reg->base;
+		rend = (reg->order < __riscv_xlen) ?
+		       rstart + ((1UL << reg->order) - 1) : -1UL;
+		sbi_printf("%s: Unsupported Smepmp permissions on region 0x%"PRILX"-0x%"PRILX"\n",
+			   __func__, rstart, rend);
+	}
+
+	return pmp_flags;
+}
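/*
 * Editor's note: the pmpcfg encodings returned above follow the
 * Smepmp shared-region truth table once mseccfg.MML is set. As used
 * by this function:
 *
 *	PMP_L|PMP_R|PMP_W|PMP_X  ->  read-only for M and S/U
 *	PMP_L|PMP_W|PMP_X        ->  execute for S/U, read/execute for M
 *	PMP_L|PMP_W              ->  execute-only for M and S/U
 *	PMP_W|PMP_X              ->  read/write for M and S/U
 *	PMP_W                    ->  read-only for S/U, read/write for M
 *
 * i.e. the otherwise-reserved R=0/W=1 combinations are reinterpreted
 * as shared regions while MML is enforced.
 */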
 bool sbi_domain_check_addr(const struct sbi_domain *dom,
 			   unsigned long addr, unsigned long mode,
 			   unsigned long access_flags)
@@ -162,7 +235,11 @@ bool sbi_domain_check_addr(const struct sbi_domain *dom,
 			rstart + ((1UL << reg->order) - 1) : -1UL;
 		if (rstart <= addr && addr <= rend) {
 			rmmio = (rflags & SBI_DOMAIN_MEMREGION_MMIO) ? true : false;
-			if (mmio != rmmio)
+			/*
+			 * MMIO devices may appear in regions without the flag set (such as the
+			 * default region), but MMIO device regions should not be used as memory.
+			 */
+			if (!mmio && rmmio)
 				return false;
 			return ((rrwx & rwx) == rwx) ? true : false;
 		}
@@ -218,6 +295,19 @@ static bool is_region_compatible(const struct sbi_domain_memregion *regA,
 static bool is_region_before(const struct sbi_domain_memregion *regA,
 			     const struct sbi_domain_memregion *regB)
 {
+	/*
+	 * Enforce firmware region ordering for memory access
+	 * under SmePMP.
+	 * Place firmware regions first to ensure consistent
+	 * PMP entries during domain context switches.
+	 */
+	if (SBI_DOMAIN_MEMREGION_IS_FIRMWARE(regA->flags) &&
+	    !SBI_DOMAIN_MEMREGION_IS_FIRMWARE(regB->flags))
+		return true;
+	if (!SBI_DOMAIN_MEMREGION_IS_FIRMWARE(regA->flags) &&
+	    SBI_DOMAIN_MEMREGION_IS_FIRMWARE(regB->flags))
+		return false;
+
 	if (regA->order < regB->order)
 		return true;

@@ -281,6 +371,17 @@ static void clear_region(struct sbi_domain_memregion* reg)
 	sbi_memset(reg, 0x0, sizeof(*reg));
 }

+static int sbi_domain_used_memregions(const struct sbi_domain *dom)
+{
+	int count = 0;
+	struct sbi_domain_memregion *reg;
+
+	sbi_domain_for_each_memregion(dom, reg)
+		count++;
+
+	return count;
+}
+
 static int sanitize_domain(struct sbi_domain *dom)
 {
 	u32 i, j, count;
@@ -319,9 +420,7 @@ static int sanitize_domain(struct sbi_domain *dom)
 	}

 	/* Count memory regions */
-	count = 0;
-	sbi_domain_for_each_memregion(dom, reg)
-		count++;
+	count = sbi_domain_used_memregions(dom);

 	/* Check presence of firmware regions */
 	if (!dom->fw_region_inited) {
@@ -344,7 +443,7 @@ static int sanitize_domain(struct sbi_domain *dom)
 	}

 	/* Remove covered regions */
-	while(i < (count - 1)) {
+	for (i = 0; i < (count - 1);) {
 		is_covered = false;
 		reg = &dom->regions[i];
@@ -464,6 +563,8 @@ void sbi_domain_dump(const struct sbi_domain *dom, const char *suffix)
 		sbi_printf("M: ");
 		if (reg->flags & SBI_DOMAIN_MEMREGION_MMIO)
 			sbi_printf("%cI", (k++) ? ',' : '(');
+		if (reg->flags & SBI_DOMAIN_MEMREGION_FW)
+			sbi_printf("%cF", (k++) ? ',' : '(');
 		if (reg->flags & SBI_DOMAIN_MEMREGION_M_READABLE)
 			sbi_printf("%cR", (k++) ? ',' : '(');
 		if (reg->flags & SBI_DOMAIN_MEMREGION_M_WRITABLE)
@@ -603,6 +704,7 @@ static int root_add_memregion(const struct sbi_domain_memregion *reg)
 	int rc;
 	bool reg_merged;
 	struct sbi_domain_memregion *nreg, *nreg1, *nreg2;
+	int root_memregs_count = sbi_domain_used_memregions(&root);

 	/* Sanity checks */
 	if (!reg || domain_finalized || !root.regions ||
@@ -773,6 +875,7 @@ int sbi_domain_init(struct sbi_scratch *scratch, u32 cold_hartid)
 	int rc;
 	struct sbi_hartmask *root_hmask;
 	struct sbi_domain_memregion *root_memregs;
+	int root_memregs_count = 0;

 	SBI_INIT_LIST_HEAD(&domain_list);
@@ -817,13 +920,15 @@ int sbi_domain_init(struct sbi_scratch *scratch, u32 cold_hartid)
 	/* Root domain firmware memory region */
 	sbi_domain_memregion_init(scratch->fw_start, scratch->fw_rw_offset,
 				  (SBI_DOMAIN_MEMREGION_M_READABLE |
-				   SBI_DOMAIN_MEMREGION_M_EXECUTABLE),
+				   SBI_DOMAIN_MEMREGION_M_EXECUTABLE |
+				   SBI_DOMAIN_MEMREGION_FW),
 				  &root_memregs[root_memregs_count++]);

 	sbi_domain_memregion_init((scratch->fw_start + scratch->fw_rw_offset),
 				  (scratch->fw_size - scratch->fw_rw_offset),
 				  (SBI_DOMAIN_MEMREGION_M_READABLE |
-				   SBI_DOMAIN_MEMREGION_M_WRITABLE),
+				   SBI_DOMAIN_MEMREGION_M_WRITABLE |
+				   SBI_DOMAIN_MEMREGION_FW),
 				  &root_memregs[root_memregs_count++]);

 	root.fw_region_inited = true;
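/*
 * Editor's note (layout sketch): with the SBI_DOMAIN_MEMREGION_FW flag
 * added above, the root domain now carries two explicitly
 * firmware-tagged regions split at fw_rw_offset:
 *
 *	[fw_start, fw_start + fw_rw_offset)          : M_READABLE | M_EXECUTABLE | FW
 *	[fw_start + fw_rw_offset, fw_start + fw_size): M_READABLE | M_WRITABLE   | FW
 *
 * Tagging them as firmware lets the Smepmp code pin their PMP entries
 * across domain context switches (see is_region_before() and the
 * fw_smepmp_ids tracking in sbi_hart_smepmp_configure() below).
 */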
@@ -10,6 +10,7 @@
 #include <sbi/sbi_console.h>
 #include <sbi/sbi_hsm.h>
 #include <sbi/sbi_hart.h>
+#include <sbi/sbi_hart_protection.h>
 #include <sbi/sbi_heap.h>
 #include <sbi/sbi_scratch.h>
 #include <sbi/sbi_string.h>
@@ -45,6 +46,8 @@ struct hart_context {
 	unsigned long scounteren;
 	/** Supervisor environment configuration register */
 	unsigned long senvcfg;
+	/** Supervisor resource management configuration register */
+	unsigned long srmcfg;

 	/** Reference to the owning domain */
 	struct sbi_domain *dom;
@@ -92,17 +95,22 @@ static void hart_context_set(struct sbi_domain *dom, u32 hartindex,
  *
  * @param ctx pointer to the current HART context
  * @param dom_ctx pointer to the target domain context
  *
+ * @return 0 on success and negative error code on failure
  */
-static void switch_to_next_domain_context(struct hart_context *ctx,
+static int switch_to_next_domain_context(struct hart_context *ctx,
 					  struct hart_context *dom_ctx)
 {
 	u32 hartindex = current_hartindex();
 	struct sbi_trap_context *trap_ctx;
-	struct sbi_domain *current_dom = ctx->dom;
-	struct sbi_domain *target_dom = dom_ctx->dom;
+	struct sbi_domain *current_dom, *target_dom;
 	struct sbi_scratch *scratch = sbi_scratch_thishart_ptr();
-	unsigned int pmp_count = sbi_hart_pmp_count(scratch);

+	if (!ctx || !dom_ctx || ctx == dom_ctx)
+		return SBI_EINVAL;
+
+	current_dom = ctx->dom;
+	target_dom = dom_ctx->dom;
 	/* Assign current hart to target domain */
 	spin_lock(&current_dom->assigned_harts_lock);
 	sbi_hartmask_clear_hartindex(hartindex, &current_dom->assigned_harts);
@@ -115,11 +123,8 @@ static void switch_to_next_domain_context(struct hart_context *ctx,
 	spin_unlock(&target_dom->assigned_harts_lock);

 	/* Reconfigure PMP settings for the new domain */
-	for (int i = 0; i < pmp_count; i++) {
-		sbi_platform_pmp_disable(sbi_platform_thishart_ptr(), i);
-		pmp_disable(i);
-	}
-	sbi_hart_pmp_configure(scratch);
+	sbi_hart_protection_unconfigure(scratch);
+	sbi_hart_protection_configure(scratch);

 	/* Save current CSR context and restore target domain's CSR context */
 	ctx->sstatus = csr_swap(CSR_SSTATUS, dom_ctx->sstatus);
@@ -135,6 +140,8 @@ static void switch_to_next_domain_context(struct hart_context *ctx,
 	ctx->scounteren = csr_swap(CSR_SCOUNTEREN, dom_ctx->scounteren);
 	if (sbi_hart_priv_version(scratch) >= SBI_HART_PRIV_VER_1_12)
 		ctx->senvcfg = csr_swap(CSR_SENVCFG, dom_ctx->senvcfg);
+	if (sbi_hart_has_extension(scratch, SBI_HART_EXT_SSQOSID))
+		ctx->srmcfg = csr_swap(CSR_SRMCFG, dom_ctx->srmcfg);

 	/* Save current trap state and restore target domain's trap state */
 	trap_ctx = sbi_trap_get_context(scratch);
@@ -156,13 +163,57 @@ static void switch_to_next_domain_context(struct hart_context *ctx,
 		else
 			sbi_hsm_hart_stop(scratch, true);
 	}
+	return 0;
 }

+static int hart_context_init(u32 hartindex)
+{
+	struct hart_context *ctx;
+	struct sbi_domain *dom;
+
+	sbi_domain_for_each(dom) {
+		if (!sbi_hartmask_test_hartindex(hartindex,
+						 dom->possible_harts))
+			continue;
+
+		ctx = sbi_zalloc(sizeof(struct hart_context));
+		if (!ctx)
+			return SBI_ENOMEM;
+
+		/* Bind context and domain */
+		ctx->dom = dom;
+		hart_context_set(dom, hartindex, ctx);
+	}
+
+	return 0;
+}
+
 int sbi_domain_context_enter(struct sbi_domain *dom)
 {
+	int rc;
+	struct hart_context *dom_ctx;
 	struct hart_context *ctx = hart_context_thishart_get();
-	struct hart_context *dom_ctx = hart_context_get(dom, current_hartindex());

+	/* Target domain must not be same as the current domain */
+	if (!dom || dom == sbi_domain_thishart_ptr())
+		return SBI_EINVAL;
+
+	/*
+	 * If this is the first time `enter` is called on the current
+	 * hart, no context has been allocated yet. Allocate a context
+	 * for each valid domain on the current hart.
+	 */
+	if (!ctx) {
+		rc = hart_context_init(current_hartindex());
+		if (rc)
+			return rc;
+
+		ctx = hart_context_thishart_get();
+		if (!ctx)
+			return SBI_EINVAL;
+	}
+
+	dom_ctx = hart_context_get(dom, current_hartindex());
 	/* Validate the domain context existence */
 	if (!dom_ctx)
 		return SBI_EINVAL;
@@ -170,13 +221,12 @@ int sbi_domain_context_enter(struct sbi_domain *dom)
 	/* Update target context's previous context to indicate the caller */
 	dom_ctx->prev_ctx = ctx;

-	switch_to_next_domain_context(ctx, dom_ctx);
-
-	return 0;
+	return switch_to_next_domain_context(ctx, dom_ctx);
 }

 int sbi_domain_context_exit(void)
 {
+	int rc;
 	u32 hartindex = current_hartindex();
-	struct sbi_domain *dom;
 	struct hart_context *ctx = hart_context_thishart_get();
@@ -188,21 +238,13 @@ int sbi_domain_context_exit(void)
 	 * its context on the current hart if valid.
 	 */
 	if (!ctx) {
-		sbi_domain_for_each(dom) {
-			if (!sbi_hartmask_test_hartindex(hartindex,
-							 dom->possible_harts))
-				continue;
-
-			dom_ctx = sbi_zalloc(sizeof(struct hart_context));
-			if (!dom_ctx)
-				return SBI_ENOMEM;
-
-			/* Bind context and domain */
-			dom_ctx->dom = dom;
-			hart_context_set(dom, hartindex, dom_ctx);
-		}
+		rc = hart_context_init(current_hartindex());
+		if (rc)
+			return rc;
+
+		ctx = hart_context_thishart_get();
+		if (!ctx)
+			return SBI_EINVAL;
 	}

 	dom_ctx = ctx->prev_ctx;
@@ -226,9 +268,7 @@ int sbi_domain_context_exit(void)
 	if (!dom_ctx)
 		dom_ctx = hart_context_get(&root, hartindex);

-	switch_to_next_domain_context(ctx, dom_ctx);
-
-	return 0;
+	return switch_to_next_domain_context(ctx, dom_ctx);
 }

 int sbi_domain_context_init(void)
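/*
 * Editor's note (illustrative call pattern, hypothetical caller):
 * with the void-to-int change above, callers can now detect a failed
 * context switch:
 *
 *	rc = sbi_domain_context_enter(target_dom);
 *	if (rc)
 *		return rc;
 *	... run in target_dom ...
 *	rc = sbi_domain_context_exit();
 *
 * The lazy hart_context_init() call means the first enter/exit on a
 * hart also allocates contexts for every domain that hart may run.
 */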
@@ -14,7 +14,7 @@
 #include <sbi/sbi_ecall_interface.h>
 #include <sbi/sbi_trap.h>
 #include <sbi/riscv_asm.h>
-#include <sbi/sbi_hart.h>
+#include <sbi/sbi_hart_protection.h>

 static int sbi_ecall_dbcn_handler(unsigned long extid, unsigned long funcid,
 				  struct sbi_trap_regs *regs,
@@ -46,12 +46,12 @@ static int sbi_ecall_dbcn_handler(unsigned long extid, unsigned long funcid,
 				       regs->a1, regs->a0, smode,
 				       SBI_DOMAIN_READ|SBI_DOMAIN_WRITE))
 			return SBI_ERR_INVALID_PARAM;
-		sbi_hart_map_saddr(regs->a1, regs->a0);
+		sbi_hart_protection_map_range(regs->a1, regs->a0);
 		if (funcid == SBI_EXT_DBCN_CONSOLE_WRITE)
 			out->value = sbi_nputs((const char *)regs->a1, regs->a0);
 		else
 			out->value = sbi_ngets((char *)regs->a1, regs->a0);
-		sbi_hart_unmap_saddr();
+		sbi_hart_protection_unmap_range(regs->a1, regs->a0);
 		return 0;
 	case SBI_EXT_DBCN_CONSOLE_WRITE_BYTE:
 		sbi_putc(regs->a0);
@@ -13,8 +13,10 @@
 #include <sbi/sbi_error.h>
 #include <sbi/sbi_hart.h>
 #include <sbi/sbi_heap.h>
+#include <sbi/sbi_hfence.h>
 #include <sbi/sbi_scratch.h>
 #include <sbi/sbi_string.h>
+#include <sbi/sbi_tlb.h>
 #include <sbi/sbi_types.h>

 #include <sbi/riscv_asm.h>
@@ -167,7 +169,16 @@ static int fwft_adue_supported(struct fwft_config *conf)

 static int fwft_set_adue(struct fwft_config *conf, unsigned long value)
 {
-	return fwft_menvcfg_set_bit(value, ENVCFG_ADUE_SHIFT);
+	int res = fwft_menvcfg_set_bit(value, ENVCFG_ADUE_SHIFT);
+
+	if (res == SBI_OK) {
+		__sbi_sfence_vma_all();
+
+		if (misa_extension('H'))
+			__sbi_hfence_gvma_all();
+	}
+
+	return res;
 }

 static int fwft_get_adue(struct fwft_config *conf, unsigned long *value)
@@ -13,23 +13,21 @@
 #include <sbi/riscv_fp.h>
 #include <sbi/sbi_bitops.h>
 #include <sbi/sbi_console.h>
-#include <sbi/sbi_domain.h>
 #include <sbi/sbi_csr_detect.h>
 #include <sbi/sbi_error.h>
 #include <sbi/sbi_hart.h>
-#include <sbi/sbi_math.h>
+#include <sbi/sbi_hart_pmp.h>
 #include <sbi/sbi_platform.h>
 #include <sbi/sbi_pmu.h>
 #include <sbi/sbi_string.h>
 #include <sbi/sbi_trap.h>
-#include <sbi/sbi_hfence.h>

 extern void __sbi_expected_trap(void);
 extern void __sbi_expected_trap_hext(void);

 void (*sbi_hart_expected_trap)(void) = &__sbi_expected_trap;

-static unsigned long hart_features_offset;
+unsigned long hart_features_offset;

 static void mstatus_init(struct sbi_scratch *scratch)
 {
@@ -49,10 +47,10 @@ static void mstatus_init(struct sbi_scratch *scratch)

 	csr_write(CSR_MSTATUS, mstatus_val);

-	/* Disable user mode usage of all perf counters except default ones (CY, TM, IR) */
+	/* Disable user mode usage of all perf counters except TM */
 	if (misa_extension('S') &&
 	    sbi_hart_priv_version(scratch) >= SBI_HART_PRIV_VER_1_10)
-		csr_write(CSR_SCOUNTEREN, 7);
+		csr_write(CSR_SCOUNTEREN, 0x02);

 	/**
 	 * OpenSBI doesn't use any PMU counters in M-mode.
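/*
 * Editor's note on the scounteren change above: scounteren bit 0
 * gates CY (cycle), bit 1 gates TM (time), and bit 2 gates IR
 * (instret) for user mode. The old value 7 (0b111) exposed all three
 * default counters, while the new value 0x02 (0b010) keeps only TM
 * readable from U-mode; presumably cycle/instret are then exposed on
 * request through the PMU extension instead.
 */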
@@ -110,6 +108,11 @@ static void mstatus_init(struct sbi_scratch *scratch)
 		else
 			mstateen_val &= ~SMSTATEEN0_CTR;

+		if (sbi_hart_has_extension(scratch, SBI_HART_EXT_SSQOSID))
+			mstateen_val |= SMSTATEEN0_SRMCFG;
+		else
+			mstateen_val &= ~SMSTATEEN0_SRMCFG;
+
 		csr_write64(CSR_MSTATEEN0, mstateen_val);
 		csr_write64(CSR_MSTATEEN1, SMSTATEEN_STATEN);
 		csr_write64(CSR_MSTATEEN2, SMSTATEEN_STATEN);
@@ -269,30 +272,6 @@ unsigned int sbi_hart_mhpm_mask(struct sbi_scratch *scratch)
 	return hfeatures->mhpm_mask;
 }

-unsigned int sbi_hart_pmp_count(struct sbi_scratch *scratch)
-{
-	struct sbi_hart_features *hfeatures =
-		sbi_scratch_offset_ptr(scratch, hart_features_offset);
-
-	return hfeatures->pmp_count;
-}
-
-unsigned int sbi_hart_pmp_log2gran(struct sbi_scratch *scratch)
-{
-	struct sbi_hart_features *hfeatures =
-		sbi_scratch_offset_ptr(scratch, hart_features_offset);
-
-	return hfeatures->pmp_log2gran;
-}
-
-unsigned int sbi_hart_pmp_addrbits(struct sbi_scratch *scratch)
-{
-	struct sbi_hart_features *hfeatures =
-		sbi_scratch_offset_ptr(scratch, hart_features_offset);
-
-	return hfeatures->pmp_addr_bits;
-}
-
 unsigned int sbi_hart_mhpm_bits(struct sbi_scratch *scratch)
 {
 	struct sbi_hart_features *hfeatures =
@@ -301,307 +280,6 @@ unsigned int sbi_hart_mhpm_bits(struct sbi_scratch *scratch)
 	return hfeatures->mhpm_bits;
 }

-/*
- * Returns Smepmp flags for a given domain and region based on permissions.
- */
-static unsigned int sbi_hart_get_smepmp_flags(struct sbi_scratch *scratch,
-					      struct sbi_domain *dom,
-					      struct sbi_domain_memregion *reg)
-{
-	unsigned int pmp_flags = 0;
-
-	if (SBI_DOMAIN_MEMREGION_IS_SHARED(reg->flags)) {
-		/* Read only for both M and SU modes */
-		if (SBI_DOMAIN_MEMREGION_IS_SUR_MR(reg->flags))
-			pmp_flags = (PMP_L | PMP_R | PMP_W | PMP_X);
-
-		/* Execute for SU but Read/Execute for M mode */
-		else if (SBI_DOMAIN_MEMREGION_IS_SUX_MRX(reg->flags))
-			/* locked region */
-			pmp_flags = (PMP_L | PMP_W | PMP_X);
-
-		/* Execute only for both M and SU modes */
-		else if (SBI_DOMAIN_MEMREGION_IS_SUX_MX(reg->flags))
-			pmp_flags = (PMP_L | PMP_W);
-
-		/* Read/Write for both M and SU modes */
-		else if (SBI_DOMAIN_MEMREGION_IS_SURW_MRW(reg->flags))
-			pmp_flags = (PMP_W | PMP_X);
-
-		/* Read only for SU mode but Read/Write for M mode */
-		else if (SBI_DOMAIN_MEMREGION_IS_SUR_MRW(reg->flags))
-			pmp_flags = (PMP_W);
-	} else if (SBI_DOMAIN_MEMREGION_M_ONLY_ACCESS(reg->flags)) {
-		/*
-		 * When smepmp is supported and used, an M-mode only region
-		 * cannot have RWX permissions.
-		 */
-		if ((reg->flags & SBI_DOMAIN_MEMREGION_M_ACCESS_MASK)
-		    == SBI_DOMAIN_MEMREGION_M_RWX) {
-			sbi_printf("%s: M-mode only regions cannot have "
-				   "RWX permissions\n", __func__);
-			return 0;
-		}
-
-		/* M-mode only access regions are always locked */
-		pmp_flags |= PMP_L;
-
-		if (reg->flags & SBI_DOMAIN_MEMREGION_M_READABLE)
-			pmp_flags |= PMP_R;
-		if (reg->flags & SBI_DOMAIN_MEMREGION_M_WRITABLE)
-			pmp_flags |= PMP_W;
-		if (reg->flags & SBI_DOMAIN_MEMREGION_M_EXECUTABLE)
-			pmp_flags |= PMP_X;
-	} else if (SBI_DOMAIN_MEMREGION_SU_ONLY_ACCESS(reg->flags)) {
-		if (reg->flags & SBI_DOMAIN_MEMREGION_SU_READABLE)
-			pmp_flags |= PMP_R;
-		if (reg->flags & SBI_DOMAIN_MEMREGION_SU_WRITABLE)
-			pmp_flags |= PMP_W;
-		if (reg->flags & SBI_DOMAIN_MEMREGION_SU_EXECUTABLE)
-			pmp_flags |= PMP_X;
-	}
-
-	return pmp_flags;
-}
-
-static void sbi_hart_smepmp_set(struct sbi_scratch *scratch,
-				struct sbi_domain *dom,
-				struct sbi_domain_memregion *reg,
-				unsigned int pmp_idx,
-				unsigned int pmp_flags,
-				unsigned int pmp_log2gran,
-				unsigned long pmp_addr_max)
-{
-	unsigned long pmp_addr = reg->base >> PMP_SHIFT;
-
-	if (pmp_log2gran <= reg->order && pmp_addr < pmp_addr_max) {
-		sbi_platform_pmp_set(sbi_platform_ptr(scratch),
-				     pmp_idx, reg->flags, pmp_flags,
-				     reg->base, reg->order);
-		pmp_set(pmp_idx, pmp_flags, reg->base, reg->order);
-	} else {
-		sbi_printf("Can not configure pmp for domain %s because"
-			   " memory region address 0x%lx or size 0x%lx "
-			   "is not in range.\n", dom->name, reg->base,
-			   reg->order);
-	}
-}
-
-static int sbi_hart_smepmp_configure(struct sbi_scratch *scratch,
-				     unsigned int pmp_count,
-				     unsigned int pmp_log2gran,
-				     unsigned long pmp_addr_max)
-{
-	struct sbi_domain_memregion *reg;
-	struct sbi_domain *dom = sbi_domain_thishart_ptr();
-	unsigned int pmp_idx, pmp_flags;
-
-	/*
-	 * Set the RLB so that we can write to PMP entries without
-	 * enforcement even if some entries are locked.
-	 */
-	csr_set(CSR_MSECCFG, MSECCFG_RLB);
-
-	/* Disable the reserved entry */
-	pmp_disable(SBI_SMEPMP_RESV_ENTRY);
-
-	/* Program M-only regions when MML is not set. */
-	pmp_idx = 0;
-	sbi_domain_for_each_memregion(dom, reg) {
-		/* Skip reserved entry */
-		if (pmp_idx == SBI_SMEPMP_RESV_ENTRY)
-			pmp_idx++;
-		if (pmp_count <= pmp_idx)
-			break;
-
-		/* Skip shared and SU-only regions */
-		if (!SBI_DOMAIN_MEMREGION_M_ONLY_ACCESS(reg->flags)) {
-			pmp_idx++;
-			continue;
-		}
-
-		pmp_flags = sbi_hart_get_smepmp_flags(scratch, dom, reg);
-		if (!pmp_flags)
-			return 0;
-
-		sbi_hart_smepmp_set(scratch, dom, reg, pmp_idx++, pmp_flags,
-				    pmp_log2gran, pmp_addr_max);
-	}
-
-	/* Set the MML to enforce new encoding */
-	csr_set(CSR_MSECCFG, MSECCFG_MML);
-
-	/* Program shared and SU-only regions */
-	pmp_idx = 0;
-	sbi_domain_for_each_memregion(dom, reg) {
-		/* Skip reserved entry */
-		if (pmp_idx == SBI_SMEPMP_RESV_ENTRY)
-			pmp_idx++;
-		if (pmp_count <= pmp_idx)
-			break;
-
-		/* Skip M-only regions */
-		if (SBI_DOMAIN_MEMREGION_M_ONLY_ACCESS(reg->flags)) {
-			pmp_idx++;
-			continue;
-		}
-
-		pmp_flags = sbi_hart_get_smepmp_flags(scratch, dom, reg);
-		if (!pmp_flags)
-			return 0;
-
-		sbi_hart_smepmp_set(scratch, dom, reg, pmp_idx++, pmp_flags,
-				    pmp_log2gran, pmp_addr_max);
-	}
-
-	/*
-	 * All entries are programmed.
-	 * Keep the RLB bit so that dynamic mappings can be done.
-	 */
-
-	return 0;
-}
-
-static int sbi_hart_oldpmp_configure(struct sbi_scratch *scratch,
-				     unsigned int pmp_count,
-				     unsigned int pmp_log2gran,
-				     unsigned long pmp_addr_max)
-{
-	struct sbi_domain_memregion *reg;
-	struct sbi_domain *dom = sbi_domain_thishart_ptr();
-	unsigned int pmp_idx = 0;
-	unsigned int pmp_flags;
-	unsigned long pmp_addr;
-
-	sbi_domain_for_each_memregion(dom, reg) {
-		if (pmp_count <= pmp_idx)
-			break;
-
-		pmp_flags = 0;
-
-		/*
-		 * If permissions are to be enforced for all modes on
-		 * this region, the lock bit should be set.
-		 */
-		if (reg->flags & SBI_DOMAIN_MEMREGION_ENF_PERMISSIONS)
-			pmp_flags |= PMP_L;
-
-		if (reg->flags & SBI_DOMAIN_MEMREGION_SU_READABLE)
-			pmp_flags |= PMP_R;
-		if (reg->flags & SBI_DOMAIN_MEMREGION_SU_WRITABLE)
-			pmp_flags |= PMP_W;
-		if (reg->flags & SBI_DOMAIN_MEMREGION_SU_EXECUTABLE)
-			pmp_flags |= PMP_X;
-
-		pmp_addr = reg->base >> PMP_SHIFT;
-		if (pmp_log2gran <= reg->order && pmp_addr < pmp_addr_max) {
-			sbi_platform_pmp_set(sbi_platform_ptr(scratch),
-					     pmp_idx, reg->flags, pmp_flags,
-					     reg->base, reg->order);
-			pmp_set(pmp_idx++, pmp_flags, reg->base, reg->order);
-		} else {
-			sbi_printf("Can not configure pmp for domain %s because"
-				   " memory region address 0x%lx or size 0x%lx "
-				   "is not in range.\n", dom->name, reg->base,
-				   reg->order);
-		}
-	}
-
-	return 0;
-}
-
-int sbi_hart_map_saddr(unsigned long addr, unsigned long size)
-{
-	/* shared R/W access for M and S/U mode */
-	unsigned int pmp_flags = (PMP_W | PMP_X);
-	unsigned long order, base = 0;
-	struct sbi_scratch *scratch = sbi_scratch_thishart_ptr();
-
-	/* If Smepmp is not supported no special mapping is required */
-	if (!sbi_hart_has_extension(scratch, SBI_HART_EXT_SMEPMP))
-		return SBI_OK;
-
-	if (is_pmp_entry_mapped(SBI_SMEPMP_RESV_ENTRY))
-		return SBI_ENOSPC;
-
-	for (order = MAX(sbi_hart_pmp_log2gran(scratch), log2roundup(size));
-	     order <= __riscv_xlen; order++) {
-		if (order < __riscv_xlen) {
-			base = addr & ~((1UL << order) - 1UL);
-			if ((base <= addr) &&
-			    (addr < (base + (1UL << order))) &&
-			    (base <= (addr + size - 1UL)) &&
-			    ((addr + size - 1UL) < (base + (1UL << order))))
-				break;
-		} else {
-			return SBI_EFAIL;
-		}
-	}
-
-	sbi_platform_pmp_set(sbi_platform_ptr(scratch), SBI_SMEPMP_RESV_ENTRY,
-			     SBI_DOMAIN_MEMREGION_SHARED_SURW_MRW,
-			     pmp_flags, base, order);
-	pmp_set(SBI_SMEPMP_RESV_ENTRY, pmp_flags, base, order);
-
-	return SBI_OK;
-}
-
-int sbi_hart_unmap_saddr(void)
-{
-	struct sbi_scratch *scratch = sbi_scratch_thishart_ptr();
-
-	if (!sbi_hart_has_extension(scratch, SBI_HART_EXT_SMEPMP))
-		return SBI_OK;
-
-	sbi_platform_pmp_disable(sbi_platform_ptr(scratch), SBI_SMEPMP_RESV_ENTRY);
-	return pmp_disable(SBI_SMEPMP_RESV_ENTRY);
-}
-
-int sbi_hart_pmp_configure(struct sbi_scratch *scratch)
-{
-	int rc;
-	unsigned int pmp_bits, pmp_log2gran;
-	unsigned int pmp_count = sbi_hart_pmp_count(scratch);
-	unsigned long pmp_addr_max;
-
-	if (!pmp_count)
-		return 0;
-
-	pmp_log2gran = sbi_hart_pmp_log2gran(scratch);
-	pmp_bits = sbi_hart_pmp_addrbits(scratch) - 1;
-	pmp_addr_max = (1UL << pmp_bits) | ((1UL << pmp_bits) - 1);
-
-	if (sbi_hart_has_extension(scratch, SBI_HART_EXT_SMEPMP))
-		rc = sbi_hart_smepmp_configure(scratch, pmp_count,
-					       pmp_log2gran, pmp_addr_max);
-	else
-		rc = sbi_hart_oldpmp_configure(scratch, pmp_count,
-					       pmp_log2gran, pmp_addr_max);
-
-	/*
-	 * As per section 3.7.2 of privileged specification v1.12,
-	 * virtual address translations can be speculatively performed
-	 * (even before actual access). These, along with PMP translations,
-	 * can be cached. This can pose a problem with CPU hotplug
-	 * and non-retentive suspend scenarios because PMP states are
-	 * not preserved.
-	 * It is advisable to flush the caching structures under such
-	 * conditions.
-	 */
-	if (misa_extension('S')) {
-		__asm__ __volatile__("sfence.vma");
-
-		/*
-		 * If hypervisor mode is supported, flush caching
-		 * structures in guest mode too.
-		 */
-		if (misa_extension('H'))
-			__sbi_hfence_gvma_all();
-	}
-
-	return rc;
-}
-
 int sbi_hart_priv_version(struct sbi_scratch *scratch)
 {
 	struct sbi_hart_features *hfeatures =
@@ -714,7 +392,10 @@ const struct sbi_hart_ext_data sbi_hart_ext[] = {
 	__SBI_HART_EXT_DATA(ssdbltrp, SBI_HART_EXT_SSDBLTRP),
 	__SBI_HART_EXT_DATA(smctr, SBI_HART_EXT_SMCTR),
 	__SBI_HART_EXT_DATA(ssctr, SBI_HART_EXT_SSCTR),
+	__SBI_HART_EXT_DATA(ssqosid, SBI_HART_EXT_SSQOSID),
+	__SBI_HART_EXT_DATA(ssstateen, SBI_HART_EXT_SSSTATEEN),
 	__SBI_HART_EXT_DATA(xsfcflushdlone, SBI_HART_EXT_XSIFIVE_CFLUSH_D_L1),
 	__SBI_HART_EXT_DATA(xsfcease, SBI_HART_EXT_XSIFIVE_CEASE),
 };

 _Static_assert(SBI_HART_EXT_MAX == array_size(sbi_hart_ext),
@@ -1037,10 +718,6 @@ int sbi_hart_reinit(struct sbi_scratch *scratch)
 	if (rc)
 		return rc;

-	rc = delegate_traps(scratch);
-	if (rc)
-		return rc;
-
 	return 0;
 }
@@ -1068,6 +745,16 @@ int sbi_hart_init(struct sbi_scratch *scratch, bool cold_boot)
 	if (rc)
 		return rc;

+	if (cold_boot) {
+		rc = sbi_hart_pmp_init(scratch);
+		if (rc)
+			return rc;
+	}
+
+	rc = delegate_traps(scratch);
+	if (rc)
+		return rc;
+
 	return sbi_hart_reinit(scratch);
 }
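/*
 * Editor's note: the cold_boot guard above means the protection
 * provider is registered exactly once, by the boot hart, before any
 * hart configures it. A rough sketch of the resulting init order:
 *
 *	sbi_hart_init(scratch, true)     - cold boot hart:
 *	  -> sbi_hart_pmp_init()           register pmp/epmp provider
 *	  -> delegate_traps()
 *	  -> sbi_hart_reinit()             per-hart CSR/feature setup
 *	sbi_hart_init(scratch, false)    - warm boot harts skip the
 *	                                   registration and reuse it
 */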
lib/sbi/sbi_hart_pmp.c (new file, 356 lines)
@@ -0,0 +1,356 @@
/*
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2025 Ventana Micro Systems Inc.
 */

#include <sbi/sbi_bitmap.h>
#include <sbi/sbi_console.h>
#include <sbi/sbi_domain.h>
#include <sbi/sbi_hart.h>
#include <sbi/sbi_hart_protection.h>
#include <sbi/sbi_hfence.h>
#include <sbi/sbi_math.h>
#include <sbi/sbi_platform.h>
#include <sbi/sbi_tlb.h>
#include <sbi/riscv_asm.h>

/*
 * Smepmp enforces access boundaries between M-mode and
 * S/U-mode. When it is enabled, the PMPs are programmed
 * such that M-mode doesn't have access to S/U-mode memory.
 *
 * To give M-mode R/W access to the shared memory between M and
 * S/U-mode, the first entry is reserved. It is disabled at boot.
 * When shared memory access is required, the physical address
 * should be programmed into the first PMP entry with R/W
 * permissions to the M-mode. Once the work is done, it should be
 * unmapped. The sbi_hart_protection_map_range()/
 * sbi_hart_protection_unmap_range() function pair should be used
 * to map/unmap the shared memory.
 */
#define SBI_SMEPMP_RESV_ENTRY		0

static DECLARE_BITMAP(fw_smepmp_ids, PMP_COUNT);
static bool fw_smepmp_ids_inited;

unsigned int sbi_hart_pmp_count(struct sbi_scratch *scratch)
{
	struct sbi_hart_features *hfeatures = sbi_hart_features_ptr(scratch);

	return hfeatures->pmp_count;
}

unsigned int sbi_hart_pmp_log2gran(struct sbi_scratch *scratch)
{
	struct sbi_hart_features *hfeatures = sbi_hart_features_ptr(scratch);

	return hfeatures->pmp_log2gran;
}

unsigned int sbi_hart_pmp_addrbits(struct sbi_scratch *scratch)
{
	struct sbi_hart_features *hfeatures = sbi_hart_features_ptr(scratch);

	return hfeatures->pmp_addr_bits;
}

bool sbi_hart_smepmp_is_fw_region(unsigned int pmp_idx)
{
	if (!fw_smepmp_ids_inited)
		return false;

	return bitmap_test(fw_smepmp_ids, pmp_idx) ? true : false;
}

static void sbi_hart_pmp_fence(void)
{
	/*
	 * As per section 3.7.2 of privileged specification v1.12,
	 * virtual address translations can be speculatively performed
	 * (even before actual access). These, along with PMP translations,
	 * can be cached. This can pose a problem with CPU hotplug
	 * and non-retentive suspend scenarios because PMP states are
	 * not preserved.
	 * It is advisable to flush the caching structures under such
	 * conditions.
	 */
	if (misa_extension('S')) {
		__sbi_sfence_vma_all();

		/*
		 * If hypervisor mode is supported, flush caching
		 * structures in guest mode too.
		 */
		if (misa_extension('H'))
			__sbi_hfence_gvma_all();
	}
}

static void sbi_hart_smepmp_set(struct sbi_scratch *scratch,
				struct sbi_domain *dom,
				struct sbi_domain_memregion *reg,
				unsigned int pmp_idx,
				unsigned int pmp_flags,
				unsigned int pmp_log2gran,
				unsigned long pmp_addr_max)
{
	unsigned long pmp_addr = reg->base >> PMP_SHIFT;

	if (pmp_log2gran <= reg->order && pmp_addr < pmp_addr_max) {
		sbi_platform_pmp_set(sbi_platform_ptr(scratch),
				     pmp_idx, reg->flags, pmp_flags,
				     reg->base, reg->order);
		pmp_set(pmp_idx, pmp_flags, reg->base, reg->order);
	} else {
		sbi_printf("Can not configure pmp for domain %s because"
			   " memory region address 0x%lx or size 0x%lx "
			   "is not in range.\n", dom->name, reg->base,
			   reg->order);
	}
}

static bool is_valid_pmp_idx(unsigned int pmp_count, unsigned int pmp_idx)
{
	if (pmp_count > pmp_idx)
		return true;

	sbi_printf("error: insufficient PMP entries\n");
	return false;
}

static int sbi_hart_smepmp_configure(struct sbi_scratch *scratch)
{
	struct sbi_domain_memregion *reg;
	struct sbi_domain *dom = sbi_domain_thishart_ptr();
	unsigned int pmp_log2gran, pmp_bits;
	unsigned int pmp_idx, pmp_count;
	unsigned long pmp_addr_max;
	unsigned int pmp_flags;

	pmp_count = sbi_hart_pmp_count(scratch);
	pmp_log2gran = sbi_hart_pmp_log2gran(scratch);
	pmp_bits = sbi_hart_pmp_addrbits(scratch) - 1;
	pmp_addr_max = (1UL << pmp_bits) | ((1UL << pmp_bits) - 1);

	/*
	 * Set the RLB so that we can write to PMP entries without
	 * enforcement even if some entries are locked.
	 */
	csr_set(CSR_MSECCFG, MSECCFG_RLB);

	/* Disable the reserved entry */
	pmp_disable(SBI_SMEPMP_RESV_ENTRY);

	/* Program M-only regions when MML is not set. */
	pmp_idx = 0;
	sbi_domain_for_each_memregion(dom, reg) {
		/* Skip reserved entry */
		if (pmp_idx == SBI_SMEPMP_RESV_ENTRY)
			pmp_idx++;
		if (!is_valid_pmp_idx(pmp_count, pmp_idx))
			return SBI_EFAIL;

		/* Skip shared and SU-only regions */
		if (!SBI_DOMAIN_MEMREGION_M_ONLY_ACCESS(reg->flags)) {
			pmp_idx++;
			continue;
		}

		/*
		 * Track firmware PMP entries to preserve them during
		 * domain switches. Under SmePMP, M-mode requires
		 * explicit PMP entries to access firmware code/data.
		 * These entries must remain enabled across domain
		 * context switches to prevent M-mode access faults.
		 */
		if (SBI_DOMAIN_MEMREGION_IS_FIRMWARE(reg->flags)) {
			if (fw_smepmp_ids_inited) {
				/* Check inconsistent firmware region */
				if (!sbi_hart_smepmp_is_fw_region(pmp_idx))
					return SBI_EINVAL;
			} else {
				bitmap_set(fw_smepmp_ids, pmp_idx, 1);
			}
		}

		pmp_flags = sbi_domain_get_smepmp_flags(reg);

		sbi_hart_smepmp_set(scratch, dom, reg, pmp_idx++, pmp_flags,
				    pmp_log2gran, pmp_addr_max);
	}

	fw_smepmp_ids_inited = true;

	/* Set the MML to enforce new encoding */
	csr_set(CSR_MSECCFG, MSECCFG_MML);

	/* Program shared and SU-only regions */
	pmp_idx = 0;
	sbi_domain_for_each_memregion(dom, reg) {
		/* Skip reserved entry */
		if (pmp_idx == SBI_SMEPMP_RESV_ENTRY)
			pmp_idx++;
		if (!is_valid_pmp_idx(pmp_count, pmp_idx))
			return SBI_EFAIL;

		/* Skip M-only regions */
		if (SBI_DOMAIN_MEMREGION_M_ONLY_ACCESS(reg->flags)) {
			pmp_idx++;
			continue;
		}

		pmp_flags = sbi_domain_get_smepmp_flags(reg);

		sbi_hart_smepmp_set(scratch, dom, reg, pmp_idx++, pmp_flags,
				    pmp_log2gran, pmp_addr_max);
	}

	/*
	 * All entries are programmed.
	 * Keep the RLB bit so that dynamic mappings can be done.
	 */

	sbi_hart_pmp_fence();
	return 0;
}

static int sbi_hart_smepmp_map_range(struct sbi_scratch *scratch,
				     unsigned long addr, unsigned long size)
{
	/* shared R/W access for M and S/U mode */
	unsigned int pmp_flags = (PMP_W | PMP_X);
	unsigned long order, base = 0;

	if (is_pmp_entry_mapped(SBI_SMEPMP_RESV_ENTRY))
		return SBI_ENOSPC;

	for (order = MAX(sbi_hart_pmp_log2gran(scratch), log2roundup(size));
	     order <= __riscv_xlen; order++) {
		if (order < __riscv_xlen) {
			base = addr & ~((1UL << order) - 1UL);
			if ((base <= addr) &&
			    (addr < (base + (1UL << order))) &&
			    (base <= (addr + size - 1UL)) &&
			    ((addr + size - 1UL) < (base + (1UL << order))))
				break;
		} else {
			return SBI_EFAIL;
		}
	}

	sbi_platform_pmp_set(sbi_platform_ptr(scratch), SBI_SMEPMP_RESV_ENTRY,
			     SBI_DOMAIN_MEMREGION_SHARED_SURW_MRW,
			     pmp_flags, base, order);
	pmp_set(SBI_SMEPMP_RESV_ENTRY, pmp_flags, base, order);

	return SBI_OK;
}

static int sbi_hart_smepmp_unmap_range(struct sbi_scratch *scratch,
				       unsigned long addr, unsigned long size)
{
	sbi_platform_pmp_disable(sbi_platform_ptr(scratch), SBI_SMEPMP_RESV_ENTRY);
	return pmp_disable(SBI_SMEPMP_RESV_ENTRY);
}

static int sbi_hart_oldpmp_configure(struct sbi_scratch *scratch)
{
	struct sbi_domain_memregion *reg;
	struct sbi_domain *dom = sbi_domain_thishart_ptr();
	unsigned long pmp_addr, pmp_addr_max;
	unsigned int pmp_log2gran, pmp_bits;
	unsigned int pmp_idx, pmp_count;
	unsigned int pmp_flags;

	pmp_count = sbi_hart_pmp_count(scratch);
	pmp_log2gran = sbi_hart_pmp_log2gran(scratch);
	pmp_bits = sbi_hart_pmp_addrbits(scratch) - 1;
	pmp_addr_max = (1UL << pmp_bits) | ((1UL << pmp_bits) - 1);

	pmp_idx = 0;
	sbi_domain_for_each_memregion(dom, reg) {
		if (!is_valid_pmp_idx(pmp_count, pmp_idx))
			return SBI_EFAIL;

		pmp_flags = 0;

		/*
		 * If permissions are to be enforced for all modes on
		 * this region, the lock bit should be set.
		 */
		if (reg->flags & SBI_DOMAIN_MEMREGION_ENF_PERMISSIONS)
			pmp_flags |= PMP_L;

		if (reg->flags & SBI_DOMAIN_MEMREGION_SU_READABLE)
			pmp_flags |= PMP_R;
		if (reg->flags & SBI_DOMAIN_MEMREGION_SU_WRITABLE)
			pmp_flags |= PMP_W;
		if (reg->flags & SBI_DOMAIN_MEMREGION_SU_EXECUTABLE)
			pmp_flags |= PMP_X;

		pmp_addr = reg->base >> PMP_SHIFT;
		if (pmp_log2gran <= reg->order && pmp_addr < pmp_addr_max) {
			sbi_platform_pmp_set(sbi_platform_ptr(scratch),
					     pmp_idx, reg->flags, pmp_flags,
					     reg->base, reg->order);
			pmp_set(pmp_idx++, pmp_flags, reg->base, reg->order);
		} else {
			sbi_printf("Can not configure pmp for domain %s because"
				   " memory region address 0x%lx or size 0x%lx "
				   "is not in range.\n", dom->name, reg->base,
				   reg->order);
		}
	}

	sbi_hart_pmp_fence();
	return 0;
}

static void sbi_hart_pmp_unconfigure(struct sbi_scratch *scratch)
{
	int i, pmp_count = sbi_hart_pmp_count(scratch);

	for (i = 0; i < pmp_count; i++) {
		/* Don't revoke firmware access permissions */
		if (sbi_hart_smepmp_is_fw_region(i))
			continue;

		sbi_platform_pmp_disable(sbi_platform_ptr(scratch), i);
		pmp_disable(i);
	}
}

static struct sbi_hart_protection pmp_protection = {
	.name = "pmp",
	.rating = 100,
	.configure = sbi_hart_oldpmp_configure,
	.unconfigure = sbi_hart_pmp_unconfigure,
};

static struct sbi_hart_protection epmp_protection = {
	.name = "epmp",
	.rating = 200,
	.configure = sbi_hart_smepmp_configure,
	.unconfigure = sbi_hart_pmp_unconfigure,
	.map_range = sbi_hart_smepmp_map_range,
	.unmap_range = sbi_hart_smepmp_unmap_range,
};

int sbi_hart_pmp_init(struct sbi_scratch *scratch)
{
	int rc;

	if (sbi_hart_pmp_count(scratch)) {
		if (sbi_hart_has_extension(scratch, SBI_HART_EXT_SMEPMP)) {
			rc = sbi_hart_protection_register(&epmp_protection);
			if (rc)
				return rc;
		} else {
			rc = sbi_hart_protection_register(&pmp_protection);
			if (rc)
				return rc;
		}
	}

	return 0;
}
lib/sbi/sbi_hart_protection.c (new file, 96 lines)
@@ -0,0 +1,96 @@
/*
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2025 Ventana Micro Systems Inc.
 */

#include <sbi/sbi_error.h>
#include <sbi/sbi_hart_protection.h>
#include <sbi/sbi_scratch.h>

static SBI_LIST_HEAD(hart_protection_list);

struct sbi_hart_protection *sbi_hart_protection_best(void)
{
	if (sbi_list_empty(&hart_protection_list))
		return NULL;

	return sbi_list_first_entry(&hart_protection_list,
				    struct sbi_hart_protection, head);
}

int sbi_hart_protection_register(struct sbi_hart_protection *hprot)
{
	struct sbi_hart_protection *pos = NULL;
	bool found_pos = false;

	if (!hprot)
		return SBI_EINVAL;

	sbi_list_for_each_entry(pos, &hart_protection_list, head) {
		if (hprot->rating > pos->rating) {
			found_pos = true;
			break;
		}
	}

	if (found_pos)
		sbi_list_add_tail(&hprot->head, &pos->head);
	else
		sbi_list_add_tail(&hprot->head, &hart_protection_list);

	return 0;
}

void sbi_hart_protection_unregister(struct sbi_hart_protection *hprot)
{
	if (!hprot)
		return;

	sbi_list_del(&hprot->head);
}

int sbi_hart_protection_configure(struct sbi_scratch *scratch)
{
	struct sbi_hart_protection *hprot = sbi_hart_protection_best();

	if (!hprot)
		return SBI_EINVAL;
	if (!hprot->configure)
		return SBI_ENOSYS;

	return hprot->configure(scratch);
}

void sbi_hart_protection_unconfigure(struct sbi_scratch *scratch)
{
	struct sbi_hart_protection *hprot = sbi_hart_protection_best();

	if (!hprot || !hprot->unconfigure)
		return;

	hprot->unconfigure(scratch);
}

int sbi_hart_protection_map_range(unsigned long base, unsigned long size)
{
	struct sbi_hart_protection *hprot = sbi_hart_protection_best();

	if (!hprot)
		return SBI_EINVAL;
	if (!hprot->map_range)
		return 0;

	return hprot->map_range(sbi_scratch_thishart_ptr(), base, size);
}

int sbi_hart_protection_unmap_range(unsigned long base, unsigned long size)
{
	struct sbi_hart_protection *hprot = sbi_hart_protection_best();

	if (!hprot)
		return SBI_EINVAL;
	if (!hprot->unmap_range)
		return 0;

	return hprot->unmap_range(sbi_scratch_thishart_ptr(), base, size);
}
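/*
 * Editor's note (hedged sketch): the rating-ordered list means a
 * platform could override both PMP flavours with its own mechanism,
 * e.g. an MPU-style implementation (hypothetical name and callbacks):
 *
 *	static struct sbi_hart_protection my_mpu_protection = {
 *		.name        = "my-mpu",
 *		.rating      = 300,	// beats pmp (100) and epmp (200)
 *		.configure   = my_mpu_configure,
 *		.unconfigure = my_mpu_unconfigure,
 *	};
 *
 *	rc = sbi_hart_protection_register(&my_mpu_protection);
 *
 * sbi_hart_protection_best() simply returns the list head, which the
 * insertion sort in sbi_hart_protection_register() keeps at the
 * highest rating.
 */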
@@ -16,7 +16,9 @@

/* Minimum size and alignment of heap allocations */
#define HEAP_ALLOC_ALIGN		64
#define HEAP_HOUSEKEEPING_FACTOR	16

/* Number of heap nodes to allocate at once */
#define HEAP_NODE_BATCH_SIZE		8

struct heap_node {
	struct sbi_dlist head;
@@ -28,20 +30,50 @@ struct sbi_heap_control {
	spinlock_t lock;
	unsigned long base;
	unsigned long size;
	unsigned long hkbase;
	unsigned long hksize;
	unsigned long resv;
	struct sbi_dlist free_node_list;
	struct sbi_dlist free_space_list;
	struct sbi_dlist used_space_list;
	struct heap_node init_free_space_node;
};

struct sbi_heap_control global_hpctrl;

static bool alloc_nodes(struct sbi_heap_control *hpctrl)
{
	size_t size = HEAP_NODE_BATCH_SIZE * sizeof(struct heap_node);
	struct heap_node *n, *new = NULL;

	/* alloc_with_align() requires at most two free nodes */
	if (hpctrl->free_node_list.next != hpctrl->free_node_list.prev)
		return true;

	sbi_list_for_each_entry_reverse(n, &hpctrl->free_space_list, head) {
		if (n->size >= size) {
			n->size -= size;
			if (!n->size) {
				sbi_list_del(&n->head);
				sbi_list_add_tail(&n->head, &hpctrl->free_node_list);
			}
			new = (void *)(n->addr + n->size);
			break;
		}
	}
	if (!new)
		return false;

	for (size_t i = 0; i < HEAP_NODE_BATCH_SIZE; i++)
		sbi_list_add_tail(&new[i].head, &hpctrl->free_node_list);
	hpctrl->resv += size;

	return true;
}

static void *alloc_with_align(struct sbi_heap_control *hpctrl,
			      size_t align, size_t size)
{
	void *ret = NULL;
	struct heap_node *n, *np, *rem;
	struct heap_node *n, *np;
	unsigned long lowest_aligned;
	size_t pad;

@@ -53,6 +85,10 @@ static void *alloc_with_align(struct sbi_heap_control *hpctrl,

	spin_lock(&hpctrl->lock);

	/* Ensure at least two free nodes are available for use below */
	if (!alloc_nodes(hpctrl))
		goto out;

	np = NULL;
	sbi_list_for_each_entry(n, &hpctrl->free_space_list, head) {
		lowest_aligned = ROUNDUP(n->addr, align);
@@ -67,54 +103,33 @@ static void *alloc_with_align(struct sbi_heap_control *hpctrl,
		goto out;

	if (pad) {
		if (sbi_list_empty(&hpctrl->free_node_list)) {
			goto out;
		}

		n = sbi_list_first_entry(&hpctrl->free_node_list,
					 struct heap_node, head);
		sbi_list_del(&n->head);

		if ((size + pad < np->size) &&
		    !sbi_list_empty(&hpctrl->free_node_list)) {
			rem = sbi_list_first_entry(&hpctrl->free_node_list,
						   struct heap_node, head);
			sbi_list_del(&rem->head);
			rem->addr = np->addr + (size + pad);
			rem->size = np->size - (size + pad);
			sbi_list_add_tail(&rem->head,
					  &hpctrl->free_space_list);
		} else if (size + pad != np->size) {
			/* Can't allocate, return n */
			sbi_list_add(&n->head, &hpctrl->free_node_list);
			ret = NULL;
			goto out;
		}

		n->addr = lowest_aligned;
		n->size = size;
		sbi_list_add_tail(&n->head, &hpctrl->used_space_list);

		np->size = pad;
		ret = (void *)n->addr;
	} else {
		if ((size < np->size) &&
		    !sbi_list_empty(&hpctrl->free_node_list)) {
			n = sbi_list_first_entry(&hpctrl->free_node_list,
						 struct heap_node, head);
			sbi_list_del(&n->head);
			n->addr = np->addr;
			n->size = size;
			np->addr += size;
			np->size -= size;
			sbi_list_add_tail(&n->head, &hpctrl->used_space_list);
			ret = (void *)n->addr;
		} else if (size == np->size) {
			n->size = pad;
			sbi_list_add_tail(&n->head, &np->head);

			np->addr += pad;
			np->size -= pad;
		}

		if (size < np->size) {
			n = sbi_list_first_entry(&hpctrl->free_node_list,
						 struct heap_node, head);
			sbi_list_del(&n->head);

			n->addr = np->addr + size;
			n->size = np->size - size;
			sbi_list_add(&n->head, &np->head);

			np->size = size;
		}

		sbi_list_del(&np->head);
		sbi_list_add_tail(&np->head, &hpctrl->used_space_list);
		ret = (void *)np->addr;
	}
}

out:
	spin_unlock(&hpctrl->lock);
@@ -216,44 +231,32 @@ unsigned long sbi_heap_free_space_from(struct sbi_heap_control *hpctrl)

unsigned long sbi_heap_used_space_from(struct sbi_heap_control *hpctrl)
{
	return hpctrl->size - hpctrl->hksize - sbi_heap_free_space();
	return hpctrl->size - hpctrl->resv - sbi_heap_free_space();
}

unsigned long sbi_heap_reserved_space_from(struct sbi_heap_control *hpctrl)
{
	return hpctrl->hksize;
	return hpctrl->resv;
}

int sbi_heap_init_new(struct sbi_heap_control *hpctrl, unsigned long base,
		      unsigned long size)
{
	unsigned long i;
	struct heap_node *n;

	/* Initialize heap control */
	SPIN_LOCK_INIT(hpctrl->lock);
	hpctrl->base = base;
	hpctrl->size = size;
	hpctrl->hkbase = hpctrl->base;
	hpctrl->hksize = hpctrl->size / HEAP_HOUSEKEEPING_FACTOR;
	hpctrl->hksize &= ~((unsigned long)HEAP_BASE_ALIGN - 1);
	hpctrl->resv = 0;
	SBI_INIT_LIST_HEAD(&hpctrl->free_node_list);
	SBI_INIT_LIST_HEAD(&hpctrl->free_space_list);
	SBI_INIT_LIST_HEAD(&hpctrl->used_space_list);

	/* Prepare free node list */
	for (i = 0; i < (hpctrl->hksize / sizeof(*n)); i++) {
		n = (struct heap_node *)(hpctrl->hkbase + (sizeof(*n) * i));
		n->addr = n->size = 0;
		sbi_list_add_tail(&n->head, &hpctrl->free_node_list);
	}

	/* Prepare free space list */
	n = sbi_list_first_entry(&hpctrl->free_node_list,
				 struct heap_node, head);
	sbi_list_del(&n->head);
	n->addr = hpctrl->hkbase + hpctrl->hksize;
	n->size = hpctrl->size - hpctrl->hksize;
	n = &hpctrl->init_free_space_node;
	n->addr = base;
	n->size = size;
	sbi_list_add_tail(&n->head, &hpctrl->free_space_list);

	return 0;
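Aside: the rework above stops carving a fixed hksize housekeeping region out of the heap and instead grows the node pool in batches of HEAP_NODE_BATCH_SIZE taken from free space on demand; the public allocator API is unchanged. A minimal caller sketch (buffer size is illustrative):

#include <sbi/sbi_heap.h>

static void demo_heap_use(void)
{
	/* Served from the global heap prepared by sbi_heap_init_new() */
	void *buf = sbi_zalloc(256);

	if (buf)
		sbi_free(buf);
}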
@@ -47,10 +47,8 @@ struct sbi_hsm_data {
	unsigned long saved_mie;
	unsigned long saved_mip;
	unsigned long saved_medeleg;
	unsigned long saved_menvcfg;
#if __riscv_xlen == 32
	unsigned long saved_menvcfgh;
#endif
	unsigned long saved_mideleg;
	u64 saved_menvcfg;
	atomic_t start_ticket;
};

@@ -366,7 +364,7 @@ int sbi_hsm_hart_start(struct sbi_scratch *scratch,
	    (hsm_device_has_hart_secondary_boot() && !init_count)) {
		rc = hsm_device_hart_start(hartid, scratch->warmboot_addr);
	} else {
		rc = sbi_ipi_raw_send(hartindex);
		rc = sbi_ipi_raw_send(hartindex, true);
	}

	if (!rc)
@@ -429,12 +427,9 @@ void __sbi_hsm_suspend_non_ret_save(struct sbi_scratch *scratch)
	hdata->saved_mie = csr_read(CSR_MIE);
	hdata->saved_mip = csr_read(CSR_MIP) & (MIP_SSIP | MIP_STIP);
	hdata->saved_medeleg = csr_read(CSR_MEDELEG);
	if (sbi_hart_priv_version(scratch) >= SBI_HART_PRIV_VER_1_12) {
#if __riscv_xlen == 32
		hdata->saved_menvcfgh = csr_read(CSR_MENVCFGH);
#endif
		hdata->saved_menvcfg = csr_read(CSR_MENVCFG);
	}
	hdata->saved_mideleg = csr_read(CSR_MIDELEG);
	if (sbi_hart_priv_version(scratch) >= SBI_HART_PRIV_VER_1_12)
		hdata->saved_menvcfg = csr_read64(CSR_MENVCFG);
}

static void __sbi_hsm_suspend_non_ret_restore(struct sbi_scratch *scratch)
@@ -442,12 +437,9 @@ static void __sbi_hsm_suspend_non_ret_restore(struct sbi_scratch *scratch)
	struct sbi_hsm_data *hdata = sbi_scratch_offset_ptr(scratch,
							    hart_data_offset);

	if (sbi_hart_priv_version(scratch) >= SBI_HART_PRIV_VER_1_12) {
		csr_write(CSR_MENVCFG, hdata->saved_menvcfg);
#if __riscv_xlen == 32
		csr_write(CSR_MENVCFGH, hdata->saved_menvcfgh);
#endif
	}
	if (sbi_hart_priv_version(scratch) >= SBI_HART_PRIV_VER_1_12)
		csr_write64(CSR_MENVCFG, hdata->saved_menvcfg);
	csr_write(CSR_MIDELEG, hdata->saved_mideleg);
	csr_write(CSR_MEDELEG, hdata->saved_medeleg);
	csr_write(CSR_MIE, hdata->saved_mie);
	csr_set(CSR_MIP, (hdata->saved_mip & (MIP_SSIP | MIP_STIP)));
@@ -463,6 +455,9 @@ void sbi_hsm_hart_resume_start(struct sbi_scratch *scratch)
				   SBI_HSM_STATE_RESUME_PENDING))
		sbi_hart_hang();

	if (sbi_system_is_suspended())
		sbi_system_resume();
	else
		hsm_device_hart_resume();
}

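Aside: the menvcfg save/restore now goes through csr_read64()/csr_write64(), which hide the RV32 high-half CSR behind one 64-bit accessor. A sketch of how such a helper can be composed on RV32 (this mirrors the idea, not the actual OpenSBI macro text):

/* Hypothetical RV32 composition of a 64-bit CSR read:
 * splice the MENVCFGH high word onto the MENVCFG low word. */
static inline u64 demo_csr_read64_menvcfg(void)
{
#if __riscv_xlen == 32
	return ((u64)csr_read(CSR_MENVCFGH) << 32) | csr_read(CSR_MENVCFG);
#else
	return csr_read(CSR_MENVCFG);
#endif
}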
@@ -30,7 +30,7 @@ int sbi_illegal_atomic(ulong insn, struct sbi_trap_regs *regs)
	{ \
		register ulong tinfo asm("a3"); \
		register ulong mstatus = 0; \
		register ulong mtvec = sbi_hart_expected_trap_addr(); \
		register ulong mtvec = (ulong)sbi_hart_expected_trap; \
		type ret = 0; \
		trap->cause = 0; \
		asm volatile( \
@@ -57,7 +57,7 @@ int sbi_illegal_atomic(ulong insn, struct sbi_trap_regs *regs)
	{ \
		register ulong tinfo asm("a3"); \
		register ulong mstatus = 0; \
		register ulong mtvec = sbi_hart_expected_trap_addr(); \
		register ulong mtvec = (ulong)sbi_hart_expected_trap; \
		type ret = 0; \
		trap->cause = 0; \
		asm volatile( \
@@ -18,6 +18,8 @@
#include <sbi/sbi_fwft.h>
#include <sbi/sbi_hart.h>
#include <sbi/sbi_hartmask.h>
#include <sbi/sbi_hart_pmp.h>
#include <sbi/sbi_hart_protection.h>
#include <sbi/sbi_heap.h>
#include <sbi/sbi_hsm.h>
#include <sbi/sbi_ipi.h>
@@ -74,6 +76,7 @@ static void sbi_boot_print_general(struct sbi_scratch *scratch)
	const struct sbi_hsm_device *hdev;
	const struct sbi_ipi_device *idev;
	const struct sbi_timer_device *tdev;
	const struct sbi_hart_protection *hprot;
	const struct sbi_console_device *cdev;
	const struct sbi_system_reset_device *srdev;
	const struct sbi_system_suspend_device *susp_dev;
@@ -90,6 +93,9 @@ static void sbi_boot_print_general(struct sbi_scratch *scratch)
	sbi_printf("Platform Features         : %s\n", str);
	sbi_printf("Platform HART Count       : %u\n",
		   sbi_platform_hart_count(plat));
	hprot = sbi_hart_protection_best();
	sbi_printf("Platform HART Protection  : %s\n",
		   (hprot) ? hprot->name : "---");
	idev = sbi_ipi_get_device();
	sbi_printf("Platform IPI Device       : %s\n",
		   (idev) ? idev->name : "---");
@@ -384,12 +390,12 @@ static void __noreturn init_coldboot(struct sbi_scratch *scratch, u32 hartid)
	}

	/*
	 * Configure PMP at last because if SMEPMP is detected,
	 * M-mode access to the S/U space will be rescinded.
	 * Configure hart isolation at last because if SMEPMP is
	 * detected, M-mode access to the S/U space will be rescinded.
	 */
	rc = sbi_hart_pmp_configure(scratch);
	rc = sbi_hart_protection_configure(scratch);
	if (rc) {
		sbi_printf("%s: PMP configure failed (error %d)\n",
		sbi_printf("%s: hart isolation configure failed (error %d)\n",
			   __func__, rc);
		sbi_hart_hang();
	}
@@ -463,10 +469,10 @@ static void __noreturn init_warm_startup(struct sbi_scratch *scratch,
		sbi_hart_hang();

	/*
	 * Configure PMP at last because if SMEPMP is detected,
	 * M-mode access to the S/U space will be rescinded.
	 * Configure hart isolation at last because if SMEPMP is
	 * detected, M-mode access to the S/U space will be rescinded.
	 */
	rc = sbi_hart_pmp_configure(scratch);
	rc = sbi_hart_protection_configure(scratch);
	if (rc)
		sbi_hart_hang();

@@ -487,7 +493,7 @@ static void __noreturn init_warm_resume(struct sbi_scratch *scratch,
	if (rc)
		sbi_hart_hang();

	rc = sbi_hart_pmp_configure(scratch);
	rc = sbi_hart_protection_configure(scratch);
	if (rc)
		sbi_hart_hang();

@@ -507,7 +513,7 @@ static void __noreturn init_warmboot(struct sbi_scratch *scratch, u32 hartid)
	if (hstate == SBI_HSM_STATE_SUSPENDED) {
		init_warm_resume(scratch, hartid);
	} else {
		sbi_ipi_raw_clear();
		sbi_ipi_raw_clear(true);
		init_warm_startup(scratch, hartid);
	}
}
@@ -15,9 +15,11 @@
#include <sbi/sbi_domain.h>
#include <sbi/sbi_error.h>
#include <sbi/sbi_hart.h>
#include <sbi/sbi_heap.h>
#include <sbi/sbi_hsm.h>
#include <sbi/sbi_init.h>
#include <sbi/sbi_ipi.h>
#include <sbi/sbi_list.h>
#include <sbi/sbi_platform.h>
#include <sbi/sbi_pmu.h>
#include <sbi/sbi_string.h>
@@ -32,8 +34,14 @@ _Static_assert(
	"type of sbi_ipi_data.ipi_type has changed, please redefine SBI_IPI_EVENT_MAX"
);

struct sbi_ipi_device_node {
	struct sbi_dlist head;
	const struct sbi_ipi_device *dev;
};

static unsigned long ipi_data_off;
static const struct sbi_ipi_device *ipi_dev = NULL;
static SBI_LIST_HEAD(ipi_dev_node_list);
static const struct sbi_ipi_event_ops *ipi_ops_array[SBI_IPI_EVENT_MAX];

static int sbi_ipi_send(struct sbi_scratch *scratch, u32 remote_hartindex,
@@ -80,7 +88,7 @@ static int sbi_ipi_send(struct sbi_scratch *scratch, u32 remote_hartindex,
	 */
	if (!__atomic_fetch_or(&ipi_data->ipi_type,
			       BIT(event), __ATOMIC_RELAXED))
		ret = sbi_ipi_raw_send(remote_hartindex);
		ret = sbi_ipi_raw_send(remote_hartindex, false);

	sbi_pmu_ctr_incr_fw(SBI_PMU_FW_IPI_SENT);

@@ -248,7 +256,7 @@ void sbi_ipi_process(void)
		sbi_scratch_offset_ptr(scratch, ipi_data_off);

	sbi_pmu_ctr_incr_fw(SBI_PMU_FW_IPI_RECVD);
	sbi_ipi_raw_clear();
	sbi_ipi_raw_clear(false);

	ipi_type = atomic_raw_xchg_ulong(&ipi_data->ipi_type, 0);
	ipi_event = 0;
@@ -263,8 +271,10 @@ void sbi_ipi_process(void)
	}
}

int sbi_ipi_raw_send(u32 hartindex)
int sbi_ipi_raw_send(u32 hartindex, bool all_devices)
{
	struct sbi_ipi_device_node *entry;

	if (!ipi_dev || !ipi_dev->ipi_send)
		return SBI_EINVAL;

@@ -279,14 +289,31 @@ int sbi_ipi_raw_send(u32 hartindex)
	 */
	wmb();

	if (all_devices) {
		sbi_list_for_each_entry(entry, &ipi_dev_node_list, head) {
			if (entry->dev->ipi_send)
				entry->dev->ipi_send(hartindex);
		}
	} else {
		ipi_dev->ipi_send(hartindex);
	}

	return 0;
}

void sbi_ipi_raw_clear(void)
void sbi_ipi_raw_clear(bool all_devices)
{
	struct sbi_ipi_device_node *entry;

	if (all_devices) {
		sbi_list_for_each_entry(entry, &ipi_dev_node_list, head) {
			if (entry->dev->ipi_clear)
				entry->dev->ipi_clear();
		}
	} else {
		if (ipi_dev && ipi_dev->ipi_clear)
			ipi_dev->ipi_clear();
	}

	/*
	 * Ensure that memory or MMIO writes after this
@@ -305,11 +332,21 @@ const struct sbi_ipi_device *sbi_ipi_get_device(void)
	return ipi_dev;
}

void sbi_ipi_set_device(const struct sbi_ipi_device *dev)
void sbi_ipi_add_device(const struct sbi_ipi_device *dev)
{
	if (!dev || ipi_dev)
	struct sbi_ipi_device_node *entry;

	if (!dev)
		return;

	entry = sbi_zalloc(sizeof(*entry));
	if (!entry)
		return;
	SBI_INIT_LIST_HEAD(&entry->head);
	entry->dev = dev;
	sbi_list_add_tail(&entry->head, &ipi_dev_node_list);

	if (!ipi_dev || ipi_dev->rating < dev->rating)
		ipi_dev = dev;
}

@@ -330,11 +367,6 @@ int sbi_ipi_init(struct sbi_scratch *scratch, bool cold_boot)
		if (ret < 0)
			return ret;
		ipi_halt_event = ret;

		/* Initialize platform IPI support */
		ret = sbi_platform_ipi_init(sbi_platform_ptr(scratch));
		if (ret)
			return ret;
	} else {
		if (!ipi_data_off)
			return SBI_ENOMEM;
@@ -347,7 +379,7 @@ int sbi_ipi_init(struct sbi_scratch *scratch, bool cold_boot)
	ipi_data->ipi_type = 0x00;

	/* Clear any pending IPIs for the current hart */
	sbi_ipi_raw_clear();
	sbi_ipi_raw_clear(true);

	/* Enable software interrupts */
	csr_set(CSR_MIE, MIP_MSIP);
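Aside: with sbi_ipi_add_device() several IPI devices can coexist; all of them live on ipi_dev_node_list, while ipi_dev tracks the highest-rated one used for the normal send/clear fast path. A hedged registration sketch (the device and its rating value are illustrative; `rating` is the new field exercised above):

static void demo_ipi_send(u32 hartindex)
{
	/* Illustrative: poke a platform-specific doorbell register here */
}

static const struct sbi_ipi_device demo_ipi_dev = {
	.name	  = "demo-ipi",
	.rating	  = 50,	/* lower-rated devices stay on the list as fallbacks */
	.ipi_send = demo_ipi_send,
};

static void demo_register_ipi(void)
{
	sbi_ipi_add_device(&demo_ipi_dev);
}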
@@ -11,6 +11,7 @@
#include <sbi/sbi_domain.h>
#include <sbi/sbi_error.h>
#include <sbi/sbi_hart.h>
#include <sbi/sbi_hart_protection.h>
#include <sbi/sbi_heap.h>
#include <sbi/sbi_platform.h>
#include <sbi/sbi_mpxy.h>
@@ -375,10 +376,10 @@ int sbi_mpxy_set_shmem(unsigned long shmem_phys_lo,
	if (flags == SBI_EXT_MPXY_SHMEM_FLAG_OVERWRITE_RETURN) {
		ret_buf = (unsigned long *)(ulong)SHMEM_PHYS_ADDR(shmem_phys_hi,
								  shmem_phys_lo);
		sbi_hart_map_saddr((unsigned long)ret_buf, mpxy_shmem_size);
		sbi_hart_protection_map_range((unsigned long)ret_buf, mpxy_shmem_size);
		ret_buf[0] = cpu_to_lle(ms->shmem.shmem_addr_lo);
		ret_buf[1] = cpu_to_lle(ms->shmem.shmem_addr_hi);
		sbi_hart_unmap_saddr();
		sbi_hart_protection_unmap_range((unsigned long)ret_buf, mpxy_shmem_size);
	}

	/** Setup the new shared memory */
@@ -407,7 +408,7 @@ int sbi_mpxy_get_channel_ids(u32 start_index)
		return SBI_ERR_INVALID_PARAM;

	shmem_base = hart_shmem_base(ms);
	sbi_hart_map_saddr((unsigned long)hart_shmem_base(ms), mpxy_shmem_size);
	sbi_hart_protection_map_range((unsigned long)hart_shmem_base(ms), mpxy_shmem_size);

	/** number of channel ids which can be stored in shmem adjusting
	 * for remaining and returned fields */
@@ -434,7 +435,7 @@ int sbi_mpxy_get_channel_ids(u32 start_index)
	shmem_base[0] = cpu_to_le32(remaining);
	shmem_base[1] = cpu_to_le32(returned);

	sbi_hart_unmap_saddr();
	sbi_hart_protection_unmap_range((unsigned long)hart_shmem_base(ms), mpxy_shmem_size);

	return SBI_SUCCESS;
}
@@ -465,7 +466,7 @@ int sbi_mpxy_read_attrs(u32 channel_id, u32 base_attr_id, u32 attr_count)
	shmem_base = hart_shmem_base(ms);
	end_id = base_attr_id + attr_count - 1;

	sbi_hart_map_saddr((unsigned long)hart_shmem_base(ms), mpxy_shmem_size);
	sbi_hart_protection_map_range((unsigned long)hart_shmem_base(ms), mpxy_shmem_size);

	/* Standard attributes range check */
	if (mpxy_is_std_attr(base_attr_id)) {
@@ -504,7 +505,7 @@ int sbi_mpxy_read_attrs(u32 channel_id, u32 base_attr_id, u32 attr_count)
					     base_attr_id, attr_count);
	}
out:
	sbi_hart_unmap_saddr();
	sbi_hart_protection_unmap_range((unsigned long)hart_shmem_base(ms), mpxy_shmem_size);
	return ret;
}

@@ -616,7 +617,7 @@ int sbi_mpxy_write_attrs(u32 channel_id, u32 base_attr_id, u32 attr_count)
	shmem_base = hart_shmem_base(ms);
	end_id = base_attr_id + attr_count - 1;

	sbi_hart_map_saddr((unsigned long)shmem_base, mpxy_shmem_size);
	sbi_hart_protection_map_range((unsigned long)shmem_base, mpxy_shmem_size);

	mem_ptr = (u32 *)shmem_base;

@@ -673,7 +674,7 @@ int sbi_mpxy_write_attrs(u32 channel_id, u32 base_attr_id, u32 attr_count)
					      base_attr_id, attr_count);
	}
out:
	sbi_hart_unmap_saddr();
	sbi_hart_protection_unmap_range((unsigned long)shmem_base, mpxy_shmem_size);
	return ret;
}

@@ -705,7 +706,7 @@ int sbi_mpxy_send_message(u32 channel_id, u8 msg_id,
		return SBI_ERR_INVALID_PARAM;

	shmem_base = hart_shmem_base(ms);
	sbi_hart_map_saddr((unsigned long)shmem_base, mpxy_shmem_size);
	sbi_hart_protection_map_range((unsigned long)shmem_base, mpxy_shmem_size);

	if (resp_data_len) {
		resp_buf = shmem_base;
@@ -722,7 +723,7 @@ int sbi_mpxy_send_message(u32 channel_id, u8 msg_id,
					   msg_data_len);
	}

	sbi_hart_unmap_saddr();
	sbi_hart_protection_unmap_range((unsigned long)shmem_base, mpxy_shmem_size);

	if (ret == SBI_ERR_TIMEOUT || ret == SBI_ERR_IO)
		return ret;
@@ -752,12 +753,12 @@ int sbi_mpxy_get_notification_events(u32 channel_id, unsigned long *events_len)
		return SBI_ERR_NOT_SUPPORTED;

	shmem_base = hart_shmem_base(ms);
	sbi_hart_map_saddr((unsigned long)shmem_base, mpxy_shmem_size);
	sbi_hart_protection_map_range((unsigned long)shmem_base, mpxy_shmem_size);
	eventsbuf = shmem_base;
	ret = channel->get_notification_events(channel, eventsbuf,
					       mpxy_shmem_size,
					       events_len);
	sbi_hart_unmap_saddr();
	sbi_hart_protection_unmap_range((unsigned long)shmem_base, mpxy_shmem_size);

	if (ret)
		return ret;
@@ -13,6 +13,7 @@
#include <sbi/sbi_domain.h>
#include <sbi/sbi_ecall_interface.h>
#include <sbi/sbi_hart.h>
#include <sbi/sbi_hart_protection.h>
#include <sbi/sbi_heap.h>
#include <sbi/sbi_platform.h>
#include <sbi/sbi_pmu.h>
@@ -56,6 +57,14 @@ union sbi_pmu_ctr_info {
#error "Can't handle firmware counters beyond BITS_PER_LONG"
#endif

/** HW event configuration parameters */
struct sbi_pmu_hw_event_config {
	/* event_data value from sbi_pmu_ctr_cfg_match() */
	uint64_t event_data;
	/* HW events flags from sbi_pmu_ctr_cfg_match() */
	uint64_t flags;
};

/** Per-HART state of the PMU counters */
struct sbi_pmu_hart_state {
	/* HART to which this state belongs */
@@ -72,6 +81,12 @@ struct sbi_pmu_hart_state {
	 * and hence can optimally share the same memory.
	 */
	uint64_t fw_counters_data[SBI_PMU_FW_CTR_MAX];
	/* HW events configuration parameters from
	 * sbi_pmu_ctr_cfg_match() command which are
	 * used for restoring RAW hardware events after
	 * cpu suspending.
	 */
	struct sbi_pmu_hw_event_config hw_counters_cfg[SBI_PMU_HW_CTR_MAX];
};

/** Offset of pointer to PMU HART state in scratch space */
@@ -463,6 +478,61 @@ static int pmu_ctr_start_fw(struct sbi_pmu_hart_state *phs,
	return 0;
}

static void pmu_update_inhibit_flags(unsigned long flags, uint64_t *mhpmevent_val)
{
	if (flags & SBI_PMU_CFG_FLAG_SET_VUINH)
		*mhpmevent_val |= MHPMEVENT_VUINH;
	if (flags & SBI_PMU_CFG_FLAG_SET_VSINH)
		*mhpmevent_val |= MHPMEVENT_VSINH;
	if (flags & SBI_PMU_CFG_FLAG_SET_UINH)
		*mhpmevent_val |= MHPMEVENT_UINH;
	if (flags & SBI_PMU_CFG_FLAG_SET_SINH)
		*mhpmevent_val |= MHPMEVENT_SINH;
}

static int pmu_update_hw_mhpmevent(struct sbi_pmu_hw_event *hw_evt, int ctr_idx,
				   unsigned long flags, unsigned long eindex,
				   uint64_t data)
{
	struct sbi_scratch *scratch = sbi_scratch_thishart_ptr();
	const struct sbi_platform *plat = sbi_platform_ptr(scratch);
	uint64_t mhpmevent_val;

	/* Get the final mhpmevent value to be written from platform */
	mhpmevent_val = sbi_platform_pmu_xlate_to_mhpmevent(plat, eindex, data);

	if (!mhpmevent_val || ctr_idx < 3 || ctr_idx >= SBI_PMU_HW_CTR_MAX)
		return SBI_EFAIL;

	/**
	 * Always set the OVF bit (disable interrupts) and inhibit counting of
	 * events in M-mode. The OVF bit should be enabled during the start call.
	 */
	if (sbi_hart_has_extension(scratch, SBI_HART_EXT_SSCOFPMF))
		mhpmevent_val = (mhpmevent_val & ~MHPMEVENT_SSCOF_MASK) |
				MHPMEVENT_MINH | MHPMEVENT_OF;

	if (pmu_dev && pmu_dev->hw_counter_disable_irq)
		pmu_dev->hw_counter_disable_irq(ctr_idx);

	/* Update the inhibit flags based on inhibit flags received from supervisor */
	if (sbi_hart_has_extension(scratch, SBI_HART_EXT_SSCOFPMF))
		pmu_update_inhibit_flags(flags, &mhpmevent_val);
	if (pmu_dev && pmu_dev->hw_counter_filter_mode)
		pmu_dev->hw_counter_filter_mode(flags, ctr_idx);

#if __riscv_xlen == 32
	csr_write_num(CSR_MHPMEVENT3 + ctr_idx - 3, mhpmevent_val & 0xFFFFFFFF);
	if (sbi_hart_has_extension(scratch, SBI_HART_EXT_SSCOFPMF))
		csr_write_num(CSR_MHPMEVENT3H + ctr_idx - 3,
			      mhpmevent_val >> BITS_PER_LONG);
#else
	csr_write_num(CSR_MHPMEVENT3 + ctr_idx - 3, mhpmevent_val);
#endif

	return 0;
}

int sbi_pmu_ctr_start(unsigned long cbase, unsigned long cmask,
		      unsigned long flags, uint64_t ival)
{
@@ -499,10 +569,21 @@ int sbi_pmu_ctr_start(unsigned long cbase, unsigned long cmask,
				: 0x0;
			ret = pmu_ctr_start_fw(phs, cidx, event_code, edata,
					       ival, bUpdate);
		} else {
			if (cidx >= 3) {
				struct sbi_pmu_hw_event_config *ev_cfg =
					&phs->hw_counters_cfg[cidx];

				ret = pmu_update_hw_mhpmevent(&hw_event_map[cidx], cidx,
							      ev_cfg->flags,
							      phs->active_events[cidx],
							      ev_cfg->event_data);
				if (ret)
					return ret;
			}
			else
				ret = pmu_ctr_start_hw(cidx, ival, bUpdate);
		}
	}

	return ret;
}
@@ -614,61 +695,6 @@ int sbi_pmu_ctr_stop(unsigned long cbase, unsigned long cmask,
	return ret;
}

static void pmu_update_inhibit_flags(unsigned long flags, uint64_t *mhpmevent_val)
{
	if (flags & SBI_PMU_CFG_FLAG_SET_VUINH)
		*mhpmevent_val |= MHPMEVENT_VUINH;
	if (flags & SBI_PMU_CFG_FLAG_SET_VSINH)
		*mhpmevent_val |= MHPMEVENT_VSINH;
	if (flags & SBI_PMU_CFG_FLAG_SET_UINH)
		*mhpmevent_val |= MHPMEVENT_UINH;
	if (flags & SBI_PMU_CFG_FLAG_SET_SINH)
		*mhpmevent_val |= MHPMEVENT_SINH;
}

static int pmu_update_hw_mhpmevent(struct sbi_pmu_hw_event *hw_evt, int ctr_idx,
				   unsigned long flags, unsigned long eindex,
				   uint64_t data)
{
	struct sbi_scratch *scratch = sbi_scratch_thishart_ptr();
	const struct sbi_platform *plat = sbi_platform_ptr(scratch);
	uint64_t mhpmevent_val;

	/* Get the final mhpmevent value to be written from platform */
	mhpmevent_val = sbi_platform_pmu_xlate_to_mhpmevent(plat, eindex, data);

	if (!mhpmevent_val || ctr_idx < 3 || ctr_idx >= SBI_PMU_HW_CTR_MAX)
		return SBI_EFAIL;

	/**
	 * Always set the OVF bit (disable interrupts) and inhibit counting of
	 * events in M-mode. The OVF bit should be enabled during the start call.
	 */
	if (sbi_hart_has_extension(scratch, SBI_HART_EXT_SSCOFPMF))
		mhpmevent_val = (mhpmevent_val & ~MHPMEVENT_SSCOF_MASK) |
				MHPMEVENT_MINH | MHPMEVENT_OF;

	if (pmu_dev && pmu_dev->hw_counter_disable_irq)
		pmu_dev->hw_counter_disable_irq(ctr_idx);

	/* Update the inhibit flags based on inhibit flags received from supervisor */
	if (sbi_hart_has_extension(scratch, SBI_HART_EXT_SSCOFPMF))
		pmu_update_inhibit_flags(flags, &mhpmevent_val);
	if (pmu_dev && pmu_dev->hw_counter_filter_mode)
		pmu_dev->hw_counter_filter_mode(flags, ctr_idx);

#if __riscv_xlen == 32
	csr_write_num(CSR_MHPMEVENT3 + ctr_idx - 3, mhpmevent_val & 0xFFFFFFFF);
	if (sbi_hart_has_extension(scratch, SBI_HART_EXT_SSCOFPMF))
		csr_write_num(CSR_MHPMEVENT3H + ctr_idx - 3,
			      mhpmevent_val >> BITS_PER_LONG);
#else
	csr_write_num(CSR_MHPMEVENT3 + ctr_idx - 3, mhpmevent_val);
#endif

	return 0;
}

static int pmu_fixed_ctr_update_inhibit_bits(int fixed_ctr, unsigned long flags)
{
	struct sbi_scratch *scratch = sbi_scratch_thishart_ptr();
@@ -780,6 +806,7 @@ static int pmu_ctr_find_hw(struct sbi_pmu_hart_state *phs,
				continue;
			/* We found a valid counter that is not started yet */
			ctr_idx = cbase;
			break;
		}
	}

@@ -817,7 +844,7 @@ static int pmu_ctr_find_fw(struct sbi_pmu_hart_state *phs,
		cidx = i + cbase;
		if (cidx < num_hw_ctrs || total_ctrs <= cidx)
			continue;
		if (phs->active_events[i] != SBI_PMU_EVENT_IDX_INVALID)
		if (phs->active_events[cidx] != SBI_PMU_EVENT_IDX_INVALID)
			continue;
		if (SBI_PMU_FW_PLATFORM == event_code &&
		    pmu_dev && pmu_dev->fw_counter_match_encoding) {
@@ -827,7 +854,7 @@ static int pmu_ctr_find_fw(struct sbi_pmu_hart_state *phs,
				continue;
		}

		return i;
		return cidx;
	}

	return SBI_ENOTSUPP;
@@ -873,12 +900,20 @@ int sbi_pmu_ctr_cfg_match(unsigned long cidx_base, unsigned long cidx_mask,
		/* Any firmware counter can be used to track any firmware event */
		ctr_idx = pmu_ctr_find_fw(phs, cidx_base, cidx_mask,
					  event_code, event_data);
		if (event_code == SBI_PMU_FW_PLATFORM)
		if ((event_code == SBI_PMU_FW_PLATFORM) && (ctr_idx >= num_hw_ctrs))
			phs->fw_counters_data[ctr_idx - num_hw_ctrs] =
				event_data;
	} else {
		ctr_idx = pmu_ctr_find_hw(phs, cidx_base, cidx_mask, flags,
					  event_idx, event_data);
		if (ctr_idx >= 0) {
			struct sbi_pmu_hw_event_config *ev_cfg =
				&phs->hw_counters_cfg[ctr_idx];

			ev_cfg->event_data = event_data;
			/* Remove flags that are used in match call only */
			ev_cfg->flags = flags & SBI_PMU_CFG_EVENT_MASK;
		}
	}

	if (ctr_idx < 0)
@@ -1019,7 +1054,7 @@ int sbi_pmu_event_get_info(unsigned long shmem_phys_lo, unsigned long shmem_phys
				   SBI_DOMAIN_READ | SBI_DOMAIN_WRITE))
		return SBI_ERR_INVALID_ADDRESS;

	sbi_hart_map_saddr(shmem_phys_lo, shmem_size);
	sbi_hart_protection_map_range(shmem_phys_lo, shmem_size);

	einfo = (struct sbi_pmu_event_info *)(shmem_phys_lo);
	for (i = 0; i < num_events; i++) {
@@ -1053,7 +1088,7 @@ int sbi_pmu_event_get_info(unsigned long shmem_phys_lo, unsigned long shmem_phys
		}
	}

	sbi_hart_unmap_saddr();
	sbi_hart_protection_unmap_range(shmem_phys_lo, shmem_size);

	return 0;
}
@@ -15,6 +15,7 @@
#include <sbi/sbi_error.h>
#include <sbi/sbi_fifo.h>
#include <sbi/sbi_hart.h>
#include <sbi/sbi_hart_protection.h>
#include <sbi/sbi_heap.h>
#include <sbi/sbi_hsm.h>
#include <sbi/sbi_ipi.h>
@@ -1036,7 +1037,7 @@ int sbi_sse_read_attrs(uint32_t event_id, uint32_t base_attr_id,
	if (ret)
		return ret;

	sbi_hart_map_saddr(output_phys_lo, sizeof(unsigned long) * attr_count);
	sbi_hart_protection_map_range(output_phys_lo, sizeof(unsigned long) * attr_count);

	/*
	 * Copy all attributes at once since struct sse_event_attrs is matching
@@ -1049,7 +1050,7 @@ int sbi_sse_read_attrs(uint32_t event_id, uint32_t base_attr_id,
	attrs = (unsigned long *)output_phys_lo;
	copy_attrs(attrs, &e_attrs[base_attr_id], attr_count);

	sbi_hart_unmap_saddr();
	sbi_hart_protection_unmap_range(output_phys_lo, sizeof(unsigned long) * attr_count);

	sse_event_put(e);

@@ -1064,7 +1065,7 @@ static int sse_write_attrs(struct sbi_sse_event *e, uint32_t base_attr_id,
	uint32_t id, end_id = base_attr_id + attr_count;
	unsigned long *attrs = (unsigned long *)input_phys;

	sbi_hart_map_saddr(input_phys, sizeof(unsigned long) * attr_count);
	sbi_hart_protection_map_range(input_phys, sizeof(unsigned long) * attr_count);

	for (id = base_attr_id; id < end_id; id++) {
		val = attrs[attr++];
@@ -1080,7 +1081,7 @@ static int sse_write_attrs(struct sbi_sse_event *e, uint32_t base_attr_id,
	}

out:
	sbi_hart_unmap_saddr();
	sbi_hart_protection_unmap_range(input_phys, sizeof(unsigned long) * attr_count);

	return ret;
}
@@ -87,6 +87,7 @@ void __noreturn sbi_system_reset(u32 reset_type, u32 reset_reason)
}

static const struct sbi_system_suspend_device *suspend_dev = NULL;
static bool system_suspended;

const struct sbi_system_suspend_device *sbi_system_suspend_get_device(void)
{
@@ -137,6 +138,19 @@ bool sbi_system_suspend_supported(u32 sleep_type)
	       suspend_dev->system_suspend_check(sleep_type) == 0;
}

bool sbi_system_is_suspended(void)
{
	return system_suspended;
}

void sbi_system_resume(void)
{
	if (suspend_dev && suspend_dev->system_resume)
		suspend_dev->system_resume();

	system_suspended = false;
}

int sbi_system_suspend(u32 sleep_type, ulong resume_addr, ulong opaque)
{
	struct sbi_domain *dom = sbi_domain_thishart_ptr();
@@ -189,11 +203,14 @@ int sbi_system_suspend(u32 sleep_type, ulong resume_addr, ulong opaque)
	__sbi_hsm_suspend_non_ret_save(scratch);

	/* Suspend */
	system_suspended = true;
	ret = suspend_dev->system_suspend(sleep_type, scratch->warmboot_addr);
	if (ret != SBI_OK) {
		if (!sbi_hsm_hart_change_state(scratch, SBI_HSM_STATE_SUSPENDED,
					       SBI_HSM_STATE_STARTED))
			sbi_hart_hang();

		system_suspended = false;
		return ret;
	}

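Aside: the new system_suspended flag brackets the device's system_suspend() call so that sbi_hsm_hart_resume_start() (see the HSM hunk earlier) can route a wakeup through sbi_system_resume(). A hedged sketch of a suspend device that pairs with this flow (all callback bodies are placeholders, not a real driver); it would be registered via sbi_system_suspend_set_device():

static int demo_susp_check(u32 sleep_type)
{
	return sleep_type == SBI_SUSP_SLEEP_TYPE_SUSPEND ? 0 : SBI_EINVAL;
}

static int demo_susp_enter(u32 sleep_type, unsigned long mmode_resume_addr)
{
	/* Illustrative: program the wakeup address and enter the sleep state */
	return SBI_OK;
}

static void demo_susp_resume(void)
{
	/* Illustrative: undo any suspend-time device state */
}

static struct sbi_system_suspend_device demo_susp_dev = {
	.name		      = "demo-susp",
	.system_suspend_check = demo_susp_check,
	.system_suspend	      = demo_susp_enter,
	.system_resume	      = demo_susp_resume,
};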
@@ -29,7 +29,7 @@ static unsigned long tlb_fifo_off;
static unsigned long tlb_fifo_mem_off;
static unsigned long tlb_range_flush_limit;

static void tlb_flush_all(void)
void __sbi_sfence_vma_all(void)
{
	__asm__ __volatile("sfence.vma");
}
@@ -86,7 +86,7 @@ static void sbi_tlb_local_sfence_vma(struct sbi_tlb_info *tinfo)
	sbi_pmu_ctr_incr_fw(SBI_PMU_FW_SFENCE_VMA_RCVD);

	if ((start == 0 && size == 0) || (size == SBI_TLB_FLUSH_ALL)) {
		tlb_flush_all();
		__sbi_sfence_vma_all();
		return;
	}

@@ -24,7 +24,7 @@
	{ \
		register ulong tinfo asm("a3"); \
		register ulong mstatus = 0; \
		register ulong mtvec = sbi_hart_expected_trap_addr(); \
		register ulong mtvec = (ulong)sbi_hart_expected_trap; \
		type ret = 0; \
		trap->cause = 0; \
		asm volatile( \
@@ -51,7 +51,7 @@
	{ \
		register ulong tinfo asm("a3") = (ulong)trap; \
		register ulong mstatus = 0; \
		register ulong mtvec = sbi_hart_expected_trap_addr(); \
		register ulong mtvec = (ulong)sbi_hart_expected_trap; \
		trap->cause = 0; \
		asm volatile( \
			"add %[tinfo], %[taddr], zero\n" \
@@ -121,7 +121,7 @@ ulong sbi_get_insn(ulong mepc, struct sbi_trap_info *trap)
	register ulong tinfo asm("a3");
	register ulong ttmp asm("a4");
	register ulong mstatus = 0;
	register ulong mtvec = sbi_hart_expected_trap_addr();
	register ulong mtvec = (ulong)sbi_hart_expected_trap;
	ulong insn = 0;

	trap->cause = 0;

@@ -2,6 +2,8 @@

menu "Utils and Drivers Support"

source "$(OPENSBI_SRC_DIR)/lib/utils/cache/Kconfig"

source "$(OPENSBI_SRC_DIR)/lib/utils/cppc/Kconfig"

source "$(OPENSBI_SRC_DIR)/lib/utils/fdt/Kconfig"

lib/utils/cache/Kconfig (vendored, new file, +31 lines)
@@ -0,0 +1,31 @@
# SPDX-License-Identifier: BSD-2-Clause

menu "Cache Support"

config FDT_CACHE
	bool "FDT based cache drivers"
	depends on FDT
	select CACHE
	default n

if FDT_CACHE

config FDT_CACHE_SIFIVE_CCACHE
	bool "SiFive CCACHE FDT cache driver"
	default n

config FDT_CACHE_SIFIVE_EC
	bool "SiFive EC FDT cache driver"
	default n

config FDT_CACHE_SIFIVE_PL2
	bool "SiFive PL2 FDT cache driver"
	default n

endif

config CACHE
	bool "Cache support"
	default n

endmenu
lib/utils/cache/cache.c (vendored, new file, +46 lines)
@@ -0,0 +1,46 @@
/*
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2025 SiFive Inc.
 */

#include <sbi/sbi_error.h>
#include <sbi_utils/cache/cache.h>

static SBI_LIST_HEAD(cache_list);

struct cache_device *cache_find(u32 id)
{
	struct cache_device *dev;

	sbi_list_for_each_entry(dev, &cache_list, node) {
		if (dev->id == id)
			return dev;
	}

	return NULL;
}

int cache_add(struct cache_device *dev)
{
	if (!dev)
		return SBI_ENODEV;

	if (cache_find(dev->id))
		return SBI_EALREADY;

	sbi_list_add(&dev->node, &cache_list);

	return SBI_OK;
}

int cache_flush_all(struct cache_device *dev)
{
	if (!dev)
		return SBI_ENODEV;

	if (!dev->ops || !dev->ops->cache_flush_all)
		return SBI_ENOTSUPP;

	return dev->ops->cache_flush_all(dev);
}
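Aside: putting the three entry points together, a driver fills in a cache_device, wires up ops, registers it with cache_add(), and anyone holding the device (or looking it up by id) can request a full flush. A hedged sketch with a stubbed ops body (the id value is illustrative; the FDT drivers below use the devicetree node offset):

static int demo_flush_all(struct cache_device *dev)
{
	/* Illustrative: issue the controller's flush-all command here */
	return 0;
}

static struct cache_ops demo_ops = {
	.cache_flush_all = demo_flush_all,
};

static struct cache_device demo_cache = {
	.id  = 1,
	.ops = &demo_ops,
};

static int demo_cache_register_and_flush(void)
{
	int rc = cache_add(&demo_cache);

	if (rc)
		return rc;

	return cache_flush_all(cache_find(demo_cache.id));
}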
lib/utils/cache/fdt_cache.c (vendored, new file, +89 lines)
@@ -0,0 +1,89 @@
/*
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2025 SiFive Inc.
 */

#include <libfdt.h>
#include <sbi/sbi_console.h>
#include <sbi/sbi_error.h>
#include <sbi/sbi_heap.h>
#include <sbi_utils/cache/fdt_cache.h>
#include <sbi_utils/fdt/fdt_driver.h>

/* List of FDT cache drivers generated at compile time */
extern const struct fdt_driver *const fdt_cache_drivers[];

int fdt_cache_add(const void *fdt, int noff, struct cache_device *dev)
{
	int rc;

	dev->id = noff;
	sbi_strncpy(dev->name, fdt_get_name(fdt, noff, NULL), sizeof(dev->name) - 1);
	sbi_dprintf("%s: %s\n", __func__, dev->name);

	rc = fdt_next_cache_get(fdt, noff, &dev->next);
	if (rc == SBI_ENOENT)
		dev->next = NULL;
	else if (rc)
		return rc;

	return cache_add(dev);
}

static int fdt_cache_add_generic(const void *fdt, int noff)
{
	struct cache_device *dev;
	int rc;

	dev = sbi_zalloc(sizeof(*dev));
	if (!dev)
		return SBI_ENOMEM;

	rc = fdt_cache_add(fdt, noff, dev);
	if (rc) {
		sbi_free(dev);
		return rc;
	}

	return 0;
}

static int fdt_cache_find(const void *fdt, int noff, struct cache_device **out_dev)
{
	struct cache_device *dev = cache_find(noff);
	int rc;

	if (!dev) {
		rc = fdt_driver_init_by_offset(fdt, noff, fdt_cache_drivers);
		if (rc == SBI_ENODEV)
			rc = fdt_cache_add_generic(fdt, noff);
		if (rc)
			return rc;

		dev = cache_find(noff);
		if (!dev)
			return SBI_EFAIL;
	}

	if (out_dev)
		*out_dev = dev;

	return SBI_OK;
}

int fdt_next_cache_get(const void *fdt, int noff, struct cache_device **out_dev)
{
	const fdt32_t *val;
	int len;

	val = fdt_getprop(fdt, noff, "next-level-cache", &len);
	if (!val || len < sizeof(*val))
		return SBI_ENOENT;

	noff = fdt_node_offset_by_phandle(fdt, fdt32_to_cpu(val[0]));
	if (noff < 0)
		return noff;

	return fdt_cache_find(fdt, noff, out_dev);
}
lib/utils/cache/fdt_cache_drivers.carray (vendored, new file, +3 lines)
@@ -0,0 +1,3 @@
HEADER: sbi_utils/cache/fdt_cache.h
TYPE: const struct fdt_driver
NAME: fdt_cache_drivers
lib/utils/cache/fdt_cmo_helper.c (vendored, new file, +114 lines)
@@ -0,0 +1,114 @@
/*
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2025 SiFive Inc.
 */

#include <libfdt.h>
#include <sbi/sbi_error.h>
#include <sbi/sbi_scratch.h>
#include <sbi_utils/cache/fdt_cache.h>
#include <sbi_utils/cache/fdt_cmo_helper.h>
#include <sbi_utils/fdt/fdt_helper.h>

static unsigned long flc_offset;

#define get_hart_flc(_s) \
	sbi_scratch_read_type(_s, struct cache_device *, flc_offset)
#define set_hart_flc(_s, _p) \
	sbi_scratch_write_type(_s, struct cache_device *, flc_offset, _p)

int fdt_cmo_private_flc_flush_all(void)
{
	struct cache_device *flc = get_hart_flc(sbi_scratch_thishart_ptr());

	if (!flc || !flc->cpu_private)
		return SBI_ENODEV;

	return cache_flush_all(flc);
}

int fdt_cmo_llc_flush_all(void)
{
	struct cache_device *llc = get_hart_flc(sbi_scratch_thishart_ptr());

	if (!llc)
		return SBI_ENODEV;

	while (llc->next)
		llc = llc->next;

	return cache_flush_all(llc);
}

static int fdt_cmo_cold_init(const void *fdt)
{
	struct sbi_scratch *scratch;
	struct cache_device *dev;
	int cpu_offset, cpus_offset, rc;
	u32 hartid;

	cpus_offset = fdt_path_offset(fdt, "/cpus");
	if (cpus_offset < 0)
		return SBI_EINVAL;

	fdt_for_each_subnode(cpu_offset, fdt, cpus_offset) {
		rc = fdt_parse_hart_id(fdt, cpu_offset, &hartid);
		if (rc)
			continue;

		scratch = sbi_hartid_to_scratch(hartid);
		if (!scratch)
			continue;

		rc = fdt_next_cache_get(fdt, cpu_offset, &dev);
		if (rc && rc != SBI_ENOENT)
			return rc;
		if (rc == SBI_ENOENT)
			dev = NULL;

		set_hart_flc(scratch, dev);
	}

	return SBI_OK;
}

static int fdt_cmo_warm_init(void)
{
	struct cache_device *cur = get_hart_flc(sbi_scratch_thishart_ptr());
	int rc;

	while (cur) {
		if (cur->ops && cur->ops->warm_init) {
			rc = cur->ops->warm_init(cur);
			if (rc)
				return rc;
		}

		cur = cur->next;
	}

	return SBI_OK;
}

int fdt_cmo_init(bool cold_boot)
{
	const void *fdt = fdt_get_address();
	int rc;

	if (cold_boot) {
		flc_offset = sbi_scratch_alloc_type_offset(struct cache_device *);
		if (!flc_offset)
			return SBI_ENOMEM;

		rc = fdt_cmo_cold_init(fdt);
		if (rc)
			return rc;
	}

	rc = fdt_cmo_warm_init();
	if (rc)
		return rc;

	return SBI_OK;
}
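Aside: a platform would typically call fdt_cmo_init() from its early init so that each hart's first-level-cache pointer is populated, then use the flush helpers around low-power entry. A minimal usage sketch (the hook names are illustrative, not part of the series):

static int demo_platform_early_init(bool cold_boot)
{
	/* Builds the per-hart cache chains from the devicetree */
	return fdt_cmo_init(cold_boot);
}

static void demo_enter_deep_sleep(void)
{
	/* Best effort: SBI_ENODEV just means no cache was described in the DT */
	fdt_cmo_llc_flush_all();
}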
lib/utils/cache/fdt_sifive_ccache.c (vendored, new file, +175 lines)
@@ -0,0 +1,175 @@
/*
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2025 SiFive Inc.
 */

#include <libfdt.h>
#include <sbi/riscv_barrier.h>
#include <sbi/riscv_io.h>
#include <sbi/sbi_error.h>
#include <sbi/sbi_heap.h>
#include <sbi_utils/cache/fdt_cache.h>
#include <sbi_utils/fdt/fdt_driver.h>

#define CCACHE_CFG_CSR			0
#define CCACHE_CMD_CSR			0x280
#define CCACHE_STATUS_CSR		0x288

#define CFG_CSR_BANK_MASK		0xff
#define CFG_CSR_WAY_MASK		0xff00
#define CFG_CSR_WAY_OFFSET		8
#define CFG_CSR_SET_MASK		0xff0000
#define CFG_CSR_SET_OFFSET		16

#define CMD_CSR_CMD_OFFSET		56
#define CMD_CSR_BANK_OFFSET		6

#define CMD_OPCODE_SETWAY		0x1ULL
#define CMD_OPCODE_OFFSET		0x2ULL

#define CFLUSH_SETWAY_CLEANINV		((CMD_OPCODE_SETWAY << CMD_OPCODE_OFFSET) | 0x3)

#define CCACHE_CMD_QLEN			0xff

#define ccache_mb_b()			RISCV_FENCE(rw, o)
#define ccache_mb_a()			RISCV_FENCE(o, rw)

#define CCACHE_ALL_OP_REQ_BATCH_NUM	0x10
#define CCACHE_ALL_OP_REQ_BATCH_MASK	(CCACHE_CMD_QLEN + 1 - CCACHE_ALL_OP_REQ_BATCH_NUM)

struct sifive_ccache {
	struct cache_device dev;
	void *addr;
	u64 total_lines;
};

#define to_ccache(_dev) container_of(_dev, struct sifive_ccache, dev)

static inline unsigned int sifive_ccache_read_status(void *status_addr)
{
	return readl_relaxed(status_addr);
}

static inline void sifive_ccache_write_cmd(u64 cmd, void *cmd_csr_addr)
{
#if __riscv_xlen != 32
	writeq_relaxed(cmd, cmd_csr_addr);
#else
	/*
	 * The cache maintenance request is only generated when the "command"
	 * field (part of the high word) is written.
	 */
	writel_relaxed(cmd, cmd_csr_addr);
	writel(cmd >> 32, cmd_csr_addr + 4);
#endif
}

static int sifive_ccache_flush_all(struct cache_device *dev)
{
	struct sifive_ccache *ccache = to_ccache(dev);
	void *status_addr = (char *)ccache->addr + CCACHE_STATUS_CSR;
	void *cmd_csr_addr = (char *)ccache->addr + CCACHE_CMD_CSR;
	u64 total_cnt = ccache->total_lines;
	u64 cmd = CFLUSH_SETWAY_CLEANINV << CMD_CSR_CMD_OFFSET;
	int loop_cnt = CCACHE_CMD_QLEN & CCACHE_ALL_OP_REQ_BATCH_MASK;

	ccache_mb_b();
send_cmd:
	total_cnt -= loop_cnt;
	while (loop_cnt > 0) {
		sifive_ccache_write_cmd(cmd + (0 << CMD_CSR_BANK_OFFSET), cmd_csr_addr);
		sifive_ccache_write_cmd(cmd + (1 << CMD_CSR_BANK_OFFSET), cmd_csr_addr);
		sifive_ccache_write_cmd(cmd + (2 << CMD_CSR_BANK_OFFSET), cmd_csr_addr);
		sifive_ccache_write_cmd(cmd + (3 << CMD_CSR_BANK_OFFSET), cmd_csr_addr);
		sifive_ccache_write_cmd(cmd + (4 << CMD_CSR_BANK_OFFSET), cmd_csr_addr);
		sifive_ccache_write_cmd(cmd + (5 << CMD_CSR_BANK_OFFSET), cmd_csr_addr);
		sifive_ccache_write_cmd(cmd + (6 << CMD_CSR_BANK_OFFSET), cmd_csr_addr);
		sifive_ccache_write_cmd(cmd + (7 << CMD_CSR_BANK_OFFSET), cmd_csr_addr);
		sifive_ccache_write_cmd(cmd + (8 << CMD_CSR_BANK_OFFSET), cmd_csr_addr);
		sifive_ccache_write_cmd(cmd + (9 << CMD_CSR_BANK_OFFSET), cmd_csr_addr);
		sifive_ccache_write_cmd(cmd + (10 << CMD_CSR_BANK_OFFSET), cmd_csr_addr);
		sifive_ccache_write_cmd(cmd + (11 << CMD_CSR_BANK_OFFSET), cmd_csr_addr);
		sifive_ccache_write_cmd(cmd + (12 << CMD_CSR_BANK_OFFSET), cmd_csr_addr);
		sifive_ccache_write_cmd(cmd + (13 << CMD_CSR_BANK_OFFSET), cmd_csr_addr);
		sifive_ccache_write_cmd(cmd + (14 << CMD_CSR_BANK_OFFSET), cmd_csr_addr);
		sifive_ccache_write_cmd(cmd + (15 << CMD_CSR_BANK_OFFSET), cmd_csr_addr);
		cmd += CCACHE_ALL_OP_REQ_BATCH_NUM << CMD_CSR_BANK_OFFSET;
		loop_cnt -= CCACHE_ALL_OP_REQ_BATCH_NUM;
	}
	if (!total_cnt)
		goto done;

	/* Ensure the ccache is able to receive more than 16 requests */
	do {
		loop_cnt = (CCACHE_CMD_QLEN - sifive_ccache_read_status(status_addr));
	} while (loop_cnt < CCACHE_ALL_OP_REQ_BATCH_NUM);
	loop_cnt &= CCACHE_ALL_OP_REQ_BATCH_MASK;

	if (total_cnt < loop_cnt) {
		loop_cnt = (total_cnt + CCACHE_ALL_OP_REQ_BATCH_NUM) & CCACHE_ALL_OP_REQ_BATCH_MASK;
		cmd -= ((loop_cnt - total_cnt) << CMD_CSR_BANK_OFFSET);
		total_cnt = loop_cnt;
	}
	goto send_cmd;
done:
	do {} while (sifive_ccache_read_status(status_addr));
	ccache_mb_a();

	return 0;
}

static struct cache_ops sifive_ccache_ops = {
	.cache_flush_all = sifive_ccache_flush_all,
};

static int sifive_ccache_cold_init(const void *fdt, int nodeoff, const struct fdt_match *match)
{
	struct sifive_ccache *ccache;
	struct cache_device *dev;
	u64 reg_addr = 0;
	u32 config_csr, banks, sets, ways;
	int rc;

	/* find the ccache base control address */
	rc = fdt_get_node_addr_size(fdt, nodeoff, 0, &reg_addr, NULL);
	if (rc < 0 && reg_addr)
		return SBI_ENODEV;

	ccache = sbi_zalloc(sizeof(*ccache));
	if (!ccache)
		return SBI_ENOMEM;

	dev = &ccache->dev;
	dev->ops = &sifive_ccache_ops;
	rc = fdt_cache_add(fdt, nodeoff, dev);
	if (rc) {
		sbi_free(ccache);
		return rc;
	}

	ccache->addr = (void *)(uintptr_t)reg_addr;

	/* get the info of ccache from config CSR */
	config_csr = readl(ccache->addr + CCACHE_CFG_CSR);
	banks = config_csr & CFG_CSR_BANK_MASK;

	sets = (config_csr & CFG_CSR_SET_MASK) >> CFG_CSR_SET_OFFSET;
	sets = (1 << sets);

	ways = (config_csr & CFG_CSR_WAY_MASK) >> CFG_CSR_WAY_OFFSET;

	ccache->total_lines = sets * ways * banks;

	return SBI_OK;
}

static const struct fdt_match sifive_ccache_match[] = {
	{ .compatible = "sifive,ccache2" },
	{},
};

const struct fdt_driver fdt_sifive_ccache = {
	.match_table = sifive_ccache_match,
	.init = sifive_ccache_cold_init,
};
lib/utils/cache/fdt_sifive_ec.c (vendored, new file, +195 lines)
@@ -0,0 +1,195 @@
/*
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2025 SiFive Inc.
 */

#include <libfdt.h>
#include <sbi/riscv_io.h>
#include <sbi/sbi_heap.h>
#include <sbi/sbi_platform.h>
#include <sbi_utils/cache/fdt_cache.h>
#include <sbi_utils/fdt/fdt_driver.h>

#define SIFIVE_EC_FEATURE_DISABLE_OFF	0x100UL
#define SIFIVE_EC_FLUSH_CMD_OFF		0x800UL
#define SIFIVE_EC_FLUSH_STATUS_OFF	0x808UL
#define SIFIVE_EC_FLUSH_ADDR_OFF	0x810UL
#define SIFIVE_EC_MODE_CTRL		0xa00UL

#define SIFIVE_EC_FLUSH_COMPLETION_MASK	BIT(0)

#define SIFIVE_EC_CLEANINV_ALL_CMD	0x3

#define SIFIVE_EC_FEATURE_DISABLE_VAL	0

struct sifive_ec_quirks {
	bool two_mode;
	char *reg_name;
};

struct sifive_ec_slice {
	void *addr;
	bool last_slice;
};

struct sifive_ec {
	struct cache_device dev;
	struct sifive_ec_slice *slices;
};

#define to_ec(_dev) container_of(_dev, struct sifive_ec, dev)

static int sifive_ec_flush_all(struct cache_device *dev)
{
	struct sifive_ec *ec_dev = to_ec(dev);
	struct sifive_ec_slice *slices = ec_dev->slices;
	u32 cmd = SIFIVE_EC_CLEANINV_ALL_CMD, i = 0;
	void *addr;

	do {
		addr = slices[i].addr;

		writel((int)-1, addr + SIFIVE_EC_FLUSH_ADDR_OFF);
		writel((int)-1, addr + SIFIVE_EC_FLUSH_ADDR_OFF + sizeof(u32));
		writel(cmd, addr + SIFIVE_EC_FLUSH_CMD_OFF);
	} while (!slices[i++].last_slice);

	i = 0;
	do {
		addr = slices[i].addr;
		do {} while (!(readl(addr + SIFIVE_EC_FLUSH_STATUS_OFF) &
			       SIFIVE_EC_FLUSH_COMPLETION_MASK));
	} while (!slices[i++].last_slice);

	return 0;
}

int sifive_ec_warm_init(struct cache_device *dev)
{
	struct sifive_ec *ec_dev = to_ec(dev);
	struct sifive_ec_slice *slices = ec_dev->slices;
	struct sbi_domain *dom = sbi_domain_thishart_ptr();
	int i = 0;

	if (dom->boot_hartid == current_hartid()) {
		do {
			writel(SIFIVE_EC_FEATURE_DISABLE_VAL,
			       slices[i].addr + SIFIVE_EC_FEATURE_DISABLE_OFF);
		} while (!slices[i++].last_slice);
	}

	return SBI_OK;
}

static struct cache_ops sifive_ec_ops = {
	.warm_init = sifive_ec_warm_init,
	.cache_flush_all = sifive_ec_flush_all,
};

static int sifive_ec_slices_cold_init(const void *fdt, int nodeoff,
				      struct sifive_ec_slice *slices,
				      const struct sifive_ec_quirks *quirks)
{
	int rc, subnode, slice_idx = -1;
	u64 reg_addr, size, start_addr = -1, end_addr = 0;

	fdt_for_each_subnode(subnode, fdt, nodeoff) {
		rc = fdt_get_node_addr_size_by_name(fdt, subnode, quirks->reg_name, &reg_addr,
						    &size);
		if (rc < 0)
			return SBI_ENODEV;

		if (reg_addr < start_addr)
			start_addr = reg_addr;

		if (reg_addr + size > end_addr)
			end_addr = reg_addr + size;

		slices[++slice_idx].addr = (void *)(uintptr_t)reg_addr;
	}
	slices[slice_idx].last_slice = true;

	/* Only enable the PMP to protect the EC m-mode region when it supports two modes */
	if (quirks->two_mode) {
		rc = sbi_domain_root_add_memrange((unsigned long)start_addr,
						  (unsigned long)(end_addr - start_addr),
						  BIT(12),
						  (SBI_DOMAIN_MEMREGION_MMIO |
						   SBI_DOMAIN_MEMREGION_M_READABLE |
						   SBI_DOMAIN_MEMREGION_M_WRITABLE));
		if (rc)
			return rc;
	}

	return SBI_OK;
}

static int sifive_ec_cold_init(const void *fdt, int nodeoff, const struct fdt_match *match)
{
	const struct sifive_ec_quirks *quirks = match->data;
	struct sifive_ec_slice *slices;
	struct sifive_ec *ec_dev;
	struct cache_device *dev;
	int subnode, rc = SBI_ENOMEM;
	u32 slice_count = 0;

	/* Count the number of slices */
	fdt_for_each_subnode(subnode, fdt, nodeoff)
		slice_count++;

	/* Need at least one slice */
	if (!slice_count)
		return SBI_EINVAL;

	ec_dev = sbi_zalloc(sizeof(*ec_dev));
	if (!ec_dev)
		return SBI_ENOMEM;

	slices = sbi_zalloc(slice_count * sizeof(*slices));
	if (!slices)
		goto free_ec;

	rc = sifive_ec_slices_cold_init(fdt, nodeoff, slices, quirks);
	if (rc)
		goto free_slice;

	dev = &ec_dev->dev;
	dev->ops = &sifive_ec_ops;
	rc = fdt_cache_add(fdt, nodeoff, dev);
	if (rc)
		goto free_slice;

	ec_dev->slices = slices;

	return SBI_OK;

free_slice:
	sbi_free(slices);
free_ec:
	sbi_free(ec_dev);
	return rc;
}

static const struct sifive_ec_quirks sifive_extensiblecache0_quirks = {
	.two_mode = false,
	.reg_name = "control",
};

static const struct sifive_ec_quirks sifive_extensiblecache4_quirks = {
	.two_mode = true,
	.reg_name = "m_mode",
};

static const struct fdt_match sifive_ec_match[] = {
	{ .compatible = "sifive,extensiblecache4", .data = &sifive_extensiblecache4_quirks },
	{ .compatible = "sifive,extensiblecache3", .data = &sifive_extensiblecache0_quirks },
	{ .compatible = "sifive,extensiblecache2", .data = &sifive_extensiblecache0_quirks },
	{ .compatible = "sifive,extensiblecache0", .data = &sifive_extensiblecache0_quirks },
	{},
};

struct fdt_driver fdt_sifive_ec = {
	.match_table = sifive_ec_match,
	.init = sifive_ec_cold_init,
};
lib/utils/cache/fdt_sifive_pl2.c (vendored, new file, 139 lines)
@@ -0,0 +1,139 @@
/*
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2025 SiFive Inc.
 */

#include <libfdt.h>
#include <sbi/riscv_io.h>
#include <sbi/sbi_error.h>
#include <sbi/sbi_heap.h>
#include <sbi_utils/cache/fdt_cache.h>
#include <sbi_utils/fdt/fdt_driver.h>

#define FLUSH64_CMD_TARGET_ALL (0x2 << 3)
#define FLUSH64_CMD_TYPE_FLUSH 0x3ULL

#define SIFIVE_PL2CACHE_CMD_QLEN 0xff

#define SIFIVE_PL2CACHE_FLUSH64_OFF 0x200ULL
#define SIFIVE_PL2CACHE_STATUS_OFF 0x208ULL
#define SIFIVE_PL2CACHE_CONFIG1_OFF 0x1000ULL
#define SIFIVE_PL2CACHE_CONFIG0_OFF 0x1008ULL

#define FLUSH64_CMD_POS 56
#define REGIONCLOCKDISABLE_MASK BIT(3)

#define CONFIG0_ACCEPT_DIRTY_DATA_ENABLE BIT(24)

struct sifive_pl2_quirks {
    bool no_dirty_fill;
};

struct sifive_pl2 {
    struct cache_device dev;
    void *addr;
    bool no_dirty_fill;
};

#define to_pl2(_dev) container_of(_dev, struct sifive_pl2, dev)

static int sifive_pl2_flush_all(struct cache_device *dev)
{
    struct sifive_pl2 *pl2_dev = to_pl2(dev);
    char *addr = pl2_dev->addr;
    u64 cmd = (FLUSH64_CMD_TARGET_ALL | FLUSH64_CMD_TYPE_FLUSH) << FLUSH64_CMD_POS;
    u32 config0;

    /*
     * While flushing the PL2 cache, a speculative load might cause a dirty
     * line to be pulled into the PL2, which makes the SiFive SMC0 refuse to
     * enter power gating. Clear ACCEPT_DIRTY_DATA_ENABLE to avoid the issue.
     */
    if (pl2_dev->no_dirty_fill) {
        config0 = readl((void *)addr + SIFIVE_PL2CACHE_CONFIG0_OFF);
        config0 &= ~CONFIG0_ACCEPT_DIRTY_DATA_ENABLE;
        writel(config0, (void *)addr + SIFIVE_PL2CACHE_CONFIG0_OFF);
    }

#if __riscv_xlen != 32
    writeq(cmd, addr + SIFIVE_PL2CACHE_FLUSH64_OFF);
#else
    writel((u32)cmd, addr + SIFIVE_PL2CACHE_FLUSH64_OFF);
    writel((u32)(cmd >> 32), addr + SIFIVE_PL2CACHE_FLUSH64_OFF + sizeof(u32));
#endif
    do {} while (readl(addr + SIFIVE_PL2CACHE_STATUS_OFF) & SIFIVE_PL2CACHE_CMD_QLEN);

    return 0;
}

static int sifive_pl2_warm_init(struct cache_device *dev)
{
    struct sifive_pl2 *pl2_dev = to_pl2(dev);
    char *addr = pl2_dev->addr;
    u32 val;

    /* Enable clock gating */
    val = readl(addr + SIFIVE_PL2CACHE_CONFIG1_OFF);
    val &= (~REGIONCLOCKDISABLE_MASK);
    writel(val, addr + SIFIVE_PL2CACHE_CONFIG1_OFF);

    return 0;
}

static struct cache_ops sifive_pl2_ops = {
    .warm_init = sifive_pl2_warm_init,
    .cache_flush_all = sifive_pl2_flush_all,
};

static int sifive_pl2_cold_init(const void *fdt, int nodeoff, const struct fdt_match *match)
{
    const struct sifive_pl2_quirks *quirk = match->data;
    struct sifive_pl2 *pl2_dev;
    struct cache_device *dev;
    u64 reg_addr;
    int rc;

    /* Find the PL2 control base address */
    rc = fdt_get_node_addr_size(fdt, nodeoff, 0, &reg_addr, NULL);
    if (rc < 0 || !reg_addr)
        return SBI_ENODEV;

    pl2_dev = sbi_zalloc(sizeof(*pl2_dev));
    if (!pl2_dev)
        return SBI_ENOMEM;

    dev = &pl2_dev->dev;
    dev->ops = &sifive_pl2_ops;
    dev->cpu_private = true;

    rc = fdt_cache_add(fdt, nodeoff, dev);
    if (rc)
        return rc;

    pl2_dev->addr = (void *)(uintptr_t)reg_addr;
    if (quirk)
        pl2_dev->no_dirty_fill = quirk->no_dirty_fill;

    return 0;
}

static const struct sifive_pl2_quirks pl2cache2_quirks = {
    .no_dirty_fill = true,
};

static const struct sifive_pl2_quirks pl2cache0_quirks = {
    .no_dirty_fill = false,
};

static const struct fdt_match sifive_pl2_match[] = {
    { .compatible = "sifive,pl2cache2", .data = &pl2cache2_quirks },
    { .compatible = "sifive,pl2cache1", .data = &pl2cache0_quirks },
    { .compatible = "sifive,pl2cache0", .data = &pl2cache0_quirks },
    {},
};

struct fdt_driver fdt_sifive_pl2 = {
    .match_table = sifive_pl2_match,
    .init = sifive_pl2_cold_init,
};
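For context on the FLUSH64 interface used above: the driver encodes the flush type and target into the top byte of a 64-bit command word, writes it to FLUSH64 (one 64-bit store on RV64, two 32-bit halves on RV32), and then polls STATUS until the command queue count drains to zero. A minimal sketch of just the command encoding, with the constants copied from the diff; the helper function itself is hypothetical:

#include <stdint.h>

/* Constants mirrored from the driver above. */
#define FLUSH64_CMD_TARGET_ALL (0x2 << 3)
#define FLUSH64_CMD_TYPE_FLUSH 0x3ULL
#define FLUSH64_CMD_POS        56

/* Hypothetical helper: build the FLUSH64 command word. The target/type
 * byte lands in bits [63:56]; the low bits, which would carry an address
 * for a ranged operation, stay zero for a whole-cache flush. */
static inline uint64_t pl2_flush_all_cmd(void)
{
    return (FLUSH64_CMD_TARGET_ALL | FLUSH64_CMD_TYPE_FLUSH)
           << FLUSH64_CMD_POS;
}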
lib/utils/cache/objects.mk (vendored, new file, 20 lines)
@@ -0,0 +1,20 @@
#
# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2025 SiFive
#

libsbiutils-objs-$(CONFIG_FDT_CACHE) += cache/fdt_cache.o
libsbiutils-objs-$(CONFIG_FDT_CACHE) += cache/fdt_cache_drivers.carray.o
libsbiutils-objs-$(CONFIG_FDT_CACHE) += cache/fdt_cmo_helper.o

carray-fdt_cache_drivers-$(CONFIG_FDT_CACHE_SIFIVE_CCACHE) += fdt_sifive_ccache
libsbiutils-objs-$(CONFIG_FDT_CACHE_SIFIVE_CCACHE) += cache/fdt_sifive_ccache.o

carray-fdt_cache_drivers-$(CONFIG_FDT_CACHE_SIFIVE_PL2) += fdt_sifive_pl2
libsbiutils-objs-$(CONFIG_FDT_CACHE_SIFIVE_PL2) += cache/fdt_sifive_pl2.o

carray-fdt_cache_drivers-$(CONFIG_FDT_CACHE_SIFIVE_EC) += fdt_sifive_ec
libsbiutils-objs-$(CONFIG_FDT_CACHE_SIFIVE_EC) += cache/fdt_sifive_ec.o

libsbiutils-objs-$(CONFIG_CACHE) += cache/cache.o
@@ -185,7 +185,7 @@ static void fdt_domain_based_fixup_one(void *fdt, int nodeoff)
 		return;
 
 	if (!sbi_domain_check_addr(dom, reg_addr, dom->next_mode,
-				   SBI_DOMAIN_READ | SBI_DOMAIN_WRITE)) {
+				   SBI_DOMAIN_READ | SBI_DOMAIN_WRITE | SBI_DOMAIN_MMIO)) {
 		rc = fdt_open_into(fdt, fdt, fdt_totalsize(fdt) + 32);
 		if (rc < 0)
 			return;
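The one-line change above matters because sbi_domain_check_addr() grants access only when every requested permission bit is allowed for the region; adding SBI_DOMAIN_MMIO means a device node survives the fixup only if its register range is actually mapped as MMIO for the domain, not merely readable and writable memory. A toy model of that mask test (the flag names and values below are illustrative stand-ins, not the real sbi_domain encoding):

#include <stdbool.h>
#include <stdint.h>

/* Illustrative stand-ins for the real SBI_DOMAIN_* permission bits. */
#define TOY_READ  (1U << 0)
#define TOY_WRITE (1U << 1)
#define TOY_MMIO  (1U << 2)

struct toy_region {
    uint64_t base, size;
    uint32_t perms;    /* permissions granted by this region */
};

/* Access is allowed only if every requested bit is granted, so asking
 * for TOY_READ | TOY_WRITE | TOY_MMIO rejects a plain RAM region even
 * when it is readable and writable. */
static bool toy_check_addr(const struct toy_region *r, uint64_t addr,
                           uint32_t want)
{
    if (addr < r->base || addr - r->base >= r->size)
        return false;
    return (r->perms & want) == want;
}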
@@ -84,23 +84,27 @@ static int fdt_translate_address(const void *fdt, uint64_t reg, int parent,
 				 uint64_t *addr)
 {
 	int i, rlen;
-	int cell_addr, cell_size;
+	int cell_parent_addr, cell_child_addr, cell_size;
 	const fdt32_t *ranges;
 	uint64_t offset, caddr = 0, paddr = 0, rsize = 0;
 
-	cell_addr = fdt_address_cells(fdt, parent);
-	if (cell_addr < 1)
+	ranges = fdt_getprop(fdt, parent, "ranges", &rlen);
+	if (ranges && rlen > 0) {
+		cell_child_addr = fdt_address_cells(fdt, parent);
+		if (cell_child_addr < 1)
 			return SBI_ENODEV;
 
+		cell_parent_addr = fdt_address_cells(fdt, fdt_parent_offset(fdt, parent));
+		if (cell_parent_addr < 1)
+			return SBI_ENODEV;
+
 		cell_size = fdt_size_cells(fdt, parent);
 		if (cell_size < 0)
 			return SBI_ENODEV;
 
-	ranges = fdt_getprop(fdt, parent, "ranges", &rlen);
-	if (ranges && rlen > 0) {
-		for (i = 0; i < cell_addr; i++)
+		for (i = 0; i < cell_child_addr; i++)
 			caddr = (caddr << 32) | fdt32_to_cpu(*ranges++);
-		for (i = 0; i < cell_addr; i++)
+		for (i = 0; i < cell_parent_addr; i++)
 			paddr = (paddr << 32) | fdt32_to_cpu(*ranges++);
 		for (i = 0; i < cell_size; i++)
 			rsize = (rsize << 32) | fdt32_to_cpu(*ranges++);
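As background for the hunk above: a device tree `ranges` entry is a (child address, parent address, size) triple, and the child and parent sides may use different `#address-cells`, which is exactly what the old code got wrong by decoding both addresses with the node's own cell count. Once the three values are decoded, translation is a bounds check plus an offset rebase, roughly:

#include <stdint.h>

/* One decoded "ranges" triple: child-bus address caddr maps onto
 * parent-bus address paddr for rsize bytes. */
struct toy_range {
    uint64_t caddr, paddr, rsize;
};

/* Translate reg (a child-bus address) into the parent address space.
 * Returns 0 on success, -1 if reg falls outside the range. */
static int toy_translate(const struct toy_range *r, uint64_t reg,
                         uint64_t *out)
{
    if (reg < r->caddr || reg - r->caddr >= r->rsize)
        return -1;
    *out = r->paddr + (reg - r->caddr);
    return 0;
}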
@@ -14,6 +14,15 @@ config FDT_HSM_RPMI
 	depends on FDT_MAILBOX && RPMI_MAILBOX
 	default n
 
+config FDT_HSM_SIFIVE_TMC0
+	bool "FDT SiFive TMC v0 driver"
+	depends on FDT_CACHE
+	default n
+
+config FDT_HSM_SPACEMIT
+	bool "FDT SPACEMIT HSM driver"
+	default n
+
 endif
 
 endmenu
lib/utils/hsm/fdt_hsm_sifive_tmc0.c (new file, 367 lines)
@@ -0,0 +1,367 @@
/*
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2025 SiFive
 */

#include <libfdt.h>
#include <sbi/riscv_asm.h>
#include <sbi/riscv_io.h>
#include <sbi/sbi_bitops.h>
#include <sbi/sbi_console.h>
#include <sbi/sbi_error.h>
#include <sbi/sbi_hart.h>
#include <sbi/sbi_heap.h>
#include <sbi/sbi_hsm.h>
#include <sbi/sbi_ipi.h>
#include <sbi_utils/cache/fdt_cmo_helper.h>
#include <sbi_utils/fdt/fdt_driver.h>
#include <sbi_utils/fdt/fdt_helper.h>
#include <sbi_utils/hsm/fdt_hsm_sifive_inst.h>
#include <sbi_utils/hsm/fdt_hsm_sifive_tmc0.h>

struct sifive_tmc0 {
    unsigned long reg;
    struct sbi_dlist node;
    u32 id;
};

static SBI_LIST_HEAD(tmc0_list);
static unsigned long tmc0_offset;

#define tmc0_ptr_get(__scratch) \
    sbi_scratch_read_type((__scratch), struct sifive_tmc0 *, tmc0_offset)

#define tmc0_ptr_set(__scratch, __tmc0) \
    sbi_scratch_write_type((__scratch), struct sifive_tmc0 *, tmc0_offset, (__tmc0))

/* TMC.PGPREP */
#define SIFIVE_TMC_PGPREP_OFF 0x0
#define SIFIVE_TMC_PGPREP_ENA_REQ BIT(31)
#define SIFIVE_TMC_PGPREP_ENA_ACK BIT(30)
#define SIFIVE_TMC_PGPREP_DIS_REQ BIT(29)
#define SIFIVE_TMC_PGPREP_DIS_ACK BIT(28)
#define SIFIVE_TMC_PGPREP_CLFPNOTQ BIT(18)
#define SIFIVE_TMC_PGPREP_PMCENAERR BIT(17)
#define SIFIVE_TMC_PGPREP_PMCDENY BIT(16)
#define SIFIVE_TMC_PGPREP_BUSERR BIT(15)
#define SIFIVE_TMC_PGPREP_WAKE_DETECT BIT(12)
#define SIFIVE_TMC_PGPREP_INTERNAL_ABORT BIT(2)
#define SIFIVE_TMC_PGPREP_ENARSP (SIFIVE_TMC_PGPREP_CLFPNOTQ | \
                                  SIFIVE_TMC_PGPREP_PMCENAERR | \
                                  SIFIVE_TMC_PGPREP_PMCDENY | \
                                  SIFIVE_TMC_PGPREP_BUSERR | \
                                  SIFIVE_TMC_PGPREP_WAKE_DETECT)

/* TMC.PG */
#define SIFIVE_TMC_PG_OFF 0x4
#define SIFIVE_TMC_PG_ENA_REQ BIT(31)
#define SIFIVE_TMC_PG_ENA_ACK BIT(30)
#define SIFIVE_TMC_PG_DIS_REQ BIT(29)
#define SIFIVE_TMC_PG_DIS_ACK BIT(28)
#define SIFIVE_TMC_PG_PMC_ENA_ERR BIT(17)
#define SIFIVE_TMC_PG_PMC_DENY BIT(16)
#define SIFIVE_TMC_PG_BUS_ERR BIT(15)
#define SIFIVE_TMC_PG_MASTNOTQ BIT(14)
#define SIFIVE_TMC_PG_WARM_RESET BIT(1)
#define SIFIVE_TMC_PG_ENARSP (SIFIVE_TMC_PG_PMC_ENA_ERR | \
                              SIFIVE_TMC_PG_PMC_DENY | \
                              SIFIVE_TMC_PG_BUS_ERR | \
                              SIFIVE_TMC_PG_MASTNOTQ)

/* TMC.RESUMEPC */
#define SIFIVE_TMC_RESUMEPC_LO 0x10
#define SIFIVE_TMC_RESUMEPC_HI 0x14

/* TMC.WAKEMASK */
#define SIFIVE_TMC_WAKE_MASK_OFF 0x20
#define SIFIVE_TMC_WAKE_MASK_WREQ BIT(31)
#define SIFIVE_TMC_WAKE_MASK_ACK BIT(30)

int sifive_tmc0_set_wakemask_enareq(u32 hartid)
{
    struct sbi_scratch *scratch = sbi_hartid_to_scratch(hartid);
    struct sifive_tmc0 *tmc0 = tmc0_ptr_get(scratch);
    unsigned long addr;
    u32 v;

    if (!tmc0)
        return SBI_ENODEV;

    addr = tmc0->reg + SIFIVE_TMC_WAKE_MASK_OFF;
    v = readl((void *)addr);
    writel(v | SIFIVE_TMC_WAKE_MASK_WREQ, (void *)addr);

    while (!(readl((void *)addr) & SIFIVE_TMC_WAKE_MASK_ACK));

    return SBI_OK;
}

void sifive_tmc0_set_wakemask_disreq(u32 hartid)
{
    struct sbi_scratch *scratch = sbi_hartid_to_scratch(hartid);
    struct sifive_tmc0 *tmc0 = tmc0_ptr_get(scratch);
    unsigned long addr;
    u32 v;

    if (!tmc0)
        return;

    addr = tmc0->reg + SIFIVE_TMC_WAKE_MASK_OFF;
    v = readl((void *)addr);
    writel(v & ~SIFIVE_TMC_WAKE_MASK_WREQ, (void *)addr);

    while (readl((void *)addr) & SIFIVE_TMC_WAKE_MASK_ACK);
}

bool sifive_tmc0_is_pg(u32 hartid)
{
    struct sbi_scratch *scratch = sbi_hartid_to_scratch(hartid);
    struct sifive_tmc0 *tmc0 = tmc0_ptr_get(scratch);
    unsigned long addr;
    u32 v;

    if (!tmc0)
        return false;

    addr = tmc0->reg + SIFIVE_TMC_PG_OFF;
    v = readl((void *)addr);
    if (!(v & SIFIVE_TMC_PG_ENA_ACK) ||
        (v & SIFIVE_TMC_PG_ENARSP) ||
        (v & SIFIVE_TMC_PG_DIS_REQ))
        return false;

    return true;
}

static void sifive_tmc0_set_resumepc(physical_addr_t addr)
{
    struct sifive_tmc0 *tmc0 = tmc0_ptr_get(sbi_scratch_thishart_ptr());

    writel((u32)addr, (void *)(tmc0->reg + SIFIVE_TMC_RESUMEPC_LO));
#if __riscv_xlen > 32
    writel((u32)(addr >> 32), (void *)(tmc0->reg + SIFIVE_TMC_RESUMEPC_HI));
#endif
}

static u32 sifive_tmc0_set_pgprep_enareq(void)
{
    struct sifive_tmc0 *tmc0 = tmc0_ptr_get(sbi_scratch_thishart_ptr());
    unsigned long reg = tmc0->reg + SIFIVE_TMC_PGPREP_OFF;
    u32 v = readl((void *)reg);

    writel(v | SIFIVE_TMC_PGPREP_ENA_REQ, (void *)reg);
    while (!(readl((void *)reg) & SIFIVE_TMC_PGPREP_ENA_ACK));

    v = readl((void *)reg);
    return v & SIFIVE_TMC_PGPREP_INTERNAL_ABORT;
}

static void sifive_tmc0_set_pgprep_disreq(void)
{
    struct sifive_tmc0 *tmc0 = tmc0_ptr_get(sbi_scratch_thishart_ptr());
    unsigned long reg = tmc0->reg + SIFIVE_TMC_PGPREP_OFF;
    u32 v = readl((void *)reg);

    writel(v | SIFIVE_TMC_PGPREP_DIS_REQ, (void *)reg);
    while (!(readl((void *)reg) & SIFIVE_TMC_PGPREP_DIS_ACK));
}

static u32 sifive_tmc0_get_pgprep_enarsp(void)
{
    struct sifive_tmc0 *tmc0 = tmc0_ptr_get(sbi_scratch_thishart_ptr());
    unsigned long reg = tmc0->reg + SIFIVE_TMC_PGPREP_OFF;
    u32 v = readl((void *)reg);

    return v & SIFIVE_TMC_PGPREP_ENARSP;
}

static void sifive_tmc0_set_pg_enareq(void)
{
    struct sifive_tmc0 *tmc0 = tmc0_ptr_get(sbi_scratch_thishart_ptr());
    unsigned long reg = tmc0->reg + SIFIVE_TMC_PG_OFF;
    u32 v = readl((void *)reg);

    writel(v | SIFIVE_TMC_PG_ENA_REQ, (void *)reg);
}

static int sifive_tmc0_prep(void)
{
    struct sbi_scratch *scratch = sbi_scratch_thishart_ptr();
    u32 rc;

    if (!tmc0_ptr_get(scratch))
        return SBI_ENODEV;

    rc = sifive_tmc0_set_pgprep_enareq();
    if (rc) {
        sbi_printf("TMC0 error: Internal Abort (Wake detect)\n");
        goto fail;
    }

    rc = sifive_tmc0_get_pgprep_enarsp();
    if (rc) {
        sifive_tmc0_set_pgprep_disreq();
        sbi_printf("TMC0 error: error response code: 0x%x\n", rc);
        goto fail;
    }

    sifive_tmc0_set_resumepc(scratch->warmboot_addr);

    return SBI_OK;

fail:
    return SBI_EFAIL;
}

static int sifive_tmc0_enter(void)
{
    struct sbi_scratch *scratch = sbi_scratch_thishart_ptr();
    u32 rc;

    /* Flush the cache, then check for wake detect or bus errors */
    if (fdt_cmo_private_flc_flush_all() &&
        sbi_hart_has_extension(scratch, SBI_HART_EXT_XSIFIVE_CFLUSH_D_L1))
        sifive_cflush();

    rc = sifive_tmc0_get_pgprep_enarsp();
    if (rc) {
        sbi_printf("TMC0 error: error response code: 0x%x\n", rc);
        goto fail;
    }

    if (sbi_hart_has_extension(scratch, SBI_HART_EXT_XSIFIVE_CEASE)) {
        sifive_tmc0_set_pg_enareq();
        while (1)
            sifive_cease();
    }

    rc = SBI_ENOTSUPP;
fail:
    sifive_tmc0_set_pgprep_disreq();
    return rc;
}

static int sifive_tmc0_tile_pg(void)
{
    int rc;

    rc = sifive_tmc0_prep();
    if (rc)
        return rc;

    return sifive_tmc0_enter();
}

static int sifive_tmc0_start(u32 hartid, ulong saddr)
{
    /*
     * In system suspend, the IMSIC will be reset on SiFive platforms,
     * so we use the CLINT IPI as the wake event.
     */
    sbi_ipi_raw_send(sbi_hartid_to_hartindex(hartid), true);

    return SBI_OK;
}

static int sifive_tmc0_stop(void)
{
    unsigned long mie = csr_read(CSR_MIE);
    int rc;
    /* Set external interrupt and IPI as wake-up sources */
    csr_set(CSR_MIE, MIP_MEIP | MIP_MSIP);

    rc = sifive_tmc0_tile_pg();
    if (rc) {
        csr_write(CSR_MIE, mie);
        return rc;
    }

    return SBI_OK;
}

static struct sbi_hsm_device tmc0_hsm_dev = {
    .name = "SiFive TMC0",
    .hart_start = sifive_tmc0_start,
    .hart_stop = sifive_tmc0_stop,
};

static int sifive_tmc0_bind_cpu(struct sifive_tmc0 *tmc0)
{
    const void *fdt = fdt_get_address();
    struct sbi_scratch *scratch;
    int cpus_off, cpu_off, rc;
    const fdt32_t *val;
    u32 hartid;

    cpus_off = fdt_path_offset(fdt, "/cpus");
    if (cpus_off < 0)
        return SBI_ENOENT;

    fdt_for_each_subnode(cpu_off, fdt, cpus_off) {
        rc = fdt_parse_hart_id(fdt, cpu_off, &hartid);
        if (rc)
            continue;

        scratch = sbi_hartid_to_scratch(hartid);
        if (!scratch)
            continue;

        val = fdt_getprop(fdt, cpu_off, "power-domains", NULL);
        if (!val)
            return SBI_ENOENT;

        if (tmc0->id == fdt32_to_cpu(val[0])) {
            tmc0_ptr_set(scratch, tmc0);
            return SBI_OK;
        }
    }

    return SBI_ENODEV;
}

static int sifive_tmc0_probe(const void *fdt, int nodeoff, const struct fdt_match *match)
{
    struct sifive_tmc0 *tmc0;
    u64 addr;
    int rc;

    if (!tmc0_offset) {
        tmc0_offset = sbi_scratch_alloc_type_offset(struct sifive_tmc0 *);
        if (!tmc0_offset)
            return SBI_ENOMEM;

        sbi_hsm_set_device(&tmc0_hsm_dev);
    }

    tmc0 = sbi_zalloc(sizeof(*tmc0));
    if (!tmc0)
        return SBI_ENOMEM;

    rc = fdt_get_node_addr_size(fdt, nodeoff, 0, &addr, NULL);
    if (rc)
        goto free_tmc0;

    tmc0->reg = (unsigned long)addr;
    tmc0->id = fdt_get_phandle(fdt_get_address(), nodeoff);

    rc = sifive_tmc0_bind_cpu(tmc0);
    if (rc)
        goto free_tmc0;

    return SBI_OK;

free_tmc0:
    sbi_free(tmc0);
    return rc;
}

static const struct fdt_match sifive_tmc0_match[] = {
    { .compatible = "sifive,tmc0" },
    { },
};

const struct fdt_driver fdt_hsm_sifive_tmc0 = {
    .match_table = sifive_tmc0_match,
    .init = sifive_tmc0_probe,
};
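The tmc0_ptr_get()/tmc0_ptr_set() macros above are the standard OpenSBI idiom for attaching a per-hart pointer to the scratch area: reserve a typed slot once, then read or write that slot through any hart's scratch. A distilled sketch of the same pattern; the sbi_scratch_* calls are the real APIs used in this driver, while the demo device type is hypothetical:

#include <sbi/sbi_error.h>
#include <sbi/sbi_scratch.h>

struct demo_dev { unsigned long base; };  /* hypothetical device state */

static unsigned long demo_offset;  /* slot offset into every scratch */

static int demo_init_once(void)
{
    /* Reserve room for one pointer in each hart's scratch area */
    demo_offset = sbi_scratch_alloc_type_offset(struct demo_dev *);
    if (!demo_offset)
        return SBI_ENOMEM;
    return 0;
}

static void demo_bind(struct sbi_scratch *scratch, struct demo_dev *dev)
{
    sbi_scratch_write_type(scratch, struct demo_dev *, demo_offset, dev);
}

static struct demo_dev *demo_get(struct sbi_scratch *scratch)
{
    return sbi_scratch_read_type(scratch, struct demo_dev *, demo_offset);
}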
lib/utils/hsm/fdt_hsm_spacemit.c (new file, 140 lines)
@@ -0,0 +1,140 @@
/*
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2025 SpacemiT
 * Authors:
 *   Xianbin Zhu <xianbin.zhu@linux.spacemit.com>
 *   Troy Mitchell <troy.mitchell@linux.spacemit.com>
 */

#include <platform_override.h>
#include <sbi/riscv_io.h>
#include <sbi/sbi_hsm.h>
#include <spacemit/k1.h>

static const u64 cpu_wakeup_reg[] = {
    PMU_AP_CORE0_WAKEUP,
    PMU_AP_CORE1_WAKEUP,
    PMU_AP_CORE2_WAKEUP,
    PMU_AP_CORE3_WAKEUP,
    PMU_AP_CORE4_WAKEUP,
    PMU_AP_CORE5_WAKEUP,
    PMU_AP_CORE6_WAKEUP,
    PMU_AP_CORE7_WAKEUP,
};

static const u64 cpu_idle_reg[] = {
    PMU_AP_CORE0_IDLE_CFG,
    PMU_AP_CORE1_IDLE_CFG,
    PMU_AP_CORE2_IDLE_CFG,
    PMU_AP_CORE3_IDLE_CFG,
    PMU_AP_CORE4_IDLE_CFG,
    PMU_AP_CORE5_IDLE_CFG,
    PMU_AP_CORE6_IDLE_CFG,
    PMU_AP_CORE7_IDLE_CFG,
};

static inline void spacemit_set_cpu_power(u32 hartid, bool enable)
{
    unsigned int value;
    unsigned int *cpu_idle_base = (unsigned int *)(unsigned long)cpu_idle_reg[hartid];

    value = readl(cpu_idle_base);

    if (enable)
        value &= ~PMU_AP_IDLE_PWRDOWN_MASK;
    else
        value |= PMU_AP_IDLE_PWRDOWN_MASK;

    writel(value, cpu_idle_base);
}

static void spacemit_wakeup_cpu(u32 mpidr)
{
    unsigned int *cpu_reset_base;
    unsigned int cur_hartid = current_hartid();

    cpu_reset_base = (unsigned int *)(unsigned long)cpu_wakeup_reg[cur_hartid];

    writel(1 << mpidr, cpu_reset_base);
}

static void spacemit_assert_cpu(void)
{
    spacemit_set_cpu_power(current_hartid(), false);
}

static void spacemit_deassert_cpu(unsigned int hartid)
{
    spacemit_set_cpu_power(hartid, true);
}

/* Start (or power-up) the given hart */
static int spacemit_hart_start(unsigned int hartid, unsigned long saddr)
{
    spacemit_deassert_cpu(hartid);
    spacemit_wakeup_cpu(hartid);

    return 0;
}

/*
 * Stop (or power-down) the current hart. This call is not
 * expected to return on success.
 */
static int spacemit_hart_stop(void)
{
    csr_write(CSR_STIMECMP, GENMASK_ULL(63, 0));
    csr_clear(CSR_MIE, MIP_SSIP | MIP_MSIP | MIP_STIP | MIP_MTIP | MIP_SEIP | MIP_MEIP);

    /* disable data prefetch */
    csr_clear(CSR_MSETUP, MSETUP_PFE);
    asm volatile ("fence iorw, iorw");

    /* flush local dcache */
    csr_write(CSR_MRAOP, MRAOP_ICACHE_INVALID);
    asm volatile ("fence iorw, iorw");

    /* disable dcache */
    csr_clear(CSR_MSETUP, MSETUP_DE);
    asm volatile ("fence iorw, iorw");

    /*
     * Core4-7 do not have dedicated bits in ML2SETUP;
     * instead, they reuse the same bits as core0-3.
     *
     * Therefore, use modulo with PLATFORM_MAX_CPUS_PER_CLUSTER
     * to select the proper bit.
     */
    csr_clear(CSR_ML2SETUP, 1 << (current_hartid() % PLATFORM_MAX_CPUS_PER_CLUSTER));
    asm volatile ("fence iorw, iorw");

    spacemit_assert_cpu();

    wfi();

    return SBI_ENOTSUPP;
}

static const struct sbi_hsm_device spacemit_hsm_ops = {
    .name = "spacemit-hsm",
    .hart_start = spacemit_hart_start,
    .hart_stop = spacemit_hart_stop,
};

static int spacemit_hsm_probe(const void *fdt, int nodeoff, const struct fdt_match *match)
{
    sbi_hsm_set_device(&spacemit_hsm_ops);

    return 0;
}

static const struct fdt_match spacemit_hsm_match[] = {
    { .compatible = "spacemit,k1" },
    { },
};

const struct fdt_driver fdt_hsm_spacemit = {
    .match_table = spacemit_hsm_match,
    .init = spacemit_hsm_probe,
};
@@ -9,3 +9,9 @@
 
 carray-fdt_early_drivers-$(CONFIG_FDT_HSM_RPMI) += fdt_hsm_rpmi
 libsbiutils-objs-$(CONFIG_FDT_HSM_RPMI) += hsm/fdt_hsm_rpmi.o
+
+carray-fdt_early_drivers-$(CONFIG_FDT_HSM_SPACEMIT) += fdt_hsm_spacemit
+libsbiutils-objs-$(CONFIG_FDT_HSM_SPACEMIT) += hsm/fdt_hsm_spacemit.o
+
+carray-fdt_early_drivers-$(CONFIG_FDT_HSM_SIFIVE_TMC0) += fdt_hsm_sifive_tmc0
+libsbiutils-objs-$(CONFIG_FDT_HSM_SIFIVE_TMC0) += hsm/fdt_hsm_sifive_tmc0.o
@@ -62,6 +62,7 @@ static void mswi_ipi_clear(void)
 
 static struct sbi_ipi_device aclint_mswi = {
 	.name = "aclint-mswi",
+	.rating = 100,
 	.ipi_send = mswi_ipi_send,
 	.ipi_clear = mswi_ipi_clear
 };
@@ -106,7 +107,7 @@ int aclint_mswi_cold_init(struct aclint_mswi_data *mswi)
 	if (rc)
 		return rc;
 
-	sbi_ipi_set_device(&aclint_mswi);
+	sbi_ipi_add_device(&aclint_mswi);
 
 	return 0;
 }
@@ -61,6 +61,7 @@ static void plicsw_ipi_clear(void)
 
 static struct sbi_ipi_device plicsw_ipi = {
 	.name = "andes_plicsw",
+	.rating = 200,
 	.ipi_send = plicsw_ipi_send,
 	.ipi_clear = plicsw_ipi_clear
 };
@@ -99,7 +100,7 @@ int plicsw_cold_ipi_init(struct plicsw_data *plicsw)
 	if (rc)
 		return rc;
 
-	sbi_ipi_set_device(&plicsw_ipi);
+	sbi_ipi_add_device(&plicsw_ipi);
 
 	return 0;
 }
@@ -1,22 +0,0 @@
-/*
- * SPDX-License-Identifier: BSD-2-Clause
- *
- * Copyright (c) 2020 Western Digital Corporation or its affiliates.
- *
- * Authors:
- *   Anup Patel <anup.patel@wdc.com>
- */
-
-#include <sbi_utils/ipi/fdt_ipi.h>
-
-/* List of FDT ipi drivers generated at compile time */
-extern const struct fdt_driver *const fdt_ipi_drivers[];
-
-int fdt_ipi_init(void)
-{
-	/*
-	 * On some single-hart system there is no need for IPIs,
-	 * so do not return a failure if no device is found.
-	 */
-	return fdt_driver_init_all(fdt_get_address(), fdt_ipi_drivers);
-}
@@ -1,3 +0,0 @@
-HEADER: sbi_utils/ipi/fdt_ipi.h
-TYPE: const struct fdt_driver
-NAME: fdt_ipi_drivers
@@ -9,8 +9,8 @@
 
 #include <sbi/sbi_error.h>
 #include <sbi/sbi_heap.h>
+#include <sbi_utils/fdt/fdt_driver.h>
 #include <sbi_utils/fdt/fdt_helper.h>
-#include <sbi_utils/ipi/fdt_ipi.h>
 #include <sbi_utils/ipi/aclint_mswi.h>
 
 static int ipi_mswi_cold_init(const void *fdt, int nodeoff,
@@ -57,6 +57,7 @@ static const struct fdt_match ipi_mswi_match[] = {
 	{ .compatible = "sifive,clint0", .data = &clint_offset },
 	{ .compatible = "thead,c900-clint", .data = &clint_offset },
 	{ .compatible = "thead,c900-aclint-mswi" },
+	{ .compatible = "mips,p8700-aclint-mswi" },
 	{ .compatible = "riscv,aclint-mswi" },
 	{ },
 };
@@ -11,8 +11,8 @@
  */
 
 #include <sbi/riscv_io.h>
+#include <sbi_utils/fdt/fdt_driver.h>
 #include <sbi_utils/fdt/fdt_helper.h>
-#include <sbi_utils/ipi/fdt_ipi.h>
 #include <sbi_utils/ipi/andes_plicsw.h>
 
 extern struct plicsw_data plicsw;
@@ -10,11 +10,8 @@
 libsbiutils-objs-$(CONFIG_IPI_MSWI) += ipi/aclint_mswi.o
 libsbiutils-objs-$(CONFIG_IPI_PLICSW) += ipi/andes_plicsw.o
 
-libsbiutils-objs-$(CONFIG_FDT_IPI) += ipi/fdt_ipi.o
-libsbiutils-objs-$(CONFIG_FDT_IPI) += ipi/fdt_ipi_drivers.carray.o
-
-carray-fdt_ipi_drivers-$(CONFIG_FDT_IPI_MSWI) += fdt_ipi_mswi
+carray-fdt_early_drivers-$(CONFIG_FDT_IPI_MSWI) += fdt_ipi_mswi
 libsbiutils-objs-$(CONFIG_FDT_IPI_MSWI) += ipi/fdt_ipi_mswi.o
 
-carray-fdt_ipi_drivers-$(CONFIG_FDT_IPI_PLICSW) += fdt_ipi_plicsw
+carray-fdt_early_drivers-$(CONFIG_FDT_IPI_PLICSW) += fdt_ipi_plicsw
 libsbiutils-objs-$(CONFIG_FDT_IPI_PLICSW) += ipi/fdt_ipi_plicsw.o
 
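The carray-* variables above feed OpenSBI's compile-time array generator: each name listed is an extern object, and the build emits a .carray.c file collecting pointers to those objects into a NULL-terminated array whose header, element type, and array name come from a .carray description file (the deleted fdt_ipi_drivers.carray above shows that format). Assuming both IPI drivers are enabled, the generated source for fdt_early_drivers would look roughly like this sketch, not the generator's verbatim output:

#include <sbi_utils/fdt/fdt_driver.h>

extern const struct fdt_driver fdt_ipi_mswi;
extern const struct fdt_driver fdt_ipi_plicsw;

/* NULL-terminated so fdt_driver_init_all() can walk it without a count */
const struct fdt_driver *const fdt_early_drivers[] = {
    &fdt_ipi_mswi,
    &fdt_ipi_plicsw,
    NULL,
};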
@@ -115,16 +115,96 @@
 #define APLIC_DISABLE_ITHRESHOLD	1
 #define APLIC_ENABLE_ITHRESHOLD	0
 
+static SBI_LIST_HEAD(aplic_list);
+static void aplic_writel_msicfg(struct aplic_msicfg_data *msicfg,
+                                void *msicfgaddr, void *msicfgaddrH);
+
+static void aplic_init(struct aplic_data *aplic)
+{
+    struct aplic_delegate_data *deleg;
+    u32 i, j, tmp;
+    int locked;
+
+    /* Set domain configuration to 0 */
+    writel(0, (void *)(aplic->addr + APLIC_DOMAINCFG));
+
+    /* Disable all interrupts */
+    for (i = 0; i <= aplic->num_source; i += 32)
+        writel(-1U, (void *)(aplic->addr + APLIC_CLRIE_BASE +
+                             (i / 32) * sizeof(u32)));
+
+    /* Set interrupt type and priority for all interrupts */
+    for (i = 1; i <= aplic->num_source; i++) {
+        /* Set IRQ source configuration to 0 */
+        writel(0, (void *)(aplic->addr + APLIC_SOURCECFG_BASE +
+                           (i - 1) * sizeof(u32)));
+        /* Set IRQ target hart index and priority to 1 */
+        writel(APLIC_DEFAULT_PRIORITY, (void *)(aplic->addr +
+                                                APLIC_TARGET_BASE +
+                                                (i - 1) * sizeof(u32)));
+    }
+
+    /* Configure IRQ delegation */
+    for (i = 0; i < APLIC_MAX_DELEGATE; i++) {
+        deleg = &aplic->delegate[i];
+        if (!deleg->first_irq || !deleg->last_irq)
+            continue;
+        if (aplic->num_source < deleg->first_irq ||
+            aplic->num_source < deleg->last_irq)
+            continue;
+        if (deleg->child_index > APLIC_SOURCECFG_CHILDIDX_MASK)
+            continue;
+        if (deleg->first_irq > deleg->last_irq) {
+            tmp = deleg->first_irq;
+            deleg->first_irq = deleg->last_irq;
+            deleg->last_irq = tmp;
+        }
+        for (j = deleg->first_irq; j <= deleg->last_irq; j++)
+            writel(APLIC_SOURCECFG_D | deleg->child_index,
+                   (void *)(aplic->addr + APLIC_SOURCECFG_BASE +
+                            (j - 1) * sizeof(u32)));
+    }
+
+    /* Default initialization of IDC structures */
+    for (i = 0; i < aplic->num_idc; i++) {
+        writel(0, (void *)(aplic->addr + APLIC_IDC_BASE +
+                           i * APLIC_IDC_SIZE + APLIC_IDC_IDELIVERY));
+        writel(0, (void *)(aplic->addr + APLIC_IDC_BASE +
+                           i * APLIC_IDC_SIZE + APLIC_IDC_IFORCE));
+        writel(APLIC_DISABLE_ITHRESHOLD, (void *)(aplic->addr +
+                                                  APLIC_IDC_BASE +
+                                                  (i * APLIC_IDC_SIZE) +
+                                                  APLIC_IDC_ITHRESHOLD));
+    }
+
+    /* MSI configuration */
+    locked = readl((void *)(aplic->addr + APLIC_MMSICFGADDRH)) & APLIC_xMSICFGADDRH_L;
+    if (aplic->targets_mmode && aplic->has_msicfg_mmode && !locked) {
+        aplic_writel_msicfg(&aplic->msicfg_mmode,
+                            (void *)(aplic->addr + APLIC_MMSICFGADDR),
+                            (void *)(aplic->addr + APLIC_MMSICFGADDRH));
+    }
+    if (aplic->targets_mmode && aplic->has_msicfg_smode && !locked) {
+        aplic_writel_msicfg(&aplic->msicfg_smode,
+                            (void *)(aplic->addr + APLIC_SMSICFGADDR),
+                            (void *)(aplic->addr + APLIC_SMSICFGADDRH));
+    }
+}
+
+void aplic_reinit_all(void)
+{
+    struct aplic_data *aplic;
+
+    sbi_list_for_each_entry(aplic, &aplic_list, node)
+        aplic_init(aplic);
+}
+
 static void aplic_writel_msicfg(struct aplic_msicfg_data *msicfg,
                                 void *msicfgaddr, void *msicfgaddrH)
 {
     u32 val;
     unsigned long base_ppn;
 
     /* Check if MSI config is already locked */
     if (readl(msicfgaddrH) & APLIC_xMSICFGADDRH_L)
         return;
 
     /* Compute the MSI base PPN */
     base_ppn = msicfg->base_addr >> APLIC_xMSICFGADDR_PPN_SHIFT;
     base_ppn &= ~APLIC_xMSICFGADDR_PPN_HART(msicfg->lhxs);
@@ -168,9 +248,8 @@ static int aplic_check_msicfg(struct aplic_msicfg_data *msicfg)
 int aplic_cold_irqchip_init(struct aplic_data *aplic)
 {
 	int rc;
-	u32 i, j, tmp;
 	struct aplic_delegate_data *deleg;
-	u32 first_deleg_irq, last_deleg_irq;
+	u32 first_deleg_irq, last_deleg_irq, i;
 
 	/* Sanity checks */
 	if (!aplic ||
@@ -188,81 +267,24 @@ int aplic_cold_irqchip_init(struct aplic_data *aplic)
 			return rc;
 	}
 
-    /* Set domain configuration to 0 */
-    writel(0, (void *)(aplic->addr + APLIC_DOMAINCFG));
-
-    /* Disable all interrupts */
-    for (i = 0; i <= aplic->num_source; i += 32)
-        writel(-1U, (void *)(aplic->addr + APLIC_CLRIE_BASE +
-                             (i / 32) * sizeof(u32)));
-
-    /* Set interrupt type and priority for all interrupts */
-    for (i = 1; i <= aplic->num_source; i++) {
-        /* Set IRQ source configuration to 0 */
-        writel(0, (void *)(aplic->addr + APLIC_SOURCECFG_BASE +
-                           (i - 1) * sizeof(u32)));
-        /* Set IRQ target hart index and priority to 1 */
-        writel(APLIC_DEFAULT_PRIORITY, (void *)(aplic->addr +
-                                                APLIC_TARGET_BASE +
-                                                (i - 1) * sizeof(u32)));
-    }
-
-    /* Configure IRQ delegation */
-    first_deleg_irq = -1U;
-    last_deleg_irq = 0;
-    for (i = 0; i < APLIC_MAX_DELEGATE; i++) {
-        deleg = &aplic->delegate[i];
-        if (!deleg->first_irq || !deleg->last_irq)
-            continue;
-        if (aplic->num_source < deleg->first_irq ||
-            aplic->num_source < deleg->last_irq)
-            continue;
-        if (APLIC_SOURCECFG_CHILDIDX_MASK < deleg->child_index)
-            continue;
-        if (deleg->first_irq > deleg->last_irq) {
-            tmp = deleg->first_irq;
-            deleg->first_irq = deleg->last_irq;
-            deleg->last_irq = tmp;
-        }
-        if (deleg->first_irq < first_deleg_irq)
-            first_deleg_irq = deleg->first_irq;
-        if (last_deleg_irq < deleg->last_irq)
-            last_deleg_irq = deleg->last_irq;
-        for (j = deleg->first_irq; j <= deleg->last_irq; j++)
-            writel(APLIC_SOURCECFG_D | deleg->child_index,
-                   (void *)(aplic->addr + APLIC_SOURCECFG_BASE +
-                            (j - 1) * sizeof(u32)));
-    }
-
-    /* Default initialization of IDC structures */
-    for (i = 0; i < aplic->num_idc; i++) {
-        writel(0, (void *)(aplic->addr + APLIC_IDC_BASE +
-                           i * APLIC_IDC_SIZE + APLIC_IDC_IDELIVERY));
-        writel(0, (void *)(aplic->addr + APLIC_IDC_BASE +
-                           i * APLIC_IDC_SIZE + APLIC_IDC_IFORCE));
-        writel(APLIC_DISABLE_ITHRESHOLD, (void *)(aplic->addr +
-                                                  APLIC_IDC_BASE +
-                                                  (i * APLIC_IDC_SIZE) +
-                                                  APLIC_IDC_ITHRESHOLD));
-    }
-
-    /* MSI configuration */
-    if (aplic->targets_mmode && aplic->has_msicfg_mmode) {
-        aplic_writel_msicfg(&aplic->msicfg_mmode,
-                            (void *)(aplic->addr + APLIC_MMSICFGADDR),
-                            (void *)(aplic->addr + APLIC_MMSICFGADDRH));
-    }
-    if (aplic->targets_mmode && aplic->has_msicfg_smode) {
-        aplic_writel_msicfg(&aplic->msicfg_smode,
-                            (void *)(aplic->addr + APLIC_SMSICFGADDR),
-                            (void *)(aplic->addr + APLIC_SMSICFGADDRH));
-    }
+    /* Init the APLIC registers */
+    aplic_init(aplic);
 
     /*
      * Add APLIC region to the root domain if:
      * 1) It targets M-mode of any HART directly or via MSIs
      * 2) All interrupts are delegated to some child APLIC
      */
+    first_deleg_irq = -1U;
+    last_deleg_irq = 0;
+    for (i = 0; i < APLIC_MAX_DELEGATE; i++) {
+        deleg = &aplic->delegate[i];
+        if (deleg->first_irq < first_deleg_irq)
+            first_deleg_irq = deleg->first_irq;
+        if (last_deleg_irq < deleg->last_irq)
+            last_deleg_irq = deleg->last_irq;
+    }
+
     if (aplic->targets_mmode ||
         ((first_deleg_irq < last_deleg_irq) &&
          (last_deleg_irq == aplic->num_source) &&
@@ -278,5 +300,8 @@ int aplic_cold_irqchip_init(struct aplic_data *aplic)
 	/* Register irqchip device */
 	sbi_irqchip_add_device(&aplic->irqchip);
 
+	/* Attach to the aplic list */
+	sbi_list_add_tail(&aplic->node, &aplic_list);
+
 	return 0;
 }
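aplic_reinit_all() works because every cold-initialized APLIC is appended to aplic_list; the sbi_dlist API embeds the link node in the device structure and recovers the container at iteration time. The idiom in isolation (the list calls are the same ones used above; the demo type is hypothetical):

#include <sbi/sbi_list.h>

struct demo_irqchip {
    unsigned long addr;
    struct sbi_dlist node;  /* embedded link, as in struct aplic_data */
};

static SBI_LIST_HEAD(demo_list);

static void demo_register(struct demo_irqchip *d)
{
    sbi_list_add_tail(&d->node, &demo_list);
}

static void demo_reinit_all(void)
{
    struct demo_irqchip *d;

    /* container_of-based traversal over every registered device */
    sbi_list_for_each_entry(d, &demo_list, node) {
        /* re-program the device at d->addr here */
    }
}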
@@ -199,6 +199,7 @@ static void imsic_ipi_send(u32 hart_index)
 
 static struct sbi_ipi_device imsic_ipi_device = {
 	.name = "aia-imsic",
+	.rating = 300,
 	.ipi_send = imsic_ipi_send
 };
 
@@ -393,7 +394,7 @@ int imsic_cold_irqchip_init(struct imsic_data *imsic)
 	sbi_irqchip_add_device(&imsic_device);
 
 	/* Register IPI device */
-	sbi_ipi_set_device(&imsic_ipi_device);
+	sbi_ipi_add_device(&imsic_ipi_device);
 
 	return 0;
 }
@@ -136,7 +136,7 @@ void plic_suspend(void)
 		return;
 
 	sbi_for_each_hartindex(h) {
-		u32 context_id = plic->context_map[h][PLIC_S_CONTEXT];
+		s16 context_id = plic->context_map[h][PLIC_S_CONTEXT];
 
 		if (context_id < 0)
 			continue;
@@ -167,7 +167,7 @@ void plic_resume(void)
 		return;
 
 	sbi_for_each_hartindex(h) {
-		u32 context_id = plic->context_map[h][PLIC_S_CONTEXT];
+		s16 context_id = plic->context_map[h][PLIC_S_CONTEXT];
 
 		if (context_id < 0)
 			continue;
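The u32 to s16 change is not cosmetic: context_map uses a negative sentinel (presumably -1) for "no S-mode context", and with an unsigned local the context_id < 0 guard is dead code, so the suspend/resume path would go on to index PLIC context registers with a huge wrapped value. A two-line demonstration of the trap:

#include <stdio.h>

int main(void)
{
    unsigned int u = (unsigned int)-1;  /* sentinel read into a u32 */
    short s = -1;                       /* sentinel read into an s16 */

    printf("%d\n", u < 0);  /* prints 0: the guard never fires */
    printf("%d\n", s < 0);  /* prints 1: the sentinel is caught */
    return 0;
}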
@@ -22,6 +22,18 @@ config FDT_MPXY_RPMI_SYSMSI
 	bool "MPXY driver for RPMI system MSI service group"
 	default n
 
+config FDT_MPXY_RPMI_VOLTAGE
+	bool "MPXY driver for RPMI voltage service group"
+	default n
+
+config FDT_MPXY_RPMI_DEVICE_POWER
+	bool "MPXY driver for RPMI device power service group"
+	default n
+
+config FDT_MPXY_RPMI_PERFORMANCE
+	bool "MPXY driver for RPMI performance service group"
+	default n
+
 endif
 
 endmenu
lib/utils/mpxy/fdt_mpxy_rpmi_device_power.c (new file, 56 lines)
@@ -0,0 +1,56 @@
#include <sbi_utils/mpxy/fdt_mpxy_rpmi_mbox.h>

static struct mpxy_rpmi_service_data dpwr_services[] = {
    {
        .id = RPMI_DPWR_SRV_ENABLE_NOTIFICATION,
        .min_tx_len = sizeof(struct rpmi_enable_notification_req),
        .max_tx_len = sizeof(struct rpmi_enable_notification_req),
        .min_rx_len = sizeof(struct rpmi_enable_notification_resp),
        .max_rx_len = sizeof(struct rpmi_enable_notification_resp),
    },
    {
        .id = RPMI_DPWR_SRV_GET_NUM_DOMAINS,
        .min_tx_len = 0,
        .max_tx_len = 0,
        .min_rx_len = sizeof(struct rpmi_dpwr_get_num_domain_resp),
        .max_rx_len = sizeof(struct rpmi_dpwr_get_num_domain_resp),
    },
    {
        .id = RPMI_DPWR_SRV_GET_ATTRIBUTES,
        .min_tx_len = sizeof(struct rpmi_dpwr_get_attrs_req),
        .max_tx_len = sizeof(struct rpmi_dpwr_get_attrs_req),
        .min_rx_len = sizeof(struct rpmi_dpwr_get_attrs_resp),
        .max_rx_len = sizeof(struct rpmi_dpwr_get_attrs_resp),
    },
    {
        .id = RPMI_DPWR_SRV_SET_STATE,
        .min_tx_len = sizeof(struct rpmi_dpwr_set_state_req),
        .max_tx_len = sizeof(struct rpmi_dpwr_set_state_req),
        .min_rx_len = sizeof(struct rpmi_dpwr_set_state_resp),
        .max_rx_len = sizeof(struct rpmi_dpwr_set_state_resp),
    },
    {
        .id = RPMI_DPWR_SRV_GET_STATE,
        .min_tx_len = sizeof(struct rpmi_dpwr_get_state_req),
        .max_tx_len = sizeof(struct rpmi_dpwr_get_state_req),
        .min_rx_len = sizeof(struct rpmi_dpwr_get_state_resp),
        .max_rx_len = sizeof(struct rpmi_dpwr_get_state_resp),
    },
};

static const struct mpxy_rpmi_mbox_data dpwr_data = {
    .servicegrp_id = RPMI_SRVGRP_DEVICE_POWER,
    .num_services = RPMI_DPWR_SRV_MAX_COUNT,
    .service_data = dpwr_services,
};

static const struct fdt_match dpwr_match[] = {
    { .compatible = "riscv,rpmi-mpxy-device-power", .data = &dpwr_data },
    { },
};

const struct fdt_driver fdt_mpxy_rpmi_device_power = {
    .experimental = true,
    .match_table = dpwr_match,
    .init = mpxy_rpmi_mbox_init,
};
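Each entry above pins the legal request and response sizes for one RPMI service, which lets the shared mpxy_rpmi_mbox_init() path validate message lengths generically instead of per driver; a max_rx_len of -1U, as used by the GET_SUPPORTED_LEVELS entries in the performance and voltage tables below, marks a variable-length response with no upper bound. A hypothetical sketch of the kind of check such a table enables:

#include <stdbool.h>
#include <stdint.h>

/* Mirrors the fields used by the service tables in this diff. */
struct toy_service {
    uint32_t id;
    uint32_t min_tx_len, max_tx_len;
    uint32_t min_rx_len, max_rx_len;  /* (uint32_t)-1 == unbounded */
};

static bool toy_len_ok(uint32_t len, uint32_t min, uint32_t max)
{
    return len >= min && (max == (uint32_t)-1 || len <= max);
}

static bool toy_xfer_ok(const struct toy_service *s,
                        uint32_t tx_len, uint32_t rx_len)
{
    return toy_len_ok(tx_len, s->min_tx_len, s->max_tx_len) &&
           toy_len_ok(rx_len, s->min_rx_len, s->max_rx_len);
}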
lib/utils/mpxy/fdt_mpxy_rpmi_performance.c (new file, 91 lines)
@@ -0,0 +1,91 @@
#include <sbi_utils/mpxy/fdt_mpxy_rpmi_mbox.h>

static struct mpxy_rpmi_service_data performance_services[] = {
    {
        .id = RPMI_PERF_SRV_ENABLE_NOTIFICATION,
        .min_tx_len = sizeof(struct rpmi_enable_notification_req),
        .max_tx_len = sizeof(struct rpmi_enable_notification_req),
        .min_rx_len = sizeof(struct rpmi_enable_notification_resp),
        .max_rx_len = sizeof(struct rpmi_enable_notification_resp),
    },
    {
        .id = RPMI_PERF_SRV_GET_NUM_DOMAINS,
        .min_tx_len = 0,
        .max_tx_len = 0,
        .min_rx_len = sizeof(struct rpmi_perf_get_num_domain_resp),
        .max_rx_len = sizeof(struct rpmi_perf_get_num_domain_resp),
    },
    {
        .id = RPMI_PERF_SRV_GET_ATTRIBUTES,
        .min_tx_len = sizeof(struct rpmi_perf_get_attrs_req),
        .max_tx_len = sizeof(struct rpmi_perf_get_attrs_req),
        .min_rx_len = sizeof(struct rpmi_perf_get_attrs_resp),
        .max_rx_len = sizeof(struct rpmi_perf_get_attrs_resp),
    },
    {
        .id = RPMI_PERF_SRV_GET_SUPPORTED_LEVELS,
        .min_tx_len = sizeof(struct rpmi_perf_get_supported_level_req),
        .max_tx_len = sizeof(struct rpmi_perf_get_supported_level_req),
        .min_rx_len = sizeof(struct rpmi_perf_get_supported_level_resp),
        .max_rx_len = -1U,
    },
    {
        .id = RPMI_PERF_SRV_GET_LEVEL,
        .min_tx_len = sizeof(struct rpmi_perf_get_level_req),
        .max_tx_len = sizeof(struct rpmi_perf_get_level_req),
        .min_rx_len = sizeof(struct rpmi_perf_get_level_resp),
        .max_rx_len = sizeof(struct rpmi_perf_get_level_resp),
    },
    {
        .id = RPMI_PERF_SRV_SET_LEVEL,
        .min_tx_len = sizeof(struct rpmi_perf_set_level_req),
        .max_tx_len = sizeof(struct rpmi_perf_set_level_req),
        .min_rx_len = sizeof(struct rpmi_perf_set_level_resp),
        .max_rx_len = sizeof(struct rpmi_perf_set_level_resp),
    },
    {
        .id = RPMI_PERF_SRV_GET_LIMIT,
        .min_tx_len = sizeof(struct rpmi_perf_get_limit_req),
        .max_tx_len = sizeof(struct rpmi_perf_get_limit_req),
        .min_rx_len = sizeof(struct rpmi_perf_get_limit_resp),
        .max_rx_len = sizeof(struct rpmi_perf_get_limit_resp),
    },
    {
        .id = RPMI_PERF_SRV_SET_LIMIT,
        .min_tx_len = sizeof(struct rpmi_perf_set_limit_req),
        .max_tx_len = sizeof(struct rpmi_perf_set_limit_req),
        .min_rx_len = sizeof(struct rpmi_perf_set_limit_resp),
        .max_rx_len = sizeof(struct rpmi_perf_set_limit_resp),
    },
    {
        .id = RPMI_PERF_SRV_GET_FAST_CHANNEL_REGION,
        .min_tx_len = 0,
        .max_tx_len = 0,
        .min_rx_len = sizeof(struct rpmi_perf_get_fast_chn_region_resp),
        .max_rx_len = sizeof(struct rpmi_perf_get_fast_chn_region_resp),
    },
    {
        .id = RPMI_PERF_SRV_GET_FAST_CHANNEL_ATTRIBUTES,
        .min_tx_len = sizeof(struct rpmi_perf_get_fast_chn_attr_req),
        .max_tx_len = sizeof(struct rpmi_perf_get_fast_chn_attr_req),
        .min_rx_len = sizeof(struct rpmi_perf_get_fast_chn_attr_resp),
        .max_rx_len = sizeof(struct rpmi_perf_get_fast_chn_attr_resp),
    },
};

static const struct mpxy_rpmi_mbox_data performance_data = {
    .servicegrp_id = RPMI_SRVGRP_PERFORMANCE,
    .num_services = RPMI_PERF_SRV_MAX_COUNT,
    .service_data = performance_services,
};

static const struct fdt_match performance_match[] = {
    { .compatible = "riscv,rpmi-mpxy-performance", .data = &performance_data },
    { },
};

const struct fdt_driver fdt_mpxy_rpmi_performance = {
    .experimental = true,
    .match_table = performance_match,
    .init = mpxy_rpmi_mbox_init,
};
@@ -57,7 +57,8 @@ static int mpxy_rpmi_sysmis_xfer(void *context, struct mbox_chan *chan,
 		sys_msi_address |= ((u64)le32_to_cpu(((u32 *)xfer->tx)[2])) << 32;
 		if (!sbi_domain_check_addr_range(sbi_domain_thishart_ptr(),
 						 sys_msi_address, 0x4, PRV_S,
-						 SBI_DOMAIN_READ | SBI_DOMAIN_WRITE)) {
+						 SBI_DOMAIN_READ | SBI_DOMAIN_WRITE |
+						 SBI_DOMAIN_MMIO)) {
 			((u32 *)xfer->rx)[0] = cpu_to_le32(RPMI_ERR_INVALID_ADDR);
 			args->rx_data_len = sizeof(u32);
 			break;
lib/utils/mpxy/fdt_mpxy_rpmi_voltage.c (new file, 77 lines)
@@ -0,0 +1,77 @@
#include <sbi_utils/mpxy/fdt_mpxy_rpmi_mbox.h>

static struct mpxy_rpmi_service_data voltage_services[] = {
    {
        .id = RPMI_VOLTAGE_SRV_ENABLE_NOTIFICATION,
        .min_tx_len = sizeof(struct rpmi_enable_notification_req),
        .max_tx_len = sizeof(struct rpmi_enable_notification_req),
        .min_rx_len = sizeof(struct rpmi_enable_notification_resp),
        .max_rx_len = sizeof(struct rpmi_enable_notification_resp),
    },
    {
        .id = RPMI_VOLTAGE_SRV_GET_NUM_DOMAINS,
        .min_tx_len = 0,
        .max_tx_len = 0,
        .min_rx_len = sizeof(struct rpmi_voltage_get_num_domains_resp),
        .max_rx_len = sizeof(struct rpmi_voltage_get_num_domains_resp),
    },
    {
        .id = RPMI_VOLTAGE_SRV_GET_ATTRIBUTES,
        .min_tx_len = sizeof(struct rpmi_voltage_get_attributes_req),
        .max_tx_len = sizeof(struct rpmi_voltage_get_attributes_req),
        .min_rx_len = sizeof(struct rpmi_voltage_get_attributes_resp),
        .max_rx_len = sizeof(struct rpmi_voltage_get_attributes_resp),
    },
    {
        .id = RPMI_VOLTAGE_SRV_GET_SUPPORTED_LEVELS,
        .min_tx_len = sizeof(struct rpmi_voltage_get_supported_rate_req),
        .max_tx_len = sizeof(struct rpmi_voltage_get_supported_rate_req),
        .min_rx_len = sizeof(struct rpmi_voltage_get_supported_rate_resp),
        .max_rx_len = -1U,
    },
    {
        .id = RPMI_VOLTAGE_SRV_SET_CONFIG,
        .min_tx_len = sizeof(struct rpmi_voltage_set_config_req),
        .max_tx_len = sizeof(struct rpmi_voltage_set_config_req),
        .min_rx_len = sizeof(struct rpmi_voltage_set_config_resp),
        .max_rx_len = sizeof(struct rpmi_voltage_set_config_resp),
    },
    {
        .id = RPMI_VOLTAGE_SRV_GET_CONFIG,
        .min_tx_len = sizeof(struct rpmi_voltage_get_config_req),
        .max_tx_len = sizeof(struct rpmi_voltage_get_config_req),
        .min_rx_len = sizeof(struct rpmi_voltage_get_config_resp),
        .max_rx_len = sizeof(struct rpmi_voltage_get_config_resp),
    },
    {
        .id = RPMI_VOLTAGE_SRV_SET_LEVEL,
        .min_tx_len = sizeof(struct rpmi_voltage_set_level_req),
        .max_tx_len = sizeof(struct rpmi_voltage_set_level_req),
        .min_rx_len = sizeof(struct rpmi_voltage_set_level_resp),
        .max_rx_len = sizeof(struct rpmi_voltage_set_level_resp),
    },
    {
        .id = RPMI_VOLTAGE_SRV_GET_LEVEL,
        .min_tx_len = sizeof(struct rpmi_voltage_get_level_req),
        .max_tx_len = sizeof(struct rpmi_voltage_get_level_req),
        .min_rx_len = sizeof(struct rpmi_voltage_get_level_resp),
        .max_rx_len = sizeof(struct rpmi_voltage_get_level_resp),
    },
};

static const struct mpxy_rpmi_mbox_data voltage_data = {
    .servicegrp_id = RPMI_SRVGRP_VOLTAGE,
    .num_services = RPMI_VOLTAGE_SRV_MAX_COUNT,
    .service_data = voltage_services,
};

static const struct fdt_match voltage_match[] = {
    { .compatible = "riscv,rpmi-mpxy-voltage", .data = &voltage_data },
    { },
};

const struct fdt_driver fdt_mpxy_rpmi_voltage = {
    .experimental = true,
    .match_table = voltage_match,
    .init = mpxy_rpmi_mbox_init,
};
@@ -15,5 +15,14 @@ libsbiutils-objs-$(CONFIG_FDT_MPXY_RPMI_MBOX) += mpxy/fdt_mpxy_rpmi_mbox.o
 carray-fdt_mpxy_drivers-$(CONFIG_FDT_MPXY_RPMI_CLOCK) += fdt_mpxy_rpmi_clock
 libsbiutils-objs-$(CONFIG_FDT_MPXY_RPMI_CLOCK) += mpxy/fdt_mpxy_rpmi_clock.o
 
+carray-fdt_mpxy_drivers-$(CONFIG_FDT_MPXY_RPMI_PERFORMANCE) += fdt_mpxy_rpmi_performance
+libsbiutils-objs-$(CONFIG_FDT_MPXY_RPMI_PERFORMANCE) += mpxy/fdt_mpxy_rpmi_performance.o
+
 carray-fdt_mpxy_drivers-$(CONFIG_FDT_MPXY_RPMI_SYSMSI) += fdt_mpxy_rpmi_sysmsi
 libsbiutils-objs-$(CONFIG_FDT_MPXY_RPMI_SYSMSI) += mpxy/fdt_mpxy_rpmi_sysmsi.o
+
+carray-fdt_mpxy_drivers-$(CONFIG_FDT_MPXY_RPMI_VOLTAGE) += fdt_mpxy_rpmi_voltage
+libsbiutils-objs-$(CONFIG_FDT_MPXY_RPMI_VOLTAGE) += mpxy/fdt_mpxy_rpmi_voltage.o
+
+carray-fdt_mpxy_drivers-$(CONFIG_FDT_MPXY_RPMI_DEVICE_POWER) += fdt_mpxy_rpmi_device_power
+libsbiutils-objs-$(CONFIG_FDT_MPXY_RPMI_DEVICE_POWER) += mpxy/fdt_mpxy_rpmi_device_power.o
@@ -7,6 +7,7 @@
  *   Rahul Pathak <rpathak@ventanamicro.com>
  */
 
+#include <sbi/sbi_hart.h>
 #include <sbi/sbi_error.h>
 #include <sbi/sbi_system.h>
 #include <sbi/sbi_console.h>
@@ -56,6 +57,8 @@ static void rpmi_do_system_reset(u32 reset_type)
 	if (ret)
 		sbi_printf("system reset failed [type: %d]: ret: %d\n",
 			   reset_type, ret);
+
+	sbi_hart_hang();
 }
 
 /**
@@ -9,6 +9,7 @@
 
 #include <sbi/riscv_io.h>
 #include <sbi/sbi_console.h>
+#include <sbi/sbi_domain.h>
 #include <sbi_utils/serial/sifive-uart.h>
 
 /* clang-format off */
@@ -111,5 +112,7 @@ int sifive_uart_init(unsigned long base, u32 in_freq, u32 baudrate)
 
 	sbi_console_set_device(&sifive_console);
 
-	return 0;
+	return sbi_domain_root_add_memrange(base, PAGE_SIZE, PAGE_SIZE,
+					    (SBI_DOMAIN_MEMREGION_MMIO |
+					     SBI_DOMAIN_MEMREGION_SHARED_SURW_MRW));
 }
@@ -15,7 +15,7 @@
 
 /* clang-format off */
 
-#define UART_RBR_OFFSET		0	/* In: Recieve Buffer Register */
+#define UART_RBR_OFFSET		0	/* In: Receive Buffer Register */
 #define UART_THR_OFFSET		0	/* Out: Transmitter Holding Register */
 #define UART_DLL_OFFSET		0	/* Out: Divisor Latch Low */
 #define UART_IER_OFFSET		1	/* I/O: Interrupt Enable Register */
@@ -133,9 +133,8 @@ int uart8250_init(unsigned long base, u32 in_freq, u32 baudrate, u32 reg_shift,
 	set_reg(UART_FCR_OFFSET, 0x01);
 	/* No modem control DTR RTS */
 	set_reg(UART_MCR_OFFSET, 0x00);
-	/* Clear line status */
-	get_reg(UART_LSR_OFFSET);
-	/* Read receive buffer */
+	/* Clear line status and read receive buffer */
+	if (get_reg(UART_LSR_OFFSET) & UART_LSR_DR)
 		get_reg(UART_RBR_OFFSET);
 	/* Set scratchpad */
 	set_reg(UART_SCR_OFFSET, 0x00);
@@ -14,6 +14,10 @@ config FDT_SUSPEND_RPMI
 	depends on FDT_MAILBOX && RPMI_MAILBOX
 	default n
 
+config FDT_SUSPEND_SIFIVE_SMC0
+	bool "FDT SIFIVE SMC0 suspend driver"
+	depends on FDT_HSM_SIFIVE_TMC0 && IRQCHIP_APLIC
+	default n
 endif
 
 endmenu
318
lib/utils/suspend/fdt_suspend_sifive_smc0.c
Normal file
318
lib/utils/suspend/fdt_suspend_sifive_smc0.c
Normal file
@@ -0,0 +1,318 @@
|
||||
/*
|
||||
* SPDX-License-Identifier: BSD-2-Clause
|
||||
*
|
||||
* Copyright (c) 2025 SiFive
|
||||
*/
|
||||
|
||||
#include <libfdt.h>
|
||||
#include <sbi/riscv_asm.h>
|
||||
#include <sbi/riscv_io.h>
|
||||
#include <sbi/sbi_console.h>
|
||||
#include <sbi/sbi_domain.h>
|
||||
#include <sbi/sbi_error.h>
|
||||
#include <sbi/sbi_hart.h>
|
||||
#include <sbi/sbi_hsm.h>
|
||||
#include <sbi/sbi_system.h>
|
||||
#include <sbi/sbi_timer.h>
|
||||
#include <sbi_utils/cache/fdt_cmo_helper.h>
|
||||
#include <sbi_utils/fdt/fdt_driver.h>
|
||||
#include <sbi_utils/fdt/fdt_helper.h>
|
||||
#include <sbi_utils/hsm/fdt_hsm_sifive_inst.h>
|
||||
#include <sbi_utils/hsm/fdt_hsm_sifive_tmc0.h>
|
||||
#include <sbi_utils/irqchip/aplic.h>
|
||||
#include <sbi_utils/timer/aclint_mtimer.h>
|
||||
|
||||
#define SIFIVE_SMC_PGPREP_OFF 0x0
|
||||
#define SIFIVE_SMC_PG_OFF 0x4
|
||||
#define SIFIVE_SMC_CCTIMER_OFF 0xc
|
||||
#define SIFIVE_SMC_RESUMEPC_LO_OFF 0x10
|
||||
#define SIFIVE_SMC_RESUMEPC_HI_OFF 0x14
|
||||
#define SIFIVE_SMC_SYNC_PMC_OFF 0x24
|
||||
#define SIFIVE_SMC_CYCLECOUNT_LO_OFF 0x28
|
||||
#define SIFIVE_SMC_CYCLECOUNT_HI_OFF 0x2c
|
||||
#define SIFIVE_SMC_WFI_UNCORE_CG_OFF 0x50
|
||||
|
||||
#define SIFIVE_SMC_PGPREP_ENA_REQ BIT(31)
|
||||
#define SIFIVE_SMC_PGPREP_ENA_ACK BIT(30)
|
||||
#define SIFIVE_SMC_PGPREP_DIS_REQ BIT(29)
|
||||
#define SIFIVE_SMC_PGPREP_DIS_ACK BIT(29)
|
||||
#define SIFIVE_SMC_PGPREP_FRONTNOTQ BIT(19)
|
||||
#define SIFIVE_SMC_PGPREP_CLFPNOTQ BIT(18)
|
||||
#define SIFIVE_SMC_PGPREP_PMCENAERR BIT(17)
|
||||
#define SIFIVE_SMC_PGPREP_WAKE_DETECT BIT(16)
|
||||
#define SIFIVE_SMC_PGPREP_BUSERR BIT(15)
|
||||
#define SIFIVE_SMC_PGPREP_EARLY_ABORT BIT(3)
|
||||
#define SIFIVE_SMC_PGPREP_INTERNAL_ABORT BIT(2)
|
||||
#define SIFIVE_SMC_PGPREP_ENARSP (SIFIVE_SMC_PGPREP_FRONTNOTQ | \
|
||||
SIFIVE_SMC_PGPREP_CLFPNOTQ | \
|
||||
SIFIVE_SMC_PGPREP_PMCENAERR | \
|
||||
SIFIVE_SMC_PGPREP_WAKE_DETECT | \
|
||||
SIFIVE_SMC_PGPREP_BUSERR)
|
||||
|
||||
#define SIFIVE_SMC_PGPREP_ABORT (SIFIVE_SMC_PGPREP_EARLY_ABORT | \
|
||||
SIFIVE_SMC_PGPREP_INTERNAL_ABORT)
|
||||
|
||||
#define SIFIVE_SMC_PG_ENA_REQ BIT(31)
|
||||
#define SIFIVE_SMC_PG_WARM_RESET BIT(1)
|
||||
|
||||
#define SIFIVE_SMC_SYNCPMC_SYNC_REQ BIT(31)
|
||||
#define SIFIVE_SMC_SYNCPMC_SYNC_WREQ BIT(30)
|
||||
#define SIFIVE_SMC_SYNCPMC_SYNC_ACK BIT(29)
|
||||
|
||||
static struct aclint_mtimer_data smc_sync_timer;
|
||||
static unsigned long smc0_base;
|
||||
|
||||
static void sifive_smc0_set_pmcsync(char regid, bool write_mode)
|
||||
{
|
||||
unsigned long addr = smc0_base + SIFIVE_SMC_SYNC_PMC_OFF;
|
||||
u32 v = regid | SIFIVE_SMC_SYNCPMC_SYNC_REQ;
|
||||
|
||||
if (write_mode)
|
||||
v |= SIFIVE_SMC_SYNCPMC_SYNC_WREQ;
|
||||
|
||||
writel(v, (void *)addr);
|
||||
while (!(readl((void *)addr) & SIFIVE_SMC_SYNCPMC_SYNC_ACK));
|
||||
}
|
||||
|
||||
static u64 sifive_smc0_time_read(volatile u64 *addr)
|
||||
{
|
||||
u32 lo, hi;
|
||||
|
||||
do {
|
||||
sifive_smc0_set_pmcsync(SIFIVE_SMC_CYCLECOUNT_LO_OFF, false);
|
||||
sifive_smc0_set_pmcsync(SIFIVE_SMC_CYCLECOUNT_HI_OFF, false);
|
||||
hi = readl_relaxed((u32 *)addr + 1);
|
||||
lo = readl_relaxed((u32 *)addr);
|
||||
} while (hi != readl_relaxed((u32 *)addr + 1));
|
||||
|
||||
return ((u64)hi << 32) | (u64)lo;
|
||||
}
|
||||
|
||||
static void sifive_smc0_set_resumepc(physical_addr_t raddr)
|
||||
{
|
||||
/* Set resumepc_lo */
|
||||
writel((u32)raddr, (void *)(smc0_base + SIFIVE_SMC_RESUMEPC_LO_OFF));
|
||||
/* copy resumepc_lo from SMC to PMC */
|
||||
sifive_smc0_set_pmcsync(SIFIVE_SMC_RESUMEPC_LO_OFF, true);
|
||||
#if __riscv_xlen > 32
|
||||
/* Set resumepc_hi */
|
||||
writel((u32)(raddr >> 32), (void *)(smc0_base + SIFIVE_SMC_RESUMEPC_HI_OFF));
|
||||
/* copy resumepc_hi from SMC to PMC */
|
||||
sifive_smc0_set_pmcsync(SIFIVE_SMC_RESUMEPC_HI_OFF, true);
|
||||
#endif
|
||||
}
|
||||
|
||||
static u32 sifive_smc0_get_pgprep_enarsp(void)
|
||||
{
|
||||
u32 v = readl((void *)(smc0_base + SIFIVE_SMC_PGPREP_OFF));
|
||||
|
||||
return v & SIFIVE_SMC_PGPREP_ENARSP;
|
||||
}
|
||||
|
||||
static void sifive_smc0_set_pgprep_disreq(void)
|
||||
{
|
||||
unsigned long addr = smc0_base + SIFIVE_SMC_PGPREP_OFF;
|
||||
u32 v = readl((void *)addr);
|
||||
|
||||
writel(v | SIFIVE_SMC_PGPREP_DIS_REQ, (void *)addr);
|
||||
while (!(readl((void *)addr) & SIFIVE_SMC_PGPREP_DIS_ACK));
|
||||
}
|
||||
|
||||
static u32 sifive_smc0_set_pgprep_enareq(void)
|
||||
{
|
||||
unsigned long addr = smc0_base + SIFIVE_SMC_PGPREP_OFF;
|
||||
u32 v = readl((void *)addr);
|
||||
|
||||
writel(v | SIFIVE_SMC_PGPREP_ENA_REQ, (void *)addr);
|
||||
while (!(readl((void *)addr) & SIFIVE_SMC_PGPREP_ENA_ACK));
|
||||
|
||||
v = readl((void *)addr);
|
||||
|
||||
return v & SIFIVE_SMC_PGPREP_ABORT;
|
||||
}
|
||||
|
||||
static void sifive_smc0_set_pg_enareq(void)
|
||||
{
|
||||
unsigned long addr = smc0_base + SIFIVE_SMC_PG_OFF;
|
||||
u32 v = readl((void *)addr);
|
||||
|
||||
writel(v | SIFIVE_SMC_PG_ENA_REQ, (void *)addr);
|
||||
}
|
||||
|
||||
static inline void sifive_smc0_set_cg(bool enable)
|
||||
{
|
||||
unsigned long addr = smc0_base + SIFIVE_SMC_WFI_UNCORE_CG_OFF;
|
||||
|
||||
if (enable)
|
||||
writel(0, (void *)addr);
|
||||
else
|
||||
writel(1, (void *)addr);
|
||||
}
|
||||
|
||||
static int sifive_smc0_prep(void)
|
||||
{
|
||||
const struct sbi_domain *dom = &root;
|
||||
struct sbi_scratch *scratch = sbi_scratch_thishart_ptr();
|
||||
unsigned long i;
|
||||
int rc;
|
||||
u32 target;
|
||||
|
||||
if (!smc0_base)
|
||||
return SBI_ENODEV;
|
||||
|
||||
/* Prevent all secondary tiles from waking up from PG state */
|
||||
sbi_hartmask_for_each_hartindex(i, dom->possible_harts) {
|
||||
target = sbi_hartindex_to_hartid(i);
|
||||
if (target != current_hartid()) {
|
||||
rc = sifive_tmc0_set_wakemask_enareq(target);
|
||||
if (rc) {
|
||||
sbi_printf("Fail to enable wakemask for hart %d\n",
|
||||
target);
|
||||
goto fail;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* Check if all secondary tiles enter PG state */
|
||||
sbi_hartmask_for_each_hartindex(i, dom->possible_harts) {
|
||||
target = sbi_hartindex_to_hartid(i);
|
||||
if (target != current_hartid() &&
|
||||
!sifive_tmc0_is_pg(target)) {
|
||||
sbi_printf("Hart %d not in the PG state\n", target);
|
||||
goto fail;
|
||||
}
|
||||
}
|
||||
|
||||
rc = sifive_smc0_set_pgprep_enareq();
|
||||
if (rc) {
|
||||
sbi_printf("SMC0 error: abort code: 0x%x\n", rc);
|
||||
goto fail;
|
||||
}
|
||||
|
||||
rc = sifive_smc0_get_pgprep_enarsp();
|
||||
if (rc) {
|
||||
sifive_smc0_set_pgprep_disreq();
|
||||
sbi_printf("SMC0 error: error response code: 0x%x\n", rc);
|
||||
goto fail;
|
||||
}
|
||||
|
||||
sifive_smc0_set_resumepc(scratch->warmboot_addr);
|
||||
return SBI_OK;
|
||||
fail:
|
||||
sbi_hartmask_for_each_hartindex(i, dom->possible_harts) {
|
||||
target = sbi_hartindex_to_hartid(i);
|
||||
if (target != current_hartid())
|
||||
sifive_tmc0_set_wakemask_disreq(target);
|
||||
}
|
||||
|
||||
return SBI_EFAIL;
|
||||
}
|
||||
|
||||
static int sifive_smc0_enter(void)
|
||||
{
|
||||
const struct sbi_domain *dom = &root;
|
||||
struct sbi_scratch *scratch = sbi_scratch_thishart_ptr();
|
||||
unsigned long i;
|
||||
u32 target, rc;
|
||||
|
||||
/* Flush cache and check if there is wake detect or bus error */
|
||||
if (fdt_cmo_llc_flush_all() &&
|
||||
sbi_hart_has_extension(scratch, SBI_HART_EXT_XSIFIVE_CFLUSH_D_L1))
|
||||
sifive_cflush();
|
||||
|
||||
rc = sifive_smc0_get_pgprep_enarsp();
|
||||
if (rc) {
|
||||
sbi_printf("SMC0 error: error response code: 0x%x\n", rc);
|
||||
rc = SBI_EFAIL;
|
||||
goto fail;
|
||||
}
|
||||
|
||||
if (sbi_hart_has_extension(scratch, SBI_HART_EXT_XSIFIVE_CEASE)) {
|
||||
sifive_smc0_set_pg_enareq();
|
||||
while (1)
|
||||
sifive_cease();
|
||||
}
|
||||
|
||||
rc = SBI_ENOTSUPP;
|
||||
fail:
|
||||
sifive_smc0_set_pgprep_disreq();
|
||||
sbi_hartmask_for_each_hartindex(i, dom->possible_harts) {
|
||||
target = sbi_hartindex_to_hartid(i);
|
||||
if (target != current_hartid())
|
||||
sifive_tmc0_set_wakemask_disreq(target);
|
||||
}
|
||||
return rc;
|
||||
}
|
||||
|
||||
static int sifive_smc0_pg(void)
|
||||
{
|
||||
int rc;
|
||||
|
||||
rc = sifive_smc0_prep();
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
return sifive_smc0_enter();
|
||||
}
|
||||
|
||||
static void sifive_smc0_mtime_update(void)
|
||||
{
|
||||
struct aclint_mtimer_data *mt = aclint_get_mtimer_data();
|
||||
|
||||
aclint_mtimer_update(mt, &smc_sync_timer);
|
||||
}
|
||||
|
||||
static int sifive_smc0_system_suspend_check(u32 sleep_type)
|
||||
{
|
||||
return sleep_type == SBI_SUSP_SLEEP_TYPE_SUSPEND ? SBI_OK : SBI_EINVAL;
|
||||
}
|
||||
|
||||
static int sifive_smc0_system_suspend(u32 sleep_type, unsigned long addr)
|
||||
{
|
||||
/* Disable the timer interrupt */
|
||||
sbi_timer_exit(sbi_scratch_thishart_ptr());
|
||||
|
||||
return sifive_smc0_pg();
|
||||
}
|
||||
|
||||
static void sifive_smc0_system_resume(void)
|
||||
{
|
||||
aplic_reinit_all();
|
||||
sifive_smc0_mtime_update();
|
||||
}
|
||||
|
||||
static struct sbi_system_suspend_device smc0_sys_susp = {
|
||||
.name = "Sifive SMC0",
|
||||
.system_suspend_check = sifive_smc0_system_suspend_check,
|
||||
.system_suspend = sifive_smc0_system_suspend,
|
||||
.system_resume = sifive_smc0_system_resume,
|
||||
};

static int sifive_smc0_probe(const void *fdt, int nodeoff, const struct fdt_match *match)
{
	int rc;
	u64 addr;

	rc = fdt_get_node_addr_size(fdt, nodeoff, 0, &addr, NULL);
	if (rc)
		return rc;

	smc0_base = (unsigned long)addr;
	smc_sync_timer.time_rd = sifive_smc0_time_read;
	smc_sync_timer.mtime_addr = smc0_base + SIFIVE_SMC_CYCLECOUNT_LO_OFF;

	sbi_system_suspend_set_device(&smc0_sys_susp);
	sifive_smc0_set_cg(true);

	return SBI_OK;
}

static const struct fdt_match sifive_smc0_match[] = {
	{ .compatible = "sifive,smc0" },
	{ },
};

const struct fdt_driver fdt_suspend_sifive_smc0 = {
	.match_table = sifive_smc0_match,
	.init = sifive_smc0_probe,
};

@@ -9,3 +9,6 @@

carray-fdt_early_drivers-$(CONFIG_FDT_SUSPEND_RPMI) += fdt_suspend_rpmi
libsbiutils-objs-$(CONFIG_FDT_SUSPEND_RPMI) += suspend/fdt_suspend_rpmi.o

carray-fdt_early_drivers-$(CONFIG_FDT_SUSPEND_SIFIVE_SMC0) += fdt_suspend_sifive_smc0
libsbiutils-objs-$(CONFIG_FDT_SUSPEND_SIFIVE_SMC0) += suspend/fdt_suspend_sifive_smc0.o
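
The carray-fdt_early_drivers-$(...) line does more than compile the driver: it registers it in the generated fdt_early_drivers array that OpenSBI scans during early FDT driver initialization. A rough sketch, assuming a simplified shape for the generated array and walker (fdt_find_match() mirrors OpenSBI's FDT helper; the walker body is illustrative, not the exact library code):

/* Illustrative approximation of the carray produced by the lines above: */
extern const struct fdt_driver fdt_suspend_rpmi;
extern const struct fdt_driver fdt_suspend_sifive_smc0;

static const struct fdt_driver *const fdt_early_drivers[] = {
	&fdt_suspend_rpmi,
	&fdt_suspend_sifive_smc0,
	NULL,
};

/* Simplified walker: match each driver against the FDT and call its
 * init() on the first matching node (error handling elided). */
static void fdt_early_drivers_init(const void *fdt)
{
	for (int i = 0; fdt_early_drivers[i]; i++) {
		const struct fdt_driver *drv = fdt_early_drivers[i];
		const struct fdt_match *m;
		int off = fdt_find_match(fdt, -1, drv->match_table, &m);

		if (off >= 0)
			drv->init(fdt, off, m);
	}
}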

@@ -109,10 +109,34 @@ static struct sbi_timer_device mtimer = {
	.timer_event_stop = mtimer_event_stop
};

struct aclint_mtimer_data *aclint_get_mtimer_data(void)
{
	return mtimer_get_hart_data_ptr(sbi_scratch_thishart_ptr());
}

void aclint_mtimer_update(struct aclint_mtimer_data *mt,
			  struct aclint_mtimer_data *ref)
{
	u64 v1, v2, mv, delta;
	u64 *mt_time_val, *ref_time_val;

	if (!mt || !ref || !mt->time_rd || !mt->time_wr || !ref->time_rd)
		return;

	mt_time_val = (void *)mt->mtime_addr;
	ref_time_val = (void *)ref->mtime_addr;
	if (!atomic_raw_xchg_ulong(&mt->time_delta_computed, 1)) {
		v1 = mt->time_rd(mt_time_val);
		mv = ref->time_rd(ref_time_val);
		v2 = mt->time_rd(mt_time_val);
		delta = mv - ((v1 / 2) + (v2 / 2));
		mt->time_wr(false, mt->time_rd(mt_time_val) + delta,
			    mt_time_val);
	}
}
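
The midpoint trick in aclint_mtimer_update() brackets one read of the reference counter between two reads of the local counter, so (v1 / 2) + (v2 / 2) approximates the local time at the instant mv was sampled; halving each term before adding keeps the sum from overflowing u64, at the cost of one LSB of rounding. A small standalone illustration with made-up counter values:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Made-up values: local counter read before (v1) and after (v2)
	 * a single read of the reference counter (mv). */
	uint64_t v1 = 1000, mv = 501200, v2 = 1400;

	/* Midpoint of v1 and v2 estimates local time when mv was taken;
	 * halving before adding cannot overflow even for huge counts. */
	uint64_t delta = mv - ((v1 / 2) + (v2 / 2));

	printf("delta     = %" PRIu64 "\n", delta);		/* 500000 */
	printf("corrected = %" PRIu64 "\n", v2 + delta);	/* 501400 */
	return 0;
}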

void aclint_mtimer_sync(struct aclint_mtimer_data *mt)
{
	struct aclint_mtimer_data *reference;

	/* Sync-up non-shared MTIME if reference is available */
@@ -120,17 +144,7 @@ void aclint_mtimer_sync(struct aclint_mtimer_data *mt)
		return;

	reference = mt->time_delta_reference;
	aclint_mtimer_update(mt, reference);
}

void aclint_mtimer_set_reference(struct aclint_mtimer_data *mt,

@@ -39,6 +39,7 @@ static int timer_mtimer_cold_init(const void *fdt, int nodeoff,
	struct aclint_mtimer_data *mt;
	const struct timer_mtimer_quirks *quirks = match->data;
	bool is_clint = quirks && quirks->is_clint;
	bool is_ref = false;

	mtn = sbi_zalloc(sizeof(*mtn));
	if (!mtn)
@@ -110,13 +111,16 @@ static int timer_mtimer_cold_init(const void *fdt, int nodeoff,
	}

	/*
	 * If we have a DT property to indicate which MTIMER is the reference,
	 * select the first MTIMER device that has it. Otherwise, select the
	 * first MTIMER device with no associated HARTs as our reference.
	 */
	if (fdt_getprop(fdt, nodeoff, "riscv,reference-mtimer", NULL))
		is_ref = true;
	else if (!mt->hart_count)
		is_ref = true;

	if (is_ref && !mt_reference) {
		mt_reference = mt;
		/*
		 * Set reference for already probed MTIMER devices
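
A valueless DT property works as this flag because fdt_getprop() returns a non-NULL pointer whenever the property exists, even with zero length. An illustrative node (shown as a comment; the node name and other properties are invented):

/*
 *	mtimer@2000000 {
 *		compatible = "riscv,aclint-mtimer";
 *		riscv,reference-mtimer;
 *	};
 */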

@@ -153,8 +157,10 @@ static const struct timer_mtimer_quirks thead_aclint_quirks = {
};

static const struct fdt_match timer_mtimer_match[] = {
	{ .compatible = "mips,p8700-aclint-mtimer" },
	{ .compatible = "riscv,clint0", .data = &sifive_clint_quirks },
	{ .compatible = "sifive,clint0", .data = &sifive_clint_quirks },
	{ .compatible = "sifive,clint2", .data = &sifive_clint_quirks },
	{ .compatible = "thead,c900-clint", .data = &thead_clint_quirks },
	{ .compatible = "thead,c900-aclint-mtimer",
	  .data = &thead_aclint_quirks },

@@ -1,10 +0,0 @@
# SPDX-License-Identifier: BSD-2-Clause

config PLATFORM_ARIANE_FPGA
	bool
	select FDT
	select IPI_MSWI
	select IRQCHIP_PLIC
	select SERIAL_UART8250
	select TIMER_MTIMER
	default y
Some files were not shown because too many files have changed in this diff.