1 Commits

Author SHA1 Message Date
Anup Patel
057eb10b6d lib: utils/gpio: Fix RV32 compile error for designware GPIO driver
Currently, we see the following compile error in the DesignWare GPIO driver
for RV32 systems:

lib/utils/gpio/fdt_gpio_designware.c:115:20: error: cast to pointer from integer of different size [-Werror=int-to-pointer-cast]
  115 |         chip->dr = (void *)addr + (bank * 0xc);
      |                    ^
lib/utils/gpio/fdt_gpio_designware.c:116:21: error: cast to pointer from integer of different size [-Werror=int-to-pointer-cast]
  116 |         chip->ext = (void *)addr + (bank * 4) + 0x50;

We fix the above error using an explicit type-cast to 'unsigned long'.

Fixes: 7828eebaaa ("gpio/desginware: add Synopsys DesignWare APB GPIO support")
Signed-off-by: Anup Patel <apatel@ventanamicro.com>
Reviewed-by: Xiang W <wxjstz@126.com>
2023-07-19 11:51:59 +05:30
354 changed files with 5716 additions and 21046 deletions

View File

@@ -1,21 +0,0 @@
# SPDX-License-Identifier: GPL-2.0-only
# See here for more information about the format and editor support:
# https://editorconfig.org/
root = true
[{*.{c,dts,h,lds,ldS,mk,s,S},Kconfig,Makefile,Makefile.*}]
charset = utf-8
end_of_line = lf
trim_trailing_whitespace = true
insert_final_newline = true
indent_style = tab
indent_size = 8
[*.py]
charset = utf-8
end_of_line = lf
trim_trailing_whitespace = true
insert_final_newline = true
indent_style = space
indent_size = 4

View File

@@ -1,26 +0,0 @@
name: 'Repo Lockdown'
on:
pull_request_target:
types: opened
permissions:
pull-requests: write
jobs:
action:
runs-on: ubuntu-latest
steps:
- uses: dessant/repo-lockdown@v4
with:
pr-comment: |
We have mailing list based patch review so it would be great if you can send these patches to the OpenSBI mailing list.
You need to join OpenSBI mailing list using following link
http://lists.infradead.org/mailman/listinfo/opensbi
Make sure you use "git send-email" to send the patches.
Thanks for your contribution to OpenSBI project.
lock-pr: true
close-pr: true

11
.gitignore vendored
View File

@@ -1,12 +1,3 @@
# ignore anything begin with dot
.*
# exceptions we need even begin with dot
!.clang-format
!.gitignore
!.github
!.editorconfig
# Object files
*.o
*.a
@@ -19,4 +10,4 @@ install/
# Development friendly files
tags
cscope*
*~
*.swp

158
Makefile
View File

@@ -79,7 +79,6 @@ export PYTHONDONTWRITEBYTECODE=1
export KCONFIG_DIR=$(platform_build_dir)/kconfig
export KCONFIG_AUTOLIST=$(KCONFIG_DIR)/auto.list
export KCONFIG_AUTOHEADER=$(KCONFIG_DIR)/autoconf.h
export KCONFIG_AUTOCONFIG=$(KCONFIG_DIR)/auto.conf
export KCONFIG_AUTOCMD=$(KCONFIG_DIR)/auto.conf.cmd
export KCONFIG_CONFIG=$(KCONFIG_DIR)/.config
# Additional exports for include paths in Kconfig files
@@ -91,29 +90,14 @@ endif
# Find library version
OPENSBI_VERSION_MAJOR=`grep "define OPENSBI_VERSION_MAJOR" $(include_dir)/sbi/sbi_version.h | sed 's/.*MAJOR.*\([0-9][0-9]*\)/\1/'`
OPENSBI_VERSION_MINOR=`grep "define OPENSBI_VERSION_MINOR" $(include_dir)/sbi/sbi_version.h | sed 's/.*MINOR.*\([0-9][0-9]*\)/\1/'`
OPENSBI_VERSION_GIT=
# Detect 'git' presence before issuing 'git' commands
GIT_AVAIL := $(shell command -v git 2> /dev/null)
ifneq ($(GIT_AVAIL),)
GIT_DIR := $(shell git rev-parse --git-dir 2> /dev/null)
ifneq ($(GIT_DIR),)
OPENSBI_VERSION_GIT := $(shell if [ -d $(GIT_DIR) ]; then git describe 2> /dev/null; fi)
endif
endif
OPENSBI_VERSION_GIT=$(shell if [ -d $(src_dir)/.git ]; then git describe 2> /dev/null; fi)
# Setup compilation commands
ifneq ($(LLVM),)
ifneq ($(filter %/,$(LLVM)),)
LLVM_PREFIX := $(LLVM)
else ifneq ($(filter -%,$(LLVM)),)
LLVM_SUFFIX := $(LLVM)
endif
CC = $(LLVM_PREFIX)clang$(LLVM_SUFFIX)
AR = $(LLVM_PREFIX)llvm-ar$(LLVM_SUFFIX)
LD = $(LLVM_PREFIX)ld.lld$(LLVM_SUFFIX)
OBJCOPY = $(LLVM_PREFIX)llvm-objcopy$(LLVM_SUFFIX)
CC = clang
AR = llvm-ar
LD = ld.lld
OBJCOPY = llvm-objcopy
else
ifdef CROSS_COMPILE
CC = $(CROSS_COMPILE)gcc
@@ -180,32 +164,14 @@ else
USE_LD_FLAG = -fuse-ld=bfd
endif
REPRODUCIBLE ?= n
ifeq ($(REPRODUCIBLE),y)
REPRODUCIBLE_FLAGS += -ffile-prefix-map=$(src_dir)=
endif
# Check whether the linker supports creating PIEs
OPENSBI_LD_PIE := $(shell $(CC) $(CLANG_TARGET) $(RELAX_FLAG) $(USE_LD_FLAG) -fPIE -nostdlib -Wl,-pie -x c /dev/null -o /dev/null >/dev/null 2>&1 && echo y || echo n)
# Check whether the linker supports --exclude-libs
OPENSBI_LD_EXCLUDE_LIBS := $(shell $(CC) $(CLANG_TARGET) $(RELAX_FLAG) $(USE_LD_FLAG) "-Wl,--exclude-libs,ALL" -x c /dev/null -o /dev/null >/dev/null 2>&1 && echo y || echo n)
# Check whether the compiler supports -m(no-)save-restore
CC_SUPPORT_SAVE_RESTORE := $(shell $(CC) $(CLANG_TARGET) $(RELAX_FLAG) -nostdlib -mno-save-restore -x c /dev/null -o /dev/null 2>&1 | grep -e "-save-restore" >/dev/null && echo n || echo y)
# Check whether the compiler supports -m(no-)strict-align
CC_SUPPORT_STRICT_ALIGN := $(shell $(CC) $(CLANG_TARGET) $(RELAX_FLAG) -nostdlib -mstrict-align -x c /dev/null -o /dev/null 2>&1 | grep -e "-mstrict-align" -e "-mno-unaligned-access" >/dev/null && echo n || echo y)
CC_SUPPORT_SAVE_RESTORE := $(shell $(CC) $(CLANG_TARGET) $(RELAX_FLAG) -nostdlib -mno-save-restore -x c /dev/null -o /dev/null 2>&1 | grep "\-save\-restore" >/dev/null && echo n || echo y)
# Check whether the assembler and the compiler support the Zicsr and Zifencei extensions
CC_SUPPORT_ZICSR_ZIFENCEI := $(shell $(CC) $(CLANG_TARGET) $(RELAX_FLAG) -nostdlib -march=rv$(OPENSBI_CC_XLEN)imafd_zicsr_zifencei -x c /dev/null -o /dev/null 2>&1 | grep -e "zicsr" -e "zifencei" > /dev/null && echo n || echo y)
# Check whether the assembler and the compiler support the Vector extension
CC_SUPPORT_VECTOR := $(shell $(CC) $(CLANG_TARGET) $(RELAX_FLAG) -nostdlib -march=rv$(OPENSBI_CC_XLEN)gv -dM -E -x c /dev/null 2>&1 | grep -q riscv.*vector && echo y || echo n)
ifneq ($(OPENSBI_LD_PIE),y)
$(error Your linker does not support creating PIEs, opensbi requires this.)
endif
CC_SUPPORT_ZICSR_ZIFENCEI := $(shell $(CC) $(CLANG_TARGET) $(RELAX_FLAG) -nostdlib -march=rv$(OPENSBI_CC_XLEN)imafd_zicsr_zifencei -x c /dev/null -o /dev/null 2>&1 | grep "zicsr\|zifencei" > /dev/null && echo n || echo y)
# Build Info:
# OPENSBI_BUILD_TIME_STAMP -- the compilation time stamp
@@ -213,18 +179,16 @@ endif
BUILD_INFO ?= n
ifeq ($(BUILD_INFO),y)
OPENSBI_BUILD_DATE_FMT = +%Y-%m-%d %H:%M:%S %z
ifndef OPENSBI_BUILD_TIME_STAMP
ifdef SOURCE_DATE_EPOCH
OPENSBI_BUILD_TIME_STAMP := $(shell date -u -d "@$(SOURCE_DATE_EPOCH)" \
OPENSBI_BUILD_TIME_STAMP ?= $(shell date -u -d "@$(SOURCE_DATE_EPOCH)" \
"$(OPENSBI_BUILD_DATE_FMT)" 2>/dev/null || \
date -u -r "$(SOURCE_DATE_EPOCH)" \
"$(OPENSBI_BUILD_DATE_FMT)" 2>/dev/null || \
date -u "$(OPENSBI_BUILD_DATE_FMT)")
else
OPENSBI_BUILD_TIME_STAMP := $(shell date "$(OPENSBI_BUILD_DATE_FMT)")
OPENSBI_BUILD_TIME_STAMP ?= $(shell date "$(OPENSBI_BUILD_DATE_FMT)")
endif
endif
OPENSBI_BUILD_COMPILER_VERSION := $(shell $(CC) -v 2>&1 | grep ' version ' | \
OPENSBI_BUILD_COMPILER_VERSION=$(shell $(CC) -v 2>&1 | grep ' version ' | \
sed 's/[[:space:]]*$$//')
endif
@@ -246,28 +210,24 @@ ifdef PLATFORM
menuconfig: $(platform_src_dir)/Kconfig $(src_dir)/Kconfig
$(CMD_PREFIX)mkdir -p $(KCONFIG_DIR)
$(CMD_PREFIX)$(src_dir)/scripts/Kconfiglib/menuconfig.py $(src_dir)/Kconfig
$(CMD_PREFIX)$(src_dir)/scripts/Kconfiglib/genconfig.py --header-path $(KCONFIG_AUTOHEADER) --sync-deps $(KCONFIG_DIR) --file-list $(KCONFIG_AUTOLIST) $(src_dir)/Kconfig
.PHONY: savedefconfig
savedefconfig: $(platform_src_dir)/Kconfig $(src_dir)/Kconfig
$(CMD_PREFIX)mkdir -p $(KCONFIG_DIR)
$(CMD_PREFIX)$(src_dir)/scripts/Kconfiglib/savedefconfig.py --kconfig $(src_dir)/Kconfig --out $(KCONFIG_DIR)/defconfig
$(KCONFIG_CONFIG): $(platform_src_dir)/configs/$(PLATFORM_DEFCONFIG)
$(KCONFIG_CONFIG): $(platform_src_dir)/configs/$(PLATFORM_DEFCONFIG) $(platform_src_dir)/Kconfig $(src_dir)/Kconfig
$(CMD_PREFIX)mkdir -p $(KCONFIG_DIR)
$(CMD_PREFIX)$(src_dir)/scripts/Kconfiglib/defconfig.py --kconfig $(src_dir)/Kconfig $(platform_src_dir)/configs/$(PLATFORM_DEFCONFIG)
$(KCONFIG_AUTOCONFIG): $(KCONFIG_CONFIG)
$(CMD_PREFIX)$(src_dir)/scripts/Kconfiglib/genconfig.py --header-path $(KCONFIG_AUTOHEADER) --sync-deps $(KCONFIG_DIR) --file-list $(KCONFIG_AUTOLIST) $(src_dir)/Kconfig
$(KCONFIG_AUTOHEADER): $(KCONFIG_AUTOCONFIG);
$(KCONFIG_AUTOLIST): $(KCONFIG_AUTOCONFIG);
$(KCONFIG_AUTOCMD): $(KCONFIG_AUTOLIST)
$(KCONFIG_AUTOCMD): $(KCONFIG_CONFIG)
$(CMD_PREFIX)mkdir -p $(KCONFIG_DIR)
$(CMD_PREFIX)printf "%s: " $(KCONFIG_CONFIG) > $(KCONFIG_AUTOCMD)
$(CMD_PREFIX)cat $(KCONFIG_AUTOLIST) | tr '\n' ' ' >> $(KCONFIG_AUTOCMD)
include $(KCONFIG_AUTOCONFIG)
include $(KCONFIG_CONFIG)
include $(KCONFIG_AUTOCMD)
endif
@@ -310,9 +270,10 @@ ifndef PLATFORM_RISCV_ABI
endif
ifndef PLATFORM_RISCV_ISA
ifneq ($(PLATFORM_RISCV_TOOLCHAIN_DEFAULT), 1)
PLATFORM_RISCV_ISA := rv$(PLATFORM_RISCV_XLEN)imafdc
ifeq ($(CC_SUPPORT_ZICSR_ZIFENCEI), y)
PLATFORM_RISCV_ISA := $(PLATFORM_RISCV_ISA)_zicsr_zifencei
PLATFORM_RISCV_ISA = rv$(PLATFORM_RISCV_XLEN)imafdc_zicsr_zifencei
else
PLATFORM_RISCV_ISA = rv$(PLATFORM_RISCV_XLEN)imafdc
endif
else
PLATFORM_RISCV_ISA = $(OPENSBI_CC_ISA)
@@ -363,7 +324,6 @@ ifeq ($(BUILD_INFO),y)
GENFLAGS += -DOPENSBI_BUILD_TIME_STAMP="\"$(OPENSBI_BUILD_TIME_STAMP)\""
GENFLAGS += -DOPENSBI_BUILD_COMPILER_VERSION="\"$(OPENSBI_BUILD_COMPILER_VERSION)\""
endif
GENFLAGS += -include $(include_dir)/sbi/sbi_visibility.h
ifdef PLATFORM
GENFLAGS += -include $(KCONFIG_AUTOHEADER)
endif
@@ -371,26 +331,23 @@ GENFLAGS += $(libsbiutils-genflags-y)
GENFLAGS += $(platform-genflags-y)
GENFLAGS += $(firmware-genflags-y)
CFLAGS = -g -Wall -Werror -ffreestanding -nostdlib -fno-stack-protector -fno-strict-aliasing -ffunction-sections -fdata-sections
CFLAGS += -fno-omit-frame-pointer -fno-optimize-sibling-calls
CFLAGS += -fno-asynchronous-unwind-tables -fno-unwind-tables
CFLAGS += $(REPRODUCIBLE_FLAGS)
# Optionally supported flags
ifeq ($(CC_SUPPORT_VECTOR),y)
CFLAGS += -DOPENSBI_CC_SUPPORT_VECTOR
CFLAGS = -g -Wall -Werror -ffreestanding -nostdlib -fno-stack-protector -fno-strict-aliasing
ifneq ($(DEBUG),)
CFLAGS += -O0
else
CFLAGS += -O2
endif
CFLAGS += -fno-omit-frame-pointer -fno-optimize-sibling-calls -mstrict-align
# enable -m(no-)save-restore option by CC_SUPPORT_SAVE_RESTORE
ifeq ($(CC_SUPPORT_SAVE_RESTORE),y)
CFLAGS += -mno-save-restore
endif
ifeq ($(CC_SUPPORT_STRICT_ALIGN),y)
CFLAGS += -mstrict-align
endif
CFLAGS += -mabi=$(PLATFORM_RISCV_ABI) -march=$(PLATFORM_RISCV_ISA)
CFLAGS += -mcmodel=$(PLATFORM_RISCV_CODE_MODEL)
CFLAGS += $(RELAX_FLAG)
CFLAGS += $(GENFLAGS)
CFLAGS += $(platform-cflags-y)
CFLAGS += -fPIE -pie
CFLAGS += -fno-pie -no-pie
CFLAGS += $(firmware-cflags-y)
CPPFLAGS += $(GENFLAGS)
@@ -398,16 +355,11 @@ CPPFLAGS += $(platform-cppflags-y)
CPPFLAGS += $(firmware-cppflags-y)
ASFLAGS = -g -Wall -nostdlib
ASFLAGS += -fno-omit-frame-pointer -fno-optimize-sibling-calls
ASFLAGS += -fPIE
ASFLAGS += $(REPRODUCIBLE_FLAGS)
# Optionally supported flags
ASFLAGS += -fno-omit-frame-pointer -fno-optimize-sibling-calls -mstrict-align
# enable -m(no-)save-restore option by CC_SUPPORT_SAVE_RESTORE
ifeq ($(CC_SUPPORT_SAVE_RESTORE),y)
ASFLAGS += -mno-save-restore
endif
ifeq ($(CC_SUPPORT_STRICT_ALIGN),y)
ASFLAGS += -mstrict-align
endif
ASFLAGS += -mabi=$(PLATFORM_RISCV_ABI) -march=$(PLATFORM_RISCV_ISA)
ASFLAGS += -mcmodel=$(PLATFORM_RISCV_CODE_MODEL)
ASFLAGS += $(RELAX_FLAG)
@@ -423,12 +375,7 @@ ASFLAGS += $(firmware-asflags-y)
ARFLAGS = rcs
ELFFLAGS += $(USE_LD_FLAG)
ELFFLAGS += -Wl,--gc-sections
ifeq ($(OPENSBI_LD_EXCLUDE_LIBS),y)
ELFFLAGS += -Wl,--exclude-libs,ALL
endif
ELFFLAGS += -Wl,--build-id=none
ELFFLAGS += -Wl,--no-dynamic-linker -Wl,-pie
ELFFLAGS += $(platform-ldflags-y)
ELFFLAGS += $(firmware-ldflags-y)
@@ -442,13 +389,6 @@ MERGEFLAGS += -m elf$(PLATFORM_RISCV_XLEN)lriscv
DTSCPPFLAGS = $(CPPFLAGS) -nostdinc -nostdlib -fno-builtin -D__DTS__ -x assembler-with-cpp
ifneq ($(DEBUG),)
CFLAGS += -O0
ELFFLAGS += -Wl,--print-gc-sections
else
CFLAGS += -O2
endif
# Setup functions for compilation
define dynamic_flags
-I$(shell dirname $(2)) -D__OBJNAME__=$(subst -,_,$(shell basename $(1) .o))
@@ -523,8 +463,8 @@ compile_d2c = $(CMD_PREFIX)mkdir -p `dirname $(1)`; \
$(src_dir)/scripts/d2c.sh -i $(6) -a $(D2C_ALIGN_BYTES) -p $(D2C_NAME_PREFIX) -t $(D2C_PADDING_BYTES) > $(1)
compile_carray = $(CMD_PREFIX)mkdir -p `dirname $(1)`; \
echo " CARRAY $(subst $(build_dir)/,,$(1))"; \
$(eval CARRAY_VAR_LIST := $(carray-$(subst .carray.c,,$(shell basename $(1)))-y)) \
$(src_dir)/scripts/carray.sh -i $(2) -l "$(CARRAY_VAR_LIST)" > $(1) || rm $(1)
$(eval CARRAY_VAR_LIST := $(carray-$(subst .c,,$(shell basename $(1)))-y)) \
$(src_dir)/scripts/carray.sh -i $(2) -l "$(CARRAY_VAR_LIST)" > $(1)
compile_gen_dep = $(CMD_PREFIX)mkdir -p `dirname $(1)`; \
echo " GEN-DEP $(subst $(build_dir)/,,$(1))"; \
echo "$(1:.dep=$(2)): $(3)" >> $(1)
@@ -549,14 +489,14 @@ $(build_dir)/lib/libsbi.a: $(libsbi-objs-path-y)
$(platform_build_dir)/lib/libplatsbi.a: $(libsbi-objs-path-y) $(libsbiutils-objs-path-y) $(platform-objs-path-y)
$(call compile_ar,$@,$^)
$(build_dir)/%.dep: $(src_dir)/%.carray $(KCONFIG_AUTOHEADER)
$(call compile_gen_dep,$@,.c,$< $(KCONFIG_AUTOHEADER))
$(build_dir)/%.dep: $(src_dir)/%.carray $(KCONFIG_CONFIG)
$(call compile_gen_dep,$@,.c,$< $(KCONFIG_CONFIG))
$(call compile_gen_dep,$@,.o,$(@:.dep=.c))
$(build_dir)/%.carray.c: $(src_dir)/%.carray $(src_dir)/scripts/carray.sh
$(build_dir)/%.c: $(src_dir)/%.carray
$(call compile_carray,$@,$<)
$(build_dir)/%.dep: $(src_dir)/%.c $(KCONFIG_AUTOHEADER)
$(build_dir)/%.dep: $(src_dir)/%.c $(KCONFIG_CONFIG)
$(call compile_cc_dep,$@,$<)
$(build_dir)/%.o: $(src_dir)/%.c
@@ -570,24 +510,24 @@ $(build_dir)/lib/sbi/sbi_init.o: $(libsbi_dir)/sbi_init.c FORCE
$(call compile_cc,$@,$<)
endif
$(build_dir)/%.dep: $(src_dir)/%.S $(KCONFIG_AUTOHEADER)
$(build_dir)/%.dep: $(src_dir)/%.S $(KCONFIG_CONFIG)
$(call compile_as_dep,$@,$<)
$(build_dir)/%.o: $(src_dir)/%.S
$(call compile_as,$@,$<)
# Rules for platform sources
$(platform_build_dir)/%.dep: $(platform_src_dir)/%.carray $(KCONFIG_AUTOHEADER)
$(call compile_gen_dep,$@,.c,$< $(KCONFIG_AUTOHEADER))
$(platform_build_dir)/%.dep: $(platform_src_dir)/%.carray $(KCONFIG_CONFIG)
$(call compile_gen_dep,$@,.c,$< $(KCONFIG_CONFIG))
$(call compile_gen_dep,$@,.o,$(@:.dep=.c))
$(platform_build_dir)/%.carray.c: $(platform_src_dir)/%.carray $(src_dir)/scripts/carray.sh
$(platform_build_dir)/%.c: $(platform_src_dir)/%.carray
$(call compile_carray,$@,$<)
$(platform_build_dir)/%.dep: $(platform_src_dir)/%.c $(KCONFIG_AUTOHEADER)
$(platform_build_dir)/%.dep: $(platform_src_dir)/%.c $(KCONFIG_CONFIG)
$(call compile_cc_dep,$@,$<)
$(platform_build_dir)/%.o: $(platform_src_dir)/%.c $(KCONFIG_AUTOHEADER)
$(platform_build_dir)/%.o: $(platform_src_dir)/%.c $(KCONFIG_CONFIG)
$(call compile_cc,$@,$<)
$(platform_build_dir)/%.dep: $(platform_src_dir)/%.S
@@ -596,8 +536,8 @@ $(platform_build_dir)/%.dep: $(platform_src_dir)/%.S
$(platform_build_dir)/%.o: $(platform_src_dir)/%.S
$(call compile_as,$@,$<)
$(platform_build_dir)/%.dep: $(platform_src_dir)/%.dts $(KCONFIG_AUTOHEADER)
$(call compile_gen_dep,$@,.dtb,$< $(KCONFIG_AUTOHEADER))
$(platform_build_dir)/%.dep: $(platform_src_dir)/%.dts $(KCONFIG_CONFIG)
$(call compile_gen_dep,$@,.dtb,$< $(KCONFIG_CONFIG))
$(call compile_gen_dep,$@,.c,$(@:.dep=.dtb))
$(call compile_gen_dep,$@,.o,$(@:.dep=.c))
@@ -614,26 +554,26 @@ $(platform_build_dir)/%.bin: $(platform_build_dir)/%.elf
$(platform_build_dir)/%.elf: $(platform_build_dir)/%.o $(platform_build_dir)/%.elf.ld $(platform_build_dir)/lib/libplatsbi.a
$(call compile_elf,$@,$@.ld,$< $(platform_build_dir)/lib/libplatsbi.a)
$(platform_build_dir)/%.dep: $(src_dir)/%.ldS $(KCONFIG_AUTOHEADER)
$(platform_build_dir)/%.dep: $(src_dir)/%.ldS $(KCONFIG_CONFIG)
$(call compile_cpp_dep,$@,.ld,$<)
$(platform_build_dir)/%.ld: $(src_dir)/%.ldS
$(call compile_cpp,$@,$<)
$(platform_build_dir)/%.dep: $(src_dir)/%.carray $(KCONFIG_AUTOHEADER)
$(call compile_gen_dep,$@,.c,$< $(KCONFIG_AUTOHEADER))
$(platform_build_dir)/%.dep: $(src_dir)/%.carray $(KCONFIG_CONFIG)
$(call compile_gen_dep,$@,.c,$< $(KCONFIG_CONFIG))
$(call compile_gen_dep,$@,.o,$(@:.dep=.c))
$(platform_build_dir)/%.carray.c: $(src_dir)/%.carray $(src_dir)/scripts/carray.sh
$(platform_build_dir)/%.c: $(src_dir)/%.carray
$(call compile_carray,$@,$<)
$(platform_build_dir)/%.dep: $(src_dir)/%.c $(KCONFIG_AUTOHEADER)
$(platform_build_dir)/%.dep: $(src_dir)/%.c $(KCONFIG_CONFIG)
$(call compile_cc_dep,$@,$<)
$(platform_build_dir)/%.o: $(src_dir)/%.c
$(call compile_cc,$@,$<)
$(platform_build_dir)/%.dep: $(src_dir)/%.S $(KCONFIG_AUTOHEADER)
$(platform_build_dir)/%.dep: $(src_dir)/%.S $(KCONFIG_CONFIG)
$(call compile_as_dep,$@,$<)
$(platform_build_dir)/%.o: $(src_dir)/%.S
@@ -721,8 +661,6 @@ clean:
$(CMD_PREFIX)mkdir -p $(build_dir)
$(if $(V), @echo " RM $(build_dir)/*.o")
$(CMD_PREFIX)find $(build_dir) -type f -name "*.o" -exec rm -rf {} +
$(if $(V), @echo " RM $(build_dir)/*.carray.c")
$(CMD_PREFIX)find $(build_dir) -type f -name "*.carray.c" -exec rm -rf {} +
$(if $(V), @echo " RM $(build_dir)/*.a")
$(CMD_PREFIX)find $(build_dir) -type f -name "*.a" -exec rm -rf {} +
$(if $(V), @echo " RM $(build_dir)/*.elf")

View File

@@ -99,7 +99,7 @@ capable enough to bring up all other non-booting harts using HSM extension.
Required Toolchain and Packages
-------------------------------
OpenSBI can be compiled natively or cross-compiled on a host machine. For
OpenSBI can be compiled natively or cross-compiled on a x86 host. For
cross-compilation, you can build your own toolchain, download a prebuilt one
from the [Bootlin toolchain repository] or install a distribution-provided
toolchain; if you opt to use LLVM/Clang, most distribution toolchains will
@@ -108,12 +108,16 @@ LLVM/Clang toolchain due to LLVM's ability to support multiple backends in the
same binary, so is often an easy way to obtain a working cross-compilation
toolchain.
Toolchains with Position Independent Executable (PIE) support like
*riscv64-linux-gnu-gcc*, *riscv64-unknown-freebsd-gcc*, or *Clang/LLVM* are
required in order to generate PIE firmware images that can run at arbitrary
address with appropriate alignment. Bare-metal GNU toolchains (e.g.
*riscv64-unknown-elf-gcc*) cannot be used. *Clang/LLVM* can still generate PIE
images if a bare-metal triple is used (e.g. *-target riscv64-unknown-elf*).
Basically, we prefer toolchains with Position Independent Executable (PIE)
support like *riscv64-linux-gnu-gcc*, *riscv64-unknown-freebsd-gcc*, or
*Clang/LLVM* as they generate PIE firmware images that can run at arbitrary
address with appropriate alignment. If a bare-metal GNU toolchain (e.g.
*riscv64-unknown-elf-gcc*) is used, static linked firmware images are
generated instead. *Clang/LLVM* can still generate PIE images if a bare-metal
triple is used (e.g. *-target riscv64-unknown-elf*).
Please note that only a 64-bit version of the toolchain is available in
the Bootlin toolchain repository for now.
In addition to a toolchain, OpenSBI also requires the following packages on
the host:
@@ -252,18 +256,6 @@ option with:
make LLVM=1
```
To build with a specific version of LLVM, a path to a directory containing the
LLVM tools can be provided:
```
make LLVM=/path/to/llvm/
```
If you have versioned llvm tools you would like to use, such as `clang-17`, the LLVM variable can
be set as:
```
make LLVM=-17
```
When using Clang, *CROSS_COMPILE* often does not need to be defined unless
using GNU binutils with prefixed binary names. *PLATFORM_RISCV_XLEN* will be
used to infer a default triple to pass to Clang, so if *PLATFORM_RISCV_XLEN*
@@ -284,7 +276,8 @@ document.
NOTE: Using Clang with a `riscv*-linux-gnu` GNU binutils linker has been seen
to produce broken binaries with missing relocations; it is therefore currently
recommended that this combination be avoided.
recommended that this combination be avoided or *FW_PIC=n* be used to disable
building OpenSBI as a position-independent binary.
Building with timestamp and compiler info
-----------------------------------------

View File

@@ -1,15 +1,17 @@
OpenSBI Contribution Guideline
==============================
All contributions to OpenSBI must be sent via email patches to the OpenSBI
mailing list at `opensbi@lists.infradead.org`
All contributions to OpenSBI can be sent in the following ways:
1. Email patches to the OpenSBI mailing list at `opensbi@lists.infradead.org`
2. GitHub Pull Requests (PRs) to the [OpenSBI main repository]
To join the OpenSBI mailing list, please visit the [OpenSBI infradead page].
The mailing list based patch approach is preferred over github PRs so that they
are visible to a wider audience. All accepted patches on the OpenSBI mailing
list will be taken by one of the OpenSBI maintainers and merged into the
[OpenSBI main repository].
The OpenSBI maintainers prefer patches via the OpenSBI mailing list
(option 1 above) so that they are visible to a wider audience. All
accepted patches on the OpenSBI mailing list will be taken by any of
the OpenSBI maintainers and merged into the [OpenSBI main repository]
using GitHub PRs.
All contributed work must follow the following rules:
1. OpenSBI code should be written in accordance to the [Linux coding style].
@@ -31,6 +33,9 @@ riscv/opensbi repository. Instead, prefer using a fork of the riscv/opensbi main
repository and branches within that fork to create pull requests.
7. A maintainer cannot merge his own pull requests in the riscv/opensbi main
repository.
8. A pull request must get at least one review from a maintainer.
9. A pull request must spend at least 24 hours in review to allow for other
developers to review.
-----------------------------------------------------------------------

View File

@@ -126,6 +126,9 @@ The DT properties of a domain configuration DT node are as follows:
* **compatible** (Mandatory) - The compatible string of the domain
configuration. This DT property should have value *"opensbi,domain,config"*
* **system-suspend-test** (Optional) - When present, enable a system
suspend test implementation which simply waits five seconds and issues a WFI.
### Domain Memory Region Node
The domain memory region DT node describes details of a memory region and
@@ -178,7 +181,9 @@ The DT properties of a domain instance DT node are as follows:
boot HART of the domain instance.
* **next-arg1** (Optional) - The 64 bit next booting stage arg1 for the
domain instance. If this DT property is not available and coldboot HART
is not assigned to the domain instance then **next booting stage arg1 of coldboot HART**
is not assigned to the domain instance then **0x0** is used as default
value. If this DT property is not available and coldboot HART is assigned
to the domain instance then **next booting stage arg1 of coldboot HART**
is used as default value.
* **next-addr** (Optional) - The 64 bit next booting stage address for the
domain instance. If this DT property is not available and coldboot HART
@@ -232,6 +237,7 @@ be done:
chosen {
opensbi-domains {
compatible = "opensbi,domain,config";
system-suspend-test;
tmem: tmem {
compatible = "opensbi,domain,memregion";

View File

@@ -796,8 +796,6 @@ INPUT = @@SRC_DIR@@/README.md \
@@SRC_DIR@@/docs/platform_requirements.md \
@@SRC_DIR@@/docs/library_usage.md \
@@SRC_DIR@@/docs/domain_support.md \
@@SRC_DIR@@/docs/opensbi_config.md \
@@SRC_DIR@@/docs/writing_tests.md \
@@SRC_DIR@@/docs/firmware \
@@SRC_DIR@@/docs/platform \
@@SRC_DIR@@/include \

View File

@@ -61,15 +61,20 @@ Firmware Configuration and Compilation
All firmware types support the following common compile time configuration
parameters:
* **FW_TEXT_START** - Defines the compile time address of the OpenSBI
firmware. This configuration parameter is optional and assumed to be
`0` if not specified.
* **FW_TEXT_START** - Defines the execution address of the OpenSBI firmware.
This configuration parameter is mandatory.
* **FW_FDT_PATH** - Path to an external flattened device tree binary file to
be embedded in the *.rodata* section of the final firmware. If this option
is not provided then the firmware will expect the FDT to be passed as an
argument by the prior booting stage.
* **FW_FDT_PADDING** - Optional zero bytes padding to the embedded flattened
device tree binary file specified by **FW_FDT_PATH** option.
* **FW_PIC** - "FW_PIC=y" generates position independent executable firmware
images. OpenSBI can run at arbitrary address with appropriate alignment.
Therefore, the original relocation mechanism ("FW_PIC=n") will be skipped.
In other words, OpenSBI will directly run at the load address without any
code movement. This option requires a toolchain with PIE support, and it
is on by default.
Additionally, each firmware type has a set of type-specific configuration
parameters. Detailed information for each firmware type can be found in the

View File

@@ -31,14 +31,9 @@ follows:
* **FW_JUMP_ADDR** - Address of the entry point of the booting stage to be
executed following OpenSBI firmware. This address generally corresponds
exactly to the address where this next booting stage was loaded.
At least one of *FW_JUMP_ADDR* and *FW_JUMP_OFFSET* (see below) should be
defined. Compilation errors will result from not defining one of them.
* **FW_JUMP_OFFSET** - Address offset from the opensbi load address where the
entry point of the next booting stage is located. This offset is used as
relocatable address of the next booting stage entry point. If *FW_JUMP_ADDR*
is also defined, the firmware will prefer *FW_JUMP_ADDR*.
exactly to the address where this next booting stage was loaded. This is a
mandatory parameter. Compilation errors will result from not defining this
address.
* **FW_JUMP_FDT_ADDR** - Address where the *flattened device tree (FDT file)*
passed by the prior booting stage will be placed in memory before executing
@@ -62,12 +57,6 @@ follows:
echo fdt overlaps kernel, increase FW_JUMP_FDT_ADDR
```
* **FW_JUMP_FDT_OFFSET** - Address offset from the opensbi load address where
the FDT will be passed to the next booting stage. This offset is used
as relocatable address of the FDT passed to the next booting stage. If
*FW_JUMP_FDT_ADDR* is also defined, the firmware will prefer
*FW_JUMP_FDT_ADDR*.
*FW_JUMP* Example
-----------------

View File

@@ -23,7 +23,7 @@ The *FW_PAYLOAD* firmware can be enabled by any of the following methods:
2. Specifying `FW_PAYLOAD=y` in the target platform *objects.mk* configuration
file.
The compiled *FW_PAYLOAD* firmware ELF file is named *fw_payload.elf*. Its
The compiled *FW_PAYLOAD* firmware ELF file is named *fw_jump.elf*. Its
expanded image file is *fw_payload.bin*. Both files are created in the
platform-specific build directory under the
*build/platform/<platform_subdir>/firmware* directory.
@@ -36,8 +36,8 @@ options. These configuration parameters can be defined using either the top
level `make` command line or the target platform *objects.mk* configuration
file. The parameters currently defined are as follows:
* **FW_PAYLOAD_OFFSET** - Offset from the opensbi load address where the payload
binary will be linked in the final *FW_PAYLOAD* firmware binary image. This
* **FW_PAYLOAD_OFFSET** - Offset from *FW_TEXT_BASE* where the payload binary
will be linked in the final *FW_PAYLOAD* firmware binary image. This
configuration parameter is mandatory if *FW_PAYLOAD_ALIGN* is not defined.
Compilation errors will result from an incorrect definition of
*FW_PAYLOAD_OFFSET* or of *FW_PAYLOAD_ALIGN*, or if neither of these
@@ -62,11 +62,6 @@ file. The parameters currently defined are as follows:
firmware will pass the FDT address passed by the previous booting stage
to the next booting stage.
* **FW_PAYLOAD_FDT_OFFSET** - Address offset from the opensbi load address where
the FDT will be passed to the next booting stage. This offset is used as
relocatable address of the FDT passed to the next booting stage. If
*FW_PAYLOAD_FDT_ADDR* is also defined, the firmware will prefer *FW_PAYLOAD_FDT_ADDR*.
*FW_PAYLOAD* Example
--------------------

View File

@@ -1,91 +0,0 @@
OpenSBI Device Tree Configuration Guideline
==================================
Some configurations of OpenSBI's Generic Platform can be described
in the **device tree (DT) blob** (or flattened device tree) passed
to the OpenSBI firmwares by the previous booting stage. OpenSBI will
parse and use these configurations during the boot phase, but delete
them from the device tree at the end of cold boot.
### OpenSBI Configuration Node
All nodes related to OpenSBI configuration should be under the OpenSBI
configuration DT node. The **/chosen** DT node is the preferred parent
of the OpenSBI configuration DT node.
The DT properties of a domain configuration DT node are as follows:
* **compatible** (Mandatory) - The compatible string of the OpenSBI
configuration. This DT property should have value *"opensbi,config"*
* **cold-boot-harts** (Optional) - If a platform lacks an override
cold_boot_allowed() mechanism, this DT property specifies that a
set of harts is permitted to perform a cold boot. Otherwise, all
harts are allowed to cold boot.
* **heap-size** (Optional) - When present, the specified value is used
as the size of the heap in bytes.
* **system-suspend-test** (Optional) - When present, enable a system
suspend test implementation which simply waits five seconds and issues a WFI.
The OpenSBI Configuration Node will be deleted at the end of cold boot
(replace the node (subtree) with nop tags).
### Example
```text
chosen {
opensbi-config {
compatible = "opensbi,config";
cold-boot-harts = <&cpu1 &cpu2 &cpu3 &cpu4>;
heap-size = <0x400000>;
system-suspend-test;
};
};
cpus {
#address-cells = <1>;
#size-cells = <0>;
timebase-frequency = <10000000>;
cpu0: cpu@0 {
device_type = "cpu";
reg = <0x00>;
compatible = "riscv";
...
};
cpu1: cpu@1 {
device_type = "cpu";
reg = <0x01>;
compatible = "riscv";
...
};
cpu2: cpu@2 {
device_type = "cpu";
reg = <0x02>;
compatible = "riscv";
...
};
cpu3: cpu@3 {
device_type = "cpu";
reg = <0x03>;
compatible = "riscv";
...
};
cpu4: cpu@4 {
device_type = "cpu";
reg = <0x04>;
compatible = "riscv";
...
};
};
uart1: serial@10011000 {
...
};
```

View File

@@ -9,9 +9,10 @@ boards.
By default, the generic FDT platform makes following assumptions:
1. platform features are default
2. platform stack size is default
3. platform has no quirks or work-arounds
1. platform FW_TEXT_START is 0x80000000
2. platform features are default
3. platform stack size is default
4. platform has no quirks or work-arounds
The above assumptions (except 1) can be overridden by adding special platform
callbacks which will be called based on FDT root node compatible string.
@@ -32,6 +33,10 @@ Users of the generic FDT platform will have to ensure that:
To build the platform-specific library and firmware images, provide the
*PLATFORM=generic* parameter to the top level `make` command.
For custom FW_TEXT_START, we can build the platform-specific library and
firmware images by passing *PLATFORM=generic FW_TEXT_START=<custom_text_start>*
parameter to the top level `make` command.
Platform Options
----------------

View File

@@ -56,14 +56,7 @@ qemu-system-riscv64 -M virt -m 256M -nographic \
**Linux Kernel Payload**
Note: We assume that the Linux kernel is compiled using
*arch/riscv/configs/defconfig*. The kernel must be a flattened image (a file
called `Image`) rather than an ELF (`vmlinux`).
Example of building a Linux kernel:
```
make ARCH=riscv CROSS_COMPILE=riscv64-linux- defconfig
make ARCH=riscv CROSS_COMPILE=riscv64-linux- Image
```
*arch/riscv/configs/defconfig*.
Build:
```

View File

@@ -1,7 +1,7 @@
T-HEAD C9xx Series Processors
=============================
The C9xx series processors are high-performance RISC-V architecture
The **C9xx** series processors are high-performance RISC-V architecture
multi-core processors with AI vector acceleration engine.
For more details, refer [T-HEAD.CN](https://www.t-head.cn/)
@@ -12,16 +12,185 @@ To build the platform-specific library and firmware images, provide the
Platform Options
----------------
The T-HEAD C9xx does not have any platform-specific compile options
The *T-HEAD C9xx* does not have any platform-specific compile options
because it uses generic platform.
```
CROSS_COMPILE=riscv64-linux-gnu- PLATFORM=generic make
CROSS_COMPILE=riscv64-linux-gnu- PLATFORM=generic /usr/bin/make
```
Here is the simplest boot flow for a fpga prototype:
The *T-HEAD C9xx* DTB provided to OpenSBI generic firmwares will usually have
"riscv,clint0", "riscv,plic0", "thead,reset-sample" compatible strings.
(Jtag gdbinit) -> (zsb) -> (opensbi) -> (linux)
DTS Example1: (Single core, eg: Allwinner D1 - c906)
----------------------------------------------------
For more details, refer:
[zero stage boot](https://github.com/c-sky/zero_stage_boot)
```
cpus {
#address-cells = <1>;
#size-cells = <0>;
timebase-frequency = <3000000>;
cpu@0 {
device_type = "cpu";
reg = <0>;
status = "okay";
compatible = "riscv";
riscv,isa = "rv64imafdcv";
mmu-type = "riscv,sv39";
cpu0_intc: interrupt-controller {
#interrupt-cells = <1>;
compatible = "riscv,cpu-intc";
interrupt-controller;
};
};
};
soc {
#address-cells = <2>;
#size-cells = <2>;
compatible = "simple-bus";
ranges;
clint0: clint@14000000 {
compatible = "allwinner,sun20i-d1-clint";
interrupts-extended = <
&cpu0_intc 3 &cpu0_intc 7
>;
reg = <0x0 0x14000000 0x0 0x04000000>;
};
intc: interrupt-controller@10000000 {
#interrupt-cells = <1>;
compatible = "allwinner,sun20i-d1-plic",
"thead,c900-plic";
interrupt-controller;
interrupts-extended = <
&cpu0_intc 0xffffffff &cpu0_intc 9
>;
reg = <0x0 0x10000000 0x0 0x04000000>;
reg-names = "control";
riscv,max-priority = <7>;
riscv,ndev = <200>;
};
}
```
DTS Example2: (Multi cores with soc reset-regs)
-----------------------------------------------
```
cpus {
#address-cells = <1>;
#size-cells = <0>;
timebase-frequency = <3000000>;
cpu@0 {
device_type = "cpu";
reg = <0>;
status = "okay";
compatible = "riscv";
riscv,isa = "rv64imafdc";
mmu-type = "riscv,sv39";
cpu0_intc: interrupt-controller {
#interrupt-cells = <1>;
compatible = "riscv,cpu-intc";
interrupt-controller;
};
};
cpu@1 {
device_type = "cpu";
reg = <1>;
status = "fail";
compatible = "riscv";
riscv,isa = "rv64imafdc";
mmu-type = "riscv,sv39";
cpu1_intc: interrupt-controller {
#interrupt-cells = <1>;
compatible = "riscv,cpu-intc";
interrupt-controller;
};
};
cpu@2 {
device_type = "cpu";
reg = <2>;
status = "fail";
compatible = "riscv";
riscv,isa = "rv64imafdc";
mmu-type = "riscv,sv39";
cpu2_intc: interrupt-controller {
#interrupt-cells = <1>;
compatible = "riscv,cpu-intc";
interrupt-controller;
};
};
cpu@3 {
device_type = "cpu";
reg = <3>;
status = "fail";
compatible = "riscv";
riscv,isa = "rv64imafdc";
mmu-type = "riscv,sv39";
cpu3_intc: interrupt-controller {
#interrupt-cells = <1>;
compatible = "riscv,cpu-intc";
interrupt-controller;
};
};
};
soc {
#address-cells = <2>;
#size-cells = <2>;
compatible = "simple-bus";
ranges;
reset: reset-sample {
compatible = "thead,reset-sample";
entry-reg = <0xff 0xff019050>;
entry-cnt = <4>;
control-reg = <0xff 0xff015004>;
control-val = <0x1c>;
csr-copy = <0x7f3 0x7c0 0x7c1 0x7c2 0x7c3 0x7c5 0x7cc>;
};
clint0: clint@ffdc000000 {
compatible = "riscv,clint0";
interrupts-extended = <
&cpu0_intc 3 &cpu0_intc 7
&cpu1_intc 3 &cpu1_intc 7
&cpu2_intc 3 &cpu2_intc 7
&cpu3_intc 3 &cpu3_intc 7
&cpu4_intc 3 &cpu4_intc 7
>;
reg = <0xff 0xdc000000 0x0 0x04000000>;
};
intc: interrupt-controller@ffd8000000 {
#interrupt-cells = <1>;
compatible = "thead,c900-plic";
interrupt-controller;
interrupts-extended = <
&cpu0_intc 0xffffffff &cpu0_intc 9
&cpu1_intc 0xffffffff &cpu1_intc 9
&cpu2_intc 0xffffffff &cpu2_intc 9
&cpu3_intc 0xffffffff &cpu3_intc 9
>;
reg = <0xff 0xd8000000 0x0 0x04000000>;
reg-names = "control";
riscv,max-priority = <7>;
riscv,ndev = <80>;
};
}
```
DTS Example2: (Multi cores with old reset csrs)
-----------------------------------------------
```
reset: reset-sample {
compatible = "thead,reset-sample";
using-csr-reset;
csr-copy = <0x7c0 0x7c1 0x7c2 0x7c3 0x7c5 0x7cc
0x3b0 0x3b1 0x3b2 0x3b3
0x3b4 0x3b5 0x3b6 0x3b7
0x3a0>;
};
```

View File

@@ -18,11 +18,7 @@ Base Platform Requirements
The base RISC-V platform requirements for OpenSBI are as follows:
1. At least rv32ima_zicsr or rv64ima_zicsr required on all HARTs
* Users may restrict the usage of atomic instructions to lr/sc
via rv32im_zalrsc_zicsr or rv64im_zalrsc_zicsr if preferred
1. At least rv32ima or rv64ima required on all HARTs
2. At least one HART should have S-mode support because:
* SBI calls are meant for RISC-V S-mode (Supervisor mode)
@@ -37,7 +33,7 @@ The base RISC-V platform requirements for OpenSBI are as follows:
6. Hardware support for injecting M-mode software interrupts on
a multi-HART platform
The RISC-V extensions not covered by rv32ima_zicsr or rv64ima_zicsr are optional
The RISC-V extensions not covered by rv32ima or rv64ima are optional
for OpenSBI. Although, OpenSBI will detect and handle some of these
optional RISC-V extensions at runtime.

View File

@@ -74,10 +74,10 @@ pmu {
<0x10000 0x10033 0x000ff000>;
/* For event ID 0x0002 */
riscv,raw-event-to-mhpmcounters = <0x0000 0x0002 0xffffffff 0xffffffff 0x00000f8>,
/* For event ID 0-15 */
/* For event ID 0-4 */
<0x0 0x0 0xffffffff 0xfffffff0 0x00000ff0>,
/* For event ID 0xffffffff0000000f - 0xffffffff000000ff */
<0xffffffff 0xf 0xffffffff 0xffffff0f 0x00000ff0>;
<0xffffffff 0x0 0xffffffff 0xffffff0f 0x00000ff0>;
};
```
@@ -125,85 +125,3 @@ pmu {
<0x0 0x2 0xffffffff 0xffffe0ff 0x18>;
};
```
### Example 3
```
/*
* For Andes 45-series platforms. The encodings can be found in the
* "Machine Performance Monitoring Event Selector" section
* http://www.andestech.com/wp-content/uploads/AX45MP-1C-Rev.-5.0.0-Datasheet.pdf
*/
pmu {
compatible = "riscv,pmu";
riscv,event-to-mhpmevent =
<0x1 0x0000 0x10>, /* CPU_CYCLES -> Cycle count */
<0x2 0x0000 0x20>, /* INSTRUCTIONS -> Retired instruction count */
<0x3 0x0000 0x41>, /* CACHE_REFERENCES -> D-Cache access */
<0x4 0x0000 0x51>, /* CACHE_MISSES -> D-Cache miss */
<0x5 0x0000 0x80>, /* BRANCH_INSTRUCTIONS -> Conditional branch instruction count */
<0x6 0x0000 0x02>, /* BRANCH_MISSES -> Misprediction of conditional branches */
<0x10000 0x0000 0x61>, /* L1D_READ_ACCESS -> D-Cache load access */
<0x10001 0x0000 0x71>, /* L1D_READ_MISS -> D-Cache load miss */
<0x10002 0x0000 0x81>, /* L1D_WRITE_ACCESS -> D-Cache store access */
<0x10003 0x0000 0x91>, /* L1D_WRITE_MISS -> D-Cache store miss */
<0x10008 0x0000 0x21>, /* L1I_READ_ACCESS -> I-Cache access */
<0x10009 0x0000 0x31>; /* L1I_READ_MISS -> I-Cache miss */
riscv,event-to-mhpmcounters = <0x1 0x6 0x78>,
<0x10000 0x10003 0x78>,
<0x10008 0x10009 0x78>;
riscv,raw-event-to-mhpmcounters =
<0x0 0x10 0xffffffff 0xffffffff 0x78>, /* Cycle count */
<0x0 0x20 0xffffffff 0xffffffff 0x78>, /* Retired instruction count */
<0x0 0x30 0xffffffff 0xffffffff 0x78>, /* Integer load instruction count */
<0x0 0x40 0xffffffff 0xffffffff 0x78>, /* Integer store instruction count */
<0x0 0x50 0xffffffff 0xffffffff 0x78>, /* Atomic instruction count */
<0x0 0x60 0xffffffff 0xffffffff 0x78>, /* System instruction count */
<0x0 0x70 0xffffffff 0xffffffff 0x78>, /* Integer computational instruction count */
<0x0 0x80 0xffffffff 0xffffffff 0x78>, /* Conditional branch instruction count */
<0x0 0x90 0xffffffff 0xffffffff 0x78>, /* Taken conditional branch instruction count */
<0x0 0xA0 0xffffffff 0xffffffff 0x78>, /* JAL instruction count */
<0x0 0xB0 0xffffffff 0xffffffff 0x78>, /* JALR instruction count */
<0x0 0xC0 0xffffffff 0xffffffff 0x78>, /* Return instruction count */
<0x0 0xD0 0xffffffff 0xffffffff 0x78>, /* Control transfer instruction count */
<0x0 0xE0 0xffffffff 0xffffffff 0x78>, /* EXEC.IT instruction count */
<0x0 0xF0 0xffffffff 0xffffffff 0x78>, /* Integer multiplication instruction count */
<0x0 0x100 0xffffffff 0xffffffff 0x78>, /* Integer division instruction count */
<0x0 0x110 0xffffffff 0xffffffff 0x78>, /* Floating-point load instruction count */
<0x0 0x120 0xffffffff 0xffffffff 0x78>, /* Floating-point store instruction count */
<0x0 0x130 0xffffffff 0xffffffff 0x78>, /* Floating-point addition/subtraction instruction count */
<0x0 0x140 0xffffffff 0xffffffff 0x78>, /* Floating-point multiplication instruction count */
<0x0 0x150 0xffffffff 0xffffffff 0x78>, /* Floating-point fused multiply-add instruction count */
<0x0 0x160 0xffffffff 0xffffffff 0x78>, /* Floating-point division or square-root instruction count */
<0x0 0x170 0xffffffff 0xffffffff 0x78>, /* Other floating-point instruction count */
<0x0 0x180 0xffffffff 0xffffffff 0x78>, /* Integer multiplication and add/sub instruction count */
<0x0 0x190 0xffffffff 0xffffffff 0x78>, /* Retired operation count */
<0x0 0x01 0xffffffff 0xffffffff 0x78>, /* ILM access */
<0x0 0x11 0xffffffff 0xffffffff 0x78>, /* DLM access */
<0x0 0x21 0xffffffff 0xffffffff 0x78>, /* I-Cache access */
<0x0 0x31 0xffffffff 0xffffffff 0x78>, /* I-Cache miss */
<0x0 0x41 0xffffffff 0xffffffff 0x78>, /* D-Cache access */
<0x0 0x51 0xffffffff 0xffffffff 0x78>, /* D-Cache miss */
<0x0 0x61 0xffffffff 0xffffffff 0x78>, /* D-Cache load access */
<0x0 0x71 0xffffffff 0xffffffff 0x78>, /* D-Cache load miss */
<0x0 0x81 0xffffffff 0xffffffff 0x78>, /* D-Cache store access */
<0x0 0x91 0xffffffff 0xffffffff 0x78>, /* D-Cache store miss */
<0x0 0xA1 0xffffffff 0xffffffff 0x78>, /* D-Cache writeback */
<0x0 0xB1 0xffffffff 0xffffffff 0x78>, /* Cycles waiting for I-Cache fill data */
<0x0 0xC1 0xffffffff 0xffffffff 0x78>, /* Cycles waiting for D-Cache fill data */
<0x0 0xD1 0xffffffff 0xffffffff 0x78>, /* Uncached fetch data access from bus */
<0x0 0xE1 0xffffffff 0xffffffff 0x78>, /* Uncached load data access from bus */
<0x0 0xF1 0xffffffff 0xffffffff 0x78>, /* Cycles waiting for uncached fetch data from bus */
<0x0 0x101 0xffffffff 0xffffffff 0x78>, /* Cycles waiting for uncached load data from bus */
<0x0 0x111 0xffffffff 0xffffffff 0x78>, /* Main ITLB access */
<0x0 0x121 0xffffffff 0xffffffff 0x78>, /* Main ITLB miss */
<0x0 0x131 0xffffffff 0xffffffff 0x78>, /* Main DTLB access */
<0x0 0x141 0xffffffff 0xffffffff 0x78>, /* Main DTLB miss */
<0x0 0x151 0xffffffff 0xffffffff 0x78>, /* Cycles waiting for Main ITLB fill data */
<0x0 0x161 0xffffffff 0xffffffff 0x78>, /* Pipeline stall cycles caused by Main DTLB miss */
<0x0 0x171 0xffffffff 0xffffffff 0x78>, /* Hardware prefetch bus access */
<0x0 0x02 0xffffffff 0xffffffff 0x78>, /* Misprediction of conditional branches */
<0x0 0x12 0xffffffff 0xffffffff 0x78>, /* Misprediction of taken conditional branches */
<0x0 0x22 0xffffffff 0xffffffff 0x78>; /* Misprediction of targets of Return instructions */
};
```

View File

@@ -1,127 +0,0 @@
Writing tests for OpenSBI
=========================
SBIUnit
-------
SBIUnit is a set of macros and functions which simplify the test development and
automate the test execution and evaluation. All of the SBIUnit definitions are
in the `include/sbi/sbi_unit_test.h` header file, and implementations are
available in `lib/sbi/tests/sbi_unit_test.c`.
Simple SBIUnit test
-------------------
For instance, we would like to test the following function from
`lib/sbi/sbi_string.c`:
```c
size_t sbi_strlen(const char *str)
{
unsigned long ret = 0;
while (*str != '\0') {
ret++;
str++;
}
return ret;
}
```
which calculates the string length.
Create the file `lib/sbi/tests/sbi_string_test.c` with the following content:
```c
#include <sbi/sbi_unit_test.h>
#include <sbi/sbi_string.h>
static void strlen_test(struct sbiunit_test_case *test)
{
SBIUNIT_EXPECT_EQ(test, sbi_strlen("Hello"), 5);
SBIUNIT_EXPECT_EQ(test, sbi_strlen("Hell\0o"), 4);
}
static struct sbiunit_test_case string_test_cases[] = {
SBIUNIT_TEST_CASE(strlen_test),
SBIUNIT_END_CASE,
};
SBIUNIT_TEST_SUITE(string_test_suite, string_test_cases);
```
Then, add the corresponding Makefile entries to `lib/sbi/tests/objects.mk`:
```lang-makefile
...
carray-sbi_unit_tests-$(CONFIG_SBIUNIT) += string_test_suite
libsbi-objs-$(CONFIG_SBIUNIT) += tests/sbi_string_test.o
```
Now, run `make clean` in order to regenerate the carray-related files.
Recompile OpenSBI with the CONFIG_SBIUNIT option enabled and run it in QEMU.
You will see something like this:
```
# make PLATFORM=generic run
...
# Running SBIUNIT tests #
...
## Running test suite: string_test_suite
[PASSED] strlen_test
1 PASSED / 0 FAILED / 1 TOTAL
```
Now let's try to change this test in the way that it will fail:
```c
- SBIUNIT_EXPECT_EQ(test, sbi_strlen("Hello"), 5);
+ SBIUNIT_EXPECT_EQ(test, sbi_strlen("Hello"), 100);
```
`make all` and `make run` it again:
```
...
# Running SBIUNIT tests #
...
## Running test suite: string_test_suite
[SBIUnit] [.../opensbi/lib/sbi/tests/sbi_string_test.c:6]: strlen_test: Condition "(sbi_strlen("Hello")) == (100)" expected to be true!
[FAILED] strlen_test
0 PASSED / 1 FAILED / 1 TOTAL
```
Covering the static functions / using the static definitions
------------------------------------------------------------
SBIUnit also allows you to test static functions. In order to do so, simply
include your test source in the file you would like to test. Complementing the
example above, just add this to the `lib/sbi/sbi_string.c` file:
```c
#ifdef CONFIG_SBIUNIT
#include "tests/sbi_string_test.c"
#endif
```
In this case you should only add a new carray entry pointing to the test suite
to `lib/sbi/tests/objects.mk`:
```lang-makefile
...
carray-sbi_unit_tests-$(CONFIG_SBIUNIT) += string_test_suite
```
You don't have to compile the `sbi_string_test.o` separately, because the
test code will be included into the `sbi_string` object file.
"Mocking" the structures
------------------------
See the example of structure "mocking" in `lib/sbi/tests/sbi_console_test.c`,
where the sbi_console_device structure was mocked to be used in various
console-related functions in order to test them.
API Reference
-------------
All of the `SBIUNIT_EXPECT_*` macros will cause a test case to fail if the
corresponding conditions are not met, however, the execution of a particular
test case will not be stopped.
All of the `SBIUNIT_ASSERT_*` macros will cause a test case to fail and stop
immediately, triggering a panic.

View File

@@ -1,28 +1 @@
# SPDX-License-Identifier: BSD-2-Clause
menu "Stack Protector Support"
config STACK_PROTECTOR
bool "Stack Protector buffer overflow detection"
default n
help
This option turns on the "stack-protector" compiler feature.
config STACK_PROTECTOR_STRONG
bool "Strong Stack Protector"
depends on STACK_PROTECTOR
default n
help
Turn on the "stack-protector" with "-fstack-protector-strong" option.
Like -fstack-protector but includes additional functions to be
protected.
config STACK_PROTECTOR_ALL
bool "Almighty Stack Protector"
depends on STACK_PROTECTOR
default n
help
Turn on the "stack-protector" with "-fstack-protector-all" option.
Like -fstack-protector except that all functions are protected.
endmenu

View File

@@ -14,8 +14,8 @@
#include <sbi/sbi_scratch.h>
#include <sbi/sbi_trap.h>
#define BOOT_LOTTERY_ACQUIRED 1
#define BOOT_STATUS_BOOT_HART_DONE 1
#define BOOT_STATUS_RELOCATE_DONE 1
#define BOOT_STATUS_BOOT_HART_DONE 2
.macro MOV_3R __d0, __s0, __d1, __s1, __d2, __s2
add \__d0, \__s0, zero
@@ -31,14 +31,15 @@
add \__d4, \__s4, zero
.endm
.macro CLEAR_MDT tmp
#if __riscv_xlen == 32
li \tmp, MSTATUSH_MDT
csrc CSR_MSTATUSH, \tmp
#else
li \tmp, MSTATUS_MDT
csrc CSR_MSTATUS, \tmp
#endif
/*
* If __start_reg <= __check_reg and __check_reg < __end_reg then
* jump to __pass
*/
.macro BRANGE __start_reg, __end_reg, __check_reg, __jump_lable
blt \__check_reg, \__start_reg, 999f
bge \__check_reg, \__end_reg, 999f
j \__jump_lable
999:
.endm
.section .entry, "ax", %progbits
@@ -54,48 +55,162 @@ _start:
li a7, -1
beq a6, a7, _try_lottery
/* Jump to relocation wait loop if we are not boot hart */
bne a0, a6, _wait_for_boot_hart
bne a0, a6, _wait_relocate_copy_done
_try_lottery:
/* Jump to relocation wait loop if we don't get relocation lottery */
lla a6, _boot_lottery
li a7, BOOT_LOTTERY_ACQUIRED
#ifdef __riscv_atomic
amoswap.w a6, a7, (a6)
bnez a6, _wait_for_boot_hart
#elif __riscv_zalrsc
_sc_fail:
lr.w t0, (a6)
sc.w t1, a7, (a6)
bnez t1, _sc_fail
bnez t0, _wait_for_boot_hart
#else
#error "need a or zalrsc"
#endif
lla a6, _relocate_lottery
li a7, 1
amoadd.w a6, a7, (a6)
bnez a6, _wait_relocate_copy_done
/* Save load address */
lla t0, _load_start
lla t1, _fw_start
REG_S t1, 0(t0)
#ifdef FW_PIC
/* relocate the global table content */
li t0, FW_TEXT_START /* link start */
lla t1, _fw_start /* load start */
sub t2, t1, t0 /* load offset */
lla t0, __rela_dyn_start
lla t1, __rela_dyn_end
lla t0, _link_start
REG_L t0, 0(t0)
/* t1 shall has the address of _fw_start */
sub t2, t1, t0
lla t3, _runtime_offset
REG_S t2, (t3)
lla t0, __rel_dyn_start
lla t1, __rel_dyn_end
beq t0, t1, _relocate_done
2:
REG_L t5, __SIZEOF_LONG__(t0) /* t5 <-- relocation info:type */
REG_L t5, REGBYTES(t0) /* t5 <-- relocation info:type */
li t3, R_RISCV_RELATIVE /* reloc type R_RISCV_RELATIVE */
bne t5, t3, 3f
REG_L t3, 0(t0)
REG_L t5, (__SIZEOF_LONG__ * 2)(t0) /* t5 <-- addend */
REG_L t5, (REGBYTES * 2)(t0) /* t5 <-- addend */
add t5, t5, t2
add t3, t3, t2
REG_S t5, 0(t3) /* store runtime address to the GOT entry */
j 5f
3:
addi t0, t0, (__SIZEOF_LONG__ * 3)
lla t4, __dyn_sym_start
4:
srli t6, t5, SYM_INDEX /* t6 <--- sym table index */
andi t5, t5, 0xFF /* t5 <--- relocation type */
li t3, RELOC_TYPE
bne t5, t3, 5f
/* address R_RISCV_64 or R_RISCV_32 cases*/
REG_L t3, 0(t0)
li t5, SYM_SIZE
mul t6, t6, t5
add s5, t4, t6
REG_L t6, (REGBYTES * 2)(t0) /* t0 <-- addend */
REG_L t5, REGBYTES(s5)
add t5, t5, t6
add t5, t5, t2 /* t5 <-- location to fix up in RAM */
add t3, t3, t2 /* t3 <-- location to fix up in RAM */
REG_S t5, 0(t3) /* store runtime address to the variable */
5:
addi t0, t0, (REGBYTES * 3)
blt t0, t1, 2b
j _relocate_done
_wait_relocate_copy_done:
j _wait_for_boot_hart
#else
/* Relocate if load address != link address */
_relocate:
lla t0, _link_start
REG_L t0, 0(t0)
lla t1, _link_end
REG_L t1, 0(t1)
lla t2, _load_start
REG_L t2, 0(t2)
beq t0, t2, _relocate_done
sub t3, t1, t0
add t3, t3, t2
lla t4, _relocate_done
sub t4, t4, t2
add t4, t4, t0
blt t2, t0, _relocate_copy_to_upper
_relocate_copy_to_lower:
ble t1, t2, _relocate_copy_to_lower_loop
lla t3, _relocate_lottery
BRANGE t2, t1, t3, _start_hang
lla t3, _boot_status
BRANGE t2, t1, t3, _start_hang
lla t3, _relocate
lla t5, _relocate_done
BRANGE t2, t1, t3, _start_hang
BRANGE t2, t1, t5, _start_hang
BRANGE t3, t5, t2, _start_hang
_relocate_copy_to_lower_loop:
REG_L t3, 0(t2)
REG_S t3, 0(t0)
add t0, t0, __SIZEOF_POINTER__
add t2, t2, __SIZEOF_POINTER__
blt t0, t1, _relocate_copy_to_lower_loop
jr t4
_relocate_copy_to_upper:
ble t3, t0, _relocate_copy_to_upper_loop
lla t2, _relocate_lottery
BRANGE t0, t3, t2, _start_hang
lla t2, _boot_status
BRANGE t0, t3, t2, _start_hang
lla t2, _relocate
lla t5, _relocate_done
BRANGE t0, t3, t2, _start_hang
BRANGE t0, t3, t5, _start_hang
BRANGE t2, t5, t0, _start_hang
_relocate_copy_to_upper_loop:
add t3, t3, -__SIZEOF_POINTER__
add t1, t1, -__SIZEOF_POINTER__
REG_L t2, 0(t3)
REG_S t2, 0(t1)
blt t0, t1, _relocate_copy_to_upper_loop
jr t4
_wait_relocate_copy_done:
lla t0, _fw_start
lla t1, _link_start
REG_L t1, 0(t1)
beq t0, t1, _wait_for_boot_hart
lla t2, _boot_status
lla t3, _wait_for_boot_hart
sub t3, t3, t0
add t3, t3, t1
1:
/* waitting for relocate copy done (_boot_status == 1) */
li t4, BOOT_STATUS_RELOCATE_DONE
REG_L t5, 0(t2)
/* Reduce the bus traffic so that boot hart may proceed faster */
nop
nop
nop
bgt t4, t5, 1b
jr t3
#endif
_relocate_done:
/*
* Mark relocate copy done
* Use _boot_status copy relative to the load address
*/
lla t0, _boot_status
#ifndef FW_PIC
lla t1, _link_start
REG_L t1, 0(t1)
lla t2, _load_start
REG_L t2, 0(t2)
sub t0, t0, t1
add t0, t0, t2
#endif
li t1, BOOT_STATUS_RELOCATE_DONE
REG_S t1, 0(t0)
fence rw, rw
/* At this point we are running from link address */
/* Reset all registers except ra, a0, a1, a2, a3 and a4 for boot HART */
/* Reset all registers for boot HART */
li ra, 0
call _reset_regs
@@ -111,14 +226,6 @@ _bss_zero:
lla s4, _start_hang
csrw CSR_MTVEC, s4
/*
* While at this point, trap handling is rudimentary, if a trap happens,
* it will end up in _start_hang which is enough to hook up a GDB. Clear
* MDT to avoid generating a double trap and thus entering a
* critical-error state.
*/
CLEAR_MDT t0
/* Setup temporary stack */
lla s4, _fw_end
li s5, (SBI_SCRATCH_SIZE * 2)
@@ -202,8 +309,8 @@ _scratch_init:
REG_S a5, SBI_SCRATCH_FW_SIZE_OFFSET(tp)
/* Store R/W section's offset in scratch space */
lla a5, _fw_rw_start
sub a5, a5, a4
lla a4, __fw_rw_offset
REG_L a5, 0(a4)
REG_S a5, SBI_SCRATCH_FW_RW_OFFSET(tp)
/* Store fw_heap_offset and fw_heap_size in scratch space */
@@ -234,8 +341,10 @@ _scratch_init:
/* Store hartid-to-scratch function address in scratch space */
lla a4, _hartid_to_scratch
REG_S a4, SBI_SCRATCH_HARTID_TO_SCRATCH_OFFSET(tp)
/* Clear trap_context and tmp0 in scratch space */
REG_S zero, SBI_SCRATCH_TRAP_CONTEXT_OFFSET(tp)
/* Store trap-exit function address in scratch space */
lla a4, _trap_exit
REG_S a4, SBI_SCRATCH_TRAP_EXIT_OFFSET(tp)
/* Clear tmp0 in scratch space */
REG_S zero, SBI_SCRATCH_TMP0_OFFSET(tp)
/* Store firmware options in scratch space */
MOV_3R s0, a0, s1, a1, s2, a2
@@ -246,8 +355,6 @@ _scratch_init:
#endif
REG_S a0, SBI_SCRATCH_OPTIONS_OFFSET(tp)
MOV_3R a0, s0, a1, s1, a2, s2
/* Store hart index in scratch space */
REG_S t1, SBI_SCRATCH_HARTINDEX_OFFSET(tp)
/* Move to next scratch space */
add t1, t1, t2
blt t1, s7, _scratch_init
@@ -272,14 +379,30 @@ _scratch_init:
beq t1, a1, _fdt_reloc_done
/* t0 = source FDT start address */
add t0, a1, zero
/* t2 = source FDT size (convert from big-endian) */
lbu t2, 7(t0)
lbu t3, 6(t0)
lbu t4, 5(t0)
lbu t5, 4(t0)
/* t2 = source FDT size in big-endian */
#if __riscv_xlen == 64
lwu t2, 4(t0)
#else
lw t2, 4(t0)
#endif
/* t3 = bit[15:8] of FDT size */
add t3, t2, zero
srli t3, t3, 16
and t3, t3, a4
slli t3, t3, 8
/* t4 = bit[23:16] of FDT size */
add t4, t2, zero
srli t4, t4, 8
and t4, t4, a4
slli t4, t4, 16
/* t5 = bit[31:24] of FDT size */
add t5, t2, zero
and t5, t5, a4
slli t5, t5, 24
/* t2 = bit[7:0] of FDT size */
srli t2, t2, 24
and t2, t2, a4
/* t2 = FDT size in little-endian */
or t2, t2, t3
or t2, t2, t4
or t2, t2, t5
@@ -298,23 +421,23 @@ _fdt_reloc_done:
/* mark boot hart done */
li t0, BOOT_STATUS_BOOT_HART_DONE
lla t1, _boot_status
fence rw, rw
REG_S t0, 0(t1)
fence rw, rw
j _start_warm
/* waiting for boot hart to be done (_boot_status == BOOT_STATUS_BOOT_HART_DONE) */
/* waiting for boot hart to be done (_boot_status == 2) */
_wait_for_boot_hart:
li t0, BOOT_STATUS_BOOT_HART_DONE
lla t1, _boot_status
REG_L t1, 0(t1)
/* Reduce the bus traffic so that boot hart may proceed faster */
div t2, t2, zero
div t2, t2, zero
div t2, t2, zero
nop
nop
nop
bne t0, t1, _wait_for_boot_hart
_start_warm:
/* Reset all registers except ra, a0, a1, a2, a3 and a4 for non-boot HART */
/* Reset all registers for non-boot HARTs */
li ra, 0
call _reset_regs
@@ -323,7 +446,7 @@ _start_warm:
/* Find HART count and HART stack size */
lla a4, platform
#if __riscv_xlen > 32
#if __riscv_xlen == 64
lwu s7, SBI_PLATFORM_HART_COUNT_OFFSET(a4)
lwu s8, SBI_PLATFORM_HART_STACK_SIZE_OFFSET(a4)
#else
@@ -339,7 +462,7 @@ _start_warm:
beqz s9, 3f
li a4, 0
1:
#if __riscv_xlen > 32
#if __riscv_xlen == 64
lwu a5, (s9)
#else
lw a5, (s9)
@@ -368,16 +491,27 @@ _start_warm:
/* Setup trap handler */
lla a4, _trap_handler
#if __riscv_xlen == 32
csrr a5, CSR_MISA
srli a5, a5, ('H' - 'A')
andi a5, a5, 0x1
beq a5, zero, _skip_trap_handler_hyp
lla a4, _trap_handler_hyp
_skip_trap_handler_hyp:
beq a5, zero, _skip_trap_handler_rv32_hyp
lla a4, _trap_handler_rv32_hyp
_skip_trap_handler_rv32_hyp:
#endif
csrw CSR_MTVEC, a4
/* Clear MDT here again for all harts */
CLEAR_MDT t0
#if __riscv_xlen == 32
/* Override trap exit for H-extension */
csrr a5, CSR_MISA
srli a5, a5, ('H' - 'A')
andi a5, a5, 0x1
beq a5, zero, _skip_trap_exit_rv32_hyp
lla a4, _trap_exit_rv32_hyp
csrr a5, CSR_MSCRATCH
REG_S a4, SBI_SCRATCH_TRAP_EXIT_OFFSET(a5)
_skip_trap_exit_rv32_hyp:
#endif
/* Initialize SBI runtime */
csrr a0, CSR_MSCRATCH
@@ -388,10 +522,22 @@ _skip_trap_handler_hyp:
.data
.align 3
_boot_lottery:
#ifdef FW_PIC
_runtime_offset:
RISCV_PTR 0
#endif
_relocate_lottery:
RISCV_PTR 0
_boot_status:
RISCV_PTR 0
_load_start:
RISCV_PTR _fw_start
_link_start:
RISCV_PTR FW_TEXT_START
_link_end:
RISCV_PTR _fw_reloc_end
__fw_rw_offset:
RISCV_PTR _fw_rw_start - _fw_start
.section .entry, "ax", %progbits
.align 3
@@ -405,7 +551,7 @@ _hartid_to_scratch:
* t2 -> Temporary
*/
lla t2, platform
#if __riscv_xlen > 32
#if __riscv_xlen == 64
lwu t0, SBI_PLATFORM_HART_STACK_SIZE_OFFSET(t2)
lwu t2, SBI_PLATFORM_HART_COUNT_OFFSET(t2)
#else
@@ -489,10 +635,10 @@ memcmp:
xor t0, tp, t0
/* Save original SP on exception stack */
REG_S sp, (SBI_TRAP_REGS_OFFSET(sp) - SBI_TRAP_CONTEXT_SIZE)(t0)
REG_S sp, (SBI_TRAP_REGS_OFFSET(sp) - SBI_TRAP_REGS_SIZE)(t0)
/* Set SP to exception stack and make room for trap context */
add sp, t0, -(SBI_TRAP_CONTEXT_SIZE)
/* Set SP to exception stack and make room for trap registers */
add sp, t0, -(SBI_TRAP_REGS_SIZE)
/* Restore T0 from scratch space */
REG_L t0, SBI_SCRATCH_TMP0_OFFSET(tp)
@@ -552,35 +698,6 @@ memcmp:
REG_S t6, SBI_TRAP_REGS_OFFSET(t6)(sp)
.endm
.macro TRAP_SAVE_INFO have_mstatush have_h_extension
csrr t0, CSR_MCAUSE
REG_S t0, (SBI_TRAP_REGS_SIZE + SBI_TRAP_INFO_OFFSET(cause))(sp)
csrr t0, CSR_MTVAL
REG_S t0, (SBI_TRAP_REGS_SIZE + SBI_TRAP_INFO_OFFSET(tval))(sp)
.if \have_h_extension
csrr t0, CSR_MTVAL2
REG_S t0, (SBI_TRAP_REGS_SIZE + SBI_TRAP_INFO_OFFSET(tval2))(sp)
csrr t0, CSR_MTINST
REG_S t0, (SBI_TRAP_REGS_SIZE + SBI_TRAP_INFO_OFFSET(tinst))(sp)
.if \have_mstatush
csrr t0, CSR_MSTATUSH
srli t0, t0, MSTATUSH_GVA_SHIFT
.else
csrr t0, CSR_MSTATUS
srli t0, t0, MSTATUS_GVA_SHIFT
.endif
and t0, t0, 0x1
.else
REG_S zero, (SBI_TRAP_REGS_SIZE + SBI_TRAP_INFO_OFFSET(tval2))(sp)
REG_S zero, (SBI_TRAP_REGS_SIZE + SBI_TRAP_INFO_OFFSET(tinst))(sp)
li t0, 0
.endif
REG_S t0, (SBI_TRAP_REGS_SIZE + SBI_TRAP_INFO_OFFSET(gva))(sp)
/* We are ready to take another trap, clear MDT */
CLEAR_MDT t0
.endm
.macro TRAP_CALL_C_ROUTINE
/* Call C routine */
add a0, sp, zero
@@ -621,18 +738,15 @@ memcmp:
.endm
.macro TRAP_RESTORE_MEPC_MSTATUS have_mstatush
/*
* Restore MSTATUS and MEPC CSRs starting with MSTATUS/H to set MDT
* flags since we can not take a trap now or MEPC would be cloberred
*/
/* Restore MEPC and MSTATUS CSRs */
REG_L t0, SBI_TRAP_REGS_OFFSET(mepc)(a0)
csrw CSR_MEPC, t0
REG_L t0, SBI_TRAP_REGS_OFFSET(mstatus)(a0)
csrw CSR_MSTATUS, t0
.if \have_mstatush
REG_L t0, SBI_TRAP_REGS_OFFSET(mstatusH)(a0)
csrw CSR_MSTATUSH, t0
.endif
REG_L t0, SBI_TRAP_REGS_OFFSET(mstatus)(a0)
csrw CSR_MSTATUS, t0
REG_L t0, SBI_TRAP_REGS_OFFSET(mepc)(a0)
csrw CSR_MEPC, t0
.endm
.macro TRAP_RESTORE_A0_T0
@@ -646,6 +760,7 @@ memcmp:
.section .entry, "ax", %progbits
.align 3
.globl _trap_handler
.globl _trap_exit
_trap_handler:
TRAP_SAVE_AND_SETUP_SP_T0
@@ -653,10 +768,9 @@ _trap_handler:
TRAP_SAVE_GENERAL_REGS_EXCEPT_SP_T0
TRAP_SAVE_INFO 0 0
TRAP_CALL_C_ROUTINE
_trap_exit:
TRAP_RESTORE_GENERAL_REGS_EXCEPT_A0_T0
TRAP_RESTORE_MEPC_MSTATUS 0
@@ -665,39 +779,29 @@ _trap_handler:
mret
#if __riscv_xlen == 32
.section .entry, "ax", %progbits
.align 3
.globl _trap_handler_hyp
_trap_handler_hyp:
.globl _trap_handler_rv32_hyp
.globl _trap_exit_rv32_hyp
_trap_handler_rv32_hyp:
TRAP_SAVE_AND_SETUP_SP_T0
#if __riscv_xlen == 32
TRAP_SAVE_MEPC_MSTATUS 1
#else
TRAP_SAVE_MEPC_MSTATUS 0
#endif
TRAP_SAVE_GENERAL_REGS_EXCEPT_SP_T0
#if __riscv_xlen == 32
TRAP_SAVE_INFO 1 1
#else
TRAP_SAVE_INFO 0 1
#endif
TRAP_CALL_C_ROUTINE
_trap_exit_rv32_hyp:
TRAP_RESTORE_GENERAL_REGS_EXCEPT_A0_T0
#if __riscv_xlen == 32
TRAP_RESTORE_MEPC_MSTATUS 1
#else
TRAP_RESTORE_MEPC_MSTATUS 0
#endif
TRAP_RESTORE_A0_T0
mret
#endif
.section .entry, "ax", %progbits
.align 3
@@ -706,7 +810,7 @@ _reset_regs:
/* flush the instruction cache */
fence.i
/* Reset all registers except ra, a0, a1, a2, a3 and a4 */
/* Reset all registers except ra, a0, a1 and a2 */
li sp, 0
li gp, 0
li tp, 0
@@ -715,6 +819,8 @@ _reset_regs:
li t2, 0
li s0, 0
li s1, 0
li a3, 0
li a4, 0
li a5, 0
li a6, 0
li a7, 0
@@ -736,27 +842,6 @@ _reset_regs:
ret
.section .rodata
.Lstack_corrupt_msg:
.string "stack smashing detected\n"
/* This will be called when the stack corruption is detected */
.section .text
.align 3
.globl __stack_chk_fail
.type __stack_chk_fail, %function
__stack_chk_fail:
la a0, .Lstack_corrupt_msg
call sbi_panic
/* Initial value of the stack guard variable */
.section .data
.align 3
.globl __stack_chk_guard
.type __stack_chk_guard, %object
__stack_chk_guard:
RISCV_PTR 0x95B5FF5A
#ifdef FW_FDT_PATH
.section .rodata
.align 4

View File

@@ -20,7 +20,6 @@
PROVIDE(_text_start = .);
*(.entry)
*(.text)
*(.text.*)
. = ALIGN(8);
PROVIDE(_text_end = .);
}
@@ -39,17 +38,19 @@
. = ALIGN(8);
}
.dynsym :
{
*(.dynsym)
}
. = ALIGN(0x1000); /* Ensure next section is page aligned */
.dynsym : {
PROVIDE(__dyn_sym_start = .);
*(.dynsym)
PROVIDE(__dyn_sym_end = .);
}
.rela.dyn : {
PROVIDE(__rela_dyn_start = .);
PROVIDE(__rel_dyn_start = .);
*(.rela*)
PROVIDE(__rela_dyn_end = .);
. = ALIGN(8);
PROVIDE(__rel_dyn_end = .);
}
PROVIDE(_rodata_end = .);

View File

@@ -11,6 +11,12 @@
#include "fw_base.S"
.section .entry, "ax", %progbits
.align 3
_bad_dynamic_info:
wfi
j _bad_dynamic_info
.section .entry, "ax", %progbits
.align 3
.global fw_boot_hart
@@ -24,10 +30,10 @@ fw_boot_hart:
/* Sanity checks */
li a1, FW_DYNAMIC_INFO_MAGIC_VALUE
REG_L a0, FW_DYNAMIC_INFO_MAGIC_OFFSET(a2)
bne a0, a1, _start_hang
bne a0, a1, _bad_dynamic_info
li a1, FW_DYNAMIC_INFO_VERSION_MAX
REG_L a0, FW_DYNAMIC_INFO_VERSION_OFFSET(a2)
bgt a0, a1, _start_hang
bgt a0, a1, _bad_dynamic_info
/* Read boot HART id */
li a1, FW_DYNAMIC_INFO_VERSION_2
@@ -123,7 +129,7 @@ fw_options:
REG_L a0, (a0)
ret
.section .data
.section .entry, "ax", %progbits
.align 3
_dynamic_next_arg1:
RISCV_PTR 0x0

View File

@@ -46,10 +46,6 @@ fw_save_info:
fw_next_arg1:
#ifdef FW_JUMP_FDT_ADDR
li a0, FW_JUMP_FDT_ADDR
#elif defined(FW_JUMP_FDT_OFFSET)
lla a0, _fw_start
li a1, FW_JUMP_FDT_OFFSET
add a0, a0, a1
#else
add a0, a1, zero
#endif
@@ -63,16 +59,8 @@ fw_next_arg1:
* The next address should be returned in 'a0'.
*/
fw_next_addr:
#ifdef FW_JUMP_ADDR
lla a0, _jump_addr
REG_L a0, (a0)
#elif defined(FW_JUMP_OFFSET)
lla a0, _fw_start
li a1, FW_JUMP_OFFSET
add a0, a0, a1
#else
#error "Must define at least FW_JUMP_ADDR or FW_JUMP_OFFSET"
#endif
ret
.section .entry, "ax", %progbits
@@ -98,9 +86,11 @@ fw_options:
add a0, zero, zero
ret
#ifdef FW_JUMP_ADDR
.section .rodata
#ifndef FW_JUMP_ADDR
#error "Must define FW_JUMP_ADDR"
#endif
.section .entry, "ax", %progbits
.align 3
_jump_addr:
RISCV_PTR FW_JUMP_ADDR
#endif

View File

@@ -46,10 +46,6 @@ fw_save_info:
fw_next_arg1:
#ifdef FW_PAYLOAD_FDT_ADDR
li a0, FW_PAYLOAD_FDT_ADDR
#elif defined(FW_PAYLOAD_FDT_OFFSET)
lla a0, _fw_start
li a1, FW_PAYLOAD_FDT_OFFSET
add a0, a0, a1
#else
add a0, a1, zero
#endif

View File

@@ -13,10 +13,19 @@ firmware-cflags-y +=
firmware-asflags-y +=
firmware-ldflags-y +=
ifndef FW_PIC
FW_PIC := $(OPENSBI_LD_PIE)
endif
ifeq ($(FW_PIC),y)
firmware-genflags-y += -DFW_PIC
firmware-asflags-y += -fpic
firmware-cflags-y += -fPIE -pie
firmware-ldflags-y += -Wl,--no-dynamic-linker -Wl,-pie
endif
ifdef FW_TEXT_START
firmware-genflags-y += -DFW_TEXT_START=$(FW_TEXT_START)
else
firmware-genflags-y += -DFW_TEXT_START=0x0
endif
ifdef FW_FDT_PATH
@@ -29,15 +38,9 @@ endif
firmware-bins-$(FW_DYNAMIC) += fw_dynamic.bin
firmware-bins-$(FW_JUMP) += fw_jump.bin
ifdef FW_JUMP_OFFSET
firmware-genflags-$(FW_JUMP) += -DFW_JUMP_OFFSET=$(FW_JUMP_OFFSET)
endif
ifdef FW_JUMP_ADDR
firmware-genflags-$(FW_JUMP) += -DFW_JUMP_ADDR=$(FW_JUMP_ADDR)
endif
ifdef FW_JUMP_FDT_OFFSET
firmware-genflags-$(FW_JUMP) += -DFW_JUMP_FDT_OFFSET=$(FW_JUMP_FDT_OFFSET)
endif
ifdef FW_JUMP_FDT_ADDR
firmware-genflags-$(FW_JUMP) += -DFW_JUMP_FDT_ADDR=$(FW_JUMP_FDT_ADDR)
endif
@@ -56,9 +59,6 @@ ifdef FW_PAYLOAD_ALIGN
firmware-genflags-$(FW_PAYLOAD) += -DFW_PAYLOAD_ALIGN=$(FW_PAYLOAD_ALIGN)
endif
ifdef FW_PAYLOAD_FDT_OFFSET
firmware-genflags-$(FW_PAYLOAD) += -DFW_PAYLOAD_FDT_OFFSET=$(FW_PAYLOAD_FDT_OFFSET)
endif
ifdef FW_PAYLOAD_FDT_ADDR
firmware-genflags-$(FW_PAYLOAD) += -DFW_PAYLOAD_FDT_ADDR=$(FW_PAYLOAD_FDT_ADDR)
endif
@@ -66,12 +66,3 @@ endif
ifdef FW_OPTIONS
firmware-genflags-y += -DFW_OPTIONS=$(FW_OPTIONS)
endif
ifeq ($(CONFIG_STACK_PROTECTOR),y)
stack-protector-cflags-$(CONFIG_STACK_PROTECTOR) := -fstack-protector
stack-protector-cflags-$(CONFIG_STACK_PROTECTOR_STRONG) := -fstack-protector-strong
stack-protector-cflags-$(CONFIG_STACK_PROTECTOR_ALL) := -fstack-protector-all
else
stack-protector-cflags-y := -fno-stack-protector
endif
firmware-cflags-y += $(stack-protector-cflags-y)

View File

@@ -30,18 +30,7 @@ _start:
/* Pick one hart to run the main boot sequence */
lla a3, _hart_lottery
li a2, 1
#ifdef __riscv_atomic
amoadd.w a3, a2, (a3)
#elif __riscv_zalrsc
_sc_fail:
lr.w t0, (a3)
addw t1, t0, a2
sc.w t1, t1, (a3)
bnez t1, _sc_fail
move a3, t0
#else
#error "need a or zalrsc"
#endif
bnez a3, _start_hang
/* Save a0 and a1 */
@@ -89,7 +78,7 @@ _start_hang:
wfi
j _start_hang
.section .data
.section .entry, "ax", %progbits
.align 3
_hart_lottery:
RISCV_PTR 0
@@ -97,18 +86,3 @@ _boot_a0:
RISCV_PTR 0
_boot_a1:
RISCV_PTR 0
/* This will be called when the stack corruption is detected */
.section .text
.align 3
.globl __stack_chk_fail
.type __stack_chk_fail, %function
.equ __stack_chk_fail, _start_hang
/* Initial value of the stack guard variable */
.section .data
.align 3
.globl __stack_chk_guard
.type __stack_chk_guard, %object
__stack_chk_guard:
RISCV_PTR 0x95B5FF5A

View File

@@ -8,49 +8,31 @@
*/
#include <sbi/sbi_ecall_interface.h>
#include <sbi/sbi_string.h>
struct sbiret {
unsigned long error;
unsigned long value;
};
#define SBI_ECALL(__eid, __fid, __a0, __a1, __a2) \
({ \
register unsigned long a0 asm("a0") = (unsigned long)(__a0); \
register unsigned long a1 asm("a1") = (unsigned long)(__a1); \
register unsigned long a2 asm("a2") = (unsigned long)(__a2); \
register unsigned long a6 asm("a6") = (unsigned long)(__fid); \
register unsigned long a7 asm("a7") = (unsigned long)(__eid); \
asm volatile("ecall" \
: "+r"(a0) \
: "r"(a1), "r"(a2), "r"(a6), "r"(a7) \
: "memory"); \
a0; \
})
struct sbiret sbi_ecall(int ext, int fid, unsigned long arg0,
unsigned long arg1, unsigned long arg2,
unsigned long arg3, unsigned long arg4,
unsigned long arg5)
{
struct sbiret ret;
#define SBI_ECALL_0(__eid, __fid) SBI_ECALL(__eid, __fid, 0, 0, 0)
#define SBI_ECALL_1(__eid, __fid, __a0) SBI_ECALL(__eid, __fid, __a0, 0, 0)
#define SBI_ECALL_2(__eid, __fid, __a0, __a1) SBI_ECALL(__eid, __fid, __a0, __a1, 0)
register unsigned long a0 asm ("a0") = (unsigned long)(arg0);
register unsigned long a1 asm ("a1") = (unsigned long)(arg1);
register unsigned long a2 asm ("a2") = (unsigned long)(arg2);
register unsigned long a3 asm ("a3") = (unsigned long)(arg3);
register unsigned long a4 asm ("a4") = (unsigned long)(arg4);
register unsigned long a5 asm ("a5") = (unsigned long)(arg5);
register unsigned long a6 asm ("a6") = (unsigned long)(fid);
register unsigned long a7 asm ("a7") = (unsigned long)(ext);
asm volatile ("ecall"
: "+r" (a0), "+r" (a1)
: "r" (a2), "r" (a3), "r" (a4), "r" (a5), "r" (a6), "r" (a7)
: "memory");
ret.error = a0;
ret.value = a1;
return ret;
}
#define sbi_ecall_console_putc(c) SBI_ECALL_1(SBI_EXT_0_1_CONSOLE_PUTCHAR, 0, (c))
static inline void sbi_ecall_console_puts(const char *str)
{
sbi_ecall(SBI_EXT_DBCN, SBI_EXT_DBCN_CONSOLE_WRITE,
sbi_strlen(str), (unsigned long)str, 0, 0, 0, 0);
}
static inline void sbi_ecall_shutdown(void)
{
sbi_ecall(SBI_EXT_SRST, SBI_EXT_SRST_RESET,
SBI_SRST_RESET_TYPE_SHUTDOWN, SBI_SRST_RESET_REASON_NONE,
0, 0, 0, 0);
while (str && *str)
sbi_ecall_console_putc(*str++);
}
#define wfi() \
@@ -61,6 +43,7 @@ static inline void sbi_ecall_shutdown(void)
void test_main(unsigned long a0, unsigned long a1)
{
sbi_ecall_console_puts("\nTest payload running\n");
sbi_ecall_shutdown();
sbi_ecall_console_puts("sbi_ecall_shutdown failed to execute.\n");
while (1)
wfi();
}

View File

@@ -79,12 +79,36 @@ struct fw_dynamic_info {
* Prevent modification of struct fw_dynamic_info from affecting
* FW_DYNAMIC_INFO_xxx_OFFSET
*/
assert_member_offset(struct fw_dynamic_info, magic, FW_DYNAMIC_INFO_MAGIC_OFFSET);
assert_member_offset(struct fw_dynamic_info, version, FW_DYNAMIC_INFO_VERSION_OFFSET);
assert_member_offset(struct fw_dynamic_info, next_addr, FW_DYNAMIC_INFO_NEXT_ADDR_OFFSET);
assert_member_offset(struct fw_dynamic_info, next_mode, FW_DYNAMIC_INFO_NEXT_MODE_OFFSET);
assert_member_offset(struct fw_dynamic_info, options, FW_DYNAMIC_INFO_OPTIONS_OFFSET);
assert_member_offset(struct fw_dynamic_info, boot_hart, FW_DYNAMIC_INFO_BOOT_HART_OFFSET);
_Static_assert(
offsetof(struct fw_dynamic_info, magic)
== FW_DYNAMIC_INFO_MAGIC_OFFSET,
"struct fw_dynamic_info definition has changed, please redefine "
"FW_DYNAMIC_INFO_MAGIC_OFFSET");
_Static_assert(
offsetof(struct fw_dynamic_info, version)
== FW_DYNAMIC_INFO_VERSION_OFFSET,
"struct fw_dynamic_info definition has changed, please redefine "
"FW_DYNAMIC_INFO_VERSION_OFFSET");
_Static_assert(
offsetof(struct fw_dynamic_info, next_addr)
== FW_DYNAMIC_INFO_NEXT_ADDR_OFFSET,
"struct fw_dynamic_info definition has changed, please redefine "
"FW_DYNAMIC_INFO_NEXT_ADDR_OFFSET");
_Static_assert(
offsetof(struct fw_dynamic_info, next_mode)
== FW_DYNAMIC_INFO_NEXT_MODE_OFFSET,
"struct fw_dynamic_info definition has changed, please redefine "
"FW_DYNAMIC_INFO_NEXT_MODE_OFFSET");
_Static_assert(
offsetof(struct fw_dynamic_info, options)
== FW_DYNAMIC_INFO_OPTIONS_OFFSET,
"struct fw_dynamic_info definition has changed, please redefine "
"FW_DYNAMIC_INFO_OPTIONS_OFFSET");
_Static_assert(
offsetof(struct fw_dynamic_info, boot_hart)
== FW_DYNAMIC_INFO_BOOT_HART_OFFSET,
"struct fw_dynamic_info definition has changed, please redefine "
"FW_DYNAMIC_INFO_BOOT_HART_OFFSET");
#endif

View File

@@ -101,14 +101,6 @@
__v; \
})
/* Variant of csr_read() that allows the compiler to cache the value. */
#define csr_read_relaxed(csr) \
({ \
register unsigned long __v; \
__asm__ ("csrr %0, " __ASM_STR(csr) : "=r"(__v)); \
__v; \
})
#define csr_write(csr, val) \
({ \
unsigned long __v = (unsigned long)(val); \
@@ -156,26 +148,6 @@
: "memory"); \
})
#if __riscv_xlen == 64
#define __csrrw64(op, csr, csrh, val) (true ? op(csr, val) : (uint64_t)csrh)
#define __csrr64( op, csr, csrh) (true ? op(csr) : (uint64_t)csrh)
#define __csrw64( op, csr, csrh, val) (true ? op(csr, val) : (uint64_t)csrh)
#elif __riscv_xlen == 32
#define __csrrw64(op, csr, csrh, val) ( op(csr, val) | (uint64_t)op(csrh, val >> 32) << 32)
#define __csrr64( op, csr, csrh) ( op(csr) | (uint64_t)op(csrh) << 32)
#define __csrw64( op, csr, csrh, val) ({ op(csr, val); op(csrh, val >> 32); })
#endif
#define csr_swap64( csr, val) __csrrw64(csr_swap, csr, csr ## H, val)
#define csr_read64( csr) __csrr64 (csr_read, csr, csr ## H)
#define csr_read_relaxed64(csr) __csrr64 (csr_read_relaxed, csr, csr ## H)
#define csr_write64( csr, val) __csrw64 (csr_write, csr, csr ## H, val)
#define csr_read_set64( csr, val) __csrrw64(csr_read_set, csr, csr ## H, val)
#define csr_set64( csr, val) __csrw64 (csr_set, csr, csr ## H, val)
#define csr_clear64( csr, val) __csrw64 (csr_clear, csr, csr ## H, val)
#define csr_read_clear64( csr, val) __csrrw64(csr_read_clear, csr, csr ## H, val)
#define csr_clear64( csr, val) __csrw64 (csr_clear, csr, csr ## H, val)
unsigned long csr_read_num(int csr_num);
void csr_write_num(int csr_num, unsigned long val);
@@ -191,7 +163,7 @@ void csr_write_num(int csr_num, unsigned long val);
} while (0)
/* Get current HART id */
#define current_hartid() ((unsigned int)csr_read_relaxed(CSR_MHARTID))
#define current_hartid() ((unsigned int)csr_read(CSR_MHARTID))
/* determine CPU extension, return non-zero support */
int misa_extension_imp(char ext);
@@ -209,12 +181,6 @@ int misa_xlen(void);
/* Get RISC-V ISA string representation */
void misa_string(int xlen, char *out, unsigned int out_sz);
/* Disable pmp entry at a given index */
int pmp_disable(unsigned int n);
/* Check if the matching field is set */
int is_pmp_entry_mapped(unsigned long entry);
int pmp_set(unsigned int n, unsigned long prot, unsigned long addr,
unsigned long log2len);

View File

@@ -39,14 +39,14 @@ unsigned int atomic_raw_xchg_uint(volatile unsigned int *ptr,
unsigned long atomic_raw_xchg_ulong(volatile unsigned long *ptr,
unsigned long newval);
/**
* Set a bit in an atomic variable and return the value of bit before modify.
* Set a bit in an atomic variable and return the new value.
* @nr : Bit to set.
* @atom: atomic variable to modify
*/
int atomic_set_bit(int nr, atomic_t *atom);
/**
* Clear a bit in an atomic variable and return the value of bit before modify.
* Clear a bit in an atomic variable and return the new value.
* @nr : Bit to set.
* @atom: atomic variable to modify
*/
@@ -54,14 +54,14 @@ int atomic_set_bit(int nr, atomic_t *atom);
int atomic_clear_bit(int nr, atomic_t *atom);
/**
* Set a bit in any address and return the value of bit before modify.
* Set a bit in any address and return the new value .
* @nr : Bit to set.
* @addr: Address to modify
*/
int atomic_raw_set_bit(int nr, volatile unsigned long *addr);
/**
* Clear a bit in any address and return the value of bit before modify.
* Clear a bit in any address and return the new value .
* @nr : Bit to set.
* @addr: Address to modify
*/

View File

@@ -40,11 +40,7 @@
#define smp_wmb() RISCV_FENCE(w,w)
/* CPU relax for busy loop */
#define cpu_relax() \
do { \
unsigned long __t; \
__asm__ __volatile__ ("div %0, %0, zero" : "=r" (__t)); \
} while (0)
#define cpu_relax() asm volatile ("" : : : "memory")
/* clang-format on */

View File

@@ -1,249 +0,0 @@
/*
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2023 Ventana Micro System, Inc.
*
* Authors:
* Himanshu Chauhan <hchauhan@ventanamicro.com>
*/
#ifndef __RISCV_DBTR_H__
#define __RISCV_DBTR_H__
#define RV_MAX_TRIGGERS 32
enum {
RISCV_DBTR_TRIG_NONE = 0,
RISCV_DBTR_TRIG_LEGACY,
RISCV_DBTR_TRIG_MCONTROL,
RISCV_DBTR_TRIG_ICOUNT,
RISCV_DBTR_TRIG_ITRIGGER,
RISCV_DBTR_TRIG_ETRIGGER,
RISCV_DBTR_TRIG_MCONTROL6,
};
#define RV_DBTR_BIT(_prefix, _name) \
RV_DBTR_##_prefix##_##_name##_BIT
#define RV_DBTR_BIT_MASK(_prefix, _name) \
RV_DBTR_##_prefix##_name##_BIT_MASK
#define RV_DBTR_DECLARE_BIT(_prefix, _name, _val) \
RV_DBTR_BIT(_prefix, _name) = _val
#define RV_DBTR_DECLARE_BIT_MASK(_prefix, _name, _width) \
RV_DBTR_BIT_MASK(_prefix, _name) = \
(((1UL << _width) - 1) << RV_DBTR_BIT(_prefix, _name))
#define CLEAR_DBTR_BIT(_target, _prefix, _bit_name) \
__clear_bit(RV_DBTR_BIT(_prefix, _bit_name), &_target)
#define SET_DBTR_BIT(_target, _prefix, _bit_name) \
__set_bit(RV_DBTR_BIT(_prefix, _bit_name), &_target)
/* Trigger Data 1 */
enum {
RV_DBTR_DECLARE_BIT(TDATA1, DATA, 0),
#if __riscv_xlen == 64
RV_DBTR_DECLARE_BIT(TDATA1, DMODE, 59),
RV_DBTR_DECLARE_BIT(TDATA1, TYPE, 60),
#elif __riscv_xlen == 32
RV_DBTR_DECLARE_BIT(TDATA1, DMODE, 27),
RV_DBTR_DECLARE_BIT(TDATA1, TYPE, 28),
#else
#error "Unknown __riscv_xlen"
#endif
};
enum {
#if __riscv_xlen == 64
RV_DBTR_DECLARE_BIT_MASK(TDATA1, DATA, 59),
#elif __riscv_xlen == 32
RV_DBTR_DECLARE_BIT_MASK(TDATA1, DATA, 27),
#else
#error "Unknown __riscv_xlen"
#endif
RV_DBTR_DECLARE_BIT_MASK(TDATA1, DMODE, 1),
RV_DBTR_DECLARE_BIT_MASK(TDATA1, TYPE, 4),
};
/* MC - Match Control Type Register */
enum {
RV_DBTR_DECLARE_BIT(MC, LOAD, 0),
RV_DBTR_DECLARE_BIT(MC, STORE, 1),
RV_DBTR_DECLARE_BIT(MC, EXEC, 2),
RV_DBTR_DECLARE_BIT(MC, U, 3),
RV_DBTR_DECLARE_BIT(MC, S, 4),
RV_DBTR_DECLARE_BIT(MC, RES2, 5),
RV_DBTR_DECLARE_BIT(MC, M, 6),
RV_DBTR_DECLARE_BIT(MC, MATCH, 7),
RV_DBTR_DECLARE_BIT(MC, CHAIN, 11),
RV_DBTR_DECLARE_BIT(MC, ACTION, 12),
RV_DBTR_DECLARE_BIT(MC, SIZELO, 16),
RV_DBTR_DECLARE_BIT(MC, TIMING, 18),
RV_DBTR_DECLARE_BIT(MC, SELECT, 19),
RV_DBTR_DECLARE_BIT(MC, HIT, 20),
#if __riscv_xlen >= 64
RV_DBTR_DECLARE_BIT(MC, SIZEHI, 21),
#endif
#if __riscv_xlen == 64
RV_DBTR_DECLARE_BIT(MC, MASKMAX, 53),
RV_DBTR_DECLARE_BIT(MC, DMODE, 59),
RV_DBTR_DECLARE_BIT(MC, TYPE, 60),
#elif __riscv_xlen == 32
RV_DBTR_DECLARE_BIT(MC, MASKMAX, 21),
RV_DBTR_DECLARE_BIT(MC, DMODE, 27),
RV_DBTR_DECLARE_BIT(MC, TYPE, 28),
#else
#error "Unknown __riscv_xlen"
#endif
};
enum {
RV_DBTR_DECLARE_BIT_MASK(MC, LOAD, 1),
RV_DBTR_DECLARE_BIT_MASK(MC, STORE, 1),
RV_DBTR_DECLARE_BIT_MASK(MC, EXEC, 1),
RV_DBTR_DECLARE_BIT_MASK(MC, U, 1),
RV_DBTR_DECLARE_BIT_MASK(MC, S, 1),
RV_DBTR_DECLARE_BIT_MASK(MC, RES2, 1),
RV_DBTR_DECLARE_BIT_MASK(MC, M, 1),
RV_DBTR_DECLARE_BIT_MASK(MC, MATCH, 4),
RV_DBTR_DECLARE_BIT_MASK(MC, CHAIN, 1),
RV_DBTR_DECLARE_BIT_MASK(MC, ACTION, 4),
RV_DBTR_DECLARE_BIT_MASK(MC, SIZELO, 2),
RV_DBTR_DECLARE_BIT_MASK(MC, TIMING, 1),
RV_DBTR_DECLARE_BIT_MASK(MC, SELECT, 1),
RV_DBTR_DECLARE_BIT_MASK(MC, HIT, 1),
#if __riscv_xlen >= 64
RV_DBTR_DECLARE_BIT_MASK(MC, SIZEHI, 2),
#endif
RV_DBTR_DECLARE_BIT_MASK(MC, MASKMAX, 6),
RV_DBTR_DECLARE_BIT_MASK(MC, DMODE, 1),
RV_DBTR_DECLARE_BIT_MASK(MC, TYPE, 4),
};
/* MC6 - Match Control 6 Type Register */
enum {
RV_DBTR_DECLARE_BIT(MC6, LOAD, 0),
RV_DBTR_DECLARE_BIT(MC6, STORE, 1),
RV_DBTR_DECLARE_BIT(MC6, EXEC, 2),
RV_DBTR_DECLARE_BIT(MC6, U, 3),
RV_DBTR_DECLARE_BIT(MC6, S, 4),
RV_DBTR_DECLARE_BIT(MC6, RES2, 5),
RV_DBTR_DECLARE_BIT(MC6, M, 6),
RV_DBTR_DECLARE_BIT(MC6, MATCH, 7),
RV_DBTR_DECLARE_BIT(MC6, CHAIN, 11),
RV_DBTR_DECLARE_BIT(MC6, ACTION, 12),
RV_DBTR_DECLARE_BIT(MC6, SIZE, 16),
RV_DBTR_DECLARE_BIT(MC6, TIMING, 20),
RV_DBTR_DECLARE_BIT(MC6, SELECT, 21),
RV_DBTR_DECLARE_BIT(MC6, HIT, 22),
RV_DBTR_DECLARE_BIT(MC6, VU, 23),
RV_DBTR_DECLARE_BIT(MC6, VS, 24),
#if __riscv_xlen == 64
RV_DBTR_DECLARE_BIT(MC6, DMODE, 59),
RV_DBTR_DECLARE_BIT(MC6, TYPE, 60),
#elif __riscv_xlen == 32
RV_DBTR_DECLARE_BIT(MC6, DMODE, 27),
RV_DBTR_DECLARE_BIT(MC6, TYPE, 28),
#else
#error "Unknown __riscv_xlen"
#endif
};
enum {
RV_DBTR_DECLARE_BIT_MASK(MC6, LOAD, 1),
RV_DBTR_DECLARE_BIT_MASK(MC6, STORE, 1),
RV_DBTR_DECLARE_BIT_MASK(MC6, EXEC, 1),
RV_DBTR_DECLARE_BIT_MASK(MC6, U, 1),
RV_DBTR_DECLARE_BIT_MASK(MC6, S, 1),
RV_DBTR_DECLARE_BIT_MASK(MC6, RES2, 1),
RV_DBTR_DECLARE_BIT_MASK(MC6, M, 1),
RV_DBTR_DECLARE_BIT_MASK(MC6, MATCH, 4),
RV_DBTR_DECLARE_BIT_MASK(MC6, CHAIN, 1),
RV_DBTR_DECLARE_BIT_MASK(MC6, ACTION, 4),
RV_DBTR_DECLARE_BIT_MASK(MC6, SIZE, 4),
RV_DBTR_DECLARE_BIT_MASK(MC6, TIMING, 1),
RV_DBTR_DECLARE_BIT_MASK(MC6, SELECT, 1),
RV_DBTR_DECLARE_BIT_MASK(MC6, HIT, 1),
RV_DBTR_DECLARE_BIT_MASK(MC6, VU, 1),
RV_DBTR_DECLARE_BIT_MASK(MC6, VS, 1),
#if __riscv_xlen == 64
RV_DBTR_DECLARE_BIT_MASK(MC6, DMODE, 1),
RV_DBTR_DECLARE_BIT_MASK(MC6, TYPE, 4),
#elif __riscv_xlen == 32
RV_DBTR_DECLARE_BIT_MASK(MC6, DMODE, 1),
RV_DBTR_DECLARE_BIT_MASK(MC6, TYPE, 4),
#else
#error "Unknown __riscv_xlen"
#endif
};
#define RV_DBTR_SET_TDATA1_TYPE(_t1, _type) \
do { \
_t1 &= ~RV_DBTR_BIT_MASK(TDATA1, TYPE); \
_t1 |= (((unsigned long)_type \
<< RV_DBTR_BIT(TDATA1, TYPE)) \
& RV_DBTR_BIT_MASK(TDATA1, TYPE)); \
}while (0);
#define RV_DBTR_SET_MC_TYPE(_t1, _type) \
do { \
_t1 &= ~RV_DBTR_BIT_MASK(MC, TYPE); \
_t1 |= (((unsigned long)_type \
<< RV_DBTR_BIT(MC, TYPE)) \
& RV_DBTR_BIT_MASK(MC, TYPE)); \
}while (0);
#define RV_DBTR_SET_MC6_TYPE(_t1, _type) \
do { \
_t1 &= ~RV_DBTR_BIT_MASK(MC6, TYPE); \
_t1 |= (((unsigned long)_type \
<< RV_DBTR_BIT(MC6, TYPE)) \
& RV_DBTR_BIT_MASK(MC6, TYPE)); \
}while (0);
#define RV_DBTR_SET_MC_EXEC(_t1) \
SET_DBTR_BIT(_t1, MC, EXEC)
#define RV_DBTR_SET_MC_LOAD(_t1) \
SET_DBTR_BIT(_t1, MC, LOAD)
#define RV_DBTR_SET_MC_STORE(_t1) \
SET_DBTR_BIT(_t1, MC, STORE)
#define RV_DBTR_SET_MC_SIZELO(_t1, _val) \
do { \
_t1 &= ~RV_DBTR_BIT_MASK(MC, SIZELO); \
_t1 |= ((_val << RV_DBTR_BIT(MC, SIZELO)) \
& RV_DBTR_BIT_MASK(MC, SIZELO)); \
} while(0);
#define RV_DBTR_SET_MC_SIZEHI(_t1, _val) \
do { \
_t1 &= ~RV_DBTR_BIT_MASK(MC, SIZEHI); \
_t1 |= ((_val << RV_DBTR_BIT(MC, SIZEHI)) \
& RV_DBTR_BIT_MASK(MC, SIZEHI)); \
} while(0);
#define RV_DBTR_SET_MC6_EXEC(_t1) \
SET_DBTR_BIT(_t1, MC6, EXEC)
#define RV_DBTR_SET_MC6_LOAD(_t1) \
SET_DBTR_BIT(_t1, MC6, LOAD)
#define RV_DBTR_SET_MC6_STORE(_t1) \
SET_DBTR_BIT(_t1, MC6, STORE)
#define RV_DBTR_SET_MC6_SIZE(_t1, _val) \
do { \
_t1 &= ~RV_DBTR_BIT_MASK(MC6, SIZE); \
_t1 |= ((_val << RV_DBTR_BIT(MC6, SIZE)) \
& RV_DBTR_BIT_MASK(MC6, SIZE)); \
} while(0);
typedef unsigned long riscv_dbtr_tdata1_mcontrol_t;
typedef unsigned long riscv_dbtr_tdata1_mcontrol6_t;
typedef unsigned long riscv_dbtr_tdata1_t;
#endif /* __RISCV_DBTR_H__ */

View File

@@ -1,6 +1,14 @@
#ifndef __RISCV_ELF_H__
#define __RISCV_ELF_H__
#include <sbi/riscv_asm.h>
#define R_RISCV_32 1
#define R_RISCV_64 2
#define R_RISCV_RELATIVE 3
#define RELOC_TYPE __REG_SEL(R_RISCV_64, R_RISCV_32)
#define SYM_INDEX __REG_SEL(0x20, 0x8)
#define SYM_SIZE __REG_SEL(0x18,0x10)
#endif

View File

@@ -32,8 +32,6 @@
#define MSTATUS_TVM _UL(0x00100000)
#define MSTATUS_TW _UL(0x00200000)
#define MSTATUS_TSR _UL(0x00400000)
#define MSTATUS_SPELP _UL(0x00800000)
#define MSTATUS_SDT _UL(0x01000000)
#define MSTATUS32_SD _UL(0x80000000)
#if __riscv_xlen == 64
#define MSTATUS_UXL _ULL(0x0000000300000000)
@@ -43,16 +41,12 @@
#define MSTATUS_GVA _ULL(0x0000004000000000)
#define MSTATUS_GVA_SHIFT 38
#define MSTATUS_MPV _ULL(0x0000008000000000)
#define MSTATUS_MPELP _ULL(0x0000020000000000)
#define MSTATUS_MDT _ULL(0x0000040000000000)
#else
#define MSTATUSH_SBE _UL(0x00000010)
#define MSTATUSH_MBE _UL(0x00000020)
#define MSTATUSH_GVA _UL(0x00000040)
#define MSTATUSH_GVA_SHIFT 6
#define MSTATUSH_MPV _UL(0x00000080)
#define MSTATUSH_MPELP _UL(0x00000200)
#define MSTATUSH_MDT _UL(0x00000400)
#endif
#define MSTATUS32_SD _UL(0x80000000)
#define MSTATUS64_SD _ULL(0x8000000000000000)
@@ -86,10 +80,6 @@
#define HSTATUS_GVA _UL(0x00000040)
#define HSTATUS_VSBE _UL(0x00000020)
#define MTVEC_MODE _UL(0x00000003)
#define MCAUSE_IRQ_MASK (_UL(1) << (__riscv_xlen - 1))
#define IRQ_S_SOFT 1
#define IRQ_VS_SOFT 2
#define IRQ_M_SOFT 3
@@ -215,19 +205,15 @@
#endif
#define MHPMEVENT_SSCOF_MASK _ULL(0xFF00000000000000)
#define MHPMEVENT_SSCOF_MASK _ULL(0xFFFF000000000000)
#if __riscv_xlen > 32
#define ENVCFG_STCE (_ULL(1) << 63)
#define ENVCFG_PBMTE (_ULL(1) << 62)
#define ENVCFG_ADUE_SHIFT 61
#define ENVCFG_ADUE (_ULL(1) << ENVCFG_ADUE_SHIFT)
#define ENVCFG_CDE (_ULL(1) << 60)
#define ENVCFG_DTE_SHIFT 59
#define ENVCFG_DTE (_ULL(1) << ENVCFG_DTE_SHIFT)
#define ENVCFG_PMM (_ULL(0x3) << 32)
#define ENVCFG_PMM_PMLEN_0 (_ULL(0x0) << 32)
#define ENVCFG_PMM_PMLEN_7 (_ULL(0x2) << 32)
#define ENVCFG_PMM_PMLEN_16 (_ULL(0x3) << 32)
#else
#define ENVCFGH_STCE (_UL(1) << 31)
#define ENVCFGH_PBMTE (_UL(1) << 30)
#endif
#define ENVCFG_CBZE (_UL(1) << 7)
#define ENVCFG_CBCFE (_UL(1) << 6)
#define ENVCFG_CBIE_SHIFT 4
@@ -235,10 +221,6 @@
#define ENVCFG_CBIE_ILL _UL(0x0)
#define ENVCFG_CBIE_FLUSH _UL(0x1)
#define ENVCFG_CBIE_INV _UL(0x3)
#define ENVCFG_SSE_SHIFT 3
#define ENVCFG_SSE (_UL(1) << ENVCFG_SSE_SHIFT)
#define ENVCFG_LPE_SHIFT 2
#define ENVCFG_LPE (_UL(1) << ENVCFG_LPE_SHIFT)
#define ENVCFG_FIOM _UL(0x1)
/* ===== User-level CSRs ===== */
@@ -247,7 +229,6 @@
#define CSR_USTATUS 0x000
#define CSR_UIE 0x004
#define CSR_UTVEC 0x005
#define CSR_SSP 0x011
/* User Trap Handling (N-extension) */
#define CSR_USCRATCH 0x040
@@ -338,9 +319,6 @@
/* Supervisor Configuration */
#define CSR_SENVCFG 0x10a
/* Supervisor Conter Inhibit */
#define CSR_SCOUNTINHIBIT 0x120
/* Supervisor Trap Handling */
#define CSR_SSCRATCH 0x140
#define CSR_SEPC 0x141
@@ -355,14 +333,9 @@
/* Supervisor Protection and Translation */
#define CSR_SATP 0x180
/* Supervisor Indirect Register Alias */
/* Supervisor-Level Window to Indirectly Accessed Registers (AIA) */
#define CSR_SISELECT 0x150
#define CSR_SIREG 0x151
#define CSR_SIREG2 0x152
#define CSR_SIREG3 0x153
#define CSR_SIREG4 0x155
#define CSR_SIREG5 0x156
#define CSR_SIREG6 0x157
/* Supervisor-Level Interrupts (AIA) */
#define CSR_STOPEI 0x15c
@@ -378,17 +351,6 @@
#define CSR_SSTATEEN2 0x10E
#define CSR_SSTATEEN3 0x10F
/* Machine-Level Control transfer records CSRs */
#define CSR_MCTRCTL 0x34e
/* Supervisor-Level Control transfer records CSRs */
#define CSR_SCTRCTL 0x14e
#define CSR_SCTRSTATUS 0x14f
#define CSR_SCTRDEPTH 0x15f
/* VS-Level Control transfer records CSRs */
#define CSR_VSCTRCTL 0x24e
/* ===== Hypervisor-level CSRs ===== */
/* Hypervisor Trap Setup (H-extension) */
@@ -434,14 +396,9 @@
#define CSR_HVIPRIO1 0x646
#define CSR_HVIPRIO2 0x647
/* Virtual Supervisor Indirect Alias */
/* VS-Level Window to Indirectly Accessed Registers (H-extension with AIA) */
#define CSR_VSISELECT 0x250
#define CSR_VSIREG 0x251
#define CSR_VSIREG2 0x252
#define CSR_VSIREG3 0x253
#define CSR_VSIREG4 0x255
#define CSR_VSIREG5 0x256
#define CSR_VSIREG6 0x257
/* VS-Level Interrupts (H-extension with AIA) */
#define CSR_VSTOPEI 0x25c
@@ -473,7 +430,6 @@
#define CSR_MARCHID 0xf12
#define CSR_MIMPID 0xf13
#define CSR_MHARTID 0xf14
#define CSR_MCONFIGPTR 0xf15
/* Machine Trap Setup */
#define CSR_MSTATUS 0x300
@@ -646,8 +602,6 @@
/* Machine Counter Setup */
#define CSR_MCOUNTINHIBIT 0x320
#define CSR_MCYCLECFG 0x321
#define CSR_MINSTRETCFG 0x322
#define CSR_MHPMEVENT3 0x323
#define CSR_MHPMEVENT4 0x324
#define CSR_MHPMEVENT5 0x325
@@ -679,8 +633,6 @@
#define CSR_MHPMEVENT31 0x33f
/* For RV32 */
#define CSR_MCYCLECFGH 0x721
#define CSR_MINSTRETCFGH 0x722
#define CSR_MHPMEVENT3H 0x723
#define CSR_MHPMEVENT4H 0x724
#define CSR_MHPMEVENT5H 0x725
@@ -711,21 +663,6 @@
#define CSR_MHPMEVENT30H 0x73e
#define CSR_MHPMEVENT31H 0x73f
/* Machine Security Configuration CSR (mseccfg) */
#define CSR_MSECCFG 0x747
#define CSR_MSECCFGH 0x757
#define MSECCFG_MML_SHIFT (0)
#define MSECCFG_MML (_UL(1) << MSECCFG_MML_SHIFT)
#define MSECCFG_MMWP_SHIFT (1)
#define MSECCFG_MMWP (_UL(1) << MSECCFG_MMWP_SHIFT)
#define MSECCFG_RLB_SHIFT (2)
#define MSECCFG_RLB (_UL(1) << MSECCFG_RLB_SHIFT)
#define MSECCFG_USEED_SHIFT (8)
#define MSECCFG_USEED (_UL(1) << MSECCFG_USEED_SHIFT)
#define MSECCFG_SSEED_SHIFT (9)
#define MSECCFG_SSEED (_UL(1) << MSECCFG_SSEED_SHIFT)
/* Counter Overflow CSR */
#define CSR_SCOUNTOVF 0xda0
@@ -734,7 +671,6 @@
#define CSR_TDATA1 0x7a1
#define CSR_TDATA2 0x7a2
#define CSR_TDATA3 0x7a3
#define CSR_TINFO 0x7a4
/* Debug Mode Registers */
#define CSR_DCSR 0x7b0
@@ -742,14 +678,9 @@
#define CSR_DSCRATCH0 0x7b2
#define CSR_DSCRATCH1 0x7b3
/* Machine Indirect Register Alias */
/* Machine-Level Window to Indirectly Accessed Registers (AIA) */
#define CSR_MISELECT 0x350
#define CSR_MIREG 0x351
#define CSR_MIREG2 0x352
#define CSR_MIREG3 0x353
#define CSR_MIREG4 0x355
#define CSR_MIREG5 0x356
#define CSR_MIREG6 0x357
/* Machine-Level Interrupts (AIA) */
#define CSR_MTOPEI 0x35c
@@ -777,12 +708,6 @@
#define CSR_MVIPH 0x319
#define CSR_MIPH 0x354
/* Vector extension registers */
#define CSR_VSTART 0x8
#define CSR_VL 0xc20
#define CSR_VTYPE 0xc21
#define CSR_VLENB 0xc22
/* ===== Trap/Exception Causes ===== */
#define CAUSE_MISALIGNED_FETCH 0x0
@@ -800,8 +725,6 @@
#define CAUSE_FETCH_PAGE_FAULT 0xc
#define CAUSE_LOAD_PAGE_FAULT 0xd
#define CAUSE_STORE_PAGE_FAULT 0xf
#define CAUSE_DOUBLE_TRAP 0x10
#define CAUSE_SW_CHECK_EXCP 0x12
#define CAUSE_FETCH_GUEST_PAGE_FAULT 0x14
#define CAUSE_LOAD_GUEST_PAGE_FAULT 0x15
#define CAUSE_VIRTUAL_INST_FAULT 0x16
@@ -813,8 +736,6 @@
#define SMSTATEEN0_CS (_ULL(1) << SMSTATEEN0_CS_SHIFT)
#define SMSTATEEN0_FCSR_SHIFT 1
#define SMSTATEEN0_FCSR (_ULL(1) << SMSTATEEN0_FCSR_SHIFT)
#define SMSTATEEN0_CTR_SHIFT 54
#define SMSTATEEN0_CTR (_ULL(1) << SMSTATEEN0_CTR_SHIFT)
#define SMSTATEEN0_CONTEXT_SHIFT 57
#define SMSTATEEN0_CONTEXT (_ULL(1) << SMSTATEEN0_CONTEXT_SHIFT)
#define SMSTATEEN0_IMSIC_SHIFT 58
@@ -900,378 +821,17 @@
#define INSN_MATCH_C_FSWSP 0xe002
#define INSN_MASK_C_FSWSP 0xe003
#define INSN_MATCH_C_LHU 0x8400
#define INSN_MASK_C_LHU 0xfc43
#define INSN_MATCH_C_LH 0x8440
#define INSN_MASK_C_LH 0xfc43
#define INSN_MATCH_C_SH 0x8c00
#define INSN_MASK_C_SH 0xfc43
#define INSN_MASK_WFI 0xffffff00
#define INSN_MATCH_WFI 0x10500000
#define INSN_MASK_FENCE_TSO 0xffffffff
#define INSN_MATCH_FENCE_TSO 0x8330000f
#define INSN_MASK_VECTOR_UNIT_STRIDE 0xfdf0707f
#define INSN_MASK_VECTOR_FAULT_ONLY_FIRST 0xfdf0707f
#define INSN_MASK_VECTOR_STRIDE 0xfc00707f
#define INSN_MASK_VECTOR_WHOLE_REG 0xfff0707f
#define INSN_MASK_VECTOR_INDEXED 0xfc00707f
#define INSN_MATCH_VLUXSEG(n, bits) ((((n) - 1) << 29) | 0x04000007 | \
((bits) == 16 ? 5 : (bits) == 32 ? 6 : 7) << 12)
#define INSN_MATCH_VSUXSEG(n, bits) ((((n) - 1) << 29) | 0x04000027 | \
((bits) == 16 ? 5 : (bits) == 32 ? 6 : 7) << 12)
#define INSN_MATCH_VLOXSEG(n, bits) ((((n) - 1) << 29) | 0x0c000007 | \
((bits) == 16 ? 5 : (bits) == 32 ? 6 : 7) << 12)
#define INSN_MATCH_VSOXSEG(n, bits) ((((n) - 1) << 29) | 0x0c000027 | \
((bits) == 16 ? 5 : (bits) == 32 ? 6 : 7) << 12)
#define INSN_MATCH_VLSSEG(n, bits) ((((n) - 1) << 29) | 0x08000007 | \
((bits) == 16 ? 5 : (bits) == 32 ? 6 : 7) << 12)
#define INSN_MATCH_VSSSEG(n, bits) ((((n) - 1) << 29) | 0x08000027 | \
((bits) == 16 ? 5 : (bits) == 32 ? 6 : 7) << 12)
#define INSN_MATCH_VSSEG(n, bits) ((((n) - 1) << 29) | 0x00004027 | \
((bits) == 16 ? 5 : (bits) == 32 ? 6 : 7) << 12)
#define INSN_MATCH_VLSEG(n, bits) ((((n) - 1) << 29) | 0x00004007 | \
((bits) == 16 ? 5 : (bits) == 32 ? 6 : 7) << 12)
#define INSN_MATCH_VLSEGFF(n, bits) ((((n) - 1) << 29) | 0x1000007 | \
((bits) == 16 ? 5 : (bits) == 32 ? 6 : 7) << 12)
#define INSN_MATCH_VLE16V 0x00005007
#define INSN_MATCH_VLE32V 0x00006007
#define INSN_MATCH_VLE64V 0x00007007
#define INSN_MATCH_VSE16V 0x00005027
#define INSN_MATCH_VSE32V 0x00006027
#define INSN_MATCH_VSE64V 0x00007027
#define INSN_MATCH_VLSE16V 0x08005007
#define INSN_MATCH_VLSE32V 0x08006007
#define INSN_MATCH_VLSE64V 0x08007007
#define INSN_MATCH_VSSE16V 0x08005027
#define INSN_MATCH_VSSE32V 0x08006027
#define INSN_MATCH_VSSE64V 0x08007027
#define INSN_MATCH_VLOXEI16V 0x0c005007
#define INSN_MATCH_VLOXEI32V 0x0c006007
#define INSN_MATCH_VLOXEI64V 0x0c007007
#define INSN_MATCH_VSOXEI16V 0x0c005027
#define INSN_MATCH_VSOXEI32V 0x0c006027
#define INSN_MATCH_VSOXEI64V 0x0c007027
#define INSN_MATCH_VLUXEI16V 0x04005007
#define INSN_MATCH_VLUXEI32V 0x04006007
#define INSN_MATCH_VLUXEI64V 0x04007007
#define INSN_MATCH_VSUXEI16V 0x04005027
#define INSN_MATCH_VSUXEI32V 0x04006027
#define INSN_MATCH_VSUXEI64V 0x04007027
#define INSN_MATCH_VLE16FFV 0x01005007
#define INSN_MATCH_VLE32FFV 0x01006007
#define INSN_MATCH_VLE64FFV 0x01007007
#define INSN_MATCH_VL1RE8V 0x02800007
#define INSN_MATCH_VL1RE16V 0x02805007
#define INSN_MATCH_VL1RE32V 0x02806007
#define INSN_MATCH_VL1RE64V 0x02807007
#define INSN_MATCH_VL2RE8V 0x22800007
#define INSN_MATCH_VL2RE16V 0x22805007
#define INSN_MATCH_VL2RE32V 0x22806007
#define INSN_MATCH_VL2RE64V 0x22807007
#define INSN_MATCH_VL4RE8V 0x62800007
#define INSN_MATCH_VL4RE16V 0x62805007
#define INSN_MATCH_VL4RE32V 0x62806007
#define INSN_MATCH_VL4RE64V 0x62807007
#define INSN_MATCH_VL8RE8V 0xe2800007
#define INSN_MATCH_VL8RE16V 0xe2805007
#define INSN_MATCH_VL8RE32V 0xe2806007
#define INSN_MATCH_VL8RE64V 0xe2807007
#define INSN_MATCH_VS1RV 0x02800027
#define INSN_MATCH_VS2RV 0x22800027
#define INSN_MATCH_VS4RV 0x62800027
#define INSN_MATCH_VS8RV 0xe2800027
#define INSN_OPCODE_MASK 0x7f
#define INSN_OPCODE_VECTOR_LOAD 0x07
#define INSN_OPCODE_VECTOR_STORE 0x27
#define INSN_OPCODE_AMO 0x2f
#define IS_VECTOR_LOAD_STORE(insn) \
((((insn) & INSN_OPCODE_MASK) == INSN_OPCODE_VECTOR_LOAD) || \
(((insn) & INSN_OPCODE_MASK) == INSN_OPCODE_VECTOR_STORE))
#define IS_VECTOR_INSN_MATCH(insn, match, mask) \
(((insn) & (mask)) == ((match) & (mask)))
#define IS_UNIT_STRIDE_MATCH(insn, match) \
IS_VECTOR_INSN_MATCH(insn, match, INSN_MASK_VECTOR_UNIT_STRIDE)
#define IS_STRIDE_MATCH(insn, match) \
IS_VECTOR_INSN_MATCH(insn, match, INSN_MASK_VECTOR_STRIDE)
#define IS_INDEXED_MATCH(insn, match) \
IS_VECTOR_INSN_MATCH(insn, match, INSN_MASK_VECTOR_INDEXED)
#define IS_FAULT_ONLY_FIRST_MATCH(insn, match) \
IS_VECTOR_INSN_MATCH(insn, match, INSN_MASK_VECTOR_FAULT_ONLY_FIRST)
#define IS_WHOLE_REG_MATCH(insn, match) \
IS_VECTOR_INSN_MATCH(insn, match, INSN_MASK_VECTOR_WHOLE_REG)
#define IS_UNIT_STRIDE_LOAD(insn) ( \
IS_UNIT_STRIDE_MATCH(insn, INSN_MATCH_VLE16V) || \
IS_UNIT_STRIDE_MATCH(insn, INSN_MATCH_VLE32V) || \
IS_UNIT_STRIDE_MATCH(insn, INSN_MATCH_VLE64V) || \
IS_UNIT_STRIDE_MATCH(insn, INSN_MATCH_VLSEG(2, 16)) || \
IS_UNIT_STRIDE_MATCH(insn, INSN_MATCH_VLSEG(3, 16)) || \
IS_UNIT_STRIDE_MATCH(insn, INSN_MATCH_VLSEG(4, 16)) || \
IS_UNIT_STRIDE_MATCH(insn, INSN_MATCH_VLSEG(5, 16)) || \
IS_UNIT_STRIDE_MATCH(insn, INSN_MATCH_VLSEG(6, 16)) || \
IS_UNIT_STRIDE_MATCH(insn, INSN_MATCH_VLSEG(7, 16)) || \
IS_UNIT_STRIDE_MATCH(insn, INSN_MATCH_VLSEG(8, 16)) || \
IS_UNIT_STRIDE_MATCH(insn, INSN_MATCH_VLSEG(2, 32)) || \
IS_UNIT_STRIDE_MATCH(insn, INSN_MATCH_VLSEG(3, 32)) || \
IS_UNIT_STRIDE_MATCH(insn, INSN_MATCH_VLSEG(4, 32)) || \
IS_UNIT_STRIDE_MATCH(insn, INSN_MATCH_VLSEG(5, 32)) || \
IS_UNIT_STRIDE_MATCH(insn, INSN_MATCH_VLSEG(6, 32)) || \
IS_UNIT_STRIDE_MATCH(insn, INSN_MATCH_VLSEG(7, 32)) || \
IS_UNIT_STRIDE_MATCH(insn, INSN_MATCH_VLSEG(8, 32)) || \
IS_UNIT_STRIDE_MATCH(insn, INSN_MATCH_VLSEG(2, 64)) || \
IS_UNIT_STRIDE_MATCH(insn, INSN_MATCH_VLSEG(3, 64)) || \
IS_UNIT_STRIDE_MATCH(insn, INSN_MATCH_VLSEG(4, 64)) || \
IS_UNIT_STRIDE_MATCH(insn, INSN_MATCH_VLSEG(5, 64)) || \
IS_UNIT_STRIDE_MATCH(insn, INSN_MATCH_VLSEG(6, 64)) || \
IS_UNIT_STRIDE_MATCH(insn, INSN_MATCH_VLSEG(7, 64)) || \
IS_UNIT_STRIDE_MATCH(insn, INSN_MATCH_VLSEG(8, 64)))
#define IS_UNIT_STRIDE_STORE(insn) ( \
IS_UNIT_STRIDE_MATCH(insn, INSN_MATCH_VSE16V) || \
IS_UNIT_STRIDE_MATCH(insn, INSN_MATCH_VSE32V) || \
IS_UNIT_STRIDE_MATCH(insn, INSN_MATCH_VSE64V) || \
IS_UNIT_STRIDE_MATCH(insn, INSN_MATCH_VSSEG(2, 16)) || \
IS_UNIT_STRIDE_MATCH(insn, INSN_MATCH_VSSEG(3, 16)) || \
IS_UNIT_STRIDE_MATCH(insn, INSN_MATCH_VSSEG(4, 16)) || \
IS_UNIT_STRIDE_MATCH(insn, INSN_MATCH_VSSEG(5, 16)) || \
IS_UNIT_STRIDE_MATCH(insn, INSN_MATCH_VSSEG(6, 16)) || \
IS_UNIT_STRIDE_MATCH(insn, INSN_MATCH_VSSEG(7, 16)) || \
IS_UNIT_STRIDE_MATCH(insn, INSN_MATCH_VSSEG(8, 16)) || \
IS_UNIT_STRIDE_MATCH(insn, INSN_MATCH_VSSEG(2, 32)) || \
IS_UNIT_STRIDE_MATCH(insn, INSN_MATCH_VSSEG(3, 32)) || \
IS_UNIT_STRIDE_MATCH(insn, INSN_MATCH_VSSEG(4, 32)) || \
IS_UNIT_STRIDE_MATCH(insn, INSN_MATCH_VSSEG(5, 32)) || \
IS_UNIT_STRIDE_MATCH(insn, INSN_MATCH_VSSEG(6, 32)) || \
IS_UNIT_STRIDE_MATCH(insn, INSN_MATCH_VSSEG(7, 32)) || \
IS_UNIT_STRIDE_MATCH(insn, INSN_MATCH_VSSEG(8, 32)) || \
IS_UNIT_STRIDE_MATCH(insn, INSN_MATCH_VSSEG(2, 64)) || \
IS_UNIT_STRIDE_MATCH(insn, INSN_MATCH_VSSEG(3, 64)) || \
IS_UNIT_STRIDE_MATCH(insn, INSN_MATCH_VSSEG(4, 64)) || \
IS_UNIT_STRIDE_MATCH(insn, INSN_MATCH_VSSEG(5, 64)) || \
IS_UNIT_STRIDE_MATCH(insn, INSN_MATCH_VSSEG(6, 64)) || \
IS_UNIT_STRIDE_MATCH(insn, INSN_MATCH_VSSEG(7, 64)) || \
IS_UNIT_STRIDE_MATCH(insn, INSN_MATCH_VSSEG(8, 64)))
#define IS_STRIDE_LOAD(insn) ( \
IS_STRIDE_MATCH(insn, INSN_MATCH_VLSE16V) || \
IS_STRIDE_MATCH(insn, INSN_MATCH_VLSE32V) || \
IS_STRIDE_MATCH(insn, INSN_MATCH_VLSE64V) || \
IS_STRIDE_MATCH(insn, INSN_MATCH_VLSSEG(2, 16)) || \
IS_STRIDE_MATCH(insn, INSN_MATCH_VLSSEG(3, 16)) || \
IS_STRIDE_MATCH(insn, INSN_MATCH_VLSSEG(4, 16)) || \
IS_STRIDE_MATCH(insn, INSN_MATCH_VLSSEG(5, 16)) || \
IS_STRIDE_MATCH(insn, INSN_MATCH_VLSSEG(6, 16)) || \
IS_STRIDE_MATCH(insn, INSN_MATCH_VLSSEG(7, 16)) || \
IS_STRIDE_MATCH(insn, INSN_MATCH_VLSSEG(8, 16)) || \
IS_STRIDE_MATCH(insn, INSN_MATCH_VLSSEG(2, 32)) || \
IS_STRIDE_MATCH(insn, INSN_MATCH_VLSSEG(3, 32)) || \
IS_STRIDE_MATCH(insn, INSN_MATCH_VLSSEG(4, 32)) || \
IS_STRIDE_MATCH(insn, INSN_MATCH_VLSSEG(5, 32)) || \
IS_STRIDE_MATCH(insn, INSN_MATCH_VLSSEG(6, 32)) || \
IS_STRIDE_MATCH(insn, INSN_MATCH_VLSSEG(7, 32)) || \
IS_STRIDE_MATCH(insn, INSN_MATCH_VLSSEG(8, 32)) || \
IS_STRIDE_MATCH(insn, INSN_MATCH_VLSSEG(2, 64)) || \
IS_STRIDE_MATCH(insn, INSN_MATCH_VLSSEG(3, 64)) || \
IS_STRIDE_MATCH(insn, INSN_MATCH_VLSSEG(4, 64)) || \
IS_STRIDE_MATCH(insn, INSN_MATCH_VLSSEG(5, 64)) || \
IS_STRIDE_MATCH(insn, INSN_MATCH_VLSSEG(6, 64)) || \
IS_STRIDE_MATCH(insn, INSN_MATCH_VLSSEG(7, 64)) || \
IS_STRIDE_MATCH(insn, INSN_MATCH_VLSSEG(8, 64)))
#define IS_STRIDE_STORE(insn) ( \
IS_STRIDE_MATCH(insn, INSN_MATCH_VSSE16V) || \
IS_STRIDE_MATCH(insn, INSN_MATCH_VSSE32V) || \
IS_STRIDE_MATCH(insn, INSN_MATCH_VSSE64V) || \
IS_STRIDE_MATCH(insn, INSN_MATCH_VSSSEG(2, 16)) || \
IS_STRIDE_MATCH(insn, INSN_MATCH_VSSSEG(3, 16)) || \
IS_STRIDE_MATCH(insn, INSN_MATCH_VSSSEG(4, 16)) || \
IS_STRIDE_MATCH(insn, INSN_MATCH_VSSSEG(5, 16)) || \
IS_STRIDE_MATCH(insn, INSN_MATCH_VSSSEG(6, 16)) || \
IS_STRIDE_MATCH(insn, INSN_MATCH_VSSSEG(7, 16)) || \
IS_STRIDE_MATCH(insn, INSN_MATCH_VSSSEG(8, 16)) || \
IS_STRIDE_MATCH(insn, INSN_MATCH_VSSSEG(2, 32)) || \
IS_STRIDE_MATCH(insn, INSN_MATCH_VSSSEG(3, 32)) || \
IS_STRIDE_MATCH(insn, INSN_MATCH_VSSSEG(4, 32)) || \
IS_STRIDE_MATCH(insn, INSN_MATCH_VSSSEG(5, 32)) || \
IS_STRIDE_MATCH(insn, INSN_MATCH_VSSSEG(6, 32)) || \
IS_STRIDE_MATCH(insn, INSN_MATCH_VSSSEG(7, 32)) || \
IS_STRIDE_MATCH(insn, INSN_MATCH_VSSSEG(8, 32)) || \
IS_STRIDE_MATCH(insn, INSN_MATCH_VSSSEG(2, 64)) || \
IS_STRIDE_MATCH(insn, INSN_MATCH_VSSSEG(3, 64)) || \
IS_STRIDE_MATCH(insn, INSN_MATCH_VSSSEG(4, 64)) || \
IS_STRIDE_MATCH(insn, INSN_MATCH_VSSSEG(5, 64)) || \
IS_STRIDE_MATCH(insn, INSN_MATCH_VSSSEG(6, 64)) || \
IS_STRIDE_MATCH(insn, INSN_MATCH_VSSSEG(7, 64)) || \
IS_STRIDE_MATCH(insn, INSN_MATCH_VSSSEG(8, 64)))
#define IS_INDEXED_LOAD(insn) ( \
IS_INDEXED_MATCH(insn, INSN_MATCH_VLOXEI16V) || \
IS_INDEXED_MATCH(insn, INSN_MATCH_VLOXEI32V) || \
IS_INDEXED_MATCH(insn, INSN_MATCH_VLOXEI64V) || \
IS_INDEXED_MATCH(insn, INSN_MATCH_VLUXEI16V) || \
IS_INDEXED_MATCH(insn, INSN_MATCH_VLUXEI32V) || \
IS_INDEXED_MATCH(insn, INSN_MATCH_VLUXEI64V) || \
IS_INDEXED_MATCH(insn, INSN_MATCH_VLUXSEG(2, 16)) || \
IS_INDEXED_MATCH(insn, INSN_MATCH_VLUXSEG(3, 16)) || \
IS_INDEXED_MATCH(insn, INSN_MATCH_VLUXSEG(4, 16)) || \
IS_INDEXED_MATCH(insn, INSN_MATCH_VLUXSEG(5, 16)) || \
IS_INDEXED_MATCH(insn, INSN_MATCH_VLUXSEG(6, 16)) || \
IS_INDEXED_MATCH(insn, INSN_MATCH_VLUXSEG(7, 16)) || \
IS_INDEXED_MATCH(insn, INSN_MATCH_VLUXSEG(8, 16)) || \
IS_INDEXED_MATCH(insn, INSN_MATCH_VLUXSEG(2, 32)) || \
IS_INDEXED_MATCH(insn, INSN_MATCH_VLUXSEG(3, 32)) || \
IS_INDEXED_MATCH(insn, INSN_MATCH_VLUXSEG(4, 32)) || \
IS_INDEXED_MATCH(insn, INSN_MATCH_VLUXSEG(5, 32)) || \
IS_INDEXED_MATCH(insn, INSN_MATCH_VLUXSEG(6, 32)) || \
IS_INDEXED_MATCH(insn, INSN_MATCH_VLUXSEG(7, 32)) || \
IS_INDEXED_MATCH(insn, INSN_MATCH_VLUXSEG(8, 32)) || \
IS_INDEXED_MATCH(insn, INSN_MATCH_VLUXSEG(2, 64)) || \
IS_INDEXED_MATCH(insn, INSN_MATCH_VLUXSEG(3, 64)) || \
IS_INDEXED_MATCH(insn, INSN_MATCH_VLUXSEG(4, 64)) || \
IS_INDEXED_MATCH(insn, INSN_MATCH_VLUXSEG(5, 64)) || \
IS_INDEXED_MATCH(insn, INSN_MATCH_VLUXSEG(6, 64)) || \
IS_INDEXED_MATCH(insn, INSN_MATCH_VLUXSEG(7, 64)) || \
IS_INDEXED_MATCH(insn, INSN_MATCH_VLUXSEG(8, 64)) || \
IS_INDEXED_MATCH(insn, INSN_MATCH_VLOXSEG(2, 16)) || \
IS_INDEXED_MATCH(insn, INSN_MATCH_VLOXSEG(3, 16)) || \
IS_INDEXED_MATCH(insn, INSN_MATCH_VLOXSEG(4, 16)) || \
IS_INDEXED_MATCH(insn, INSN_MATCH_VLOXSEG(5, 16)) || \
IS_INDEXED_MATCH(insn, INSN_MATCH_VLOXSEG(6, 16)) || \
IS_INDEXED_MATCH(insn, INSN_MATCH_VLOXSEG(7, 16)) || \
IS_INDEXED_MATCH(insn, INSN_MATCH_VLOXSEG(8, 16)) || \
IS_INDEXED_MATCH(insn, INSN_MATCH_VLOXSEG(2, 32)) || \
IS_INDEXED_MATCH(insn, INSN_MATCH_VLOXSEG(3, 32)) || \
IS_INDEXED_MATCH(insn, INSN_MATCH_VLOXSEG(4, 32)) || \
IS_INDEXED_MATCH(insn, INSN_MATCH_VLOXSEG(5, 32)) || \
IS_INDEXED_MATCH(insn, INSN_MATCH_VLOXSEG(6, 32)) || \
IS_INDEXED_MATCH(insn, INSN_MATCH_VLOXSEG(7, 32)) || \
IS_INDEXED_MATCH(insn, INSN_MATCH_VLOXSEG(8, 32)) || \
IS_INDEXED_MATCH(insn, INSN_MATCH_VLOXSEG(2, 64)) || \
IS_INDEXED_MATCH(insn, INSN_MATCH_VLOXSEG(3, 64)) || \
IS_INDEXED_MATCH(insn, INSN_MATCH_VLOXSEG(4, 64)) || \
IS_INDEXED_MATCH(insn, INSN_MATCH_VLOXSEG(5, 64)) || \
IS_INDEXED_MATCH(insn, INSN_MATCH_VLOXSEG(6, 64)) || \
IS_INDEXED_MATCH(insn, INSN_MATCH_VLOXSEG(7, 64)) || \
IS_INDEXED_MATCH(insn, INSN_MATCH_VLOXSEG(8, 64)))
#define IS_INDEXED_STORE(insn) ( \
IS_INDEXED_MATCH(insn, INSN_MATCH_VSUXEI16V) || \
IS_INDEXED_MATCH(insn, INSN_MATCH_VSUXEI32V) || \
IS_INDEXED_MATCH(insn, INSN_MATCH_VSUXEI64V) || \
IS_INDEXED_MATCH(insn, INSN_MATCH_VSOXEI16V) || \
IS_INDEXED_MATCH(insn, INSN_MATCH_VSOXEI32V) || \
IS_INDEXED_MATCH(insn, INSN_MATCH_VSOXEI64V) || \
IS_INDEXED_MATCH(insn, INSN_MATCH_VSUXSEG(2, 16)) || \
IS_INDEXED_MATCH(insn, INSN_MATCH_VSUXSEG(3, 16)) || \
IS_INDEXED_MATCH(insn, INSN_MATCH_VSUXSEG(4, 16)) || \
IS_INDEXED_MATCH(insn, INSN_MATCH_VSUXSEG(5, 16)) || \
IS_INDEXED_MATCH(insn, INSN_MATCH_VSUXSEG(6, 16)) || \
IS_INDEXED_MATCH(insn, INSN_MATCH_VSUXSEG(7, 16)) || \
IS_INDEXED_MATCH(insn, INSN_MATCH_VSUXSEG(8, 16)) || \
IS_INDEXED_MATCH(insn, INSN_MATCH_VSUXSEG(2, 32)) || \
IS_INDEXED_MATCH(insn, INSN_MATCH_VSUXSEG(3, 32)) || \
IS_INDEXED_MATCH(insn, INSN_MATCH_VSUXSEG(4, 32)) || \
IS_INDEXED_MATCH(insn, INSN_MATCH_VSUXSEG(5, 32)) || \
IS_INDEXED_MATCH(insn, INSN_MATCH_VSUXSEG(6, 32)) || \
IS_INDEXED_MATCH(insn, INSN_MATCH_VSUXSEG(7, 32)) || \
IS_INDEXED_MATCH(insn, INSN_MATCH_VSUXSEG(8, 32)) || \
IS_INDEXED_MATCH(insn, INSN_MATCH_VSUXSEG(2, 64)) || \
IS_INDEXED_MATCH(insn, INSN_MATCH_VSUXSEG(3, 64)) || \
IS_INDEXED_MATCH(insn, INSN_MATCH_VSUXSEG(4, 64)) || \
IS_INDEXED_MATCH(insn, INSN_MATCH_VSUXSEG(5, 64)) || \
IS_INDEXED_MATCH(insn, INSN_MATCH_VSUXSEG(6, 64)) || \
IS_INDEXED_MATCH(insn, INSN_MATCH_VSUXSEG(7, 64)) || \
IS_INDEXED_MATCH(insn, INSN_MATCH_VSUXSEG(8, 64)) || \
IS_INDEXED_MATCH(insn, INSN_MATCH_VSOXSEG(2, 16)) || \
IS_INDEXED_MATCH(insn, INSN_MATCH_VSOXSEG(3, 16)) || \
IS_INDEXED_MATCH(insn, INSN_MATCH_VSOXSEG(4, 16)) || \
IS_INDEXED_MATCH(insn, INSN_MATCH_VSOXSEG(5, 16)) || \
IS_INDEXED_MATCH(insn, INSN_MATCH_VSOXSEG(6, 16)) || \
IS_INDEXED_MATCH(insn, INSN_MATCH_VSOXSEG(7, 16)) || \
IS_INDEXED_MATCH(insn, INSN_MATCH_VSOXSEG(8, 16)) || \
IS_INDEXED_MATCH(insn, INSN_MATCH_VSOXSEG(2, 32)) || \
IS_INDEXED_MATCH(insn, INSN_MATCH_VSOXSEG(3, 32)) || \
IS_INDEXED_MATCH(insn, INSN_MATCH_VSOXSEG(4, 32)) || \
IS_INDEXED_MATCH(insn, INSN_MATCH_VSOXSEG(5, 32)) || \
IS_INDEXED_MATCH(insn, INSN_MATCH_VSOXSEG(6, 32)) || \
IS_INDEXED_MATCH(insn, INSN_MATCH_VSOXSEG(7, 32)) || \
IS_INDEXED_MATCH(insn, INSN_MATCH_VSOXSEG(8, 32)) || \
IS_INDEXED_MATCH(insn, INSN_MATCH_VSOXSEG(2, 64)) || \
IS_INDEXED_MATCH(insn, INSN_MATCH_VSOXSEG(3, 64)) || \
IS_INDEXED_MATCH(insn, INSN_MATCH_VSOXSEG(4, 64)) || \
IS_INDEXED_MATCH(insn, INSN_MATCH_VSOXSEG(5, 64)) || \
IS_INDEXED_MATCH(insn, INSN_MATCH_VSOXSEG(6, 64)) || \
IS_INDEXED_MATCH(insn, INSN_MATCH_VSOXSEG(7, 64)) || \
IS_INDEXED_MATCH(insn, INSN_MATCH_VSOXSEG(8, 64)))
#define IS_FAULT_ONLY_FIRST_LOAD(insn) ( \
IS_FAULT_ONLY_FIRST_MATCH(insn, INSN_MATCH_VLE16FFV) || \
IS_FAULT_ONLY_FIRST_MATCH(insn, INSN_MATCH_VLE32FFV) || \
IS_FAULT_ONLY_FIRST_MATCH(insn, INSN_MATCH_VLE64FFV) || \
IS_FAULT_ONLY_FIRST_MATCH(insn, INSN_MATCH_VLSEGFF(2, 16)) || \
IS_FAULT_ONLY_FIRST_MATCH(insn, INSN_MATCH_VLSEGFF(3, 16)) || \
IS_FAULT_ONLY_FIRST_MATCH(insn, INSN_MATCH_VLSEGFF(4, 16)) || \
IS_FAULT_ONLY_FIRST_MATCH(insn, INSN_MATCH_VLSEGFF(5, 16)) || \
IS_FAULT_ONLY_FIRST_MATCH(insn, INSN_MATCH_VLSEGFF(6, 16)) || \
IS_FAULT_ONLY_FIRST_MATCH(insn, INSN_MATCH_VLSEGFF(7, 16)) || \
IS_FAULT_ONLY_FIRST_MATCH(insn, INSN_MATCH_VLSEGFF(8, 16)) || \
IS_FAULT_ONLY_FIRST_MATCH(insn, INSN_MATCH_VLSEGFF(2, 32)) || \
IS_FAULT_ONLY_FIRST_MATCH(insn, INSN_MATCH_VLSEGFF(3, 32)) || \
IS_FAULT_ONLY_FIRST_MATCH(insn, INSN_MATCH_VLSEGFF(4, 32)) || \
IS_FAULT_ONLY_FIRST_MATCH(insn, INSN_MATCH_VLSEGFF(5, 32)) || \
IS_FAULT_ONLY_FIRST_MATCH(insn, INSN_MATCH_VLSEGFF(6, 32)) || \
IS_FAULT_ONLY_FIRST_MATCH(insn, INSN_MATCH_VLSEGFF(7, 32)) || \
IS_FAULT_ONLY_FIRST_MATCH(insn, INSN_MATCH_VLSEGFF(8, 32)) || \
IS_FAULT_ONLY_FIRST_MATCH(insn, INSN_MATCH_VLSEGFF(2, 64)) || \
IS_FAULT_ONLY_FIRST_MATCH(insn, INSN_MATCH_VLSEGFF(3, 64)) || \
IS_FAULT_ONLY_FIRST_MATCH(insn, INSN_MATCH_VLSEGFF(4, 64)) || \
IS_FAULT_ONLY_FIRST_MATCH(insn, INSN_MATCH_VLSEGFF(5, 64)) || \
IS_FAULT_ONLY_FIRST_MATCH(insn, INSN_MATCH_VLSEGFF(6, 64)) || \
IS_FAULT_ONLY_FIRST_MATCH(insn, INSN_MATCH_VLSEGFF(7, 64)) || \
IS_FAULT_ONLY_FIRST_MATCH(insn, INSN_MATCH_VLSEGFF(8, 64)))
#define IS_WHOLE_REG_LOAD(insn) ( \
IS_WHOLE_REG_MATCH(insn, INSN_MATCH_VL1RE8V) || \
IS_WHOLE_REG_MATCH(insn, INSN_MATCH_VL1RE16V) || \
IS_WHOLE_REG_MATCH(insn, INSN_MATCH_VL1RE32V) || \
IS_WHOLE_REG_MATCH(insn, INSN_MATCH_VL1RE64V) || \
IS_WHOLE_REG_MATCH(insn, INSN_MATCH_VL2RE8V) || \
IS_WHOLE_REG_MATCH(insn, INSN_MATCH_VL2RE16V) || \
IS_WHOLE_REG_MATCH(insn, INSN_MATCH_VL2RE32V) || \
IS_WHOLE_REG_MATCH(insn, INSN_MATCH_VL2RE64V) || \
IS_WHOLE_REG_MATCH(insn, INSN_MATCH_VL4RE8V) || \
IS_WHOLE_REG_MATCH(insn, INSN_MATCH_VL4RE16V) || \
IS_WHOLE_REG_MATCH(insn, INSN_MATCH_VL4RE32V) || \
IS_WHOLE_REG_MATCH(insn, INSN_MATCH_VL4RE64V) || \
IS_WHOLE_REG_MATCH(insn, INSN_MATCH_VL8RE8V) || \
IS_WHOLE_REG_MATCH(insn, INSN_MATCH_VL8RE16V) || \
IS_WHOLE_REG_MATCH(insn, INSN_MATCH_VL8RE32V) || \
IS_WHOLE_REG_MATCH(insn, INSN_MATCH_VL8RE64V))
#define IS_WHOLE_REG_STORE(insn) ( \
IS_WHOLE_REG_MATCH(insn, INSN_MATCH_VS1RV) || \
IS_WHOLE_REG_MATCH(insn, INSN_MATCH_VS2RV) || \
IS_WHOLE_REG_MATCH(insn, INSN_MATCH_VS4RV) || \
IS_WHOLE_REG_MATCH(insn, INSN_MATCH_VS8RV))
#if __riscv_xlen == 64
/* 64-bit read for VS-stage address translation (RV64) */
#define INSN_PSEUDO_VS_LOAD 0x00003000
/* 64-bit write for VS-stage address translation (RV64) */
#define INSN_PSEUDO_VS_STORE 0x00003020
@@ -1287,25 +847,6 @@
#error "Unexpected __riscv_xlen"
#endif
#define MASK_FUNCT3 0x7000
#define SHIFT_FUNCT3 12
#define MASK_RS1 0xf8000
#define MASK_RS2 0x1f00000
#define MASK_RD 0xf80
#define MASK_CSR 0xfff00000
#define SHIFT_CSR 20
#define MASK_AQRL 0x06000000
#define SHIFT_AQRL 25
#define VM_MASK 0x1
#define VIEW_MASK 0x3
#define VSEW_MASK 0x3
#define VLMUL_MASK 0x7
#define VD_MASK 0x1f
#define VS2_MASK 0x1f
#define INSN_16BIT_MASK 0x3
#define INSN_32BIT_MASK 0x1c
@@ -1317,12 +858,13 @@
#define INSN_LEN(insn) (INSN_IS_16BIT(insn) ? 2 : 4)
#define SH_VSEW 3
#define SH_VIEW 12
#define SH_VD 7
#define SH_VS2 20
#define SH_VM 25
#define SH_MEW 28
#if __riscv_xlen == 64
#define LOG_REGBYTES 3
#else
#define LOG_REGBYTES 2
#endif
#define REGBYTES (1 << LOG_REGBYTES)
#define SH_RD 7
#define SH_RS1 15
#define SH_RS2 20
@@ -1351,39 +893,28 @@
#define SHIFT_RIGHT(x, y) \
((y) < 0 ? ((x) << -(y)) : ((x) >> (y)))
#define GET_FUNC3(insn) ((insn & MASK_FUNCT3) >> SHIFT_FUNCT3)
#define GET_RM(insn) GET_FUNC3(insn)
#define GET_RS1_NUM(insn) ((insn & MASK_RS1) >> SH_RS1)
#define GET_RS2_NUM(insn) ((insn & MASK_RS2) >> SH_RS2)
#define GET_RS1S_NUM(insn) RVC_RS1S(insn)
#define GET_RS2S_NUM(insn) RVC_RS2S(insn)
#define GET_RS2C_NUM(insn) RVC_RS2(insn)
#define GET_RD_NUM(insn) ((insn & MASK_RD) >> SH_RD)
#define GET_CSR_NUM(insn) ((insn & MASK_CSR) >> SHIFT_CSR)
#define GET_AQRL(insn) ((insn & MASK_AQRL) >> SHIFT_AQRL)
#define REG_MASK \
((1 << (5 + LOG_REGBYTES)) - (1 << LOG_REGBYTES))
#define REG_OFFSET(insn, pos) \
(SHIFT_RIGHT((insn), (pos) - LOG_REGBYTES) & REG_MASK)
#define REG_PTR(insn, pos, regs) \
(ulong *)((ulong)(regs) + REG_OFFSET(insn, pos))
#define GET_RM(insn) (((insn) >> 12) & 7)
#define GET_RS1(insn, regs) (*REG_PTR(insn, SH_RS1, regs))
#define GET_RS2(insn, regs) (*REG_PTR(insn, SH_RS2, regs))
#define GET_RS1S(insn, regs) (*REG_PTR(RVC_RS1S(insn), 0, regs))
#define GET_RS2S(insn, regs) (*REG_PTR(RVC_RS2S(insn), 0, regs))
#define GET_RS2C(insn, regs) (*REG_PTR(insn, SH_RS2C, regs))
#define GET_SP(regs) (*REG_PTR(2, 0, regs))
#define SET_RD(insn, regs, val) (*REG_PTR(insn, SH_RD, regs) = (val))
#define IMM_I(insn) ((s32)(insn) >> 20)
#define IMM_S(insn) (((s32)(insn) >> 25 << 5) | \
(s32)(((insn) >> 7) & 0x1f))
#define IS_MASKED(insn) (((insn >> SH_VM) & VM_MASK) == 0)
#define GET_VD(insn) ((insn >> SH_VD) & VD_MASK)
#define GET_VS2(insn) ((insn >> SH_VS2) & VS2_MASK)
#define GET_VIEW(insn) (((insn) >> SH_VIEW) & VIEW_MASK)
#define GET_MEW(insn) (((insn) >> SH_MEW) & 1)
#define GET_VSEW(vtype) (((vtype) >> SH_VSEW) & VSEW_MASK)
#define GET_VLMUL(vtype) ((vtype) & VLMUL_MASK)
#define GET_LEN(view) (1UL << (view))
#define GET_NF(insn) (1 + ((insn >> 29) & 7))
#define GET_VEMUL(vlmul, view, vsew) ((vlmul + view - vsew) & 7)
#define GET_EMUL(vemul) (1UL << ((vemul) >= 4 ? 0 : (vemul)))
#define CSRRW 1
#define CSRRS 2
#define CSRRC 3
#define CSRRWI 5
#define CSRRSI 6
#define CSRRCI 7
#define MASK_FUNCT3 0x7000
/* clang-format on */

View File

@@ -15,6 +15,7 @@
#include <sbi/sbi_types.h>
#define GET_PRECISION(insn) (((insn) >> 25) & 3)
#define GET_RM(insn) (((insn) >> 12) & 7)
#define PRECISION_S 0
#define PRECISION_D 1
@@ -83,7 +84,7 @@
#define GET_FFLAGS() csr_read(CSR_FFLAGS)
#define SET_FFLAGS(value) csr_write(CSR_FFLAGS, (value))
#define SET_FS_DIRTY(regs) (regs->mstatus |= MSTATUS_FS)
#define SET_FS_DIRTY() ((void)0)
#define GET_F32_RS1(insn, regs) (GET_F32_REG(insn, 15, regs))
#define GET_F32_RS2(insn, regs) (GET_F32_REG(insn, 20, regs))
@@ -92,9 +93,9 @@
#define GET_F64_RS2(insn, regs) (GET_F64_REG(insn, 20, regs))
#define GET_F64_RS3(insn, regs) (GET_F64_REG(insn, 27, regs))
#define SET_F32_RD(insn, regs, val) \
(SET_F32_REG(insn, 7, regs, val), SET_FS_DIRTY(regs))
(SET_F32_REG(insn, 7, regs, val), SET_FS_DIRTY())
#define SET_F64_RD(insn, regs, val) \
(SET_F64_REG(insn, 7, regs, val), SET_FS_DIRTY(regs))
(SET_F64_REG(insn, 7, regs, val), SET_FS_DIRTY())
#define GET_F32_RS2C(insn, regs) (GET_F32_REG(insn, 2, regs))
#define GET_F32_RS2S(insn, regs) (GET_F32_REG(RVC_RS2S(insn), 0, regs))

View File

@@ -62,11 +62,6 @@ static inline void bitmap_zero(unsigned long *dst, int nbits)
}
}
static inline int bitmap_test(unsigned long *bmap, int bit)
{
return __test_bit(bit, bmap);
}
static inline void bitmap_zero_except(unsigned long *dst,
int exception, int nbits)
{
@@ -130,17 +125,4 @@ static inline void bitmap_xor(unsigned long *dst, const unsigned long *src1,
__bitmap_xor(dst, src1, src2, nbits);
}
static inline int bitmap_weight(const unsigned long *src, int nbits)
{
int i, res = 0;
for (i = 0; i < nbits / BITS_PER_LONG; i++)
res += sbi_popcount(src[i]);
if (nbits % BITS_PER_LONG)
res += sbi_popcount(src[i] & BITMAP_LAST_WORD_MASK(nbits));
return res;
}
#endif

View File

@@ -14,8 +14,6 @@
#define BITS_PER_LONG (8 * __SIZEOF_LONG__)
#define BITS_PER_LONG_LONG 64
#define EXTRACT_FIELD(val, which) \
(((val) & (which)) / ((which) & ~((which)-1)))
#define INSERT_FIELD(val, which, fieldval) \
@@ -28,15 +26,10 @@
#define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG))
#define BIT_WORD(bit) ((bit) / BITS_PER_LONG)
#define BIT_WORD_OFFSET(bit) ((bit) & (BITS_PER_LONG - 1))
#define BIT_ALIGN(bit, align) (((bit) + ((align) - 1)) & ~((align) - 1))
#define BIT_ULL(nr) (1ULL << (nr))
#define GENMASK(h, l) \
(((~0UL) - (1UL << (l)) + 1) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
#define GENMASK_ULL(h, l) \
(((~0ULL) - (1ULL << (l)) + 1) & (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h))))
/**
* sbi_ffs - find first (less-significant) set bit in a long word.
* @word: The word to search
@@ -119,30 +112,6 @@ static inline unsigned long sbi_fls(unsigned long word)
return num;
}
/**
* sbi_popcount - find the number of set bit in a long word
* @word: the word to search
*/
static inline unsigned long sbi_popcount(unsigned long word)
{
unsigned long count;
#if BITS_PER_LONG == 64
count = word - ((word >> 1) & 0x5555555555555555ul);
count = (count & 0x3333333333333333ul) + ((count >> 2) & 0x3333333333333333ul);
count = (count + (count >> 4)) & 0x0F0F0F0F0F0F0F0Ful;
count = count + (count >> 8);
count = count + (count >> 16);
return (count + (count >> 32)) & 0x00000000000000FFul;
#else
count = word - ((word >> 1) & 0x55555555);
count = (count & 0x33333333) + ((count >> 2) & 0x33333333);
count = (count + (count >> 4)) & 0x0F0F0F0F;
count = count + (count >> 8);
return (count + (count >> 16)) & 0x000000FF;
#endif
}
#define for_each_set_bit(bit, addr, size) \
for ((bit) = find_first_bit((addr), (size)); \
(bit) < (size); \

View File

@@ -7,20 +7,15 @@
#ifndef __SBI_BYTEORDER_H__
#define __SBI_BYTEORDER_H__
#ifdef __ASSEMBLER__
# define _conv_cast(type, val) (val)
#else
# include <sbi/sbi_types.h>
# define _conv_cast(type, val) ((type)(val))
#endif
#include <sbi/sbi_types.h>
#define __BSWAP16(x) ((((x) & 0x00ff) << 8) | \
#define BSWAP16(x) ((((x) & 0x00ff) << 8) | \
(((x) & 0xff00) >> 8))
#define __BSWAP32(x) ((((x) & 0x000000ff) << 24) | \
#define BSWAP32(x) ((((x) & 0x000000ff) << 24) | \
(((x) & 0x0000ff00) << 8) | \
(((x) & 0x00ff0000) >> 8) | \
(((x) & 0xff000000) >> 24))
#define __BSWAP64(x) ((((x) & 0x00000000000000ffULL) << 56) | \
#define BSWAP64(x) ((((x) & 0x00000000000000ffULL) << 56) | \
(((x) & 0x000000000000ff00ULL) << 40) | \
(((x) & 0x0000000000ff0000ULL) << 24) | \
(((x) & 0x00000000ff000000ULL) << 8) | \
@@ -29,52 +24,38 @@
(((x) & 0x00ff000000000000ULL) >> 40) | \
(((x) & 0xff00000000000000ULL) >> 56))
#define BSWAP64(x) ({ uint64_t _sv = (x); __BSWAP64(_sv); })
#define BSWAP32(x) ({ uint32_t _sv = (x); __BSWAP32(_sv); })
#define BSWAP16(x) ({ uint16_t _sv = (x); __BSWAP16(_sv); })
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ /* CPU(little-endian) */
#define cpu_to_be16(x) _conv_cast(uint16_t, BSWAP16(x))
#define cpu_to_be32(x) _conv_cast(uint32_t, BSWAP32(x))
#define cpu_to_be64(x) _conv_cast(uint64_t, BSWAP64(x))
#define cpu_to_be16(x) ((uint16_t)BSWAP16(x))
#define cpu_to_be32(x) ((uint32_t)BSWAP32(x))
#define cpu_to_be64(x) ((uint64_t)BSWAP64(x))
#define be16_to_cpu(x) _conv_cast(uint16_t, BSWAP16(x))
#define be32_to_cpu(x) _conv_cast(uint32_t, BSWAP32(x))
#define be64_to_cpu(x) _conv_cast(uint64_t, BSWAP64(x))
#define be16_to_cpu(x) ((uint16_t)BSWAP16(x))
#define be32_to_cpu(x) ((uint32_t)BSWAP32(x))
#define be64_to_cpu(x) ((uint64_t)BSWAP64(x))
#define cpu_to_le16(x) _conv_cast(uint16_t, (x))
#define cpu_to_le32(x) _conv_cast(uint32_t, (x))
#define cpu_to_le64(x) _conv_cast(uint64_t, (x))
#define cpu_to_le16(x) ((uint16_t)(x))
#define cpu_to_le32(x) ((uint32_t)(x))
#define cpu_to_le64(x) ((uint64_t)(x))
#define le16_to_cpu(x) _conv_cast(uint16_t, (x))
#define le32_to_cpu(x) _conv_cast(uint32_t, (x))
#define le64_to_cpu(x) _conv_cast(uint64_t, (x))
#define le16_to_cpu(x) ((uint16_t)(x))
#define le32_to_cpu(x) ((uint32_t)(x))
#define le64_to_cpu(x) ((uint64_t)(x))
#else /* CPU(big-endian) */
#define cpu_to_be16(x) _conv_cast(uint16_t, (x))
#define cpu_to_be32(x) _conv_cast(uint32_t, (x))
#define cpu_to_be64(x) _conv_cast(uint64_t, (x))
#define cpu_to_be16(x) ((uint16_t)(x))
#define cpu_to_be32(x) ((uint32_t)(x))
#define cpu_to_be64(x) ((uint64_t)(x))
#define be16_to_cpu(x) _conv_cast(uint16_t, (x))
#define be32_to_cpu(x) _conv_cast(uint32_t, (x))
#define be64_to_cpu(x) _conv_cast(uint64_t, (x))
#define be16_to_cpu(x) ((uint16_t)(x))
#define be32_to_cpu(x) ((uint32_t)(x))
#define be64_to_cpu(x) ((uint64_t)(x))
#define cpu_to_le16(x) _conv_cast(uint16_t, BSWAP16(x))
#define cpu_to_le32(x) _conv_cast(uint32_t, BSWAP32(x))
#define cpu_to_le64(x) _conv_cast(uint64_t, BSWAP64(x))
#define cpu_to_le16(x) ((uint16_t)BSWAP16(x))
#define cpu_to_le32(x) ((uint32_t)BSWAP32(x))
#define cpu_to_le64(x) ((uint64_t)BSWAP64(x))
#define le16_to_cpu(x) _conv_cast(uint16_t, BSWAP16(x))
#define le32_to_cpu(x) _conv_cast(uint32_t, BSWAP32(x))
#define le64_to_cpu(x) _conv_cast(uint64_t, BSWAP64(x))
#endif
#if __riscv_xlen == 64
#define cpu_to_lle cpu_to_le64
#define lle_to_cpu le64_to_cpu
#elif __riscv_xlen == 32
#define cpu_to_lle cpu_to_le32
#define lle_to_cpu le32_to_cpu
#else
#error "Unknown __riscv_xlen"
#define le16_to_cpu(x) ((uint16_t)BSWAP16(x))
#define le32_to_cpu(x) ((uint32_t)BSWAP32(x))
#define le64_to_cpu(x) ((uint64_t)BSWAP64(x))
#endif
#endif /* __SBI_BYTEORDER_H__ */

View File

@@ -58,6 +58,8 @@ void sbi_console_set_device(const struct sbi_console_device *dev);
struct sbi_scratch;
int sbi_console_init(struct sbi_scratch *scratch);
#define SBI_ASSERT(cond, args) do { \
if (unlikely(!(cond))) \
sbi_panic args; \

View File

@@ -18,7 +18,7 @@
({ \
register ulong tinfo asm("a3") = (ulong)trap; \
register ulong ttmp asm("a4"); \
register ulong mtvec = (ulong)sbi_hart_expected_trap; \
register ulong mtvec = sbi_hart_expected_trap_addr(); \
register ulong ret = 0; \
((struct sbi_trap_info *)(trap))->cause = 0; \
asm volatile( \
@@ -37,7 +37,7 @@
({ \
register ulong tinfo asm("a3") = (ulong)trap; \
register ulong ttmp asm("a4"); \
register ulong mtvec = (ulong)sbi_hart_expected_trap; \
register ulong mtvec = sbi_hart_expected_trap_addr(); \
((struct sbi_trap_info *)(trap))->cause = 0; \
asm volatile( \
"add %[ttmp], %[tinfo], zero\n" \

View File

@@ -1,124 +0,0 @@
/*
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2023 Ventana Micro Systems, Inc.
*
* Authors:
* Himanshu Chauhan <hchauhan@ventanamicro.com>
*/
#ifndef __SBI_DBTR_H__
#define __SBI_DBTR_H__
#include <sbi/riscv_dbtr.h>
#include <sbi/sbi_types.h>
struct sbi_domain;
enum {
RV_DBTR_DECLARE_BIT(TS, MAPPED, 0), /* trigger mapped to hw trigger */
RV_DBTR_DECLARE_BIT(TS, U, 1),
RV_DBTR_DECLARE_BIT(TS, S, 2),
RV_DBTR_DECLARE_BIT(TS, VU, 3),
RV_DBTR_DECLARE_BIT(TS, VS, 4),
RV_DBTR_DECLARE_BIT(TS, HAVE_TRIG, 5), /* H/w dbtr details available */
RV_DBTR_DECLARE_BIT(TS, HW_IDX, 8), /* Hardware index of trigger */
};
enum {
RV_DBTR_DECLARE_BIT_MASK(TS, MAPPED, 1),
RV_DBTR_DECLARE_BIT_MASK(TS, U, 1),
RV_DBTR_DECLARE_BIT_MASK(TS, S, 1),
RV_DBTR_DECLARE_BIT_MASK(TS, VU, 1),
RV_DBTR_DECLARE_BIT_MASK(TS, VS, 1),
RV_DBTR_DECLARE_BIT_MASK(TS, HAVE_TRIG, 1),
RV_DBTR_DECLARE_BIT_MASK(TS, HW_IDX, (__riscv_xlen-9)),
};
#if __riscv_xlen == 64
#define SBI_DBTR_SHMEM_INVALID_ADDR 0xFFFFFFFFFFFFFFFFUL
#elif __riscv_xlen == 32
#define SBI_DBTR_SHMEM_INVALID_ADDR 0xFFFFFFFFUL
#else
#error "Unexpected __riscv_xlen"
#endif
struct sbi_dbtr_shmem {
unsigned long phys_lo;
unsigned long phys_hi;
};
struct sbi_dbtr_trigger {
unsigned long index;
unsigned long type_mask;
unsigned long state;
unsigned long tdata1;
unsigned long tdata2;
unsigned long tdata3;
};
struct sbi_dbtr_data_msg {
unsigned long tstate;
unsigned long tdata1;
unsigned long tdata2;
unsigned long tdata3;
};
struct sbi_dbtr_id_msg {
unsigned long idx;
};
struct sbi_dbtr_hart_triggers_state {
struct sbi_dbtr_trigger triggers[RV_MAX_TRIGGERS];
struct sbi_dbtr_shmem shmem;
u32 total_trigs;
u32 available_trigs;
u32 hartid;
u32 probed;
};
#define TDATA1_GET_TYPE(_t1) \
EXTRACT_FIELD(_t1, RV_DBTR_BIT_MASK(TDATA1, TYPE))
/* Set the hardware index of trigger in logical trigger state */
#define SET_TRIG_HW_INDEX(_state, _idx) \
do { \
_state &= ~RV_DBTR_BIT_MASK(TS, HW_IDX); \
_state |= (((unsigned long)_idx \
<< RV_DBTR_BIT(TS, HW_IDX)) \
& RV_DBTR_BIT_MASK(TS, HW_IDX)); \
}while (0);
/** SBI shared mem messages layout */
union sbi_dbtr_shmem_entry {
struct sbi_dbtr_data_msg data;
struct sbi_dbtr_id_msg id;
};
#define SBI_DBTR_SHMEM_ALIGN_MASK ((__riscv_xlen / 8) - 1)
/** Initialize debug triggers */
int sbi_dbtr_init(struct sbi_scratch *scratch, bool coldboot);
/** SBI DBTR extension functions */
int sbi_dbtr_supported(void);
int sbi_dbtr_setup_shmem(const struct sbi_domain *dom, unsigned long smode,
unsigned long shmem_phys_lo,
unsigned long shmem_phys_hi);
int sbi_dbtr_num_trig(unsigned long trig_tdata1, unsigned long *out);
int sbi_dbtr_read_trig(unsigned long smode,
unsigned long trig_idx_base, unsigned long trig_count);
int sbi_dbtr_install_trig(unsigned long smode,
unsigned long trig_count, unsigned long *out);
int sbi_dbtr_uninstall_trig(unsigned long trig_idx_base,
unsigned long trig_idx_mask);
int sbi_dbtr_enable_trig(unsigned long trig_idx_base,
unsigned long trig_idx_mask);
int sbi_dbtr_update_trig(unsigned long smode,
unsigned long trig_count);
int sbi_dbtr_disable_trig(unsigned long trig_idx_base,
unsigned long trig_idx_mask);
int sbi_dbtr_get_total_triggers(void);
#endif

View File

@@ -10,12 +10,8 @@
#ifndef __SBI_DOMAIN_H__
#define __SBI_DOMAIN_H__
#include <sbi/riscv_locks.h>
#include <sbi/sbi_list.h>
#include <sbi/sbi_types.h>
#include <sbi/sbi_hartmask.h>
#include <sbi/sbi_domain_context.h>
#include <sbi/sbi_domain_data.h>
struct sbi_scratch;
@@ -47,80 +43,6 @@ struct sbi_domain_memregion {
#define SBI_DOMAIN_MEMREGION_SU_WRITABLE (1UL << 4)
#define SBI_DOMAIN_MEMREGION_SU_EXECUTABLE (1UL << 5)
#define SBI_DOMAIN_MEMREGION_ACCESS_MASK (0x3fUL)
#define SBI_DOMAIN_MEMREGION_M_ACCESS_MASK (0x7UL)
#define SBI_DOMAIN_MEMREGION_SU_ACCESS_MASK (0x38UL)
#define SBI_DOMAIN_MEMREGION_SU_ACCESS_SHIFT (3)
#define SBI_DOMAIN_MEMREGION_SHARED_RDONLY \
(SBI_DOMAIN_MEMREGION_M_READABLE | \
SBI_DOMAIN_MEMREGION_SU_READABLE)
#define SBI_DOMAIN_MEMREGION_SHARED_SUX_MRX \
(SBI_DOMAIN_MEMREGION_M_READABLE | \
SBI_DOMAIN_MEMREGION_M_EXECUTABLE | \
SBI_DOMAIN_MEMREGION_SU_EXECUTABLE)
#define SBI_DOMAIN_MEMREGION_SHARED_SUX_MX \
(SBI_DOMAIN_MEMREGION_M_EXECUTABLE | \
SBI_DOMAIN_MEMREGION_SU_EXECUTABLE)
#define SBI_DOMAIN_MEMREGION_SHARED_SURW_MRW \
(SBI_DOMAIN_MEMREGION_M_READABLE | \
SBI_DOMAIN_MEMREGION_M_WRITABLE | \
SBI_DOMAIN_MEMREGION_SU_READABLE| \
SBI_DOMAIN_MEMREGION_SU_WRITABLE)
#define SBI_DOMAIN_MEMREGION_SHARED_SUR_MRW \
(SBI_DOMAIN_MEMREGION_M_READABLE | \
SBI_DOMAIN_MEMREGION_M_WRITABLE | \
SBI_DOMAIN_MEMREGION_SU_READABLE)
/* Shared read-only region between M and SU mode */
#define SBI_DOMAIN_MEMREGION_IS_SUR_MR(__flags) \
((__flags & SBI_DOMAIN_MEMREGION_ACCESS_MASK) == \
SBI_DOMAIN_MEMREGION_SHARED_RDONLY)
/* Shared region: SU execute-only and M read/execute */
#define SBI_DOMAIN_MEMREGION_IS_SUX_MRX(__flags) \
((__flags & SBI_DOMAIN_MEMREGION_ACCESS_MASK) == \
SBI_DOMAIN_MEMREGION_SHARED_SUX_MRX)
/* Shared region: SU and M execute-only */
#define SBI_DOMAIN_MEMREGION_IS_SUX_MX(__flags) \
((__flags & SBI_DOMAIN_MEMREGION_ACCESS_MASK) == \
SBI_DOMAIN_MEMREGION_SHARED_SUX_MX)
/* Shared region: SU and M read/write */
#define SBI_DOMAIN_MEMREGION_IS_SURW_MRW(__flags) \
((__flags & SBI_DOMAIN_MEMREGION_ACCESS_MASK) == \
SBI_DOMAIN_MEMREGION_SHARED_SURW_MRW)
/* Shared region: SU read-only and M read/write */
#define SBI_DOMAIN_MEMREGION_IS_SUR_MRW(__flags) \
((__flags & SBI_DOMAIN_MEMREGION_ACCESS_MASK) == \
SBI_DOMAIN_MEMREGION_SHARED_SUR_MRW)
/*
* Check if region flags match with any of the above
* mentioned shared region type
*/
#define SBI_DOMAIN_MEMREGION_IS_SHARED(_flags) \
(SBI_DOMAIN_MEMREGION_IS_SUR_MR(_flags) || \
SBI_DOMAIN_MEMREGION_IS_SUX_MRX(_flags) || \
SBI_DOMAIN_MEMREGION_IS_SUX_MX(_flags) || \
SBI_DOMAIN_MEMREGION_IS_SURW_MRW(_flags)|| \
SBI_DOMAIN_MEMREGION_IS_SUR_MRW(_flags))
#define SBI_DOMAIN_MEMREGION_M_ONLY_ACCESS(__flags) \
((__flags & SBI_DOMAIN_MEMREGION_M_ACCESS_MASK) && \
!(__flags & SBI_DOMAIN_MEMREGION_SU_ACCESS_MASK))
#define SBI_DOMAIN_MEMREGION_SU_ONLY_ACCESS(__flags) \
((__flags & SBI_DOMAIN_MEMREGION_SU_ACCESS_MASK) && \
!(__flags & SBI_DOMAIN_MEMREGION_M_ACCESS_MASK))
/** Bit to control if permissions are enforced on all modes */
#define SBI_DOMAIN_MEMREGION_ENF_PERMISSIONS (1UL << 6)
@@ -156,22 +78,32 @@ struct sbi_domain_memregion {
(SBI_DOMAIN_MEMREGION_SU_EXECUTABLE | \
SBI_DOMAIN_MEMREGION_M_EXECUTABLE)
#define SBI_DOMAIN_MEMREGION_ACCESS_MASK (0x3fUL)
#define SBI_DOMAIN_MEMREGION_M_ACCESS_MASK (0x7UL)
#define SBI_DOMAIN_MEMREGION_SU_ACCESS_MASK (0x38UL)
#define SBI_DOMAIN_MEMREGION_SU_ACCESS_SHIFT (3)
#define SBI_DOMAIN_MEMREGION_MMIO (1UL << 31)
unsigned long flags;
};
/** Maximum number of domains */
#define SBI_DOMAIN_MAX_INDEX 32
/** Representation of OpenSBI domain */
struct sbi_domain {
/** Node in linked list of domains */
struct sbi_dlist node;
/** Internal state of per-domain data */
struct sbi_domain_data_priv data_priv;
/** Logical index of this domain */
/**
* Logical index of this domain
* Note: This set by sbi_domain_finalize() in the coldboot path
*/
u32 index;
/** HARTs assigned to this domain */
/**
* HARTs assigned to this domain
* Note: This set by sbi_domain_init() and sbi_domain_finalize()
* in the coldboot path
*/
struct sbi_hartmask assigned_harts;
/** Spinlock for accessing assigned_harts */
spinlock_t assigned_harts_lock;
/** Name of this domain */
char name[64];
/** Possible HARTs in this domain */
@@ -197,22 +129,23 @@ struct sbi_domain {
/** The root domain instance */
extern struct sbi_domain root;
/** Get pointer to sbi_domain from HART index */
struct sbi_domain *sbi_hartindex_to_domain(u32 hartindex);
/** Update HART local pointer to point to specified domain */
void sbi_update_hartindex_to_domain(u32 hartindex, struct sbi_domain *dom);
/** Get pointer to sbi_domain from HART id */
struct sbi_domain *sbi_hartid_to_domain(u32 hartid);
/** Get pointer to sbi_domain for current HART */
#define sbi_domain_thishart_ptr() \
sbi_hartindex_to_domain(current_hartindex())
sbi_hartid_to_domain(current_hartid())
/** Head of linked list of domains */
extern struct sbi_dlist domain_list;
/** Index to domain table */
extern struct sbi_domain *domidx_to_domain_table[];
/** Get pointer to sbi_domain from index */
#define sbi_index_to_domain(__index) \
domidx_to_domain_table[__index]
/** Iterate over each domain */
#define sbi_domain_for_each(__d) \
sbi_list_for_each_entry(__d, &domain_list, node)
#define sbi_domain_for_each(__i, __d) \
for ((__i) = 0; ((__d) = sbi_index_to_domain(__i)); (__i)++)
/** Iterate over each memory region of a domain */
#define sbi_domain_for_each_memregion(__d, __r) \
@@ -221,19 +154,20 @@ extern struct sbi_dlist domain_list;
/**
* Check whether given HART is assigned to specified domain
* @param dom pointer to domain
* @param hartindex the HART index
* @param hartid the HART ID
* @return true if HART is assigned to domain otherwise false
*/
bool sbi_domain_is_assigned_hart(const struct sbi_domain *dom, u32 hartindex);
bool sbi_domain_is_assigned_hart(const struct sbi_domain *dom, u32 hartid);
/**
* Get the assigned HART mask for given domain
* Get ulong assigned HART mask for given domain and HART base ID
* @param dom pointer to domain
* @param mask the output hartmask to fill
* @return 0 on success and SBI_Exxx (< 0) on failure
* @param hbase the HART base ID
* @return ulong possible HART mask
* Note: the return ulong mask will be set to zero on failure.
*/
int sbi_domain_get_assigned_hartmask(const struct sbi_domain *dom,
struct sbi_hartmask *mask);
ulong sbi_domain_get_assigned_hartmask(const struct sbi_domain *dom,
ulong hbase);
/**
* Initialize a domain memory region based on it's physical
@@ -293,6 +227,16 @@ void sbi_domain_dump_all(const char *suffix);
int sbi_domain_register(struct sbi_domain *dom,
const struct sbi_hartmask *assign_mask);
/**
* Add a memory region to the root domain
* @param reg pointer to the memory region to be added
*
* @return 0 on success
* @return SBI_EALREADY if memory region conflicts with the existing one
* @return SBI_EINVAL otherwise
*/
int sbi_domain_root_add_memregion(const struct sbi_domain_memregion *reg);
/**
* Add a memory range with its flags to the root domain
* @param addr start physical address of memory range
@@ -307,11 +251,8 @@ int sbi_domain_register(struct sbi_domain *dom,
int sbi_domain_root_add_memrange(unsigned long addr, unsigned long size,
unsigned long align, unsigned long region_flags);
/** Startup non-root domains */
int sbi_domain_startup(struct sbi_scratch *scratch, u32 cold_hartid);
/** Finalize domain tables */
int sbi_domain_finalize(struct sbi_scratch *scratch);
/** Finalize domain tables and startup non-root domains */
int sbi_domain_finalize(struct sbi_scratch *scratch, u32 cold_hartid);
/** Initialize domains */
int sbi_domain_init(struct sbi_scratch *scratch, u32 cold_hartid);

View File

@@ -1,41 +0,0 @@
/*
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) IPADS@SJTU 2023. All rights reserved.
*/
#ifndef __SBI_DOMAIN_CONTEXT_H__
#define __SBI_DOMAIN_CONTEXT_H__
#include <sbi/sbi_types.h>
struct sbi_domain;
/**
* Enter a specific domain context synchronously
* @param dom pointer to domain
*
* @return 0 on success and negative error code on failure
*/
int sbi_domain_context_enter(struct sbi_domain *dom);
/**
* Exit the current domain context, and then return to the caller
* of sbi_domain_context_enter or attempt to start the next domain
* context to be initialized
*
* @return 0 on success and negative error code on failure
*/
int sbi_domain_context_exit(void);
/**
* Initialize domain context support
*
* @return 0 on success and negative error code on failure
*/
int sbi_domain_context_init(void);
/* Deinitialize domain context support */
void sbi_domain_context_deinit(void);
#endif // __SBI_DOMAIN_CONTEXT_H__

View File

@@ -1,93 +0,0 @@
/*
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2024 Ventana Micro Systems Inc.
*/
#ifndef __SBI_DOMAIN_DATA_H__
#define __SBI_DOMAIN_DATA_H__
#include <sbi/sbi_types.h>
#include <sbi/sbi_list.h>
struct sbi_domain;
/** Maximum domain data per-domain */
#define SBI_DOMAIN_MAX_DATA_PTRS 32
/** Representation of per-domain data */
struct sbi_domain_data_priv {
/** Array of domain data pointers indexed by domain data identifier */
void *idx_to_data_ptr[SBI_DOMAIN_MAX_DATA_PTRS];
};
/** Representation of a domain data */
struct sbi_domain_data {
/**
* Head is used for maintaining data list
*
* Note: initialized by domain framework
*/
struct sbi_dlist head;
/**
* Identifier which used to locate per-domain data
*
* Note: initialized by domain framework
*/
unsigned long data_idx;
/** Size of per-domain data */
unsigned long data_size;
/** Optional callback to setup domain data */
int (*data_setup)(struct sbi_domain *dom,
struct sbi_domain_data *data, void *data_ptr);
/** Optional callback to cleanup domain data */
void (*data_cleanup)(struct sbi_domain *dom,
struct sbi_domain_data *data, void *data_ptr);
};
/**
* Get per-domain data pointer for a given domain
* @param dom pointer to domain
* @param data pointer to domain data
*
* @return per-domain data pointer
*/
void *sbi_domain_data_ptr(struct sbi_domain *dom, struct sbi_domain_data *data);
/**
* Setup all domain data for a domain
* @param dom pointer to domain
*
* @return 0 on success and negative error code on failure
*
* Note: This function is used internally within domain framework.
*/
int sbi_domain_setup_data(struct sbi_domain *dom);
/**
* Cleanup all domain data for a domain
* @param dom pointer to domain
*
* Note: This function is used internally within domain framework.
*/
void sbi_domain_cleanup_data(struct sbi_domain *dom);
/**
* Register a domain data
* @param hndl pointer to domain data
*
* @return 0 on success and negative error code on failure
*
* Note: This function must be used only in cold boot path.
*/
int sbi_domain_register_data(struct sbi_domain_data *data);
/**
* Unregister a domain data
* @param hndl pointer to domain data
*
* Note: This function must be used only in cold boot path.
*/
void sbi_domain_unregister_data(struct sbi_domain_data *data);
#endif

View File

@@ -1,20 +0,0 @@
/*
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2025 Rivos Inc.
*
* Authors:
* Clément Léger <cleger@rivosinc.com>
*/
#ifndef __SBI_DOUBLE_TRAP_H__
#define __SBI_DOUBLE_TRAP_H__
#include <sbi/sbi_types.h>
#include <sbi/sbi_trap.h>
int sbi_double_trap_handler(struct sbi_trap_context *tcntx);
void sbi_double_trap_init(struct sbi_scratch *scratch);
#endif

View File

@@ -13,25 +13,16 @@
#include <sbi/sbi_types.h>
#include <sbi/sbi_list.h>
#define SBI_ECALL_VERSION_MAJOR 3
#define SBI_ECALL_VERSION_MAJOR 1
#define SBI_ECALL_VERSION_MINOR 0
#define SBI_OPENSBI_IMPID 1
struct sbi_trap_regs;
struct sbi_trap_context;
struct sbi_ecall_return {
/* Return flag to skip register update */
bool skip_regs_update;
/* Return value */
unsigned long value;
};
struct sbi_trap_info;
struct sbi_ecall_extension {
/* head is used by the extension list */
struct sbi_dlist head;
/* short name of the extension */
char name[8];
/*
* extid_start and extid_end specify the range for this extension. As
* the initial range may be wider than the valid runtime range, the
@@ -40,8 +31,6 @@ struct sbi_ecall_extension {
*/
unsigned long extid_start;
unsigned long extid_end;
/* flag showing whether given extension is experimental or not */
bool experimental;
/*
* register_extensions
*
@@ -73,8 +62,9 @@ struct sbi_ecall_extension {
* never invoked with an invalid or unavailable extension ID.
*/
int (* handle)(unsigned long extid, unsigned long funcid,
struct sbi_trap_regs *regs,
struct sbi_ecall_return *out);
const struct sbi_trap_regs *regs,
unsigned long *out_val,
struct sbi_trap_info *out_trap);
};
u16 sbi_ecall_version_major(void);
@@ -87,13 +77,11 @@ void sbi_ecall_set_impid(unsigned long impid);
struct sbi_ecall_extension *sbi_ecall_find_extension(unsigned long extid);
void sbi_ecall_get_extensions_str(char *exts_str, int exts_str_size, bool experimental);
int sbi_ecall_register_extension(struct sbi_ecall_extension *ext);
void sbi_ecall_unregister_extension(struct sbi_ecall_extension *ext);
int sbi_ecall_handler(struct sbi_trap_context *tcntx);
int sbi_ecall_handler(struct sbi_trap_regs *regs);
int sbi_ecall_init(void);

View File

@@ -12,8 +12,6 @@
/* clang-format off */
#include <sbi/sbi_types.h>
/* SBI Extension IDs */
#define SBI_EXT_0_1_SET_TIMER 0x0
#define SBI_EXT_0_1_CONSOLE_PUTCHAR 0x1
@@ -34,10 +32,6 @@
#define SBI_EXT_DBCN 0x4442434E
#define SBI_EXT_SUSP 0x53555350
#define SBI_EXT_CPPC 0x43505043
#define SBI_EXT_DBTR 0x44425452
#define SBI_EXT_SSE 0x535345
#define SBI_EXT_FWFT 0x46574654
#define SBI_EXT_MPXY 0x4D505859
/* SBI function IDs for BASE extension*/
#define SBI_EXT_BASE_GET_SPEC_VERSION 0x0
@@ -109,45 +103,6 @@
#define SBI_EXT_PMU_COUNTER_STOP 0x4
#define SBI_EXT_PMU_COUNTER_FW_READ 0x5
#define SBI_EXT_PMU_COUNTER_FW_READ_HI 0x6
#define SBI_EXT_PMU_SNAPSHOT_SET_SHMEM 0x7
#define SBI_EXT_PMU_EVENT_GET_INFO 0x8
/* SBI function IDs for DBTR extension */
#define SBI_EXT_DBTR_NUM_TRIGGERS 0x0
#define SBI_EXT_DBTR_SETUP_SHMEM 0x1
#define SBI_EXT_DBTR_TRIGGER_READ 0x2
#define SBI_EXT_DBTR_TRIGGER_INSTALL 0x3
#define SBI_EXT_DBTR_TRIGGER_UPDATE 0x4
#define SBI_EXT_DBTR_TRIGGER_UNINSTALL 0x5
#define SBI_EXT_DBTR_TRIGGER_ENABLE 0x6
#define SBI_EXT_DBTR_TRIGGER_DISABLE 0x7
/* SBI function IDs for FW feature extension */
#define SBI_EXT_FWFT_SET 0x0
#define SBI_EXT_FWFT_GET 0x1
enum sbi_fwft_feature_t {
SBI_FWFT_MISALIGNED_EXC_DELEG = 0x0,
SBI_FWFT_LANDING_PAD = 0x1,
SBI_FWFT_SHADOW_STACK = 0x2,
SBI_FWFT_DOUBLE_TRAP = 0x3,
SBI_FWFT_PTE_AD_HW_UPDATING = 0x4,
SBI_FWFT_POINTER_MASKING_PMLEN = 0x5,
SBI_FWFT_LOCAL_RESERVED_START = 0x6,
SBI_FWFT_LOCAL_RESERVED_END = 0x3fffffff,
SBI_FWFT_LOCAL_PLATFORM_START = 0x40000000,
SBI_FWFT_LOCAL_PLATFORM_END = 0x7fffffff,
SBI_FWFT_GLOBAL_RESERVED_START = 0x80000000,
SBI_FWFT_GLOBAL_RESERVED_END = 0xbfffffff,
SBI_FWFT_GLOBAL_PLATFORM_START = 0xc0000000,
SBI_FWFT_GLOBAL_PLATFORM_END = 0xffffffff,
};
#define SBI_FWFT_GLOBAL_FEATURE_BIT (1 << 31)
#define SBI_FWFT_PLATFORM_FEATURE_BIT (1 << 30)
#define SBI_FWFT_SET_FLAG_LOCK (1 << 0)
/** General pmu event codes specified in SBI PMU extension */
enum sbi_pmu_hw_generic_events_t {
@@ -249,7 +204,6 @@ enum sbi_pmu_event_type_id {
SBI_PMU_EVENT_TYPE_HW = 0x0,
SBI_PMU_EVENT_TYPE_HW_CACHE = 0x1,
SBI_PMU_EVENT_TYPE_HW_RAW = 0x2,
SBI_PMU_EVENT_TYPE_HW_RAW_V2 = 0x3,
SBI_PMU_EVENT_TYPE_FW = 0xf,
SBI_PMU_EVENT_TYPE_MAX,
};
@@ -260,19 +214,12 @@ enum sbi_pmu_ctr_type {
SBI_PMU_CTR_TYPE_FW,
};
struct sbi_pmu_event_info {
uint32_t event_idx;
uint32_t output;
uint64_t event_data;
};
/* Helper macros to decode event idx */
#define SBI_PMU_EVENT_IDX_MASK 0xFFFFF
#define SBI_PMU_EVENT_IDX_TYPE_OFFSET 16
#define SBI_PMU_EVENT_IDX_TYPE_MASK (0xF << SBI_PMU_EVENT_IDX_TYPE_OFFSET)
#define SBI_PMU_EVENT_IDX_CODE_MASK 0xFFFF
#define SBI_PMU_EVENT_RAW_IDX 0x20000
#define SBI_PMU_EVENT_RAW_V2_IDX 0x30000
#define SBI_PMU_EVENT_IDX_INVALID 0xFFFFFFFF
@@ -294,11 +241,9 @@ struct sbi_pmu_event_info {
/* Flags defined for counter start function */
#define SBI_PMU_START_FLAG_SET_INIT_VALUE (1 << 0)
#define SBI_PMU_START_FLAG_INIT_FROM_SNAPSHOT (1 << 1)
/* Flags defined for counter stop function */
#define SBI_PMU_STOP_FLAG_RESET (1 << 0)
#define SBI_PMU_STOP_FLAG_TAKE_SNAPSHOT (1 << 1)
/* SBI function IDs for DBCN extension */
#define SBI_EXT_DBCN_CONSOLE_WRITE 0x0
@@ -345,127 +290,10 @@ enum sbi_cppc_reg_id {
SBI_CPPC_NON_ACPI_LAST = SBI_CPPC_TRANSITION_LATENCY,
};
/* SBI Function IDs for SSE extension */
#define SBI_EXT_SSE_READ_ATTR 0x00000000
#define SBI_EXT_SSE_WRITE_ATTR 0x00000001
#define SBI_EXT_SSE_REGISTER 0x00000002
#define SBI_EXT_SSE_UNREGISTER 0x00000003
#define SBI_EXT_SSE_ENABLE 0x00000004
#define SBI_EXT_SSE_DISABLE 0x00000005
#define SBI_EXT_SSE_COMPLETE 0x00000006
#define SBI_EXT_SSE_INJECT 0x00000007
#define SBI_EXT_SSE_HART_UNMASK 0x00000008
#define SBI_EXT_SSE_HART_MASK 0x00000009
/* SBI SSE Event Attributes. */
enum sbi_sse_attr_id {
SBI_SSE_ATTR_STATUS = 0x00000000,
SBI_SSE_ATTR_PRIO = 0x00000001,
SBI_SSE_ATTR_CONFIG = 0x00000002,
SBI_SSE_ATTR_PREFERRED_HART = 0x00000003,
SBI_SSE_ATTR_ENTRY_PC = 0x00000004,
SBI_SSE_ATTR_ENTRY_ARG = 0x00000005,
SBI_SSE_ATTR_INTERRUPTED_SEPC = 0x00000006,
SBI_SSE_ATTR_INTERRUPTED_FLAGS = 0x00000007,
SBI_SSE_ATTR_INTERRUPTED_A6 = 0x00000008,
SBI_SSE_ATTR_INTERRUPTED_A7 = 0x00000009,
SBI_SSE_ATTR_MAX = 0x0000000A
};
#define SBI_SSE_ATTR_STATUS_STATE_OFFSET 0
#define SBI_SSE_ATTR_STATUS_STATE_MASK 0x3
#define SBI_SSE_ATTR_STATUS_PENDING_OFFSET 2
#define SBI_SSE_ATTR_STATUS_INJECT_OFFSET 3
#define SBI_SSE_ATTR_CONFIG_ONESHOT (1 << 0)
#define SBI_SSE_ATTR_INTERRUPTED_FLAGS_SSTATUS_SPP BIT(0)
#define SBI_SSE_ATTR_INTERRUPTED_FLAGS_SSTATUS_SPIE BIT(1)
#define SBI_SSE_ATTR_INTERRUPTED_FLAGS_HSTATUS_SPV BIT(2)
#define SBI_SSE_ATTR_INTERRUPTED_FLAGS_HSTATUS_SPVP BIT(3)
#define SBI_SSE_ATTR_INTERRUPTED_FLAGS_SSTATUS_SPELP BIT(4)
#define SBI_SSE_ATTR_INTERRUPTED_FLAGS_SSTATUS_SDT BIT(5)
enum sbi_sse_state {
SBI_SSE_STATE_UNUSED = 0,
SBI_SSE_STATE_REGISTERED = 1,
SBI_SSE_STATE_ENABLED = 2,
SBI_SSE_STATE_RUNNING = 3,
};
/* SBI SSE Event IDs. */
/* Range 0x00000000 - 0x0000ffff */
#define SBI_SSE_EVENT_LOCAL_HIGH_PRIO_RAS 0x00000000
#define SBI_SSE_EVENT_LOCAL_DOUBLE_TRAP 0x00000001
#define SBI_SSE_EVENT_LOCAL_RESERVED_0_START 0x00000002
#define SBI_SSE_EVENT_LOCAL_RESERVED_0_END 0x00003fff
#define SBI_SSE_EVENT_LOCAL_PLAT_0_START 0x00004000
#define SBI_SSE_EVENT_LOCAL_PLAT_0_END 0x00007fff
#define SBI_SSE_EVENT_GLOBAL_HIGH_PRIO_RAS 0x00008000
#define SBI_SSE_EVENT_GLOBAL_RESERVED_0_START 0x00008001
#define SBI_SSE_EVENT_GLOBAL_RESERVED_0_END 0x0000bfff
#define SBI_SSE_EVENT_GLOBAL_PLAT_0_START 0x0000c000
#define SBI_SSE_EVENT_GLOBAL_PLAT_0_END 0x0000ffff
/* Range 0x00010000 - 0x0001ffff */
#define SBI_SSE_EVENT_LOCAL_PMU_OVERFLOW 0x00010000
#define SBI_SSE_EVENT_LOCAL_RESERVED_1_START 0x00010001
#define SBI_SSE_EVENT_LOCAL_RESERVED_1_END 0x00013fff
#define SBI_SSE_EVENT_LOCAL_PLAT_1_START 0x00014000
#define SBI_SSE_EVENT_LOCAL_PLAT_1_END 0x00017fff
#define SBI_SSE_EVENT_GLOBAL_RESERVED_1_START 0x00018000
#define SBI_SSE_EVENT_GLOBAL_RESERVED_1_END 0x0001bfff
#define SBI_SSE_EVENT_GLOBAL_PLAT_1_START 0x0001c000
#define SBI_SSE_EVENT_GLOBAL_PLAT_1_END 0x0001ffff
/* Range 0x00100000 - 0x0010ffff */
#define SBI_SSE_EVENT_LOCAL_LOW_PRIO_RAS 0x00100000
#define SBI_SSE_EVENT_LOCAL_RESERVED_2_START 0x00100001
#define SBI_SSE_EVENT_LOCAL_RESERVED_2_END 0x00103fff
#define SBI_SSE_EVENT_LOCAL_PLAT_2_START 0x00104000
#define SBI_SSE_EVENT_LOCAL_PLAT_2_END 0x00107fff
#define SBI_SSE_EVENT_GLOBAL_LOW_PRIO_RAS 0x00108000
#define SBI_SSE_EVENT_GLOBAL_RESERVED_2_START 0x00108001
#define SBI_SSE_EVENT_GLOBAL_RESERVED_2_END 0x0010bfff
#define SBI_SSE_EVENT_GLOBAL_PLAT_2_START 0x0010c000
#define SBI_SSE_EVENT_GLOBAL_PLAT_2_END 0x0010ffff
/* Range 0xffff0000 - 0xffffffff */
#define SBI_SSE_EVENT_LOCAL_SOFTWARE 0xffff0000
#define SBI_SSE_EVENT_LOCAL_RESERVED_3_START 0xffff0001
#define SBI_SSE_EVENT_LOCAL_RESERVED_3_END 0xffff3fff
#define SBI_SSE_EVENT_LOCAL_PLAT_3_START 0xffff4000
#define SBI_SSE_EVENT_LOCAL_PLAT_3_END 0xffff7fff
#define SBI_SSE_EVENT_GLOBAL_SOFTWARE 0xffff8000
#define SBI_SSE_EVENT_GLOBAL_RESERVED_3_START 0xffff8001
#define SBI_SSE_EVENT_GLOBAL_RESERVED_3_END 0xffffbfff
#define SBI_SSE_EVENT_GLOBAL_PLAT_3_START 0xffffc000
#define SBI_SSE_EVENT_GLOBAL_PLAT_3_END 0xffffffff
#define SBI_SSE_EVENT_GLOBAL_BIT BIT(15)
#define SBI_SSE_EVENT_PLATFORM_BIT BIT(14)
/* SBI function IDs for MPXY extension */
#define SBI_EXT_MPXY_GET_SHMEM_SIZE 0x0
#define SBI_EXT_MPXY_SET_SHMEM 0x1
#define SBI_EXT_MPXY_GET_CHANNEL_IDS 0x2
#define SBI_EXT_MPXY_READ_ATTRS 0x3
#define SBI_EXT_MPXY_WRITE_ATTRS 0x4
#define SBI_EXT_MPXY_SEND_MSG_WITH_RESP 0x5
#define SBI_EXT_MPXY_SEND_MSG_WITHOUT_RESP 0x6
#define SBI_EXT_MPXY_GET_NOTIFICATION_EVENTS 0x7
/* SBI base specification related macros */
#define SBI_SPEC_VERSION_MAJOR_OFFSET 24
#define SBI_SPEC_VERSION_MAJOR_MASK 0x7f
#define SBI_SPEC_VERSION_MINOR_MASK 0xffffff
#define SBI_EXT_EXPERIMENTAL_START 0x08000000
#define SBI_EXT_EXPERIMENTAL_END 0x08FFFFFF
#define SBI_EXT_VENDOR_START 0x09000000
#define SBI_EXT_VENDOR_END 0x09FFFFFF
#define SBI_EXT_FIRMWARE_START 0x0A000000
@@ -481,14 +309,8 @@ enum sbi_sse_state {
#define SBI_ERR_ALREADY_AVAILABLE -6
#define SBI_ERR_ALREADY_STARTED -7
#define SBI_ERR_ALREADY_STOPPED -8
#define SBI_ERR_NO_SHMEM -9
#define SBI_ERR_INVALID_STATE -10
#define SBI_ERR_BAD_RANGE -11
#define SBI_ERR_TIMEOUT -12
#define SBI_ERR_IO -13
#define SBI_ERR_DENIED_LOCKED -14
#define SBI_LAST_ERR SBI_ERR_DENIED_LOCKED
#define SBI_LAST_ERR SBI_ERR_ALREADY_STOPPED
/* clang-format on */

View File

@@ -23,21 +23,17 @@
#define SBI_EALREADY SBI_ERR_ALREADY_AVAILABLE
#define SBI_EALREADY_STARTED SBI_ERR_ALREADY_STARTED
#define SBI_EALREADY_STOPPED SBI_ERR_ALREADY_STOPPED
#define SBI_ENO_SHMEM SBI_ERR_NO_SHMEM
#define SBI_EINVALID_STATE SBI_ERR_INVALID_STATE
#define SBI_EBAD_RANGE SBI_ERR_BAD_RANGE
#define SBI_ETIMEOUT SBI_ERR_TIMEOUT
#define SBI_ETIMEDOUT SBI_ERR_TIMEOUT
#define SBI_EIO SBI_ERR_IO
#define SBI_EDENIED_LOCKED SBI_ERR_DENIED_LOCKED
#define SBI_ENODEV -1000
#define SBI_ENOSYS -1001
#define SBI_EILL -1002
#define SBI_ENOSPC -1003
#define SBI_ENOMEM -1004
#define SBI_EUNKNOWN -1005
#define SBI_ENOENT -1006
#define SBI_ETIMEDOUT -1002
#define SBI_EIO -1003
#define SBI_EILL -1004
#define SBI_ENOSPC -1005
#define SBI_ENOMEM -1006
#define SBI_ETRAP -1007
#define SBI_EUNKNOWN -1008
#define SBI_ENOENT -1009
/* clang-format on */

View File

@@ -23,18 +23,6 @@ struct sbi_fifo {
u16 tail;
};
#define SBI_FIFO_INITIALIZER(__queue_mem, __entries, __entry_size) \
{ .queue = __queue_mem, \
.qlock = SPIN_LOCK_INITIALIZER, \
.num_entries = __entries, \
.entry_size = __entry_size, \
.avail = 0, \
.tail = 0, \
}
#define SBI_FIFO_DEFINE(__name, __queue_mem, __entries, __entry_size) \
struct sbi_fifo __name = SBI_FIFO_INITIALIZER(__queue_mem, __entries, __entry_size)
enum sbi_fifo_inplace_update_types {
SBI_FIFO_SKIP,
SBI_FIFO_UPDATED,
@@ -42,7 +30,7 @@ enum sbi_fifo_inplace_update_types {
};
int sbi_fifo_dequeue(struct sbi_fifo *fifo, void *data);
int sbi_fifo_enqueue(struct sbi_fifo *fifo, void *data, bool force);
int sbi_fifo_enqueue(struct sbi_fifo *fifo, void *data);
void sbi_fifo_init(struct sbi_fifo *fifo, void *queue_mem, u16 entries,
u16 entry_size);
int sbi_fifo_is_empty(struct sbi_fifo *fifo);

View File

@@ -1,23 +0,0 @@
/*
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2024 Rivos Inc.
*
* Authors:
* Clément Léger <cleger@rivosinc.com>
*/
#ifndef __SBI_FW_FEATURE_H__
#define __SBI_FW_FEATURE_H__
#include <sbi/sbi_ecall_interface.h>
struct sbi_scratch;
int sbi_fwft_set(enum sbi_fwft_feature_t feature, unsigned long value,
unsigned long flags);
int sbi_fwft_get(enum sbi_fwft_feature_t feature, unsigned long *out_val);
int sbi_fwft_init(struct sbi_scratch *scratch, bool cold_boot);
#endif

View File

@@ -11,7 +11,6 @@
#define __SBI_HART_H__
#include <sbi/sbi_types.h>
#include <sbi/sbi_bitops.h>
/** Possible privileged specification versions of a hart */
enum sbi_hart_priv_versions {
@@ -27,104 +26,29 @@ enum sbi_hart_priv_versions {
/** Possible ISA extensions of a hart */
enum sbi_hart_extensions {
/** HART has AIA M-mode CSRs */
SBI_HART_EXT_SMAIA = 0,
/** HART has Smepmp */
SBI_HART_EXT_SMEPMP,
/** HART has Smstateen extension **/
SBI_HART_EXT_SMSTATEEN,
/** Hart has Sscofpmt extension */
SBI_HART_EXT_SSCOFPMF,
SBI_HART_EXT_SSCOFPMF = 0,
/** HART has HW time CSR (extension name not available) */
SBI_HART_EXT_TIME,
/** HART has AIA M-mode CSRs */
SBI_HART_EXT_SMAIA,
/** HART has Smstateen CSR **/
SBI_HART_EXT_SMSTATEEN,
/** HART has Sstc extension */
SBI_HART_EXT_SSTC,
/** HART has Zicntr extension (i.e. HW cycle, time & instret CSRs) */
SBI_HART_EXT_ZICNTR,
/** HART has Zihpm extension */
SBI_HART_EXT_ZIHPM,
/** HART has Zkr extension */
SBI_HART_EXT_ZKR,
/** Hart has Smcntrpmf extension */
SBI_HART_EXT_SMCNTRPMF,
/** Hart has Xandespmu extension */
SBI_HART_EXT_XANDESPMU,
/** Hart has Zicboz extension */
SBI_HART_EXT_ZICBOZ,
/** Hart has Zicbom extension */
SBI_HART_EXT_ZICBOM,
/** Hart has Svpbmt extension */
SBI_HART_EXT_SVPBMT,
/** Hart has debug trigger extension */
SBI_HART_EXT_SDTRIG,
/** Hart has Smcsrind extension */
SBI_HART_EXT_SMCSRIND,
/** Hart has Smcdeleg extension */
SBI_HART_EXT_SMCDELEG,
/** Hart has Sscsrind extension */
SBI_HART_EXT_SSCSRIND,
/** Hart has Ssccfg extension */
SBI_HART_EXT_SSCCFG,
/** Hart has Svade extension */
SBI_HART_EXT_SVADE,
/** Hart has Svadu extension */
SBI_HART_EXT_SVADU,
/** Hart has Smnpm extension */
SBI_HART_EXT_SMNPM,
/** HART has zicfilp extension */
SBI_HART_EXT_ZICFILP,
/** HART has zicfiss extension */
SBI_HART_EXT_ZICFISS,
/** Hart has Ssdbltrp extension */
SBI_HART_EXT_SSDBLTRP,
/** HART has CTR M-mode CSRs */
SBI_HART_EXT_SMCTR,
/** HART has CTR S-mode CSRs */
SBI_HART_EXT_SSCTR,
/** HART has Ssstateen extension **/
SBI_HART_EXT_SSSTATEEN,
/** Maximum index of Hart extension */
SBI_HART_EXT_MAX,
};
struct sbi_hart_ext_data {
const unsigned int id;
const char *name;
};
extern const struct sbi_hart_ext_data sbi_hart_ext[];
/** CSRs should be detected by access and trapping */
enum sbi_hart_csrs {
SBI_HART_CSR_CYCLE = 0,
SBI_HART_CSR_TIME,
SBI_HART_CSR_INSTRET,
SBI_HART_CSR_MAX,
};
/*
* Smepmp enforces access boundaries between M-mode and
* S/U-mode. When it is enabled, the PMPs are programmed
* such that M-mode doesn't have access to S/U-mode memory.
*
* To give M-mode R/W access to the shared memory between M and
* S/U-mode, first entry is reserved. It is disabled at boot.
* When shared memory access is required, the physical address
* should be programmed into the first PMP entry with R/W
* permissions to the M-mode. Once the work is done, it should be
* unmapped. sbi_hart_map_saddr/sbi_hart_unmap_saddr function
* pair should be used to map/unmap the shared memory.
*/
#define SBI_SMEPMP_RESV_ENTRY 0
struct sbi_hart_features {
bool detected;
int priv_version;
unsigned long extensions[BITS_TO_LONGS(SBI_HART_EXT_MAX)];
unsigned long csrs[BITS_TO_LONGS(SBI_HART_CSR_MAX)];
unsigned long extensions;
unsigned int pmp_count;
unsigned int pmp_addr_bits;
unsigned int pmp_log2gran;
unsigned int mhpm_mask;
unsigned long pmp_gran;
unsigned int mhpm_count;
unsigned int mhpm_bits;
};
@@ -134,17 +58,19 @@ int sbi_hart_reinit(struct sbi_scratch *scratch);
int sbi_hart_init(struct sbi_scratch *scratch, bool cold_boot);
extern void (*sbi_hart_expected_trap)(void);
static inline ulong sbi_hart_expected_trap_addr(void)
{
return (ulong)sbi_hart_expected_trap;
}
unsigned int sbi_hart_mhpm_mask(struct sbi_scratch *scratch);
unsigned int sbi_hart_mhpm_count(struct sbi_scratch *scratch);
void sbi_hart_delegation_dump(struct sbi_scratch *scratch,
const char *prefix, const char *suffix);
unsigned int sbi_hart_pmp_count(struct sbi_scratch *scratch);
unsigned int sbi_hart_pmp_log2gran(struct sbi_scratch *scratch);
unsigned long sbi_hart_pmp_granularity(struct sbi_scratch *scratch);
unsigned int sbi_hart_pmp_addrbits(struct sbi_scratch *scratch);
unsigned int sbi_hart_mhpm_bits(struct sbi_scratch *scratch);
int sbi_hart_pmp_configure(struct sbi_scratch *scratch);
int sbi_hart_map_saddr(unsigned long base, unsigned long size);
int sbi_hart_unmap_saddr(void);
int sbi_hart_priv_version(struct sbi_scratch *scratch);
void sbi_hart_get_priv_version_str(struct sbi_scratch *scratch,
char *version_str, int nvstr);
@@ -155,7 +81,6 @@ bool sbi_hart_has_extension(struct sbi_scratch *scratch,
enum sbi_hart_extensions ext);
void sbi_hart_get_extensions_str(struct sbi_scratch *scratch,
char *extension_str, int nestr);
bool sbi_hart_has_csr(struct sbi_scratch *scratch, enum sbi_hart_csrs csr);
void __attribute__((noreturn)) sbi_hart_hang(void);

View File

@@ -11,7 +11,6 @@
#define __SBI_HARTMASK_H__
#include <sbi/sbi_bitmap.h>
#include <sbi/sbi_scratch.h>
/**
* Maximum number of bits in a hartmask
@@ -33,10 +32,7 @@ struct sbi_hartmask {
/** Initialize hartmask to zero except a particular HART id */
#define SBI_HARTMASK_INIT_EXCEPT(__m, __h) \
do { \
u32 __i = sbi_hartid_to_hartindex(__h); \
bitmap_zero_except(((__m)->bits), __i, SBI_HARTMASK_MAX_BITS); \
} while(0)
bitmap_zero_except(((__m)->bits), (__h), SBI_HARTMASK_MAX_BITS)
/**
* Get underlying bitmap of hartmask
@@ -45,68 +41,37 @@ struct sbi_hartmask {
#define sbi_hartmask_bits(__m) ((__m)->bits)
/**
* Set a HART index in hartmask
* @param i HART index to set
* @param m the hartmask pointer
*/
static inline void sbi_hartmask_set_hartindex(u32 i, struct sbi_hartmask *m)
{
if (i < SBI_HARTMASK_MAX_BITS)
__set_bit(i, m->bits);
}
/**
* Set a HART id in hartmask
* Set a HART in hartmask
* @param h HART id to set
* @param m the hartmask pointer
*/
static inline void sbi_hartmask_set_hartid(u32 h, struct sbi_hartmask *m)
static inline void sbi_hartmask_set_hart(u32 h, struct sbi_hartmask *m)
{
sbi_hartmask_set_hartindex(sbi_hartid_to_hartindex(h), m);
if (h < SBI_HARTMASK_MAX_BITS)
__set_bit(h, m->bits);
}
/**
* Clear a HART index in hartmask
* @param i HART index to clear
* @param m the hartmask pointer
*/
static inline void sbi_hartmask_clear_hartindex(u32 i, struct sbi_hartmask *m)
{
if (i < SBI_HARTMASK_MAX_BITS)
__clear_bit(i, m->bits);
}
/**
* Clear a HART id in hartmask
* Clear a HART in hartmask
* @param h HART id to clear
* @param m the hartmask pointer
*/
static inline void sbi_hartmask_clear_hartid(u32 h, struct sbi_hartmask *m)
static inline void sbi_hartmask_clear_hart(u32 h, struct sbi_hartmask *m)
{
sbi_hartmask_clear_hartindex(sbi_hartid_to_hartindex(h), m);
if (h < SBI_HARTMASK_MAX_BITS)
__clear_bit(h, m->bits);
}
/**
* Test a HART index in hartmask
* @param i HART index to test
* @param m the hartmask pointer
*/
static inline int sbi_hartmask_test_hartindex(u32 i,
const struct sbi_hartmask *m)
{
if (i < SBI_HARTMASK_MAX_BITS)
return __test_bit(i, m->bits);
return 0;
}
/**
* Test a HART id in hartmask
* Test a HART in hartmask
* @param h HART id to test
* @param m the hartmask pointer
*/
static inline int sbi_hartmask_test_hartid(u32 h, const struct sbi_hartmask *m)
static inline int sbi_hartmask_test_hart(u32 h, const struct sbi_hartmask *m)
{
return sbi_hartmask_test_hartindex(sbi_hartid_to_hartindex(h), m);
if (h < SBI_HARTMASK_MAX_BITS)
return __test_bit(h, m->bits);
return 0;
}
/**
@@ -127,18 +92,6 @@ static inline void sbi_hartmask_clear_all(struct sbi_hartmask *dstp)
bitmap_zero(sbi_hartmask_bits(dstp), SBI_HARTMASK_MAX_BITS);
}
/**
* *dstp = *srcp
* @param dstp the hartmask destination
* @param srcp the hartmask source
*/
static inline void sbi_hartmask_copy(struct sbi_hartmask *dstp,
const struct sbi_hartmask *srcp)
{
bitmap_copy(sbi_hartmask_bits(dstp), sbi_hartmask_bits(srcp),
SBI_HARTMASK_MAX_BITS);
}
/**
* *dstp = *src1p & *src2p
* @param dstp the hartmask result
@@ -181,25 +134,8 @@ static inline void sbi_hartmask_xor(struct sbi_hartmask *dstp,
sbi_hartmask_bits(src2p), SBI_HARTMASK_MAX_BITS);
}
/**
* Count of bits in *srcp
* @param srcp the hartmask to count bits in
*
* Return: count of bits set in *srcp
*/
static inline int sbi_hartmask_weight(const struct sbi_hartmask *srcp)
{
return bitmap_weight(sbi_hartmask_bits(srcp), SBI_HARTMASK_MAX_BITS);
}
/**
* Iterate over each HART index in hartmask
* __i hart index
* __m hartmask
*/
#define sbi_hartmask_for_each_hartindex(__i, __m) \
for((__i) = find_first_bit((__m)->bits, SBI_HARTMASK_MAX_BITS); \
(__i) < SBI_HARTMASK_MAX_BITS; \
(__i) = find_next_bit((__m)->bits, SBI_HARTMASK_MAX_BITS, (__i) + 1))
/** Iterate over each HART in hartmask */
#define sbi_hartmask_for_each_hart(__h, __m) \
for_each_set_bit(__h, (__m)->bits, SBI_HARTMASK_MAX_BITS)
#endif

View File

@@ -12,41 +12,13 @@
#include <sbi/sbi_types.h>
/* Opaque declaration of heap control struct */
struct sbi_heap_control;
/* Global heap control structure */
extern struct sbi_heap_control global_hpctrl;
/* Alignment of heap base address and size */
#define HEAP_BASE_ALIGN 1024
struct sbi_scratch;
/** Allocate from heap area */
void *sbi_malloc_from(struct sbi_heap_control *hpctrl, size_t size);
static inline void *sbi_malloc(size_t size)
{
return sbi_malloc_from(&global_hpctrl, size);
}
/** Allocate aligned from heap area */
void *sbi_aligned_alloc_from(struct sbi_heap_control *hpctrl,
size_t alignment,size_t size);
static inline void *sbi_aligned_alloc(size_t alignment, size_t size)
{
return sbi_aligned_alloc_from(&global_hpctrl, alignment, size);
}
void *sbi_malloc(size_t size);
/** Zero allocate from heap area */
void *sbi_zalloc_from(struct sbi_heap_control *hpctrl, size_t size);
static inline void *sbi_zalloc(size_t size)
{
return sbi_zalloc_from(&global_hpctrl, size);
}
void *sbi_zalloc(size_t size);
/** Allocate array from heap area */
static inline void *sbi_calloc(size_t nitems, size_t size)
@@ -54,48 +26,19 @@ static inline void *sbi_calloc(size_t nitems, size_t size)
return sbi_zalloc(nitems * size);
}
static inline void *sbi_calloc_from(struct sbi_heap_control *hpctrl,
size_t nitems, size_t size)
{
return sbi_zalloc_from(hpctrl, nitems * size);
}
/** Free-up to heap area */
void sbi_free_from(struct sbi_heap_control *hpctrl, void *ptr);
static inline void sbi_free(void *ptr)
{
return sbi_free_from(&global_hpctrl, ptr);
}
void sbi_free(void *ptr);
/** Amount (in bytes) of free space in the heap area */
unsigned long sbi_heap_free_space_from(struct sbi_heap_control *hpctrl);
static inline unsigned long sbi_heap_free_space(void)
{
return sbi_heap_free_space_from(&global_hpctrl);
}
unsigned long sbi_heap_free_space(void);
/** Amount (in bytes) of used space in the heap area */
unsigned long sbi_heap_used_space_from(struct sbi_heap_control *hpctrl);
static inline unsigned long sbi_heap_used_space(void)
{
return sbi_heap_used_space_from(&global_hpctrl);
}
unsigned long sbi_heap_used_space(void);
/** Amount (in bytes) of reserved space in the heap area */
unsigned long sbi_heap_reserved_space_from(struct sbi_heap_control *hpctrl);
static inline unsigned long sbi_heap_reserved_space(void)
{
return sbi_heap_reserved_space_from(&global_hpctrl);
}
unsigned long sbi_heap_reserved_space(void);
/** Initialize heap area */
int sbi_heap_init(struct sbi_scratch *scratch);
int sbi_heap_init_new(struct sbi_heap_control *hpctrl, unsigned long base,
unsigned long size);
int sbi_heap_alloc_new(struct sbi_heap_control **hpctrl);
#endif

View File

@@ -10,7 +10,6 @@
#ifndef __SBI_HSM_H__
#define __SBI_HSM_H__
#include <sbi/sbi_hartmask.h>
#include <sbi/sbi_types.h>
/** Hart state managment device */
@@ -40,12 +39,8 @@ struct sbi_hsm_device {
*
* For successful non-retentive suspend, the hart will resume from
* the warm boot entry point.
*
* NOTE: mmode_resume_addr(resume address) is optional hence it
* may or may not be honored by the platform. If its not honored
* then platform must ensure to resume from the warmboot address.
*/
int (*hart_suspend)(u32 suspend_type, ulong mmode_resume_addr);
int (*hart_suspend)(u32 suspend_type);
/**
* Perform platform-specific actions to resume from a suspended state.
@@ -63,7 +58,7 @@ const struct sbi_hsm_device *sbi_hsm_get_device(void);
void sbi_hsm_set_device(const struct sbi_hsm_device *dev);
int sbi_hsm_init(struct sbi_scratch *scratch, bool cold_boot);
int sbi_hsm_init(struct sbi_scratch *scratch, u32 hartid, bool cold_boot);
void __noreturn sbi_hsm_exit(struct sbi_scratch *scratch);
int sbi_hsm_hart_start(struct sbi_scratch *scratch,
@@ -77,10 +72,10 @@ int sbi_hsm_hart_suspend(struct sbi_scratch *scratch, u32 suspend_type,
ulong raddr, ulong rmode, ulong arg1);
bool sbi_hsm_hart_change_state(struct sbi_scratch *scratch, long oldstate,
long newstate);
int __sbi_hsm_hart_get_state(u32 hartindex);
int __sbi_hsm_hart_get_state(u32 hartid);
int sbi_hsm_hart_get_state(const struct sbi_domain *dom, u32 hartid);
int sbi_hsm_hart_interruptible_mask(const struct sbi_domain *dom,
struct sbi_hartmask *mask);
ulong hbase, ulong *out_hmask);
void __sbi_hsm_suspend_non_ret_save(struct sbi_scratch *scratch);
void __noreturn sbi_hsm_hart_start_finish(struct sbi_scratch *scratch,
u32 hartid);

View File

@@ -1,17 +0,0 @@
/*
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2025 MIPS
*
*/
#ifndef __SBI_ILLEGAL_ATOMIC_H__
#define __SBI_ILLEGAL_ATOMIC_H__
#include <sbi/sbi_types.h>
struct sbi_trap_regs;
int sbi_illegal_atomic(ulong insn, struct sbi_trap_regs *regs);
#endif

View File

@@ -12,12 +12,8 @@
#include <sbi/sbi_types.h>
struct sbi_trap_context;
struct sbi_trap_regs;
typedef int (*illegal_insn_func)(ulong insn, struct sbi_trap_regs *regs);
int truly_illegal_insn(ulong insn, struct sbi_trap_regs *regs);
int sbi_illegal_insn_handler(struct sbi_trap_context *tcntx);
int sbi_illegal_insn_handler(ulong insn, struct sbi_trap_regs *regs);
#endif

View File

@@ -16,11 +16,9 @@ struct sbi_scratch;
void __noreturn sbi_init(struct sbi_scratch *scratch);
void sbi_revert_entry_count(struct sbi_scratch *scratch);
unsigned long sbi_entry_count(u32 hartid);
unsigned long sbi_entry_count(u32 hartindex);
unsigned long sbi_init_count(u32 hartindex);
unsigned long sbi_init_count(u32 hartid);
void __noreturn sbi_exit(struct sbi_scratch *scratch);

View File

@@ -14,7 +14,7 @@
/* clang-format off */
#define SBI_IPI_EVENT_MAX (8 * __SIZEOF_LONG__)
#define SBI_IPI_EVENT_MAX __riscv_xlen
/* clang-format on */
@@ -23,11 +23,11 @@ struct sbi_ipi_device {
/** Name of the IPI device */
char name[32];
/** Send IPI to a target HART index */
void (*ipi_send)(u32 hart_index);
/** Send IPI to a target HART */
void (*ipi_send)(u32 target_hart);
/** Clear IPI for the current hart */
void (*ipi_clear)(void);
/** Clear IPI for a target HART */
void (*ipi_clear)(u32 target_hart);
};
enum sbi_ipi_update_type {
@@ -54,7 +54,7 @@ struct sbi_ipi_event_ops {
*/
int (* update)(struct sbi_scratch *scratch,
struct sbi_scratch *remote_scratch,
u32 remote_hartindex, void *data);
u32 remote_hartid, void *data);
/**
* Sync callback to wait for remote HART
@@ -85,9 +85,9 @@ int sbi_ipi_send_halt(ulong hmask, ulong hbase);
void sbi_ipi_process(void);
int sbi_ipi_raw_send(u32 hartindex);
int sbi_ipi_raw_send(u32 target_hart);
void sbi_ipi_raw_clear(void);
void sbi_ipi_raw_clear(u32 target_hart);
const struct sbi_ipi_device *sbi_ipi_get_device(void);

View File

@@ -10,22 +10,20 @@
#ifndef __SBI_IRQCHIP_H__
#define __SBI_IRQCHIP_H__
#include <sbi/sbi_list.h>
#include <sbi/sbi_types.h>
struct sbi_scratch;
struct sbi_trap_regs;
/** irqchip hardware device */
struct sbi_irqchip_device {
/** Node in the list of irqchip devices */
struct sbi_dlist node;
/** Initialize per-hart state for the current hart */
int (*warm_init)(struct sbi_irqchip_device *dev);
/** Handle an IRQ from this irqchip */
int (*irq_handle)(void);
};
/**
* Set external interrupt handling function
*
* This function is called by OpenSBI platform code to set a handler for
* external interrupts
*
* @param fn function pointer for handling external irqs
*/
void sbi_irqchip_set_irqfn(int (*fn)(struct sbi_trap_regs *regs));
/**
* Process external interrupts
@@ -35,10 +33,7 @@ struct sbi_irqchip_device {
*
* @param regs pointer for trap registers
*/
int sbi_irqchip_process(void);
/** Register an irqchip device to receive callbacks */
void sbi_irqchip_add_device(struct sbi_irqchip_device *dev);
int sbi_irqchip_process(struct sbi_trap_regs *regs);
/** Initialize interrupt controllers */
int sbi_irqchip_init(struct sbi_scratch *scratch, bool cold_boot);

View File

@@ -160,17 +160,4 @@ static inline void sbi_list_del_init(struct sbi_dlist *entry)
&pos->member != (head); \
pos = sbi_list_entry(pos->member.next, typeof(*pos), member))
/**
* Iterate over list of given type safe against removal of list entry
* @param pos the type * to use as a loop cursor.
* @param n another type * to use as temporary storage.
* @param head the head for your list.
* @param member the name of the list_struct within the struct.
*/
#define sbi_list_for_each_entry_safe(pos, n, head, member) \
for (pos = sbi_list_entry((head)->next, typeof(*pos), member), \
n = sbi_list_entry(pos->member.next, typeof(*pos), member); \
&pos->member != (head); \
pos = n, n = sbi_list_entry(pos->member.next, typeof(*pos), member))
#endif

View File

@@ -0,0 +1,23 @@
/*
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2019 Western Digital Corporation or its affiliates.
*
* Authors:
* Anup Patel <anup.patel@wdc.com>
*/
#ifndef __SBI_MISALIGNED_LDST_H__
#define __SBI_MISALIGNED_LDST_H__
#include <sbi/sbi_types.h>
struct sbi_trap_regs;
int sbi_misaligned_load_handler(ulong addr, ulong tval2, ulong tinst,
struct sbi_trap_regs *regs);
int sbi_misaligned_store_handler(ulong addr, ulong tval2, ulong tinst,
struct sbi_trap_regs *regs);
#endif

View File

@@ -1,185 +0,0 @@
/*
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2024 Ventana Micro Systems Inc.
*
* Authors:
* Rahul Pathak <rpathak@ventanamicro.com>
*/
#ifndef __SBI_MPXY_H__
#define __SBI_MPXY_H__
#include <sbi/sbi_list.h>
struct sbi_scratch;
#define SBI_MPXY_MSGPROTO_VERSION(Major, Minor) ((Major << 16) | Minor)
enum sbi_mpxy_attr_id {
/* Standard channel attributes managed by MPXY framework */
SBI_MPXY_ATTR_MSG_PROT_ID = 0x00000000,
SBI_MPXY_ATTR_MSG_PROT_VER = 0x00000001,
SBI_MPXY_ATTR_MSG_MAX_LEN = 0x00000002,
SBI_MPXY_ATTR_MSG_SEND_TIMEOUT = 0x00000003,
SBI_MPXY_ATTR_MSG_COMPLETION_TIMEOUT = 0x00000004,
SBI_MPXY_ATTR_CHANNEL_CAPABILITY = 0x00000005,
SBI_MPXY_ATTR_SSE_EVENT_ID = 0x00000006,
SBI_MPXY_ATTR_MSI_CONTROL = 0x00000007,
SBI_MPXY_ATTR_MSI_ADDR_LO = 0x00000008,
SBI_MPXY_ATTR_MSI_ADDR_HI = 0x00000009,
SBI_MPXY_ATTR_MSI_DATA = 0x0000000A,
SBI_MPXY_ATTR_EVENTS_STATE_CONTROL = 0x0000000B,
SBI_MPXY_ATTR_STD_ATTR_MAX_IDX,
/* Message protocol specific attributes, managed by
* message protocol driver */
SBI_MPXY_ATTR_MSGPROTO_ATTR_START = 0x80000000,
SBI_MPXY_ATTR_MSGPROTO_ATTR_END = 0xffffffff
};
/**
* SBI MPXY Message Protocol IDs
*/
enum sbi_mpxy_msgproto_id {
SBI_MPXY_MSGPROTO_RPMI_ID = 0x00000000,
SBI_MPXY_MSGPROTO_MAX_IDX,
/** Vendor specific message protocol IDs */
SBI_MPXY_MSGPROTO_VENDOR_START = 0x80000000,
SBI_MPXY_MSGPROTO_VENDOR_END = 0xffffffff
};
enum SBI_EXT_MPXY_SHMEM_FLAGS {
SBI_EXT_MPXY_SHMEM_FLAG_OVERWRITE = 0b00,
SBI_EXT_MPXY_SHMEM_FLAG_OVERWRITE_RETURN = 0b01,
SBI_EXT_MPXY_SHMEM_FLAG_MAX_IDX
};
struct sbi_mpxy_msi_info {
/* MSI target address low 32-bit */
u32 msi_addr_lo;
/* MSI target address high 32-bit */
u32 msi_addr_hi;
/* MSI data */
u32 msi_data;
};
/**
* Channel attributes.
* NOTE: The sequence of attribute fields are as per the
* defined sequence in the attribute table in spec(or as
* per the enum sbi_mpxy_attr_id).
*/
struct sbi_mpxy_channel_attrs {
/* Message protocol ID */
u32 msg_proto_id;
/* Message protocol Version */
u32 msg_proto_version;
/* Message protocol maximum message data length(bytes) */
u32 msg_data_maxlen;
/* Message protocol message send timeout
* in microseconds */
u32 msg_send_timeout;
/* Message protocol message response timeout in
* microseconds. Its the aggregate of msg_send_timeout
* and the timeout in receiving the response */
u32 msg_completion_timeout;
/* Bit array for channel capabilities */
u32 capability;
u32 sse_event_id;
u32 msi_control;
struct sbi_mpxy_msi_info msi_info;
/* Events State Control */
u32 eventsstate_ctrl;
};
/** A Message proxy channel accessible through SBI interface */
struct sbi_mpxy_channel {
/** List head to a set of channels */
struct sbi_dlist head;
u32 channel_id;
struct sbi_mpxy_channel_attrs attrs;
/**
* Read message protocol attributes
* NOTE: inmem requires little-endian byte-ordering
*/
int (*read_attributes)(struct sbi_mpxy_channel *channel,
u32 *outmem,
u32 base_attr_id,
u32 attr_count);
/**
* Write message protocol attributes
* NOTE: outmem requires little-endian byte-ordering
*/
int (*write_attributes)(struct sbi_mpxy_channel *channel,
u32 *inmem,
u32 base_attr_id,
u32 attr_count);
/**
* Send a message and wait for response
* NOTE: msgbuf requires little-endian byte-ordering
*/
int (*send_message_with_response)(struct sbi_mpxy_channel *channel,
u32 msg_id, void *msgbuf, u32 msg_len,
void *respbuf, u32 resp_max_len,
unsigned long *resp_len);
/** Send message without response */
int (*send_message_without_response)(struct sbi_mpxy_channel *channel,
u32 msg_id, void *msgbuf, u32 msg_len);
/**
* Get notifications events if supported on a channel
* NOTE: eventsbuf requires little-endian byte-ordering
*/
int (*get_notification_events)(struct sbi_mpxy_channel *channel,
void *eventsbuf, u32 bufsize,
unsigned long *events_len);
/**
* Callback to enable the events state reporting
* in the message protocol implementation
*/
void (*switch_eventsstate)(u32 enable);
};
/** Register a Message proxy channel */
int sbi_mpxy_register_channel(struct sbi_mpxy_channel *channel);
/** Initialize Message proxy subsystem */
int sbi_mpxy_init(struct sbi_scratch *scratch);
/** Check if some Message proxy channel is available */
bool sbi_mpxy_channel_available(void);
/** Get message proxy shared memory size */
unsigned long sbi_mpxy_get_shmem_size(void);
/** Set message proxy shared memory on the calling HART */
int sbi_mpxy_set_shmem(unsigned long shmem_phys_lo,
unsigned long shmem_phys_hi,
unsigned long flags);
/** Get channel IDs list */
int sbi_mpxy_get_channel_ids(u32 start_index);
/** Read MPXY channel attributes */
int sbi_mpxy_read_attrs(u32 channel_id, u32 base_attr_id, u32 attr_count);
/** Write MPXY channel attributes */
int sbi_mpxy_write_attrs(u32 channel_id, u32 base_attr_id, u32 attr_count);
/**
* Send a message over a MPXY channel.
* In case if response is not expected, resp_data_len will be NULL.
*/
int sbi_mpxy_send_message(u32 channel_id, u8 msg_id,
unsigned long msg_data_len,
unsigned long *resp_data_len);
/** Get Message proxy notification events */
int sbi_mpxy_get_notification_events(u32 channel_id,
unsigned long *events_len);
#endif

View File

@@ -39,8 +39,6 @@
#define SBI_PLATFORM_FIRMWARE_CONTEXT_OFFSET (0x60 + __SIZEOF_POINTER__)
/** Offset of hart_index2id in struct sbi_platform */
#define SBI_PLATFORM_HART_INDEX2ID_OFFSET (0x60 + (__SIZEOF_POINTER__ * 2))
/** Offset of cbom_block_size in struct sbi_platform */
#define SBI_PLATFORM_CBOM_BLOCK_SIZE_OFFSET (0x60 + (__SIZEOF_POINTER__ * 3))
#define SBI_PLATFORM_TLB_RANGE_FLUSH_LIMIT_DEFAULT (1UL << 12)
@@ -50,13 +48,11 @@
#include <sbi/sbi_error.h>
#include <sbi/sbi_scratch.h>
#include <sbi/sbi_version.h>
#include <sbi/sbi_trap_ldst.h>
struct sbi_domain_memregion;
struct sbi_ecall_return;
struct sbi_trap_info;
struct sbi_trap_regs;
struct sbi_hart_features;
union sbi_ldst_data;
/** Possible feature flags of a platform */
enum sbi_platform_features {
@@ -113,42 +109,34 @@ struct sbi_platform_operations {
/** Get platform specific mhpmevent value */
uint64_t (*pmu_xlate_to_mhpmevent)(uint32_t event_idx, uint64_t data);
/** Initialize the platform interrupt controller during cold boot */
int (*irqchip_init)(void);
/** Initialize the platform console */
int (*console_init)(void);
/** Initialize IPI during cold boot */
int (*ipi_init)(void);
/** Initialize the platform interrupt controller for current HART */
int (*irqchip_init)(bool cold_boot);
/** Exit the platform interrupt controller for current HART */
void (*irqchip_exit)(void);
/** Initialize IPI for current HART */
int (*ipi_init)(bool cold_boot);
/** Exit IPI for current HART */
void (*ipi_exit)(void);
/** Get tlb flush limit value **/
u64 (*get_tlbr_flush_limit)(void);
/** Get tlb fifo num entries*/
u32 (*get_tlb_num_entries)(void);
/** Initialize platform timer during cold boot */
int (*timer_init)(void);
/** Initialize the platform Message Proxy(MPXY) driver */
int (*mpxy_init)(void);
/** Initialize platform timer for current HART */
int (*timer_init)(bool cold_boot);
/** Exit platform timer for current HART */
void (*timer_exit)(void);
/** Check if SBI vendor extension is implemented or not */
bool (*vendor_ext_check)(void);
/** platform specific SBI extension implementation provider */
int (*vendor_ext_provider)(long funcid,
struct sbi_trap_regs *regs,
struct sbi_ecall_return *out);
/** platform specific handler to fixup load fault */
int (*emulate_load)(int rlen, unsigned long addr,
union sbi_ldst_data *out_val);
/** platform specific handler to fixup store fault */
int (*emulate_store)(int wlen, unsigned long addr,
union sbi_ldst_data in_val);
/** platform specific pmp setup on current HART */
void (*pmp_set)(unsigned int n, unsigned long flags,
unsigned long prot, unsigned long addr,
unsigned long log2len);
/** platform specific pmp disable on current HART */
void (*pmp_disable)(unsigned int n);
const struct sbi_trap_regs *regs,
unsigned long *out_value,
struct sbi_trap_info *out_trap);
};
/** Platform default per-HART stack size for exception/interrupt handling */
@@ -156,7 +144,7 @@ struct sbi_platform_operations {
/** Platform default heap size */
#define SBI_PLATFORM_DEFAULT_HEAP_SIZE(__num_hart) \
(0x8000 + 0x1000 * (__num_hart))
(0x8000 + 0x800 * (__num_hart))
/** Representation of a platform */
struct sbi_platform {
@@ -176,7 +164,7 @@ struct sbi_platform {
char name[64];
/** Supported features */
u64 features;
/** Total number of HARTs (at most SBI_HARTMASK_MAX_BITS) */
/** Total number of HARTs */
u32 hart_count;
/** Per-HART stack size for exception/interrupt handling */
u32 hart_stack_size;
@@ -191,34 +179,70 @@ struct sbi_platform {
/**
* HART index to HART id table
*
* If hart_index2id != NULL then the table must contain a mapping
* for each HART index 0 <= <abc> < hart_count:
* For used HART index <abc>:
* hart_index2id[<abc>] = some HART id
* For unused HART index <abc>:
* hart_index2id[<abc>] = -1U
*
* If hart_index2id == NULL then we assume identity mapping
* hart_index2id[<abc>] = <abc>
*
* We have only two restrictions:
* 1. HART index < sbi_platform hart_count
* 2. HART id < SBI_HARTMASK_MAX_BITS
*/
const u32 *hart_index2id;
/** Allocation alignment for Scratch */
unsigned long cbom_block_size;
};
/**
* Prevent modification of struct sbi_platform from affecting
* SBI_PLATFORM_xxx_OFFSET
*/
assert_member_offset(struct sbi_platform, opensbi_version, SBI_PLATFORM_OPENSBI_VERSION_OFFSET);
assert_member_offset(struct sbi_platform, platform_version, SBI_PLATFORM_VERSION_OFFSET);
assert_member_offset(struct sbi_platform, name, SBI_PLATFORM_NAME_OFFSET);
assert_member_offset(struct sbi_platform, features, SBI_PLATFORM_FEATURES_OFFSET);
assert_member_offset(struct sbi_platform, hart_count, SBI_PLATFORM_HART_COUNT_OFFSET);
assert_member_offset(struct sbi_platform, hart_stack_size, SBI_PLATFORM_HART_STACK_SIZE_OFFSET);
assert_member_offset(struct sbi_platform, heap_size, SBI_PLATFORM_HEAP_SIZE_OFFSET);
assert_member_offset(struct sbi_platform, reserved, SBI_PLATFORM_RESERVED_OFFSET);
assert_member_offset(struct sbi_platform, platform_ops_addr, SBI_PLATFORM_OPS_OFFSET);
assert_member_offset(struct sbi_platform, firmware_context, SBI_PLATFORM_FIRMWARE_CONTEXT_OFFSET);
assert_member_offset(struct sbi_platform, hart_index2id, SBI_PLATFORM_HART_INDEX2ID_OFFSET);
assert_member_offset(struct sbi_platform, cbom_block_size, SBI_PLATFORM_CBOM_BLOCK_SIZE_OFFSET);
_Static_assert(
offsetof(struct sbi_platform, opensbi_version)
== SBI_PLATFORM_OPENSBI_VERSION_OFFSET,
"struct sbi_platform definition has changed, please redefine "
"SBI_PLATFORM_OPENSBI_VERSION_OFFSET");
_Static_assert(
offsetof(struct sbi_platform, platform_version)
== SBI_PLATFORM_VERSION_OFFSET,
"struct sbi_platform definition has changed, please redefine "
"SBI_PLATFORM_VERSION_OFFSET");
_Static_assert(
offsetof(struct sbi_platform, name)
== SBI_PLATFORM_NAME_OFFSET,
"struct sbi_platform definition has changed, please redefine "
"SBI_PLATFORM_NAME_OFFSET");
_Static_assert(
offsetof(struct sbi_platform, features)
== SBI_PLATFORM_FEATURES_OFFSET,
"struct sbi_platform definition has changed, please redefine "
"SBI_PLATFORM_FEATURES_OFFSET");
_Static_assert(
offsetof(struct sbi_platform, hart_count)
== SBI_PLATFORM_HART_COUNT_OFFSET,
"struct sbi_platform definition has changed, please redefine "
"SBI_PLATFORM_HART_COUNT_OFFSET");
_Static_assert(
offsetof(struct sbi_platform, hart_stack_size)
== SBI_PLATFORM_HART_STACK_SIZE_OFFSET,
"struct sbi_platform definition has changed, please redefine "
"SBI_PLATFORM_HART_STACK_SIZE_OFFSET");
_Static_assert(
offsetof(struct sbi_platform, platform_ops_addr)
== SBI_PLATFORM_OPS_OFFSET,
"struct sbi_platform definition has changed, please redefine "
"SBI_PLATFORM_OPS_OFFSET");
_Static_assert(
offsetof(struct sbi_platform, firmware_context)
== SBI_PLATFORM_FIRMWARE_CONTEXT_OFFSET,
"struct sbi_platform definition has changed, please redefine "
"SBI_PLATFORM_FIRMWARE_CONTEXT_OFFSET");
_Static_assert(
offsetof(struct sbi_platform, hart_index2id)
== SBI_PLATFORM_HART_INDEX2ID_OFFSET,
"struct sbi_platform definition has changed, please redefine "
"SBI_PLATFORM_HART_INDEX2ID_OFFSET");
/** Get pointer to sbi_platform for sbi_scratch pointer */
#define sbi_platform_ptr(__s) \
@@ -234,6 +258,16 @@ assert_member_offset(struct sbi_platform, cbom_block_size, SBI_PLATFORM_CBOM_BLO
#define sbi_platform_has_mfaults_delegation(__p) \
((__p)->features & SBI_PLATFORM_HAS_MFAULTS_DELEGATION)
/**
* Get HART index for the given HART
*
* @param plat pointer to struct sbi_platform
* @param hartid HART ID
*
* @return 0 <= value < hart_count for valid HART otherwise -1U
*/
u32 sbi_platform_hart_index(const struct sbi_platform *plat, u32 hartid);
/**
* Get the platform features in string format
*
@@ -291,20 +325,6 @@ static inline u64 sbi_platform_tlbr_flush_limit(const struct sbi_platform *plat)
return SBI_PLATFORM_TLB_RANGE_FLUSH_LIMIT_DEFAULT;
}
/**
* Get platform specific tlb fifo num entries.
*
* @param plat pointer to struct sbi_platform
*
* @return number of tlb fifo entries
*/
static inline u32 sbi_platform_tlb_fifo_num_entries(const struct sbi_platform *plat)
{
if (plat && sbi_platform_ops(plat)->get_tlb_num_entries)
return sbi_platform_ops(plat)->get_tlb_num_entries();
return sbi_hart_count();
}
/**
* Get total number of HARTs supported by the platform
*
@@ -333,6 +353,24 @@ static inline u32 sbi_platform_hart_stack_size(const struct sbi_platform *plat)
return 0;
}
/**
* Check whether given HART is invalid
*
* @param plat pointer to struct sbi_platform
* @param hartid HART ID
*
* @return true if HART is invalid and false otherwise
*/
static inline bool sbi_platform_hart_invalid(const struct sbi_platform *plat,
u32 hartid)
{
if (!plat)
return true;
if (plat->hart_count <= sbi_platform_hart_index(plat, hartid))
return true;
return false;
}
/**
* Check whether given HART is allowed to do cold boot
*
@@ -515,59 +553,98 @@ static inline uint64_t sbi_platform_pmu_xlate_to_mhpmevent(const struct sbi_plat
}
/**
* Initialize the platform interrupt controller during cold boot
* Initialize the platform console
*
* @param plat pointer to struct sbi_platform
*
* @return 0 on success and negative error code on failure
*/
static inline int sbi_platform_irqchip_init(const struct sbi_platform *plat)
static inline int sbi_platform_console_init(const struct sbi_platform *plat)
{
if (plat && sbi_platform_ops(plat)->console_init)
return sbi_platform_ops(plat)->console_init();
return 0;
}
/**
* Initialize the platform interrupt controller for current HART
*
* @param plat pointer to struct sbi_platform
* @param cold_boot whether cold boot (true) or warm_boot (false)
*
* @return 0 on success and negative error code on failure
*/
static inline int sbi_platform_irqchip_init(const struct sbi_platform *plat,
bool cold_boot)
{
if (plat && sbi_platform_ops(plat)->irqchip_init)
return sbi_platform_ops(plat)->irqchip_init();
return sbi_platform_ops(plat)->irqchip_init(cold_boot);
return 0;
}
/**
* Initialize the platform IPI support during cold boot
* Exit the platform interrupt controller for current HART
*
* @param plat pointer to struct sbi_platform
*/
static inline void sbi_platform_irqchip_exit(const struct sbi_platform *plat)
{
if (plat && sbi_platform_ops(plat)->irqchip_exit)
sbi_platform_ops(plat)->irqchip_exit();
}
/**
* Initialize the platform IPI support for current HART
*
* @param plat pointer to struct sbi_platform
* @param cold_boot whether cold boot (true) or warm_boot (false)
*
* @return 0 on success and negative error code on failure
*/
static inline int sbi_platform_ipi_init(const struct sbi_platform *plat)
static inline int sbi_platform_ipi_init(const struct sbi_platform *plat,
bool cold_boot)
{
if (plat && sbi_platform_ops(plat)->ipi_init)
return sbi_platform_ops(plat)->ipi_init();
return sbi_platform_ops(plat)->ipi_init(cold_boot);
return 0;
}
/**
* Initialize the platform timer during cold boot
* Exit the platform IPI support for current HART
*
* @param plat pointer to struct sbi_platform
*/
static inline void sbi_platform_ipi_exit(const struct sbi_platform *plat)
{
if (plat && sbi_platform_ops(plat)->ipi_exit)
sbi_platform_ops(plat)->ipi_exit();
}
/**
* Initialize the platform timer for current HART
*
* @param plat pointer to struct sbi_platform
* @param cold_boot whether cold boot (true) or warm_boot (false)
*
* @return 0 on success and negative error code on failure
*/
static inline int sbi_platform_timer_init(const struct sbi_platform *plat)
static inline int sbi_platform_timer_init(const struct sbi_platform *plat,
bool cold_boot)
{
if (plat && sbi_platform_ops(plat)->timer_init)
return sbi_platform_ops(plat)->timer_init();
return sbi_platform_ops(plat)->timer_init(cold_boot);
return 0;
}
/**
* Initialize the platform Message Proxy drivers
* Exit the platform timer for current HART
*
* @param plat pointer to struct sbi_platform
*
* @return 0 on success and negative error code on failure
*/
static inline int sbi_platform_mpxy_init(const struct sbi_platform *plat)
static inline void sbi_platform_timer_exit(const struct sbi_platform *plat)
{
if (plat && sbi_platform_ops(plat)->mpxy_init)
return sbi_platform_ops(plat)->mpxy_init();
return 0;
if (plat && sbi_platform_ops(plat)->timer_exit)
sbi_platform_ops(plat)->timer_exit();
}
/**
@@ -580,7 +657,10 @@ static inline int sbi_platform_mpxy_init(const struct sbi_platform *plat)
static inline bool sbi_platform_vendor_ext_check(
const struct sbi_platform *plat)
{
return plat && sbi_platform_ops(plat)->vendor_ext_provider;
if (plat && sbi_platform_ops(plat)->vendor_ext_check)
return sbi_platform_ops(plat)->vendor_ext_check();
return false;
}
/**
@@ -597,92 +677,20 @@ static inline bool sbi_platform_vendor_ext_check(
static inline int sbi_platform_vendor_ext_provider(
const struct sbi_platform *plat,
long funcid,
struct sbi_trap_regs *regs,
struct sbi_ecall_return *out)
const struct sbi_trap_regs *regs,
unsigned long *out_value,
struct sbi_trap_info *out_trap)
{
if (plat && sbi_platform_ops(plat)->vendor_ext_provider)
if (plat && sbi_platform_ops(plat)->vendor_ext_provider) {
return sbi_platform_ops(plat)->vendor_ext_provider(funcid,
regs, out);
return SBI_ENOTSUPP;
}
/**
* Ask platform to emulate the trapped load
*
* @param plat pointer to struct sbi_platform
* @param rlen length of the load: 1/2/4/8...
* @param addr virtual address of the load. Platform needs to page-walk and
* find the physical address if necessary
* @param out_val value loaded
*
* @return 0 on success and negative error code on failure
*/
static inline int sbi_platform_emulate_load(const struct sbi_platform *plat,
int rlen, unsigned long addr,
union sbi_ldst_data *out_val)
{
if (plat && sbi_platform_ops(plat)->emulate_load) {
return sbi_platform_ops(plat)->emulate_load(rlen, addr,
out_val);
regs,
out_value,
out_trap);
}
return SBI_ENOTSUPP;
}
/**
* Ask platform to emulate the trapped store
*
* @param plat pointer to struct sbi_platform
* @param wlen length of the store: 1/2/4/8...
* @param addr virtual address of the store. Platform needs to page-walk and
* find the physical address if necessary
* @param in_val value to store
*
* @return 0 on success and negative error code on failure
*/
static inline int sbi_platform_emulate_store(const struct sbi_platform *plat,
int wlen, unsigned long addr,
union sbi_ldst_data in_val)
{
if (plat && sbi_platform_ops(plat)->emulate_store) {
return sbi_platform_ops(plat)->emulate_store(wlen, addr,
in_val);
}
return SBI_ENOTSUPP;
}
/**
* Platform specific PMP setup on current HART
*
* @param plat pointer to struct sbi_platform
* @param n index of the pmp entry
* @param flags domain memregion flags
* @param prot attribute of the pmp entry
* @param addr address of the pmp entry
* @param log2len size of the pmp entry as power-of-2
*/
static inline void sbi_platform_pmp_set(const struct sbi_platform *plat,
unsigned int n, unsigned long flags,
unsigned long prot, unsigned long addr,
unsigned long log2len)
{
if (plat && sbi_platform_ops(plat)->pmp_set)
sbi_platform_ops(plat)->pmp_set(n, flags, prot, addr, log2len);
}
/**
* Platform specific PMP disable on current HART
*
* @param plat pointer to struct sbi_platform
* @param n index of the pmp entry
*/
static inline void sbi_platform_pmp_disable(const struct sbi_platform *plat,
unsigned int n)
{
if (plat && sbi_platform_ops(plat)->pmp_disable)
sbi_platform_ops(plat)->pmp_disable(n);
}
#endif
#endif

View File

@@ -11,7 +11,6 @@
#define __SBI_PMU_H__
#include <sbi/sbi_types.h>
#include <sbi/sbi_trap.h>
struct sbi_scratch;
@@ -24,7 +23,6 @@ struct sbi_scratch;
#define SBI_PMU_HW_CTR_MAX 32
#define SBI_PMU_CTR_MAX (SBI_PMU_HW_CTR_MAX + SBI_PMU_FW_CTR_MAX)
#define SBI_PMU_FIXED_CTR_MASK 0x07
#define SBI_PMU_CY_IR_MASK 0x05
struct sbi_pmu_device {
/** Name of the PMU platform device */
@@ -91,12 +89,6 @@ struct sbi_pmu_device {
* Custom function returning the machine-specific irq-bit.
*/
int (*hw_counter_irq_bit)(void);
/**
* Custom function to inhibit counting of events while in
* specified mode.
*/
void (*hw_counter_filter_mode)(unsigned long flags, int counter_index);
};
/** Get the PMU platform device */
@@ -114,9 +106,6 @@ void sbi_pmu_exit(struct sbi_scratch *scratch);
/** Return the pmu irq bit depending on extension existence */
int sbi_pmu_irq_bit(void);
/** Return the pmu irq mask or 0 if the pmu overflow irq is not supported */
unsigned long sbi_pmu_irq_mask(void);
/**
* Add the hardware event to counter mapping information. This should be called
* from the platform code to update the mapping table.
@@ -145,8 +134,6 @@ int sbi_pmu_ctr_start(unsigned long cidx_base, unsigned long cidx_mask,
unsigned long flags, uint64_t ival);
int sbi_pmu_ctr_get_info(uint32_t cidx, unsigned long *ctr_info);
int sbi_pmu_event_get_info(unsigned long shmem_lo, unsigned long shmem_high,
unsigned long num_events, unsigned long flags);
unsigned long sbi_pmu_num_ctr(void);
@@ -156,6 +143,4 @@ int sbi_pmu_ctr_cfg_match(unsigned long cidx_base, unsigned long cidx_mask,
int sbi_pmu_ctr_incr_fw(enum sbi_pmu_fw_event_code_id fw_id);
void sbi_pmu_ovf_irq();
#endif

View File

@@ -22,7 +22,7 @@
#define SBI_SCRATCH_FW_RW_OFFSET (2 * __SIZEOF_POINTER__)
/** Offset of fw_heap_offset member in sbi_scratch */
#define SBI_SCRATCH_FW_HEAP_OFFSET (3 * __SIZEOF_POINTER__)
/** Offset of fw_heap_size member in sbi_scratch */
/** Offset of fw_heap_size_offset member in sbi_scratch */
#define SBI_SCRATCH_FW_HEAP_SIZE_OFFSET (4 * __SIZEOF_POINTER__)
/** Offset of next_arg1 member in sbi_scratch */
#define SBI_SCRATCH_NEXT_ARG1_OFFSET (5 * __SIZEOF_POINTER__)
@@ -36,16 +36,14 @@
#define SBI_SCRATCH_PLATFORM_ADDR_OFFSET (9 * __SIZEOF_POINTER__)
/** Offset of hartid_to_scratch member in sbi_scratch */
#define SBI_SCRATCH_HARTID_TO_SCRATCH_OFFSET (10 * __SIZEOF_POINTER__)
/** Offset of trap_context member in sbi_scratch */
#define SBI_SCRATCH_TRAP_CONTEXT_OFFSET (11 * __SIZEOF_POINTER__)
/** Offset of trap_exit member in sbi_scratch */
#define SBI_SCRATCH_TRAP_EXIT_OFFSET (11 * __SIZEOF_POINTER__)
/** Offset of tmp0 member in sbi_scratch */
#define SBI_SCRATCH_TMP0_OFFSET (12 * __SIZEOF_POINTER__)
/** Offset of options member in sbi_scratch */
#define SBI_SCRATCH_OPTIONS_OFFSET (13 * __SIZEOF_POINTER__)
/** Offset of hartindex member in sbi_scratch */
#define SBI_SCRATCH_HARTINDEX_OFFSET (14 * __SIZEOF_POINTER__)
/** Offset of extra space in sbi_scratch */
#define SBI_SCRATCH_EXTRA_SPACE_OFFSET (15 * __SIZEOF_POINTER__)
#define SBI_SCRATCH_EXTRA_SPACE_OFFSET (14 * __SIZEOF_POINTER__)
/** Maximum size of sbi_scratch (4KB) */
#define SBI_SCRATCH_SIZE (0x1000)
@@ -79,35 +77,73 @@ struct sbi_scratch {
unsigned long platform_addr;
/** Address of HART ID to sbi_scratch conversion function */
unsigned long hartid_to_scratch;
/** Address of current trap context */
unsigned long trap_context;
/** Address of trap exit function */
unsigned long trap_exit;
/** Temporary storage */
unsigned long tmp0;
/** Options for OpenSBI library */
unsigned long options;
/** Index of the hart */
unsigned long hartindex;
};
/**
* Prevent modification of struct sbi_scratch from affecting
* SBI_SCRATCH_xxx_OFFSET
*/
assert_member_offset(struct sbi_scratch, fw_start, SBI_SCRATCH_FW_START_OFFSET);
assert_member_offset(struct sbi_scratch, fw_size, SBI_SCRATCH_FW_SIZE_OFFSET);
assert_member_offset(struct sbi_scratch, fw_rw_offset, SBI_SCRATCH_FW_RW_OFFSET);
assert_member_offset(struct sbi_scratch, fw_heap_offset, SBI_SCRATCH_FW_HEAP_OFFSET);
assert_member_offset(struct sbi_scratch, fw_heap_size, SBI_SCRATCH_FW_HEAP_SIZE_OFFSET);
assert_member_offset(struct sbi_scratch, next_arg1, SBI_SCRATCH_NEXT_ARG1_OFFSET);
assert_member_offset(struct sbi_scratch, next_addr, SBI_SCRATCH_NEXT_ADDR_OFFSET);
assert_member_offset(struct sbi_scratch, next_mode, SBI_SCRATCH_NEXT_MODE_OFFSET);
assert_member_offset(struct sbi_scratch, warmboot_addr, SBI_SCRATCH_WARMBOOT_ADDR_OFFSET);
assert_member_offset(struct sbi_scratch, platform_addr, SBI_SCRATCH_PLATFORM_ADDR_OFFSET);
assert_member_offset(struct sbi_scratch, hartid_to_scratch, SBI_SCRATCH_HARTID_TO_SCRATCH_OFFSET);
assert_member_offset(struct sbi_scratch, trap_context, SBI_SCRATCH_TRAP_CONTEXT_OFFSET);
assert_member_offset(struct sbi_scratch, tmp0, SBI_SCRATCH_TMP0_OFFSET);
assert_member_offset(struct sbi_scratch, options, SBI_SCRATCH_OPTIONS_OFFSET);
assert_member_offset(struct sbi_scratch, hartindex, SBI_SCRATCH_HARTINDEX_OFFSET);
_Static_assert(
offsetof(struct sbi_scratch, fw_start)
== SBI_SCRATCH_FW_START_OFFSET,
"struct sbi_scratch definition has changed, please redefine "
"SBI_SCRATCH_FW_START_OFFSET");
_Static_assert(
offsetof(struct sbi_scratch, fw_size)
== SBI_SCRATCH_FW_SIZE_OFFSET,
"struct sbi_scratch definition has changed, please redefine "
"SBI_SCRATCH_FW_SIZE_OFFSET");
_Static_assert(
offsetof(struct sbi_scratch, next_arg1)
== SBI_SCRATCH_NEXT_ARG1_OFFSET,
"struct sbi_scratch definition has changed, please redefine "
"SBI_SCRATCH_NEXT_ARG1_OFFSET");
_Static_assert(
offsetof(struct sbi_scratch, next_addr)
== SBI_SCRATCH_NEXT_ADDR_OFFSET,
"struct sbi_scratch definition has changed, please redefine "
"SBI_SCRATCH_NEXT_ADDR_OFFSET");
_Static_assert(
offsetof(struct sbi_scratch, next_mode)
== SBI_SCRATCH_NEXT_MODE_OFFSET,
"struct sbi_scratch definition has changed, please redefine "
"SBI_SCRATCH_NEXT_MODE_OFFSET");
_Static_assert(
offsetof(struct sbi_scratch, warmboot_addr)
== SBI_SCRATCH_WARMBOOT_ADDR_OFFSET,
"struct sbi_scratch definition has changed, please redefine "
"SBI_SCRATCH_WARMBOOT_ADDR_OFFSET");
_Static_assert(
offsetof(struct sbi_scratch, platform_addr)
== SBI_SCRATCH_PLATFORM_ADDR_OFFSET,
"struct sbi_scratch definition has changed, please redefine "
"SBI_SCRATCH_PLATFORM_ADDR_OFFSET");
_Static_assert(
offsetof(struct sbi_scratch, hartid_to_scratch)
== SBI_SCRATCH_HARTID_TO_SCRATCH_OFFSET,
"struct sbi_scratch definition has changed, please redefine "
"SBI_SCRATCH_HARTID_TO_SCRATCH_OFFSET");
_Static_assert(
offsetof(struct sbi_scratch, trap_exit)
== SBI_SCRATCH_TRAP_EXIT_OFFSET,
"struct sbi_scratch definition has changed, please redefine "
"SBI_SCRATCH_TRAP_EXIT_OFFSET");
_Static_assert(
offsetof(struct sbi_scratch, tmp0)
== SBI_SCRATCH_TMP0_OFFSET,
"struct sbi_scratch definition has changed, please redefine "
"SBI_SCRATCH_TMP0_OFFSET");
_Static_assert(
offsetof(struct sbi_scratch, options)
== SBI_SCRATCH_OPTIONS_OFFSET,
"struct sbi_scratch definition has changed, please redefine "
"SBI_SCRATCH_OPTIONS_OFFSET");
/** Possible options for OpenSBI library */
enum sbi_scratch_options {
@@ -119,7 +155,7 @@ enum sbi_scratch_options {
/** Get pointer to sbi_scratch for current HART */
#define sbi_scratch_thishart_ptr() \
((struct sbi_scratch *)csr_read_relaxed(CSR_MSCRATCH))
((struct sbi_scratch *)csr_read(CSR_MSCRATCH))
/** Get Arg1 of next booting stage for current HART */
#define sbi_scratch_thishart_arg1_ptr() \
@@ -166,58 +202,18 @@ do { \
= (__type)(__ptr); \
} while (0)
/** Get the hart index of the current hart */
#define current_hartindex() \
(sbi_scratch_thishart_ptr()->hartindex)
/** Number of harts managed by this OpenSBI instance */
extern u32 sbi_scratch_hart_count;
/** Get the number of harts managed by this OpenSBI instance */
#define sbi_hart_count() sbi_scratch_hart_count
/** Iterate over the harts managed by this OpenSBI instance */
#define sbi_for_each_hartindex(__var) \
for (u32 __var = 0; __var < sbi_hart_count(); ++__var)
/** Check whether a particular HART index is valid or not */
#define sbi_hartindex_valid(__hartindex) ((__hartindex) < sbi_hart_count())
/** HART index to HART id table */
extern u32 hartindex_to_hartid_table[];
/** Get sbi_scratch from HART index */
#define sbi_hartindex_to_hartid(__hartindex) \
({ \
((__hartindex) < SBI_HARTMASK_MAX_BITS) ? \
hartindex_to_hartid_table[__hartindex] : -1U; \
})
/** HART index to scratch table */
extern struct sbi_scratch *hartindex_to_scratch_table[];
/** Get sbi_scratch from HART index */
#define sbi_hartindex_to_scratch(__hartindex) \
({ \
((__hartindex) < SBI_HARTMASK_MAX_BITS) ? \
hartindex_to_scratch_table[__hartindex] : NULL; \
})
/**
* Get logical index for given HART id
* @param hartid physical HART id
* @returns value between 0 to SBI_HARTMASK_MAX_BITS upon success and
* SBI_HARTMASK_MAX_BITS upon failure.
*/
u32 sbi_hartid_to_hartindex(u32 hartid);
/** HART id to scratch table */
extern struct sbi_scratch *hartid_to_scratch_table[];
/** Get sbi_scratch from HART id */
#define sbi_hartid_to_scratch(__hartid) \
sbi_hartindex_to_scratch(sbi_hartid_to_hartindex(__hartid))
hartid_to_scratch_table[__hartid]
/** Check whether particular HART id is valid or not */
#define sbi_hartid_valid(__hartid) \
sbi_hartindex_valid(sbi_hartid_to_hartindex(__hartid))
/** Last HART id having a sbi_scratch pointer */
extern u32 last_hartid_having_scratch;
/** Get last HART id having a sbi_scratch pointer */
#define sbi_scratch_last_hartid() last_hartid_having_scratch
#endif

View File

@@ -1,33 +0,0 @@
/*
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Simple singly-linked list library.
 *
 * Copyright (c) 2025 Rivos Inc.
 *
 * Authors:
 *   Clément Léger <cleger@rivosinc.com>
 */
#ifndef __SBI_SLIST_H__
#define __SBI_SLIST_H__
#include <sbi/sbi_types.h>
/* Static initializer for a list head that starts at _ptr */
#define SBI_SLIST_HEAD_INIT(_ptr) (_ptr)
/* Declare a list head named _lname for nodes of type "struct _stype" */
#define SBI_SLIST_HEAD(_lname, _stype) struct _stype *_lname
/* Declare the "next" link member inside a node of type "struct _stype" */
#define SBI_SLIST_NODE(_stype) SBI_SLIST_HEAD(next, _stype)
/* Designated initializer for a node's "next" link */
#define SBI_SLIST_NODE_INIT(_ptr) .next = _ptr
/* Reset a list head to the empty list */
#define SBI_INIT_SLIST_HEAD(_head) (_head) = NULL
/* Push _ptr at the front of the list rooted at _head (O(1)) */
#define SBI_SLIST_ADD(_ptr, _head) \
	do { \
		(_ptr)->next = _head; \
		(_head) = _ptr; \
	} while (0)
/* Iterate _ptr over every node reachable from _head */
#define SBI_SLIST_FOR_EACH_ENTRY(_ptr, _head) \
	for (_ptr = _head; _ptr; _ptr = _ptr->next)
#endif

View File

@@ -1,95 +0,0 @@
/*
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2023 Rivos Systems.
*/
#ifndef __SBI_SSE_H__
#define __SBI_SSE_H__
#include <sbi/sbi_types.h>
#include <sbi/sbi_list.h>
#include <sbi/riscv_locks.h>
struct sbi_scratch;
struct sbi_trap_regs;
struct sbi_ecall_return;
#define EXC_MODE_PP_SHIFT 0
#define EXC_MODE_PP BIT(EXC_MODE_PP_SHIFT)
#define EXC_MODE_PV_SHIFT 1
#define EXC_MODE_PV BIT(EXC_MODE_PV_SHIFT)
#define EXC_MODE_SSTATUS_SPIE_SHIFT 2
#define EXC_MODE_SSTATUS_SPIE BIT(EXC_MODE_SSTATUS_SPIE_SHIFT)
struct sbi_sse_cb_ops {
/**
* Called when hart_id is changed on the event.
*/
void (*set_hartid_cb)(uint32_t event_id, unsigned long hart_id);
/**
* Called when the SBI_EXT_SSE_COMPLETE is invoked on the event.
*/
void (*complete_cb)(uint32_t event_id);
/**
* Called when the SBI_EXT_SSE_REGISTER is invoked on the event.
*/
void (*register_cb)(uint32_t event_id);
/**
* Called when the SBI_EXT_SSE_UNREGISTER is invoked on the event.
*/
void (*unregister_cb)(uint32_t event_id);
/**
* Called when the SBI_EXT_SSE_ENABLE is invoked on the event.
*/
void (*enable_cb)(uint32_t event_id);
/**
* Called when the SBI_EXT_SSE_DISABLE is invoked on the event.
*/
void (*disable_cb)(uint32_t event_id);
};
/* Add a supported event with associated callback operations
 * @param event_id Event identifier (SBI_SSE_EVENT_* or a custom platform one)
 * @param cb_ops Callback operations (Can be NULL if any)
 * @return 0 on success, error otherwise
 */
int sbi_sse_add_event(uint32_t event_id, const struct sbi_sse_cb_ops *cb_ops);
/* Inject an event to the current hart
 * @param event_id Event identifier (SBI_SSE_EVENT_*)
 * @return 0 on success, error otherwise
 */
int sbi_sse_inject_event(uint32_t event_id);
/* Run any SSE events pending on the current hart using the given register state */
void sbi_sse_process_pending_events(struct sbi_trap_regs *regs);
/* Initialize per-hart SSE state; cold_boot selects the one-time boot path */
int sbi_sse_init(struct sbi_scratch *scratch, bool cold_boot);
/* Tear down SSE state tracked in the given scratch area */
void sbi_sse_exit(struct sbi_scratch *scratch);
/* Interface called from sbi_ecall_sse.c */
int sbi_sse_register(uint32_t event_id, unsigned long handler_entry_pc,
		     unsigned long handler_entry_arg);
int sbi_sse_unregister(uint32_t event_id);
int sbi_sse_hart_mask(void);
int sbi_sse_hart_unmask(void);
int sbi_sse_enable(uint32_t event_id);
int sbi_sse_disable(uint32_t event_id);
int sbi_sse_complete(struct sbi_trap_regs *regs, struct sbi_ecall_return *out);
int sbi_sse_inject_from_ecall(uint32_t event_id, unsigned long hart_id,
			      struct sbi_ecall_return *out);
int sbi_sse_read_attrs(uint32_t event_id, uint32_t base_attr_id,
		       uint32_t attr_count, unsigned long output_phys_lo,
		       unsigned long output_phys_hi);
int sbi_sse_write_attrs(uint32_t event_id, uint32_t base_attr_id,
			uint32_t attr_count, unsigned long input_phys_lo,
			unsigned long input_phys_hi);

View File

@@ -28,9 +28,6 @@ struct sbi_timer_device {
/** Stop timer event for current HART */
void (*timer_event_stop)(void);
/** Initialize timer device for current HART */
int (*warm_init)(void);
};
struct sbi_scratch;
@@ -81,10 +78,8 @@ u64 sbi_timer_get_delta(void);
/** Set timer delta value for current HART */
void sbi_timer_set_delta(ulong delta);
#if __riscv_xlen == 32
/** Set upper 32-bits of timer delta value for current HART */
void sbi_timer_set_delta_upper(ulong delta_upper);
#endif
/** Start timer event for current HART */
void sbi_timer_event_start(u64 next_event);

View File

@@ -20,35 +20,34 @@
/* clang-format on */
struct sbi_scratch;
#define SBI_TLB_FIFO_NUM_ENTRIES 8
enum sbi_tlb_type {
SBI_TLB_FENCE_I = 0,
SBI_TLB_SFENCE_VMA,
SBI_TLB_SFENCE_VMA_ASID,
SBI_TLB_HFENCE_GVMA_VMID,
SBI_TLB_HFENCE_GVMA,
SBI_TLB_HFENCE_VVMA_ASID,
SBI_TLB_HFENCE_VVMA,
SBI_TLB_TYPE_MAX,
};
struct sbi_scratch;
struct sbi_tlb_info {
unsigned long start;
unsigned long size;
uint16_t asid;
uint16_t vmid;
enum sbi_tlb_type type;
unsigned long asid;
unsigned long vmid;
void (*local_fn)(struct sbi_tlb_info *tinfo);
struct sbi_hartmask smask;
};
#define SBI_TLB_INFO_INIT(__p, __start, __size, __asid, __vmid, __type, __src) \
void sbi_tlb_local_hfence_vvma(struct sbi_tlb_info *tinfo);
void sbi_tlb_local_hfence_gvma(struct sbi_tlb_info *tinfo);
void sbi_tlb_local_sfence_vma(struct sbi_tlb_info *tinfo);
void sbi_tlb_local_hfence_vvma_asid(struct sbi_tlb_info *tinfo);
void sbi_tlb_local_hfence_gvma_vmid(struct sbi_tlb_info *tinfo);
void sbi_tlb_local_sfence_vma_asid(struct sbi_tlb_info *tinfo);
void sbi_tlb_local_fence_i(struct sbi_tlb_info *tinfo);
#define SBI_TLB_INFO_INIT(__p, __start, __size, __asid, __vmid, __lfn, __src) \
do { \
(__p)->start = (__start); \
(__p)->size = (__size); \
(__p)->asid = (__asid); \
(__p)->vmid = (__vmid); \
(__p)->type = (__type); \
(__p)->local_fn = (__lfn); \
SBI_HARTMASK_INIT_EXCEPT(&(__p)->smask, (__src)); \
} while (0)

View File

@@ -87,18 +87,20 @@
/** Last member index in sbi_trap_regs */
#define SBI_TRAP_REGS_last 35
/** Index of epc member in sbi_trap_info */
#define SBI_TRAP_INFO_epc 0
/** Index of cause member in sbi_trap_info */
#define SBI_TRAP_INFO_cause 0
#define SBI_TRAP_INFO_cause 1
/** Index of tval member in sbi_trap_info */
#define SBI_TRAP_INFO_tval 1
#define SBI_TRAP_INFO_tval 2
/** Index of tval2 member in sbi_trap_info */
#define SBI_TRAP_INFO_tval2 2
#define SBI_TRAP_INFO_tval2 3
/** Index of tinst member in sbi_trap_info */
#define SBI_TRAP_INFO_tinst 3
#define SBI_TRAP_INFO_tinst 4
/** Index of gva member in sbi_trap_info */
#define SBI_TRAP_INFO_gva 4
#define SBI_TRAP_INFO_gva 5
/** Last member index in sbi_trap_info */
#define SBI_TRAP_INFO_last 5
#define SBI_TRAP_INFO_last 6
/* clang-format on */
@@ -112,90 +114,76 @@
/** Size (in bytes) of sbi_trap_info */
#define SBI_TRAP_INFO_SIZE SBI_TRAP_INFO_OFFSET(last)
#define STACK_BOUNDARY 16
#define ALIGN_TO_BOUNDARY(x, a) (((x) + (a) - 1) & ~((a) - 1))
/** Size (in bytes) of sbi_trap_context */
#define SBI_TRAP_CONTEXT_SIZE ALIGN_TO_BOUNDARY((SBI_TRAP_REGS_SIZE + \
SBI_TRAP_INFO_SIZE + \
__SIZEOF_POINTER__), STACK_BOUNDARY)
#ifndef __ASSEMBLER__
#include <sbi/sbi_types.h>
#include <sbi/sbi_scratch.h>
/** Representation of register state at time of trap/interrupt */
struct sbi_trap_regs {
union {
unsigned long gprs[32];
struct {
/** zero register state */
unsigned long zero;
/** ra register state */
unsigned long ra;
/** sp register state */
unsigned long sp;
/** gp register state */
unsigned long gp;
/** tp register state */
unsigned long tp;
/** t0 register state */
unsigned long t0;
/** t1 register state */
unsigned long t1;
/** t2 register state */
unsigned long t2;
/** s0 register state */
unsigned long s0;
/** s1 register state */
unsigned long s1;
/** a0 register state */
unsigned long a0;
/** a1 register state */
unsigned long a1;
/** a2 register state */
unsigned long a2;
/** a3 register state */
unsigned long a3;
/** a4 register state */
unsigned long a4;
/** a5 register state */
unsigned long a5;
/** a6 register state */
unsigned long a6;
/** a7 register state */
unsigned long a7;
/** s2 register state */
unsigned long s2;
/** s3 register state */
unsigned long s3;
/** s4 register state */
unsigned long s4;
/** s5 register state */
unsigned long s5;
/** s6 register state */
unsigned long s6;
/** s7 register state */
unsigned long s7;
/** s8 register state */
unsigned long s8;
/** s9 register state */
unsigned long s9;
/** s10 register state */
unsigned long s10;
/** s11 register state */
unsigned long s11;
/** t3 register state */
unsigned long t3;
/** t4 register state */
unsigned long t4;
/** t5 register state */
unsigned long t5;
/** t6 register state */
unsigned long t6;
};
};
/** zero register state */
unsigned long zero;
/** ra register state */
unsigned long ra;
/** sp register state */
unsigned long sp;
/** gp register state */
unsigned long gp;
/** tp register state */
unsigned long tp;
/** t0 register state */
unsigned long t0;
/** t1 register state */
unsigned long t1;
/** t2 register state */
unsigned long t2;
/** s0 register state */
unsigned long s0;
/** s1 register state */
unsigned long s1;
/** a0 register state */
unsigned long a0;
/** a1 register state */
unsigned long a1;
/** a2 register state */
unsigned long a2;
/** a3 register state */
unsigned long a3;
/** a4 register state */
unsigned long a4;
/** a5 register state */
unsigned long a5;
/** a6 register state */
unsigned long a6;
/** a7 register state */
unsigned long a7;
/** s2 register state */
unsigned long s2;
/** s3 register state */
unsigned long s3;
/** s4 register state */
unsigned long s4;
/** s5 register state */
unsigned long s5;
/** s6 register state */
unsigned long s6;
/** s7 register state */
unsigned long s7;
/** s8 register state */
unsigned long s8;
/** s9 register state */
unsigned long s9;
/** s10 register state */
unsigned long s10;
/** s11 register state */
unsigned long s11;
/** t3 register state */
unsigned long t3;
/** t4 register state */
unsigned long t4;
/** t5 register state */
unsigned long t5;
/** t6 register state */
unsigned long t6;
/** mepc register state */
unsigned long mepc;
/** mstatus register state */
@@ -204,23 +192,10 @@ struct sbi_trap_regs {
unsigned long mstatusH;
};
_Static_assert(
sizeof(((struct sbi_trap_regs *)0)->gprs) ==
offsetof(struct sbi_trap_regs, t6) +
sizeof(((struct sbi_trap_regs *)0)->t6),
"struct sbi_trap_regs's layout differs between gprs and named members");
#define REG_VAL(idx, regs) ((regs)->gprs[(idx)])
#define GET_RS1(insn, regs) REG_VAL(GET_RS1_NUM(insn), regs)
#define GET_RS2(insn, regs) REG_VAL(GET_RS2_NUM(insn), regs)
#define GET_RS1S(insn, regs) REG_VAL(GET_RS1S_NUM(insn), regs)
#define GET_RS2S(insn, regs) REG_VAL(GET_RS2S_NUM(insn), regs)
#define GET_RS2C(insn, regs) REG_VAL(GET_RS2C_NUM(insn), regs)
#define SET_RD(insn, regs, val) (REG_VAL(GET_RD_NUM(insn), regs) = (val))
/** Representation of trap details */
struct sbi_trap_info {
/** epc Trap program counter */
unsigned long epc;
/** cause Trap exception cause */
unsigned long cause;
/** tval Trap value */
@@ -233,16 +208,6 @@ struct sbi_trap_info {
unsigned long gva;
};
/** Representation of trap context saved on stack */
struct sbi_trap_context {
/** Register state */
struct sbi_trap_regs regs;
/** Trap details */
struct sbi_trap_info trap;
/** Pointer to previous trap context */
struct sbi_trap_context *prev_context;
};
static inline unsigned long sbi_regs_gva(const struct sbi_trap_regs *regs)
{
/*
@@ -259,35 +224,12 @@ static inline unsigned long sbi_regs_gva(const struct sbi_trap_regs *regs)
#endif
}
/*
 * Check whether the trap was taken from virtualization mode (V=1).
 * On RV32 the MPV bit lives in mstatusH; on RV64 it is in mstatus.
 */
static inline bool sbi_regs_from_virt(const struct sbi_trap_regs *regs)
{
#if __riscv_xlen == 32
	unsigned long mpv = regs->mstatusH & MSTATUSH_MPV;
#else
	unsigned long mpv = regs->mstatus & MSTATUS_MPV;
#endif
	return mpv != 0;
}
/* Extract the previous privilege mode (MPP field) from an mstatus value. */
static inline int sbi_mstatus_prev_mode(unsigned long mstatus)
{
	unsigned long mpp = mstatus & MSTATUS_MPP;

	return mpp >> MSTATUS_MPP_SHIFT;
}
int sbi_trap_redirect(struct sbi_trap_regs *regs,
const struct sbi_trap_info *trap);
struct sbi_trap_info *trap);
/*
 * Fetch the current trap context pointer stashed in the per-hart scratch
 * area, or NULL when no scratch is available.
 */
static inline struct sbi_trap_context *sbi_trap_get_context(struct sbi_scratch *scratch)
{
	if (!scratch)
		return NULL;
	return (void *)scratch->trap_context;
}
struct sbi_trap_regs *sbi_trap_handler(struct sbi_trap_regs *regs);
/*
 * Stash the current trap context pointer into the per-hart scratch area.
 * Stored as an unsigned long because scratch fields are plain machine
 * words; sbi_trap_get_context() performs the reverse conversion.
 */
static inline void sbi_trap_set_context(struct sbi_scratch *scratch,
					struct sbi_trap_context *tcntx)
{
	scratch->trap_context = (unsigned long)tcntx;
}
struct sbi_trap_context *sbi_trap_handler(struct sbi_trap_context *tcntx);
void __noreturn sbi_trap_exit(const struct sbi_trap_regs *regs);
#endif

View File

@@ -1,40 +0,0 @@
/*
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2019 Western Digital Corporation or its affiliates.
*
* Authors:
* Anup Patel <anup.patel@wdc.com>
*/
#ifndef __SBI_TRAP_LDST_H__
#define __SBI_TRAP_LDST_H__
#include <sbi/sbi_types.h>
#include <sbi/sbi_trap.h>
/* Scratch buffer used to stage data for emulated load/store accesses */
union sbi_ldst_data {
	/* 64-bit integer view of the staged data */
	u64 data_u64;
	/* 32-bit integer view of the staged data */
	u32 data_u32;
	/* Byte-wise view of the staged data (up to 8 bytes) */
	u8 data_bytes[8];
	/* Native machine-word view of the staged data */
	ulong data_ulong;
};
/* Emulate a misaligned load taken by the trapping hart */
int sbi_misaligned_load_handler(struct sbi_trap_context *tcntx);
/* Emulate a misaligned store taken by the trapping hart */
int sbi_misaligned_store_handler(struct sbi_trap_context *tcntx);
/* Handle a load access-fault trap */
int sbi_load_access_handler(struct sbi_trap_context *tcntx);
/* Handle a store access-fault trap */
int sbi_store_access_handler(struct sbi_trap_context *tcntx);
/*
 * Combine an original and replacement trap instruction encoding (tinst),
 * adjusting for addr_offset of the emulated access.
 */
ulong sbi_misaligned_tinst_fixup(ulong orig_tinst, ulong new_tinst,
				 ulong addr_offset);
/* Vector misaligned load emulator; rlen is the access width (presumably bytes) */
int sbi_misaligned_v_ld_emulator(int rlen, union sbi_ldst_data *out_val,
				 struct sbi_trap_context *tcntx);
/* Vector misaligned store emulator; wlen is the access width (presumably bytes) */
int sbi_misaligned_v_st_emulator(int wlen, union sbi_ldst_data in_val,
				 struct sbi_trap_context *tcntx);
#endif

View File

@@ -44,12 +44,7 @@ typedef unsigned long long uint64_t;
#error "Unexpected __riscv_xlen"
#endif
#if __STDC_VERSION__ < 202000L
typedef _Bool bool;
#define true 1
#define false 0
#endif
typedef int bool;
typedef unsigned long ulong;
typedef unsigned long uintptr_t;
typedef unsigned long size_t;
@@ -66,15 +61,15 @@ typedef uint32_t be32_t;
typedef uint64_t le64_t;
typedef uint64_t be64_t;
#define true 1
#define false 0
#define NULL ((void *)0)
#define __packed __attribute__((packed))
#define __noreturn __attribute__((noreturn))
#define __aligned(x) __attribute__((aligned(x)))
#ifndef __always_inline
#define __always_inline inline __attribute__((always_inline))
#endif
#define likely(x) __builtin_expect((x), 1)
#define unlikely(x) __builtin_expect((x), 0)
@@ -96,13 +91,6 @@ typedef uint64_t be64_t;
const typeof(((type *)0)->member) * __mptr = (ptr); \
(type *)((char *)__mptr - offsetof(type, member)); })
#define assert_member_offset(type, member, offset) \
_Static_assert( \
(offsetof(type, member)) == (offset ), \
"The offset " #offset " of " #member " in " #type \
"is not correct, please redefine it.")
#define array_size(x) (sizeof(x) / sizeof((x)[0]))
#define MAX(a, b) ((a) > (b) ? (a) : (b))

View File

@@ -1,73 +0,0 @@
/*
* SPDX-License-Identifier: BSD-2-Clause
*
* Author: Ivan Orlov <ivan.orlov0322@gmail.com>
*/
#ifdef CONFIG_SBIUNIT
#ifndef __SBI_UNIT_H__
#define __SBI_UNIT_H__
#include <sbi/sbi_types.h>
#include <sbi/sbi_console.h>
#include <sbi/sbi_string.h>
/* A single SBIUnit test case */
struct sbiunit_test_case {
	/* Test name (set to the function name by SBIUNIT_TEST_CASE) */
	const char *name;
	/* Set to true by SBIUNIT_EXPECT* helpers when a condition fails */
	bool failed;
	/* Test body; reports results through the passed test object */
	void (*test_func)(struct sbiunit_test_case *test);
};
/* A named collection of test cases with an optional init hook */
struct sbiunit_test_suite {
	/* Suite name (set by SBIUNIT_TEST_SUITE) */
	const char *name;
	/* Optional setup hook (NULL when unused) */
	void (*init)(void);
	/* Array of cases terminated by an empty SBIUNIT_END_CASE entry */
	struct sbiunit_test_case *cases;
};
#define SBIUNIT_TEST_CASE(func) \
{ \
.name = #func, \
.failed = false, \
.test_func = (func) \
}
#define SBIUNIT_END_CASE { }
#define SBIUNIT_TEST_SUITE(suite_name, cases_arr) \
struct sbiunit_test_suite suite_name = { \
.name = #suite_name, \
.init = NULL, \
.cases = cases_arr \
}
#define _sbiunit_msg(test, msg) "[SBIUnit] [%s:%d]: %s: %s", __FILE__, \
__LINE__, test->name, msg
#define SBIUNIT_INFO(test, msg) sbi_printf(_sbiunit_msg(test, msg))
#define SBIUNIT_PANIC(test, msg) sbi_panic(_sbiunit_msg(test, msg))
#define SBIUNIT_EXPECT(test, cond) do { \
if (!(cond)) { \
test->failed = true; \
SBIUNIT_INFO(test, "Condition \"" #cond "\" expected to be true!\n"); \
} \
} while (0)
#define SBIUNIT_ASSERT(test, cond) do { \
if (!(cond)) \
SBIUNIT_PANIC(test, "Condition \"" #cond "\" must be true!\n"); \
} while (0)
#define SBIUNIT_EXPECT_EQ(test, a, b) SBIUNIT_EXPECT(test, (a) == (b))
#define SBIUNIT_ASSERT_EQ(test, a, b) SBIUNIT_ASSERT(test, (a) == (b))
#define SBIUNIT_EXPECT_NE(test, a, b) SBIUNIT_EXPECT(test, (a) != (b))
#define SBIUNIT_ASSERT_NE(test, a, b) SBIUNIT_ASSERT(test, (a) != (b))
#define SBIUNIT_EXPECT_MEMEQ(test, a, b, len) SBIUNIT_EXPECT(test, !sbi_memcmp(a, b, len))
#define SBIUNIT_ASSERT_MEMEQ(test, a, b, len) SBIUNIT_ASSERT(test, !sbi_memcmp(a, b, len))
#define SBIUNIT_EXPECT_STREQ(test, a, b, len) SBIUNIT_EXPECT(test, !sbi_strncmp(a, b, len))
#define SBIUNIT_ASSERT_STREQ(test, a, b, len) SBIUNIT_ASSERT(test, !sbi_strncmp(a, b, len))
void run_all_tests(void);
#endif
#else
#define run_all_tests()
#endif

View File

@@ -11,7 +11,7 @@
#define __SBI_VERSION_H__
#define OPENSBI_VERSION_MAJOR 1
#define OPENSBI_VERSION_MINOR 7
#define OPENSBI_VERSION_MINOR 3
/**
* OpenSBI 32-bit version with:

View File

@@ -1,18 +0,0 @@
/*
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2025 SiFive
*/
#ifndef __SBI_VISIBILITY_H__
#define __SBI_VISIBILITY_H__
#ifndef __DTS__
/*
* Declare all global objects with hidden visibility so access is PC-relative
* instead of going through the GOT.
*/
#pragma GCC visibility push(hidden)
#endif
#endif

View File

@@ -70,12 +70,12 @@ void fdt_domain_fixup(void *fdt);
*
* @return 0 on success and negative error code on failure
*/
int fdt_domains_populate(const void *fdt);
int fdt_domains_populate(void *fdt);
#else
static inline void fdt_domain_fixup(void *fdt) { }
static inline int fdt_domains_populate(const void *fdt) { return 0; }
static inline int fdt_domains_populate(void *fdt) { return 0; }
#endif

View File

@@ -1,63 +0,0 @@
/*
* SPDX-License-Identifier: BSD-2-Clause
*
* fdt_driver.h - Generic support for initializing drivers from DT nodes.
*
* Copyright (c) 2024 SiFive
*/
#ifndef __FDT_DRIVER_H__
#define __FDT_DRIVER_H__
#include <sbi_utils/fdt/fdt_helper.h>
struct fdt_driver {
	/* Table of DT compatible strings matched by this driver */
	const struct fdt_match *match_table;
	/* Initialize a driver instance for the matched DT node */
	int (*init)(const void *fdt, int nodeoff,
		    const struct fdt_match *match);
	/* True when the DT binding is experimental — TODO confirm how matching uses it */
	bool experimental;
};
/* List of early FDT drivers generated at compile time */
extern const struct fdt_driver *const fdt_early_drivers[];
/**
* Initialize a driver instance for a specific DT node
*
* @param fdt devicetree blob
* @param nodeoff offset of a node in the devicetree blob
* @param drivers NULL-terminated array of drivers to match against this node
*
* @return 0 if a driver was matched and successfully initialized or a negative
* error code on failure
*/
int fdt_driver_init_by_offset(const void *fdt, int nodeoff,
const struct fdt_driver *const *drivers);
/**
* Initialize a driver instance for each DT node that matches any of the
* provided drivers
*
* @param fdt devicetree blob
* @param drivers NULL-terminated array of drivers to match against each node
*
* @return 0 if drivers for all matches (if any) were successfully initialized
* or a negative error code on failure
*/
int fdt_driver_init_all(const void *fdt,
const struct fdt_driver *const *drivers);
/**
* Initialize a driver instance for the first DT node that matches any of the
* provided drivers
*
* @param fdt devicetree blob
* @param drivers NULL-terminated array of drivers to match against each node
*
* @return 0 if a driver was matched and successfully initialized or a negative
* error code on failure
*/
int fdt_driver_init_one(const void *fdt,
const struct fdt_driver *const *drivers);
#endif /* __FDT_DRIVER_H__ */

View File

@@ -9,8 +9,6 @@
#ifndef __FDT_FIXUP_H__
#define __FDT_FIXUP_H__
#include <sbi/sbi_list.h>
struct sbi_cpu_idle_state {
const char *name;
uint32_t suspend_param;
@@ -32,7 +30,7 @@ struct sbi_cpu_idle_state {
* @param states: array of idle state descriptions, ending with empty element
* @return zero on success and -ve on failure
*/
int fdt_add_cpu_idle_states(void *fdt, const struct sbi_cpu_idle_state *state);
int fdt_add_cpu_idle_states(void *dtb, const struct sbi_cpu_idle_state *state);
/**
* Fix up the CPU node in the device tree
@@ -95,19 +93,6 @@ void fdt_plic_fixup(void *fdt);
*/
int fdt_reserved_memory_fixup(void *fdt);
/** Representation of a general fixup */
struct fdt_general_fixup {
struct sbi_dlist head;
const char *name;
void (*do_fixup)(struct fdt_general_fixup *f, void *fdt);
};
/** Register a general fixup */
int fdt_register_general_fixup(struct fdt_general_fixup *fixup);
/** UnRegister a general fixup */
void fdt_unregister_general_fixup(struct fdt_general_fixup *fixup);
/**
* General device tree fix-up
*

View File

@@ -34,87 +34,80 @@ struct platform_uart_data {
unsigned long reg_offset;
};
int fdt_parse_phandle_with_args(const void *fdt, int nodeoff,
const struct fdt_match *fdt_match_node(void *fdt, int nodeoff,
const struct fdt_match *match_table);
int fdt_find_match(void *fdt, int startoff,
const struct fdt_match *match_table,
const struct fdt_match **out_match);
int fdt_parse_phandle_with_args(void *fdt, int nodeoff,
const char *prop, const char *cells_prop,
int index, struct fdt_phandle_args *out_args);
int fdt_get_node_addr_size(const void *fdt, int node, int index,
int fdt_get_node_addr_size(void *fdt, int node, int index,
uint64_t *addr, uint64_t *size);
int fdt_get_node_addr_size_by_name(const void *fdt, int node, const char *name,
uint64_t *addr, uint64_t *size);
bool fdt_node_is_enabled(void *fdt, int nodeoff);
bool fdt_node_is_enabled(const void *fdt, int nodeoff);
int fdt_parse_hart_id(void *fdt, int cpu_offset, u32 *hartid);
int fdt_parse_hart_id(const void *fdt, int cpu_offset, u32 *hartid);
int fdt_parse_max_enabled_hart_id(void *fdt, u32 *max_hartid);
int fdt_parse_max_enabled_hart_id(const void *fdt, u32 *max_hartid);
int fdt_parse_timebase_frequency(void *fdt, unsigned long *freq);
int fdt_parse_cbom_block_size(const void *fdt, int cpu_offset, unsigned long *cbom_block_size);
int fdt_parse_timebase_frequency(const void *fdt, unsigned long *freq);
int fdt_parse_isa_extensions(const void *fdt, unsigned int hartid,
unsigned long *extensions);
int fdt_parse_gaisler_uart_node(const void *fdt, int nodeoffset,
int fdt_parse_gaisler_uart_node(void *fdt, int nodeoffset,
struct platform_uart_data *uart);
int fdt_parse_renesas_scif_node(const void *fdt, int nodeoffset,
int fdt_parse_renesas_scif_node(void *fdt, int nodeoffset,
struct platform_uart_data *uart);
int fdt_parse_shakti_uart_node(const void *fdt, int nodeoffset,
int fdt_parse_shakti_uart_node(void *fdt, int nodeoffset,
struct platform_uart_data *uart);
int fdt_parse_sifive_uart_node(const void *fdt, int nodeoffset,
int fdt_parse_sifive_uart_node(void *fdt, int nodeoffset,
struct platform_uart_data *uart);
int fdt_parse_uart_node(const void *fdt, int nodeoffset,
int fdt_parse_uart_node(void *fdt, int nodeoffset,
struct platform_uart_data *uart);
int fdt_parse_uart8250(const void *fdt, struct platform_uart_data *uart,
int fdt_parse_uart8250(void *fdt, struct platform_uart_data *uart,
const char *compatible);
int fdt_parse_xlnx_uartlite_node(const void *fdt, int nodeoffset,
int fdt_parse_xlnx_uartlite_node(void *fdt, int nodeoffset,
struct platform_uart_data *uart);
struct aplic_data;
int fdt_parse_aplic_node(const void *fdt, int nodeoff, struct aplic_data *aplic);
int fdt_parse_aplic_node(void *fdt, int nodeoff, struct aplic_data *aplic);
struct imsic_data;
bool fdt_check_imsic_mlevel(const void *fdt);
bool fdt_check_imsic_mlevel(void *fdt);
int fdt_parse_imsic_node(const void *fdt, int nodeoff, struct imsic_data *imsic);
int fdt_parse_imsic_node(void *fdt, int nodeoff, struct imsic_data *imsic);
struct plic_data;
int fdt_parse_plic_node(const void *fdt, int nodeoffset, struct plic_data *plic);
int fdt_parse_plic_node(void *fdt, int nodeoffset, struct plic_data *plic);
int fdt_parse_plic(const void *fdt, struct plic_data *plic, const char *compat);
int fdt_parse_plic(void *fdt, struct plic_data *plic, const char *compat);
int fdt_parse_aclint_node(const void *fdt, int nodeoffset,
bool for_timer, bool allow_regname,
int fdt_parse_aclint_node(void *fdt, int nodeoffset, bool for_timer,
unsigned long *out_addr1, unsigned long *out_size1,
unsigned long *out_addr2, unsigned long *out_size2,
u32 *out_first_hartid, u32 *out_hart_count);
int fdt_parse_plmt_node(const void *fdt, int nodeoffset, unsigned long *plmt_base,
unsigned long *plmt_size, u32 *hart_count);
int fdt_parse_plmt_node(void *fdt, int nodeoffset, unsigned long *plmt_base,
unsigned long *plmt_size, u32 *hart_count);
int fdt_parse_plicsw_node(const void *fdt, int nodeoffset, unsigned long *plicsw_base,
int fdt_parse_plicsw_node(void *fdt, int nodeoffset, unsigned long *plicsw_base,
unsigned long *size, u32 *hart_count);
int fdt_parse_compat_addr(const void *fdt, uint64_t *addr,
int fdt_parse_compat_addr(void *fdt, uint64_t *addr,
const char *compatible);
static inline const void *fdt_get_address(void)
{
return (const void *)root.next_arg1;
}
static inline void *fdt_get_address_rw(void)
static inline void *fdt_get_address(void)
{
return (void *)root.next_arg1;
}

View File

@@ -13,23 +13,6 @@
#include <sbi/sbi_types.h>
struct fdt_pmu_hw_event_select_map {
uint32_t eidx;
uint64_t select;
};
struct fdt_pmu_hw_event_counter_map {
uint32_t eidx_start;
uint32_t eidx_end;
uint32_t ctr_map;
};
struct fdt_pmu_raw_event_counter_map {
uint64_t select;
uint64_t select_mask;
uint32_t ctr_map;
};
#ifdef CONFIG_FDT_PMU
/**
@@ -43,7 +26,7 @@ struct fdt_pmu_raw_event_counter_map {
*
* @param fdt device tree blob
*/
int fdt_pmu_fixup(void *fdt);
void fdt_pmu_fixup(void *fdt);
/**
* Setup PMU data from device tree
@@ -52,7 +35,7 @@ int fdt_pmu_fixup(void *fdt);
*
* @return 0 on success and negative error code on failure
*/
int fdt_pmu_setup(const void *fdt);
int fdt_pmu_setup(void *fdt);
/**
* Get the mhpmevent select value read from DT for a given event
@@ -65,7 +48,7 @@ uint64_t fdt_pmu_get_select_value(uint32_t event_idx);
#else
static inline void fdt_pmu_fixup(void *fdt) { }
static inline int fdt_pmu_setup(const void *fdt) { return 0; }
static inline int fdt_pmu_setup(void *fdt) { return 0; }
static inline uint64_t fdt_pmu_get_select_value(uint32_t event_idx) { return 0; }
#endif

View File

@@ -10,21 +10,22 @@
#ifndef __FDT_GPIO_H__
#define __FDT_GPIO_H__
#include <sbi_utils/fdt/fdt_driver.h>
#include <sbi_utils/gpio/gpio.h>
struct fdt_phandle_args;
/** FDT based GPIO driver */
struct fdt_gpio {
struct fdt_driver driver;
const struct fdt_match *match_table;
int (*xlate)(struct gpio_chip *chip,
const struct fdt_phandle_args *pargs,
struct gpio_pin *out_pin);
int (*init)(void *fdt, int nodeoff, u32 phandle,
const struct fdt_match *match);
};
/** Get a GPIO pin using "gpios" DT property of client DT node */
int fdt_gpio_pin_get(const void *fdt, int nodeoff, int index,
int fdt_gpio_pin_get(void *fdt, int nodeoff, int index,
struct gpio_pin *out_pin);
/** Simple xlate function to convert two GPIO FDT cells into GPIO pin */

View File

@@ -40,7 +40,7 @@ struct gpio_pin {
/** Representation of a GPIO chip */
struct gpio_chip {
/** Pointer to GPIO driver owning this GPIO chip */
const void *driver;
void *driver;
/** Unique ID of the GPIO chip assigned by the driver */
unsigned int id;
/** Number of GPIOs supported by the GPIO chip */

View File

@@ -10,11 +10,17 @@
#ifndef __FDT_I2C_H__
#define __FDT_I2C_H__
#include <sbi_utils/fdt/fdt_driver.h>
#include <sbi_utils/i2c/i2c.h>
/** FDT based I2C adapter driver */
struct fdt_i2c_adapter {
const struct fdt_match *match_table;
int (*init)(void *fdt, int nodeoff,
const struct fdt_match *match);
};
/** Get I2C adapter identified by nodeoff */
int fdt_i2c_adapter_get(const void *fdt, int nodeoff,
int fdt_i2c_adapter_get(void *fdt, int nodeoff,
struct i2c_adapter **out_adapter);
#endif

View File

@@ -15,6 +15,9 @@
/** Representation of a I2C adapter */
struct i2c_adapter {
/** Pointer to I2C driver owning this I2C adapter */
void *driver;
/** Unique ID of the I2C adapter assigned by the driver */
int id;

View File

@@ -26,6 +26,8 @@ struct aclint_mswi_data {
u32 hart_count;
};
int aclint_mswi_warm_init(void);
int aclint_mswi_cold_init(struct aclint_mswi_data *mswi);
#endif

View File

@@ -13,25 +13,34 @@
#ifndef _IPI_ANDES_PLICSW_H_
#define _IPI_ANDES_PLICSW_H_
#define PLICSW_PRIORITY_BASE 0x4
#define PLICSW_PRIORITY_BASE 0x4
#define PLICSW_PENDING_BASE 0x1000
#define PLICSW_PENDING_BASE 0x1000
#define PLICSW_PENDING_STRIDE 0x8
#define PLICSW_ENABLE_BASE 0x2000
#define PLICSW_ENABLE_STRIDE 0x80
#define PLICSW_ENABLE_BASE 0x2000
#define PLICSW_ENABLE_STRIDE 0x80
#define PLICSW_CONTEXT_BASE 0x200000
#define PLICSW_CONTEXT_STRIDE 0x1000
#define PLICSW_CONTEXT_CLAIM 0x4
#define PLICSW_CONTEXT_BASE 0x200000
#define PLICSW_CONTEXT_STRIDE 0x1000
#define PLICSW_CONTEXT_CLAIM 0x4
#define PLICSW_REGION_ALIGN 0x1000
#define PLICSW_HART_MASK 0x01010101
#define PLICSW_HART_MAX_NR 8
#define PLICSW_REGION_ALIGN 0x1000
struct plicsw_data {
	/* Base address of the PLICSW register region */
	unsigned long addr;
	/* Size of the PLICSW register region in bytes */
	unsigned long size;
	/* Number of HARTs serviced by this PLICSW instance */
	uint32_t hart_count;
	/* hart id to source id table */
	uint32_t source_id[PLICSW_HART_MAX_NR];
};
int plicsw_warm_ipi_init(void);
int plicsw_cold_ipi_init(struct plicsw_data *plicsw);
#endif /* _IPI_ANDES_PLICSW_H_ */

View File

@@ -11,15 +11,24 @@
#define __FDT_IPI_H__
#include <sbi/sbi_types.h>
#include <sbi_utils/fdt/fdt_driver.h>
#ifdef CONFIG_FDT_IPI
int fdt_ipi_init(void);
struct fdt_ipi {
const struct fdt_match *match_table;
int (*cold_init)(void *fdt, int nodeoff, const struct fdt_match *match);
int (*warm_init)(void);
void (*exit)(void);
};
void fdt_ipi_exit(void);
int fdt_ipi_init(bool cold_boot);
#else
static inline int fdt_ipi_init(void) { return 0; }
static inline void fdt_ipi_exit(void) { }
static inline int fdt_ipi_init(bool cold_boot) { return 0; }
#endif

View File

@@ -12,7 +12,6 @@
#define __IRQCHIP_APLIC_H__
#include <sbi/sbi_types.h>
#include <sbi/sbi_irqchip.h>
#define APLIC_MAX_DELEGATE 16
@@ -31,9 +30,6 @@ struct aplic_delegate_data {
};
struct aplic_data {
/* Private members */
struct sbi_irqchip_device irqchip;
/* Public members */
unsigned long addr;
unsigned long size;
unsigned long num_idc;

View File

@@ -11,15 +11,25 @@
#define __FDT_IRQCHIP_H__
#include <sbi/sbi_types.h>
#include <sbi_utils/fdt/fdt_driver.h>
#ifdef CONFIG_FDT_IRQCHIP
int fdt_irqchip_init(void);
struct fdt_irqchip {
const struct fdt_match *match_table;
int (*cold_init)(void *fdt, int nodeoff, const struct fdt_match *match);
int (*warm_init)(void);
void (*exit)(void);
};
void fdt_irqchip_exit(void);
int fdt_irqchip_init(bool cold_boot);
#else
static inline int fdt_irqchip_init(void) { return 0; }
static inline void fdt_irqchip_exit(void) { }
static inline int fdt_irqchip_init(bool cold_boot) { return 0; }
#endif

View File

@@ -0,0 +1,33 @@
/*
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2022 Samuel Holland <samuel@sholland.org>
*/
#ifndef __IRQCHIP_FDT_IRQCHIP_PLIC_H__
#define __IRQCHIP_FDT_IRQCHIP_PLIC_H__
#include <sbi/sbi_types.h>
/**
 * Save the PLIC priority state
 * @param priority pointer to the memory region for the saved priority
 * @param num size of the memory region including interrupt source 0
 */
void fdt_plic_priority_save(u8 *priority, u32 num);
/**
 * Restore the PLIC priority state
 * @param priority pointer to the memory region for the saved priority
 * @param num size of the memory region including interrupt source 0
 */
void fdt_plic_priority_restore(const u8 *priority, u32 num);
/**
 * Save the PLIC context (interrupt enable words and threshold)
 * @param smode true for the S-mode context, false for the M-mode context
 * @param enable pointer to the memory region for the saved enable words
 * @param threshold pointer where the saved threshold value is stored
 * @param num number of 32-bit enable words to save
 */
void fdt_plic_context_save(bool smode, u32 *enable, u32 *threshold, u32 num);
/**
 * Restore the PLIC context (interrupt enable words and threshold)
 * @param smode true for the S-mode context, false for the M-mode context
 * @param enable pointer to the saved enable words
 * @param threshold threshold value to restore
 * @param num number of 32-bit enable words to restore
 */
void fdt_plic_context_restore(bool smode, const u32 *enable, u32 threshold,
			      u32 num);
/* Re-apply T-HEAD specific PLIC state — NOTE(review): confirm exact effect */
void thead_plic_restore(void);
#endif

View File

@@ -37,12 +37,14 @@ struct imsic_data {
int imsic_map_hartid_to_data(u32 hartid, struct imsic_data *imsic, int file);
struct imsic_data *imsic_get_data(u32 hartindex);
struct imsic_data *imsic_get_data(u32 hartid);
int imsic_get_target_file(u32 hartindex);
int imsic_get_target_file(u32 hartid);
void imsic_local_irqchip_init(void);
int imsic_warm_irqchip_init(void);
int imsic_data_check(struct imsic_data *imsic);
int imsic_cold_irqchip_init(struct imsic_data *imsic);

View File

@@ -11,41 +11,30 @@
#define __IRQCHIP_PLIC_H__
#include <sbi/sbi_types.h>
#include <sbi/sbi_irqchip.h>
struct plic_data {
/* Private members */
struct sbi_irqchip_device irqchip;
/* Public members */
unsigned long addr;
unsigned long size;
unsigned long num_src;
unsigned long flags;
void *pm_data;
s16 context_map[][2];
};
/** Work around a bug on Ariane that requires enabling interrupts at boot */
#define PLIC_FLAG_ARIANE_BUG BIT(0)
/** PLIC must be delegated to S-mode like T-HEAD C906 and C910 */
#define PLIC_FLAG_THEAD_DELEGATION BIT(1)
/** Allocate space for power management save/restore operations */
#define PLIC_FLAG_ENABLE_PM BIT(2)
/* So far, priorities on all consumers of these functions fit in 8 bits. */
void plic_priority_save(const struct plic_data *plic, u8 *priority, u32 num);
#define PLIC_M_CONTEXT 0
#define PLIC_S_CONTEXT 1
void plic_priority_restore(const struct plic_data *plic, const u8 *priority,
u32 num);
#define PLIC_DATA_SIZE(__hart_count) (sizeof(struct plic_data) + \
(__hart_count) * 2 * sizeof(s16))
void plic_context_save(const struct plic_data *plic, int context_id,
u32 *enable, u32 *threshold, u32 num);
#define PLIC_IE_WORDS(__p) ((__p)->num_src / 32 + 1)
void plic_context_restore(const struct plic_data *plic, int context_id,
const u32 *enable, u32 threshold, u32 num);
struct plic_data *plic_get(void);
int plic_context_init(const struct plic_data *plic, int context_id,
bool enable, u32 threshold);
void plic_suspend(void);
int plic_warm_irqchip_init(const struct plic_data *plic,
int m_cntx_id, int s_cntx_id);
void plic_resume(void);
int plic_cold_irqchip_init(struct plic_data *plic);
int plic_cold_irqchip_init(const struct plic_data *plic);
#endif

View File

@@ -1,35 +0,0 @@
/*
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2024 Ventana Micro Systems Inc.
*
* Authors:
* Anup Patel <apatel@ventanamicro.com>
*/
#ifndef __FDT_MAILBOX_H__
#define __FDT_MAILBOX_H__
#include <sbi_utils/fdt/fdt_driver.h>
#include <sbi_utils/mailbox/mailbox.h>
struct fdt_phandle_args;
/** FDT based mailbox driver */
struct fdt_mailbox {
struct fdt_driver driver;
int (*xlate)(struct mbox_controller *mbox,
const struct fdt_phandle_args *pargs,
u32 *out_chan_args);
};
/** Request a mailbox channel using "mboxes" DT property of client DT node */
int fdt_mailbox_request_chan(const void *fdt, int nodeoff, int index,
struct mbox_chan **out_chan);
/** Simple xlate function to convert one mailbox FDT cell into channel args */
int fdt_mailbox_simple_xlate(struct mbox_controller *mbox,
const struct fdt_phandle_args *pargs,
u32 *out_chan_args);
#endif

View File

@@ -1,180 +0,0 @@
/*
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2024 Ventana Micro Systems Inc.
*
* Authors:
* Anup Patel <apatel@ventanamicro.com>
*/
#ifndef __MAILBOX_H__
#define __MAILBOX_H__
#include <sbi/sbi_types.h>
#include <sbi/sbi_list.h>
#include <sbi/riscv_atomic.h>
/** Representation of a mailbox channel */
struct mbox_chan {
/** List head */
struct sbi_dlist node;
/** Pointer to the mailbox controller */
struct mbox_controller *mbox;
/**
* Arguments (or parameters) to identify a mailbox channel
* within a mailbox controller.
*/
#define MBOX_CHAN_MAX_ARGS 2
u32 chan_args[MBOX_CHAN_MAX_ARGS];
};
#define to_mbox_chan(__node) \
container_of((__node), struct mbox_chan, node)
/**
* Representation of a mailbox data transfer
*
* NOTE: If both "tx" and "rx" are non-NULL then Tx is done before Rx.
*/
struct mbox_xfer {
#define MBOX_XFER_SEQ (1UL << 0)
/** Transfer flags */
unsigned long flags;
/** Transfer arguments (or parameters) */
void *args;
/**
* Sequence number
*
* If MBOX_XFER_SEQ is not set in flags then mbox_chan_xfer()
* will generate a unique sequence number and update this field
* else mbox_chan_xfer() will blindly use the sequence number
* specified by this field.
*/
long seq;
/** Send data pointer */
void *tx;
/** Send data length (valid only if tx != NULL) */
unsigned long tx_len;
/**
* Send timeout milliseconds (valid only if tx != NULL)
*
* If this field is non-zero along with tx != NULL then the
* mailbox controller driver will wait specified milliseconds
* for send data transfer to complete else the mailbox controller
* driver will not wait.
*/
unsigned long tx_timeout;
/** Receive data pointer */
void *rx;
/** Receive data length (valid only if rx != NULL) */
unsigned long rx_len;
/**
* Receive timeout milliseconds (valid only if rx != NULL)
*
* If this field is non-zero along with rx != NULL then the
* mailbox controller driver will wait specified milliseconds
* for receive data transfer to complete else the mailbox
* controller driver will not wait.
*/
unsigned long rx_timeout;
};
#define mbox_xfer_init_tx(__p, __a, __t, __t_len, __t_tim) \
do { \
(__p)->flags = 0; \
(__p)->args = (__a); \
(__p)->tx = (__t); \
(__p)->tx_len = (__t_len); \
(__p)->tx_timeout = (__t_tim); \
(__p)->rx = NULL; \
(__p)->rx_len = 0; \
(__p)->rx_timeout = 0; \
} while (0)
#define mbox_xfer_init_rx(__p, __a, __r, __r_len, __r_tim) \
do { \
(__p)->flags = 0; \
(__p)->args = (__a); \
(__p)->tx = NULL; \
(__p)->tx_len = 0; \
(__p)->tx_timeout = 0; \
(__p)->rx = (__r); \
(__p)->rx_len = (__r_len); \
(__p)->rx_timeout = (__r_tim); \
} while (0)
#define mbox_xfer_init_txrx(__p, __a, __t, __t_len, __t_tim, __r, __r_len, __r_tim)\
do { \
(__p)->flags = 0; \
(__p)->args = (__a); \
(__p)->tx = (__t); \
(__p)->tx_len = (__t_len); \
(__p)->tx_timeout = (__t_tim); \
(__p)->rx = (__r); \
(__p)->rx_len = (__r_len); \
(__p)->rx_timeout = (__r_tim); \
} while (0)
#define mbox_xfer_set_sequence(__p, __seq) \
do { \
(__p)->flags |= MBOX_XFER_SEQ; \
(__p)->seq = (__seq); \
} while (0)
/** Representation of a mailbox controller */
struct mbox_controller {
/** List head */
struct sbi_dlist node;
/** Next sequence atomic counter */
atomic_t xfer_next_seq;
/* List of mailbox channels */
struct sbi_dlist chan_list;
/** Unique ID of the mailbox controller assigned by the driver */
unsigned int id;
/** Maximum length of transfer supported by the mailbox controller */
unsigned int max_xfer_len;
/** Pointer to mailbox driver owning this mailbox controller */
void *driver;
/** Request a mailbox channel from the mailbox controller */
struct mbox_chan *(*request_chan)(struct mbox_controller *mbox,
u32 *chan_args);
/** Free a mailbox channel from the mailbox controller */
void (*free_chan)(struct mbox_controller *mbox,
struct mbox_chan *chan);
/** Transfer data over mailbox channel */
int (*xfer)(struct mbox_chan *chan, struct mbox_xfer *xfer);
/** Get an attribute of mailbox channel */
int (*get_attribute)(struct mbox_chan *chan, int attr_id, void *out_value);
/** Set an attribute of mailbox channel */
int (*set_attribute)(struct mbox_chan *chan, int attr_id, void *new_value);
};
#define to_mbox_controller(__node) \
container_of((__node), struct mbox_controller, node)
/** Find a registered mailbox controller */
struct mbox_controller *mbox_controller_find(unsigned int id);
/** Register mailbox controller */
int mbox_controller_add(struct mbox_controller *mbox);
/** Un-register mailbox controller */
void mbox_controller_remove(struct mbox_controller *mbox);
/** Request a mailbox channel */
struct mbox_chan *mbox_controller_request_chan(struct mbox_controller *mbox,
u32 *chan_args);
/** Free a mailbox channel */
void mbox_controller_free_chan(struct mbox_chan *chan);
/** Data transfer over mailbox channel */
int mbox_chan_xfer(struct mbox_chan *chan, struct mbox_xfer *xfer);
/** Get an attribute of mailbox channel */
int mbox_chan_get_attribute(struct mbox_chan *chan, int attr_id, void *out_value);
/** Set an attribute of mailbox channel */
int mbox_chan_set_attribute(struct mbox_chan *chan, int attr_id, void *new_value);
#endif

View File

@@ -1,33 +0,0 @@
/*
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2023 Ventana Micro Systems Inc.
*
* Authors:
* Anup Patel <apatel@ventanamicro.com>
*/
#ifndef __RPMI_MAILBOX_H__
#define __RPMI_MAILBOX_H__
#include <sbi/sbi_error.h>
#include <sbi_utils/mailbox/mailbox.h>
#include <sbi_utils/mailbox/rpmi_msgprot.h>
#define rpmi_u32_count(__var) (sizeof(__var) / sizeof(u32))
/** Convert RPMI error to SBI error */
int rpmi_xlate_error(enum rpmi_error error);
/** Typical RPMI normal request with at least status code in response */
int rpmi_normal_request_with_status(
struct mbox_chan *chan, u32 service_id,
void *req, u32 req_words, u32 req_endian_words,
void *resp, u32 resp_words, u32 resp_endian_words);
/* RPMI posted request which is without any response*/
int rpmi_posted_request(
struct mbox_chan *chan, u32 service_id,
void *req, u32 req_words, u32 req_endian_words);
#endif /* !__RPMI_MAILBOX_H__ */

View File

@@ -1,706 +0,0 @@
/*
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2023 Ventana Micro Systems Inc.
*
* Authors:
* Rahul Pathak <rpathak@ventanamicro.com>
* Subrahmanya Lingappa <slingappa@ventanamicro.com>
*/
#ifndef __RPMI_MSGPROT_H__
#define __RPMI_MSGPROT_H__
#include <sbi/sbi_byteorder.h>
#include <sbi/sbi_error.h>
/*
* 31 0
* +---------------------+-----------------------+
* | FLAGS | SERVICE_ID | SERVICEGROUP_ID |
* +---------------------+-----------------------+
* | TOKEN | DATA LENGTH |
* +---------------------+-----------------------+
* | DATA/PAYLOAD |
* +---------------------------------------------+
*/
/** Message Header byte offset */
#define RPMI_MSG_HDR_OFFSET (0x0)
/** Message Header Size in bytes */
#define RPMI_MSG_HDR_SIZE (8)
/** ServiceGroup ID field byte offset */
#define RPMI_MSG_SERVICEGROUP_ID_OFFSET (0x0)
/** ServiceGroup ID field size in bytes */
#define RPMI_MSG_SERVICEGROUP_ID_SIZE (2)
/** Service ID field byte offset */
#define RPMI_MSG_SERVICE_ID_OFFSET (0x2)
/** Service ID field size in bytes */
#define RPMI_MSG_SERVICE_ID_SIZE (1)
/** Flags field byte offset */
#define RPMI_MSG_FLAGS_OFFSET (0x3)
/** Flags field size in bytes */
#define RPMI_MSG_FLAGS_SIZE (1)
#define RPMI_MSG_FLAGS_TYPE_POS (0U)
#define RPMI_MSG_FLAGS_TYPE_MASK 0x7
#define RPMI_MSG_FLAGS_TYPE \
((0x7) << RPMI_MSG_FLAGS_TYPE_POS)
#define RPMI_MSG_FLAGS_DOORBELL_POS (3U)
#define RPMI_MSG_FLAGS_DOORBELL_MASK 0x1
#define RPMI_MSG_FLAGS_DOORBELL \
((0x1) << RPMI_MSG_FLAGS_DOORBELL_POS)
/** Data length field byte offset */
#define RPMI_MSG_DATALEN_OFFSET (0x4)
/** Data length field size in bytes */
#define RPMI_MSG_DATALEN_SIZE (2)
/** Token field byte offset */
#define RPMI_MSG_TOKEN_OFFSET (0x6)
/** Token field size in bytes */
#define RPMI_MSG_TOKEN_SIZE (2)
/** Token field mask */
#define RPMI_MSG_TOKEN_MASK (0xffffU)
/** Data field byte offset */
#define RPMI_MSG_DATA_OFFSET (RPMI_MSG_HDR_SIZE)
/** Data field size in bytes */
#define RPMI_MSG_DATA_SIZE(__slot_size) ((__slot_size) - RPMI_MSG_HDR_SIZE)
/** Minimum slot size in bytes */
#define RPMI_SLOT_SIZE_MIN (64)
/** Name length of 16 characters */
#define RPMI_NAME_CHARS_MAX (16)
/** Queue layout */
#define RPMI_QUEUE_HEAD_SLOT 0
#define RPMI_QUEUE_TAIL_SLOT 1
#define RPMI_QUEUE_HEADER_SLOTS 2
/** Default timeout values */
#define RPMI_DEF_TX_TIMEOUT 20
#define RPMI_DEF_RX_TIMEOUT 20
/**
* Common macro to generate composite version from major
* and minor version numbers.
*
* RPMI has Specification version, Implementation version
* Service group versions which follow the same versioning
* encoding as below.
*/
#define RPMI_VERSION(__major, __minor) (((__major) << 16) | (__minor))
/** RPMI Message Header */
struct rpmi_message_header {
le16_t servicegroup_id;
uint8_t service_id;
uint8_t flags;
le16_t datalen;
le16_t token;
} __packed;
/** RPMI Message */
struct rpmi_message {
struct rpmi_message_header header;
u8 data[0];
} __packed;
/** RPMI Messages Types */
enum rpmi_message_type {
/* Normal request backed with ack */
RPMI_MSG_NORMAL_REQUEST = 0x0,
/* Request without any ack */
RPMI_MSG_POSTED_REQUEST = 0x1,
/* Acknowledgment for normal request message */
RPMI_MSG_ACKNOWLDGEMENT = 0x2,
/* Notification message */
RPMI_MSG_NOTIFICATION = 0x3,
};
/** RPMI Error Types */
enum rpmi_error {
/* Success */
RPMI_SUCCESS = 0,
/* General failure */
RPMI_ERR_FAILED = -1,
/* Service or feature not supported */
RPMI_ERR_NOTSUPP = -2,
/* Invalid Parameter */
RPMI_ERR_INVALID_PARAM = -3,
/*
* Denied to insufficient permissions
* or due to unmet prerequisite
*/
RPMI_ERR_DENIED = -4,
/* Invalid address or offset */
RPMI_ERR_INVALID_ADDR = -5,
/*
* Operation failed as it was already in
* progress or the state has changed already
* for which the operation was carried out.
*/
RPMI_ERR_ALREADY = -6,
/*
* Error in implementation which violates
* the specification version
*/
RPMI_ERR_EXTENSION = -7,
/* Operation failed due to hardware issues */
RPMI_ERR_HW_FAULT = -8,
/* System, device or resource is busy */
RPMI_ERR_BUSY = -9,
/* System or device or resource in invalid state */
RPMI_ERR_INVALID_STATE = -10,
/* Index, offset or address is out of range */
RPMI_ERR_BAD_RANGE = -11,
/* Operation timed out */
RPMI_ERR_TIMEOUT = -12,
/*
* Error in input or output or
* error in sending or receiving data
* through communication medium
*/
RPMI_ERR_IO = -13,
/* No data available */
RPMI_ERR_NO_DATA = -14,
RPMI_ERR_RESERVED_START = -15,
RPMI_ERR_RESERVED_END = -127,
RPMI_ERR_VENDOR_START = -128,
};
/** RPMI Mailbox Message Arguments */
struct rpmi_message_args {
u32 flags;
#define RPMI_MSG_FLAGS_NO_TX (1U << 0)
#define RPMI_MSG_FLAGS_NO_RX (1U << 1)
#define RPMI_MSG_FLAGS_NO_RX_TOKEN (1U << 2)
enum rpmi_message_type type;
u8 service_id;
u32 tx_endian_words;
u32 rx_endian_words;
u16 rx_token;
u32 rx_data_len;
};
/** RPMI Mailbox Channel Attribute IDs */
enum rpmi_channel_attribute_id {
RPMI_CHANNEL_ATTR_PROTOCOL_VERSION = 0,
RPMI_CHANNEL_ATTR_MAX_DATA_LEN,
RPMI_CHANNEL_ATTR_P2A_DOORBELL_SYSMSI_INDEX,
RPMI_CHANNEL_ATTR_TX_TIMEOUT,
RPMI_CHANNEL_ATTR_RX_TIMEOUT,
RPMI_CHANNEL_ATTR_SERVICEGROUP_ID,
RPMI_CHANNEL_ATTR_SERVICEGROUP_VERSION,
RPMI_CHANNEL_ATTR_IMPL_ID,
RPMI_CHANNEL_ATTR_IMPL_VERSION,
RPMI_CHANNEL_ATTR_MAX,
};
/*
* RPMI SERVICEGROUPS AND SERVICES
*/
/** RPMI ServiceGroups IDs */
enum rpmi_servicegroup_id {
RPMI_SRVGRP_ID_MIN = 0,
RPMI_SRVGRP_BASE = 0x0001,
RPMI_SRVGRP_SYSTEM_MSI = 0x0002,
RPMI_SRVGRP_SYSTEM_RESET = 0x0003,
RPMI_SRVGRP_SYSTEM_SUSPEND = 0x0004,
RPMI_SRVGRP_HSM = 0x0005,
RPMI_SRVGRP_CPPC = 0x0006,
RPMI_SRVGRP_CLOCK = 0x0008,
RPMI_SRVGRP_ID_MAX_COUNT,
/* Reserved range for service groups */
RPMI_SRVGRP_RESERVE_START = RPMI_SRVGRP_ID_MAX_COUNT,
RPMI_SRVGRP_RESERVE_END = 0x7FFF,
/* Vendor/Implementation-specific service groups range */
RPMI_SRVGRP_VENDOR_START = 0x8000,
RPMI_SRVGRP_VENDOR_END = 0xFFFF,
};
/** RPMI enable notification request */
struct rpmi_enable_notification_req {
u32 eventid;
};
/** RPMI enable notification response */
struct rpmi_enable_notification_resp {
s32 status;
};
/** RPMI Base ServiceGroup Service IDs */
enum rpmi_base_service_id {
RPMI_BASE_SRV_ENABLE_NOTIFICATION = 0x01,
RPMI_BASE_SRV_GET_IMPLEMENTATION_VERSION = 0x02,
RPMI_BASE_SRV_GET_IMPLEMENTATION_IDN = 0x03,
RPMI_BASE_SRV_GET_SPEC_VERSION = 0x04,
RPMI_BASE_SRV_GET_PLATFORM_INFO = 0x05,
RPMI_BASE_SRV_PROBE_SERVICE_GROUP = 0x06,
RPMI_BASE_SRV_GET_ATTRIBUTES = 0x07,
};
#define RPMI_BASE_FLAGS_F0_PRIVILEGE (1U << 1)
#define RPMI_BASE_FLAGS_F0_EV_NOTIFY (1U << 0)
enum rpmi_base_context_priv_level {
RPMI_BASE_CONTEXT_PRIV_S_MODE,
RPMI_BASE_CONTEXT_PRIV_M_MODE,
};
struct rpmi_base_get_attributes_resp {
s32 status_code;
u32 f0;
u32 f1;
u32 f2;
u32 f3;
};
struct rpmi_base_get_platform_info_resp {
s32 status;
u32 plat_info_len;
char plat_info[];
};
/** RPMI System MSI ServiceGroup Service IDs */
enum rpmi_sysmsi_service_id {
RPMI_SYSMSI_SRV_ENABLE_NOTIFICATION = 0x01,
RPMI_SYSMSI_SRV_GET_ATTRIBUTES = 0x2,
RPMI_SYSMSI_SRV_GET_MSI_ATTRIBUTES = 0x3,
RPMI_SYSMSI_SRV_SET_MSI_STATE = 0x4,
RPMI_SYSMSI_SRV_GET_MSI_STATE = 0x5,
RPMI_SYSMSI_SRV_SET_MSI_TARGET = 0x6,
RPMI_SYSMSI_SRV_GET_MSI_TARGET = 0x7,
RPMI_SYSMSI_SRV_ID_MAX_COUNT,
};
/** Response for system MSI service group attributes */
struct rpmi_sysmsi_get_attributes_resp {
s32 status;
u32 sys_num_msi;
u32 flag0;
u32 flag1;
};
/** Request for system MSI attributes */
struct rpmi_sysmsi_get_msi_attributes_req {
u32 sys_msi_index;
};
/** Response for system MSI attributes */
struct rpmi_sysmsi_get_msi_attributes_resp {
s32 status;
u32 flag0;
u32 flag1;
u8 name[16];
};
#define RPMI_SYSMSI_MSI_ATTRIBUTES_FLAG0_PREF_PRIV (1U << 0)
/** Request for system MSI set state */
struct rpmi_sysmsi_set_msi_state_req {
u32 sys_msi_index;
u32 sys_msi_state;
};
#define RPMI_SYSMSI_MSI_STATE_ENABLE (1U << 0)
#define RPMI_SYSMSI_MSI_STATE_PENDING (1U << 1)
/** Response for system MSI set state */
struct rpmi_sysmsi_set_msi_state_resp {
s32 status;
};
/** Request for system MSI get state */
struct rpmi_sysmsi_get_msi_state_req {
u32 sys_msi_index;
};
/** Response for system MSI get state */
struct rpmi_sysmsi_get_msi_state_resp {
s32 status;
u32 sys_msi_state;
};
/** Request for system MSI set target */
struct rpmi_sysmsi_set_msi_target_req {
u32 sys_msi_index;
u32 sys_msi_address_low;
u32 sys_msi_address_high;
u32 sys_msi_data;
};
/** Response for system MSI set target */
struct rpmi_sysmsi_set_msi_target_resp {
s32 status;
};
/** Request for system MSI get target */
struct rpmi_sysmsi_get_msi_target_req {
u32 sys_msi_index;
};
/** Response for system MSI get target */
struct rpmi_sysmsi_get_msi_target_resp {
s32 status;
u32 sys_msi_address_low;
u32 sys_msi_address_high;
u32 sys_msi_data;
};
/** RPMI System Reset ServiceGroup Service IDs */
enum rpmi_system_reset_service_id {
RPMI_SYSRST_SRV_ENABLE_NOTIFICATION = 0x01,
RPMI_SYSRST_SRV_GET_ATTRIBUTES = 0x02,
RPMI_SYSRST_SRV_SYSTEM_RESET = 0x03,
RPMI_SYSRST_SRV_ID_MAX_COUNT,
};
/** RPMI System Reset types */
enum rpmi_sysrst_reset_type {
RPMI_SYSRST_TYPE_SHUTDOWN = 0x0,
RPMI_SYSRST_TYPE_COLD_REBOOT = 0x1,
RPMI_SYSRST_TYPE_WARM_REBOOT = 0x2,
RPMI_SYSRST_TYPE_MAX,
};
#define RPMI_SYSRST_ATTRS_FLAGS_RESETTYPE_POS (1)
#define RPMI_SYSRST_ATTRS_FLAGS_RESETTYPE_MASK \
(1U << RPMI_SYSRST_ATTRS_FLAGS_RESETTYPE_POS)
/** Response for system reset attributes */
struct rpmi_sysrst_get_reset_attributes_resp {
s32 status;
u32 flags;
};
/** RPMI System Suspend ServiceGroup Service IDs */
enum rpmi_system_suspend_service_id {
RPMI_SYSSUSP_SRV_ENABLE_NOTIFICATION = 0x01,
RPMI_SYSSUSP_SRV_GET_ATTRIBUTES = 0x02,
RPMI_SYSSUSP_SRV_SYSTEM_SUSPEND = 0x03,
RPMI_SYSSUSP_SRV_ID_MAX_COUNT,
};
/** Request for system suspend attributes */
struct rpmi_syssusp_get_attr_req {
u32 susp_type;
};
#define RPMI_SYSSUSP_ATTRS_FLAGS_RESUMEADDR (1U << 1)
#define RPMI_SYSSUSP_ATTRS_FLAGS_SUSPENDTYPE 1U
/** Response for system suspend attributes */
struct rpmi_syssusp_get_attr_resp {
s32 status;
u32 flags;
};
struct rpmi_syssusp_suspend_req {
u32 hartid;
u32 suspend_type;
u32 resume_addr_lo;
u32 resume_addr_hi;
};
struct rpmi_syssusp_suspend_resp {
s32 status;
};
/** RPMI HSM State Management ServiceGroup Service IDs */
enum rpmi_hsm_service_id {
RPMI_HSM_SRV_ENABLE_NOTIFICATION = 0x01,
RPMI_HSM_SRV_GET_HART_STATUS = 0x02,
RPMI_HSM_SRV_GET_HART_LIST = 0x03,
RPMI_HSM_SRV_GET_SUSPEND_TYPES = 0x04,
RPMI_HSM_SRV_GET_SUSPEND_INFO = 0x05,
RPMI_HSM_SRV_HART_START = 0x06,
RPMI_HSM_SRV_HART_STOP = 0x07,
RPMI_HSM_SRV_HART_SUSPEND = 0x08,
RPMI_HSM_SRV_ID_MAX = 0x09,
};
/* HSM service group request and response structs */
struct rpmi_hsm_hart_start_req {
u32 hartid;
u32 start_addr_lo;
u32 start_addr_hi;
};
struct rpmi_hsm_hart_start_resp {
s32 status;
};
struct rpmi_hsm_hart_stop_req {
u32 hartid;
};
struct rpmi_hsm_hart_stop_resp {
s32 status;
};
struct rpmi_hsm_hart_susp_req {
u32 hartid;
u32 suspend_type;
u32 resume_addr_lo;
u32 resume_addr_hi;
};
struct rpmi_hsm_hart_susp_resp {
s32 status;
};
struct rpmi_hsm_get_hart_status_req {
u32 hartid;
};
struct rpmi_hsm_get_hart_status_resp {
s32 status;
u32 hart_status;
};
struct rpmi_hsm_get_hart_list_req {
u32 start_index;
};
struct rpmi_hsm_get_hart_list_resp {
s32 status;
u32 remaining;
u32 returned;
/* remaining space need to be adjusted for the above 3 u32's */
u32 hartid[(RPMI_MSG_DATA_SIZE(RPMI_SLOT_SIZE_MIN) - (sizeof(u32) * 3)) / sizeof(u32)];
};
struct rpmi_hsm_get_susp_types_req {
u32 start_index;
};
struct rpmi_hsm_get_susp_types_resp {
s32 status;
u32 remaining;
u32 returned;
/* remaining space need to be adjusted for the above 3 u32's */
u32 types[(RPMI_MSG_DATA_SIZE(RPMI_SLOT_SIZE_MIN) - (sizeof(u32) * 3)) / sizeof(u32)];
};
struct rpmi_hsm_get_susp_info_req {
u32 suspend_type;
};
#define RPMI_HSM_SUSPEND_INFO_FLAGS_TIMER_STOP 1U
struct rpmi_hsm_get_susp_info_resp {
s32 status;
u32 flags;
u32 entry_latency_us;
u32 exit_latency_us;
u32 wakeup_latency_us;
u32 min_residency_us;
};
/** RPMI CPPC ServiceGroup Service IDs */
enum rpmi_cppc_service_id {
RPMI_CPPC_SRV_ENABLE_NOTIFICATION = 0x01,
RPMI_CPPC_SRV_PROBE_REG = 0x02,
RPMI_CPPC_SRV_READ_REG = 0x03,
RPMI_CPPC_SRV_WRITE_REG = 0x04,
RPMI_CPPC_SRV_GET_FAST_CHANNEL_REGION = 0x05,
RPMI_CPPC_SRV_GET_FAST_CHANNEL_OFFSET = 0x06,
RPMI_CPPC_SRV_GET_HART_LIST = 0x07,
RPMI_CPPC_SRV_MAX_COUNT,
};
struct rpmi_cppc_probe_req {
u32 hart_id;
u32 reg_id;
};
struct rpmi_cppc_probe_resp {
s32 status;
u32 reg_len;
};
struct rpmi_cppc_read_reg_req {
u32 hart_id;
u32 reg_id;
};
struct rpmi_cppc_read_reg_resp {
s32 status;
u32 data_lo;
u32 data_hi;
};
struct rpmi_cppc_write_reg_req {
u32 hart_id;
u32 reg_id;
u32 data_lo;
u32 data_hi;
};
struct rpmi_cppc_write_reg_resp {
s32 status;
};
struct rpmi_cppc_get_fastchan_offset_req {
u32 hart_id;
};
struct rpmi_cppc_get_fastchan_offset_resp {
s32 status;
u32 fc_perf_request_offset_lo;
u32 fc_perf_request_offset_hi;
u32 fc_perf_feedback_offset_lo;
u32 fc_perf_feedback_offset_hi;
};
#define RPMI_CPPC_FAST_CHANNEL_CPPC_MODE_POS 3
#define RPMI_CPPC_FAST_CHANNEL_CPPC_MODE_MASK \
(3U << RPMI_CPPC_FAST_CHANNEL_CPPC_MODE_POS)
#define RPMI_CPPC_FAST_CHANNEL_FLAGS_DB_WIDTH_POS 1
#define RPMI_CPPC_FAST_CHANNEL_FLAGS_DB_WIDTH_MASK \
(3U << RPMI_CPPC_FAST_CHANNEL_FLAGS_DB_WIDTH_POS)
#define RPMI_CPPC_FAST_CHANNEL_FLAGS_DB_SUPPORTED (1U << 0)
struct rpmi_cppc_get_fastchan_region_resp {
s32 status;
u32 flags;
u32 region_addr_lo;
u32 region_addr_hi;
u32 region_size_lo;
u32 region_size_hi;
u32 db_addr_lo;
u32 db_addr_hi;
u32 db_setmask_lo;
u32 db_setmask_hi;
u32 db_preservemask_lo;
u32 db_preservemask_hi;
};
enum rpmi_cppc_fast_channel_db_width {
RPMI_CPPC_FAST_CHANNEL_DB_WIDTH_8 = 0x0,
RPMI_CPPC_FAST_CHANNEL_DB_WIDTH_16 = 0x1,
RPMI_CPPC_FAST_CHANNEL_DB_WIDTH_32 = 0x2,
RPMI_CPPC_FAST_CHANNEL_DB_WIDTH_64 = 0x3,
};
enum rpmi_cppc_fast_channel_cppc_mode {
RPMI_CPPC_FAST_CHANNEL_CPPC_MODE_PASSIVE = 0x0,
RPMI_CPPC_FAST_CHANNEL_CPPC_MODE_ACTIVE = 0x1,
RPMI_CPPC_FAST_CHANNEL_CPPC_MODE_MAX_IDX,
};
struct rpmi_cppc_hart_list_req {
u32 start_index;
};
struct rpmi_cppc_hart_list_resp {
s32 status;
u32 remaining;
u32 returned;
/* remaining space need to be adjusted for the above 3 u32's */
u32 hartid[(RPMI_MSG_DATA_SIZE(RPMI_SLOT_SIZE_MIN) - (sizeof(u32) * 3)) / sizeof(u32)];
};
/** RPMI Clock ServiceGroup Service IDs */
enum rpmi_clock_service_id {
RPMI_CLOCK_SRV_ENABLE_NOTIFICATION = 0x01,
RPMI_CLOCK_SRV_GET_NUM_CLOCKS = 0x02,
RPMI_CLOCK_SRV_GET_ATTRIBUTES = 0x03,
RPMI_CLOCK_SRV_GET_SUPPORTED_RATES = 0x04,
RPMI_CLOCK_SRV_SET_CONFIG = 0x05,
RPMI_CLOCK_SRV_GET_CONFIG = 0x06,
RPMI_CLOCK_SRV_SET_RATE = 0x07,
RPMI_CLOCK_SRV_GET_RATE = 0x08,
RPMI_CLOCK_SRV_MAX_COUNT,
};
struct rpmi_clock_get_num_clocks_resp {
s32 status;
u32 num_clocks;
};
struct rpmi_clock_get_attributes_req {
u32 clock_id;
};
/** Response for RPMI_CLOCK_SRV_GET_ATTRIBUTES */
struct rpmi_clock_get_attributes_resp {
	s32 status;
/*
 * Bits [31:30] of `flags` encode the clock rate format.
 * Fix: the mask previously expanded the non-existent macro
 * RPMI_CLOCK_FLAGS_CLOCK_FORMAT_POS (the POS macro is actually
 * named RPMI_CLOCK_FLAGS_FORMAT_POS), so any use of the mask
 * failed to compile.
 */
#define RPMI_CLOCK_FLAGS_FORMAT_POS		30
#define RPMI_CLOCK_FLAGS_FORMAT_MASK		\
			(3U << RPMI_CLOCK_FLAGS_FORMAT_POS)
#define RPMI_CLOCK_FLAGS_FORMAT_DISCRETE	0
#define RPMI_CLOCK_FLAGS_FORMAT_LINEAR		1
	u32 flags;
	u32 num_rates;		/* number of supported rates */
	u32 transition_latency;	/* rate-change latency -- units not specified here; see RPMI spec */
	u8 name[16];		/* clock name, RPMI_NAME_CHARS_MAX bytes */
};
struct rpmi_clock_get_supported_rates_req {
u32 clock_id;
u32 clock_rate_index;
};
/** Response for RPMI_CLOCK_SRV_GET_SUPPORTED_RATES */
struct rpmi_clock_get_supported_rates_resp {
	s32 status;
	u32 flags;
	u32 remaining;	/* rates still to be fetched after this response */
	u32 returned;	/* rates carried in this response */
	/*
	 * Variable-length rate list; C99 flexible array member instead of
	 * the non-standard GNU zero-length array `u32 clock_rate[0]`.
	 * Layout is unchanged (neither form contributes to sizeof).
	 */
	u32 clock_rate[];
};
struct rpmi_clock_set_config_req {
u32 clock_id;
#define RPMI_CLOCK_CONFIG_ENABLE (1U << 0)
u32 config;
};
struct rpmi_clock_set_config_resp {
s32 status;
};
struct rpmi_clock_get_config_req {
u32 clock_id;
};
struct rpmi_clock_get_config_resp {
s32 status;
u32 config;
};
struct rpmi_clock_set_rate_req {
u32 clock_id;
#define RPMI_CLOCK_SET_RATE_FLAGS_MASK (3U << 0)
#define RPMI_CLOCK_SET_RATE_FLAGS_ROUND_DOWN 0
#define RPMI_CLOCK_SET_RATE_FLAGS_ROUND_UP 1
#define RPMI_CLOCK_SET_RATE_FLAGS_ROUND_PLAT 2
u32 flags;
u32 clock_rate_low;
u32 clock_rate_high;
};
struct rpmi_clock_set_rate_resp {
s32 status;
};
struct rpmi_clock_get_rate_req {
u32 clock_id;
};
struct rpmi_clock_get_rate_resp {
s32 status;
u32 clock_rate_low;
u32 clock_rate_high;
};
#endif /* !__RPMI_MSGPROT_H__ */

View File

@@ -1,26 +0,0 @@
/*
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2024 Ventana Micro Systems Inc.
*
* Authors:
* Anup Patel <apatel@ventanamicro.com>
*/
#ifndef __FDT_MPXY_H__
#define __FDT_MPXY_H__
#include <sbi/sbi_types.h>
#include <sbi_utils/fdt/fdt_driver.h>
#ifdef CONFIG_FDT_MPXY
int fdt_mpxy_init(const void *fdt);
#else
static inline int fdt_mpxy_init(const void *fdt) { return 0; }
#endif
#endif

View File

@@ -1,85 +0,0 @@
/*
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2024 Ventana Micro Systems Inc.
*
* Authors:
* Anup Patel <apatel@ventanamicro.com>
*/
#ifndef __FDT_MPXY_RPMI_MBOX_H__
#define __FDT_MPXY_RPMI_MBOX_H__
#include <sbi/sbi_types.h>
#include <sbi/sbi_mpxy.h>
#include <sbi_utils/mailbox/fdt_mailbox.h>
#include <sbi_utils/mailbox/rpmi_msgprot.h>
#include <sbi_utils/mpxy/fdt_mpxy.h>
/** Convert the mpxy attribute ID to attribute array index */
#define attr_id2index(attr_id) (attr_id - SBI_MPXY_ATTR_MSGPROTO_ATTR_START)
enum mpxy_msgprot_rpmi_attr_id {
MPXY_MSGPROT_RPMI_ATTR_SERVICEGROUP_ID = SBI_MPXY_ATTR_MSGPROTO_ATTR_START,
MPXY_MSGPROT_RPMI_ATTR_SERVICEGROUP_VERSION,
MPXY_MSGPROT_RPMI_ATTR_IMPL_ID,
MPXY_MSGPROT_RPMI_ATTR_IMPL_VERSION,
MPXY_MSGPROT_RPMI_ATTR_MAX_ID
};
/**
* MPXY message protocol attributes for RPMI
* Order of attribute fields must follow the
* attribute IDs in `enum mpxy_msgprot_rpmi_attr_id`
*/
struct mpxy_rpmi_channel_attrs {
u32 servicegrp_id;
u32 servicegrp_ver;
u32 impl_id;
u32 impl_ver;
};
/** Make sure all attributes are packed for direct memcpy */
#define assert_field_offset(field, attr_offset) \
_Static_assert( \
((offsetof(struct mpxy_rpmi_channel_attrs, field)) / \
sizeof(u32)) == (attr_offset - SBI_MPXY_ATTR_MSGPROTO_ATTR_START),\
"field " #field \
" from struct mpxy_rpmi_channel_attrs invalid offset, expected " #attr_offset)
assert_field_offset(servicegrp_id, MPXY_MSGPROT_RPMI_ATTR_SERVICEGROUP_ID);
assert_field_offset(servicegrp_ver, MPXY_MSGPROT_RPMI_ATTR_SERVICEGROUP_VERSION);
assert_field_offset(impl_id, MPXY_MSGPROT_RPMI_ATTR_IMPL_ID);
assert_field_offset(impl_ver, MPXY_MSGPROT_RPMI_ATTR_IMPL_VERSION);
/** MPXY RPMI service data for each service group */
struct mpxy_rpmi_service_data {
u8 id;
u32 min_tx_len;
u32 max_tx_len;
u32 min_rx_len;
u32 max_rx_len;
};
/** MPXY RPMI mbox data for each service group */
struct mpxy_rpmi_mbox_data {
u32 servicegrp_id;
u32 num_services;
struct mpxy_rpmi_service_data *service_data;
/** Transfer RPMI service group message */
int (*xfer_group)(void *context, struct mbox_chan *chan,
struct mbox_xfer *xfer);
/** Setup RPMI service group context for MPXY */
int (*setup_group)(void **context, struct mbox_chan *chan,
const struct mpxy_rpmi_mbox_data *data);
/** Cleanup RPMI service group context for MPXY */
void (*cleanup_group)(void *context);
};
/** Common probe function for MPXY RPMI drivers */
int mpxy_rpmi_mbox_init(const void *fdt, int nodeoff, const struct fdt_match *match);
#endif

View File

@@ -1,25 +0,0 @@
/*
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2023 Ventana Micro Systems Inc.
*
* Authors:
* Anup Patel <apatel@ventanamicro.com>
*/
#ifndef __FDT_REGMAP_H__
#define __FDT_REGMAP_H__
#include <sbi_utils/fdt/fdt_driver.h>
#include <sbi_utils/regmap/regmap.h>
struct fdt_phandle_args;
/** Get regmap instance based on phandle */
int fdt_regmap_get_by_phandle(const void *fdt, u32 phandle,
struct regmap **out_rmap);
/** Get regmap instance based on "regmap" property of the specified DT node */
int fdt_regmap_get(const void *fdt, int nodeoff, struct regmap **out_rmap);
#endif

View File

@@ -1,67 +0,0 @@
/*
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2023 Ventana Micro Systems Inc.
*
* Authors:
* Anup Patel <apatel@ventanamicro.com>
*/
#ifndef __REGMAP_H__
#define __REGMAP_H__
#include <sbi/sbi_types.h>
#include <sbi/sbi_list.h>
/** Representation of a regmap instance */
struct regmap {
	/** Unique ID of the regmap instance assigned by the driver */
	unsigned int id;
	/** Configuration of regmap registers */
	int reg_shift;		/* shift applied to register offsets -- semantics per driver; confirm */
	int reg_stride;		/* spacing between consecutive registers -- TODO confirm units */
	unsigned int reg_base;	/* first valid register offset */
	unsigned int reg_max;	/* last valid register offset */
	/** Read a regmap register into *val; int return is presumably 0/negative-errno -- confirm */
	int (*reg_read)(struct regmap *rmap, unsigned int reg,
			unsigned int *val);
	/** Write val to a regmap register */
	int (*reg_write)(struct regmap *rmap, unsigned int reg,
			 unsigned int val);
	/** Read-modify-write: update only the bits selected by mask */
	int (*reg_update_bits)(struct regmap *rmap, unsigned int reg,
			       unsigned int mask, unsigned int val);
	/** List node linking this instance into the global regmap list */
	struct sbi_dlist node;
};
/* Recover the enclosing struct regmap from its embedded list node */
static inline struct regmap *to_regmap(struct sbi_dlist *node)
{
	return container_of(node, struct regmap, node);
}
/** Find a registered regmap instance */
struct regmap *regmap_find(unsigned int id);
/** Register a regmap instance */
int regmap_add(struct regmap *rmap);
/** Un-register a regmap instance */
void regmap_remove(struct regmap *rmap);
/** Read a register in a regmap instance */
int regmap_read(struct regmap *rmap, unsigned int reg, unsigned int *val);
/** Write a register in a regmap instance */
int regmap_write(struct regmap *rmap, unsigned int reg, unsigned int val);
/** Read-modify-write a register in a regmap instance */
int regmap_update_bits(struct regmap *rmap, unsigned int reg,
unsigned int mask, unsigned int val);
#endif

Some files were not shown because too many files have changed in this diff Show More