From 1e5eb44ca9e3b427c6acb2b5c8ba296f7aa9334c Mon Sep 17 00:00:00 2001 From: Eyck Jentzsch Date: Tue, 27 Jan 2026 20:45:47 +0100 Subject: [PATCH] initial commit --- .envrc | 15 + .gitignore | 9 + .gitmodules | 9 + .vscode/settings.json | 25 + CMakeLists.txt | 92 + CMakePresets.json | 67 + README.md | 19 + cmake/rv32gc_gnu.cmake | 12 + cmake/rv32imac_gnu.cmake | 12 + cmake/rv64gc_gnu.cmake | 12 + cmake/rv64imac_gnu.cmake | 12 + port/moonlight/README.md | 6 + port/moonlight/aclint.h | 39 + port/moonlight/board.c | 39 + port/moonlight/bootup.c | 101 + port/moonlight/exception.c | 79 + port/moonlight/gen/aclint.h | 97 + port/moonlight/gen/ethmac.h | 477 +++ port/moonlight/gen/uart.h | 176 + port/moonlight/hwtimer.h | 32 + port/moonlight/mnrs_network_driver.c | 1067 +++++ port/moonlight/platform.h | 24 + port/moonlight/riscv-csr.h | 3791 +++++++++++++++++ port/moonlight/riscv-traps.h | 68 + port/moonlight/trap_non_vectored.c | 57 + port/moonlight/trap_vectored.c | 78 + port/moonlight/tx_timer_interrupt.c | 134 + port/moonlight/uart.h | 25 + port/moonlight/vector_table.c | 166 + port/moonlight/vector_table.h | 113 + port/picolibc/port.c | 41 + port/threadx/CMakeLists.txt | 20 + port/threadx/inc/csr.h | 373 ++ port/threadx/inc/nx_port.h | 194 + port/threadx/inc/nx_user.h | 785 ++++ port/threadx/inc/tx_port.h | 279 ++ port/threadx/src/tx_initialize_low_level.S | 163 + port/threadx/src/tx_thread_context_restore.S | 382 ++ port/threadx/src/tx_thread_context_save.S | 283 ++ .../threadx/src/tx_thread_interrupt_control.S | 81 + port/threadx/src/tx_thread_schedule.S | 305 ++ port/threadx/src/tx_thread_stack_build.S | 227 + port/threadx/src/tx_thread_system_return.S | 174 + src/flash.lds | 12 + src/memory_map.ld | 7 + src/ram.lds | 12 + src/rom.lds | 12 + src/sections.ld | 184 + src/tcp_demo/main.c | 337 ++ src/thread_demo/main.c | 321 ++ third-party/netxduo | 1 + third-party/picolibc | 1 + third-party/threadx | 1 + 53 files changed, 11048 insertions(+) create mode 100644 .envrc 
create mode 100644 .gitignore create mode 100644 .gitmodules create mode 100644 .vscode/settings.json create mode 100644 CMakeLists.txt create mode 100644 CMakePresets.json create mode 100644 README.md create mode 100644 cmake/rv32gc_gnu.cmake create mode 100644 cmake/rv32imac_gnu.cmake create mode 100644 cmake/rv64gc_gnu.cmake create mode 100644 cmake/rv64imac_gnu.cmake create mode 100644 port/moonlight/README.md create mode 100644 port/moonlight/aclint.h create mode 100644 port/moonlight/board.c create mode 100644 port/moonlight/bootup.c create mode 100644 port/moonlight/exception.c create mode 100644 port/moonlight/gen/aclint.h create mode 100644 port/moonlight/gen/ethmac.h create mode 100644 port/moonlight/gen/uart.h create mode 100644 port/moonlight/hwtimer.h create mode 100644 port/moonlight/mnrs_network_driver.c create mode 100644 port/moonlight/platform.h create mode 100644 port/moonlight/riscv-csr.h create mode 100644 port/moonlight/riscv-traps.h create mode 100644 port/moonlight/trap_non_vectored.c create mode 100644 port/moonlight/trap_vectored.c create mode 100644 port/moonlight/tx_timer_interrupt.c create mode 100644 port/moonlight/uart.h create mode 100644 port/moonlight/vector_table.c create mode 100644 port/moonlight/vector_table.h create mode 100644 port/picolibc/port.c create mode 100644 port/threadx/CMakeLists.txt create mode 100644 port/threadx/inc/csr.h create mode 100644 port/threadx/inc/nx_port.h create mode 100644 port/threadx/inc/nx_user.h create mode 100644 port/threadx/inc/tx_port.h create mode 100644 port/threadx/src/tx_initialize_low_level.S create mode 100644 port/threadx/src/tx_thread_context_restore.S create mode 100644 port/threadx/src/tx_thread_context_save.S create mode 100644 port/threadx/src/tx_thread_interrupt_control.S create mode 100644 port/threadx/src/tx_thread_schedule.S create mode 100644 port/threadx/src/tx_thread_stack_build.S create mode 100644 port/threadx/src/tx_thread_system_return.S create mode 100644 src/flash.lds 
create mode 100644 src/memory_map.ld create mode 100644 src/ram.lds create mode 100644 src/rom.lds create mode 100644 src/sections.ld create mode 100644 src/tcp_demo/main.c create mode 100644 src/thread_demo/main.c create mode 160000 third-party/netxduo create mode 160000 third-party/picolibc create mode 160000 third-party/threadx diff --git a/.envrc b/.envrc new file mode 100644 index 0000000..0d8c3de --- /dev/null +++ b/.envrc @@ -0,0 +1,15 @@ +distro=`/bin/lsb_release -i -s` +if [ $distro == "CentOS" ]; then + . /opt/rh/devtoolset-8/enable + . /opt/rh/llvm-toolset-7/enable + . /opt/rh/rh-python38/enable +elif [ $distro == "Rocky" ]; then + . /opt/rh/gcc-toolset-11/enable +elif [ $distro == "RockyLinux" ]; then + . /opt/rh/gcc-toolset-14/enable +fi +if has module; then + module load tools/gcc-riscv64-unknown-elf/15 tools/cmake/3.28 tools/utilities tools/vscode +fi +layout python3 +[ -f .envrc.$USER ] && . .envrc.$USER diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..572155c --- /dev/null +++ b/.gitignore @@ -0,0 +1,9 @@ +/build/ +/.venv +/.cache +/.direnv +/*.log +*.fst +*.ftr +*.pcap +/.vscode/launch.json diff --git a/.gitmodules b/.gitmodules new file mode 100644 index 0000000..0fd4ff1 --- /dev/null +++ b/.gitmodules @@ -0,0 +1,9 @@ +[submodule "third-party/threadx"] + path = third-party/threadx + url = https://github.com/eclipse-threadx/threadx.git +[submodule "third-party/netxduo"] + path = third-party/netxduo + url = https://github.com/eclipse-threadx/netxduo.git +[submodule "third-party/picolibc"] + path = third-party/picolibc + url = https://github.com/picolibc/picolibc.git diff --git a/.vscode/settings.json b/.vscode/settings.json new file mode 100644 index 0000000..8d9d923 --- /dev/null +++ b/.vscode/settings.json @@ -0,0 +1,25 @@ +{ + "debug.allowBreakpointsEverywhere": true, + "editor.rulers": [ + { + "column": 140, + "comment": "clang-format" + } + ], + "editor.formatOnSave": false, + "clangd.arguments": [ + "--pretty", + 
"--background-index", + "--compile-commands-dir=${workspaceFolder}/build" + ], + "cmake.copyCompileCommands": "${workspaceFolder}/build/compile_commands.json", + "cmake.options.statusBarVisibility": "hidden", + "cmake.options.advanced": { + "configure": { + "projectStatusVisibility": "visible" + }, + "build": { + "statusBarVisibility": "visible" + } + } +} \ No newline at end of file diff --git a/CMakeLists.txt b/CMakeLists.txt new file mode 100644 index 0000000..1b65268 --- /dev/null +++ b/CMakeLists.txt @@ -0,0 +1,92 @@ +cmake_minimum_required(VERSION 3.21) +############################################################################### +# we are building embedded, so no shared libs +set(BUILD_SHARED_LIBS OFF) +include(${CMAKE_TOOLCHAIN_FILE}) +############################################################################### +# Adds picolibc +#set(CMAKE_SYSTEM_PROCESSOR riscv) +add_subdirectory(third-party/picolibc) +#add_subdirectory(picolibc/semihost) +target_link_libraries(c PUBLIC gcc) +############################################################################### +# Adds threadx +set(THREADX_CUSTOM_PORT ${CMAKE_CURRENT_LIST_DIR}/port/threadx) +add_subdirectory(third-party/threadx) +target_link_libraries(threadx PUBLIC c) +# Adds netxduo +set(NETXDUO_CUSTOM_PORT ${CMAKE_CURRENT_LIST_DIR}/port/threadx) +set(NXD_ENABLE_FILE_SERVERS OFF) +set(NX_USER_FILE ${CMAKE_CURRENT_LIST_DIR}/port/threadx/inc/nx_user.h) +add_subdirectory(third-party/netxduo) +target_link_libraries(netxduo PUBLIC c) +############################################################################### +project(threadx_demo C ASM) +option(NX_DEBUG "compile netxduo debug output in" OFF) +set(TARGET_MEM "ram" CACHE STRING "memory map to use" ) +set(CMAKE_EXECUTABLE_SUFFIX_C ".elf") + +function(setup_target TARGET) + set(options) + set(oneValueArgs) # none for now + set(multiValueArgs LIBRARIES SOURCES) + cmake_parse_arguments(ST "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) + 
if(ST_UNPARSED_ARGUMENTS) + message(FATAL_ERROR "setup_target(${target} ...): unknown args: ${ST_UNPARSED_ARGUMENTS}") + endif() + + add_executable(${TARGET}) + target_sources(${TARGET} PRIVATE + port/picolibc/port.c + port/moonlight/bootup.c + port/moonlight/board.c + port/moonlight/trap_non_vectored.c + port/moonlight/exception.c + port/moonlight/vector_table.c + port/moonlight/tx_timer_interrupt.c + ) + if("netxduo" IN_LIST ST_LIBRARIES) + target_sources(${TARGET} PRIVATE port/moonlight/mnrs_network_driver.c) + endif() + + if(ST_SOURCES) + target_sources(${TARGET} PRIVATE ${ST_SOURCES}) + endif() + + target_include_directories(${TARGET} PRIVATE port/moonlight src) + target_compile_options(${TARGET} PRIVATE + -ffreestanding + -fno-builtin + -fdata-sections + -ffunction-sections + ) + if(NX_DEBUG) + target_compile_definitions(${TARGET} PRIVATE NX_DEBUG) + endif() + target_link_directories(${TARGET} PRIVATE src) # needed for linker script includes + target_link_options(${TARGET} PRIVATE + -nostartfiles + -nostdlib + -T ${CMAKE_SOURCE_DIR}/src/${TARGET_MEM}.lds + -Wl,--gc-sections + -Wl,-Map=${CMAKE_BINARY_DIR}/${TARGET}.map + ) + if(ST_LIBRARIES) + target_link_libraries(${TARGET} PRIVATE ${ST_LIBRARIES}) + endif() + target_link_libraries(${TARGET} PRIVATE threadx) + + add_custom_command(TARGET ${TARGET} POST_BUILD + COMMAND ${OBJCOPY} -O ihex $ ${CMAKE_BINARY_DIR}/${TARGET}.hex + COMMAND ${OBJCOPY} -O binary $ ${CMAKE_BINARY_DIR}/${TARGET}.bin + COMMAND ${SIZE} $ + COMMAND ${OBJDUMP} -S $ > ${TARGET}.dis + COMMENT "Creating collateral for ${TARGET}" + ) +endfunction() + +setup_target(thread_demo SOURCES src/thread_demo/main.c) +setup_target(tcp_demo + LIBRARIES netxduo + SOURCES src/tcp_demo/main.c +) diff --git a/CMakePresets.json b/CMakePresets.json new file mode 100644 index 0000000..ac9f2c7 --- /dev/null +++ b/CMakePresets.json @@ -0,0 +1,67 @@ +{ + "version": 3, + "vendor": { + "conan": {} + }, + "cmakeMinimumRequired": { + "major": 3, + "minor": 24, + 
"patch": 0 + }, + "configurePresets": [ + { + "name": "Debug", + "binaryDir": "${sourceDir}/build/${presetName}", + "cacheVariables": { + "CMAKE_TOOLCHAIN_FILE": "${sourceDir}/cmake/rv64imac_gnu.cmake", + "CMAKE_BUILD_TYPE": "Debug", + "CMAKE_EXPORT_COMPILE_COMMANDS": "ON" + } + }, + { + "name": "Debug32", + "binaryDir": "${sourceDir}/build/${presetName}", + "cacheVariables": { + "CMAKE_TOOLCHAIN_FILE": "${sourceDir}/cmake/rv32imac_gnu.cmake", + "CMAKE_BUILD_TYPE": "Debug", + "CMAKE_EXPORT_COMPILE_COMMANDS": "ON" + } + }, + { + "name": "Release", + "binaryDir": "${sourceDir}/build/${presetName}", + "cacheVariables": { + "CMAKE_TOOLCHAIN_FILE": "${sourceDir}/cmake/rv64imac_gnu.cmake", + "CMAKE_BUILD_TYPE": "RelWithDebInfo", + "CMAKE_EXPORT_COMPILE_COMMANDS": "ON" + } + }, + { + "name": "Release32", + "binaryDir": "${sourceDir}/build/${presetName}", + "cacheVariables": { + "CMAKE_TOOLCHAIN_FILE": "${sourceDir}/cmake/rv32imac_gnu.cmake", + "CMAKE_BUILD_TYPE": "RelWithDebInfo", + "CMAKE_EXPORT_COMPILE_COMMANDS": "ON" + } + }, + { + "name": "MinSizeRel", + "binaryDir": "${sourceDir}/build/${presetName}", + "cacheVariables": { + "CMAKE_TOOLCHAIN_FILE": "${sourceDir}/cmake/rv64imac_gnu.cmake", + "CMAKE_BUILD_TYPE": "MinSizeRel", + "CMAKE_EXPORT_COMPILE_COMMANDS": "ON" + } + }, + { + "name": "MinSizeRel32", + "binaryDir": "${sourceDir}/build/${presetName}", + "cacheVariables": { + "CMAKE_TOOLCHAIN_FILE": "${sourceDir}/cmake/rv32imac_gnu.cmake", + "CMAKE_BUILD_TYPE": "MinSizeRel", + "CMAKE_EXPORT_COMPILE_COMMANDS": "ON" + } + } + ] +} \ No newline at end of file diff --git a/README.md b/README.md new file mode 100644 index 0000000..36180d5 --- /dev/null +++ b/README.md @@ -0,0 +1,19 @@ +# Running the RTOS + +This repositiory contains prototypical RISC-V implementation of ThreadX designed to run on the RISC-V VP by MINRES (tested on commit 1b5585). + +## Building the RTOS + +Two presets are provided, targetting RV32GC and RV64GC. 
+ +## Running on the VP + +A run command can look like this: + +```bash +riscv-vp --isa=rv64gc_m -f build/Debug/main.elf +``` + +## What is running? + +The current implementation is just a demo, taken from the existing threadx qemu implementation. diff --git a/cmake/rv32gc_gnu.cmake b/cmake/rv32gc_gnu.cmake new file mode 100644 index 0000000..e56c15e --- /dev/null +++ b/cmake/rv32gc_gnu.cmake @@ -0,0 +1,12 @@ +# Name of the target +set(CMAKE_SYSTEM_NAME Generic) +set(CMAKE_SYSTEM_PROCESSOR risc-v32) + +set(THREADX_ARCH "risc-v32") +set(THREADX_TOOLCHAIN "gnu") +set(ARCH_FLAGS "-march=rv32gc_zicsr_zifencei -mabi=ilp32d -mcmodel=medany") +set(CFLAGS "${ARCH_FLAGS}") +set(ASFLAGS "${ARCH_FLAGS}") +set(LDFLAGS "${ARCH_FLAGS}") + +include(${CMAKE_CURRENT_LIST_DIR}/../third-party/threadx/cmake/riscv64-unknown-elf.cmake) diff --git a/cmake/rv32imac_gnu.cmake b/cmake/rv32imac_gnu.cmake new file mode 100644 index 0000000..d201192 --- /dev/null +++ b/cmake/rv32imac_gnu.cmake @@ -0,0 +1,12 @@ +# Name of the target +set(CMAKE_SYSTEM_NAME Generic) +set(CMAKE_SYSTEM_PROCESSOR risc-v32) + +set(THREADX_ARCH "risc-v32") +set(THREADX_TOOLCHAIN "gnu") +set(ARCH_FLAGS "-march=rv32imac_zicsr_zifencei -mabi=ilp32 -mcmodel=medany") +set(CFLAGS "${ARCH_FLAGS}") +set(ASFLAGS "${ARCH_FLAGS}") +set(LDFLAGS "${ARCH_FLAGS}") + +include(${CMAKE_CURRENT_LIST_DIR}/../third-party/threadx/cmake/riscv64-unknown-elf.cmake) diff --git a/cmake/rv64gc_gnu.cmake b/cmake/rv64gc_gnu.cmake new file mode 100644 index 0000000..499ee7f --- /dev/null +++ b/cmake/rv64gc_gnu.cmake @@ -0,0 +1,12 @@ +# Name of the target +set(CMAKE_SYSTEM_NAME Generic) +set(CMAKE_SYSTEM_PROCESSOR risc-v64) + +set(THREADX_ARCH "risc-v64") +set(THREADX_TOOLCHAIN "gnu") +set(ARCH_FLAGS "-march=rv64gc_zicsr_zifencei -mabi=lp64d -mcmodel=medany") +set(CFLAGS "${ARCH_FLAGS}") +set(ASFLAGS "${ARCH_FLAGS}") +set(LDFLAGS "${ARCH_FLAGS}") + +include(${CMAKE_CURRENT_LIST_DIR}/../third-party/threadx/cmake/riscv64-unknown-elf.cmake) diff --git 
a/cmake/rv64imac_gnu.cmake b/cmake/rv64imac_gnu.cmake new file mode 100644 index 0000000..f75b897 --- /dev/null +++ b/cmake/rv64imac_gnu.cmake @@ -0,0 +1,12 @@ +# Name of the target +set(CMAKE_SYSTEM_NAME Generic) +set(CMAKE_SYSTEM_PROCESSOR risc-v64) + +set(THREADX_ARCH "risc-v64") +set(THREADX_TOOLCHAIN "gnu") +set(ARCH_FLAGS "-march=rv64imac_zicsr_zifencei -mabi=lp64 -mcmodel=medany") +set(CFLAGS "${ARCH_FLAGS}") +set(ASFLAGS "${ARCH_FLAGS}") +set(LDFLAGS "${ARCH_FLAGS}") + +include(${CMAKE_CURRENT_LIST_DIR}/../third-party/threadx/cmake/riscv64-unknown-elf.cmake) diff --git a/port/moonlight/README.md b/port/moonlight/README.md new file mode 100644 index 0000000..7df794b --- /dev/null +++ b/port/moonlight/README.md @@ -0,0 +1,6 @@ +## Generation of register files + +``` +pip install --extra-index-url https://git.minres.com/api/packages/Tools/pypi/simple peakrdl-mnrs +mnrs_gen --firmware -o port/moonlight EthMac.rdl +``` diff --git a/port/moonlight/aclint.h b/port/moonlight/aclint.h new file mode 100644 index 0000000..793ce89 --- /dev/null +++ b/port/moonlight/aclint.h @@ -0,0 +1,39 @@ +#ifndef _DEVICES_ACLINT_H +#define _DEVICES_ACLINT_H + +#include "gen/aclint.h" +#include + +static void set_aclint_mtime(volatile aclint_t* reg, uint64_t value) { + set_aclint_mtime_hi(reg, (uint32_t)(value >> 32)); + set_aclint_mtime_lo(reg, (uint32_t)value); +} + +static uint64_t get_aclint_mtime(volatile aclint_t* reg) { +#if(__riscv_xlen == 64) + // this assume little endianness + volatile uint64_t* mtime = (volatile uint64_t*)(uint64_t)(®->MTIME_LO); + return *mtime; +#else + uint32_t mtimeh_val; + uint32_t mtimel_val; + do { + mtimeh_val = get_aclint_mtime_hi(reg); + mtimel_val = get_aclint_mtime_lo(reg); + } while(mtimeh_val != get_aclint_mtime_hi(reg)); + return (uint64_t)((((uint64_t)mtimeh_val) << 32) | mtimel_val); +#endif +} + +static void set_aclint_mtimecmp(volatile aclint_t* reg, uint64_t value) { + set_aclint_mtimecmp0lo(reg, (uint32_t)0xFFFFFFFF); + 
set_aclint_mtimecmp0hi(reg, (uint32_t)(value >> 32)); + set_aclint_mtimecmp0lo(reg, (uint32_t)value); +} + +static uint64_t get_aclint_mtimecmp(volatile aclint_t* reg) { + uint64_t value = ((uint64_t)get_aclint_mtimecmp0hi(reg) << 32) | (uint64_t)get_aclint_mtimecmp0lo(reg); + return value; +} + +#endif /* _DEVICES_ACLINT_H */ diff --git a/port/moonlight/board.c b/port/moonlight/board.c new file mode 100644 index 0000000..cca080d --- /dev/null +++ b/port/moonlight/board.c @@ -0,0 +1,39 @@ +#include "hwtimer.h" +#include "csr.h" +#include "platform.h" +#include "uart.h" +#include +#include + +// needed by picolibc/port.c +int uart_putc(int ch) { + int intr_enable = riscv_mintr_get(); + riscv_mintr_off(); + uart_write(uart, ch); + riscv_mintr_restore(intr_enable); + return 1; +} + +int uart_getc(void) { + int intr_enable = riscv_mintr_get(); + riscv_mintr_off(); + int ch = uart_read(uart); + riscv_mintr_restore(intr_enable); + return ch; +} + +int uart_init(void) { + puts("[UART0] : Uart Init Done, this is Test output!"); + return 0; +}; + +int board_init(void) { + int ret; + ret = uart_init(); + if(ret) + return ret; + ret = hwtimer_init(); + if(ret) + return ret; + return 0; +} diff --git a/port/moonlight/bootup.c b/port/moonlight/bootup.c new file mode 100644 index 0000000..88f4a86 --- /dev/null +++ b/port/moonlight/bootup.c @@ -0,0 +1,101 @@ +/* + Simple C++ startup routine to setup CRT + SPDX-License-Identifier: Unlicense + + (https://five-embeddev.com/ | http://www.shincbm.com/) + +*/ + +#include +#include + +#ifdef __cplusplus +#define EXTERN_C extern "C" +#else +#define EXTERN_C extern +#endif + +// Generic C function pointer. +typedef void(*function_t)(void) ; +// These symbols are defined by the linker script. 
+// See linker.lds +EXTERN_C uint8_t __bss_start; +EXTERN_C uint8_t __bss_end; +EXTERN_C const uint8_t __data_source; +EXTERN_C uint8_t __data_start; +EXTERN_C uint8_t __data_end; + +EXTERN_C function_t __init_array_start; +EXTERN_C function_t __init_array_end; +EXTERN_C function_t __fini_array_start; +EXTERN_C function_t __fini_array_end; + +// This function will be placed by the linker script according to the section +// Raw function 'called' by the CPU with no runtime. +EXTERN_C void _start(void) __attribute__ ((naked,section(".text.init"))); + +// Entry and exit points as C functions. +EXTERN_C void _initialize(void) __attribute__ ((noreturn,section(".init"))); +EXTERN_C void _exit(int exit_code) __attribute__ ((noreturn,noinline,weak)); + +// Standard entry point, no arguments. +extern int main(void); + +// The linker script will place this in the reset entry point. +// It will be 'called' with no stack or C runtime configuration. +// NOTE - this only supports a single hart. +// tp will not be initialized +void _start(void) { + // Setup SP and GP + // The locations are defined in the linker script + __asm__ volatile ( + ".option push;" + // The 'norelax' option is critical here. + // Without 'norelax' the global pointer will + // be loaded relative to the global pointer! + ".option norelax;" + "la gp, __global_pointer$;" + ".option pop;" + "la sp, _sp;" + "jal zero, _initialize;" + : /* output: none %0 */ + : /* input: none */ + : /* clobbers: none */); + // This point will not be executed, _initialize() will be called with no return. +} + +// At this point we have a stack and global poiner, but no access to global variables. 
+void _initialize(void) { + // Init memory regions + // Clear the .bss section (global variables with no initial values) + memset((void*) &__bss_start, + 0, + (&__bss_end - &__bss_start)); + // Initialize the .data section (global variables with initial values) + memcpy((void*)&__data_start, + (const void*)&__data_source, + (&__data_end - &__data_start)); + // Call constructors + for (const function_t* entry=&__init_array_start; + entry < &__init_array_end; + ++entry) { + (*entry)(); + } + int rc = main(); + // Call destructors + for (const function_t* entry=&__fini_array_start; + entry < &__fini_array_end; + ++entry) { + (*entry)(); + } + _exit(rc); +} + +// This should never be called. Busy loop with the CPU in idle state. +void _exit(int exit_code) { + (void)exit_code; + // Halt + while (1) { + __asm__ volatile ("wfi"); + } +} \ No newline at end of file diff --git a/port/moonlight/exception.c b/port/moonlight/exception.c new file mode 100644 index 0000000..8bc2caf --- /dev/null +++ b/port/moonlight/exception.c @@ -0,0 +1,79 @@ +#include +#include +#include "riscv-traps.h" +#include "riscv-csr.h" + +// Expect this to increment one time per second - inside exception handler, after each return of MTI handler. 
+static volatile uint64_t ecall_count = 0; + +void exception(uintptr_t mcause, uintptr_t mepc, uintptr_t mtval) { + switch(mcause) { + case RISCV_EXCP_INSTRUCTION_ADDRESS_MISALIGNED: { + puts("[EXCEPTION] : Instruction address misaligned\n"); + break; + } + case RISCV_EXCP_INSTRUCTION_ACCESS_FAULT: { + puts("[EXCEPTION] : Instruction access fault\n"); + break; + } + case RISCV_EXCP_ILLEGAL_INSTRUCTION: { + puts("[EXCEPTION] : Illegal Instruction\n"); + break; + } + case RISCV_EXCP_BREAKPOINT: { + puts("[EXCEPTION] : Breakpoint\n"); + break; + } + case RISCV_EXCP_LOAD_ADDRESS_MISALIGNED: { + puts("[EXCEPTION] : Load address misaligned"); + printf("[EXCEPTION] : PC: 0x%x\n", mepc); + printf("[EXCEPTION] : Addr: 0x%x\n", mtval); + break; + } + case RISCV_EXCP_LOAD_ACCESS_FAULT: { + puts("[EXCEPTION] : Load access fault\n"); + break; + } + case RISCV_EXCP_STORE_AMO_ADDRESS_MISALIGNED: { + puts("[EXCEPTION] : Store/AMO address misaligned"); + printf("[EXCEPTION] : PC: 0x%x\n", mepc); + printf("[EXCEPTION] : Addr: 0x%x\n", mtval); + break; + } + case RISCV_EXCP_STORE_AMO_ACCESS_FAULT: { + puts("[EXCEPTION] : Store/AMO access fault\n"); + break; + } + case RISCV_EXCP_ENVIRONMENT_CALL_FROM_U_MODE: { + puts("[EXCEPTION] : Environment call from U-mode\n"); + break; + } + case RISCV_EXCP_ENVIRONMENT_CALL_FROM_S_MODE: { + puts("[EXCEPTION] : Environment call from S-mode\n"); + break; + } + case RISCV_EXCP_ENVIRONMENT_CALL_FROM_M_MODE: { + puts("[EXCEPTION] : Environment call from M-mode\n"); + ecall_count++; + csr_write_mepc(mepc+4); + break; + } + case RISCV_EXCP_INSTRUCTION_PAGE_FAULT: { + puts("[EXCEPTION] : Instruction page fault\n"); + break; + } + case RISCV_EXCP_LOAD_PAGE_FAULT: { + puts("[EXCEPTION] : Load page fault\n"); + break; + } + case RISCV_EXCP_STORE_AMO_PAGE_FAULT: { + puts("[EXCEPTION] : Store/AMO page fault\n"); + break; + } + default: { + printf("[EXCEPTION] : Unknown trap cause: %lu\n", mcause); + } + } + while(1) + ; +} diff --git 
a/port/moonlight/gen/aclint.h b/port/moonlight/gen/aclint.h new file mode 100644 index 0000000..b71dff1 --- /dev/null +++ b/port/moonlight/gen/aclint.h @@ -0,0 +1,97 @@ +/* + * Copyright (c) 2023 - 2024 MINRES Technologies GmbH + * + * SPDX-License-Identifier: Apache-2.0 + * + * Generated at 2024-08-02 08:46:07 UTC + * by peakrdl_mnrs version 1.2.7 + */ + +#ifndef _BSP_ACLINT_H +#define _BSP_ACLINT_H + +#include + +typedef struct { + volatile uint32_t MSIP[4096]; + struct { + volatile uint32_t LO; + volatile uint32_t HI; + } MTIMECMP[4095]; + volatile uint32_t MTIME_LO; + volatile uint32_t MTIME_HI; +} aclint_t; + +#define ACLINT_MSIP_OFFS 0 +#define ACLINT_MSIP_MASK 0x1 +#define ACLINT_MSIP(V) ((V & ACLINT_MSIP0_MASK) << ACLINT_MSIP0_OFFS) + +#define ACLINT_MTIMECMPLO_OFFS 0 +#define ACLINT_MTIMECMPLO_MASK 0xffffffff +#define ACLINT_MTIMECMPLO(V) ((V & ACLINT_MTIMECMP0LO_MASK) << ACLINT_MTIMECMP0LO_OFFS) + +#define ACLINT_MTIMECMPHI_OFFS 0 +#define ACLINT_MTIMECMPHI_MASK 0xffffffff +#define ACLINT_MTIMECMPHI(V) ((V & ACLINT_MTIMECMP0HI_MASK) << ACLINT_MTIMECMP0HI_OFFS) + +#define ACLINT_MTIME_LO_OFFS 0 +#define ACLINT_MTIME_LO_MASK 0xffffffff +#define ACLINT_MTIME_LO(V) ((V & ACLINT_MTIME_LO_MASK) << ACLINT_MTIME_LO_OFFS) + +#define ACLINT_MTIME_HI_OFFS 0 +#define ACLINT_MTIME_HI_MASK 0xffffffff +#define ACLINT_MTIME_HI(V) ((V & ACLINT_MTIME_HI_MASK) << ACLINT_MTIME_HI_OFFS) + +// ACLINT_MSIP0 +static inline uint32_t get_aclint_msip0(volatile aclint_t* reg) { return reg->MSIP[0]; } +static inline void set_aclint_msip0(volatile aclint_t* reg, uint32_t value) { reg->MSIP[0] = value; } +static inline uint32_t get_aclint_msip0_msip(volatile aclint_t* reg) { return (reg->MSIP[0] >> 0) & 0x1; } +static inline void set_aclint_msip0_msip(volatile aclint_t* reg, uint8_t value) { + reg->MSIP[0] = (reg->MSIP[0] & ~(0x1U << 0)) | (value << 0); +} + +// ACLINT_MSIP +static inline uint32_t get_aclint_msip(volatile aclint_t* reg, unsigned idx) { return reg->MSIP[idx]; } +static 
inline void set_aclint_msip(volatile aclint_t* reg, unsigned idx, uint32_t value) { reg->MSIP[idx] = value; } +static inline uint32_t get_aclint_msip_msip(volatile aclint_t* reg, unsigned idx) { return (reg->MSIP[idx] >> 0) & 0x1; } +static inline void set_aclint_msip_msip(volatile aclint_t* reg, unsigned idx, uint8_t value) { + reg->MSIP[idx] = (reg->MSIP[idx] & ~(0x1U << 0)) | (value << 0); +} + +// ACLINT_MTIMECMP0LO +static inline uint32_t get_aclint_mtimecmp0lo(volatile aclint_t* reg) { return (reg->MTIMECMP[0].LO >> 0) & 0xffffffff; } +static inline void set_aclint_mtimecmp0lo(volatile aclint_t* reg, uint32_t value) { + reg->MTIMECMP[0].LO = (reg->MTIMECMP[0].LO & ~(0xffffffffU << 0)) | (value << 0); +} + +// ACLINT_MTIMECMPxLO +static inline uint32_t get_aclint_mtimecmplo(volatile aclint_t* reg, unsigned idx) { return (reg->MTIMECMP[idx].LO >> 0) & 0xffffffff; } +static inline void set_aclint_mtimecmplo(volatile aclint_t* reg, unsigned idx, uint32_t value) { + reg->MTIMECMP[idx].LO = (reg->MTIMECMP[idx].LO & ~(0xffffffffU << 0)) | (value << 0); +} + +// ACLINT_MTIMECMP0HI +static inline uint32_t get_aclint_mtimecmp0hi(volatile aclint_t* reg) { return (reg->MTIMECMP[0].HI >> 0) & 0xffffffff; } +static inline void set_aclint_mtimecmp0hi(volatile aclint_t* reg, uint32_t value) { + reg->MTIMECMP[0].HI = (reg->MTIMECMP[0].HI & ~(0xffffffffU << 0)) | (value << 0); +} + +// ACLINT_MTIMECMPxHI +static inline uint32_t get_aclint_mtimecmphi(volatile aclint_t* reg, unsigned idx) { return (reg->MTIMECMP[idx].HI >> 0) & 0xffffffff; } +static inline void set_aclint_mtimecmphi(volatile aclint_t* reg, unsigned idx, uint32_t value) { + reg->MTIMECMP[idx].HI = (reg->MTIMECMP[idx].HI & ~(0xffffffffU << 0)) | (value << 0); +} + +// ACLINT_MTIME_LO +static inline uint32_t get_aclint_mtime_lo(volatile aclint_t* reg) { return (reg->MTIME_LO >> 0) & 0xffffffff; } +static inline void set_aclint_mtime_lo(volatile aclint_t* reg, uint32_t value) { + reg->MTIME_LO = (reg->MTIME_LO & 
~(0xffffffffU << 0)) | (value << 0); +} + +// ACLINT_MTIME_HI +static inline uint32_t get_aclint_mtime_hi(volatile aclint_t* reg) { return (reg->MTIME_HI >> 0) & 0xffffffff; } +static inline void set_aclint_mtime_hi(volatile aclint_t* reg, uint32_t value) { + reg->MTIME_HI = (reg->MTIME_HI & ~(0xffffffffU << 0)) | (value << 0); +} + +#endif /* _BSP_ACLINT_H */ diff --git a/port/moonlight/gen/ethmac.h b/port/moonlight/gen/ethmac.h new file mode 100644 index 0000000..6865f70 --- /dev/null +++ b/port/moonlight/gen/ethmac.h @@ -0,0 +1,477 @@ +/* +* Copyright (c) 2023 - 2026 MINRES Technologies GmbH +* +* SPDX-License-Identifier: Apache-2.0 +* +* Generated at 2026-01-26 15:33:03 UTC +* by peakrdl_mnrs version 1.3.1 +*/ + +#ifndef _BSP_ETHMAC_H +#define _BSP_ETHMAC_H + +#include + +typedef struct { + volatile uint32_t MAC_CTRL; + uint8_t fill0[12]; + volatile uint32_t MAC_TX; + volatile uint32_t MAC_TX_AVAILABILITY; + uint8_t fill1[8]; + volatile uint32_t MAC_RX; + uint8_t fill2[8]; + volatile uint32_t MAC_RX_STATS; + volatile uint32_t MAC_INTR; + uint8_t fill3[12]; + volatile uint32_t MDIO_DATA; + volatile uint32_t MDIO_STATUS; + volatile uint32_t MDIO_CONFIG; + volatile uint32_t MDIO_INTR; + uint8_t fill4[16]; + volatile uint32_t MDIO_SCLK_CONFIG; + volatile uint32_t MDIO_SSGEN_SETUP; + volatile uint32_t MDIO_SSGEN_HOLD; + volatile uint32_t MDIO_SSGEN_DISABLE; + volatile uint32_t MDIO_SSGEN_ACTIVE_HIGH; + uint8_t fill5[28]; + volatile uint32_t MDIO_DIRECT_WRITE; + volatile uint32_t MDIO_DIRECT_READ_WRITE; + volatile uint32_t MDIO_DIRECT_READ; +}ethmac_t; + +#define ETHMAC_MAC_CTRL_TX_FLUSH_OFFS 0 +#define ETHMAC_MAC_CTRL_TX_FLUSH_MASK 0x1 +#define ETHMAC_MAC_CTRL_TX_FLUSH(V) ((V & ETHMAC_MAC_CTRL_TX_FLUSH_MASK) << ETHMAC_MAC_CTRL_TX_FLUSH_OFFS) + +#define ETHMAC_MAC_CTRL_TX_READY_OFFS 1 +#define ETHMAC_MAC_CTRL_TX_READY_MASK 0x1 +#define ETHMAC_MAC_CTRL_TX_READY(V) ((V & ETHMAC_MAC_CTRL_TX_READY_MASK) << ETHMAC_MAC_CTRL_TX_READY_OFFS) + +#define 
/* --- Generated ETHMAC register bit-field helpers -------------------------
 * For each field: *_OFFS is the bit offset within the 32-bit register,
 * *_MASK is the right-justified width mask, and NAME(V) shifts V into
 * field position.  The macro argument is fully parenthesized so that
 * expressions such as NAME(a | b) expand correctly (the original
 * `(V & MASK)` bound `&` tighter than any lower-precedence operator in
 * the argument — CERT PRE01-C).
 */

/* MAC_CTRL fields */
#define ETHMAC_MAC_CTRL_TX_ALIGNER_ENABLE_OFFS 2
#define ETHMAC_MAC_CTRL_TX_ALIGNER_ENABLE_MASK 0x1
#define ETHMAC_MAC_CTRL_TX_ALIGNER_ENABLE(V) (((V) & ETHMAC_MAC_CTRL_TX_ALIGNER_ENABLE_MASK) << ETHMAC_MAC_CTRL_TX_ALIGNER_ENABLE_OFFS)

#define ETHMAC_MAC_CTRL_RX_FLUSH_OFFS 4
#define ETHMAC_MAC_CTRL_RX_FLUSH_MASK 0x1
#define ETHMAC_MAC_CTRL_RX_FLUSH(V) (((V) & ETHMAC_MAC_CTRL_RX_FLUSH_MASK) << ETHMAC_MAC_CTRL_RX_FLUSH_OFFS)

#define ETHMAC_MAC_CTRL_RX_PENDING_OFFS 5
#define ETHMAC_MAC_CTRL_RX_PENDING_MASK 0x1
#define ETHMAC_MAC_CTRL_RX_PENDING(V) (((V) & ETHMAC_MAC_CTRL_RX_PENDING_MASK) << ETHMAC_MAC_CTRL_RX_PENDING_OFFS)

#define ETHMAC_MAC_CTRL_RX_ALIGNER_ENABLE_OFFS 6
#define ETHMAC_MAC_CTRL_RX_ALIGNER_ENABLE_MASK 0x1
#define ETHMAC_MAC_CTRL_RX_ALIGNER_ENABLE(V) (((V) & ETHMAC_MAC_CTRL_RX_ALIGNER_ENABLE_MASK) << ETHMAC_MAC_CTRL_RX_ALIGNER_ENABLE_OFFS)

/* MAC_TX / MAC_RX data ports and availability */
#define ETHMAC_MAC_TX_OFFS 0
#define ETHMAC_MAC_TX_MASK 0xffffffff
#define ETHMAC_MAC_TX(V) (((V) & ETHMAC_MAC_TX_MASK) << ETHMAC_MAC_TX_OFFS)

#define ETHMAC_MAC_TX_AVAILABILITY_OFFS 0
#define ETHMAC_MAC_TX_AVAILABILITY_MASK 0x7ff
#define ETHMAC_MAC_TX_AVAILABILITY(V) (((V) & ETHMAC_MAC_TX_AVAILABILITY_MASK) << ETHMAC_MAC_TX_AVAILABILITY_OFFS)

#define ETHMAC_MAC_RX_OFFS 0
#define ETHMAC_MAC_RX_MASK 0xffffffff
#define ETHMAC_MAC_RX(V) (((V) & ETHMAC_MAC_RX_MASK) << ETHMAC_MAC_RX_OFFS)

/* MAC_RX_STATS fields */
#define ETHMAC_MAC_RX_STATS_RX_ERRORS_OFFS 0
#define ETHMAC_MAC_RX_STATS_RX_ERRORS_MASK 0xff
#define ETHMAC_MAC_RX_STATS_RX_ERRORS(V) (((V) & ETHMAC_MAC_RX_STATS_RX_ERRORS_MASK) << ETHMAC_MAC_RX_STATS_RX_ERRORS_OFFS)

#define ETHMAC_MAC_RX_STATS_RX_DROPS_OFFS 8
#define ETHMAC_MAC_RX_STATS_RX_DROPS_MASK 0xff
#define ETHMAC_MAC_RX_STATS_RX_DROPS(V) (((V) & ETHMAC_MAC_RX_STATS_RX_DROPS_MASK) << ETHMAC_MAC_RX_STATS_RX_DROPS_OFFS)

/* MAC_INTR enable bits */
#define ETHMAC_MAC_INTR_TX_FREE_INTR_ENABLE_OFFS 0
#define ETHMAC_MAC_INTR_TX_FREE_INTR_ENABLE_MASK 0x1
#define ETHMAC_MAC_INTR_TX_FREE_INTR_ENABLE(V) (((V) & ETHMAC_MAC_INTR_TX_FREE_INTR_ENABLE_MASK) << ETHMAC_MAC_INTR_TX_FREE_INTR_ENABLE_OFFS)

#define ETHMAC_MAC_INTR_RX_DATA_AVAIL_INTR_ENABLE_OFFS 1
#define ETHMAC_MAC_INTR_RX_DATA_AVAIL_INTR_ENABLE_MASK 0x1
#define ETHMAC_MAC_INTR_RX_DATA_AVAIL_INTR_ENABLE(V) (((V) & ETHMAC_MAC_INTR_RX_DATA_AVAIL_INTR_ENABLE_MASK) << ETHMAC_MAC_INTR_RX_DATA_AVAIL_INTR_ENABLE_OFFS)

/* MDIO_DATA fields */
#define ETHMAC_MDIO_DATA_DATA_OFFS 0
#define ETHMAC_MDIO_DATA_DATA_MASK 0xff
#define ETHMAC_MDIO_DATA_DATA(V) (((V) & ETHMAC_MDIO_DATA_DATA_MASK) << ETHMAC_MDIO_DATA_DATA_OFFS)

#define ETHMAC_MDIO_DATA_WRITE_OFFS 8
#define ETHMAC_MDIO_DATA_WRITE_MASK 0x1
#define ETHMAC_MDIO_DATA_WRITE(V) (((V) & ETHMAC_MDIO_DATA_WRITE_MASK) << ETHMAC_MDIO_DATA_WRITE_OFFS)

#define ETHMAC_MDIO_DATA_READ_OFFS 9
#define ETHMAC_MDIO_DATA_READ_MASK 0x1
#define ETHMAC_MDIO_DATA_READ(V) (((V) & ETHMAC_MDIO_DATA_READ_MASK) << ETHMAC_MDIO_DATA_READ_OFFS)

#define ETHMAC_MDIO_DATA_SSGEN_OFFS 11
#define ETHMAC_MDIO_DATA_SSGEN_MASK 0x1
#define ETHMAC_MDIO_DATA_SSGEN(V) (((V) & ETHMAC_MDIO_DATA_SSGEN_MASK) << ETHMAC_MDIO_DATA_SSGEN_OFFS)

#define ETHMAC_MDIO_DATA_RX_DATA_INVALID_OFFS 31
#define ETHMAC_MDIO_DATA_RX_DATA_INVALID_MASK 0x1
#define ETHMAC_MDIO_DATA_RX_DATA_INVALID(V) (((V) & ETHMAC_MDIO_DATA_RX_DATA_INVALID_MASK) << ETHMAC_MDIO_DATA_RX_DATA_INVALID_OFFS)

/* MDIO_STATUS fields */
#define ETHMAC_MDIO_STATUS_TX_FREE_OFFS 0
#define ETHMAC_MDIO_STATUS_TX_FREE_MASK 0x3f
#define ETHMAC_MDIO_STATUS_TX_FREE(V) (((V) & ETHMAC_MDIO_STATUS_TX_FREE_MASK) << ETHMAC_MDIO_STATUS_TX_FREE_OFFS)

#define ETHMAC_MDIO_STATUS_RX_AVAIL_OFFS 16
#define ETHMAC_MDIO_STATUS_RX_AVAIL_MASK 0x3f
#define ETHMAC_MDIO_STATUS_RX_AVAIL(V) (((V) & ETHMAC_MDIO_STATUS_RX_AVAIL_MASK) << ETHMAC_MDIO_STATUS_RX_AVAIL_OFFS)

/* MDIO_CONFIG fields */
#define ETHMAC_MDIO_CONFIG_CPOL_OFFS 0
#define ETHMAC_MDIO_CONFIG_CPOL_MASK 0x1
#define ETHMAC_MDIO_CONFIG_CPOL(V) (((V) & ETHMAC_MDIO_CONFIG_CPOL_MASK) << ETHMAC_MDIO_CONFIG_CPOL_OFFS)

#define ETHMAC_MDIO_CONFIG_CPHA_OFFS 1
#define ETHMAC_MDIO_CONFIG_CPHA_MASK 0x1
#define ETHMAC_MDIO_CONFIG_CPHA(V) (((V) & ETHMAC_MDIO_CONFIG_CPHA_MASK) << ETHMAC_MDIO_CONFIG_CPHA_OFFS)

#define ETHMAC_MDIO_CONFIG_MODE_OFFS 4
#define ETHMAC_MDIO_CONFIG_MODE_MASK 0x1
#define ETHMAC_MDIO_CONFIG_MODE(V) (((V) & ETHMAC_MDIO_CONFIG_MODE_MASK) << ETHMAC_MDIO_CONFIG_MODE_OFFS)

/* MDIO_INTR fields (ie = interrupt enable, ip = interrupt pending) */
#define ETHMAC_MDIO_INTR_TX_IE_OFFS 0
#define ETHMAC_MDIO_INTR_TX_IE_MASK 0x1
#define ETHMAC_MDIO_INTR_TX_IE(V) (((V) & ETHMAC_MDIO_INTR_TX_IE_MASK) << ETHMAC_MDIO_INTR_TX_IE_OFFS)

#define ETHMAC_MDIO_INTR_RX_IE_OFFS 1
#define ETHMAC_MDIO_INTR_RX_IE_MASK 0x1
#define ETHMAC_MDIO_INTR_RX_IE(V) (((V) & ETHMAC_MDIO_INTR_RX_IE_MASK) << ETHMAC_MDIO_INTR_RX_IE_OFFS)

#define ETHMAC_MDIO_INTR_TX_IP_OFFS 8
#define ETHMAC_MDIO_INTR_TX_IP_MASK 0x1
#define ETHMAC_MDIO_INTR_TX_IP(V) (((V) & ETHMAC_MDIO_INTR_TX_IP_MASK) << ETHMAC_MDIO_INTR_TX_IP_OFFS)

#define ETHMAC_MDIO_INTR_RX_IP_OFFS 9
#define ETHMAC_MDIO_INTR_RX_IP_MASK 0x1
#define ETHMAC_MDIO_INTR_RX_IP(V) (((V) & ETHMAC_MDIO_INTR_RX_IP_MASK) << ETHMAC_MDIO_INTR_RX_IP_OFFS)

#define ETHMAC_MDIO_INTR_TX_ACTIVE_OFFS 16
#define ETHMAC_MDIO_INTR_TX_ACTIVE_MASK 0x1
#define ETHMAC_MDIO_INTR_TX_ACTIVE(V) (((V) & ETHMAC_MDIO_INTR_TX_ACTIVE_MASK) << ETHMAC_MDIO_INTR_TX_ACTIVE_OFFS)

/* MDIO clock / slave-select-generator timing (12-bit cycle counts) */
#define ETHMAC_MDIO_SCLK_CONFIG_OFFS 0
#define ETHMAC_MDIO_SCLK_CONFIG_MASK 0xfff
#define ETHMAC_MDIO_SCLK_CONFIG(V) (((V) & ETHMAC_MDIO_SCLK_CONFIG_MASK) << ETHMAC_MDIO_SCLK_CONFIG_OFFS)

#define ETHMAC_MDIO_SSGEN_SETUP_OFFS 0
#define ETHMAC_MDIO_SSGEN_SETUP_MASK 0xfff
#define ETHMAC_MDIO_SSGEN_SETUP(V) (((V) & ETHMAC_MDIO_SSGEN_SETUP_MASK) << ETHMAC_MDIO_SSGEN_SETUP_OFFS)

#define ETHMAC_MDIO_SSGEN_HOLD_OFFS 0
#define ETHMAC_MDIO_SSGEN_HOLD_MASK 0xfff
#define ETHMAC_MDIO_SSGEN_HOLD(V) (((V) & ETHMAC_MDIO_SSGEN_HOLD_MASK) << ETHMAC_MDIO_SSGEN_HOLD_OFFS)

#define ETHMAC_MDIO_SSGEN_DISABLE_OFFS 0
#define ETHMAC_MDIO_SSGEN_DISABLE_MASK 0xfff
#define ETHMAC_MDIO_SSGEN_DISABLE(V) (((V) & ETHMAC_MDIO_SSGEN_DISABLE_MASK) << ETHMAC_MDIO_SSGEN_DISABLE_OFFS)
ETHMAC_MDIO_SSGEN_DISABLE_MASK) << ETHMAC_MDIO_SSGEN_DISABLE_OFFS) + +#define ETHMAC_MDIO_SSGEN_ACTIVE_HIGH_OFFS 0 +#define ETHMAC_MDIO_SSGEN_ACTIVE_HIGH_MASK 0x1 +#define ETHMAC_MDIO_SSGEN_ACTIVE_HIGH(V) ((V & ETHMAC_MDIO_SSGEN_ACTIVE_HIGH_MASK) << ETHMAC_MDIO_SSGEN_ACTIVE_HIGH_OFFS) + +#define ETHMAC_MDIO_DIRECT_WRITE_OFFS 0 +#define ETHMAC_MDIO_DIRECT_WRITE_MASK 0xff +#define ETHMAC_MDIO_DIRECT_WRITE(V) ((V & ETHMAC_MDIO_DIRECT_WRITE_MASK) << ETHMAC_MDIO_DIRECT_WRITE_OFFS) + +#define ETHMAC_MDIO_DIRECT_READ_WRITE_OFFS 0 +#define ETHMAC_MDIO_DIRECT_READ_WRITE_MASK 0xff +#define ETHMAC_MDIO_DIRECT_READ_WRITE(V) ((V & ETHMAC_MDIO_DIRECT_READ_WRITE_MASK) << ETHMAC_MDIO_DIRECT_READ_WRITE_OFFS) + +#define ETHMAC_MDIO_DIRECT_READ_OFFS 0 +#define ETHMAC_MDIO_DIRECT_READ_MASK 0xff +#define ETHMAC_MDIO_DIRECT_READ(V) ((V & ETHMAC_MDIO_DIRECT_READ_MASK) << ETHMAC_MDIO_DIRECT_READ_OFFS) + +//ETHMAC_MAC_CTRL +static inline uint32_t get_ethmac_mac_ctrl(volatile ethmac_t* reg){ + return reg->MAC_CTRL; +} +static inline void set_ethmac_mac_ctrl(volatile ethmac_t* reg, uint32_t value){ + reg->MAC_CTRL = value; +} +static inline uint32_t get_ethmac_mac_ctrl_tx_flush(volatile ethmac_t* reg){ + return (reg->MAC_CTRL >> 0) & 0x1; +} +static inline void set_ethmac_mac_ctrl_tx_flush(volatile ethmac_t* reg, uint8_t value){ + reg->MAC_CTRL = (reg->MAC_CTRL & ~(0x1U << 0)) | (value << 0); +} +static inline uint32_t get_ethmac_mac_ctrl_tx_ready(volatile ethmac_t* reg){ + return (reg->MAC_CTRL >> 1) & 0x1; +} +static inline uint32_t get_ethmac_mac_ctrl_tx_aligner_enable(volatile ethmac_t* reg){ + return (reg->MAC_CTRL >> 2) & 0x1; +} +static inline void set_ethmac_mac_ctrl_tx_aligner_enable(volatile ethmac_t* reg, uint8_t value){ + reg->MAC_CTRL = (reg->MAC_CTRL & ~(0x1U << 2)) | (value << 2); +} +static inline uint32_t get_ethmac_mac_ctrl_rx_flush(volatile ethmac_t* reg){ + return (reg->MAC_CTRL >> 4) & 0x1; +} +static inline void set_ethmac_mac_ctrl_rx_flush(volatile ethmac_t* reg, 
uint8_t value){ + reg->MAC_CTRL = (reg->MAC_CTRL & ~(0x1U << 4)) | (value << 4); +} +static inline uint32_t get_ethmac_mac_ctrl_rx_pending(volatile ethmac_t* reg){ + return (reg->MAC_CTRL >> 5) & 0x1; +} +static inline uint32_t get_ethmac_mac_ctrl_rx_aligner_enable(volatile ethmac_t* reg){ + return (reg->MAC_CTRL >> 6) & 0x1; +} +static inline void set_ethmac_mac_ctrl_rx_aligner_enable(volatile ethmac_t* reg, uint8_t value){ + reg->MAC_CTRL = (reg->MAC_CTRL & ~(0x1U << 6)) | (value << 6); +} + +//ETHMAC_MAC_TX +static inline uint32_t get_ethmac_mac_tx(volatile ethmac_t* reg){ + return (reg->MAC_TX >> 0) & 0xffffffff; +} +static inline void set_ethmac_mac_tx(volatile ethmac_t* reg, uint32_t value){ + reg->MAC_TX = (reg->MAC_TX & ~(0xffffffffU << 0)) | (value << 0); +} + +//ETHMAC_MAC_TX_AVAILABILITY +static inline uint32_t get_ethmac_mac_tx_availability(volatile ethmac_t* reg){ + return reg->MAC_TX_AVAILABILITY; +} +static inline uint32_t get_ethmac_mac_tx_availability_words_avail(volatile ethmac_t* reg){ + return (reg->MAC_TX_AVAILABILITY >> 0) & 0x7ff; +} + +//ETHMAC_MAC_RX +static inline uint32_t get_ethmac_mac_rx(volatile ethmac_t* reg){ + return (reg->MAC_RX >> 0) & 0xffffffff; +} + +//ETHMAC_MAC_RX_STATS +static inline uint32_t get_ethmac_mac_rx_stats(volatile ethmac_t* reg){ + return reg->MAC_RX_STATS; +} +static inline uint32_t get_ethmac_mac_rx_stats_rx_errors(volatile ethmac_t* reg){ + return (reg->MAC_RX_STATS >> 0) & 0xff; +} +static inline uint32_t get_ethmac_mac_rx_stats_rx_drops(volatile ethmac_t* reg){ + return (reg->MAC_RX_STATS >> 8) & 0xff; +} + +//ETHMAC_MAC_INTR +static inline uint32_t get_ethmac_mac_intr(volatile ethmac_t* reg){ + return reg->MAC_INTR; +} +static inline void set_ethmac_mac_intr(volatile ethmac_t* reg, uint32_t value){ + reg->MAC_INTR = value; +} +static inline uint32_t get_ethmac_mac_intr_tx_free_intr_enable(volatile ethmac_t* reg){ + return (reg->MAC_INTR >> 0) & 0x1; +} +static inline void 
set_ethmac_mac_intr_tx_free_intr_enable(volatile ethmac_t* reg, uint8_t value){ + reg->MAC_INTR = (reg->MAC_INTR & ~(0x1U << 0)) | (value << 0); +} +static inline uint32_t get_ethmac_mac_intr_rx_data_avail_intr_enable(volatile ethmac_t* reg){ + return (reg->MAC_INTR >> 1) & 0x1; +} +static inline void set_ethmac_mac_intr_rx_data_avail_intr_enable(volatile ethmac_t* reg, uint8_t value){ + reg->MAC_INTR = (reg->MAC_INTR & ~(0x1U << 1)) | (value << 1); +} + +//ETHMAC_MDIO_DATA +static inline uint32_t get_ethmac_mdio_data(volatile ethmac_t* reg){ + return reg->MDIO_DATA; +} +static inline void set_ethmac_mdio_data(volatile ethmac_t* reg, uint32_t value){ + reg->MDIO_DATA = value; +} +static inline uint32_t get_ethmac_mdio_data_data(volatile ethmac_t* reg){ + return (reg->MDIO_DATA >> 0) & 0xff; +} +static inline void set_ethmac_mdio_data_data(volatile ethmac_t* reg, uint8_t value){ + reg->MDIO_DATA = (reg->MDIO_DATA & ~(0xffU << 0)) | (value << 0); +} +static inline uint32_t get_ethmac_mdio_data_write(volatile ethmac_t* reg){ + return (reg->MDIO_DATA >> 8) & 0x1; +} +static inline void set_ethmac_mdio_data_write(volatile ethmac_t* reg, uint8_t value){ + reg->MDIO_DATA = (reg->MDIO_DATA & ~(0x1U << 8)) | (value << 8); +} +static inline uint32_t get_ethmac_mdio_data_read(volatile ethmac_t* reg){ + return (reg->MDIO_DATA >> 9) & 0x1; +} +static inline void set_ethmac_mdio_data_read(volatile ethmac_t* reg, uint8_t value){ + reg->MDIO_DATA = (reg->MDIO_DATA & ~(0x1U << 9)) | (value << 9); +} +static inline uint32_t get_ethmac_mdio_data_ssgen(volatile ethmac_t* reg){ + return (reg->MDIO_DATA >> 11) & 0x1; +} +static inline void set_ethmac_mdio_data_ssgen(volatile ethmac_t* reg, uint8_t value){ + reg->MDIO_DATA = (reg->MDIO_DATA & ~(0x1U << 11)) | (value << 11); +} +static inline uint32_t get_ethmac_mdio_data_rx_data_invalid(volatile ethmac_t* reg){ + return (reg->MDIO_DATA >> 31) & 0x1; +} + +//ETHMAC_MDIO_STATUS +static inline uint32_t get_ethmac_mdio_status(volatile 
ethmac_t* reg){ + return reg->MDIO_STATUS; +} +static inline uint32_t get_ethmac_mdio_status_tx_free(volatile ethmac_t* reg){ + return (reg->MDIO_STATUS >> 0) & 0x3f; +} +static inline uint32_t get_ethmac_mdio_status_rx_avail(volatile ethmac_t* reg){ + return (reg->MDIO_STATUS >> 16) & 0x3f; +} + +//ETHMAC_MDIO_CONFIG +static inline uint32_t get_ethmac_mdio_config(volatile ethmac_t* reg){ + return reg->MDIO_CONFIG; +} +static inline void set_ethmac_mdio_config(volatile ethmac_t* reg, uint32_t value){ + reg->MDIO_CONFIG = value; +} +static inline uint32_t get_ethmac_mdio_config_cpol(volatile ethmac_t* reg){ + return (reg->MDIO_CONFIG >> 0) & 0x1; +} +static inline void set_ethmac_mdio_config_cpol(volatile ethmac_t* reg, uint8_t value){ + reg->MDIO_CONFIG = (reg->MDIO_CONFIG & ~(0x1U << 0)) | (value << 0); +} +static inline uint32_t get_ethmac_mdio_config_cpha(volatile ethmac_t* reg){ + return (reg->MDIO_CONFIG >> 1) & 0x1; +} +static inline void set_ethmac_mdio_config_cpha(volatile ethmac_t* reg, uint8_t value){ + reg->MDIO_CONFIG = (reg->MDIO_CONFIG & ~(0x1U << 1)) | (value << 1); +} +static inline uint32_t get_ethmac_mdio_config_mode(volatile ethmac_t* reg){ + return (reg->MDIO_CONFIG >> 4) & 0x1; +} +static inline void set_ethmac_mdio_config_mode(volatile ethmac_t* reg, uint8_t value){ + reg->MDIO_CONFIG = (reg->MDIO_CONFIG & ~(0x1U << 4)) | (value << 4); +} + +//ETHMAC_MDIO_INTR +static inline uint32_t get_ethmac_mdio_intr(volatile ethmac_t* reg){ + return reg->MDIO_INTR; +} +static inline void set_ethmac_mdio_intr(volatile ethmac_t* reg, uint32_t value){ + reg->MDIO_INTR = value; +} +static inline uint32_t get_ethmac_mdio_intr_tx_ie(volatile ethmac_t* reg){ + return (reg->MDIO_INTR >> 0) & 0x1; +} +static inline void set_ethmac_mdio_intr_tx_ie(volatile ethmac_t* reg, uint8_t value){ + reg->MDIO_INTR = (reg->MDIO_INTR & ~(0x1U << 0)) | (value << 0); +} +static inline uint32_t get_ethmac_mdio_intr_rx_ie(volatile ethmac_t* reg){ + return (reg->MDIO_INTR >> 1) & 
0x1; +} +static inline void set_ethmac_mdio_intr_rx_ie(volatile ethmac_t* reg, uint8_t value){ + reg->MDIO_INTR = (reg->MDIO_INTR & ~(0x1U << 1)) | (value << 1); +} +static inline uint32_t get_ethmac_mdio_intr_tx_ip(volatile ethmac_t* reg){ + return (reg->MDIO_INTR >> 8) & 0x1; +} +static inline void set_ethmac_mdio_intr_tx_ip(volatile ethmac_t* reg, uint8_t value){ + reg->MDIO_INTR = (reg->MDIO_INTR & ~(0x1U << 8)) | (value << 8); +} +static inline uint32_t get_ethmac_mdio_intr_rx_ip(volatile ethmac_t* reg){ + return (reg->MDIO_INTR >> 9) & 0x1; +} +static inline void set_ethmac_mdio_intr_rx_ip(volatile ethmac_t* reg, uint8_t value){ + reg->MDIO_INTR = (reg->MDIO_INTR & ~(0x1U << 9)) | (value << 9); +} +static inline uint32_t get_ethmac_mdio_intr_tx_active(volatile ethmac_t* reg){ + return (reg->MDIO_INTR >> 16) & 0x1; +} + +//ETHMAC_MDIO_SCLK_CONFIG +static inline uint32_t get_ethmac_mdio_sclk_config(volatile ethmac_t* reg){ + return reg->MDIO_SCLK_CONFIG; +} +static inline void set_ethmac_mdio_sclk_config(volatile ethmac_t* reg, uint32_t value){ + reg->MDIO_SCLK_CONFIG = value; +} +static inline uint32_t get_ethmac_mdio_sclk_config_clk_divider(volatile ethmac_t* reg){ + return (reg->MDIO_SCLK_CONFIG >> 0) & 0xfff; +} +static inline void set_ethmac_mdio_sclk_config_clk_divider(volatile ethmac_t* reg, uint16_t value){ + reg->MDIO_SCLK_CONFIG = (reg->MDIO_SCLK_CONFIG & ~(0xfffU << 0)) | (value << 0); +} + +//ETHMAC_MDIO_SSGEN_SETUP +static inline uint32_t get_ethmac_mdio_ssgen_setup(volatile ethmac_t* reg){ + return reg->MDIO_SSGEN_SETUP; +} +static inline void set_ethmac_mdio_ssgen_setup(volatile ethmac_t* reg, uint32_t value){ + reg->MDIO_SSGEN_SETUP = value; +} +static inline uint32_t get_ethmac_mdio_ssgen_setup_setup_cycles(volatile ethmac_t* reg){ + return (reg->MDIO_SSGEN_SETUP >> 0) & 0xfff; +} +static inline void set_ethmac_mdio_ssgen_setup_setup_cycles(volatile ethmac_t* reg, uint16_t value){ + reg->MDIO_SSGEN_SETUP = (reg->MDIO_SSGEN_SETUP & ~(0xfffU << 
0)) | (value << 0); +} + +//ETHMAC_MDIO_SSGEN_HOLD +static inline uint32_t get_ethmac_mdio_ssgen_hold(volatile ethmac_t* reg){ + return reg->MDIO_SSGEN_HOLD; +} +static inline void set_ethmac_mdio_ssgen_hold(volatile ethmac_t* reg, uint32_t value){ + reg->MDIO_SSGEN_HOLD = value; +} +static inline uint32_t get_ethmac_mdio_ssgen_hold_hold_cycles(volatile ethmac_t* reg){ + return (reg->MDIO_SSGEN_HOLD >> 0) & 0xfff; +} +static inline void set_ethmac_mdio_ssgen_hold_hold_cycles(volatile ethmac_t* reg, uint16_t value){ + reg->MDIO_SSGEN_HOLD = (reg->MDIO_SSGEN_HOLD & ~(0xfffU << 0)) | (value << 0); +} + +//ETHMAC_MDIO_SSGEN_DISABLE +static inline uint32_t get_ethmac_mdio_ssgen_disable(volatile ethmac_t* reg){ + return reg->MDIO_SSGEN_DISABLE; +} +static inline void set_ethmac_mdio_ssgen_disable(volatile ethmac_t* reg, uint32_t value){ + reg->MDIO_SSGEN_DISABLE = value; +} +static inline uint32_t get_ethmac_mdio_ssgen_disable_disable_cycles(volatile ethmac_t* reg){ + return (reg->MDIO_SSGEN_DISABLE >> 0) & 0xfff; +} +static inline void set_ethmac_mdio_ssgen_disable_disable_cycles(volatile ethmac_t* reg, uint16_t value){ + reg->MDIO_SSGEN_DISABLE = (reg->MDIO_SSGEN_DISABLE & ~(0xfffU << 0)) | (value << 0); +} + +//ETHMAC_MDIO_SSGEN_ACTIVE_HIGH +static inline uint32_t get_ethmac_mdio_ssgen_active_high(volatile ethmac_t* reg){ + return reg->MDIO_SSGEN_ACTIVE_HIGH; +} +static inline void set_ethmac_mdio_ssgen_active_high(volatile ethmac_t* reg, uint32_t value){ + reg->MDIO_SSGEN_ACTIVE_HIGH = value; +} +static inline uint32_t get_ethmac_mdio_ssgen_active_high_spi_cs_active_high(volatile ethmac_t* reg){ + return (reg->MDIO_SSGEN_ACTIVE_HIGH >> 0) & 0x1; +} +static inline void set_ethmac_mdio_ssgen_active_high_spi_cs_active_high(volatile ethmac_t* reg, uint8_t value){ + reg->MDIO_SSGEN_ACTIVE_HIGH = (reg->MDIO_SSGEN_ACTIVE_HIGH & ~(0x1U << 0)) | (value << 0); +} + +//ETHMAC_MDIO_DIRECT_WRITE +static inline void set_ethmac_mdio_direct_write(volatile ethmac_t* reg, uint32_t 
value){ + reg->MDIO_DIRECT_WRITE = value; +} +static inline void set_ethmac_mdio_direct_write_data(volatile ethmac_t* reg, uint8_t value){ + reg->MDIO_DIRECT_WRITE = (reg->MDIO_DIRECT_WRITE & ~(0xffU << 0)) | (value << 0); +} + +//ETHMAC_MDIO_DIRECT_READ_WRITE +static inline void set_ethmac_mdio_direct_read_write(volatile ethmac_t* reg, uint32_t value){ + reg->MDIO_DIRECT_READ_WRITE = value; +} +static inline void set_ethmac_mdio_direct_read_write_data(volatile ethmac_t* reg, uint8_t value){ + reg->MDIO_DIRECT_READ_WRITE = (reg->MDIO_DIRECT_READ_WRITE & ~(0xffU << 0)) | (value << 0); +} + +//ETHMAC_MDIO_DIRECT_READ +static inline uint32_t get_ethmac_mdio_direct_read(volatile ethmac_t* reg){ + return reg->MDIO_DIRECT_READ; +} +static inline uint32_t get_ethmac_mdio_direct_read_data(volatile ethmac_t* reg){ + return (reg->MDIO_DIRECT_READ >> 0) & 0xff; +} + +#endif /* _BSP_ETHMAC_H */ \ No newline at end of file diff --git a/port/moonlight/gen/uart.h b/port/moonlight/gen/uart.h new file mode 100644 index 0000000..1d344d1 --- /dev/null +++ b/port/moonlight/gen/uart.h @@ -0,0 +1,176 @@ +/* + * Copyright (c) 2023 - 2024 MINRES Technologies GmbH + * + * SPDX-License-Identifier: Apache-2.0 + * + * Generated at 2024-08-02 08:46:07 UTC + * by peakrdl_mnrs version 1.2.7 + */ + +#ifndef _BSP_UART_H +#define _BSP_UART_H + +#include + +typedef struct { + volatile uint32_t RX_TX_REG; + volatile uint32_t INT_CTRL_REG; + volatile uint32_t CLK_DIVIDER_REG; + volatile uint32_t FRAME_CONFIG_REG; + volatile uint32_t STATUS_REG; +} uart_t; + +#define UART_RX_TX_REG_DATA_OFFS 0 +#define UART_RX_TX_REG_DATA_MASK 0xff +#define UART_RX_TX_REG_DATA(V) ((V & UART_RX_TX_REG_DATA_MASK) << UART_RX_TX_REG_DATA_OFFS) + +#define UART_RX_TX_REG_RX_AVAIL_OFFS 14 +#define UART_RX_TX_REG_RX_AVAIL_MASK 0x1 +#define UART_RX_TX_REG_RX_AVAIL(V) ((V & UART_RX_TX_REG_RX_AVAIL_MASK) << UART_RX_TX_REG_RX_AVAIL_OFFS) + +#define UART_RX_TX_REG_TX_FREE_OFFS 15 +#define UART_RX_TX_REG_TX_FREE_MASK 0x1 +#define 
/* --- UART register bit-field helpers -------------------------------------
 * The macro argument is fully parenthesized so that expressions such as
 * NAME(a | b) expand correctly (the original `(V & MASK)` bound `&`
 * tighter than lower-precedence operators in the argument — CERT PRE01-C).
 */
#define UART_RX_TX_REG_TX_FREE_OFFS 15
#define UART_RX_TX_REG_TX_FREE_MASK 0x1
#define UART_RX_TX_REG_TX_FREE(V) (((V) & UART_RX_TX_REG_TX_FREE_MASK) << UART_RX_TX_REG_TX_FREE_OFFS)

#define UART_RX_TX_REG_TX_EMPTY_OFFS 16
#define UART_RX_TX_REG_TX_EMPTY_MASK 0x1
#define UART_RX_TX_REG_TX_EMPTY(V) (((V) & UART_RX_TX_REG_TX_EMPTY_MASK) << UART_RX_TX_REG_TX_EMPTY_OFFS)

/* INT_CTRL_REG: enables in bits 0-2, pendings in bits 8-10 */
#define UART_INT_CTRL_REG_WRITE_INTR_ENABLE_OFFS 0
#define UART_INT_CTRL_REG_WRITE_INTR_ENABLE_MASK 0x1
#define UART_INT_CTRL_REG_WRITE_INTR_ENABLE(V) (((V) & UART_INT_CTRL_REG_WRITE_INTR_ENABLE_MASK) << UART_INT_CTRL_REG_WRITE_INTR_ENABLE_OFFS)

#define UART_INT_CTRL_REG_READ_INTR_ENABLE_OFFS 1
#define UART_INT_CTRL_REG_READ_INTR_ENABLE_MASK 0x1
#define UART_INT_CTRL_REG_READ_INTR_ENABLE(V) (((V) & UART_INT_CTRL_REG_READ_INTR_ENABLE_MASK) << UART_INT_CTRL_REG_READ_INTR_ENABLE_OFFS)

#define UART_INT_CTRL_REG_BREAK_INTR_ENABLE_OFFS 2
#define UART_INT_CTRL_REG_BREAK_INTR_ENABLE_MASK 0x1
#define UART_INT_CTRL_REG_BREAK_INTR_ENABLE(V) (((V) & UART_INT_CTRL_REG_BREAK_INTR_ENABLE_MASK) << UART_INT_CTRL_REG_BREAK_INTR_ENABLE_OFFS)

#define UART_INT_CTRL_REG_WRITE_INTR_PEND_OFFS 8
#define UART_INT_CTRL_REG_WRITE_INTR_PEND_MASK 0x1
#define UART_INT_CTRL_REG_WRITE_INTR_PEND(V) (((V) & UART_INT_CTRL_REG_WRITE_INTR_PEND_MASK) << UART_INT_CTRL_REG_WRITE_INTR_PEND_OFFS)

#define UART_INT_CTRL_REG_READ_INTR_PEND_OFFS 9
#define UART_INT_CTRL_REG_READ_INTR_PEND_MASK 0x1
#define UART_INT_CTRL_REG_READ_INTR_PEND(V) (((V) & UART_INT_CTRL_REG_READ_INTR_PEND_MASK) << UART_INT_CTRL_REG_READ_INTR_PEND_OFFS)

#define UART_INT_CTRL_REG_BREAK_INTR_PEND_OFFS 10
#define UART_INT_CTRL_REG_BREAK_INTR_PEND_MASK 0x1
#define UART_INT_CTRL_REG_BREAK_INTR_PEND(V) (((V) & UART_INT_CTRL_REG_BREAK_INTR_PEND_MASK) << UART_INT_CTRL_REG_BREAK_INTR_PEND_OFFS)

/* CLK_DIVIDER_REG: 20-bit baud-rate divider */
#define UART_CLK_DIVIDER_REG_OFFS 0
#define UART_CLK_DIVIDER_REG_MASK 0xfffff
#define UART_CLK_DIVIDER_REG(V) (((V) & UART_CLK_DIVIDER_REG_MASK) << UART_CLK_DIVIDER_REG_OFFS)

/* FRAME_CONFIG_REG fields */
#define UART_FRAME_CONFIG_REG_DATA_LENGTH_OFFS 0
#define UART_FRAME_CONFIG_REG_DATA_LENGTH_MASK 0x7
#define UART_FRAME_CONFIG_REG_DATA_LENGTH(V) (((V) & UART_FRAME_CONFIG_REG_DATA_LENGTH_MASK) << UART_FRAME_CONFIG_REG_DATA_LENGTH_OFFS)
UART_FRAME_CONFIG_REG_DATA_LENGTH_MASK 0x7 +#define UART_FRAME_CONFIG_REG_DATA_LENGTH(V) ((V & UART_FRAME_CONFIG_REG_DATA_LENGTH_MASK) << UART_FRAME_CONFIG_REG_DATA_LENGTH_OFFS) + +#define UART_FRAME_CONFIG_REG_PARITY_OFFS 3 +#define UART_FRAME_CONFIG_REG_PARITY_MASK 0x3 +#define UART_FRAME_CONFIG_REG_PARITY(V) ((V & UART_FRAME_CONFIG_REG_PARITY_MASK) << UART_FRAME_CONFIG_REG_PARITY_OFFS) + +#define UART_FRAME_CONFIG_REG_STOP_BIT_OFFS 5 +#define UART_FRAME_CONFIG_REG_STOP_BIT_MASK 0x1 +#define UART_FRAME_CONFIG_REG_STOP_BIT(V) ((V & UART_FRAME_CONFIG_REG_STOP_BIT_MASK) << UART_FRAME_CONFIG_REG_STOP_BIT_OFFS) + +#define UART_STATUS_REG_READ_ERROR_OFFS 0 +#define UART_STATUS_REG_READ_ERROR_MASK 0x1 +#define UART_STATUS_REG_READ_ERROR(V) ((V & UART_STATUS_REG_READ_ERROR_MASK) << UART_STATUS_REG_READ_ERROR_OFFS) + +#define UART_STATUS_REG_STALL_OFFS 1 +#define UART_STATUS_REG_STALL_MASK 0x1 +#define UART_STATUS_REG_STALL(V) ((V & UART_STATUS_REG_STALL_MASK) << UART_STATUS_REG_STALL_OFFS) + +#define UART_STATUS_REG_BREAK_LINE_OFFS 8 +#define UART_STATUS_REG_BREAK_LINE_MASK 0x1 +#define UART_STATUS_REG_BREAK_LINE(V) ((V & UART_STATUS_REG_BREAK_LINE_MASK) << UART_STATUS_REG_BREAK_LINE_OFFS) + +#define UART_STATUS_REG_BREAK_DETECTED_OFFS 9 +#define UART_STATUS_REG_BREAK_DETECTED_MASK 0x1 +#define UART_STATUS_REG_BREAK_DETECTED(V) ((V & UART_STATUS_REG_BREAK_DETECTED_MASK) << UART_STATUS_REG_BREAK_DETECTED_OFFS) + +#define UART_STATUS_REG_SET_BREAK_OFFS 10 +#define UART_STATUS_REG_SET_BREAK_MASK 0x1 +#define UART_STATUS_REG_SET_BREAK(V) ((V & UART_STATUS_REG_SET_BREAK_MASK) << UART_STATUS_REG_SET_BREAK_OFFS) + +#define UART_STATUS_REG_CLEAR_BREAK_OFFS 11 +#define UART_STATUS_REG_CLEAR_BREAK_MASK 0x1 +#define UART_STATUS_REG_CLEAR_BREAK(V) ((V & UART_STATUS_REG_CLEAR_BREAK_MASK) << UART_STATUS_REG_CLEAR_BREAK_OFFS) + +// UART_RX_TX_REG +static inline uint32_t get_uart_rx_tx_reg(volatile uart_t* reg) { return reg->RX_TX_REG; } +static inline void set_uart_rx_tx_reg(volatile 
uart_t* reg, uint32_t value) { reg->RX_TX_REG = value; } +static inline uint32_t get_uart_rx_tx_reg_data(volatile uart_t* reg) { return (reg->RX_TX_REG >> 0) & 0xff; } +static inline void set_uart_rx_tx_reg_data(volatile uart_t* reg, uint8_t value) { + reg->RX_TX_REG = (reg->RX_TX_REG & ~(0xffU << 0)) | (value << 0); +} +static inline uint32_t get_uart_rx_tx_reg_rx_avail(volatile uart_t* reg) { return (reg->RX_TX_REG >> 14) & 0x1; } +static inline uint32_t get_uart_rx_tx_reg_tx_free(volatile uart_t* reg) { return (reg->RX_TX_REG >> 15) & 0x1; } +static inline uint32_t get_uart_rx_tx_reg_tx_empty(volatile uart_t* reg) { return (reg->RX_TX_REG >> 16) & 0x1; } + +// UART_INT_CTRL_REG +static inline uint32_t get_uart_int_ctrl_reg(volatile uart_t* reg) { return reg->INT_CTRL_REG; } +static inline void set_uart_int_ctrl_reg(volatile uart_t* reg, uint32_t value) { reg->INT_CTRL_REG = value; } +static inline uint32_t get_uart_int_ctrl_reg_write_intr_enable(volatile uart_t* reg) { return (reg->INT_CTRL_REG >> 0) & 0x1; } +static inline void set_uart_int_ctrl_reg_write_intr_enable(volatile uart_t* reg, uint8_t value) { + reg->INT_CTRL_REG = (reg->INT_CTRL_REG & ~(0x1U << 0)) | (value << 0); +} +static inline uint32_t get_uart_int_ctrl_reg_read_intr_enable(volatile uart_t* reg) { return (reg->INT_CTRL_REG >> 1) & 0x1; } +static inline void set_uart_int_ctrl_reg_read_intr_enable(volatile uart_t* reg, uint8_t value) { + reg->INT_CTRL_REG = (reg->INT_CTRL_REG & ~(0x1U << 1)) | (value << 1); +} +static inline uint32_t get_uart_int_ctrl_reg_break_intr_enable(volatile uart_t* reg) { return (reg->INT_CTRL_REG >> 2) & 0x1; } +static inline void set_uart_int_ctrl_reg_break_intr_enable(volatile uart_t* reg, uint8_t value) { + reg->INT_CTRL_REG = (reg->INT_CTRL_REG & ~(0x1U << 2)) | (value << 2); +} +static inline uint32_t get_uart_int_ctrl_reg_write_intr_pend(volatile uart_t* reg) { return (reg->INT_CTRL_REG >> 8) & 0x1; } +static inline uint32_t 
get_uart_int_ctrl_reg_read_intr_pend(volatile uart_t* reg) { return (reg->INT_CTRL_REG >> 9) & 0x1; } +static inline uint32_t get_uart_int_ctrl_reg_break_intr_pend(volatile uart_t* reg) { return (reg->INT_CTRL_REG >> 10) & 0x1; } + +// UART_CLK_DIVIDER_REG +static inline uint32_t get_uart_clk_divider_reg(volatile uart_t* reg) { return reg->CLK_DIVIDER_REG; } +static inline void set_uart_clk_divider_reg(volatile uart_t* reg, uint32_t value) { reg->CLK_DIVIDER_REG = value; } +static inline uint32_t get_uart_clk_divider_reg_clock_divider(volatile uart_t* reg) { return (reg->CLK_DIVIDER_REG >> 0) & 0xfffff; } +static inline void set_uart_clk_divider_reg_clock_divider(volatile uart_t* reg, uint32_t value) { + reg->CLK_DIVIDER_REG = (reg->CLK_DIVIDER_REG & ~(0xfffffU << 0)) | (value << 0); +} + +// UART_FRAME_CONFIG_REG +static inline uint32_t get_uart_frame_config_reg(volatile uart_t* reg) { return reg->FRAME_CONFIG_REG; } +static inline void set_uart_frame_config_reg(volatile uart_t* reg, uint32_t value) { reg->FRAME_CONFIG_REG = value; } +static inline uint32_t get_uart_frame_config_reg_data_length(volatile uart_t* reg) { return (reg->FRAME_CONFIG_REG >> 0) & 0x7; } +static inline void set_uart_frame_config_reg_data_length(volatile uart_t* reg, uint8_t value) { + reg->FRAME_CONFIG_REG = (reg->FRAME_CONFIG_REG & ~(0x7U << 0)) | (value << 0); +} +static inline uint32_t get_uart_frame_config_reg_parity(volatile uart_t* reg) { return (reg->FRAME_CONFIG_REG >> 3) & 0x3; } +static inline void set_uart_frame_config_reg_parity(volatile uart_t* reg, uint8_t value) { + reg->FRAME_CONFIG_REG = (reg->FRAME_CONFIG_REG & ~(0x3U << 3)) | (value << 3); +} +static inline uint32_t get_uart_frame_config_reg_stop_bit(volatile uart_t* reg) { return (reg->FRAME_CONFIG_REG >> 5) & 0x1; } +static inline void set_uart_frame_config_reg_stop_bit(volatile uart_t* reg, uint8_t value) { + reg->FRAME_CONFIG_REG = (reg->FRAME_CONFIG_REG & ~(0x1U << 5)) | (value << 5); +} + +// UART_STATUS_REG 
+static inline uint32_t get_uart_status_reg(volatile uart_t* reg) { return reg->STATUS_REG; } +static inline void set_uart_status_reg(volatile uart_t* reg, uint32_t value) { reg->STATUS_REG = value; } +static inline uint32_t get_uart_status_reg_read_error(volatile uart_t* reg) { return (reg->STATUS_REG >> 0) & 0x1; } +static inline uint32_t get_uart_status_reg_stall(volatile uart_t* reg) { return (reg->STATUS_REG >> 1) & 0x1; } +static inline uint32_t get_uart_status_reg_break_line(volatile uart_t* reg) { return (reg->STATUS_REG >> 8) & 0x1; } +static inline uint32_t get_uart_status_reg_break_detected(volatile uart_t* reg) { return (reg->STATUS_REG >> 9) & 0x1; } +static inline void set_uart_status_reg_break_detected(volatile uart_t* reg, uint8_t value) { + reg->STATUS_REG = (reg->STATUS_REG & ~(0x1U << 9)) | (value << 9); +} +static inline uint32_t get_uart_status_reg_set_break(volatile uart_t* reg) { return (reg->STATUS_REG >> 10) & 0x1; } +static inline void set_uart_status_reg_set_break(volatile uart_t* reg, uint8_t value) { + reg->STATUS_REG = (reg->STATUS_REG & ~(0x1U << 10)) | (value << 10); +} +static inline uint32_t get_uart_status_reg_clear_break(volatile uart_t* reg) { return (reg->STATUS_REG >> 11) & 0x1; } +static inline void set_uart_status_reg_clear_break(volatile uart_t* reg, uint8_t value) { + reg->STATUS_REG = (reg->STATUS_REG & ~(0x1U << 11)) | (value << 11); +} + +#endif /* _BSP_UART_H */ diff --git a/port/moonlight/hwtimer.h b/port/moonlight/hwtimer.h new file mode 100644 index 0000000..f43ef94 --- /dev/null +++ b/port/moonlight/hwtimer.h @@ -0,0 +1,32 @@ + +/*************************************************************************** + * Copyright (c) 2024 Microsoft Corporation + * + * This program and the accompanying materials are made available under the + * terms of the MIT License which is available at + * https://opensource.org/licenses/MIT. 
+ * + * SPDX-License-Identifier: MIT + **************************************************************************/ + +#ifndef RISCV_HWTIMER_H +#define RISCV_HWTIMER_H + +#include "platform.h" + +#define TICKNUM_PER_SECOND 32768 +#define TICKNUM_PER_TIMER (TICKNUM_PER_SECOND / 1000) + +static inline int hwtimer_init(void) { + uint64_t time = get_aclint_mtime(aclint); + set_aclint_mtimecmp(aclint, time + TICKNUM_PER_TIMER); + return 0; +} + +static inline int hwtimer_handler(void) { + uint64_t time = get_aclint_mtime(aclint); + set_aclint_mtimecmp(aclint, time + TICKNUM_PER_TIMER); + return 0; +} + +#endif diff --git a/port/moonlight/mnrs_network_driver.c b/port/moonlight/mnrs_network_driver.c new file mode 100644 index 0000000..140405b --- /dev/null +++ b/port/moonlight/mnrs_network_driver.c @@ -0,0 +1,1067 @@ +/*************************************************************************** + * Copyright (c) 2024 Microsoft Corporation + * Copyright (c) 2025-present Eclipse ThreadX Contributors + * + * This program and the accompanying materials are made available under the + * terms of the MIT License which is available at + * https://opensource.org/licenses/MIT. + * + * SPDX-License-Identifier: MIT + **************************************************************************/ + + +/**************************************************************************/ +/**************************************************************************/ +/** */ +/** NetX Component */ +/** */ +/** MINRES ETH Network */ +/** */ +/**************************************************************************/ +/**************************************************************************/ + + +/* Include necessary system files. 
*/ + +#include "gen/ethmac.h" +#include "nx_api.h" +#include "nx_link.h" +#include "platform.h" +#include "tx_port.h" +#include +#if defined(NX_DEBUG) || defined(NX_DEBUG_PACKET) +#include +#endif + +extern int register_irq_handler(unsigned irq_num, void (*handler)()); + +/* Define the Link MTU. Note this is not the same as the IP MTU. The Link MTU + includes the addition of the Physical Network header (usually Ethernet). This + should be larger than the IP instance MTU by the size of the physical header. */ +#define NX_LINK_MTU 1514 +#define NX_MAX_PACKET_SIZE 1536 + + +/* Define Ethernet address format. This is prepended to the incoming IP + and ARP/RARP messages. The frame beginning is 14 bytes, but for speed + purposes, we are going to assume there are 16 bytes free in front of the + prepend pointer and that the prepend pointer is 32-bit aligned. + + Byte Offset Size Meaning + + 0 6 Destination Ethernet Address + 6 6 Source Ethernet Address + 12 2 Ethernet Frame Type, where: + + 0x0800 -> IP Datagram + 0x0806 -> ARP Request/Reply + 0x0835 -> RARP request reply + + 42 18 Padding on ARP and RARP messages only. */ + +#define NX_ETHERNET_IP 0x0800 +#define NX_ETHERNET_ARP 0x0806 +#define NX_ETHERNET_RARP 0x8035 +#define NX_ETHERNET_IPV6 0x86DD +#define NX_ETHERNET_SIZE 14 + +/* For the ethernet driver, physical addresses are allocated starting + at the preset value and then incremented before the next allocation. */ +// Locally Administered Addresses that can be assigned by network, range is 02-00-00 to 02-00-5E +ULONG mnrs_mac_address_msw = 0x0200; +ULONG mnrs_mac_address_lsw = 0x00334450; + + +/* Define driver prototypes. 
*/ + +VOID _nx_mnrs_network_driver(NX_IP_DRIVER *driver_req_ptr); +void _nx_mnrs_network_driver_output(NX_PACKET *packet_ptr, UINT interface_instance_id); +void _nx_mnrs_network_driver_receive(NX_IP *ip_ptr, NX_PACKET *packet_ptr, UINT interface_instance_id); +UINT _nx_mnrs_eth_send_packet(NX_PACKET *packet_ptr, volatile ethmac_t* ethmac); +VOID _nx_mnrs_eth_recv_packet(UINT id, volatile ethmac_t* ethmac); +VOID _nx_mnrs_eth_recv_packet_eth0(VOID); +VOID _nx_mnrs_eth_recv_packet_eth1(VOID); + +#define NX_MAX_MNRS_INTERFACES 4 +#define NX_MNRS_DRIVER_MAX_MCAST_ADDRESSES 3 +typedef struct MAC_ADDRESS_STRUCT +{ + ULONG nx_mac_address_msw; + ULONG nx_mac_address_lsw; +} MAC_ADDRESS; + + +/* Define an application-specific data structure that holds internal + data (such as the state information) of a device driver. + + The example below applies to the MNRS ETH driver. + User shall replace its content with information related to + the actual driver being used. */ +typedef struct _nx_mnrs_network_driver_instance_type +{ + UINT nx_mnrs_network_driver_in_use; + + UINT nx_mnrs_network_driver_id; + + NX_INTERFACE *nx_mnrs_driver_interface_ptr; + + NX_IP *nx_mnrs_driver_ip_ptr; + + MAC_ADDRESS nx_mnrs_driver_mac_address; + + MAC_ADDRESS nx_mnrs_driver_mcast_address[NX_MNRS_DRIVER_MAX_MCAST_ADDRESSES]; + + volatile ethmac_t* ethmac; +} _nx_mnrs_network_driver_instance_type; + + +/* In this example, there are four instances of the MNRS ETH driver. + Therefore an array of four driver instances are created to keep track of + the interface information of each driver. 
*/ +static _nx_mnrs_network_driver_instance_type nx_mnrs_driver[NX_MAX_MNRS_INTERFACES]; + + +/**************************************************************************/ +/* */ +/* FUNCTION RELEASE */ +/* */ +/* _nx_mnrs_network_driver PORTABLE C */ +/* 6.4.3 */ +/* AUTHOR */ +/* */ +/* Yuxin Zhou, Microsoft Corporation */ +/* */ +/* DESCRIPTION */ +/* */ +/* This function acts as a virtual network for testing the NetX source */ +/* and driver concepts. User application may use this routine as */ +/* a template for the actual network driver. Note that this driver */ +/* simulates Ethernet operation. Some of the parameters don't apply */ +/* for non-Ethernet interfaces. */ +/* */ +/* INPUT */ +/* */ +/* ip_ptr Pointer to IP protocol block */ +/* */ +/* OUTPUT */ +/* */ +/* None */ +/* */ +/* CALLS */ +/* */ +/* _nx_mnrs_network_driver_output Send physical packet out */ +/* */ +/* CALLED BY */ +/* */ +/* NetX IP processing */ +/* */ +/* RELEASE HISTORY */ +/* */ +/* DATE NAME DESCRIPTION */ +/* */ +/* 05-19-2020 Yuxin Zhou Initial Version 6.0 */ +/* 09-30-2020 Yuxin Zhou Modified comment(s), */ +/* resulting in version 6.1 */ +/* 10-15-2021 Yuxin Zhou Modified comment(s), and */ +/* added sample of returning */ +/* link's interface type, */ +/* resulting in version 6.1.9 */ +/* 12-31-2023 Yajun Xia Modified comment(s), */ +/* supported VLAN and generic */ +/* link layer, */ +/* resulting in version 6.4.0 */ +/* */ +/**************************************************************************/ +VOID _nx_mnrs_network_driver(NX_IP_DRIVER *driver_req_ptr) +{ +UINT i = 0; +NX_IP *ip_ptr; +NX_PACKET *packet_ptr; +NX_INTERFACE *interface_ptr; +UINT interface_index; +USHORT ether_type; +#ifndef NX_ENABLE_VLAN +ULONG *ethernet_frame_ptr; +#endif /* NX_ENABLE_VLAN */ + + /* Setup the IP pointer from the driver request. */ + ip_ptr = driver_req_ptr -> nx_ip_driver_ptr; + /* Default to successful return. 
*/ + driver_req_ptr -> nx_ip_driver_status = NX_SUCCESS; +#ifdef NX_ENABLE_VLAN + /* Let link layer to preprocess the driver request and return actual interface. */ + if (nx_link_driver_request_preprocess(driver_req_ptr, &interface_ptr) != NX_SUCCESS) + { + return; + } +#else + /* Setup interface pointer. */ + interface_ptr = driver_req_ptr -> nx_ip_driver_interface; +#endif /* NX_ENABLE_VLAN */ + /* Obtain the index number of the network interface. */ + interface_index = interface_ptr -> nx_interface_index; + /* Find out the driver interface if the driver command is not ATTACH. */ + if (driver_req_ptr -> nx_ip_driver_command != NX_LINK_INTERFACE_ATTACH) + { + for (i = 0; i < NX_MAX_MNRS_INTERFACES; i++) + { + if (nx_mnrs_driver[i].nx_mnrs_network_driver_in_use == 0) + { + continue; + } + if (nx_mnrs_driver[i].nx_mnrs_driver_ip_ptr != ip_ptr) + { + continue; + } + if (nx_mnrs_driver[i].nx_mnrs_driver_interface_ptr == interface_ptr) + { + break; + } + } + if (i == NX_MAX_MNRS_INTERFACES) + { + driver_req_ptr -> nx_ip_driver_status = NX_INVALID_INTERFACE; + return; + } + } + /* Process according to the driver request type in the IP control + block. */ + switch (driver_req_ptr -> nx_ip_driver_command) + { + case NX_LINK_INTERFACE_ATTACH: + { + /* Find an available driver instance to attach the interface. */ + for (i = 0; i < NX_MAX_MNRS_INTERFACES; i++) + { + if (nx_mnrs_driver[i].nx_mnrs_network_driver_in_use == 0) + { + break; + } + } + /* An available entry is found. */ + if (i < NX_MAX_MNRS_INTERFACES) + { + /* Set the IN USE flag.*/ + nx_mnrs_driver[i].nx_mnrs_network_driver_in_use = 1; + nx_mnrs_driver[i].nx_mnrs_network_driver_id = i; + /* Record the interface attached to the IP instance. */ + nx_mnrs_driver[i].nx_mnrs_driver_interface_ptr = interface_ptr; + /* Record the IP instance. 
*/ + nx_mnrs_driver[i].nx_mnrs_driver_ip_ptr = ip_ptr; + /* store the mac address */ + nx_mnrs_driver[i].nx_mnrs_driver_mac_address.nx_mac_address_msw = mnrs_mac_address_msw; + nx_mnrs_driver[i].nx_mnrs_driver_mac_address.nx_mac_address_lsw = mnrs_mac_address_lsw + i; + /* Record the peripheral address */ + switch(i) { + case 0: + nx_mnrs_driver[i].ethmac = ethmac0; + break; +#if defined(ethmac1) + case 1: + nx_mnrs_driver[i].ethmac = ethmac1; + break; +#endif +#if defined(ethmac2) + case 2: + nx_mnrs_driver[i].ethmac = ethmac2; +#endif +#if defined(ethmac3) + case 2: + nx_mnrs_driver[i].ethmac = ethmac3; +#endif + default: + } + } + else + { + driver_req_ptr -> nx_ip_driver_status = NX_INVALID_INTERFACE; + } + break; + } + case NX_LINK_INTERFACE_DETACH: + { + /* Zero out the driver instance. */ + memset(&(nx_mnrs_driver[i]), 0, sizeof(_nx_mnrs_network_driver_instance_type)); + break; + } + case NX_LINK_INITIALIZE: + { + /* Device driver shall initialize the Ethernet Controller here. */ +#ifdef NX_DEBUG + printf("NetX MNRS ETH Driver Initialization - %s\n", ip_ptr -> nx_ip_name); + printf(" IP Address =%08X\n", ip_ptr -> nx_ip_address); +#endif + // enable TX & RX path + set_ethmac_mac_ctrl(nx_mnrs_driver[i].ethmac, + get_ethmac_mac_ctrl(nx_mnrs_driver[i].ethmac) & ~(ETHMAC_MAC_CTRL_TX_FLUSH(1)| ETHMAC_MAC_CTRL_RX_FLUSH(1))); + register_irq_handler(ETH0_IRQ, &_nx_mnrs_eth_recv_packet_eth0); + register_irq_handler(ETH1_IRQ, &_nx_mnrs_eth_recv_packet_eth1); + csr_read_set_bits_mie(1< nx_interface_link_up = NX_TRUE; +#ifdef NX_DEBUG + printf("NetX MNRS ETH Driver Link Enabled - %s\n", ip_ptr -> nx_ip_name); +#endif + break; + } + case NX_LINK_DISABLE: + { + /* Process driver link disable. This command indicates the IP layer + is not going to transmit any IP datagrams, nor does it expect any + IP datagrams from the interface. Therefore after processing this command, + the device driver shall not send any incoming packets to the IP + layer. 
Optionally the device driver may turn off the interface. */ + // set_ethmac_int_ctrl_reg_tx_free_intr_enable(nx_mnrs_driver[i].ethmac, 0); + set_ethmac_mac_intr_rx_data_avail_intr_enable(nx_mnrs_driver[i].ethmac, 0); + /* In the MNRS ETH driver, just clear the enabled flag. */ + interface_ptr -> nx_interface_link_up = NX_FALSE; +#ifdef NX_DEBUG + printf("NetX MNRS ETH Driver Link Disabled - %s\n", ip_ptr -> nx_ip_name); +#endif + break; + } + case NX_LINK_PACKET_SEND: + case NX_LINK_PACKET_BROADCAST: + case NX_LINK_ARP_SEND: + case NX_LINK_ARP_RESPONSE_SEND: + case NX_LINK_RARP_SEND: + { + /* + The IP stack sends down a data packet for transmission. + The device driver needs to prepend a MAC header, and fill in the + Ethernet frame type (assuming Ethernet protocol for network transmission) + based on the type of packet being transmitted. + + The following sequence illustrates this process. + */ + /* Place the ethernet frame at the front of the packet. */ + packet_ptr = driver_req_ptr -> nx_ip_driver_packet; + /* Get Ethernet type. */ + if (driver_req_ptr -> nx_ip_driver_command == NX_LINK_ARP_SEND) + { + ether_type = NX_ETHERNET_ARP; + } + else if (driver_req_ptr -> nx_ip_driver_command == NX_LINK_ARP_RESPONSE_SEND) + { + ether_type = NX_ETHERNET_ARP; + } + else if (driver_req_ptr -> nx_ip_driver_command == NX_LINK_RARP_SEND) + { + ether_type = NX_ETHERNET_RARP; + } + else if (packet_ptr -> nx_packet_ip_version == 4) + { + ether_type = NX_ETHERNET_IP; + } + else + { + ether_type = NX_ETHERNET_IPV6; + } +#ifdef NX_ENABLE_VLAN + /* Add Ethernet header. */ + if (nx_link_ethernet_header_add(ip_ptr, + driver_req_ptr -> nx_ip_driver_interface -> nx_interface_index, packet_ptr, + driver_req_ptr -> nx_ip_driver_physical_address_msw, + driver_req_ptr -> nx_ip_driver_physical_address_lsw, + (UINT)ether_type)) + { + /* Release the packet. */ + nx_packet_transmit_release(packet_ptr); + break; + } +#else + /* Adjust the prepend pointer. 
*/ + packet_ptr -> nx_packet_prepend_ptr = packet_ptr -> nx_packet_prepend_ptr - NX_ETHERNET_SIZE; + /* Adjust the packet length. */ + packet_ptr -> nx_packet_length = packet_ptr -> nx_packet_length + NX_ETHERNET_SIZE; + /* Setup the ethernet frame pointer to build the ethernet frame. Backup another 2 + bytes to get 32-bit word alignment. */ + /*lint -e{927} -e{826} suppress cast of pointer to pointer, since it is necessary */ + ethernet_frame_ptr = (ULONG *)(packet_ptr -> nx_packet_prepend_ptr - 2); + /* Build the ethernet frame. */ + *ethernet_frame_ptr = driver_req_ptr -> nx_ip_driver_physical_address_msw; + *(ethernet_frame_ptr + 1) = driver_req_ptr -> nx_ip_driver_physical_address_lsw; + *(ethernet_frame_ptr + 2) = (interface_ptr -> nx_interface_physical_address_msw << 16) | + (interface_ptr -> nx_interface_physical_address_lsw >> 16); + *(ethernet_frame_ptr + 3) = (interface_ptr -> nx_interface_physical_address_lsw << 16) | ether_type; + /* Endian swapping if NX_LITTLE_ENDIAN is defined. */ + NX_CHANGE_ULONG_ENDIAN(*(ethernet_frame_ptr)); + NX_CHANGE_ULONG_ENDIAN(*(ethernet_frame_ptr + 1)); + NX_CHANGE_ULONG_ENDIAN(*(ethernet_frame_ptr + 2)); + NX_CHANGE_ULONG_ENDIAN(*(ethernet_frame_ptr + 3)); +#endif /* NX_ENABLE_VLAN */ +#ifdef NX_DEBUG_PACKET + printf("NetX MNRS ETH Driver Packet Send - %s\n", ip_ptr -> nx_ip_name); +#endif + /* At this point, the packet is a complete Ethernet frame, ready to be transmitted. + The driver shall call the actual Ethernet transmit routine and put the packet + on the wire. + + In this example, the MNRS ETH network transmit routine is called. */ + _nx_mnrs_network_driver_output(packet_ptr, i); + break; + } +#ifdef NX_ENABLE_VLAN + case NX_LINK_RAW_PACKET_SEND: + { + /* Send raw packet out directly. 
*/ + _nx_mnrs_network_driver_output(driver_req_ptr -> nx_ip_driver_packet, i); + break; + } +#endif /* NX_ENABLE_VLAN */ + case NX_LINK_MULTICAST_JOIN: + { + UINT mcast_index; + /* The IP layer issues this command to join a multicast group. Note that + multicast operation is required for IPv6. + + On a typically Ethernet controller, the driver computes a hash value based + on MAC address, and programs the hash table. + + It is likely the driver also needs to maintain an internal MAC address table. + Later if a multicast address is removed, the driver needs + to reprogram the hash table based on the remaining multicast MAC addresses. */ + + /* The following procedure only applies to our MNRS ETH network driver, which manages + multicast MAC addresses by a simple look up table. */ + for (mcast_index = 0; mcast_index < NX_MNRS_DRIVER_MAX_MCAST_ADDRESSES; mcast_index++) + { + if (nx_mnrs_driver[i].nx_mnrs_driver_mcast_address[mcast_index].nx_mac_address_msw == 0 && + nx_mnrs_driver[i].nx_mnrs_driver_mcast_address[mcast_index].nx_mac_address_lsw == 0) + { + nx_mnrs_driver[i].nx_mnrs_driver_mcast_address[mcast_index].nx_mac_address_msw = driver_req_ptr -> nx_ip_driver_physical_address_msw; + nx_mnrs_driver[i].nx_mnrs_driver_mcast_address[mcast_index].nx_mac_address_lsw = driver_req_ptr -> nx_ip_driver_physical_address_lsw; + break; + } + } + if (mcast_index == NX_MNRS_DRIVER_MAX_MCAST_ADDRESSES) + { + driver_req_ptr -> nx_ip_driver_status = NX_NO_MORE_ENTRIES; + } + break; + } + case NX_LINK_MULTICAST_LEAVE: + { + UINT mcast_index; + /* The IP layer issues this command to remove a multicast MAC address from the + receiving list. A device driver shall properly remove the multicast address + from the hash table, so the hardware does not receive such traffic. Note that + in order to reprogram the hash table, the device driver may have to keep track of + current active multicast MAC addresses. 
*/ + + /* The following procedure only applies to our MNRS ETH network driver, which manages + multicast MAC addresses by a simple look up table. */ + for (mcast_index = 0; mcast_index < NX_MNRS_DRIVER_MAX_MCAST_ADDRESSES; mcast_index++) + { + if (nx_mnrs_driver[i].nx_mnrs_driver_mcast_address[mcast_index].nx_mac_address_msw == driver_req_ptr -> nx_ip_driver_physical_address_msw && + nx_mnrs_driver[i].nx_mnrs_driver_mcast_address[mcast_index].nx_mac_address_lsw == driver_req_ptr -> nx_ip_driver_physical_address_lsw) + { + nx_mnrs_driver[i].nx_mnrs_driver_mcast_address[mcast_index].nx_mac_address_msw = 0; + nx_mnrs_driver[i].nx_mnrs_driver_mcast_address[mcast_index].nx_mac_address_lsw = 0; + break; + } + } + if (mcast_index == NX_MNRS_DRIVER_MAX_MCAST_ADDRESSES) + { + driver_req_ptr -> nx_ip_driver_status = NX_ENTRY_NOT_FOUND; + } + break; + } + case NX_LINK_GET_STATUS: + { + /* Return the link status in the supplied return pointer. */ + *(driver_req_ptr -> nx_ip_driver_return_ptr) = ip_ptr -> nx_ip_interface[0].nx_interface_link_up; + break; + } + case NX_LINK_GET_SPEED: + { + /* Return the link's line speed in the supplied return pointer. Unsupported feature. */ + *(driver_req_ptr -> nx_ip_driver_return_ptr) = 0; + break; + } + case NX_LINK_GET_DUPLEX_TYPE: + { + /* Return the link's line speed in the supplied return pointer. Unsupported feature. */ + *(driver_req_ptr -> nx_ip_driver_return_ptr) = 0; + break; + } + case NX_LINK_GET_ERROR_COUNT: + { + /* Return the link's line speed in the supplied return pointer. Unsupported feature. */ + *(driver_req_ptr -> nx_ip_driver_return_ptr) = 0; + break; + } + case NX_LINK_GET_RX_COUNT: + { + /* Return the link's line speed in the supplied return pointer. Unsupported feature. */ + *(driver_req_ptr -> nx_ip_driver_return_ptr) = 0; + break; + } + case NX_LINK_GET_TX_COUNT: + { + /* Return the link's line speed in the supplied return pointer. Unsupported feature. 
*/ + *(driver_req_ptr -> nx_ip_driver_return_ptr) = 0; + break; + } + case NX_LINK_GET_ALLOC_ERRORS: + { + /* Return the link's line speed in the supplied return pointer. Unsupported feature. */ + *(driver_req_ptr -> nx_ip_driver_return_ptr) = 0; + break; + } + case NX_LINK_GET_INTERFACE_TYPE: + { + /* Return the link's interface type in the supplied return pointer. Unsupported feature. */ + *(driver_req_ptr -> nx_ip_driver_return_ptr) = NX_INTERFACE_TYPE_UNKNOWN; + break; + } + case NX_LINK_DEFERRED_PROCESSING: + { + /* Driver defined deferred processing. This is typically used to defer interrupt + processing to the thread level. + + A typical use case of this command is: + On receiving an Ethernet frame, the RX ISR does not process the received frame, + but instead records such an event in its internal data structure, and issues + a notification to the IP stack (the driver sends the notification to the IP + helping thread by calling "_nx_ip_driver_deferred_processing()". When the IP stack + gets a notification of a pending driver deferred process, it calls the + driver with the NX_LINK_DEFERRED_PROCESSING command. The driver shall complete + the pending receive process. + */ + + /* The MNRS ETH driver doesn't require a deferred process so it breaks out of + the switch case. */ + break; + } + case NX_LINK_SET_PHYSICAL_ADDRESS: + { + /* Find an driver instance to attach the interface. */ + for (i = 0; i < NX_MAX_MNRS_INTERFACES; i++) + { + if (nx_mnrs_driver[i].nx_mnrs_driver_interface_ptr == interface_ptr) + { + break; + } + } + /* An available entry is found. */ + if (i < NX_MAX_MNRS_INTERFACES) + { + /* Set the physical address. 
*/ + nx_mnrs_driver[i].nx_mnrs_driver_mac_address.nx_mac_address_msw = driver_req_ptr -> nx_ip_driver_physical_address_msw; + nx_mnrs_driver[i].nx_mnrs_driver_mac_address.nx_mac_address_lsw = driver_req_ptr -> nx_ip_driver_physical_address_lsw; + } + else + { + driver_req_ptr -> nx_ip_driver_status = NX_INVALID_INTERFACE; + } + break; + } +#ifdef NX_ENABLE_INTERFACE_CAPABILITY + case NX_INTERFACE_CAPABILITY_GET: + { + /* Return the capability of the Ethernet controller speed in the supplied return pointer. Unsupported feature. */ + *(driver_req_ptr -> nx_ip_driver_return_ptr) = 0; + break; + } + case NX_INTERFACE_CAPABILITY_SET: + { + /* Set the capability of the Ethernet controller. Unsupported feature. */ + break; + } +#endif /* NX_ENABLE_INTERFACE_CAPABILITY */ + default: + /* Invalid driver request. */ + + /* Return the unhandled command status. */ + driver_req_ptr -> nx_ip_driver_status = NX_UNHANDLED_COMMAND; +#ifdef NX_DEBUG + printf("NetX MNRS ETH Driver Received invalid request - %s\n", ip_ptr -> nx_ip_name); +#endif + break; + } +} + +/**************************************************************************/ +/* */ +/* FUNCTION RELEASE */ +/* */ +/* _nx_mnrs_network_driver_output PORTABLE C */ +/* 6.4.3 */ +/* AUTHOR */ +/* */ +/* Yuxin Zhou, Microsoft Corporation */ +/* */ +/* DESCRIPTION */ +/* */ +/* This function simply sends the packet to the IP instance on the */ +/* created IP list that matches the physical destination specified in */ +/* the Ethernet packet. In a real hardware setting, this routine */ +/* would simply put the packet out on the wire. 
*/ +/* */ +/* INPUT */ +/* */ +/* packet_ptr Packet pointer */ +/* interface_instance_id ID of driver instance */ +/* */ +/* OUTPUT */ +/* */ +/* None */ +/* */ +/* CALLS */ +/* */ +/* nx_packet_copy Copy a packet */ +/* nx_packet_transmit_release Release a packet */ +/* _nx_mnrs_network_driver_receive ETH driver receive processing */ +/* */ +/* CALLED BY */ +/* */ +/* NetX IP processing */ +/* */ +/* RELEASE HISTORY */ +/* */ +/* DATE NAME DESCRIPTION */ +/* */ +/* 05-19-2020 Yuxin Zhou Initial Version 6.0 */ +/* 09-30-2020 Yuxin Zhou Modified comment(s), */ +/* resulting in version 6.1 */ +/* 12-31-2023 Yajun Xia Modified comment(s), */ +/* supported VLAN and generic */ +/* link layer, */ +/* resulting in version 6.4.0 */ +/* */ +/**************************************************************************/ +void _nx_mnrs_network_driver_output(NX_PACKET *packet_ptr, UINT interface_instance_id) +{ +UINT old_threshold = 0; + +#ifdef NX_DEBUG_PACKET +UCHAR *ptr; +UINT i, j; + + ptr = packet_ptr -> nx_packet_prepend_ptr; + printf("Ethernet Packet: "); + for (j = 0; j < 6; j++) + { + printf("%02X", *ptr++); + } + printf(" "); + for (j = 0; j < 6; j++) + { + printf("%02X", *ptr++); + } + printf(" %02X", *ptr++); + printf("%02X ", *ptr++); + + i = 0; + for (j = 0; j < (packet_ptr -> nx_packet_length - NX_ETHERNET_SIZE); j++) + { + printf("%02X", *ptr++); + i++; + if (i > 3) + { + i = 0; + printf(" "); + } + } + printf("\n"); +#endif + + /* Disable preemption. */ + tx_thread_preemption_change(tx_thread_identify(), 0, &old_threshold); + _nx_mnrs_eth_send_packet(packet_ptr, nx_mnrs_driver[interface_instance_id].ethmac); +#ifdef NX_ENABLE_VLAN + /* Release the packet. */ + nx_link_packet_transmitted(nx_mnrs_driver[interface_instance_id].nx_mnrs_driver_ip_ptr, + nx_mnrs_driver[interface_instance_id].nx_mnrs_driver_interface_ptr -> nx_interface_index, + packet_ptr, NX_NULL); +#else + /* Remove the Ethernet header. 
In real hardware environments, this is typically + done after a transmit complete interrupt. */ + packet_ptr -> nx_packet_prepend_ptr = packet_ptr -> nx_packet_prepend_ptr + NX_ETHERNET_SIZE; + /* Adjust the packet length. */ + packet_ptr -> nx_packet_length = packet_ptr -> nx_packet_length - NX_ETHERNET_SIZE; + /* Now that the Ethernet frame has been removed, release the packet. */ + nx_packet_transmit_release(packet_ptr); +#endif /* NX_ENABLE_VLAN */ + /* Restore preemption. */ + tx_thread_preemption_change(tx_thread_identify(), old_threshold, &old_threshold); +} + + +/**************************************************************************/ +/* */ +/* FUNCTION RELEASE */ +/* */ +/* _nx_mnrs_network_driver_receive PORTABLE C */ +/* 6.4.3 */ +/* AUTHOR */ +/* */ +/* Yuxin Zhou, Microsoft Corporation */ +/* */ +/* DESCRIPTION */ +/* */ +/* This function processing incoming packets. This routine needs to be */ +/* be called from the receive packet ISR. */ +/* */ +/* INPUT */ +/* */ +/* ip_ptr Pointer to IP protocol block */ +/* packet_ptr Packet pointer */ +/* interface_instance_id The interface ID the packet is*/ +/* destined for */ +/* */ +/* OUTPUT */ +/* */ +/* None */ +/* */ +/* CALLS */ +/* */ +/* _nx_ip_packet_receive IP receive packet processing */ +/* _nx_ip_packet_deferred_receive IP deferred receive packet */ +/* processing */ +/* _nx_arp_packet_deferred_receive ARP receive processing */ +/* _nx_rarp_packet_deferred_receive RARP receive processing */ +/* nx_packet_release Packet release */ +/* */ +/* CALLED BY */ +/* */ +/* NetX IP processing */ +/* */ +/* RELEASE HISTORY */ +/* */ +/* DATE NAME DESCRIPTION */ +/* */ +/* 05-19-2020 Yuxin Zhou Initial Version 6.0 */ +/* 09-30-2020 Yuxin Zhou Modified comment(s), */ +/* resulting in version 6.1 */ +/* 12-31-2023 Yajun Xia Modified comment(s), */ +/* supported VLAN and generic */ +/* link layer, */ +/* resulting in version 6.4.0 */ +/* */ 
+/**************************************************************************/ +void _nx_mnrs_network_driver_receive(NX_IP *ip_ptr, NX_PACKET *packet_ptr, UINT interface_instance_id) +{ +UINT packet_type; +#ifdef NX_ENABLE_VLAN + nx_link_ethernet_packet_received(ip_ptr, + nx_mnrs_driver[interface_instance_id].nx_mnrs_driver_interface_ptr -> nx_interface_index, + packet_ptr, NX_NULL); +#else + + /* Pickup the packet header to determine where the packet needs to be + sent. */ + packet_type = (((UINT)(*(packet_ptr -> nx_packet_prepend_ptr + 12))) << 8) | + ((UINT)(*(packet_ptr -> nx_packet_prepend_ptr + 13))); + /* Setup interface pointer. */ + packet_ptr -> nx_packet_address.nx_packet_interface_ptr = nx_mnrs_driver[interface_instance_id].nx_mnrs_driver_interface_ptr; + /* Route the incoming packet according to its ethernet type. */ + /* The MNRS ETH driver accepts both IPv4 and IPv6 frames. */ + if ((packet_type == NX_ETHERNET_IP) || (packet_type == NX_ETHERNET_IPV6)) + { + /* Note: The length reported by some Ethernet hardware includes bytes after the packet + as well as the Ethernet header. In some cases, the actual packet length after the + Ethernet header should be derived from the length in the IP header (lower 16 bits of + the first 32-bit word). */ + + /* Clean off the Ethernet header. */ + packet_ptr -> nx_packet_prepend_ptr = packet_ptr -> nx_packet_prepend_ptr + NX_ETHERNET_SIZE; + /* Adjust the packet length. */ + packet_ptr -> nx_packet_length = packet_ptr -> nx_packet_length - NX_ETHERNET_SIZE; + /* Route to the ip receive function. */ +#ifdef NX_DEBUG_PACKET + printf("NetX MNRS ETH Driver IP Packet Receive - %s\n", ip_ptr -> nx_ip_name); +#endif +#ifdef NX_DIRECT_ISR_CALL + _nx_ip_packet_receive(ip_ptr, packet_ptr); +#else + _nx_ip_packet_deferred_receive(ip_ptr, packet_ptr); +#endif + } +#ifndef NX_DISABLE_IPV4 + else if (packet_type == NX_ETHERNET_ARP) + { + /* Clean off the Ethernet header. 
*/ + packet_ptr -> nx_packet_prepend_ptr = packet_ptr -> nx_packet_prepend_ptr + NX_ETHERNET_SIZE; + /* Adjust the packet length. */ + packet_ptr -> nx_packet_length = packet_ptr -> nx_packet_length - NX_ETHERNET_SIZE; + /* Route to the ARP receive function. */ +#ifdef NX_DEBUG + printf("NetX MNRS ETH Driver ARP Receive - %s\n", ip_ptr -> nx_ip_name); +#endif + _nx_arp_packet_deferred_receive(ip_ptr, packet_ptr); + } + else if (packet_type == NX_ETHERNET_RARP) + { + /* Clean off the Ethernet header. */ + packet_ptr -> nx_packet_prepend_ptr = packet_ptr -> nx_packet_prepend_ptr + NX_ETHERNET_SIZE; + /* Adjust the packet length. */ + packet_ptr -> nx_packet_length = packet_ptr -> nx_packet_length - NX_ETHERNET_SIZE; + /* Route to the RARP receive function. */ +#ifdef NX_DEBUG + printf("NetX MNRS ETH Driver RARP Receive - %s\n", ip_ptr -> nx_ip_name); +#endif + _nx_rarp_packet_deferred_receive(ip_ptr, packet_ptr); + } +#endif /* !NX_DISABLE_IPV4 */ + else + { + /* Invalid ethernet header... release the packet. */ + nx_packet_release(packet_ptr); + } +#endif /* NX_ENABLE_VLAN */ +} + +UINT _nx_mnrs_eth_send_packet(NX_PACKET *packet_ptr, volatile ethmac_t* ethmac){ +ULONG size = 0; +UCHAR *data; +UINT i; +ULONG buffer; +ULONG words; + + /* Make sure the data length is less than MTU. */ + if (packet_ptr -> nx_packet_length > NX_MAX_PACKET_SIZE) + { + return NX_NOT_SUCCESSFUL; + } + /* get data pointer to be transmitted. 
*/ + data = packet_ptr -> nx_packet_prepend_ptr; + size = packet_ptr -> nx_packet_length; + words = (size+3)/4; + while(get_ethmac_mac_intr_rx_data_avail_intr_enable(ethmac) == 0); + set_ethmac_mac_tx(ethmac, size*8); + for(i=0; inx_mac_address_msw && + nx_mnrs_driver[i].nx_mnrs_driver_mac_address.nx_mac_address_lsw == mac_addr -> nx_mac_address_lsw) + { + return 1; + } + return 0; +} + +UINT mcast_mac_addr_hit_idx(UINT i, MAC_ADDRESS*mac_addr) { + UINT mcast_index; + for (mcast_index = 0; mcast_index < NX_MNRS_DRIVER_MAX_MCAST_ADDRESSES; mcast_index++) + { + if (nx_mnrs_driver[i].nx_mnrs_driver_mcast_address[mcast_index].nx_mac_address_msw == mac_addr->nx_mac_address_msw && + nx_mnrs_driver[i].nx_mnrs_driver_mcast_address[mcast_index].nx_mac_address_lsw == mac_addr -> nx_mac_address_lsw) + { + return i; + } + } + return -1; +} + +static UCHAR nx_mnrs_receive_buffer[NX_MAX_PACKET_SIZE]; +VOID _nx_mnrs_eth_recv_packet(UINT id, volatile ethmac_t* ethmac){ + UINT bit_size; + UINT word_size; + UINT byte_size; + UINT i; + UINT word; + UCHAR buffer[16]; + MAC_ADDRESS to_mac; + MAC_ADDRESS from_mac; + UINT status; + NX_PACKET* packet_ptr; + NX_IP* ip_ptr; + UCHAR* data; + UINT bytes_received; + UINT address_len; + UINT packet_type; + + set_ethmac_mac_intr_rx_data_avail_intr_enable(ethmac, 0); + bit_size = get_ethmac_mac_rx(ethmac); + word_size = (bit_size+31)/32; + byte_size = (bit_size+7)/8; + if(word_size<4){ + set_ethmac_mac_intr_rx_data_avail_intr_enable(ethmac, 1); + return; + } + for( i = 0;i < 4;i++){ + while(!get_ethmac_mac_ctrl_rx_pending(ethmac)) + ; + // be carefull of unaligned accesses + word = get_ethmac_mac_rx(ethmac); + memcpy(buffer+i*sizeof(UINT), &word, sizeof(UINT)); + } + to_mac.nx_mac_address_msw=(buffer[0]<<8) + buffer[1]; + to_mac.nx_mac_address_lsw=(buffer[2]<<24) + (buffer[3]<<16) + (buffer[4]<<8) + buffer[5]; + from_mac.nx_mac_address_msw=(buffer[6]<<8) + buffer[7]; + from_mac.nx_mac_address_lsw=(buffer[8]<<24) + (buffer[9]<<16) + 
(buffer[10]<<8) + buffer[11]; + // _tx_thread_context_save(); + for(i=0; inx_ip_default_packet_pool, + &packet_ptr, word_size*4 /*NX_RECEIVE_PACKET*/, NX_NO_WAIT); + if (status) + { + packet_ptr = NX_NULL; + data = nx_mnrs_receive_buffer; + } + else if (ip_ptr -> nx_ip_default_packet_pool -> nx_packet_pool_payload_size >= (NX_LINK_MTU + 2)) + { + data = packet_ptr -> nx_packet_prepend_ptr + 2; + } + else + { + data = nx_mnrs_receive_buffer; + } + /* read the data from the rx buffer */ + memcpy(data, buffer, 16); + for( i = 4;i < word_size;i++){ + while(!get_ethmac_mac_ctrl_rx_pending(ethmac)) + ; + // be carefull of unaligned accesses + word = get_ethmac_mac_rx(ethmac); + memcpy(data+i*sizeof(UINT), &word, sizeof(UINT)); + } + if (packet_ptr == NX_NULL) + { + /* No packet available. Drop it and continue. */ + return; + } + /* Make sure IP header is 4-byte aligned. */ + packet_ptr -> nx_packet_prepend_ptr += 2; + packet_ptr -> nx_packet_append_ptr += 2; + if (data == nx_mnrs_receive_buffer) + { + /* Copy data into packet. */ + status = nx_packet_data_append(packet_ptr, (VOID *)data, bytes_received, + ip_ptr -> nx_ip_default_packet_pool, NX_NO_WAIT); + if (status) + { + nx_packet_release(packet_ptr); + // _tx_thread_context_restore(); + return; + } + } + else + { + packet_ptr -> nx_packet_length = (ULONG)bytes_received; + packet_ptr -> nx_packet_append_ptr += (ULONG)bytes_received; + } + /* Pickup the packet header to determine where the packet needs to be sent. */ + packet_type = (((UINT)(*(packet_ptr -> nx_packet_prepend_ptr + 12))) << 8) | + ((UINT)(*(packet_ptr -> nx_packet_prepend_ptr + 13))); + /* Route the incoming packet according to its ethernet type. */ + if ((packet_type == NX_ETHERNET_IP) || (packet_type == NX_ETHERNET_IPV6)) + { + /* Note: The length reported by some Ethernet hardware includes bytes after the packet + as well as the Ethernet header. 
In some cases, the actual packet length after the + Ethernet header should be derived from the length in the IP header (lower 16 bits of + the first 32-bit word). */ + + /* Clean off the Ethernet header. */ + packet_ptr -> nx_packet_prepend_ptr = packet_ptr -> nx_packet_prepend_ptr + NX_ETHERNET_SIZE; + /* Adjust the packet length. */ + packet_ptr -> nx_packet_length = packet_ptr -> nx_packet_length - NX_ETHERNET_SIZE; + _nx_ip_packet_deferred_receive(ip_ptr, packet_ptr); + } + else if (packet_type == NX_ETHERNET_ARP) + { + /* Clean off the Ethernet header. */ + packet_ptr -> nx_packet_prepend_ptr = packet_ptr -> nx_packet_prepend_ptr + NX_ETHERNET_SIZE; + /* Adjust the packet length. */ + packet_ptr -> nx_packet_length = packet_ptr -> nx_packet_length - NX_ETHERNET_SIZE; + _nx_arp_packet_deferred_receive(ip_ptr, packet_ptr); + } + else if (packet_type == NX_ETHERNET_RARP) + { + /* Clean off the Ethernet header. */ + packet_ptr -> nx_packet_prepend_ptr = packet_ptr -> nx_packet_prepend_ptr + NX_ETHERNET_SIZE; + /* Adjust the packet length. */ + packet_ptr -> nx_packet_length = packet_ptr -> nx_packet_length - NX_ETHERNET_SIZE; + _nx_rarp_packet_deferred_receive(ip_ptr, packet_ptr); + } +#ifdef NX_ENABLE_PPPOE + else if ((packet_type == NX_ETHERNET_PPPOE_DISCOVERY) || + (packet_type == NX_ETHERNET_PPPOE_SESSION)) + { + /* Clean off the Ethernet header. */ + packet_ptr -> nx_packet_prepend_ptr = packet_ptr -> nx_packet_prepend_ptr + NX_ETHERNET_SIZE; + /* Adjust the packet length. */ + packet_ptr -> nx_packet_length = packet_ptr -> nx_packet_length - NX_ETHERNET_SIZE; + /* Route to the PPPoE receive function. */ + _nx_pppoe_packet_deferred_receive(packet_ptr); + } +#endif + else + { + /* Invalid ethernet header... release the packet. 
*/ + nx_packet_release(packet_ptr); + } + } + // _tx_thread_context_restore(); + set_ethmac_mac_intr_rx_data_avail_intr_enable(ethmac, 1); +} + +VOID _nx_mnrs_eth_recv_packet_eth0(VOID){ + if(get_ethmac_mac_ctrl_rx_pending(ethmac0)) { + _nx_mnrs_eth_recv_packet(0, ethmac0); + } +} + +VOID _nx_mnrs_eth_recv_packet_eth1(VOID){ + if(get_ethmac_mac_ctrl_rx_pending(ethmac1)) { + _nx_mnrs_eth_recv_packet(1, ethmac1); + } +} + diff --git a/port/moonlight/platform.h b/port/moonlight/platform.h new file mode 100644 index 0000000..1194f7e --- /dev/null +++ b/port/moonlight/platform.h @@ -0,0 +1,24 @@ +#include "uart.h" +#include "gen/ethmac.h" +#include "aclint.h" +#include "riscv-csr.h" +#include "riscv-traps.h" + +#define PERIPH(TYPE, ADDR) ((volatile TYPE*)(ADDR)) +#define APB_BASE 0x10000000 +#define uart PERIPH(uart_t, APB_BASE + 0x01000) +#define aclint PERIPH(aclint_t, APB_BASE + 0x30000) +#define ethmac0 PERIPH(ethmac_t, 0x18000000) +#define ethmac1 PERIPH(ethmac_t, 0x18001000) + +#define UART0_IRQ 16 +#define TIMER0_IRQ0 17 +#define TIMER0_IRQ1 18 +#define QSPI_IRQ 19 +#define I2S_IRQ 20 +#define CAM_IRQ 21 +#define DMA_IRQ 22 +#define GPIO_ORQ 23 +#define ETH0_IRQ 24 +#define ETH1_IRQ 25 + diff --git a/port/moonlight/riscv-csr.h b/port/moonlight/riscv-csr.h new file mode 100644 index 0000000..e73bd78 --- /dev/null +++ b/port/moonlight/riscv-csr.h @@ -0,0 +1,3791 @@ +/* + Register access functions for RISC-V system registers. 
+ SPDX-License-Identifier: Unlicense + + https://five-embeddev.com/ + +*/ + +#ifndef RISCV_CSR_H +#define RISCV_CSR_H + +#include + +#if __riscv_xlen==32 +typedef uint32_t uint_xlen_t; +typedef uint32_t uint_csr32_t; +typedef uint32_t uint_csr64_t; +#elif __riscv_xlen==64 +typedef uint64_t uint_xlen_t; +typedef uint32_t uint_csr32_t; +typedef uint64_t uint_csr64_t; +#else +#error "Unknown XLEN" +#endif + +// Test for Zicsr extension, if relevant +#if defined(__riscv_arch_test) +#if !defined(__riscv_zicsr) +#error "-march must include zicsr to access CSRs" +#endif +#endif + +/******************************************* + * misa - MRW - Machine ISA + */ +static inline uint_xlen_t csr_read_misa(void) { + uint_xlen_t value; + __asm__ volatile ("csrr %0, misa" + : "=r" (value) /* output : register */ + : /* input : none */ + : /* clobbers: none */); + return value; +} +static inline void csr_write_misa(uint_xlen_t value) { + __asm__ volatile ("csrw misa, %0" + : /* output: none */ + : "r" (value) /* input : from register */ + : /* clobbers: none */); +} +static inline uint_xlen_t csr_read_write_misa(uint_xlen_t new_value) { + uint_xlen_t prev_value; + __asm__ volatile ("csrrw %0, misa, %1" + : "=r" (prev_value) /* output: register %0 */ + : "r" (new_value) /* input : register */ + : /* clobbers: none */); + return prev_value; +} + +/******************************************* + * mvendorid - MRO - Machine Vendor ID + */ +static inline uint32_t csr_read_mvendorid(void) { + uint_csr32_t value; + __asm__ volatile ("csrr %0, mvendorid" + : "=r" (value) /* output : register */ + : /* input : none */ + : /* clobbers: none */); + return value; +} + +/******************************************* + * marchid - MRO - Machine Architecture ID + */ +static inline uint_xlen_t csr_read_marchid(void) { + uint_xlen_t value; + __asm__ volatile ("csrr %0, marchid" + : "=r" (value) /* output : register */ + : /* input : none */ + : /* clobbers: none */); + return value; +} + 
+/******************************************* + * mimpid - MRO - Machine Implementation ID + */ +static inline uint_xlen_t csr_read_mimpid(void) { + uint_xlen_t value; + __asm__ volatile ("csrr %0, mimpid" + : "=r" (value) /* output : register */ + : /* input : none */ + : /* clobbers: none */); + return value; +} + +/******************************************* + * mhartid - MRO - Hardware Thread ID + */ +static inline uint_xlen_t csr_read_mhartid(void) { + uint_xlen_t value; + __asm__ volatile ("csrr %0, mhartid" + : "=r" (value) /* output : register */ + : /* input : none */ + : /* clobbers: none */); + return value; +} + +/******************************************* + * mstatus - MRW - Machine Status + */ +static inline uint_xlen_t csr_read_mstatus(void) { + uint_xlen_t value; + __asm__ volatile ("csrr %0, mstatus" + : "=r" (value) /* output : register */ + : /* input : none */ + : /* clobbers: none */); + return value; +} +static inline void csr_write_mstatus(uint_xlen_t value) { + __asm__ volatile ("csrw mstatus, %0" + : /* output: none */ + : "r" (value) /* input : from register */ + : /* clobbers: none */); +} +static inline uint_xlen_t csr_read_write_mstatus(uint_xlen_t new_value) { + uint_xlen_t prev_value; + __asm__ volatile ("csrrw %0, mstatus, %1" + : "=r" (prev_value) /* output: register %0 */ + : "r" (new_value) /* input : register */ + : /* clobbers: none */); + return prev_value; +} +/* Register CSR bit set and clear instructions */ +static inline void csr_set_bits_mstatus(uint_xlen_t mask) { + __asm__ volatile ("csrrs zero, mstatus, %0" + : /* output: none */ + : "r" (mask) /* input : register */ + : /* clobbers: none */); +} +static inline void csr_clr_bits_mstatus(uint_xlen_t mask) { + __asm__ volatile ("csrrc zero, mstatus, %0" + : /* output: none */ + : "r" (mask) /* input : register */ + : /* clobbers: none */); +} +static inline uint_xlen_t csr_read_set_bits_mstatus(uint_xlen_t mask) { + uint_xlen_t value; + __asm__ volatile ("csrrs %0, 
mstatus, %1" + : "=r" (value) /* output: register %0 */ + : "r" (mask) /* input : register */ + : /* clobbers: none */); + return value; +} +static inline uint_xlen_t csr_read_clr_bits_mstatus(uint_xlen_t mask) { + uint_xlen_t value; + __asm__ volatile ("csrrc %0, mstatus, %1" + : "=r" (value) /* output: register %0 */ + : "r" (mask) /* input : register */ + : /* clobbers: none */); + return value; +} +/* mstatus, CSR write value via immediate value (only up to 5 bits) */ +#define CSR_WRITE_IMM_MSTATUS(VALUE) \ + __asm__ volatile ("csrrwi zero, mstatus, %0" \ + : /* output: none */ \ + : "i" (VALUE) /* input : immediate */ \ + : /* clobbers: none */) + +/* mstatus, CSR set bits via immediate value mask (only up to 5 bits) */ +#define CSR_SET_BITS_IMM_MSTATUS(MASK) \ + __asm__ volatile ("csrrsi zero, mstatus, %0" \ + : /* output: none */ \ + : "i" (MASK) /* input : immediate */ \ + : /* clobbers: none */) + +/* mstatus, CSR clear bits via immediate value mask (only up to 5 bits) */ +#define CSR_CLR_BITS_IMM_MSTATUS(MASK) \ + __asm__ volatile ("csrrci zero, mstatus, %0" \ + : /* output: none */ \ + : "i" (MASK) /* input : immediate */ \ + : /* clobbers: none */) +#define MSTATUS_MIE_BIT_OFFSET 3 +#define MSTATUS_MIE_BIT_WIDTH 1 +#define MSTATUS_MIE_BIT_MASK 0x8 +#define MSTATUS_MIE_ALL_SET_MASK 0x1 +#define MSTATUS_SIE_BIT_OFFSET 2 +#define MSTATUS_SIE_BIT_WIDTH 1 +#define MSTATUS_SIE_BIT_MASK 0x4 +#define MSTATUS_SIE_ALL_SET_MASK 0x1 +#define MSTATUS_MPIE_BIT_OFFSET 7 +#define MSTATUS_MPIE_BIT_WIDTH 1 +#define MSTATUS_MPIE_BIT_MASK 0x80 +#define MSTATUS_MPIE_ALL_SET_MASK 0x1 +#define MSTATUS_SPIE_BIT_OFFSET 5 +#define MSTATUS_SPIE_BIT_WIDTH 1 +#define MSTATUS_SPIE_BIT_MASK 0x20 +#define MSTATUS_SPIE_ALL_SET_MASK 0x1 +#define MSTATUS_MPRV_BIT_OFFSET 17 +#define MSTATUS_MPRV_BIT_WIDTH 1 +#define MSTATUS_MPRV_BIT_MASK 0x20000 +#define MSTATUS_MPRV_ALL_SET_MASK 0x1 +#define MSTATUS_MPP_BIT_OFFSET 11 +#define MSTATUS_MPP_BIT_WIDTH 2 +#define MSTATUS_MPP_BIT_MASK 0x1800 
+#define MSTATUS_MPP_ALL_SET_MASK 0x3 +#define MSTATUS_SPP_BIT_OFFSET 8 +#define MSTATUS_SPP_BIT_WIDTH 1 +#define MSTATUS_SPP_BIT_MASK 0x100 +#define MSTATUS_SPP_ALL_SET_MASK 0x1 + +/******************************************* + * mstatush - MRW - Additional machine status register, RV32 only. + */ +static inline uint_xlen_t csr_read_mstatush(void) { + uint_xlen_t value; + __asm__ volatile ("csrr %0, mstatush" + : "=r" (value) /* output : register */ + : /* input : none */ + : /* clobbers: none */); + return value; +} +static inline void csr_write_mstatush(uint_xlen_t value) { + __asm__ volatile ("csrw mstatush, %0" + : /* output: none */ + : "r" (value) /* input : from register */ + : /* clobbers: none */); +} +static inline uint_xlen_t csr_read_write_mstatush(uint_xlen_t new_value) { + uint_xlen_t prev_value; + __asm__ volatile ("csrrw %0, mstatush, %1" + : "=r" (prev_value) /* output: register %0 */ + : "r" (new_value) /* input : register */ + : /* clobbers: none */); + return prev_value; +} + +/******************************************* + * mtvec - MRW - Machine Trap Vector Base Address + */ +static inline uint_xlen_t csr_read_mtvec(void) { + uint_xlen_t value; + __asm__ volatile ("csrr %0, mtvec" + : "=r" (value) /* output : register */ + : /* input : none */ + : /* clobbers: none */); + return value; +} +static inline void csr_write_mtvec(uint_xlen_t value) { + __asm__ volatile ("csrw mtvec, %0" + : /* output: none */ + : "r" (value) /* input : from register */ + : /* clobbers: none */); +} +static inline uint_xlen_t csr_read_write_mtvec(uint_xlen_t new_value) { + uint_xlen_t prev_value; + __asm__ volatile ("csrrw %0, mtvec, %1" + : "=r" (prev_value) /* output: register %0 */ + : "r" (new_value) /* input : register */ + : /* clobbers: none */); + return prev_value; +} +/* Register CSR bit set and clear instructions */ +static inline void csr_set_bits_mtvec(uint_xlen_t mask) { + __asm__ volatile ("csrrs zero, mtvec, %0" + : /* output: none */ + : "r" (mask) 
/* input : register */ + : /* clobbers: none */); +} +static inline void csr_clr_bits_mtvec(uint_xlen_t mask) { + __asm__ volatile ("csrrc zero, mtvec, %0" + : /* output: none */ + : "r" (mask) /* input : register */ + : /* clobbers: none */); +} +static inline uint_xlen_t csr_read_set_bits_mtvec(uint_xlen_t mask) { + uint_xlen_t value; + __asm__ volatile ("csrrs %0, mtvec, %1" + : "=r" (value) /* output: register %0 */ + : "r" (mask) /* input : register */ + : /* clobbers: none */); + return value; +} +static inline uint_xlen_t csr_read_clr_bits_mtvec(uint_xlen_t mask) { + uint_xlen_t value; + __asm__ volatile ("csrrc %0, mtvec, %1" + : "=r" (value) /* output: register %0 */ + : "r" (mask) /* input : register */ + : /* clobbers: none */); + return value; +} +/* mtvec, CSR write value via immediate value (only up to 5 bits) */ +#define CSR_WRITE_IMM_MTVEC(VALUE) \ + __asm__ volatile ("csrrwi zero, mtvec, %0" \ + : /* output: none */ \ + : "i" (VALUE) /* input : immediate */ \ + : /* clobbers: none */) + +/* mtvec, CSR set bits via immediate value mask (only up to 5 bits) */ +#define CSR_SET_BITS_IMM_MTVEC(MASK) \ + __asm__ volatile ("csrrsi zero, mtvec, %0" \ + : /* output: none */ \ + : "i" (MASK) /* input : immediate */ \ + : /* clobbers: none */) + +/* mtvec, CSR clear bits via immediate value mask (only up to 5 bits) */ +#define CSR_CLR_BITS_IMM_MTVEC(MASK) \ + __asm__ volatile ("csrrci zero, mtvec, %0" \ + : /* output: none */ \ + : "i" (MASK) /* input : immediate */ \ + : /* clobbers: none */) +#define MTVEC_BASE_BIT_OFFSET 2 +#define MTVEC_BASE_BIT_WIDTH ((__riscv_xlen-1)-(2) + 1) +#define MTVEC_BASE_BIT_MASK ((1UL<<(((__riscv_xlen-1)-(2) + 1)-1)) << (2)) +#define MTVEC_BASE_ALL_SET_MASK ((1UL<<(((__riscv_xlen-1)-(2) + 1)-1)) << (0)) +#define MTVEC_MODE_BIT_OFFSET 0 +#define MTVEC_MODE_BIT_WIDTH 2 +#define MTVEC_MODE_BIT_MASK 0x3 +#define MTVEC_MODE_ALL_SET_MASK 0x3 + +/******************************************* + * medeleg - MRW - Machine Exception 
Delegation + */ +static inline uint_xlen_t csr_read_medeleg(void) { + uint_xlen_t value; + __asm__ volatile ("csrr %0, medeleg" + : "=r" (value) /* output : register */ + : /* input : none */ + : /* clobbers: none */); + return value; +} +static inline void csr_write_medeleg(uint_xlen_t value) { + __asm__ volatile ("csrw medeleg, %0" + : /* output: none */ + : "r" (value) /* input : from register */ + : /* clobbers: none */); +} +static inline uint_xlen_t csr_read_write_medeleg(uint_xlen_t new_value) { + uint_xlen_t prev_value; + __asm__ volatile ("csrrw %0, medeleg, %1" + : "=r" (prev_value) /* output: register %0 */ + : "r" (new_value) /* input : register */ + : /* clobbers: none */); + return prev_value; +} + +/******************************************* + * mideleg - MRW - Machine Interrupt Delegation + */ +static inline uint_xlen_t csr_read_mideleg(void) { + uint_xlen_t value; + __asm__ volatile ("csrr %0, mideleg" + : "=r" (value) /* output : register */ + : /* input : none */ + : /* clobbers: none */); + return value; +} +static inline void csr_write_mideleg(uint_xlen_t value) { + __asm__ volatile ("csrw mideleg, %0" + : /* output: none */ + : "r" (value) /* input : from register */ + : /* clobbers: none */); +} +static inline uint_xlen_t csr_read_write_mideleg(uint_xlen_t new_value) { + uint_xlen_t prev_value; + __asm__ volatile ("csrrw %0, mideleg, %1" + : "=r" (prev_value) /* output: register %0 */ + : "r" (new_value) /* input : register */ + : /* clobbers: none */); + return prev_value; +} + +/******************************************* + * mip - MRW - Machine Interrupt Pending + */ +static inline uint_xlen_t csr_read_mip(void) { + uint_xlen_t value; + __asm__ volatile ("csrr %0, mip" + : "=r" (value) /* output : register */ + : /* input : none */ + : /* clobbers: none */); + return value; +} +static inline void csr_write_mip(uint_xlen_t value) { + __asm__ volatile ("csrw mip, %0" + : /* output: none */ + : "r" (value) /* input : from register */ + : /* 
clobbers: none */); +} +static inline uint_xlen_t csr_read_write_mip(uint_xlen_t new_value) { + uint_xlen_t prev_value; + __asm__ volatile ("csrrw %0, mip, %1" + : "=r" (prev_value) /* output: register %0 */ + : "r" (new_value) /* input : register */ + : /* clobbers: none */); + return prev_value; +} +/* Register CSR bit set and clear instructions */ +static inline void csr_set_bits_mip(uint_xlen_t mask) { + __asm__ volatile ("csrrs zero, mip, %0" + : /* output: none */ + : "r" (mask) /* input : register */ + : /* clobbers: none */); +} +static inline void csr_clr_bits_mip(uint_xlen_t mask) { + __asm__ volatile ("csrrc zero, mip, %0" + : /* output: none */ + : "r" (mask) /* input : register */ + : /* clobbers: none */); +} +static inline uint_xlen_t csr_read_set_bits_mip(uint_xlen_t mask) { + uint_xlen_t value; + __asm__ volatile ("csrrs %0, mip, %1" + : "=r" (value) /* output: register %0 */ + : "r" (mask) /* input : register */ + : /* clobbers: none */); + return value; +} +static inline uint_xlen_t csr_read_clr_bits_mip(uint_xlen_t mask) { + uint_xlen_t value; + __asm__ volatile ("csrrc %0, mip, %1" + : "=r" (value) /* output: register %0 */ + : "r" (mask) /* input : register */ + : /* clobbers: none */); + return value; +} +/* mip, CSR write value via immediate value (only up to 5 bits) */ +#define CSR_WRITE_IMM_MIP(VALUE) \ + __asm__ volatile ("csrrwi zero, mip, %0" \ + : /* output: none */ \ + : "i" (VALUE) /* input : immediate */ \ + : /* clobbers: none */) + +/* mip, CSR set bits via immediate value mask (only up to 5 bits) */ +#define CSR_SET_BITS_IMM_MIP(MASK) \ + __asm__ volatile ("csrrsi zero, mip, %0" \ + : /* output: none */ \ + : "i" (MASK) /* input : immediate */ \ + : /* clobbers: none */) + +/* mip, CSR clear bits via immediate value mask (only up to 5 bits) */ +#define CSR_CLR_BITS_IMM_MIP(MASK) \ + __asm__ volatile ("csrrci zero, mip, %0" \ + : /* output: none */ \ + : "i" (MASK) /* input : immediate */ \ + : /* clobbers: none */) +#define 
MIP_MSI_BIT_OFFSET 3 +#define MIP_MSI_BIT_WIDTH 1 +#define MIP_MSI_BIT_MASK 0x8 +#define MIP_MSI_ALL_SET_MASK 0x1 +#define MIP_MTI_BIT_OFFSET 7 +#define MIP_MTI_BIT_WIDTH 1 +#define MIP_MTI_BIT_MASK 0x80 +#define MIP_MTI_ALL_SET_MASK 0x1 +#define MIP_MEI_BIT_OFFSET 11 +#define MIP_MEI_BIT_WIDTH 1 +#define MIP_MEI_BIT_MASK 0x800 +#define MIP_MEI_ALL_SET_MASK 0x1 +#define MIP_SSI_BIT_OFFSET 1 +#define MIP_SSI_BIT_WIDTH 1 +#define MIP_SSI_BIT_MASK 0x2 +#define MIP_SSI_ALL_SET_MASK 0x1 +#define MIP_STI_BIT_OFFSET 5 +#define MIP_STI_BIT_WIDTH 1 +#define MIP_STI_BIT_MASK 0x20 +#define MIP_STI_ALL_SET_MASK 0x1 +#define MIP_SEI_BIT_OFFSET 9 +#define MIP_SEI_BIT_WIDTH 1 +#define MIP_SEI_BIT_MASK 0x200 +#define MIP_SEI_ALL_SET_MASK 0x1 +#define MIP_USI_BIT_OFFSET 0 +#define MIP_USI_BIT_WIDTH 1 +#define MIP_USI_BIT_MASK 0x1 +#define MIP_USI_ALL_SET_MASK 0x1 +#define MIP_UTI_BIT_OFFSET 4 +#define MIP_UTI_BIT_WIDTH 1 +#define MIP_UTI_BIT_MASK 0x10 +#define MIP_UTI_ALL_SET_MASK 0x1 +#define MIP_UEI_BIT_OFFSET 8 +#define MIP_UEI_BIT_WIDTH 1 +#define MIP_UEI_BIT_MASK 0x100 +#define MIP_UEI_ALL_SET_MASK 0x1 +#define MIP_PLATFORM_DEFINED_BIT_OFFSET 16 +#define MIP_PLATFORM_DEFINED_BIT_WIDTH ((__riscv_xlen)-(16) + 1) +#define MIP_PLATFORM_DEFINED_BIT_MASK ((1UL<<(((__riscv_xlen)-(16) + 1)-1)) << (16)) +#define MIP_PLATFORM_DEFINED_ALL_SET_MASK ((1UL<<(((__riscv_xlen)-(16) + 1)-1)) << (0)) + +/******************************************* + * mie - MRW - Machine Interrupt Enable + */ +static inline uint_xlen_t csr_read_mie(void) { + uint_xlen_t value; + __asm__ volatile ("csrr %0, mie" + : "=r" (value) /* output : register */ + : /* input : none */ + : /* clobbers: none */); + return value; +} +static inline void csr_write_mie(uint_xlen_t value) { + __asm__ volatile ("csrw mie, %0" + : /* output: none */ + : "r" (value) /* input : from register */ + : /* clobbers: none */); +} +static inline uint_xlen_t csr_read_write_mie(uint_xlen_t new_value) { + uint_xlen_t prev_value; + __asm__ 
volatile ("csrrw %0, mie, %1" + : "=r" (prev_value) /* output: register %0 */ + : "r" (new_value) /* input : register */ + : /* clobbers: none */); + return prev_value; +} +/* Register CSR bit set and clear instructions */ +static inline void csr_set_bits_mie(uint_xlen_t mask) { + __asm__ volatile ("csrrs zero, mie, %0" + : /* output: none */ + : "r" (mask) /* input : register */ + : /* clobbers: none */); +} +static inline void csr_clr_bits_mie(uint_xlen_t mask) { + __asm__ volatile ("csrrc zero, mie, %0" + : /* output: none */ + : "r" (mask) /* input : register */ + : /* clobbers: none */); +} +static inline uint_xlen_t csr_read_set_bits_mie(uint_xlen_t mask) { + uint_xlen_t value; + __asm__ volatile ("csrrs %0, mie, %1" + : "=r" (value) /* output: register %0 */ + : "r" (mask) /* input : register */ + : /* clobbers: none */); + return value; +} +static inline uint_xlen_t csr_read_clr_bits_mie(uint_xlen_t mask) { + uint_xlen_t value; + __asm__ volatile ("csrrc %0, mie, %1" + : "=r" (value) /* output: register %0 */ + : "r" (mask) /* input : register */ + : /* clobbers: none */); + return value; +} +/* mie, CSR write value via immediate value (only up to 5 bits) */ +#define CSR_WRITE_IMM_MIE(VALUE) \ + __asm__ volatile ("csrrwi zero, mie, %0" \ + : /* output: none */ \ + : "i" (VALUE) /* input : immediate */ \ + : /* clobbers: none */) + +/* mie, CSR set bits via immediate value mask (only up to 5 bits) */ +#define CSR_SET_BITS_IMM_MIE(MASK) \ + __asm__ volatile ("csrrsi zero, mie, %0" \ + : /* output: none */ \ + : "i" (MASK) /* input : immediate */ \ + : /* clobbers: none */) + +/* mie, CSR clear bits via immediate value mask (only up to 5 bits) */ +#define CSR_CLR_BITS_IMM_MIE(MASK) \ + __asm__ volatile ("csrrci zero, mie, %0" \ + : /* output: none */ \ + : "i" (MASK) /* input : immediate */ \ + : /* clobbers: none */) +#define MIE_MSI_BIT_OFFSET 3 +#define MIE_MSI_BIT_WIDTH 1 +#define MIE_MSI_BIT_MASK 0x8 +#define MIE_MSI_ALL_SET_MASK 0x1 +#define 
MIE_MTI_BIT_OFFSET 7 +#define MIE_MTI_BIT_WIDTH 1 +#define MIE_MTI_BIT_MASK 0x80 +#define MIE_MTI_ALL_SET_MASK 0x1 +#define MIE_MEI_BIT_OFFSET 11 +#define MIE_MEI_BIT_WIDTH 1 +#define MIE_MEI_BIT_MASK 0x800 +#define MIE_MEI_ALL_SET_MASK 0x1 +#define MIE_SSI_BIT_OFFSET 1 +#define MIE_SSI_BIT_WIDTH 1 +#define MIE_SSI_BIT_MASK 0x2 +#define MIE_SSI_ALL_SET_MASK 0x1 +#define MIE_STI_BIT_OFFSET 5 +#define MIE_STI_BIT_WIDTH 1 +#define MIE_STI_BIT_MASK 0x20 +#define MIE_STI_ALL_SET_MASK 0x1 +#define MIE_SEI_BIT_OFFSET 9 +#define MIE_SEI_BIT_WIDTH 1 +#define MIE_SEI_BIT_MASK 0x200 +#define MIE_SEI_ALL_SET_MASK 0x1 +#define MIE_USI_BIT_OFFSET 0 +#define MIE_USI_BIT_WIDTH 1 +#define MIE_USI_BIT_MASK 0x1 +#define MIE_USI_ALL_SET_MASK 0x1 +#define MIE_UTI_BIT_OFFSET 4 +#define MIE_UTI_BIT_WIDTH 1 +#define MIE_UTI_BIT_MASK 0x10 +#define MIE_UTI_ALL_SET_MASK 0x1 +#define MIE_UEI_BIT_OFFSET 8 +#define MIE_UEI_BIT_WIDTH 1 +#define MIE_UEI_BIT_MASK 0x100 +#define MIE_UEI_ALL_SET_MASK 0x1 +#define MIE_PLATFORM_DEFINED_BIT_OFFSET 16 +#define MIE_PLATFORM_DEFINED_BIT_WIDTH ((__riscv_xlen)-(16) + 1) +#define MIE_PLATFORM_DEFINED_BIT_MASK ((1UL<<(((__riscv_xlen)-(16) + 1)-1)) << (16)) +#define MIE_PLATFORM_DEFINED_ALL_SET_MASK ((1UL<<(((__riscv_xlen)-(16) + 1)-1)) << (0)) + +/******************************************* + * mcountinhibit - MRW - Machine Counter Inhibit + */ +static inline uint32_t csr_read_mcountinhibit(void) { + uint_csr32_t value; + __asm__ volatile ("csrr %0, mcountinhibit" + : "=r" (value) /* output : register */ + : /* input : none */ + : /* clobbers: none */); + return value; +} +static inline void csr_write_mcountinhibit(uint_csr32_t value) { + __asm__ volatile ("csrw mcountinhibit, %0" + : /* output: none */ + : "r" (value) /* input : from register */ + : /* clobbers: none */); +} +static inline uint32_t csr_read_write_mcountinhibit(uint32_t new_value) { + uint_csr32_t prev_value; + __asm__ volatile ("csrrw %0, mcountinhibit, %1" + : "=r" (prev_value) /* output: 
register %0 */ + : "r" (new_value) /* input : register */ + : /* clobbers: none */); + return prev_value; +} +/* Register CSR bit set and clear instructions */ +static inline void csr_set_bits_mcountinhibit(uint32_t mask) { + __asm__ volatile ("csrrs zero, mcountinhibit, %0" + : /* output: none */ + : "r" (mask) /* input : register */ + : /* clobbers: none */); +} +static inline void csr_clr_bits_mcountinhibit(uint32_t mask) { + __asm__ volatile ("csrrc zero, mcountinhibit, %0" + : /* output: none */ + : "r" (mask) /* input : register */ + : /* clobbers: none */); +} +static inline uint32_t csr_read_set_bits_mcountinhibit(uint32_t mask) { + uint_csr32_t value; + __asm__ volatile ("csrrs %0, mcountinhibit, %1" + : "=r" (value) /* output: register %0 */ + : "r" (mask) /* input : register */ + : /* clobbers: none */); + return value; +} +static inline uint32_t csr_read_clr_bits_mcountinhibit(uint32_t mask) { + uint_csr32_t value; + __asm__ volatile ("csrrc %0, mcountinhibit, %1" + : "=r" (value) /* output: register %0 */ + : "r" (mask) /* input : register */ + : /* clobbers: none */); + return value; +} +/* mcountinhibit, CSR write value via immediate value (only up to 5 bits) */ +#define CSR_WRITE_IMM_MCOUNTINHIBIT(VALUE) \ + __asm__ volatile ("csrrwi zero, mcountinhibit, %0" \ + : /* output: none */ \ + : "i" (VALUE) /* input : immediate */ \ + : /* clobbers: none */) + +/* mcountinhibit, CSR set bits via immediate value mask (only up to 5 bits) */ +#define CSR_SET_BITS_IMM_MCOUNTINHIBIT(MASK) \ + __asm__ volatile ("csrrsi zero, mcountinhibit, %0" \ + : /* output: none */ \ + : "i" (MASK) /* input : immediate */ \ + : /* clobbers: none */) + +/* mcountinhibit, CSR clear bits via immediate value mask (only up to 5 bits) */ +#define CSR_CLR_BITS_IMM_MCOUNTINHIBIT(MASK) \ + __asm__ volatile ("csrrci zero, mcountinhibit, %0" \ + : /* output: none */ \ + : "i" (MASK) /* input : immediate */ \ + : /* clobbers: none */) +#define MCOUNTINHIBIT_CY_BIT_OFFSET 0 +#define 
MCOUNTINHIBIT_CY_BIT_WIDTH 1 +#define MCOUNTINHIBIT_CY_BIT_MASK 0x1 +#define MCOUNTINHIBIT_CY_ALL_SET_MASK 0x1 +#define MCOUNTINHIBIT_IR_BIT_OFFSET 2 +#define MCOUNTINHIBIT_IR_BIT_WIDTH 1 +#define MCOUNTINHIBIT_IR_BIT_MASK 0x4 +#define MCOUNTINHIBIT_IR_ALL_SET_MASK 0x1 +#define MCOUNTINHIBIT_HPM_BIT_OFFSET 3 +#define MCOUNTINHIBIT_HPM_BIT_WIDTH 29 +#define MCOUNTINHIBIT_HPM_BIT_MASK 0xfffffff8 +#define MCOUNTINHIBIT_HPM_ALL_SET_MASK 0x1fffffff + +/******************************************* + * mcycle - MRW - Clock Cycles Executed Counter + */ +static inline uint64_t csr_read_mcycle(void) { + uint_csr64_t value; + __asm__ volatile ("csrr %0, mcycle" + : "=r" (value) /* output : register */ + : /* input : none */ + : /* clobbers: none */); + return value; +} +static inline void csr_write_mcycle(uint_csr64_t value) { + __asm__ volatile ("csrw mcycle, %0" + : /* output: none */ + : "r" (value) /* input : from register */ + : /* clobbers: none */); +} +static inline uint64_t csr_read_write_mcycle(uint64_t new_value) { + uint_csr64_t prev_value; + __asm__ volatile ("csrrw %0, mcycle, %1" + : "=r" (prev_value) /* output: register %0 */ + : "r" (new_value) /* input : register */ + : /* clobbers: none */); + return prev_value; +} + +/******************************************* + * minstret - MRW - Number of Instructions Retired Counter + */ +static inline uint64_t csr_read_minstret(void) { + uint_csr64_t value; + __asm__ volatile ("csrr %0, minstret" + : "=r" (value) /* output : register */ + : /* input : none */ + : /* clobbers: none */); + return value; +} +static inline void csr_write_minstret(uint_csr64_t value) { + __asm__ volatile ("csrw minstret, %0" + : /* output: none */ + : "r" (value) /* input : from register */ + : /* clobbers: none */); +} +static inline uint64_t csr_read_write_minstret(uint64_t new_value) { + uint_csr64_t prev_value; + __asm__ volatile ("csrrw %0, minstret, %1" + : "=r" (prev_value) /* output: register %0 */ + : "r" (new_value) /* input : 
register */ + : /* clobbers: none */); + return prev_value; +} + +/******************************************* + * mhpmcounter3 - MRW - Event Counters + */ +static inline uint64_t csr_read_mhpmcounter3(void) { + uint_csr64_t value; + __asm__ volatile ("csrr %0, mhpmcounter3" + : "=r" (value) /* output : register */ + : /* input : none */ + : /* clobbers: none */); + return value; +} +static inline void csr_write_mhpmcounter3(uint_csr64_t value) { + __asm__ volatile ("csrw mhpmcounter3, %0" + : /* output: none */ + : "r" (value) /* input : from register */ + : /* clobbers: none */); +} +static inline uint64_t csr_read_write_mhpmcounter3(uint64_t new_value) { + uint_csr64_t prev_value; + __asm__ volatile ("csrrw %0, mhpmcounter3, %1" + : "=r" (prev_value) /* output: register %0 */ + : "r" (new_value) /* input : register */ + : /* clobbers: none */); + return prev_value; +} + +/******************************************* + * mhpmevent3 - MRW - Event Counter Event Select + */ +static inline uint_xlen_t csr_read_mhpmevent3(void) { + uint_xlen_t value; + __asm__ volatile ("csrr %0, mhpmevent3" + : "=r" (value) /* output : register */ + : /* input : none */ + : /* clobbers: none */); + return value; +} +static inline void csr_write_mhpmevent3(uint_xlen_t value) { + __asm__ volatile ("csrw mhpmevent3, %0" + : /* output: none */ + : "r" (value) /* input : from register */ + : /* clobbers: none */); +} +static inline uint_xlen_t csr_read_write_mhpmevent3(uint_xlen_t new_value) { + uint_xlen_t prev_value; + __asm__ volatile ("csrrw %0, mhpmevent3, %1" + : "=r" (prev_value) /* output: register %0 */ + : "r" (new_value) /* input : register */ + : /* clobbers: none */); + return prev_value; +} + +/******************************************* + * mcounteren - MRW - Counter Enable + */ +static inline uint32_t csr_read_mcounteren(void) { + uint_csr32_t value; + __asm__ volatile ("csrr %0, mcounteren" + : "=r" (value) /* output : register */ + : /* input : none */ + : /* clobbers: 
none */); + return value; +} +static inline void csr_write_mcounteren(uint_csr32_t value) { + __asm__ volatile ("csrw mcounteren, %0" + : /* output: none */ + : "r" (value) /* input : from register */ + : /* clobbers: none */); +} +static inline uint32_t csr_read_write_mcounteren(uint32_t new_value) { + uint_csr32_t prev_value; + __asm__ volatile ("csrrw %0, mcounteren, %1" + : "=r" (prev_value) /* output: register %0 */ + : "r" (new_value) /* input : register */ + : /* clobbers: none */); + return prev_value; +} +/* Register CSR bit set and clear instructions */ +static inline void csr_set_bits_mcounteren(uint32_t mask) { + __asm__ volatile ("csrrs zero, mcounteren, %0" + : /* output: none */ + : "r" (mask) /* input : register */ + : /* clobbers: none */); +} +static inline void csr_clr_bits_mcounteren(uint32_t mask) { + __asm__ volatile ("csrrc zero, mcounteren, %0" + : /* output: none */ + : "r" (mask) /* input : register */ + : /* clobbers: none */); +} +static inline uint32_t csr_read_set_bits_mcounteren(uint32_t mask) { + uint_csr32_t value; + __asm__ volatile ("csrrs %0, mcounteren, %1" + : "=r" (value) /* output: register %0 */ + : "r" (mask) /* input : register */ + : /* clobbers: none */); + return value; +} +static inline uint32_t csr_read_clr_bits_mcounteren(uint32_t mask) { + uint_csr32_t value; + __asm__ volatile ("csrrc %0, mcounteren, %1" + : "=r" (value) /* output: register %0 */ + : "r" (mask) /* input : register */ + : /* clobbers: none */); + return value; +} +/* mcounteren, CSR write value via immediate value (only up to 5 bits) */ +#define CSR_WRITE_IMM_MCOUNTEREN(VALUE) \ + __asm__ volatile ("csrrwi zero, mcounteren, %0" \ + : /* output: none */ \ + : "i" (VALUE) /* input : immediate */ \ + : /* clobbers: none */) + +/* mcounteren, CSR set bits via immediate value mask (only up to 5 bits) */ +#define CSR_SET_BITS_IMM_MCOUNTEREN(MASK) \ + __asm__ volatile ("csrrsi zero, mcounteren, %0" \ + : /* output: none */ \ + : "i" (MASK) /* input : 
immediate */ \ + : /* clobbers: none */) + +/* mcounteren, CSR clear bits via immediate value mask (only up to 5 bits) */ +#define CSR_CLR_BITS_IMM_MCOUNTEREN(MASK) \ + __asm__ volatile ("csrrci zero, mcounteren, %0" \ + : /* output: none */ \ + : "i" (MASK) /* input : immediate */ \ + : /* clobbers: none */) +#define MCOUNTEREN_CY_BIT_OFFSET 0 +#define MCOUNTEREN_CY_BIT_WIDTH 1 +#define MCOUNTEREN_CY_BIT_MASK 0x1 +#define MCOUNTEREN_CY_ALL_SET_MASK 0x1 +#define MCOUNTEREN_TM_BIT_OFFSET 1 +#define MCOUNTEREN_TM_BIT_WIDTH 1 +#define MCOUNTEREN_TM_BIT_MASK 0x2 +#define MCOUNTEREN_TM_ALL_SET_MASK 0x1 +#define MCOUNTEREN_IR_BIT_OFFSET 2 +#define MCOUNTEREN_IR_BIT_WIDTH 1 +#define MCOUNTEREN_IR_BIT_MASK 0x4 +#define MCOUNTEREN_IR_ALL_SET_MASK 0x1 +#define MCOUNTEREN_HPM_BIT_OFFSET 3 +#define MCOUNTEREN_HPM_BIT_WIDTH 29 +#define MCOUNTEREN_HPM_BIT_MASK 0xfffffff8 +#define MCOUNTEREN_HPM_ALL_SET_MASK 0x1fffffff + +/******************************************* + * scounteren - SRW - Counter Enable + */ +static inline uint_xlen_t csr_read_scounteren(void) { + uint_xlen_t value; + __asm__ volatile ("csrr %0, scounteren" + : "=r" (value) /* output : register */ + : /* input : none */ + : /* clobbers: none */); + return value; +} +static inline void csr_write_scounteren(uint_xlen_t value) { + __asm__ volatile ("csrw scounteren, %0" + : /* output: none */ + : "r" (value) /* input : from register */ + : /* clobbers: none */); +} +static inline uint_xlen_t csr_read_write_scounteren(uint_xlen_t new_value) { + uint_xlen_t prev_value; + __asm__ volatile ("csrrw %0, scounteren, %1" + : "=r" (prev_value) /* output: register %0 */ + : "r" (new_value) /* input : register */ + : /* clobbers: none */); + return prev_value; +} + +/******************************************* + * mscratch - MRW - Machine Mode Scratch Register + */ +static inline uint_xlen_t csr_read_mscratch(void) { + uint_xlen_t value; + __asm__ volatile ("csrr %0, mscratch" + : "=r" (value) /* output : register */ + : /* 
input : none */ + : /* clobbers: none */); + return value; +} +static inline void csr_write_mscratch(uint_xlen_t value) { + __asm__ volatile ("csrw mscratch, %0" + : /* output: none */ + : "r" (value) /* input : from register */ + : /* clobbers: none */); +} +static inline uint_xlen_t csr_read_write_mscratch(uint_xlen_t new_value) { + uint_xlen_t prev_value; + __asm__ volatile ("csrrw %0, mscratch, %1" + : "=r" (prev_value) /* output: register %0 */ + : "r" (new_value) /* input : register */ + : /* clobbers: none */); + return prev_value; +} + +/******************************************* + * mepc - MRW - Machine Exception Program Counter + */ +static inline uint_xlen_t csr_read_mepc(void) { + uint_xlen_t value; + __asm__ volatile ("csrr %0, mepc" + : "=r" (value) /* output : register */ + : /* input : none */ + : /* clobbers: none */); + return value; +} +static inline void csr_write_mepc(uint_xlen_t value) { + __asm__ volatile ("csrw mepc, %0" + : /* output: none */ + : "r" (value) /* input : from register */ + : /* clobbers: none */); +} +static inline uint_xlen_t csr_read_write_mepc(uint_xlen_t new_value) { + uint_xlen_t prev_value; + __asm__ volatile ("csrrw %0, mepc, %1" + : "=r" (prev_value) /* output: register %0 */ + : "r" (new_value) /* input : register */ + : /* clobbers: none */); + return prev_value; +} + +/******************************************* + * mcause - MRW - Machine Exception Cause + */ +static inline uint_xlen_t csr_read_mcause(void) { + uint_xlen_t value; + __asm__ volatile ("csrr %0, mcause" + : "=r" (value) /* output : register */ + : /* input : none */ + : /* clobbers: none */); + return value; +} +static inline void csr_write_mcause(uint_xlen_t value) { + __asm__ volatile ("csrw mcause, %0" + : /* output: none */ + : "r" (value) /* input : from register */ + : /* clobbers: none */); +} +static inline uint_xlen_t csr_read_write_mcause(uint_xlen_t new_value) { + uint_xlen_t prev_value; + __asm__ volatile ("csrrw %0, mcause, %1" + : "=r" 
(prev_value) /* output: register %0 */ + : "r" (new_value) /* input : register */ + : /* clobbers: none */); + return prev_value; +} +/* Register CSR bit set and clear instructions */ +static inline void csr_set_bits_mcause(uint_xlen_t mask) { + __asm__ volatile ("csrrs zero, mcause, %0" + : /* output: none */ + : "r" (mask) /* input : register */ + : /* clobbers: none */); +} +static inline void csr_clr_bits_mcause(uint_xlen_t mask) { + __asm__ volatile ("csrrc zero, mcause, %0" + : /* output: none */ + : "r" (mask) /* input : register */ + : /* clobbers: none */); +} +static inline uint_xlen_t csr_read_set_bits_mcause(uint_xlen_t mask) { + uint_xlen_t value; + __asm__ volatile ("csrrs %0, mcause, %1" + : "=r" (value) /* output: register %0 */ + : "r" (mask) /* input : register */ + : /* clobbers: none */); + return value; +} +static inline uint_xlen_t csr_read_clr_bits_mcause(uint_xlen_t mask) { + uint_xlen_t value; + __asm__ volatile ("csrrc %0, mcause, %1" + : "=r" (value) /* output: register %0 */ + : "r" (mask) /* input : register */ + : /* clobbers: none */); + return value; +} +/* mcause, CSR write value via immediate value (only up to 5 bits) */ +#define CSR_WRITE_IMM_MCAUSE(VALUE) \ + __asm__ volatile ("csrrwi zero, mcause, %0" \ + : /* output: none */ \ + : "i" (VALUE) /* input : immediate */ \ + : /* clobbers: none */) + +/* mcause, CSR set bits via immediate value mask (only up to 5 bits) */ +#define CSR_SET_BITS_IMM_MCAUSE(MASK) \ + __asm__ volatile ("csrrsi zero, mcause, %0" \ + : /* output: none */ \ + : "i" (MASK) /* input : immediate */ \ + : /* clobbers: none */) + +/* mcause, CSR clear bits via immediate value mask (only up to 5 bits) */ +#define CSR_CLR_BITS_IMM_MCAUSE(MASK) \ + __asm__ volatile ("csrrci zero, mcause, %0" \ + : /* output: none */ \ + : "i" (MASK) /* input : immediate */ \ + : /* clobbers: none */) +#define MCAUSE_INTERRUPT_BIT_OFFSET (__riscv_xlen-1) +#define MCAUSE_INTERRUPT_BIT_WIDTH 1 +#define MCAUSE_INTERRUPT_BIT_MASK 
(0x1UL << ((__riscv_xlen-1))) +#define MCAUSE_INTERRUPT_ALL_SET_MASK 0x1 +#define MCAUSE_EXCEPTION_CODE_BIT_OFFSET 0 +#define MCAUSE_EXCEPTION_CODE_BIT_WIDTH ((__riscv_xlen-2)-(0) + 1) +#define MCAUSE_EXCEPTION_CODE_BIT_MASK ((1UL<<(((__riscv_xlen-2)-(0) + 1)-1)) << (0)) +#define MCAUSE_EXCEPTION_CODE_ALL_SET_MASK ((1UL<<(((__riscv_xlen-2)-(0) + 1)-1)) << (0)) + +/******************************************* + * mtval - MRW - Machine Trap Value + */ +static inline uint_xlen_t csr_read_mtval(void) { + uint_xlen_t value; + __asm__ volatile ("csrr %0, mtval" + : "=r" (value) /* output : register */ + : /* input : none */ + : /* clobbers: none */); + return value; +} +static inline void csr_write_mtval(uint_xlen_t value) { + __asm__ volatile ("csrw mtval, %0" + : /* output: none */ + : "r" (value) /* input : from register */ + : /* clobbers: none */); +} +static inline uint_xlen_t csr_read_write_mtval(uint_xlen_t new_value) { + uint_xlen_t prev_value; + __asm__ volatile ("csrrw %0, mtval, %1" + : "=r" (prev_value) /* output: register %0 */ + : "r" (new_value) /* input : register */ + : /* clobbers: none */); + return prev_value; +} + +/******************************************* + * sscratch - SRW - Supervisor Mode Scratch Register + */ +static inline uint_xlen_t csr_read_sscratch(void) { + uint_xlen_t value; + __asm__ volatile ("csrr %0, sscratch" + : "=r" (value) /* output : register */ + : /* input : none */ + : /* clobbers: none */); + return value; +} +static inline void csr_write_sscratch(uint_xlen_t value) { + __asm__ volatile ("csrw sscratch, %0" + : /* output: none */ + : "r" (value) /* input : from register */ + : /* clobbers: none */); +} +static inline uint_xlen_t csr_read_write_sscratch(uint_xlen_t new_value) { + uint_xlen_t prev_value; + __asm__ volatile ("csrrw %0, sscratch, %1" + : "=r" (prev_value) /* output: register %0 */ + : "r" (new_value) /* input : register */ + : /* clobbers: none */); + return prev_value; +} + 
+/******************************************* + * sepc - SRW - Supervisor Exception Program Counter + */ +static inline uint_xlen_t csr_read_sepc(void) { + uint_xlen_t value; + __asm__ volatile ("csrr %0, sepc" + : "=r" (value) /* output : register */ + : /* input : none */ + : /* clobbers: none */); + return value; +} +static inline void csr_write_sepc(uint_xlen_t value) { + __asm__ volatile ("csrw sepc, %0" + : /* output: none */ + : "r" (value) /* input : from register */ + : /* clobbers: none */); +} +static inline uint_xlen_t csr_read_write_sepc(uint_xlen_t new_value) { + uint_xlen_t prev_value; + __asm__ volatile ("csrrw %0, sepc, %1" + : "=r" (prev_value) /* output: register %0 */ + : "r" (new_value) /* input : register */ + : /* clobbers: none */); + return prev_value; +} + +/******************************************* + * scause - SRW - Supervisor Exception Cause + */ +static inline uint_xlen_t csr_read_scause(void) { + uint_xlen_t value; + __asm__ volatile ("csrr %0, scause" + : "=r" (value) /* output : register */ + : /* input : none */ + : /* clobbers: none */); + return value; +} +static inline void csr_write_scause(uint_xlen_t value) { + __asm__ volatile ("csrw scause, %0" + : /* output: none */ + : "r" (value) /* input : from register */ + : /* clobbers: none */); +} +static inline uint_xlen_t csr_read_write_scause(uint_xlen_t new_value) { + uint_xlen_t prev_value; + __asm__ volatile ("csrrw %0, scause, %1" + : "=r" (prev_value) /* output: register %0 */ + : "r" (new_value) /* input : register */ + : /* clobbers: none */); + return prev_value; +} +/* Register CSR bit set and clear instructions */ +static inline void csr_set_bits_scause(uint_xlen_t mask) { + __asm__ volatile ("csrrs zero, scause, %0" + : /* output: none */ + : "r" (mask) /* input : register */ + : /* clobbers: none */); +} +static inline void csr_clr_bits_scause(uint_xlen_t mask) { + __asm__ volatile ("csrrc zero, scause, %0" + : /* output: none */ + : "r" (mask) /* input : register 
*/ + : /* clobbers: none */); +} +static inline uint_xlen_t csr_read_set_bits_scause(uint_xlen_t mask) { + uint_xlen_t value; + __asm__ volatile ("csrrs %0, scause, %1" + : "=r" (value) /* output: register %0 */ + : "r" (mask) /* input : register */ + : /* clobbers: none */); + return value; +} +static inline uint_xlen_t csr_read_clr_bits_scause(uint_xlen_t mask) { + uint_xlen_t value; + __asm__ volatile ("csrrc %0, scause, %1" + : "=r" (value) /* output: register %0 */ + : "r" (mask) /* input : register */ + : /* clobbers: none */); + return value; +} +/* scause, CSR write value via immediate value (only up to 5 bits) */ +#define CSR_WRITE_IMM_SCAUSE(VALUE) \ + __asm__ volatile ("csrrwi zero, scause, %0" \ + : /* output: none */ \ + : "i" (VALUE) /* input : immediate */ \ + : /* clobbers: none */) + +/* scause, CSR set bits via immediate value mask (only up to 5 bits) */ +#define CSR_SET_BITS_IMM_SCAUSE(MASK) \ + __asm__ volatile ("csrrsi zero, scause, %0" \ + : /* output: none */ \ + : "i" (MASK) /* input : immediate */ \ + : /* clobbers: none */) + +/* scause, CSR clear bits via immediate value mask (only up to 5 bits) */ +#define CSR_CLR_BITS_IMM_SCAUSE(MASK) \ + __asm__ volatile ("csrrci zero, scause, %0" \ + : /* output: none */ \ + : "i" (MASK) /* input : immediate */ \ + : /* clobbers: none */) +#define SCAUSE_INTERRUPT_BIT_OFFSET (__riscv_xlen-1) +#define SCAUSE_INTERRUPT_BIT_WIDTH 1 +#define SCAUSE_INTERRUPT_BIT_MASK (0x1UL << ((__riscv_xlen-1))) +#define SCAUSE_INTERRUPT_ALL_SET_MASK 0x1 +#define SCAUSE_EXCEPTION_CODE_BIT_OFFSET 0 +#define SCAUSE_EXCEPTION_CODE_BIT_WIDTH ((__riscv_xlen-2)-(0) + 1) +#define SCAUSE_EXCEPTION_CODE_BIT_MASK ((1UL<<(((__riscv_xlen-2)-(0) + 1)-1)) << (0)) +#define SCAUSE_EXCEPTION_CODE_ALL_SET_MASK ((1UL<<(((__riscv_xlen-2)-(0) + 1)-1)) << (0)) + +/******************************************* + * sstatus - SRW - Supervisor Status + */ +static inline uint_xlen_t csr_read_sstatus(void) { + uint_xlen_t value; + __asm__ volatile 
("csrr %0, sstatus" + : "=r" (value) /* output : register */ + : /* input : none */ + : /* clobbers: none */); + return value; +} +static inline void csr_write_sstatus(uint_xlen_t value) { + __asm__ volatile ("csrw sstatus, %0" + : /* output: none */ + : "r" (value) /* input : from register */ + : /* clobbers: none */); +} +static inline uint_xlen_t csr_read_write_sstatus(uint_xlen_t new_value) { + uint_xlen_t prev_value; + __asm__ volatile ("csrrw %0, sstatus, %1" + : "=r" (prev_value) /* output: register %0 */ + : "r" (new_value) /* input : register */ + : /* clobbers: none */); + return prev_value; +} +/* Register CSR bit set and clear instructions */ +static inline void csr_set_bits_sstatus(uint_xlen_t mask) { + __asm__ volatile ("csrrs zero, sstatus, %0" + : /* output: none */ + : "r" (mask) /* input : register */ + : /* clobbers: none */); +} +static inline void csr_clr_bits_sstatus(uint_xlen_t mask) { + __asm__ volatile ("csrrc zero, sstatus, %0" + : /* output: none */ + : "r" (mask) /* input : register */ + : /* clobbers: none */); +} +static inline uint_xlen_t csr_read_set_bits_sstatus(uint_xlen_t mask) { + uint_xlen_t value; + __asm__ volatile ("csrrs %0, sstatus, %1" + : "=r" (value) /* output: register %0 */ + : "r" (mask) /* input : register */ + : /* clobbers: none */); + return value; +} +static inline uint_xlen_t csr_read_clr_bits_sstatus(uint_xlen_t mask) { + uint_xlen_t value; + __asm__ volatile ("csrrc %0, sstatus, %1" + : "=r" (value) /* output: register %0 */ + : "r" (mask) /* input : register */ + : /* clobbers: none */); + return value; +} +/* sstatus, CSR write value via immediate value (only up to 5 bits) */ +#define CSR_WRITE_IMM_SSTATUS(VALUE) \ + __asm__ volatile ("csrrwi zero, sstatus, %0" \ + : /* output: none */ \ + : "i" (VALUE) /* input : immediate */ \ + : /* clobbers: none */) + +/* sstatus, CSR set bits via immediate value mask (only up to 5 bits) */ +#define CSR_SET_BITS_IMM_SSTATUS(MASK) \ + __asm__ volatile ("csrrsi zero, 
sstatus, %0" \ + : /* output: none */ \ + : "i" (MASK) /* input : immediate */ \ + : /* clobbers: none */) + +/* sstatus, CSR clear bits via immediate value mask (only up to 5 bits) */ +#define CSR_CLR_BITS_IMM_SSTATUS(MASK) \ + __asm__ volatile ("csrrci zero, sstatus, %0" \ + : /* output: none */ \ + : "i" (MASK) /* input : immediate */ \ + : /* clobbers: none */) +#define SSTATUS_SIE_BIT_OFFSET 2 +#define SSTATUS_SIE_BIT_WIDTH 1 +#define SSTATUS_SIE_BIT_MASK 0x4 +#define SSTATUS_SIE_ALL_SET_MASK 0x1 +#define SSTATUS_SPIE_BIT_OFFSET 5 +#define SSTATUS_SPIE_BIT_WIDTH 1 +#define SSTATUS_SPIE_BIT_MASK 0x20 +#define SSTATUS_SPIE_ALL_SET_MASK 0x1 +#define SSTATUS_SPP_BIT_OFFSET 8 +#define SSTATUS_SPP_BIT_WIDTH 1 +#define SSTATUS_SPP_BIT_MASK 0x100 +#define SSTATUS_SPP_ALL_SET_MASK 0x1 + +/******************************************* + * stvec - SRW - Supervisor Trap Vector Base Address + */ +static inline uint_xlen_t csr_read_stvec(void) { + uint_xlen_t value; + __asm__ volatile ("csrr %0, stvec" + : "=r" (value) /* output : register */ + : /* input : none */ + : /* clobbers: none */); + return value; +} +static inline void csr_write_stvec(uint_xlen_t value) { + __asm__ volatile ("csrw stvec, %0" + : /* output: none */ + : "r" (value) /* input : from register */ + : /* clobbers: none */); +} +static inline uint_xlen_t csr_read_write_stvec(uint_xlen_t new_value) { + uint_xlen_t prev_value; + __asm__ volatile ("csrrw %0, stvec, %1" + : "=r" (prev_value) /* output: register %0 */ + : "r" (new_value) /* input : register */ + : /* clobbers: none */); + return prev_value; +} +/* Register CSR bit set and clear instructions */ +static inline void csr_set_bits_stvec(uint_xlen_t mask) { + __asm__ volatile ("csrrs zero, stvec, %0" + : /* output: none */ + : "r" (mask) /* input : register */ + : /* clobbers: none */); +} +static inline void csr_clr_bits_stvec(uint_xlen_t mask) { + __asm__ volatile ("csrrc zero, stvec, %0" + : /* output: none */ + : "r" (mask) /* input : register */ 
+ : /* clobbers: none */); +} +static inline uint_xlen_t csr_read_set_bits_stvec(uint_xlen_t mask) { + uint_xlen_t value; + __asm__ volatile ("csrrs %0, stvec, %1" + : "=r" (value) /* output: register %0 */ + : "r" (mask) /* input : register */ + : /* clobbers: none */); + return value; +} +static inline uint_xlen_t csr_read_clr_bits_stvec(uint_xlen_t mask) { + uint_xlen_t value; + __asm__ volatile ("csrrc %0, stvec, %1" + : "=r" (value) /* output: register %0 */ + : "r" (mask) /* input : register */ + : /* clobbers: none */); + return value; +} +/* stvec, CSR write value via immediate value (only up to 5 bits) */ +#define CSR_WRITE_IMM_STVEC(VALUE) \ + __asm__ volatile ("csrrwi zero, stvec, %0" \ + : /* output: none */ \ + : "i" (VALUE) /* input : immediate */ \ + : /* clobbers: none */) + +/* stvec, CSR set bits via immediate value mask (only up to 5 bits) */ +#define CSR_SET_BITS_IMM_STVEC(MASK) \ + __asm__ volatile ("csrrsi zero, stvec, %0" \ + : /* output: none */ \ + : "i" (MASK) /* input : immediate */ \ + : /* clobbers: none */) + +/* stvec, CSR clear bits via immediate value mask (only up to 5 bits) */ +#define CSR_CLR_BITS_IMM_STVEC(MASK) \ + __asm__ volatile ("csrrci zero, stvec, %0" \ + : /* output: none */ \ + : "i" (MASK) /* input : immediate */ \ + : /* clobbers: none */) +#define STVEC_BASE_BIT_OFFSET 2 +#define STVEC_BASE_BIT_WIDTH ((__riscv_xlen-1)-(2) + 1) +#define STVEC_BASE_BIT_MASK ((1UL<<(((__riscv_xlen-1)-(2) + 1)-1)) << (2)) +#define STVEC_BASE_ALL_SET_MASK ((1UL<<(((__riscv_xlen-1)-(2) + 1)-1)) << (0)) +#define STVEC_MODE_BIT_OFFSET 0 +#define STVEC_MODE_BIT_WIDTH 2 +#define STVEC_MODE_BIT_MASK 0x3 +#define STVEC_MODE_ALL_SET_MASK 0x3 + +/******************************************* + * sideleg - SRW - Supervisor Interrupt Delegation + */ +static inline uint_xlen_t csr_read_sideleg(void) { + uint_xlen_t value; + __asm__ volatile ("csrr %0, sideleg" + : "=r" (value) /* output : register */ + : /* input : none */ + : /* clobbers: none */); + 
return value; +} +static inline void csr_write_sideleg(uint_xlen_t value) { + __asm__ volatile ("csrw sideleg, %0" + : /* output: none */ + : "r" (value) /* input : from register */ + : /* clobbers: none */); +} +static inline uint_xlen_t csr_read_write_sideleg(uint_xlen_t new_value) { + uint_xlen_t prev_value; + __asm__ volatile ("csrrw %0, sideleg, %1" + : "=r" (prev_value) /* output: register %0 */ + : "r" (new_value) /* input : register */ + : /* clobbers: none */); + return prev_value; +} + +/******************************************* + * sedeleg - SRW - Supervisor Exception Delegation + */ +static inline uint_xlen_t csr_read_sedeleg(void) { + uint_xlen_t value; + __asm__ volatile ("csrr %0, sedeleg" + : "=r" (value) /* output : register */ + : /* input : none */ + : /* clobbers: none */); + return value; +} +static inline void csr_write_sedeleg(uint_xlen_t value) { + __asm__ volatile ("csrw sedeleg, %0" + : /* output: none */ + : "r" (value) /* input : from register */ + : /* clobbers: none */); +} +static inline uint_xlen_t csr_read_write_sedeleg(uint_xlen_t new_value) { + uint_xlen_t prev_value; + __asm__ volatile ("csrrw %0, sedeleg, %1" + : "=r" (prev_value) /* output: register %0 */ + : "r" (new_value) /* input : register */ + : /* clobbers: none */); + return prev_value; +} + +/******************************************* + * sip - SRW - Supervisor Interrupt Pending + */ +static inline uint_xlen_t csr_read_sip(void) { + uint_xlen_t value; + __asm__ volatile ("csrr %0, sip" + : "=r" (value) /* output : register */ + : /* input : none */ + : /* clobbers: none */); + return value; +} +static inline void csr_write_sip(uint_xlen_t value) { + __asm__ volatile ("csrw sip, %0" + : /* output: none */ + : "r" (value) /* input : from register */ + : /* clobbers: none */); +} +static inline uint_xlen_t csr_read_write_sip(uint_xlen_t new_value) { + uint_xlen_t prev_value; + __asm__ volatile ("csrrw %0, sip, %1" + : "=r" (prev_value) /* output: register %0 */ + : "r" 
(new_value) /* input : register */ + : /* clobbers: none */); + return prev_value; +} +/* Register CSR bit set and clear instructions */ +static inline void csr_set_bits_sip(uint_xlen_t mask) { + __asm__ volatile ("csrrs zero, sip, %0" + : /* output: none */ + : "r" (mask) /* input : register */ + : /* clobbers: none */); +} +static inline void csr_clr_bits_sip(uint_xlen_t mask) { + __asm__ volatile ("csrrc zero, sip, %0" + : /* output: none */ + : "r" (mask) /* input : register */ + : /* clobbers: none */); +} +static inline uint_xlen_t csr_read_set_bits_sip(uint_xlen_t mask) { + uint_xlen_t value; + __asm__ volatile ("csrrs %0, sip, %1" + : "=r" (value) /* output: register %0 */ + : "r" (mask) /* input : register */ + : /* clobbers: none */); + return value; +} +static inline uint_xlen_t csr_read_clr_bits_sip(uint_xlen_t mask) { + uint_xlen_t value; + __asm__ volatile ("csrrc %0, sip, %1" + : "=r" (value) /* output: register %0 */ + : "r" (mask) /* input : register */ + : /* clobbers: none */); + return value; +} +/* sip, CSR write value via immediate value (only up to 5 bits) */ +#define CSR_WRITE_IMM_SIP(VALUE) \ + __asm__ volatile ("csrrwi zero, sip, %0" \ + : /* output: none */ \ + : "i" (VALUE) /* input : immediate */ \ + : /* clobbers: none */) + +/* sip, CSR set bits via immediate value mask (only up to 5 bits) */ +#define CSR_SET_BITS_IMM_SIP(MASK) \ + __asm__ volatile ("csrrsi zero, sip, %0" \ + : /* output: none */ \ + : "i" (MASK) /* input : immediate */ \ + : /* clobbers: none */) + +/* sip, CSR clear bits via immediate value mask (only up to 5 bits) */ +#define CSR_CLR_BITS_IMM_SIP(MASK) \ + __asm__ volatile ("csrrci zero, sip, %0" \ + : /* output: none */ \ + : "i" (MASK) /* input : immediate */ \ + : /* clobbers: none */) +#define SIP_SSI_BIT_OFFSET 1 +#define SIP_SSI_BIT_WIDTH 1 +#define SIP_SSI_BIT_MASK 0x2 +#define SIP_SSI_ALL_SET_MASK 0x1 +#define SIP_STI_BIT_OFFSET 5 +#define SIP_STI_BIT_WIDTH 1 +#define SIP_STI_BIT_MASK 0x20 +#define 
SIP_STI_ALL_SET_MASK 0x1 +#define SIP_SEI_BIT_OFFSET 9 +#define SIP_SEI_BIT_WIDTH 1 +#define SIP_SEI_BIT_MASK 0x200 +#define SIP_SEI_ALL_SET_MASK 0x1 +#define SIP_USI_BIT_OFFSET 0 +#define SIP_USI_BIT_WIDTH 1 +#define SIP_USI_BIT_MASK 0x1 +#define SIP_USI_ALL_SET_MASK 0x1 +#define SIP_UTI_BIT_OFFSET 4 +#define SIP_UTI_BIT_WIDTH 1 +#define SIP_UTI_BIT_MASK 0x10 +#define SIP_UTI_ALL_SET_MASK 0x1 +#define SIP_UEI_BIT_OFFSET 8 +#define SIP_UEI_BIT_WIDTH 1 +#define SIP_UEI_BIT_MASK 0x100 +#define SIP_UEI_ALL_SET_MASK 0x1 + +/******************************************* + * sie - SRW - Supervisor Interrupt Enable + */ +static inline uint_xlen_t csr_read_sie(void) { + uint_xlen_t value; + __asm__ volatile ("csrr %0, sie" + : "=r" (value) /* output : register */ + : /* input : none */ + : /* clobbers: none */); + return value; +} +static inline void csr_write_sie(uint_xlen_t value) { + __asm__ volatile ("csrw sie, %0" + : /* output: none */ + : "r" (value) /* input : from register */ + : /* clobbers: none */); +} +static inline uint_xlen_t csr_read_write_sie(uint_xlen_t new_value) { + uint_xlen_t prev_value; + __asm__ volatile ("csrrw %0, sie, %1" + : "=r" (prev_value) /* output: register %0 */ + : "r" (new_value) /* input : register */ + : /* clobbers: none */); + return prev_value; +} +/* Register CSR bit set and clear instructions */ +static inline void csr_set_bits_sie(uint_xlen_t mask) { + __asm__ volatile ("csrrs zero, sie, %0" + : /* output: none */ + : "r" (mask) /* input : register */ + : /* clobbers: none */); +} +static inline void csr_clr_bits_sie(uint_xlen_t mask) { + __asm__ volatile ("csrrc zero, sie, %0" + : /* output: none */ + : "r" (mask) /* input : register */ + : /* clobbers: none */); +} +static inline uint_xlen_t csr_read_set_bits_sie(uint_xlen_t mask) { + uint_xlen_t value; + __asm__ volatile ("csrrs %0, sie, %1" + : "=r" (value) /* output: register %0 */ + : "r" (mask) /* input : register */ + : /* clobbers: none */); + return value; +} +static 
inline uint_xlen_t csr_read_clr_bits_sie(uint_xlen_t mask) { + uint_xlen_t value; + __asm__ volatile ("csrrc %0, sie, %1" + : "=r" (value) /* output: register %0 */ + : "r" (mask) /* input : register */ + : /* clobbers: none */); + return value; +} +/* sie, CSR write value via immediate value (only up to 5 bits) */ +#define CSR_WRITE_IMM_SIE(VALUE) \ + __asm__ volatile ("csrrwi zero, sie, %0" \ + : /* output: none */ \ + : "i" (VALUE) /* input : immediate */ \ + : /* clobbers: none */) + +/* sie, CSR set bits via immediate value mask (only up to 5 bits) */ +#define CSR_SET_BITS_IMM_SIE(MASK) \ + __asm__ volatile ("csrrsi zero, sie, %0" \ + : /* output: none */ \ + : "i" (MASK) /* input : immediate */ \ + : /* clobbers: none */) + +/* sie, CSR clear bits via immediate value mask (only up to 5 bits) */ +#define CSR_CLR_BITS_IMM_SIE(MASK) \ + __asm__ volatile ("csrrci zero, sie, %0" \ + : /* output: none */ \ + : "i" (MASK) /* input : immediate */ \ + : /* clobbers: none */) +#define SIE_SSI_BIT_OFFSET 1 +#define SIE_SSI_BIT_WIDTH 1 +#define SIE_SSI_BIT_MASK 0x2 +#define SIE_SSI_ALL_SET_MASK 0x1 +#define SIE_STI_BIT_OFFSET 5 +#define SIE_STI_BIT_WIDTH 1 +#define SIE_STI_BIT_MASK 0x20 +#define SIE_STI_ALL_SET_MASK 0x1 +#define SIE_SEI_BIT_OFFSET 9 +#define SIE_SEI_BIT_WIDTH 1 +#define SIE_SEI_BIT_MASK 0x200 +#define SIE_SEI_ALL_SET_MASK 0x1 +#define SIE_USI_BIT_OFFSET 0 +#define SIE_USI_BIT_WIDTH 1 +#define SIE_USI_BIT_MASK 0x1 +#define SIE_USI_ALL_SET_MASK 0x1 +#define SIE_UTI_BIT_OFFSET 4 +#define SIE_UTI_BIT_WIDTH 1 +#define SIE_UTI_BIT_MASK 0x10 +#define SIE_UTI_ALL_SET_MASK 0x1 +#define SIE_UEI_BIT_OFFSET 8 +#define SIE_UEI_BIT_WIDTH 1 +#define SIE_UEI_BIT_MASK 0x100 +#define SIE_UEI_ALL_SET_MASK 0x1 + +/******************************************* + * ustatus - URW - User mode restricted view of mstatus + */ +static inline uint_xlen_t csr_read_ustatus(void) { + uint_xlen_t value; + __asm__ volatile ("csrr %0, ustatus" + : "=r" (value) /* output : register */ + : 
/* input : none */ + : /* clobbers: none */); + return value; +} +static inline void csr_write_ustatus(uint_xlen_t value) { + __asm__ volatile ("csrw ustatus, %0" + : /* output: none */ + : "r" (value) /* input : from register */ + : /* clobbers: none */); +} +static inline uint_xlen_t csr_read_write_ustatus(uint_xlen_t new_value) { + uint_xlen_t prev_value; + __asm__ volatile ("csrrw %0, ustatus, %1" + : "=r" (prev_value) /* output: register %0 */ + : "r" (new_value) /* input : register */ + : /* clobbers: none */); + return prev_value; +} +/* Register CSR bit set and clear instructions */ +static inline void csr_set_bits_ustatus(uint_xlen_t mask) { + __asm__ volatile ("csrrs zero, ustatus, %0" + : /* output: none */ + : "r" (mask) /* input : register */ + : /* clobbers: none */); +} +static inline void csr_clr_bits_ustatus(uint_xlen_t mask) { + __asm__ volatile ("csrrc zero, ustatus, %0" + : /* output: none */ + : "r" (mask) /* input : register */ + : /* clobbers: none */); +} +static inline uint_xlen_t csr_read_set_bits_ustatus(uint_xlen_t mask) { + uint_xlen_t value; + __asm__ volatile ("csrrs %0, ustatus, %1" + : "=r" (value) /* output: register %0 */ + : "r" (mask) /* input : register */ + : /* clobbers: none */); + return value; +} +static inline uint_xlen_t csr_read_clr_bits_ustatus(uint_xlen_t mask) { + uint_xlen_t value; + __asm__ volatile ("csrrc %0, ustatus, %1" + : "=r" (value) /* output: register %0 */ + : "r" (mask) /* input : register */ + : /* clobbers: none */); + return value; +} +/* ustatus, CSR write value via immediate value (only up to 5 bits) */ +#define CSR_WRITE_IMM_USTATUS(VALUE) \ + __asm__ volatile ("csrrwi zero, ustatus, %0" \ + : /* output: none */ \ + : "i" (VALUE) /* input : immediate */ \ + : /* clobbers: none */) + +/* ustatus, CSR set bits via immediate value mask (only up to 5 bits) */ +#define CSR_SET_BITS_IMM_USTATUS(MASK) \ + __asm__ volatile ("csrrsi zero, ustatus, %0" \ + : /* output: none */ \ + : "i" (MASK) /* input : 
immediate */ \ + : /* clobbers: none */) + +/* ustatus, CSR clear bits via immediate value mask (only up to 5 bits) */ +#define CSR_CLR_BITS_IMM_USTATUS(MASK) \ + __asm__ volatile ("csrrci zero, ustatus, %0" \ + : /* output: none */ \ + : "i" (MASK) /* input : immediate */ \ + : /* clobbers: none */) +#define USTATUS_UIE_BIT_OFFSET 1 +#define USTATUS_UIE_BIT_WIDTH 1 +#define USTATUS_UIE_BIT_MASK 0x2 +#define USTATUS_UIE_ALL_SET_MASK 0x1 +#define USTATUS_UPIE_BIT_OFFSET 3 +#define USTATUS_UPIE_BIT_WIDTH 1 +#define USTATUS_UPIE_BIT_MASK 0x8 +#define USTATUS_UPIE_ALL_SET_MASK 0x1 + +/******************************************* + * uip - URW - User Interrupt Pending + */ +static inline uint_xlen_t csr_read_uip(void) { + uint_xlen_t value; + __asm__ volatile ("csrr %0, uip" + : "=r" (value) /* output : register */ + : /* input : none */ + : /* clobbers: none */); + return value; +} +static inline void csr_write_uip(uint_xlen_t value) { + __asm__ volatile ("csrw uip, %0" + : /* output: none */ + : "r" (value) /* input : from register */ + : /* clobbers: none */); +} +static inline uint_xlen_t csr_read_write_uip(uint_xlen_t new_value) { + uint_xlen_t prev_value; + __asm__ volatile ("csrrw %0, uip, %1" + : "=r" (prev_value) /* output: register %0 */ + : "r" (new_value) /* input : register */ + : /* clobbers: none */); + return prev_value; +} +/* Register CSR bit set and clear instructions */ +static inline void csr_set_bits_uip(uint_xlen_t mask) { + __asm__ volatile ("csrrs zero, uip, %0" + : /* output: none */ + : "r" (mask) /* input : register */ + : /* clobbers: none */); +} +static inline void csr_clr_bits_uip(uint_xlen_t mask) { + __asm__ volatile ("csrrc zero, uip, %0" + : /* output: none */ + : "r" (mask) /* input : register */ + : /* clobbers: none */); +} +static inline uint_xlen_t csr_read_set_bits_uip(uint_xlen_t mask) { + uint_xlen_t value; + __asm__ volatile ("csrrs %0, uip, %1" + : "=r" (value) /* output: register %0 */ + : "r" (mask) /* input : register */ + 
: /* clobbers: none */); + return value; +} +static inline uint_xlen_t csr_read_clr_bits_uip(uint_xlen_t mask) { + uint_xlen_t value; + __asm__ volatile ("csrrc %0, uip, %1" + : "=r" (value) /* output: register %0 */ + : "r" (mask) /* input : register */ + : /* clobbers: none */); + return value; +} +/* uip, CSR write value via immediate value (only up to 5 bits) */ +#define CSR_WRITE_IMM_UIP(VALUE) \ + __asm__ volatile ("csrrwi zero, uip, %0" \ + : /* output: none */ \ + : "i" (VALUE) /* input : immediate */ \ + : /* clobbers: none */) + +/* uip, CSR set bits via immediate value mask (only up to 5 bits) */ +#define CSR_SET_BITS_IMM_UIP(MASK) \ + __asm__ volatile ("csrrsi zero, uip, %0" \ + : /* output: none */ \ + : "i" (MASK) /* input : immediate */ \ + : /* clobbers: none */) + +/* uip, CSR clear bits via immediate value mask (only up to 5 bits) */ +#define CSR_CLR_BITS_IMM_UIP(MASK) \ + __asm__ volatile ("csrrci zero, uip, %0" \ + : /* output: none */ \ + : "i" (MASK) /* input : immediate */ \ + : /* clobbers: none */) +#define UIP_USI_BIT_OFFSET 0 +#define UIP_USI_BIT_WIDTH 1 +#define UIP_USI_BIT_MASK 0x1 +#define UIP_USI_ALL_SET_MASK 0x1 +#define UIP_UTI_BIT_OFFSET 4 +#define UIP_UTI_BIT_WIDTH 1 +#define UIP_UTI_BIT_MASK 0x10 +#define UIP_UTI_ALL_SET_MASK 0x1 +#define UIP_UEI_BIT_OFFSET 8 +#define UIP_UEI_BIT_WIDTH 1 +#define UIP_UEI_BIT_MASK 0x100 +#define UIP_UEI_ALL_SET_MASK 0x1 + +/******************************************* + * uie - URW - User Interrupt Enable + */ +static inline uint_xlen_t csr_read_uie(void) { + uint_xlen_t value; + __asm__ volatile ("csrr %0, uie" + : "=r" (value) /* output : register */ + : /* input : none */ + : /* clobbers: none */); + return value; +} +static inline void csr_write_uie(uint_xlen_t value) { + __asm__ volatile ("csrw uie, %0" + : /* output: none */ + : "r" (value) /* input : from register */ + : /* clobbers: none */); +} +static inline uint_xlen_t csr_read_write_uie(uint_xlen_t new_value) { + uint_xlen_t prev_value; 
+ __asm__ volatile ("csrrw %0, uie, %1" + : "=r" (prev_value) /* output: register %0 */ + : "r" (new_value) /* input : register */ + : /* clobbers: none */); + return prev_value; +} +/* Register CSR bit set and clear instructions */ +static inline void csr_set_bits_uie(uint_xlen_t mask) { + __asm__ volatile ("csrrs zero, uie, %0" + : /* output: none */ + : "r" (mask) /* input : register */ + : /* clobbers: none */); +} +static inline void csr_clr_bits_uie(uint_xlen_t mask) { + __asm__ volatile ("csrrc zero, uie, %0" + : /* output: none */ + : "r" (mask) /* input : register */ + : /* clobbers: none */); +} +static inline uint_xlen_t csr_read_set_bits_uie(uint_xlen_t mask) { + uint_xlen_t value; + __asm__ volatile ("csrrs %0, uie, %1" + : "=r" (value) /* output: register %0 */ + : "r" (mask) /* input : register */ + : /* clobbers: none */); + return value; +} +static inline uint_xlen_t csr_read_clr_bits_uie(uint_xlen_t mask) { + uint_xlen_t value; + __asm__ volatile ("csrrc %0, uie, %1" + : "=r" (value) /* output: register %0 */ + : "r" (mask) /* input : register */ + : /* clobbers: none */); + return value; +} +/* uie, CSR write value via immediate value (only up to 5 bits) */ +#define CSR_WRITE_IMM_UIE(VALUE) \ + __asm__ volatile ("csrrwi zero, uie, %0" \ + : /* output: none */ \ + : "i" (VALUE) /* input : immediate */ \ + : /* clobbers: none */) + +/* uie, CSR set bits via immediate value mask (only up to 5 bits) */ +#define CSR_SET_BITS_IMM_UIE(MASK) \ + __asm__ volatile ("csrrsi zero, uie, %0" \ + : /* output: none */ \ + : "i" (MASK) /* input : immediate */ \ + : /* clobbers: none */) + +/* uie, CSR clear bits via immediate value mask (only up to 5 bits) */ +#define CSR_CLR_BITS_IMM_UIE(MASK) \ + __asm__ volatile ("csrrci zero, uie, %0" \ + : /* output: none */ \ + : "i" (MASK) /* input : immediate */ \ + : /* clobbers: none */) +#define UIE_USI_BIT_OFFSET 0 +#define UIE_USI_BIT_WIDTH 1 +#define UIE_USI_BIT_MASK 0x1 +#define UIE_USI_ALL_SET_MASK 0x1 +#define 
UIE_UTI_BIT_OFFSET 4 +#define UIE_UTI_BIT_WIDTH 1 +#define UIE_UTI_BIT_MASK 0x10 +#define UIE_UTI_ALL_SET_MASK 0x1 +#define UIE_UEI_BIT_OFFSET 8 +#define UIE_UEI_BIT_WIDTH 1 +#define UIE_UEI_BIT_MASK 0x100 +#define UIE_UEI_ALL_SET_MASK 0x1 + +/******************************************* + * uscratch - URW - User Mode Scratch Register + */ +static inline uint_xlen_t csr_read_uscratch(void) { + uint_xlen_t value; + __asm__ volatile ("csrr %0, uscratch" + : "=r" (value) /* output : register */ + : /* input : none */ + : /* clobbers: none */); + return value; +} +static inline void csr_write_uscratch(uint_xlen_t value) { + __asm__ volatile ("csrw uscratch, %0" + : /* output: none */ + : "r" (value) /* input : from register */ + : /* clobbers: none */); +} +static inline uint_xlen_t csr_read_write_uscratch(uint_xlen_t new_value) { + uint_xlen_t prev_value; + __asm__ volatile ("csrrw %0, uscratch, %1" + : "=r" (prev_value) /* output: register %0 */ + : "r" (new_value) /* input : register */ + : /* clobbers: none */); + return prev_value; +} + +/******************************************* + * uepc - URW - User Exception Program Counter + */ +static inline uint_xlen_t csr_read_uepc(void) { + uint_xlen_t value; + __asm__ volatile ("csrr %0, uepc" + : "=r" (value) /* output : register */ + : /* input : none */ + : /* clobbers: none */); + return value; +} +static inline void csr_write_uepc(uint_xlen_t value) { + __asm__ volatile ("csrw uepc, %0" + : /* output: none */ + : "r" (value) /* input : from register */ + : /* clobbers: none */); +} +static inline uint_xlen_t csr_read_write_uepc(uint_xlen_t new_value) { + uint_xlen_t prev_value; + __asm__ volatile ("csrrw %0, uepc, %1" + : "=r" (prev_value) /* output: register %0 */ + : "r" (new_value) /* input : register */ + : /* clobbers: none */); + return prev_value; +} + +/******************************************* + * ucause - URW - User Exception Cause + */ +static inline uint_xlen_t csr_read_ucause(void) { + uint_xlen_t 
value; + __asm__ volatile ("csrr %0, ucause" + : "=r" (value) /* output : register */ + : /* input : none */ + : /* clobbers: none */); + return value; +} +static inline void csr_write_ucause(uint_xlen_t value) { + __asm__ volatile ("csrw ucause, %0" + : /* output: none */ + : "r" (value) /* input : from register */ + : /* clobbers: none */); +} +static inline uint_xlen_t csr_read_write_ucause(uint_xlen_t new_value) { + uint_xlen_t prev_value; + __asm__ volatile ("csrrw %0, ucause, %1" + : "=r" (prev_value) /* output: register %0 */ + : "r" (new_value) /* input : register */ + : /* clobbers: none */); + return prev_value; +} +/* Register CSR bit set and clear instructions */ +static inline void csr_set_bits_ucause(uint_xlen_t mask) { + __asm__ volatile ("csrrs zero, ucause, %0" + : /* output: none */ + : "r" (mask) /* input : register */ + : /* clobbers: none */); +} +static inline void csr_clr_bits_ucause(uint_xlen_t mask) { + __asm__ volatile ("csrrc zero, ucause, %0" + : /* output: none */ + : "r" (mask) /* input : register */ + : /* clobbers: none */); +} +static inline uint_xlen_t csr_read_set_bits_ucause(uint_xlen_t mask) { + uint_xlen_t value; + __asm__ volatile ("csrrs %0, ucause, %1" + : "=r" (value) /* output: register %0 */ + : "r" (mask) /* input : register */ + : /* clobbers: none */); + return value; +} +static inline uint_xlen_t csr_read_clr_bits_ucause(uint_xlen_t mask) { + uint_xlen_t value; + __asm__ volatile ("csrrc %0, ucause, %1" + : "=r" (value) /* output: register %0 */ + : "r" (mask) /* input : register */ + : /* clobbers: none */); + return value; +} +/* ucause, CSR write value via immediate value (only up to 5 bits) */ +#define CSR_WRITE_IMM_UCAUSE(VALUE) \ + __asm__ volatile ("csrrwi zero, ucause, %0" \ + : /* output: none */ \ + : "i" (VALUE) /* input : immediate */ \ + : /* clobbers: none */) + +/* ucause, CSR set bits via immediate value mask (only up to 5 bits) */ +#define CSR_SET_BITS_IMM_UCAUSE(MASK) \ + __asm__ volatile ("csrrsi 
zero, ucause, %0" \ + : /* output: none */ \ + : "i" (MASK) /* input : immediate */ \ + : /* clobbers: none */) + +/* ucause, CSR clear bits via immediate value mask (only up to 5 bits) */ +#define CSR_CLR_BITS_IMM_UCAUSE(MASK) \ + __asm__ volatile ("csrrci zero, ucause, %0" \ + : /* output: none */ \ + : "i" (MASK) /* input : immediate */ \ + : /* clobbers: none */) +#define UCAUSE_INTERRUPT_BIT_OFFSET (__riscv_xlen-1) +#define UCAUSE_INTERRUPT_BIT_WIDTH 1 +#define UCAUSE_INTERRUPT_BIT_MASK (0x1UL << ((__riscv_xlen-1))) +#define UCAUSE_INTERRUPT_ALL_SET_MASK 0x1 +#define UCAUSE_EXCEPTION_CODE_BIT_OFFSET 0 +#define UCAUSE_EXCEPTION_CODE_BIT_WIDTH ((__riscv_xlen-2)-(0) + 1) +#define UCAUSE_EXCEPTION_CODE_BIT_MASK ((1UL<<(((__riscv_xlen-2)-(0) + 1)-1)) << (0)) +#define UCAUSE_EXCEPTION_CODE_ALL_SET_MASK ((1UL<<(((__riscv_xlen-2)-(0) + 1)-1)) << (0)) + +/******************************************* + * utvec - URW - User Trap Vector Base Address + */ +static inline uint_xlen_t csr_read_utvec(void) { + uint_xlen_t value; + __asm__ volatile ("csrr %0, utvec" + : "=r" (value) /* output : register */ + : /* input : none */ + : /* clobbers: none */); + return value; +} +static inline void csr_write_utvec(uint_xlen_t value) { + __asm__ volatile ("csrw utvec, %0" + : /* output: none */ + : "r" (value) /* input : from register */ + : /* clobbers: none */); +} +static inline uint_xlen_t csr_read_write_utvec(uint_xlen_t new_value) { + uint_xlen_t prev_value; + __asm__ volatile ("csrrw %0, utvec, %1" + : "=r" (prev_value) /* output: register %0 */ + : "r" (new_value) /* input : register */ + : /* clobbers: none */); + return prev_value; +} +/* Register CSR bit set and clear instructions */ +static inline void csr_set_bits_utvec(uint_xlen_t mask) { + __asm__ volatile ("csrrs zero, utvec, %0" + : /* output: none */ + : "r" (mask) /* input : register */ + : /* clobbers: none */); +} +static inline void csr_clr_bits_utvec(uint_xlen_t mask) { + __asm__ volatile ("csrrc zero, utvec, 
%0" + : /* output: none */ + : "r" (mask) /* input : register */ + : /* clobbers: none */); +} +static inline uint_xlen_t csr_read_set_bits_utvec(uint_xlen_t mask) { + uint_xlen_t value; + __asm__ volatile ("csrrs %0, utvec, %1" + : "=r" (value) /* output: register %0 */ + : "r" (mask) /* input : register */ + : /* clobbers: none */); + return value; +} +static inline uint_xlen_t csr_read_clr_bits_utvec(uint_xlen_t mask) { + uint_xlen_t value; + __asm__ volatile ("csrrc %0, utvec, %1" + : "=r" (value) /* output: register %0 */ + : "r" (mask) /* input : register */ + : /* clobbers: none */); + return value; +} +/* utvec, CSR write value via immediate value (only up to 5 bits) */ +#define CSR_WRITE_IMM_UTVEC(VALUE) \ + __asm__ volatile ("csrrwi zero, utvec, %0" \ + : /* output: none */ \ + : "i" (VALUE) /* input : immediate */ \ + : /* clobbers: none */) + +/* utvec, CSR set bits via immediate value mask (only up to 5 bits) */ +#define CSR_SET_BITS_IMM_UTVEC(MASK) \ + __asm__ volatile ("csrrsi zero, utvec, %0" \ + : /* output: none */ \ + : "i" (MASK) /* input : immediate */ \ + : /* clobbers: none */) + +/* utvec, CSR clear bits via immediate value mask (only up to 5 bits) */ +#define CSR_CLR_BITS_IMM_UTVEC(MASK) \ + __asm__ volatile ("csrrci zero, utvec, %0" \ + : /* output: none */ \ + : "i" (MASK) /* input : immediate */ \ + : /* clobbers: none */) +#define UTVEC_BASE_BIT_OFFSET 2 +#define UTVEC_BASE_BIT_WIDTH ((__riscv_xlen-1)-(2) + 1) +#define UTVEC_BASE_BIT_MASK ((1UL<<(((__riscv_xlen-1)-(2) + 1)-1)) << (2)) +#define UTVEC_BASE_ALL_SET_MASK ((1UL<<(((__riscv_xlen-1)-(2) + 1)-1)) << (0)) +#define UTVEC_MODE_BIT_OFFSET 0 +#define UTVEC_MODE_BIT_WIDTH 2 +#define UTVEC_MODE_BIT_MASK 0x3 +#define UTVEC_MODE_ALL_SET_MASK 0x3 + +/******************************************* + * utval - URW - User Trap Value + */ +static inline uint_xlen_t csr_read_utval(void) { + uint_xlen_t value; + __asm__ volatile ("csrr %0, utval" + : "=r" (value) /* output : register */ + : /* 
input : none */ + : /* clobbers: none */); + return value; +} +static inline void csr_write_utval(uint_xlen_t value) { + __asm__ volatile ("csrw utval, %0" + : /* output: none */ + : "r" (value) /* input : from register */ + : /* clobbers: none */); +} +static inline uint_xlen_t csr_read_write_utval(uint_xlen_t new_value) { + uint_xlen_t prev_value; + __asm__ volatile ("csrrw %0, utval, %1" + : "=r" (prev_value) /* output: register %0 */ + : "r" (new_value) /* input : register */ + : /* clobbers: none */); + return prev_value; +} + +/******************************************* + * fflags - URW - Floating-Point Accrued Exceptions. + */ +static inline uint_xlen_t csr_read_fflags(void) { + uint_xlen_t value; + __asm__ volatile ("csrr %0, fflags" + : "=r" (value) /* output : register */ + : /* input : none */ + : /* clobbers: none */); + return value; +} +static inline void csr_write_fflags(uint_xlen_t value) { + __asm__ volatile ("csrw fflags, %0" + : /* output: none */ + : "r" (value) /* input : from register */ + : /* clobbers: none */); +} +static inline uint_xlen_t csr_read_write_fflags(uint_xlen_t new_value) { + uint_xlen_t prev_value; + __asm__ volatile ("csrrw %0, fflags, %1" + : "=r" (prev_value) /* output: register %0 */ + : "r" (new_value) /* input : register */ + : /* clobbers: none */); + return prev_value; +} + +/******************************************* + * frm - URW - Floating-Point Dynamic Rounding Mode. 
+ */ +static inline uint_xlen_t csr_read_frm(void) { + uint_xlen_t value; + __asm__ volatile ("csrr %0, frm" + : "=r" (value) /* output : register */ + : /* input : none */ + : /* clobbers: none */); + return value; +} +static inline void csr_write_frm(uint_xlen_t value) { + __asm__ volatile ("csrw frm, %0" + : /* output: none */ + : "r" (value) /* input : from register */ + : /* clobbers: none */); +} +static inline uint_xlen_t csr_read_write_frm(uint_xlen_t new_value) { + uint_xlen_t prev_value; + __asm__ volatile ("csrrw %0, frm, %1" + : "=r" (prev_value) /* output: register %0 */ + : "r" (new_value) /* input : register */ + : /* clobbers: none */); + return prev_value; +} + +/******************************************* + * fcsr - URW - Floating-Point Control and Status + */ +static inline uint_xlen_t csr_read_fcsr(void) { + uint_xlen_t value; + __asm__ volatile ("csrr %0, fcsr" + : "=r" (value) /* output : register */ + : /* input : none */ + : /* clobbers: none */); + return value; +} +static inline void csr_write_fcsr(uint_xlen_t value) { + __asm__ volatile ("csrw fcsr, %0" + : /* output: none */ + : "r" (value) /* input : from register */ + : /* clobbers: none */); +} +static inline uint_xlen_t csr_read_write_fcsr(uint_xlen_t new_value) { + uint_xlen_t prev_value; + __asm__ volatile ("csrrw %0, fcsr, %1" + : "=r" (prev_value) /* output: register %0 */ + : "r" (new_value) /* input : register */ + : /* clobbers: none */); + return prev_value; +} + +/******************************************* + * cycle - URO - Cycle counter for RDCYCLE instruction. + */ +static inline uint_xlen_t csr_read_cycle(void) { + uint_xlen_t value; + __asm__ volatile ("csrr %0, cycle" + : "=r" (value) /* output : register */ + : /* input : none */ + : /* clobbers: none */); + return value; +} + +/******************************************* + * time - URO - Timer for RDTIME instruction. 
+ */ +static inline uint_xlen_t csr_read_time(void) { + uint_xlen_t value; + __asm__ volatile ("csrr %0, time" + : "=r" (value) /* output : register */ + : /* input : none */ + : /* clobbers: none */); + return value; +} + +/******************************************* + * instret - URO - Instructions-retired counter for RDINSTRET instruction. + */ +static inline uint_xlen_t csr_read_instret(void) { + uint_xlen_t value; + __asm__ volatile ("csrr %0, instret" + : "=r" (value) /* output : register */ + : /* input : none */ + : /* clobbers: none */); + return value; +} + +/******************************************* + * hpmcounter3 - URO - Performance-monitoring counter. + */ +static inline uint_xlen_t csr_read_hpmcounter3(void) { + uint_xlen_t value; + __asm__ volatile ("csrr %0, hpmcounter3" + : "=r" (value) /* output : register */ + : /* input : none */ + : /* clobbers: none */); + return value; +} + +/******************************************* + * hpmcounter4 - URO - Performance-monitoring counter. + */ +static inline uint_xlen_t csr_read_hpmcounter4(void) { + uint_xlen_t value; + __asm__ volatile ("csrr %0, hpmcounter4" + : "=r" (value) /* output : register */ + : /* input : none */ + : /* clobbers: none */); + return value; +} + +/******************************************* + * hpmcounter31 - URO - Performance-monitoring counter. + */ +static inline uint_xlen_t csr_read_hpmcounter31(void) { + uint_xlen_t value; + __asm__ volatile ("csrr %0, hpmcounter31" + : "=r" (value) /* output : register */ + : /* input : none */ + : /* clobbers: none */); + return value; +} + +/******************************************* + * cycleh - URO - Upper 32 bits of cycle, RV32I only. 
+ */ +static inline uint_xlen_t csr_read_cycleh(void) { + uint_xlen_t value; + __asm__ volatile ("csrr %0, cycleh" + : "=r" (value) /* output : register */ + : /* input : none */ + : /* clobbers: none */); + return value; +} + +/******************************************* + * timeh - URO - Upper 32 bits of time, RV32I only. + */ +static inline uint_xlen_t csr_read_timeh(void) { + uint_xlen_t value; + __asm__ volatile ("csrr %0, timeh" + : "=r" (value) /* output : register */ + : /* input : none */ + : /* clobbers: none */); + return value; +} + +/******************************************* + * instreth - URO - Upper 32 bits of instret, RV32I only. + */ +static inline uint_xlen_t csr_read_instreth(void) { + uint_xlen_t value; + __asm__ volatile ("csrr %0, instreth" + : "=r" (value) /* output : register */ + : /* input : none */ + : /* clobbers: none */); + return value; +} + +/******************************************* + * hpmcounter3h - URO - Upper 32 bits of hpmcounter3, RV32I only. + */ +static inline uint_xlen_t csr_read_hpmcounter3h(void) { + uint_xlen_t value; + __asm__ volatile ("csrr %0, hpmcounter3h" + : "=r" (value) /* output : register */ + : /* input : none */ + : /* clobbers: none */); + return value; +} + +/******************************************* + * hpmcounter4h - URO - Upper 32 bits of hpmcounter4, RV32I only. + */ +static inline uint_xlen_t csr_read_hpmcounter4h(void) { + uint_xlen_t value; + __asm__ volatile ("csrr %0, hpmcounter4h" + : "=r" (value) /* output : register */ + : /* input : none */ + : /* clobbers: none */); + return value; +} + +/******************************************* + * hpmcounter31h - URO - Upper 32 bits of hpmcounter31, RV32I only. 
+ */ +static inline uint_xlen_t csr_read_hpmcounter31h(void) { + uint_xlen_t value; + __asm__ volatile ("csrr %0, hpmcounter31h" + : "=r" (value) /* output : register */ + : /* input : none */ + : /* clobbers: none */); + return value; +} + +/******************************************* + * stval - SRW - Supervisor bad address or instruction. + */ +static inline uint_xlen_t csr_read_stval(void) { + uint_xlen_t value; + __asm__ volatile ("csrr %0, stval" + : "=r" (value) /* output : register */ + : /* input : none */ + : /* clobbers: none */); + return value; +} +static inline void csr_write_stval(uint_xlen_t value) { + __asm__ volatile ("csrw stval, %0" + : /* output: none */ + : "r" (value) /* input : from register */ + : /* clobbers: none */); +} +static inline uint_xlen_t csr_read_write_stval(uint_xlen_t new_value) { + uint_xlen_t prev_value; + __asm__ volatile ("csrrw %0, stval, %1" + : "=r" (prev_value) /* output: register %0 */ + : "r" (new_value) /* input : register */ + : /* clobbers: none */); + return prev_value; +} + +/******************************************* + * satp - SRW - Supervisor address translation and protection. + */ +static inline uint_xlen_t csr_read_satp(void) { + uint_xlen_t value; + __asm__ volatile ("csrr %0, satp" + : "=r" (value) /* output : register */ + : /* input : none */ + : /* clobbers: none */); + return value; +} +static inline void csr_write_satp(uint_xlen_t value) { + __asm__ volatile ("csrw satp, %0" + : /* output: none */ + : "r" (value) /* input : from register */ + : /* clobbers: none */); +} +static inline uint_xlen_t csr_read_write_satp(uint_xlen_t new_value) { + uint_xlen_t prev_value; + __asm__ volatile ("csrrw %0, satp, %1" + : "=r" (prev_value) /* output: register %0 */ + : "r" (new_value) /* input : register */ + : /* clobbers: none */); + return prev_value; +} + +/******************************************* + * hstatus - HRW - Hypervisor status register. 
+ */ +static inline uint_xlen_t csr_read_hstatus(void) { + uint_xlen_t value; + __asm__ volatile ("csrr %0, hstatus" + : "=r" (value) /* output : register */ + : /* input : none */ + : /* clobbers: none */); + return value; +} +static inline void csr_write_hstatus(uint_xlen_t value) { + __asm__ volatile ("csrw hstatus, %0" + : /* output: none */ + : "r" (value) /* input : from register */ + : /* clobbers: none */); +} +static inline uint_xlen_t csr_read_write_hstatus(uint_xlen_t new_value) { + uint_xlen_t prev_value; + __asm__ volatile ("csrrw %0, hstatus, %1" + : "=r" (prev_value) /* output: register %0 */ + : "r" (new_value) /* input : register */ + : /* clobbers: none */); + return prev_value; +} + +/******************************************* + * hedeleg - HRW - Hypervisor exception delegation register. + */ +static inline uint_xlen_t csr_read_hedeleg(void) { + uint_xlen_t value; + __asm__ volatile ("csrr %0, hedeleg" + : "=r" (value) /* output : register */ + : /* input : none */ + : /* clobbers: none */); + return value; +} +static inline void csr_write_hedeleg(uint_xlen_t value) { + __asm__ volatile ("csrw hedeleg, %0" + : /* output: none */ + : "r" (value) /* input : from register */ + : /* clobbers: none */); +} +static inline uint_xlen_t csr_read_write_hedeleg(uint_xlen_t new_value) { + uint_xlen_t prev_value; + __asm__ volatile ("csrrw %0, hedeleg, %1" + : "=r" (prev_value) /* output: register %0 */ + : "r" (new_value) /* input : register */ + : /* clobbers: none */); + return prev_value; +} + +/******************************************* + * hideleg - HRW - Hypervisor interrupt delegation register. 
+ */ +static inline uint_xlen_t csr_read_hideleg(void) { + uint_xlen_t value; + __asm__ volatile ("csrr %0, hideleg" + : "=r" (value) /* output : register */ + : /* input : none */ + : /* clobbers: none */); + return value; +} +static inline void csr_write_hideleg(uint_xlen_t value) { + __asm__ volatile ("csrw hideleg, %0" + : /* output: none */ + : "r" (value) /* input : from register */ + : /* clobbers: none */); +} +static inline uint_xlen_t csr_read_write_hideleg(uint_xlen_t new_value) { + uint_xlen_t prev_value; + __asm__ volatile ("csrrw %0, hideleg, %1" + : "=r" (prev_value) /* output: register %0 */ + : "r" (new_value) /* input : register */ + : /* clobbers: none */); + return prev_value; +} + +/******************************************* + * hcounteren - HRW - Hypervisor counter enable. + */ +static inline uint_xlen_t csr_read_hcounteren(void) { + uint_xlen_t value; + __asm__ volatile ("csrr %0, hcounteren" + : "=r" (value) /* output : register */ + : /* input : none */ + : /* clobbers: none */); + return value; +} +static inline void csr_write_hcounteren(uint_xlen_t value) { + __asm__ volatile ("csrw hcounteren, %0" + : /* output: none */ + : "r" (value) /* input : from register */ + : /* clobbers: none */); +} +static inline uint_xlen_t csr_read_write_hcounteren(uint_xlen_t new_value) { + uint_xlen_t prev_value; + __asm__ volatile ("csrrw %0, hcounteren, %1" + : "=r" (prev_value) /* output: register %0 */ + : "r" (new_value) /* input : register */ + : /* clobbers: none */); + return prev_value; +} + +/******************************************* + * hgatp - HRW - Hypervisor guest address translation and protection. 
+ */ +static inline uint_xlen_t csr_read_hgatp(void) { + uint_xlen_t value; + __asm__ volatile ("csrr %0, hgatp" + : "=r" (value) /* output : register */ + : /* input : none */ + : /* clobbers: none */); + return value; +} +static inline void csr_write_hgatp(uint_xlen_t value) { + __asm__ volatile ("csrw hgatp, %0" + : /* output: none */ + : "r" (value) /* input : from register */ + : /* clobbers: none */); +} +static inline uint_xlen_t csr_read_write_hgatp(uint_xlen_t new_value) { + uint_xlen_t prev_value; + __asm__ volatile ("csrrw %0, hgatp, %1" + : "=r" (prev_value) /* output: register %0 */ + : "r" (new_value) /* input : register */ + : /* clobbers: none */); + return prev_value; +} + +/******************************************* + * htimedelta - HRW - Delta for VS/VU-mode timer. + */ +static inline uint_xlen_t csr_read_htimedelta(void) { + uint_xlen_t value; + __asm__ volatile ("csrr %0, htimedelta" + : "=r" (value) /* output : register */ + : /* input : none */ + : /* clobbers: none */); + return value; +} +static inline void csr_write_htimedelta(uint_xlen_t value) { + __asm__ volatile ("csrw htimedelta, %0" + : /* output: none */ + : "r" (value) /* input : from register */ + : /* clobbers: none */); +} +static inline uint_xlen_t csr_read_write_htimedelta(uint_xlen_t new_value) { + uint_xlen_t prev_value; + __asm__ volatile ("csrrw %0, htimedelta, %1" + : "=r" (prev_value) /* output: register %0 */ + : "r" (new_value) /* input : register */ + : /* clobbers: none */); + return prev_value; +} + +/******************************************* + * htimedeltah - HRW - Upper 32 bits of htimedelta, RV32I only. 
+ */ +static inline uint_xlen_t csr_read_htimedeltah(void) { + uint_xlen_t value; + __asm__ volatile ("csrr %0, htimedeltah" + : "=r" (value) /* output : register */ + : /* input : none */ + : /* clobbers: none */); + return value; +} +static inline void csr_write_htimedeltah(uint_xlen_t value) { + __asm__ volatile ("csrw htimedeltah, %0" + : /* output: none */ + : "r" (value) /* input : from register */ + : /* clobbers: none */); +} +static inline uint_xlen_t csr_read_write_htimedeltah(uint_xlen_t new_value) { + uint_xlen_t prev_value; + __asm__ volatile ("csrrw %0, htimedeltah, %1" + : "=r" (prev_value) /* output: register %0 */ + : "r" (new_value) /* input : register */ + : /* clobbers: none */); + return prev_value; +} + +/******************************************* + * vsstatus - HRW - Virtual supervisor status register. + */ +static inline uint_xlen_t csr_read_vsstatus(void) { + uint_xlen_t value; + __asm__ volatile ("csrr %0, vsstatus" + : "=r" (value) /* output : register */ + : /* input : none */ + : /* clobbers: none */); + return value; +} +static inline void csr_write_vsstatus(uint_xlen_t value) { + __asm__ volatile ("csrw vsstatus, %0" + : /* output: none */ + : "r" (value) /* input : from register */ + : /* clobbers: none */); +} +static inline uint_xlen_t csr_read_write_vsstatus(uint_xlen_t new_value) { + uint_xlen_t prev_value; + __asm__ volatile ("csrrw %0, vsstatus, %1" + : "=r" (prev_value) /* output: register %0 */ + : "r" (new_value) /* input : register */ + : /* clobbers: none */); + return prev_value; +} + +/******************************************* + * vsie - HRW - Virtual supervisor interrupt-enable register. 
+ */ +static inline uint_xlen_t csr_read_vsie(void) { + uint_xlen_t value; + __asm__ volatile ("csrr %0, vsie" + : "=r" (value) /* output : register */ + : /* input : none */ + : /* clobbers: none */); + return value; +} +static inline void csr_write_vsie(uint_xlen_t value) { + __asm__ volatile ("csrw vsie, %0" + : /* output: none */ + : "r" (value) /* input : from register */ + : /* clobbers: none */); +} +static inline uint_xlen_t csr_read_write_vsie(uint_xlen_t new_value) { + uint_xlen_t prev_value; + __asm__ volatile ("csrrw %0, vsie, %1" + : "=r" (prev_value) /* output: register %0 */ + : "r" (new_value) /* input : register */ + : /* clobbers: none */); + return prev_value; +} + +/******************************************* + * vstvec - HRW - Virtual supervisor trap handler base address. + */ +static inline uint_xlen_t csr_read_vstvec(void) { + uint_xlen_t value; + __asm__ volatile ("csrr %0, vstvec" + : "=r" (value) /* output : register */ + : /* input : none */ + : /* clobbers: none */); + return value; +} +static inline void csr_write_vstvec(uint_xlen_t value) { + __asm__ volatile ("csrw vstvec, %0" + : /* output: none */ + : "r" (value) /* input : from register */ + : /* clobbers: none */); +} +static inline uint_xlen_t csr_read_write_vstvec(uint_xlen_t new_value) { + uint_xlen_t prev_value; + __asm__ volatile ("csrrw %0, vstvec, %1" + : "=r" (prev_value) /* output: register %0 */ + : "r" (new_value) /* input : register */ + : /* clobbers: none */); + return prev_value; +} + +/******************************************* + * vsscratch - HRW - Virtual supervisor scratch register. 
+ */ +static inline uint_xlen_t csr_read_vsscratch(void) { + uint_xlen_t value; + __asm__ volatile ("csrr %0, vsscratch" + : "=r" (value) /* output : register */ + : /* input : none */ + : /* clobbers: none */); + return value; +} +static inline void csr_write_vsscratch(uint_xlen_t value) { + __asm__ volatile ("csrw vsscratch, %0" + : /* output: none */ + : "r" (value) /* input : from register */ + : /* clobbers: none */); +} +static inline uint_xlen_t csr_read_write_vsscratch(uint_xlen_t new_value) { + uint_xlen_t prev_value; + __asm__ volatile ("csrrw %0, vsscratch, %1" + : "=r" (prev_value) /* output: register %0 */ + : "r" (new_value) /* input : register */ + : /* clobbers: none */); + return prev_value; +} + +/******************************************* + * vsepc - HRW - Virtual supervisor exception program counter. + */ +static inline uint_xlen_t csr_read_vsepc(void) { + uint_xlen_t value; + __asm__ volatile ("csrr %0, vsepc" + : "=r" (value) /* output : register */ + : /* input : none */ + : /* clobbers: none */); + return value; +} +static inline void csr_write_vsepc(uint_xlen_t value) { + __asm__ volatile ("csrw vsepc, %0" + : /* output: none */ + : "r" (value) /* input : from register */ + : /* clobbers: none */); +} +static inline uint_xlen_t csr_read_write_vsepc(uint_xlen_t new_value) { + uint_xlen_t prev_value; + __asm__ volatile ("csrrw %0, vsepc, %1" + : "=r" (prev_value) /* output: register %0 */ + : "r" (new_value) /* input : register */ + : /* clobbers: none */); + return prev_value; +} + +/******************************************* + * vscause - HRW - Virtual supervisor trap cause. 
+ */ +static inline uint_xlen_t csr_read_vscause(void) { + uint_xlen_t value; + __asm__ volatile ("csrr %0, vscause" + : "=r" (value) /* output : register */ + : /* input : none */ + : /* clobbers: none */); + return value; +} +static inline void csr_write_vscause(uint_xlen_t value) { + __asm__ volatile ("csrw vscause, %0" + : /* output: none */ + : "r" (value) /* input : from register */ + : /* clobbers: none */); +} +static inline uint_xlen_t csr_read_write_vscause(uint_xlen_t new_value) { + uint_xlen_t prev_value; + __asm__ volatile ("csrrw %0, vscause, %1" + : "=r" (prev_value) /* output: register %0 */ + : "r" (new_value) /* input : register */ + : /* clobbers: none */); + return prev_value; +} + +/******************************************* + * vstval - HRW - Virtual supervisor bad address or instruction. + */ +static inline uint_xlen_t csr_read_vstval(void) { + uint_xlen_t value; + __asm__ volatile ("csrr %0, vstval" + : "=r" (value) /* output : register */ + : /* input : none */ + : /* clobbers: none */); + return value; +} +static inline void csr_write_vstval(uint_xlen_t value) { + __asm__ volatile ("csrw vstval, %0" + : /* output: none */ + : "r" (value) /* input : from register */ + : /* clobbers: none */); +} +static inline uint_xlen_t csr_read_write_vstval(uint_xlen_t new_value) { + uint_xlen_t prev_value; + __asm__ volatile ("csrrw %0, vstval, %1" + : "=r" (prev_value) /* output: register %0 */ + : "r" (new_value) /* input : register */ + : /* clobbers: none */); + return prev_value; +} + +/******************************************* + * vsip - HRW - Virtual supervisor interrupt pending. 
+ */ +static inline uint_xlen_t csr_read_vsip(void) { + uint_xlen_t value; + __asm__ volatile ("csrr %0, vsip" + : "=r" (value) /* output : register */ + : /* input : none */ + : /* clobbers: none */); + return value; +} +static inline void csr_write_vsip(uint_xlen_t value) { + __asm__ volatile ("csrw vsip, %0" + : /* output: none */ + : "r" (value) /* input : from register */ + : /* clobbers: none */); +} +static inline uint_xlen_t csr_read_write_vsip(uint_xlen_t new_value) { + uint_xlen_t prev_value; + __asm__ volatile ("csrrw %0, vsip, %1" + : "=r" (prev_value) /* output: register %0 */ + : "r" (new_value) /* input : register */ + : /* clobbers: none */); + return prev_value; +} + +/******************************************* + * vsatp - HRW - Virtual supervisor address translation and protection. + */ +static inline uint_xlen_t csr_read_vsatp(void) { + uint_xlen_t value; + __asm__ volatile ("csrr %0, vsatp" + : "=r" (value) /* output : register */ + : /* input : none */ + : /* clobbers: none */); + return value; +} +static inline void csr_write_vsatp(uint_xlen_t value) { + __asm__ volatile ("csrw vsatp, %0" + : /* output: none */ + : "r" (value) /* input : from register */ + : /* clobbers: none */); +} +static inline uint_xlen_t csr_read_write_vsatp(uint_xlen_t new_value) { + uint_xlen_t prev_value; + __asm__ volatile ("csrrw %0, vsatp, %1" + : "=r" (prev_value) /* output: register %0 */ + : "r" (new_value) /* input : register */ + : /* clobbers: none */); + return prev_value; +} + +/******************************************* + * mbase - MRW - Base register. 
+ */ +static inline uint_xlen_t csr_read_mbase(void) { + uint_xlen_t value; + __asm__ volatile ("csrr %0, mbase" + : "=r" (value) /* output : register */ + : /* input : none */ + : /* clobbers: none */); + return value; +} +static inline void csr_write_mbase(uint_xlen_t value) { + __asm__ volatile ("csrw mbase, %0" + : /* output: none */ + : "r" (value) /* input : from register */ + : /* clobbers: none */); +} +static inline uint_xlen_t csr_read_write_mbase(uint_xlen_t new_value) { + uint_xlen_t prev_value; + __asm__ volatile ("csrrw %0, mbase, %1" + : "=r" (prev_value) /* output: register %0 */ + : "r" (new_value) /* input : register */ + : /* clobbers: none */); + return prev_value; +} + +/******************************************* + * mbound - MRW - Bound register. + */ +static inline uint_xlen_t csr_read_mbound(void) { + uint_xlen_t value; + __asm__ volatile ("csrr %0, mbound" + : "=r" (value) /* output : register */ + : /* input : none */ + : /* clobbers: none */); + return value; +} +static inline void csr_write_mbound(uint_xlen_t value) { + __asm__ volatile ("csrw mbound, %0" + : /* output: none */ + : "r" (value) /* input : from register */ + : /* clobbers: none */); +} +static inline uint_xlen_t csr_read_write_mbound(uint_xlen_t new_value) { + uint_xlen_t prev_value; + __asm__ volatile ("csrrw %0, mbound, %1" + : "=r" (prev_value) /* output: register %0 */ + : "r" (new_value) /* input : register */ + : /* clobbers: none */); + return prev_value; +} + +/******************************************* + * mibase - MRW - Instruction base register. 
+ */ +static inline uint_xlen_t csr_read_mibase(void) { + uint_xlen_t value; + __asm__ volatile ("csrr %0, mibase" + : "=r" (value) /* output : register */ + : /* input : none */ + : /* clobbers: none */); + return value; +} +static inline void csr_write_mibase(uint_xlen_t value) { + __asm__ volatile ("csrw mibase, %0" + : /* output: none */ + : "r" (value) /* input : from register */ + : /* clobbers: none */); +} +static inline uint_xlen_t csr_read_write_mibase(uint_xlen_t new_value) { + uint_xlen_t prev_value; + __asm__ volatile ("csrrw %0, mibase, %1" + : "=r" (prev_value) /* output: register %0 */ + : "r" (new_value) /* input : register */ + : /* clobbers: none */); + return prev_value; +} + +/******************************************* + * mibound - MRW - Instruction bound register. + */ +static inline uint_xlen_t csr_read_mibound(void) { + uint_xlen_t value; + __asm__ volatile ("csrr %0, mibound" + : "=r" (value) /* output : register */ + : /* input : none */ + : /* clobbers: none */); + return value; +} +static inline void csr_write_mibound(uint_xlen_t value) { + __asm__ volatile ("csrw mibound, %0" + : /* output: none */ + : "r" (value) /* input : from register */ + : /* clobbers: none */); +} +static inline uint_xlen_t csr_read_write_mibound(uint_xlen_t new_value) { + uint_xlen_t prev_value; + __asm__ volatile ("csrrw %0, mibound, %1" + : "=r" (prev_value) /* output: register %0 */ + : "r" (new_value) /* input : register */ + : /* clobbers: none */); + return prev_value; +} + +/******************************************* + * mdbase - MRW - Data base register. 
+ */ +static inline uint_xlen_t csr_read_mdbase(void) { + uint_xlen_t value; + __asm__ volatile ("csrr %0, mdbase" + : "=r" (value) /* output : register */ + : /* input : none */ + : /* clobbers: none */); + return value; +} +static inline void csr_write_mdbase(uint_xlen_t value) { + __asm__ volatile ("csrw mdbase, %0" + : /* output: none */ + : "r" (value) /* input : from register */ + : /* clobbers: none */); +} +static inline uint_xlen_t csr_read_write_mdbase(uint_xlen_t new_value) { + uint_xlen_t prev_value; + __asm__ volatile ("csrrw %0, mdbase, %1" + : "=r" (prev_value) /* output: register %0 */ + : "r" (new_value) /* input : register */ + : /* clobbers: none */); + return prev_value; +} + +/******************************************* + * mdbound - MRW - Data bound register. + */ +static inline uint_xlen_t csr_read_mdbound(void) { + uint_xlen_t value; + __asm__ volatile ("csrr %0, mdbound" + : "=r" (value) /* output : register */ + : /* input : none */ + : /* clobbers: none */); + return value; +} +static inline void csr_write_mdbound(uint_xlen_t value) { + __asm__ volatile ("csrw mdbound, %0" + : /* output: none */ + : "r" (value) /* input : from register */ + : /* clobbers: none */); +} +static inline uint_xlen_t csr_read_write_mdbound(uint_xlen_t new_value) { + uint_xlen_t prev_value; + __asm__ volatile ("csrrw %0, mdbound, %1" + : "=r" (prev_value) /* output: register %0 */ + : "r" (new_value) /* input : register */ + : /* clobbers: none */); + return prev_value; +} + +/******************************************* + * pmpcfg0 - MRW - Physical memory protection configuration. 
+ */ +static inline uint_xlen_t csr_read_pmpcfg0(void) { + uint_xlen_t value; + __asm__ volatile ("csrr %0, pmpcfg0" + : "=r" (value) /* output : register */ + : /* input : none */ + : /* clobbers: none */); + return value; +} +static inline void csr_write_pmpcfg0(uint_xlen_t value) { + __asm__ volatile ("csrw pmpcfg0, %0" + : /* output: none */ + : "r" (value) /* input : from register */ + : /* clobbers: none */); +} +static inline uint_xlen_t csr_read_write_pmpcfg0(uint_xlen_t new_value) { + uint_xlen_t prev_value; + __asm__ volatile ("csrrw %0, pmpcfg0, %1" + : "=r" (prev_value) /* output: register %0 */ + : "r" (new_value) /* input : register */ + : /* clobbers: none */); + return prev_value; +} + +/******************************************* + * pmpcfg1 - MRW - Physical memory protection configuration, RV32 only. + */ +static inline uint_xlen_t csr_read_pmpcfg1(void) { + uint_xlen_t value; + __asm__ volatile ("csrr %0, pmpcfg1" + : "=r" (value) /* output : register */ + : /* input : none */ + : /* clobbers: none */); + return value; +} +static inline void csr_write_pmpcfg1(uint_xlen_t value) { + __asm__ volatile ("csrw pmpcfg1, %0" + : /* output: none */ + : "r" (value) /* input : from register */ + : /* clobbers: none */); +} +static inline uint_xlen_t csr_read_write_pmpcfg1(uint_xlen_t new_value) { + uint_xlen_t prev_value; + __asm__ volatile ("csrrw %0, pmpcfg1, %1" + : "=r" (prev_value) /* output: register %0 */ + : "r" (new_value) /* input : register */ + : /* clobbers: none */); + return prev_value; +} + +/******************************************* + * pmpcfg2 - MRW - Physical memory protection configuration. 
+ */ +static inline uint_xlen_t csr_read_pmpcfg2(void) { + uint_xlen_t value; + __asm__ volatile ("csrr %0, pmpcfg2" + : "=r" (value) /* output : register */ + : /* input : none */ + : /* clobbers: none */); + return value; +} +static inline void csr_write_pmpcfg2(uint_xlen_t value) { + __asm__ volatile ("csrw pmpcfg2, %0" + : /* output: none */ + : "r" (value) /* input : from register */ + : /* clobbers: none */); +} +static inline uint_xlen_t csr_read_write_pmpcfg2(uint_xlen_t new_value) { + uint_xlen_t prev_value; + __asm__ volatile ("csrrw %0, pmpcfg2, %1" + : "=r" (prev_value) /* output: register %0 */ + : "r" (new_value) /* input : register */ + : /* clobbers: none */); + return prev_value; +} + +/******************************************* + * pmpcfg3 - MRW - Physical memory protection configuration, RV32 only. + */ +static inline uint_xlen_t csr_read_pmpcfg3(void) { + uint_xlen_t value; + __asm__ volatile ("csrr %0, pmpcfg3" + : "=r" (value) /* output : register */ + : /* input : none */ + : /* clobbers: none */); + return value; +} +static inline void csr_write_pmpcfg3(uint_xlen_t value) { + __asm__ volatile ("csrw pmpcfg3, %0" + : /* output: none */ + : "r" (value) /* input : from register */ + : /* clobbers: none */); +} +static inline uint_xlen_t csr_read_write_pmpcfg3(uint_xlen_t new_value) { + uint_xlen_t prev_value; + __asm__ volatile ("csrrw %0, pmpcfg3, %1" + : "=r" (prev_value) /* output: register %0 */ + : "r" (new_value) /* input : register */ + : /* clobbers: none */); + return prev_value; +} + +/******************************************* + * pmpaddr0 - MRW - Physical memory protection address register. 
+ */ +static inline uint_xlen_t csr_read_pmpaddr0(void) { + uint_xlen_t value; + __asm__ volatile ("csrr %0, pmpaddr0" + : "=r" (value) /* output : register */ + : /* input : none */ + : /* clobbers: none */); + return value; +} +static inline void csr_write_pmpaddr0(uint_xlen_t value) { + __asm__ volatile ("csrw pmpaddr0, %0" + : /* output: none */ + : "r" (value) /* input : from register */ + : /* clobbers: none */); +} +static inline uint_xlen_t csr_read_write_pmpaddr0(uint_xlen_t new_value) { + uint_xlen_t prev_value; + __asm__ volatile ("csrrw %0, pmpaddr0, %1" + : "=r" (prev_value) /* output: register %0 */ + : "r" (new_value) /* input : register */ + : /* clobbers: none */); + return prev_value; +} + +/******************************************* + * pmpaddr1 - MRW - Physical memory protection address register. + */ +static inline uint_xlen_t csr_read_pmpaddr1(void) { + uint_xlen_t value; + __asm__ volatile ("csrr %0, pmpaddr1" + : "=r" (value) /* output : register */ + : /* input : none */ + : /* clobbers: none */); + return value; +} +static inline void csr_write_pmpaddr1(uint_xlen_t value) { + __asm__ volatile ("csrw pmpaddr1, %0" + : /* output: none */ + : "r" (value) /* input : from register */ + : /* clobbers: none */); +} +static inline uint_xlen_t csr_read_write_pmpaddr1(uint_xlen_t new_value) { + uint_xlen_t prev_value; + __asm__ volatile ("csrrw %0, pmpaddr1, %1" + : "=r" (prev_value) /* output: register %0 */ + : "r" (new_value) /* input : register */ + : /* clobbers: none */); + return prev_value; +} + +/******************************************* + * pmpaddr15 - MRW - Physical memory protection address register. 
+ */ +static inline uint_xlen_t csr_read_pmpaddr15(void) { + uint_xlen_t value; + __asm__ volatile ("csrr %0, pmpaddr15" + : "=r" (value) /* output : register */ + : /* input : none */ + : /* clobbers: none */); + return value; +} +static inline void csr_write_pmpaddr15(uint_xlen_t value) { + __asm__ volatile ("csrw pmpaddr15, %0" + : /* output: none */ + : "r" (value) /* input : from register */ + : /* clobbers: none */); +} +static inline uint_xlen_t csr_read_write_pmpaddr15(uint_xlen_t new_value) { + uint_xlen_t prev_value; + __asm__ volatile ("csrrw %0, pmpaddr15, %1" + : "=r" (prev_value) /* output: register %0 */ + : "r" (new_value) /* input : register */ + : /* clobbers: none */); + return prev_value; +} + +/******************************************* + * mhpmcounter4 - MRW - Machine performance-monitoring counter. + */ +static inline uint_xlen_t csr_read_mhpmcounter4(void) { + uint_xlen_t value; + __asm__ volatile ("csrr %0, mhpmcounter4" + : "=r" (value) /* output : register */ + : /* input : none */ + : /* clobbers: none */); + return value; +} +static inline void csr_write_mhpmcounter4(uint_xlen_t value) { + __asm__ volatile ("csrw mhpmcounter4, %0" + : /* output: none */ + : "r" (value) /* input : from register */ + : /* clobbers: none */); +} +static inline uint_xlen_t csr_read_write_mhpmcounter4(uint_xlen_t new_value) { + uint_xlen_t prev_value; + __asm__ volatile ("csrrw %0, mhpmcounter4, %1" + : "=r" (prev_value) /* output: register %0 */ + : "r" (new_value) /* input : register */ + : /* clobbers: none */); + return prev_value; +} + +/******************************************* + * mhpmcounter31 - MRW - Machine performance-monitoring counter. 
+ */ +static inline uint_xlen_t csr_read_mhpmcounter31(void) { + uint_xlen_t value; + __asm__ volatile ("csrr %0, mhpmcounter31" + : "=r" (value) /* output : register */ + : /* input : none */ + : /* clobbers: none */); + return value; +} +static inline void csr_write_mhpmcounter31(uint_xlen_t value) { + __asm__ volatile ("csrw mhpmcounter31, %0" + : /* output: none */ + : "r" (value) /* input : from register */ + : /* clobbers: none */); +} +static inline uint_xlen_t csr_read_write_mhpmcounter31(uint_xlen_t new_value) { + uint_xlen_t prev_value; + __asm__ volatile ("csrrw %0, mhpmcounter31, %1" + : "=r" (prev_value) /* output: register %0 */ + : "r" (new_value) /* input : register */ + : /* clobbers: none */); + return prev_value; +} + +/******************************************* + * mcycleh - MRW - Upper 32 bits of mcycle, RV32I only. + */ +static inline uint32_t csr_read_mcycleh(void) { + uint_csr32_t value; + __asm__ volatile ("csrr %0, mcycleh" + : "=r" (value) /* output : register */ + : /* input : none */ + : /* clobbers: none */); + return value; +} +static inline void csr_write_mcycleh(uint_csr32_t value) { + __asm__ volatile ("csrw mcycleh, %0" + : /* output: none */ + : "r" (value) /* input : from register */ + : /* clobbers: none */); +} +static inline uint32_t csr_read_write_mcycleh(uint32_t new_value) { + uint_csr32_t prev_value; + __asm__ volatile ("csrrw %0, mcycleh, %1" + : "=r" (prev_value) /* output: register %0 */ + : "r" (new_value) /* input : register */ + : /* clobbers: none */); + return prev_value; +} + +/******************************************* + * minstreth - MRW - Upper 32 bits of minstret, RV32I only. 
+ */ +static inline uint32_t csr_read_minstreth(void) { + uint_csr32_t value; + __asm__ volatile ("csrr %0, minstreth" + : "=r" (value) /* output : register */ + : /* input : none */ + : /* clobbers: none */); + return value; +} +static inline void csr_write_minstreth(uint_csr32_t value) { + __asm__ volatile ("csrw minstreth, %0" + : /* output: none */ + : "r" (value) /* input : from register */ + : /* clobbers: none */); +} +static inline uint32_t csr_read_write_minstreth(uint32_t new_value) { + uint_csr32_t prev_value; + __asm__ volatile ("csrrw %0, minstreth, %1" + : "=r" (prev_value) /* output: register %0 */ + : "r" (new_value) /* input : register */ + : /* clobbers: none */); + return prev_value; +} + +/******************************************* + * mhpmcounter3h - MRW - Upper 32 bits of mhpmcounter3, RV32I only. + */ +static inline uint32_t csr_read_mhpmcounter3h(void) { + uint_csr32_t value; + __asm__ volatile ("csrr %0, mhpmcounter3h" + : "=r" (value) /* output : register */ + : /* input : none */ + : /* clobbers: none */); + return value; +} +static inline void csr_write_mhpmcounter3h(uint_csr32_t value) { + __asm__ volatile ("csrw mhpmcounter3h, %0" + : /* output: none */ + : "r" (value) /* input : from register */ + : /* clobbers: none */); +} +static inline uint32_t csr_read_write_mhpmcounter3h(uint32_t new_value) { + uint_csr32_t prev_value; + __asm__ volatile ("csrrw %0, mhpmcounter3h, %1" + : "=r" (prev_value) /* output: register %0 */ + : "r" (new_value) /* input : register */ + : /* clobbers: none */); + return prev_value; +} + +/******************************************* + * mhpmcounter4h - MRW - Upper 32 bits of mhpmcounter4, RV32I only. 
+ */ +static inline uint32_t csr_read_mhpmcounter4h(void) { + uint_csr32_t value; + __asm__ volatile ("csrr %0, mhpmcounter4h" + : "=r" (value) /* output : register */ + : /* input : none */ + : /* clobbers: none */); + return value; +} +static inline void csr_write_mhpmcounter4h(uint_csr32_t value) { + __asm__ volatile ("csrw mhpmcounter4h, %0" + : /* output: none */ + : "r" (value) /* input : from register */ + : /* clobbers: none */); +} +static inline uint32_t csr_read_write_mhpmcounter4h(uint32_t new_value) { + uint_csr32_t prev_value; + __asm__ volatile ("csrrw %0, mhpmcounter4h, %1" + : "=r" (prev_value) /* output: register %0 */ + : "r" (new_value) /* input : register */ + : /* clobbers: none */); + return prev_value; +} + +/******************************************* + * mhpmcounter31h - MRW - Upper 32 bits of mhpmcounter31, RV32I only. + */ +static inline uint32_t csr_read_mhpmcounter31h(void) { + uint_csr32_t value; + __asm__ volatile ("csrr %0, mhpmcounter31h" + : "=r" (value) /* output : register */ + : /* input : none */ + : /* clobbers: none */); + return value; +} +static inline void csr_write_mhpmcounter31h(uint_csr32_t value) { + __asm__ volatile ("csrw mhpmcounter31h, %0" + : /* output: none */ + : "r" (value) /* input : from register */ + : /* clobbers: none */); +} +static inline uint32_t csr_read_write_mhpmcounter31h(uint32_t new_value) { + uint_csr32_t prev_value; + __asm__ volatile ("csrrw %0, mhpmcounter31h, %1" + : "=r" (prev_value) /* output: register %0 */ + : "r" (new_value) /* input : register */ + : /* clobbers: none */); + return prev_value; +} + +/******************************************* + * mhpmevent4 - MRW - Machine performance-monitoring event selector. 
+ */ +static inline uint_xlen_t csr_read_mhpmevent4(void) { + uint_xlen_t value; + __asm__ volatile ("csrr %0, mhpmevent4" + : "=r" (value) /* output : register */ + : /* input : none */ + : /* clobbers: none */); + return value; +} +static inline void csr_write_mhpmevent4(uint_xlen_t value) { + __asm__ volatile ("csrw mhpmevent4, %0" + : /* output: none */ + : "r" (value) /* input : from register */ + : /* clobbers: none */); +} +static inline uint_xlen_t csr_read_write_mhpmevent4(uint_xlen_t new_value) { + uint_xlen_t prev_value; + __asm__ volatile ("csrrw %0, mhpmevent4, %1" + : "=r" (prev_value) /* output: register %0 */ + : "r" (new_value) /* input : register */ + : /* clobbers: none */); + return prev_value; +} + +/******************************************* + * mhpmevent31 - MRW - Machine performance-monitoring event selector. + */ +static inline uint_xlen_t csr_read_mhpmevent31(void) { + uint_xlen_t value; + __asm__ volatile ("csrr %0, mhpmevent31" + : "=r" (value) /* output : register */ + : /* input : none */ + : /* clobbers: none */); + return value; +} +static inline void csr_write_mhpmevent31(uint_xlen_t value) { + __asm__ volatile ("csrw mhpmevent31, %0" + : /* output: none */ + : "r" (value) /* input : from register */ + : /* clobbers: none */); +} +static inline uint_xlen_t csr_read_write_mhpmevent31(uint_xlen_t new_value) { + uint_xlen_t prev_value; + __asm__ volatile ("csrrw %0, mhpmevent31, %1" + : "=r" (prev_value) /* output: register %0 */ + : "r" (new_value) /* input : register */ + : /* clobbers: none */); + return prev_value; +} + +/******************************************* + * tselect - MRW - Debug/Trace trigger register select. 
+ */ +static inline uint_xlen_t csr_read_tselect(void) { + uint_xlen_t value; + __asm__ volatile ("csrr %0, tselect" + : "=r" (value) /* output : register */ + : /* input : none */ + : /* clobbers: none */); + return value; +} +static inline void csr_write_tselect(uint_xlen_t value) { + __asm__ volatile ("csrw tselect, %0" + : /* output: none */ + : "r" (value) /* input : from register */ + : /* clobbers: none */); +} +static inline uint_xlen_t csr_read_write_tselect(uint_xlen_t new_value) { + uint_xlen_t prev_value; + __asm__ volatile ("csrrw %0, tselect, %1" + : "=r" (prev_value) /* output: register %0 */ + : "r" (new_value) /* input : register */ + : /* clobbers: none */); + return prev_value; +} + +/******************************************* + * tdata1 - MRW - First Debug/Trace trigger data register. + */ +static inline uint_xlen_t csr_read_tdata1(void) { + uint_xlen_t value; + __asm__ volatile ("csrr %0, tdata1" + : "=r" (value) /* output : register */ + : /* input : none */ + : /* clobbers: none */); + return value; +} +static inline void csr_write_tdata1(uint_xlen_t value) { + __asm__ volatile ("csrw tdata1, %0" + : /* output: none */ + : "r" (value) /* input : from register */ + : /* clobbers: none */); +} +static inline uint_xlen_t csr_read_write_tdata1(uint_xlen_t new_value) { + uint_xlen_t prev_value; + __asm__ volatile ("csrrw %0, tdata1, %1" + : "=r" (prev_value) /* output: register %0 */ + : "r" (new_value) /* input : register */ + : /* clobbers: none */); + return prev_value; +} + +/******************************************* + * tdata2 - MRW - Second Debug/Trace trigger data register. 
+ */ +static inline uint_xlen_t csr_read_tdata2(void) { + uint_xlen_t value; + __asm__ volatile ("csrr %0, tdata2" + : "=r" (value) /* output : register */ + : /* input : none */ + : /* clobbers: none */); + return value; +} +static inline void csr_write_tdata2(uint_xlen_t value) { + __asm__ volatile ("csrw tdata2, %0" + : /* output: none */ + : "r" (value) /* input : from register */ + : /* clobbers: none */); +} +static inline uint_xlen_t csr_read_write_tdata2(uint_xlen_t new_value) { + uint_xlen_t prev_value; + __asm__ volatile ("csrrw %0, tdata2, %1" + : "=r" (prev_value) /* output: register %0 */ + : "r" (new_value) /* input : register */ + : /* clobbers: none */); + return prev_value; +} + +/******************************************* + * tdata3 - MRW - Third Debug/Trace trigger data register. + */ +static inline uint_xlen_t csr_read_tdata3(void) { + uint_xlen_t value; + __asm__ volatile ("csrr %0, tdata3" + : "=r" (value) /* output : register */ + : /* input : none */ + : /* clobbers: none */); + return value; +} +static inline void csr_write_tdata3(uint_xlen_t value) { + __asm__ volatile ("csrw tdata3, %0" + : /* output: none */ + : "r" (value) /* input : from register */ + : /* clobbers: none */); +} +static inline uint_xlen_t csr_read_write_tdata3(uint_xlen_t new_value) { + uint_xlen_t prev_value; + __asm__ volatile ("csrrw %0, tdata3, %1" + : "=r" (prev_value) /* output: register %0 */ + : "r" (new_value) /* input : register */ + : /* clobbers: none */); + return prev_value; +} + +/******************************************* + * dcsr - DRW - Debug control and status register. 
+ */ +static inline uint_xlen_t csr_read_dcsr(void) { + uint_xlen_t value; + __asm__ volatile ("csrr %0, dcsr" + : "=r" (value) /* output : register */ + : /* input : none */ + : /* clobbers: none */); + return value; +} +static inline void csr_write_dcsr(uint_xlen_t value) { + __asm__ volatile ("csrw dcsr, %0" + : /* output: none */ + : "r" (value) /* input : from register */ + : /* clobbers: none */); +} +static inline uint_xlen_t csr_read_write_dcsr(uint_xlen_t new_value) { + uint_xlen_t prev_value; + __asm__ volatile ("csrrw %0, dcsr, %1" + : "=r" (prev_value) /* output: register %0 */ + : "r" (new_value) /* input : register */ + : /* clobbers: none */); + return prev_value; +} + +/******************************************* + * dpc - DRW - Debug PC. + */ +static inline uint_xlen_t csr_read_dpc(void) { + uint_xlen_t value; + __asm__ volatile ("csrr %0, dpc" + : "=r" (value) /* output : register */ + : /* input : none */ + : /* clobbers: none */); + return value; +} +static inline void csr_write_dpc(uint_xlen_t value) { + __asm__ volatile ("csrw dpc, %0" + : /* output: none */ + : "r" (value) /* input : from register */ + : /* clobbers: none */); +} +static inline uint_xlen_t csr_read_write_dpc(uint_xlen_t new_value) { + uint_xlen_t prev_value; + __asm__ volatile ("csrrw %0, dpc, %1" + : "=r" (prev_value) /* output: register %0 */ + : "r" (new_value) /* input : register */ + : /* clobbers: none */); + return prev_value; +} + +/******************************************* + * dscratch0 - DRW - Debug scratch register 0. 
+ */ +static inline uint_xlen_t csr_read_dscratch0(void) { + uint_xlen_t value; + __asm__ volatile ("csrr %0, dscratch0" + : "=r" (value) /* output : register */ + : /* input : none */ + : /* clobbers: none */); + return value; +} +static inline void csr_write_dscratch0(uint_xlen_t value) { + __asm__ volatile ("csrw dscratch0, %0" + : /* output: none */ + : "r" (value) /* input : from register */ + : /* clobbers: none */); +} +static inline uint_xlen_t csr_read_write_dscratch0(uint_xlen_t new_value) { + uint_xlen_t prev_value; + __asm__ volatile ("csrrw %0, dscratch0, %1" + : "=r" (prev_value) /* output: register %0 */ + : "r" (new_value) /* input : register */ + : /* clobbers: none */); + return prev_value; +} + +/******************************************* + * dscratch1 - DRW - Debug scratch register 1. + */ +static inline uint_xlen_t csr_read_dscratch1(void) { + uint_xlen_t value; + __asm__ volatile ("csrr %0, dscratch1" + : "=r" (value) /* output : register */ + : /* input : none */ + : /* clobbers: none */); + return value; +} +static inline void csr_write_dscratch1(uint_xlen_t value) { + __asm__ volatile ("csrw dscratch1, %0" + : /* output: none */ + : "r" (value) /* input : from register */ + : /* clobbers: none */); +} +static inline uint_xlen_t csr_read_write_dscratch1(uint_xlen_t new_value) { + uint_xlen_t prev_value; + __asm__ volatile ("csrrw %0, dscratch1, %1" + : "=r" (prev_value) /* output: register %0 */ + : "r" (new_value) /* input : register */ + : /* clobbers: none */); + return prev_value; +} + +/******************************************* + * hie - HRW - Hypervisor interrupt-enable register. 
+ */ +static inline uint_xlen_t csr_read_hie(void) { + uint_xlen_t value; + __asm__ volatile ("csrr %0, hie" + : "=r" (value) /* output : register */ + : /* input : none */ + : /* clobbers: none */); + return value; +} +static inline void csr_write_hie(uint_xlen_t value) { + __asm__ volatile ("csrw hie, %0" + : /* output: none */ + : "r" (value) /* input : from register */ + : /* clobbers: none */); +} +static inline uint_xlen_t csr_read_write_hie(uint_xlen_t new_value) { + uint_xlen_t prev_value; + __asm__ volatile ("csrrw %0, hie, %1" + : "=r" (prev_value) /* output: register %0 */ + : "r" (new_value) /* input : register */ + : /* clobbers: none */); + return prev_value; +} + +/******************************************* + * hgeie - HRW - Hypervisor guest external interrupt-enable register. + */ +static inline uint_xlen_t csr_read_hgeie(void) { + uint_xlen_t value; + __asm__ volatile ("csrr %0, hgeie" + : "=r" (value) /* output : register */ + : /* input : none */ + : /* clobbers: none */); + return value; +} +static inline void csr_write_hgeie(uint_xlen_t value) { + __asm__ volatile ("csrw hgeie, %0" + : /* output: none */ + : "r" (value) /* input : from register */ + : /* clobbers: none */); +} +static inline uint_xlen_t csr_read_write_hgeie(uint_xlen_t new_value) { + uint_xlen_t prev_value; + __asm__ volatile ("csrrw %0, hgeie, %1" + : "=r" (prev_value) /* output: register %0 */ + : "r" (new_value) /* input : register */ + : /* clobbers: none */); + return prev_value; +} + +/******************************************* + * htval - HRW - Hypervisor bad guest physical address. 
+ */ +static inline uint_xlen_t csr_read_htval(void) { + uint_xlen_t value; + __asm__ volatile ("csrr %0, htval" + : "=r" (value) /* output : register */ + : /* input : none */ + : /* clobbers: none */); + return value; +} +static inline void csr_write_htval(uint_xlen_t value) { + __asm__ volatile ("csrw htval, %0" + : /* output: none */ + : "r" (value) /* input : from register */ + : /* clobbers: none */); +} +static inline uint_xlen_t csr_read_write_htval(uint_xlen_t new_value) { + uint_xlen_t prev_value; + __asm__ volatile ("csrrw %0, htval, %1" + : "=r" (prev_value) /* output: register %0 */ + : "r" (new_value) /* input : register */ + : /* clobbers: none */); + return prev_value; +} + +/******************************************* + * hip - HRW - Hypervisor interrupt pending. + */ +static inline uint_xlen_t csr_read_hip(void) { + uint_xlen_t value; + __asm__ volatile ("csrr %0, hip" + : "=r" (value) /* output : register */ + : /* input : none */ + : /* clobbers: none */); + return value; +} +static inline void csr_write_hip(uint_xlen_t value) { + __asm__ volatile ("csrw hip, %0" + : /* output: none */ + : "r" (value) /* input : from register */ + : /* clobbers: none */); +} +static inline uint_xlen_t csr_read_write_hip(uint_xlen_t new_value) { + uint_xlen_t prev_value; + __asm__ volatile ("csrrw %0, hip, %1" + : "=r" (prev_value) /* output: register %0 */ + : "r" (new_value) /* input : register */ + : /* clobbers: none */); + return prev_value; +} + +/******************************************* + * htinst - HRW - Hypervisor trap instruction (transformed). 
+ */ +static inline uint_xlen_t csr_read_htinst(void) { + uint_xlen_t value; + __asm__ volatile ("csrr %0, htinst" + : "=r" (value) /* output : register */ + : /* input : none */ + : /* clobbers: none */); + return value; +} +static inline void csr_write_htinst(uint_xlen_t value) { + __asm__ volatile ("csrw htinst, %0" + : /* output: none */ + : "r" (value) /* input : from register */ + : /* clobbers: none */); +} +static inline uint_xlen_t csr_read_write_htinst(uint_xlen_t new_value) { + uint_xlen_t prev_value; + __asm__ volatile ("csrrw %0, htinst, %1" + : "=r" (prev_value) /* output: register %0 */ + : "r" (new_value) /* input : register */ + : /* clobbers: none */); + return prev_value; +} + +/******************************************* + * hgeip - HRO - Hypervisor guest external interrupt pending. + */ +static inline uint_xlen_t csr_read_hgeip(void) { + uint_xlen_t value; + __asm__ volatile ("csrr %0, hgeip" + : "=r" (value) /* output : register */ + : /* input : none */ + : /* clobbers: none */); + return value; +} + +/******************************************* + * mtinst - MRW - Machine trap instruction (transformed). + */ +static inline uint_xlen_t csr_read_mtinst(void) { + uint_xlen_t value; + __asm__ volatile ("csrr %0, mtinst" + : "=r" (value) /* output : register */ + : /* input : none */ + : /* clobbers: none */); + return value; +} +static inline void csr_write_mtinst(uint_xlen_t value) { + __asm__ volatile ("csrw mtinst, %0" + : /* output: none */ + : "r" (value) /* input : from register */ + : /* clobbers: none */); +} +static inline uint_xlen_t csr_read_write_mtinst(uint_xlen_t new_value) { + uint_xlen_t prev_value; + __asm__ volatile ("csrrw %0, mtinst, %1" + : "=r" (prev_value) /* output: register %0 */ + : "r" (new_value) /* input : register */ + : /* clobbers: none */); + return prev_value; +} + +/******************************************* + * mtval2 - MRW - Machine bad guest physical address. 
+ */ +static inline uint_xlen_t csr_read_mtval2(void) { + uint_xlen_t value; + __asm__ volatile ("csrr %0, mtval2" + : "=r" (value) /* output : register */ + : /* input : none */ + : /* clobbers: none */); + return value; +} +static inline void csr_write_mtval2(uint_xlen_t value) { + __asm__ volatile ("csrw mtval2, %0" + : /* output: none */ + : "r" (value) /* input : from register */ + : /* clobbers: none */); +} +static inline uint_xlen_t csr_read_write_mtval2(uint_xlen_t new_value) { + uint_xlen_t prev_value; + __asm__ volatile ("csrrw %0, mtval2, %1" + : "=r" (prev_value) /* output: register %0 */ + : "r" (new_value) /* input : register */ + : /* clobbers: none */); + return prev_value; +} + + +#endif // #define RISCV_CSR_H \ No newline at end of file diff --git a/port/moonlight/riscv-traps.h b/port/moonlight/riscv-traps.h new file mode 100644 index 0000000..bfb6175 --- /dev/null +++ b/port/moonlight/riscv-traps.h @@ -0,0 +1,68 @@ +/* + RISC-V machine interrupts. + SPDX-License-Identifier: Unlicense + + https://five-embeddev.com/ + +*/ + +#ifndef RISCV_TRAPS_H +#define RISCV_TRAPS_H + +enum { + RISCV_INT_MSI = 3, + RISCV_INT_MTI = 7, + RISCV_INT_MEI = 11, + RISCV_INT_SSI = 1, + RISCV_INT_STI = 5, + RISCV_INT_SEI = 9, + RISCV_INT_USI = 0, + RISCV_INT_UTI = 4, + RISCV_INT_UEI = 8, +}; + +enum { + RISCV_INT_POS_MSI = 3, + RISCV_INT_POS_MTI = 7, + RISCV_INT_POS_MEI = 11, + RISCV_INT_POS_SSI = 1, + RISCV_INT_POS_STI = 5, + RISCV_INT_POS_SEI = 9, + RISCV_INT_POS_USI = 0, + RISCV_INT_POS_UTI = 4, + RISCV_INT_POS_UEI = 8, +}; + +enum { + RISCV_INT_MASK_MSI = (1UL< +#include +#include +#include +#include "riscv-traps.h" + +#if __riscv_xlen == 64 +#define INTERRUPT_BIT 0x8000000000000000ull +#else +#define INTERRUPT_BIT 0x80000000ull +#endif +#define OS_IS_INTERRUPT(mcause) (mcause & INTERRUPT_BIT) +#define OS_IS_TICK_INT(mcause) (mcause == (0x7 | INTERRUPT_BIT)) +#define OS_IS_SOFT_INT(mcause) (mcause == (0x3 | INTERRUPT_BIT)) +#define OS_IS_EXT_INT(mcause) (mcause == 
(0xb | INTERRUPT_BIT)) + +extern void _tx_timer_interrupt(void); +extern uintptr_t exception(uintptr_t mcause, uintptr_t mepc, uintptr_t mtval); + +void (*irq_handler[__riscv_xlen])(); + +int register_irq_handler(unsigned irq_num, void (*handler)()) { + if(irq_num<__riscv_xlen){ + irq_handler[irq_num] = handler; + return 1; + } + return 0; +} + +void trap_handler(uintptr_t mcause, uintptr_t mepc, uintptr_t mtval) { + if(OS_IS_INTERRUPT(mcause)) { + unsigned irq_id = mcause&(__riscv_xlen-1); + switch(irq_id){ + case RISCV_INT_MTI: + hwtimer_handler(); + _tx_timer_interrupt(); + break; + case RISCV_INT_MEI: + puts("[INTERRUPT]: handler ext irq error!\n"); + while(1) + ; + break; + default: + if(irq_handler[irq_id]) + irq_handler[irq_id](); + else { + printf("[INTERRUPT]: Unkown Interrupt %d!!\n", mcause&0xff); + puts("[INTERRUPT]: now can't deal with the interrupt!\n"); + while(1) + ; + } + } + } else { + exception( mcause, mepc, mtval); + } +} diff --git a/port/moonlight/trap_vectored.c b/port/moonlight/trap_vectored.c new file mode 100644 index 0000000..28c6a62 --- /dev/null +++ b/port/moonlight/trap_vectored.c @@ -0,0 +1,78 @@ +/* + Baremetal main program with timer interrupt. + SPDX-License-Identifier: Unlicense + + https://five-embeddev.com/ + + Tested with sifive-hifive-revb, but should not have any + dependencies to any particular implementation. + +*/ + +// RISC-V CSR definitions and access classes +#include "riscv-csr.h" +#include "riscv-interrupt.h" +#include "hwtimer.h" +#include "vector_table.h" + +extern void _tx_timer_interrupt(void); + +// Machine mode interrupt service routine + +// Global to hold current timestamp, written in MTI handler. 
+static volatile uint64_t timestamp = 0; + +#define RISCV_MTVEC_MODE_VECTORED 1 + +int init_irq(void) { + // Global interrupt disable + csr_clr_bits_mstatus(MSTATUS_MIE_BIT_MASK); + csr_write_mie(0); + + // Setup the IRQ handler entry point, set the mode to vectored + csr_write_mtvec((uint_xlen_t) riscv_mtvec_table | RISCV_MTVEC_MODE_VECTORED); + + // Enable MIE.MTI + csr_set_bits_mie(MIE_MTI_BIT_MASK); + + // Global interrupt enable + csr_set_bits_mstatus(MSTATUS_MIE_BIT_MASK); + + // Setup timer for 1 second interval + hwtimer_init(); + + // Busy loop + do { + // Wait for timer interrupt + __asm__ volatile ("wfi"); + // Try a synchronous exception. + __asm__ volatile ("ecall"); + } while (1); + + // Will not reach here + return 0; +} + +#pragma GCC push_options +// Force the alignment for mtvec.BASE. A 'C' extension program could be aligned to to bytes. +#pragma GCC optimize ("align-functions=4") +// The 'riscv_mtvec_mti' function is added to the vector table by the vector_table.c +void riscv_mtvec_mti(void) { + hwtimer_handler(); + _tx_timer_interrupt(); +} +// The 'riscv_mtvec_exception' function is added to the vector table by the vector_table.c +// This function looks at the cause of the exception, if it is an 'ecall' instruction then increment a global counter. 
+void riscv_mtvec_exception(void) { + uint_xlen_t this_cause = csr_read_mcause(); + uint_xlen_t this_pc = csr_read_mepc(); + //uint_xlen_t this_value = csr_read_mtval(); + switch (this_cause) { + case RISCV_EXCP_ENVIRONMENT_CALL_FROM_M_MODE: + ecall_count++; + // Make sure the return address is the instruction AFTER ecall + csr_write_mepc(this_pc+4); + break; + } +} +#pragma GCC pop_options \ No newline at end of file diff --git a/port/moonlight/tx_timer_interrupt.c b/port/moonlight/tx_timer_interrupt.c new file mode 100644 index 0000000..3c90d0a --- /dev/null +++ b/port/moonlight/tx_timer_interrupt.c @@ -0,0 +1,134 @@ +/*************************************************************************** + * Copyright (c) 2024 Microsoft Corporation + * + * This program and the accompanying materials are made available under the + * terms of the MIT License which is available at + * https://opensource.org/licenses/MIT. + * + * SPDX-License-Identifier: MIT + **************************************************************************/ + + +/**************************************************************************/ +/**************************************************************************/ +/** */ +/** ThreadX Component */ +/** */ +/** Timer */ +/** */ +/**************************************************************************/ +/**************************************************************************/ + +#define TX_SOURCE_CODE + +/* Include necessary system files. */ + +#include "tx_api.h" +#include "tx_timer.h" +#include "tx_thread.h" + +/**************************************************************************/ +/* */ +/* FUNCTION RELEASE */ +/* */ +/* _tx_timer_interrupt RISC-V64/GNU */ +/* 6.2.1 */ +/* AUTHOR */ +/* */ +/* Scott Larson, Microsoft Corporation */ +/* */ +/* DESCRIPTION */ +/* */ +/* This function processes the hardware timer interrupt. 
This      */
/*    processing includes incrementing the system clock and checking for  */
/*    time slice and/or timer expiration.  If either is found, the        */
/*    interrupt context save/restore functions are called along with the  */
/*    expiration functions.                                               */
/*                                                                        */
/*  INPUT                                                                 */
/*                                                                        */
/*    None                                                                */
/*                                                                        */
/*  OUTPUT                                                                */
/*                                                                        */
/*    None                                                                */
/*                                                                        */
/*  CALLS                                                                 */
/*                                                                        */
/*    _tx_timer_expiration_process          Timer expiration processing   */
/*    _tx_thread_time_slice                 Time slice interrupted thread */
/*                                                                        */
/*  CALLED BY                                                             */
/*                                                                        */
/*    interrupt vector                                                    */
/*                                                                        */
/*  RELEASE HISTORY                                                       */
/*                                                                        */
/*    DATE              NAME                      DESCRIPTION             */
/*                                                                        */
/*  03-08-2023      Scott Larson            Initial Version 6.2.1         */
/*                                                                        */
/**************************************************************************/
VOID _tx_timer_interrupt(VOID)
{
    /* Bump the system tick counter. */
    _tx_timer_system_clock++;

    /* Time-slice accounting for the interrupted thread. */
    if (_tx_timer_time_slice)
    {
        _tx_timer_time_slice--;

        if (_tx_timer_time_slice == 0)
        {
            /* The slice is used up: flag it for processing below. */
            _tx_timer_expired_time_slice = TX_TRUE;
        }
    }

    /* Look at the current slot of the active timer list. */
    if (*_tx_timer_current_ptr)
    {
        /* A timer is chained off this slot: flag expiration processing. */
        _tx_timer_expired = TX_TRUE;
    }
    else
    {
        /* Empty slot: advance, wrapping at the end of the list. */
        _tx_timer_current_ptr++;

        if (_tx_timer_current_ptr == _tx_timer_list_end)
        {
            _tx_timer_current_ptr = _tx_timer_list_start;
        }
    }

    /* Handle whatever expired. */
    if ((_tx_timer_expired_time_slice) || (_tx_timer_expired))
    {
        if (_tx_timer_expired)
        {
            /* Process timer expiration. */
            _tx_timer_expiration_process();
        }

        if (_tx_timer_expired_time_slice)
        {
            /* Time slice interrupted thread.
*/ + _tx_thread_time_slice(); + } + } +} diff --git a/port/moonlight/uart.h b/port/moonlight/uart.h new file mode 100644 index 0000000..38eb4b3 --- /dev/null +++ b/port/moonlight/uart.h @@ -0,0 +1,25 @@ +#ifndef _DEVICES_UART_H +#define _DEVICES_UART_H +#include "gen/uart.h" +#include + +static inline uint32_t uart_get_tx_free(volatile uart_t* reg) { return get_uart_rx_tx_reg_tx_free(reg); } + +static inline uint32_t uart_get_tx_empty(volatile uart_t* reg) { return get_uart_rx_tx_reg_tx_empty(reg); } + +static inline uint32_t uart_get_rx_avail(volatile uart_t* reg) { return get_uart_rx_tx_reg_rx_avail(reg); } + +static inline void uart_write(volatile uart_t* reg, uint8_t data) { + while(get_uart_rx_tx_reg_tx_free(reg) == 0) + ; + set_uart_rx_tx_reg_data(reg, data); +} + +static inline uint8_t uart_read(volatile uart_t* reg) { + uint32_t res = get_uart_rx_tx_reg_data(reg); + while((res & 0x10000) == 0) + res = get_uart_rx_tx_reg_data(reg); + return res; +} + +#endif /* _DEVICES_UART_H */ diff --git a/port/moonlight/vector_table.c b/port/moonlight/vector_table.c new file mode 100644 index 0000000..a6e82ed --- /dev/null +++ b/port/moonlight/vector_table.c @@ -0,0 +1,166 @@ +/* + Baremetal main program with timer interrupt. + SPDX-License-Identifier: Unlicense + + https://five-embeddev.com/ + + Tested with sifive-hifive-revb, but should not have any + dependencies to any particular implementation. + +*/ + + +// Makes use of GCC interrupt and weak reference/alias attributes +// https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#Common-Function-Attributes +// https://gcc.gnu.org/onlinedocs/gcc/RISC-V-Function-Attributes.html#RISC-V-Function-Attributes + +// Vector table - not to be called. 
+void riscv_mtvec_table(void) __attribute__ ((naked, section(".text.mtvec_table") ,aligned(256))); +void riscv_stvec_table(void) __attribute__ ((naked, section(".text.stvec_table") ,aligned(256))); +void riscv_utvec_table(void) __attribute__ ((naked, section(".text.utvec_table") ,aligned(256))); + +// Default "NOP" implementations +static void riscv_nop_machine(void) __attribute__ ((interrupt ("machine")) ); +static void riscv_nop_supervisor(void) __attribute__ ((interrupt ("supervisor")) ); +static void riscv_nop_user(void) __attribute__ ((interrupt ("user")) ); + +// Weak alias to the "NOP" implementations. If another function +void riscv_mtvec_exception(void) __attribute__ ((interrupt ("machine") , weak, alias("riscv_nop_machine") )); +void riscv_mtvec_msi(void) __attribute__ ((interrupt ("machine") , weak, alias("riscv_nop_machine") )); +void riscv_mtvec_mti(void) __attribute__ ((interrupt ("machine") , weak, alias("riscv_nop_machine") )); +void riscv_mtvec_mei(void) __attribute__ ((interrupt ("machine") , weak, alias("riscv_nop_machine") )); +void riscv_mtvec_ssi(void) __attribute__ ((interrupt ("supervisor") , weak, alias("riscv_nop_machine") )); +void riscv_mtvec_sti(void) __attribute__ ((interrupt ("supervisor") , weak, alias("riscv_nop_machine") )); +void riscv_mtvec_sei(void) __attribute__ ((interrupt ("supervisor") , weak, alias("riscv_nop_machine") )); + +void riscv_stvec_exception(void) __attribute__ ((interrupt ("supervisor") , weak, alias("riscv_nop_supervisor") )); +void riscv_stvec_ssi(void) __attribute__ ((interrupt ("supervisor") , weak, alias("riscv_nop_supervisor") )); +void riscv_stvec_sti(void) __attribute__ ((interrupt ("supervisor") , weak, alias("riscv_nop_supervisor") )); +void riscv_stvec_sei(void) __attribute__ ((interrupt ("supervisor") , weak, alias("riscv_nop_supervisor") )); + +void riscv_utvec_usi(void) __attribute__ ((interrupt ("user") , weak, alias("riscv_nop_user") )); +void riscv_utvec_uti(void) __attribute__ ((interrupt 
("user") , weak, alias("riscv_nop_user") )); +void riscv_utvec_uei(void) __attribute__ ((interrupt ("user") , weak, alias("riscv_nop_user") )); + +#ifndef VECTOR_TABLE_MTVEC_PLATFORM_INTS + +void moonlight_mtvec_irq0(void) __attribute__ ((interrupt ("machine"), weak, alias("riscv_nop_machine") )); +void moonlight_mtvec_irq1(void) __attribute__ ((interrupt ("machine"), weak, alias("riscv_nop_machine") )); +void moonlight_mtvec_irq2(void) __attribute__ ((interrupt ("machine"), weak, alias("riscv_nop_machine") )); +void moonlight_mtvec_irq3(void) __attribute__ ((interrupt ("machine"), weak, alias("riscv_nop_machine") )); +void moonlight_mtvec_irq4(void) __attribute__ ((interrupt ("machine"), weak, alias("riscv_nop_machine") )); +void moonlight_mtvec_irq5(void) __attribute__ ((interrupt ("machine"), weak, alias("riscv_nop_machine") )); +void moonlight_mtvec_irq6(void) __attribute__ ((interrupt ("machine"), weak, alias("riscv_nop_machine") )); +void moonlight_mtvec_irq7(void) __attribute__ ((interrupt ("machine"), weak, alias("riscv_nop_machine") )); +void moonlight_mtvec_irq8(void) __attribute__ ((interrupt ("machine"), weak, alias("riscv_nop_machine") )); +void moonlight_mtvec_irq9(void) __attribute__ ((interrupt ("machine"), weak, alias("riscv_nop_machine") )); +void moonlight_mtvec_irq10(void) __attribute__ ((interrupt ("machine"), weak, alias("riscv_nop_machine") )); +void moonlight_mtvec_irq11(void) __attribute__ ((interrupt ("machine"), weak, alias("riscv_nop_machine") )); +void moonlight_mtvec_irq12(void) __attribute__ ((interrupt ("machine"), weak, alias("riscv_nop_machine") )); +void moonlight_mtvec_irq13(void) __attribute__ ((interrupt ("machine"), weak, alias("riscv_nop_machine") )); +void moonlight_mtvec_irq14(void) __attribute__ ((interrupt ("machine"), weak, alias("riscv_nop_machine") )); +void moonlight_mtvec_irq15(void) __attribute__ ((interrupt ("machine"), weak, alias("riscv_nop_machine") )); + +#endif // #ifndef VECTOR_TABLE_MTVEC_PLATFORM_INTS + 
#pragma GCC push_options

// Ensure the vector table is aligned.
// The bottom 4 bits of MTVEC are ignored - so align to 16 bytes

// Vector table. Do not call!
// Possible entries defined by mcause table
// http://five-embeddev.com/riscv-isa-manual/latest/machine.html#sec:mcause
//
// When vectored interrupts are enabled, interrupt cause 0, which
// corresponds to user-mode software interrupts, are vectored to the
// same location as synchronous exceptions. This ambiguity does not
// arise in practice, since user-mode software interrupts are either
// disabled or delegated to user mode.
void riscv_mtvec_table(void) {
    __asm__ volatile (
        ".org riscv_mtvec_table + 0*4;"
        "jal zero,riscv_mtvec_exception;"  /* 0 */
        ".org riscv_mtvec_table + 1*4;"
        "jal zero,riscv_mtvec_ssi;"  /* 1 */
        ".org riscv_mtvec_table + 3*4;"
        "jal zero,riscv_mtvec_msi;"  /* 3 */
        ".org riscv_mtvec_table + 5*4;"
        "jal zero,riscv_mtvec_sti;"  /* 5 */
        ".org riscv_mtvec_table + 7*4;"
        "jal zero,riscv_mtvec_mti;"  /* 7 */
        ".org riscv_mtvec_table + 9*4;"
        "jal zero,riscv_mtvec_sei;"  /* 9 */
        ".org riscv_mtvec_table + 11*4;"
        "jal zero,riscv_mtvec_mei;"  /* 11 */
#ifndef VECTOR_TABLE_MTVEC_PLATFORM_INTS
        /* Platform-specific causes start at entry 16; each jal is 4 bytes,
           so the entries below land on consecutive slots.
           BUG FIX: these previously used plain "jal <target>", which is
           "jal ra,<target>" and clobbers the interrupted context's ra
           before any prologue can save it.  A vector entry must not link:
           use "jal zero,..." (i.e. "j ...") like the entries above. */
        ".org riscv_mtvec_table + 16*4;"
        "jal zero,moonlight_mtvec_irq0;"
        "jal zero,moonlight_mtvec_irq1;"
        "jal zero,moonlight_mtvec_irq2;"
        "jal zero,moonlight_mtvec_irq3;"
        "jal zero,moonlight_mtvec_irq4;"
        "jal zero,moonlight_mtvec_irq5;"
        "jal zero,moonlight_mtvec_irq6;"
        "jal zero,moonlight_mtvec_irq7;"
        "jal zero,moonlight_mtvec_irq8;"
        "jal zero,moonlight_mtvec_irq9;"
        "jal zero,moonlight_mtvec_irq10;"
        "jal zero,moonlight_mtvec_irq11;"
        "jal zero,moonlight_mtvec_irq12;"
        "jal zero,moonlight_mtvec_irq13;"
        "jal zero,moonlight_mtvec_irq14;"
        "jal zero,moonlight_mtvec_irq15;"
#endif
        : /* output: none */
        : /* input : immediate */
        : /* clobbers: none */
    );
}
// Vector table. Do not call!
// See scause table for possible entries.
+// http://five-embeddev.com/riscv-isa-manual/latest/supervisor.html#sec:scause +void riscv_stvec_table(void) { + __asm__ volatile ( + ".org riscv_stvec_table + 0*4;" + "jal zero,riscv_stvec_exception;" /* 0 */ + ".org riscv_stvec_table + 1*4;" + "jal zero,riscv_stvec_ssi;" /* 1 */ + ".org riscv_stvec_table + 5*4;" + "jal zero,riscv_stvec_sti;" /* 5 */ + ".org riscv_stvec_table + 9*4;" + "jal zero,riscv_stvec_sei;" /* 9 */ + : /* output: none */ + : /* input : immediate */ + : /* clobbers: none */ + ); +} +// Vector table. Do not call! +void riscv_utvec_table(void) { + __asm__ volatile ( + ".org riscv_utvec_table + 0*4;" + "jal zero,riscv_utvec_usi;" /* 0 */ + ".org riscv_utvec_table + 4*4;" + "jal zero,riscv_utvec_uti;" /* 4 */ + ".org riscv_utvec_table + 8*4;" + "jal zero,riscv_utvec_uei;" /* 8 */ + : /* output: none */ + : /* input : immediate */ + : /* clobbers: none */ + ); +} + +// Ensure all ISR functions are aligned. +#pragma GCC optimize ("align-functions=4") + +static void riscv_nop_machine(void) { + // Nop machine mode interrupt. +} +static void riscv_nop_supervisor(void) { + // Nop supervisor mode interrupt. +} +static void riscv_nop_user(void) { + // Nop user mode interrupt. +} + +#pragma GCC pop_options \ No newline at end of file diff --git a/port/moonlight/vector_table.h b/port/moonlight/vector_table.h new file mode 100644 index 0000000..6882572 --- /dev/null +++ b/port/moonlight/vector_table.h @@ -0,0 +1,113 @@ +/* + Baremetal main program with timer interrupt. + SPDX-License-Identifier: Unlicense + + https://five-embeddev.com/ + + Tested with sifive-hifive-revb, but should not have any + dependencies to any particular implementation. + + Declarations of interrupt service routine entry points. + + If no implementation is defined then an alias to a default "NOP" + implementation will be linked instead. 
+ +*/ + +#ifndef VECTOR_TABLE_H +#define VECTOR_TABLE_H + + +/** Symbol for machine mode vector table - do not call + */ +void riscv_mtvec_table(void) __attribute__ ((naked)); +void riscv_stvec_table(void) __attribute__ ((naked)); +void riscv_utvec_table(void) __attribute__ ((naked)); + +/** Machine mode synchronous exception handler. + +http://five-embeddev.com/riscv-isa-manual/latest/machine.html#machine-trap-vector-base-address-register-mtvec + +When vectored interrupts are enabled, interrupt cause 0, which +corresponds to user-mode software interrupts, are vectored to the same +location as synchronous exceptions. This ambiguity does not arise in +practice, since user-mode software interrupts are either disabled or +delegated to user mode. + + */ +void riscv_mtvec_exception(void) __attribute__ ((interrupt ("machine")) ); + +/** Machine mode software interrupt */ +void riscv_mtvec_msi(void) __attribute__ ((interrupt ("machine") )); +/** Machine mode timer interrupt */ +void riscv_mtvec_mti(void) __attribute__ ((interrupt ("machine") )); +/** Machine mode external interrupt */ +void riscv_mtvec_mei(void) __attribute__ ((interrupt ("machine") )); + +/** Supervisor mode software interrupt */ +void riscv_mtvec_ssi(void) __attribute__ ((interrupt ("machine")) ); +/** Supervisor mode timer interrupt */ +void riscv_mtvec_sti(void) __attribute__ ((interrupt ("machine")) ); +/** Supervisor mode external interrupt */ +void riscv_mtvec_sei(void) __attribute__ ((interrupt ("machine")) ); + +/** Supervisor mode synchronous exception handler.
*/ +void riscv_stvec_exception(void) __attribute__ ((interrupt ("supervisor")) ); + +/** Supervisor mode software interrupt */ +void riscv_stvec_ssi(void) __attribute__ ((interrupt ("supervisor")) ); +/** Supervisor mode timer interrupt */ +void riscv_stvec_sti(void) __attribute__ ((interrupt ("supervisor")) ); +/** Supervisor mode external interrupt */ +void riscv_stvec_sei(void) __attribute__ ((interrupt ("supervisor")) ); + +/** User mode software interrupt */ +void riscv_utvec_usi(void) __attribute__ ((interrupt ("user")) ); +/** User mode timer interrupt */ +void riscv_utvec_uti(void) __attribute__ ((interrupt ("user")) ); +/** User mode external interrupt */ +void riscv_utvec_uei(void) __attribute__ ((interrupt ("user")) ); + +#ifndef VECTOR_TABLE_MTVEC_PLATFORM_INTS + +/* Platform interrupts, bits 16+ of mie, mip etc +*/ + +/* Platform interrupt 0, bit 16 of mip/mie */ +void riscv_mtvec_platform_irq0(void) __attribute__ ((interrupt ("machine")) ); +/* Platform interrupt 1, bit 17 of mip/mie */ +void riscv_mtvec_platform_irq1(void) __attribute__ ((interrupt ("machine")) ); +/* Platform interrupt 2, bit 18 of mip/mie */ +void riscv_mtvec_platform_irq2(void) __attribute__ ((interrupt ("machine")) ); +/* Platform interrupt 3, bit 19 of mip/mie */ +void riscv_mtvec_platform_irq3(void) __attribute__ ((interrupt ("machine")) ); +/* Platform interrupt 4, bit 20 of mip/mie */ +void riscv_mtvec_platform_irq4(void) __attribute__ ((interrupt ("machine")) ); +/* Platform interrupt 5, bit 21 of mip/mie */ +void riscv_mtvec_platform_irq5(void) __attribute__ ((interrupt ("machine")) ); +/* Platform interrupt 6, bit 22 of mip/mie */ +void riscv_mtvec_platform_irq6(void) __attribute__ ((interrupt ("machine")) ); +/* Platform interrupt 7, bit 23 of mip/mie */ +void riscv_mtvec_platform_irq7(void) __attribute__ ((interrupt ("machine")) ); +/* Platform interrupt 8, bit 24 of mip/mie */ +void riscv_mtvec_platform_irq8(void) __attribute__ ((interrupt ("machine")) ); +/* Platform interrupt 9,
bit 25 of mip/mie */ +void riscv_mtvec_platform_irq9(void) __attribute__ ((interrupt ("machine")) ); +/* Platform interrupt 10, bit 26 of mip/mie */ +void riscv_mtvec_platform_irq10(void) __attribute__ ((interrupt ("machine")) ); +/* Platform interrupt 11, bit 27 of mip/mie */ +void riscv_mtvec_platform_irq11(void) __attribute__ ((interrupt ("machine")) ); +/* Platform interrupt 12, bit 28 of mip/mie */ +void riscv_mtvec_platform_irq12(void) __attribute__ ((interrupt ("machine")) ); +/* Platform interrupt 13, bit 29 of mip/mie */ +void riscv_mtvec_platform_irq13(void) __attribute__ ((interrupt ("machine")) ); +/* Platform interrupt 14, bit 30 of mip/mie */ +void riscv_mtvec_platform_irq14(void) __attribute__ ((interrupt ("machine")) ); +/* Platform interrupt 15, bit 31 of mip/mie */ +void riscv_mtvec_platform_irq15(void) __attribute__ ((interrupt ("machine")) ); + + +#endif // #ifndef VECTOR_TABLE_MTVEC_PLATFORM_INTS + + +#endif // #ifndef VECTOR_TABLE_H \ No newline at end of file diff --git a/port/picolibc/port.c b/port/picolibc/port.c new file mode 100644 index 0000000..4390972 --- /dev/null +++ b/port/picolibc/port.c @@ -0,0 +1,41 @@ +#include +#include + +extern void uart_putc(char c); +extern int uart_getc(void); + +static int +board_putc(char c, FILE *file) +{ + (void) file; /* Not used in this function */ + uart_putc(c); /* Defined by underlying system */ + return c; +} + +static int +board_getc(FILE *file) +{ + unsigned char c; + (void) file; /* Not used in this function */ + c = uart_getc(); /* Defined by underlying system */ + return c; +} + +static int +board_flush(FILE *file) +{ + (void) file; /* Not used in this function */ + return 0; +} + +static FILE __stdio = FDEV_SETUP_STREAM(board_putc, board_getc, board_flush, _FDEV_SETUP_RW); +/* + * Picolibc requires the application to define these + * when using stdio in freestanding environments. 
+ */ +// FILE * const stdin = NULL; +// FILE * const stdout = NULL; +// FILE * const stderr = NULL; +FILE *const stdin = &__stdio; +__strong_reference(stdin, stdout); +__strong_reference(stdin, stderr); diff --git a/port/threadx/CMakeLists.txt b/port/threadx/CMakeLists.txt new file mode 100644 index 0000000..ed5c4ae --- /dev/null +++ b/port/threadx/CMakeLists.txt @@ -0,0 +1,20 @@ + +target_sources(${PROJECT_NAME} + PRIVATE + # {{BEGIN_TARGET_SOURCES}} + ${CMAKE_CURRENT_LIST_DIR}/src/tx_initialize_low_level.S + ${CMAKE_CURRENT_LIST_DIR}/src/tx_thread_context_restore.S + ${CMAKE_CURRENT_LIST_DIR}/src/tx_thread_context_save.S + #${CMAKE_CURRENT_LIST_DIR}/src/tx_thread_interrupt_control.S + ${CMAKE_CURRENT_LIST_DIR}/src/tx_thread_schedule.S + ${CMAKE_CURRENT_LIST_DIR}/src/tx_thread_stack_build.S + ${CMAKE_CURRENT_LIST_DIR}/src/tx_thread_system_return.S + #${CMAKE_CURRENT_LIST_DIR}/src/tx_timer_interrupt.c + #${CMAKE_CURRENT_LIST_DIR}/src/platform_rand.c + # {{END_TARGET_SOURCES}} +) + +target_include_directories(${PROJECT_NAME} + PUBLIC + ${CMAKE_CURRENT_LIST_DIR}/inc +) diff --git a/port/threadx/inc/csr.h b/port/threadx/inc/csr.h new file mode 100644 index 0000000..ccb8908 --- /dev/null +++ b/port/threadx/inc/csr.h @@ -0,0 +1,373 @@ +/*************************************************************************** + * Copyright (c) 2024 Microsoft Corporation + * + * This program and the accompanying materials are made available under the + * terms of the MIT License which is available at + * https://opensource.org/licenses/MIT. + * + * SPDX-License-Identifier: MIT + **************************************************************************/ + + +#ifndef CSR_H +#define CSR_H + + +// Machine Status Register, mstatus +#define MSTATUS_MPP_MASK (3L << 11) // previous mode. +#define MSTATUS_MPP_M (3L << 11) +#define MSTATUS_MPP_S (1L << 11) +#define MSTATUS_MPP_U (0L << 11) +#define MSTATUS_MIE (1L << 3) // machine-mode interrupt enable. 
+#define MSTATUS_MPIE (1L << 7) +#define MSTATUS_FS (1L << 13) + +// Machine-mode Interrupt Enable +#define MIE_MTIE (1L << 7) +#define MIE_MSIE (1L << 3) +#define MIE_MEIE (1L << 11) +#define MIE_STIE (1L << 5) // supervisor timer +#define MIE_SSIE (1L << 1) +#define MIE_SEIE (1L << 9) + +// Supervisor Status Register, sstatus +#define SSTATUS_SPP (1L << 8) // Previous mode, 1=Supervisor, 0=User +#define SSTATUS_SPIE (1L << 5) // Supervisor Previous Interrupt Enable +#define SSTATUS_UPIE (1L << 4) // User Previous Interrupt Enable +#define SSTATUS_SIE (1L << 1) // Supervisor Interrupt Enable +#define SSTATUS_UIE (1L << 0) // User Interrupt Enable +#define SSTATUS_SPIE (1L << 5) +#define SSTATUS_UPIE (1L << 4) + +// Supervisor Interrupt Enable +#define SIE_SEIE (1L << 9) // external +#define SIE_STIE (1L << 5) // timer +#define SIE_SSIE (1L << 1) // software + +#ifndef __ASSEMBLER__ + +#include + +static inline uint64_t riscv_get_core() +{ + uint64_t x; + asm volatile("csrr %0, mhartid" : "=r" (x) ); + return x; +} + +static inline uint64_t riscv_get_mstatus() +{ + uint64_t x; + asm volatile("csrr %0, mstatus" : "=r" (x) ); + return x; +} + +static inline void riscv_writ_mstatus(uint64_t x) +{ + asm volatile("csrw mstatus, %0" : : "r" (x)); +} + +// machine exception program counter, holds the +// instruction address to which a return from +// exception will go. 
+static inline void riscv_writ_mepc(uint64_t x) +{ + asm volatile("csrw mepc, %0" : : "r" (x)); +} + +static inline uint64_t riscv_get_sstatus() +{ + uint64_t x; + asm volatile("csrr %0, sstatus" : "=r" (x) ); + return x; +} + +static inline void riscv_writ_sstatus(uint64_t x) +{ + asm volatile("csrw sstatus, %0" : : "r" (x)); +} + +// Supervisor Interrupt Pending +static inline uint64_t riscv_get_sip() +{ + uint64_t x; + asm volatile("csrr %0, sip" : "=r" (x) ); + return x; +} + +static inline void riscv_writ_sip(uint64_t x) +{ + asm volatile("csrw sip, %0" : : "r" (x)); +} + +static inline uint64_t riscv_get_sie() +{ + uint64_t x; + asm volatile("csrr %0, sie" : "=r" (x) ); + return x; +} + +static inline void riscv_writ_sie(uint64_t x) +{ + asm volatile("csrw sie, %0" : : "r" (x)); +} + +static inline uint64_t riscv_get_mie() +{ + uint64_t x; + asm volatile("csrr %0, mie" : "=r" (x) ); + return x; +} + +static inline void riscv_writ_mie(uint64_t x) +{ + asm volatile("csrw mie, %0" : : "r" (x)); +} + +// supervisor exception program counter, holds the +// instruction address to which a return from +// exception will go. +static inline void riscv_writ_sepc(uint64_t x) +{ + asm volatile("csrw sepc, %0" : : "r" (x)); +} + +static inline uint64_t riscv_get_sepc() +{ + uint64_t x; + asm volatile("csrr %0, sepc" : "=r" (x) ); + return x; +} + +// Machine Exception Delegation +static inline uint64_t riscv_get_medeleg() +{ + uint64_t x; + asm volatile("csrr %0, medeleg" : "=r" (x) ); + return x; +} + +static inline void riscv_writ_medeleg(uint64_t x) +{ + asm volatile("csrw medeleg, %0" : : "r" (x)); +} + +// Machine Interrupt Delegation +static inline uint64_t riscv_get_mideleg() +{ + uint64_t x; + asm volatile("csrr %0, mideleg" : "=r" (x) ); + return x; +} + +static inline void riscv_writ_mideleg(uint64_t x) +{ + asm volatile("csrw mideleg, %0" : : "r" (x)); +} + +// Supervisor Trap-Vector Base Address +// low two bits are mode. 
+static inline void riscv_writ_stvec(uint64_t x) +{ + asm volatile("csrw stvec, %0" : : "r" (x)); +} + +static inline uint64_t riscv_get_stvec() +{ + uint64_t x; + asm volatile("csrr %0, stvec" : "=r" (x) ); + return x; +} + +// Supervisor Timer Comparison Register +static inline uint64_t riscv_get_stimecmp() +{ + uint64_t x; + // asm volatile("csrr %0, stimecmp" : "=r" (x) ); + asm volatile("csrr %0, 0x14d" : "=r" (x) ); + return x; +} + +static inline void riscv_writ_stimecmp(uint64_t x) +{ + // asm volatile("csrw stimecmp, %0" : : "r" (x)); + asm volatile("csrw 0x14d, %0" : : "r" (x)); +} + +// Machine Environment Configuration Register +static inline uint64_t riscv_get_menvcfg() +{ + uint64_t x; + // asm volatile("csrr %0, menvcfg" : "=r" (x) ); + asm volatile("csrr %0, 0x30a" : "=r" (x) ); + return x; +} + +static inline void riscv_writ_menvcfg(uint64_t x) +{ + // asm volatile("csrw menvcfg, %0" : : "r" (x)); + asm volatile("csrw 0x30a, %0" : : "r" (x)); +} + +// Physical Memory Protection +static inline void riscv_writ_pmpcfg0(uint64_t x) +{ + asm volatile("csrw pmpcfg0, %0" : : "r" (x)); +} + +static inline void riscv_writ_pmpaddr0(uint64_t x) +{ + asm volatile("csrw pmpaddr0, %0" : : "r" (x)); +} + +// supervisor address translation and protection; +// holds the address of the page table. 
+static inline void riscv_writ_satp(uint64_t x) +{ + asm volatile("csrw satp, %0" : : "r" (x)); +} + +static inline uint64_t riscv_get_satp() +{ + uint64_t x; + asm volatile("csrr %0, satp" : "=r" (x) ); + return x; +} + +// Supervisor Trap Cause +static inline uint64_t riscv_get_scause() +{ + uint64_t x; + asm volatile("csrr %0, scause" : "=r" (x) ); + return x; +} + +// Supervisor Trap Value +static inline uint64_t riscv_get_stval() +{ + uint64_t x; + asm volatile("csrr %0, stval" : "=r" (x) ); + return x; +} + +// Machine-mode Counter-Enable +static inline void riscv_writ_mcounteren(uint64_t x) +{ + asm volatile("csrw mcounteren, %0" : : "r" (x)); +} + +static inline uint64_t riscv_get_mcounteren() +{ + uint64_t x; + asm volatile("csrr %0, mcounteren" : "=r" (x) ); + return x; +} + +// machine-mode cycle counter +static inline uint64_t riscv_get_time() +{ + uint64_t x; + asm volatile("csrr %0, time" : "=r" (x) ); + return x; +} + +// enable device interrupts +static inline void riscv_sintr_on() +{ + uint64_t sstatus = riscv_get_sstatus(); + sstatus |= SSTATUS_SIE; + riscv_writ_sstatus(sstatus); +} + +// disable device interrupts +static inline void riscv_sintr_off() +{ + uint64_t sstatus = riscv_get_sstatus(); + sstatus &= (~SSTATUS_SIE); + riscv_writ_sstatus(sstatus); +} + +// are device interrupts enabled? +static inline int riscv_sintr_get() +{ + uint64_t x = riscv_get_sstatus(); + return (x & SSTATUS_SIE) != 0; +} + +static inline void riscv_sintr_restore(int x) +{ + if(x) + riscv_sintr_on(); + else + riscv_sintr_off(); +} + +// enable device interrupts +static inline void riscv_mintr_on() +{ + uint64_t mstatus = riscv_get_mstatus(); + mstatus |= MSTATUS_MIE; + riscv_writ_mstatus(mstatus); +} + +// disable device interrupts +static inline void riscv_mintr_off() +{ + uint64_t mstatus = riscv_get_mstatus(); + mstatus &= (~MSTATUS_MIE); + riscv_writ_mstatus(mstatus); +} + +// are device interrupts enabled? 
+static inline int riscv_mintr_get() +{ + uint64_t x = riscv_get_mstatus(); + return (x & MSTATUS_MIE) != 0; +} + +static inline void riscv_mintr_restore(int x) +{ + if(x) + riscv_mintr_on(); + else + riscv_mintr_off(); +} + +static inline uint64_t riscv_get_sp() +{ + uint64_t x; + asm volatile("mv %0, sp" : "=r" (x) ); + return x; +} + +// read and write tp, the thread pointer, which xv6 uses to hold +// this core's hartid (core number), the index into cpus[]. +static inline uint64_t riscv_get_tp() +{ + uint64_t x; + asm volatile("mv %0, tp" : "=r" (x) ); + return x; +} + +static inline void riscv_writ_tp(uint64_t x) +{ + asm volatile("mv tp, %0" : : "r" (x)); +} + +static inline uint64_t riscv_get_ra() +{ + uint64_t x; + asm volatile("mv %0, ra" : "=r" (x) ); + return x; +} + +// flush the TLB. +static inline void sfence_vma() +{ + // the zero, zero means flush all TLB entries. + asm volatile("sfence.vma zero, zero"); +} + +#endif // __ASSEMBLER__ + +#endif diff --git a/port/threadx/inc/nx_port.h b/port/threadx/inc/nx_port.h new file mode 100644 index 0000000..d9a3752 --- /dev/null +++ b/port/threadx/inc/nx_port.h @@ -0,0 +1,194 @@ +/*************************************************************************** + * Copyright (c) 2024 Microsoft Corporation + * Copyright (c) 2025-present Eclipse ThreadX Contributors + * + * This program and the accompanying materials are made available under the + * terms of the MIT License which is available at + * https://opensource.org/licenses/MIT. 
+ * + * SPDX-License-Identifier: MIT + **************************************************************************/ + +/**************************************************************************/ +/**************************************************************************/ +/** */ +/** NetX Component */ +/** */ +/** Port Specific */ +/** */ +/**************************************************************************/ +/**************************************************************************/ + +/**************************************************************************/ +/* */ +/* PORT SPECIFIC C INFORMATION RELEASE */ +/* */ +/* nx_port.h PIC32x/Microchip */ +/* 6.4.3 */ +/* */ +/* AUTHOR */ +/* */ +/* William E. Lamie, Microsoft Corporation */ +/* */ +/* DESCRIPTION */ +/* */ +/* This file contains data type definitions that make the NetX */ +/* real-time TCP/IP function identically on a variety of different */ +/* processor architectures. */ +/* */ +/* RELEASE HISTORY */ +/* */ +/* DATE NAME DESCRIPTION */ +/* */ +/* 10-31-2022 Zhen Kong Initial PIC32x/Microchip */ +/* Support Version 6.2.0 */ +/* */ +/**************************************************************************/ + +#ifndef NX_PORT_H +#define NX_PORT_H +#include "tx_port.h" +/* Determine if the optional NetX user define file should be used. */ + +#ifdef NX_INCLUDE_USER_DEFINE_FILE + +/* Yes, include the user defines in nx_user.h. The defines in this file may + alternately be defined on the command line. */ + +#include "nx_user.h" +#endif + +/* Default to little endian, since this is what most RISC-V targets are. */ + +#define NX_LITTLE_ENDIAN + +/* Define various constants for the port. */ + +#ifndef NX_IP_PERIODIC_RATE +#define NX_IP_PERIODIC_RATE \ + 10 /* Default IP periodic rate of 1 second for \ + ports with 1ms timer interrupts. This \ + value may be defined instead at the \ + command line and this value will not be \ + used. 
*/ +#endif + +/* Define macros that swap the endian for little endian ports. */ +#ifdef NX_LITTLE_ENDIAN +#define NX_CHANGE_ULONG_ENDIAN(arg) \ + { \ + ULONG _i; \ + ULONG _tmp; \ + _i = (UINT)arg; \ + /* _i = A, B, C, D */ \ + _tmp = _i ^ (((_i) >> 16) | (_i << 16)); \ + /* _tmp = _i ^ (_i ROR 16) = A^C, B^D, C^A, D^B */ \ + _tmp &= 0xff00ffff; \ + /* _tmp = A^C, 0, C^A, D^B */ \ + _i = ((_i) >> 8) | (_i << 24); \ + /* _i = D, A, B, C */ \ + _i = _i ^ ((_tmp) >> 8); \ + /* _i = D, C, B, A */ \ + arg = _i; \ + } +#define NX_CHANGE_USHORT_ENDIAN(a) (a = (((a >> 8) | (a << 8)) & 0xFFFF)) + +#ifndef htonl +#define htonl(val) NX_CHANGE_ULONG_ENDIAN(val) +#endif /* htonl */ + +#ifndef ntohl +#define ntohl(val) NX_CHANGE_ULONG_ENDIAN(val) +#endif /* ntohl */ + +#ifndef htons +#define htons(val) NX_CHANGE_USHORT_ENDIAN(val) +#endif /*htons */ + +#ifndef ntohs +#define ntohs(val) NX_CHANGE_USHORT_ENDIAN(val) +#endif /*ntohs */ + +#else + +#define NX_CHANGE_ULONG_ENDIAN(a) +#define NX_CHANGE_USHORT_ENDIAN(a) + +#ifndef htons +#define htons(val) (val) +#endif /* htons */ + +#ifndef ntohs +#define ntohs(val) (val) +#endif /* ntohs */ + +#ifndef ntohl +#define ntohl(val) (val) +#endif + +#ifndef htonl +#define htonl(val) (val) +#endif /* htonl */ + +#endif + +/* Define several macros for the error checking shell in NetX. 
*/ + +#ifndef TX_TIMER_PROCESS_IN_ISR + +#define NX_CALLER_CHECKING_EXTERNS \ + extern TX_THREAD* _tx_thread_current_ptr; \ + extern TX_THREAD _tx_timer_thread; \ + extern volatile ULONG TX_THREAD_GET_SYSTEM_STATE(); + +#define NX_THREADS_ONLY_CALLER_CHECKING \ + if((TX_THREAD_GET_SYSTEM_STATE()) || (_tx_thread_current_ptr == TX_NULL) || (_tx_thread_current_ptr == &_tx_timer_thread)) \ + return (NX_CALLER_ERROR); + +#define NX_INIT_AND_THREADS_CALLER_CHECKING \ + if(((TX_THREAD_GET_SYSTEM_STATE()) && (TX_THREAD_GET_SYSTEM_STATE() < ((ULONG)0xF0F0F0F0))) || \ + (_tx_thread_current_ptr == &_tx_timer_thread)) \ + return (NX_CALLER_ERROR); + +#define NX_NOT_ISR_CALLER_CHECKING \ + if((TX_THREAD_GET_SYSTEM_STATE()) && (TX_THREAD_GET_SYSTEM_STATE() < ((ULONG)0xF0F0F0F0))) \ + return (NX_CALLER_ERROR); + +#define NX_THREAD_WAIT_CALLER_CHECKING \ + if((wait_option) && \ + ((_tx_thread_current_ptr == NX_NULL) || (TX_THREAD_GET_SYSTEM_STATE()) || (_tx_thread_current_ptr == &_tx_timer_thread))) \ + return (NX_CALLER_ERROR); + +#else + +#define NX_CALLER_CHECKING_EXTERNS \ + extern TX_THREAD* _tx_thread_current_ptr; \ + extern volatile ULONG TX_THREAD_GET_SYSTEM_STATE(); + +#define NX_THREADS_ONLY_CALLER_CHECKING \ + if((TX_THREAD_GET_SYSTEM_STATE()) || (_tx_thread_current_ptr == TX_NULL)) \ + return (NX_CALLER_ERROR); + +#define NX_INIT_AND_THREADS_CALLER_CHECKING \ + if(((TX_THREAD_GET_SYSTEM_STATE()) && (TX_THREAD_GET_SYSTEM_STATE() < ((ULONG)0xF0F0F0F0)))) \ + return (NX_CALLER_ERROR); + +#define NX_NOT_ISR_CALLER_CHECKING \ + if((TX_THREAD_GET_SYSTEM_STATE()) && (TX_THREAD_GET_SYSTEM_STATE() < ((ULONG)0xF0F0F0F0))) \ + return (NX_CALLER_ERROR); + +#define NX_THREAD_WAIT_CALLER_CHECKING \ + if((wait_option) && ((_tx_thread_current_ptr == NX_NULL) || (TX_THREAD_GET_SYSTEM_STATE()))) \ + return (NX_CALLER_ERROR); + +#endif + +/* Define the version ID of NetX. This may be utilized by the application. 
*/ + +#ifdef NX_SYSTEM_INIT +CHAR _nx_version_id[] = "Copyright (c) 2024 Microsoft Corporation. * NetX Duo PIC32x/MPLAB Version 6.4.1 *"; +#else +extern CHAR _nx_version_id[]; +#endif + +#endif \ No newline at end of file diff --git a/port/threadx/inc/nx_user.h b/port/threadx/inc/nx_user.h new file mode 100644 index 0000000..35a1787 --- /dev/null +++ b/port/threadx/inc/nx_user.h @@ -0,0 +1,785 @@ +/*************************************************************************** + * Copyright (c) 2024 Microsoft Corporation + * Copyright (c) 2025-present Eclipse ThreadX Contributors + * + * This program and the accompanying materials are made available under the + * terms of the MIT License which is available at + * https://opensource.org/licenses/MIT. + * + * SPDX-License-Identifier: MIT + **************************************************************************/ + +/**************************************************************************/ +/**************************************************************************/ +/** */ +/** NetX Component */ +/** */ +/** User Specific */ +/** */ +/**************************************************************************/ +/**************************************************************************/ + +/**************************************************************************/ +/* */ +/* PORT SPECIFIC C INFORMATION RELEASE */ +/* */ +/* nx_user.h PORTABLE C */ +/* 6.4.3 */ +/* */ +/* AUTHOR */ +/* */ +/* Yuxin Zhou, Microsoft Corporation */ +/* */ +/* DESCRIPTION */ +/* */ +/* This file contains user defines for configuring NetX in specific */ +/* ways. This file will have an effect only if the application and */ +/* NetX library are built with NX_INCLUDE_USER_DEFINE_FILE defined. */ +/* Note that all the defines in this file may also be made on the */ +/* command line when building NetX library and application objects. 
*/ +/* */ +/* RELEASE HISTORY */ +/* */ +/* DATE NAME DESCRIPTION */ +/* */ +/* 05-19-2020 Yuxin Zhou Initial Version 6.0 */ +/* 09-30-2020 Yuxin Zhou Modified comment(s), */ +/* resulting in version 6.1 */ +/* 08-02-2021 Yuxin Zhou Modified comment(s), and */ +/* supported TCP/IP offload, */ +/* resulting in version 6.1.8 */ +/* 04-25-2022 Yuxin Zhou Modified comment(s), */ +/* resulting in version 6.1.11 */ +/* 10-31-2023 Tiejun Zhou Modified comment(s), */ +/* supported random IP id, */ +/* resulting in version 6.3.0 */ +/* */ +/**************************************************************************/ + +#ifndef NX_USER_H +#define NX_USER_H + +#if __riscv_xlen == 64 +#define NX_CRYPTO_HUGE_NUMBER_BITS 16 +#else +#define NX_CRYPTO_HUGE_NUMBER_BITS 32 +#endif +/* Define various build options for the NetX Duo port. The application should either make changes + here by commenting or un-commenting the conditional compilation defined OR supply the defines + though the compiler's equivalent of the -D option. */ + +/* Override various options with default values already assigned in nx_api.h or nx_port.h. Please + also refer to nx_port.h for descriptions on each of these options. */ + +/* Configuration options for Interface */ + +/* NX_MAX_PHYSICAL_INTERFACES defines the number physical network interfaces + present to NetX Duo IP layer. Physical interface does not include + loopback interface. By default there is at least one physical interface + in the system. */ +/* +#define NX_MAX_PHYSICAL_INTERFACES 1 +*/ + +/* Defined, this option disables NetX Duo support on the 127.0.0.1 loopback interface. + 127.0.0.1 loopback interface is enabled by default. Uncomment out the follow code to disable + the loopback interface. */ +/* +#define NX_DISABLE_LOOPBACK_INTERFACE +*/ + +/* If defined, the link driver is able to specify extra capability, such as checksum offloading features. 
*/ +/* +#define NX_ENABLE_INTERFACE_CAPABILITY +*/ + +/* Configuration options for IP */ + +/* This defines specifies the number of ThreadX timer ticks in one second. The default value is based + on ThreadX timer interrupt. */ +/* +#ifdef TX_TIMER_TICKS_PER_SECOND +#define NX_IP_PERIODIC_RATE TX_TIMER_TICKS_PER_SECOND +#else +#define NX_IP_PERIODIC_RATE 100 +#endif +*/ + +/* Defined, NX_ENABLE_IP_RAW_PACKET_FILTER allows an application to install a filter + for incoming raw packets. This feature is disabled by default. */ +/* +#define NX_ENABLE_IP_RAW_PACKET_FILTER +*/ + +/* This define specifies the maximum number of RAW packets can be queued for receive. The default + value is 20. */ +/* +#define NX_IP_RAW_MAX_QUEUE_DEPTH 20 +*/ + +/* Defined, this option enables IP static routing feature. By default IP static routing + feature is not compiled in. */ +/* +#define NX_ENABLE_IP_STATIC_ROUTING +*/ + +/* This define specifies the size of IP routing table. The default value is 8. */ +/* +#define NX_IP_ROUTING_TABLE_SIZE 8 +*/ + +/* Defined, this option enables random IP id. By default IP id is increased by one for each packet. */ +/* +#define NX_ENABLE_IP_ID_RANDOMIZATION +*/ + +/* This define specifies the maximum number of multicast groups that can be joined. + The default value is 7. */ +/* +#define NX_MAX_MULTICAST_GROUPS 7 +*/ + +/* Configuration options for IPv6 */ + +/* Disable IPv6 processing in NetX Duo. */ +/* +#define NX_DISABLE_IPV6 +*/ + +/* Define the number of entries in IPv6 address pool. */ +/* +#ifdef NX_MAX_PHYSICAL_INTERFACES +#define NX_MAX_IPV6_ADDRESSES (NX_MAX_PHYSICAL_INTERFACES * 3) +#endif +*/ + +/* Do not process IPv6 ICMP Redirect Messages. */ +/* +#define NX_DISABLE_ICMPV6_REDIRECT_PROCESS +*/ + +/* Do not process IPv6 Router Advertisement Messages. */ +/* +#define NX_DISABLE_ICMPV6_ROUTER_ADVERTISEMENT_PROCESS +*/ + +/* Do not send IPv6 Router Solicitation Messages. 
*/ +/* +#define NX_DISABLE_ICMPV6_ROUTER_SOLICITATION +*/ + +/* Define the max number of router solicitations a host sends until a router response + is received. If no response is received, the host concludes no router is present. */ +/* +#define NX_ICMPV6_MAX_RTR_SOLICITATIONS 3 +*/ + +/* Define the interval between which the host sends router solicitations in seconds. */ +/* +#define NX_ICMPV6_RTR_SOLICITATION_INTERVAL 4 +*/ + +/* Define the maximum delay for the initial router solicitation in seconds. */ +/* +#define NX_ICMPV6_RTR_SOLICITATION_DELAY 1 +*/ + +/* Do not send ICMPv4 Error Messages. */ +/* +#define NX_DISABLE_ICMPV4_ERROR_MESSAGE +*/ + +/* Do not send ICMPv6 Error Messages. */ +/* +#define NX_DISABLE_ICMPV6_ERROR_MESSAGE +*/ + +/* Disable the Duplicate Address Detection (DAD) protocol when configuring the host IP address. */ +/* +#define NX_DISABLE_IPV6_DAD +*/ + +/* If defined, application is able to control whether or not to perform IPv6 stateless + address autoconfiguration with nxd_ipv6_stateless_address_autoconfig_enable() or + nxd_ipv6_stateless_address_autoconfig_disable() service. If defined, the system starts + with IPv6 stateless address autoconfiguration enabled. This feature is disabled by default. */ +/* +#define NX_IPV6_STATELESS_AUTOCONFIG_CONTROL +*/ + +/* If enabled, application is able to install a callback function to get notified + when an interface IPv6 address is changed. By default this feature is disabled. */ +/* +#define NX_ENABLE_IPV6_ADDRESS_CHANGE_NOTIFY +*/ + +/* Defined, this option prevents NetX Duo from removing stale (old) cache table entries + whose timeout has not expired so are otherwise still valid) to make room for new entries + when the table is full. Static and router entries are not purged. */ +/* +#define NX_DISABLE_IPV6_PURGE_UNUSED_CACHE_ENTRIES +*/ + +/* This define enables simple IPv6 multicast group join/leave function. By default + the IPv6 multicast join/leave function is not enabled. 
*/ +/* +#define NX_ENABLE_IPV6_MULTICAST +*/ + +/* Defined, Minimum Path MTU Discovery feature is enabled. */ +/* +#define NX_ENABLE_IPV6_PATH_MTU_DISCOVERY +*/ + +/* Define wait interval in seconds to reset the path MTU for a destination + table entry after decreasing it in response to a packet too big error message. + RFC 1981 Section 5.4 states the minimum time to wait is + 5 minutes and recommends 10 minutes. +*/ +/* +#define NX_PATH_MTU_INCREASE_WAIT_INTERVAL 600 +*/ + +/* Configuration options for Neighbor Discovery. */ +/* Define values used for Neighbor Discovery protocol. + The default values are suggested by RFC2461, chapter 10. */ + +/* Define the maximum number of multicast Neighbor Solicitation packets + NetX Duo sends for a packet destination needing physical mapping + to the IP address. */ +/* +#define NX_MAX_MULTICAST_SOLICIT 3 +*/ + +/* Define the maximum number of unicast Neighbor Solicitation packets + NetX Duo sends for a cache entry whose reachable time has expired + and gone "stale". */ +/* +#define NX_MAX_UNICAST_SOLICIT 3 +*/ + +/* Define the length of time, in seconds, that a Neighbor Cache table entry + remains in the reachable state before it becomes state. */ +/* +#define NX_REACHABLE_TIME 30 +*/ + +/* Define the length of time, in milliseconds, between retransmitting + Neighbor Solicitation (NS) packets. */ +/* +#define NX_RETRANS_TIMER 1000 +*/ + +/* Define the length of time, in seconds, for a Neighbor Cache entry + to remain in the Delay state. This is the Delay first probe timer. */ +/* +#define NX_DELAY_FIRST_PROBE_TIME 5 +*/ + +/* This defines specifies the maximum number of packets that can be queued while waiting for a + Neighbor Discovery to resolve an IPv6 address. The default value is 4. */ +/* +#define NX_ND_MAX_QUEUE_DEPTH 4 +*/ + +/* Define the maximum ICMPv6 Duplicate Address Detect Transmit . */ +/* +#define NX_IPV6_DAD_TRANSMITS 3 +*/ + +/* Define the number of neighbor cache entries. 
*/ +/* +#define NX_IPV6_NEIGHBOR_CACHE_SIZE 16 +*/ + +/* Define the size of the IPv6 destination table. */ +/* +#define NX_IPV6_DESTINATION_TABLE_SIZE 8 +*/ + +/* Define the size of the IPv6 prefix table. */ +/* +#define NX_IPV6_PREFIX_LIST_TABLE_SIZE 8 +*/ + +/* Configuration options for IPSEC */ + +/* This define enables IPSEC in NetX Duo. */ +/* +#define NX_IPSEC_ENABLE +*/ + +/* Configuration options for NAT */ + +/* This define enables NAT process in NetX Duo. */ +/* +#define NX_NAT_ENABLE +*/ + +/* Configuration options for IGMP */ + +/* Defined, IGMP v2 support is disabled. By default NetX Duo + is built with IGMPv2 enabled . By uncommenting this option, + NetX Duo reverts back to IGMPv1 only. */ +/* +#define NX_DISABLE_IGMPV2 +*/ + +/* Configuration options for ARP */ + +/* When defines, ARP reply is sent when address conflict occurs. */ +/* +#define NX_ARP_DEFEND_BY_REPLY +*/ + +/* To use the ARP collision handler to check for invalid ARP messages + matching existing entries in the table (man in the middle attack), + enable this feature. */ +/* +#define NX_ENABLE_ARP_MAC_CHANGE_NOTIFICATION +*/ + +/* This define specifies the number of seconds ARP entries remain valid. The default value of 0 disables + aging of ARP entries. */ +/* +#define NX_ARP_EXPIRATION_RATE 0 +*/ + +/* This define specifies the number of seconds between ARP retries. The default value is 10, which represents + 10 seconds. */ +/* +#define NX_ARP_UPDATE_RATE 10 +*/ + +/* This define specifies the maximum number of ARP retries made without an ARP response. The default + value is 18. */ +/* +#define NX_ARP_MAXIMUM_RETRIES 18 +*/ + +/* This defines specifies the maximum number of packets that can be queued while waiting for an ARP + response. The default value is 4. */ +/* +#define NX_ARP_MAX_QUEUE_DEPTH 4 +*/ + +/* Defined, this option disables entering ARP request information in the ARP cache. */ +/* +#define NX_DISABLE_ARP_AUTO_ENTRY +*/ + +/* Define the ARP defend interval. 
The default value is 10 seconds. */ +/* +#define NX_ARP_DEFEND_INTERVAL 10 +*/ + +/* Configuration options for TCP */ + +/* This define specifies how the number of system ticks (NX_IP_PERIODIC_RATE) is divided to calculate the + timer rate for the TCP delayed ACK processing. The default value is 5, which represents 200ms. */ +/* +#define NX_TCP_ACK_TIMER_RATE 5 +*/ + +/* This define specifies how the number of system ticks (NX_IP_PERIODIC_RATE) is divided to calculate the + fast TCP timer rate. The fast TCP timer is used to drive various TCP timers, including the delayed ACK + timer. The default value is 10, which represents 100ms. */ +/* +#define NX_TCP_FAST_TIMER_RATE 10 +*/ + +/* This define specifies how the number of system ticks (NX_IP_PERIODIC_RATE) is divided to calculate the + timer rate for the TCP transmit retry processing. The default value is 1, which represents 1 second. */ +/* +#define NX_TCP_TRANSMIT_TIMER_RATE 1 +*/ + +/* This define specifies how many seconds of inactivity before the keepalive timer activates. The default + value is 7200, which represents 2 hours. */ +/* +#define NX_TCP_KEEPALIVE_INITIAL 7200 +*/ + +/* This define specifies how many seconds between retries of the keepalive timer assuming the other side + of the connection is not responding. The default value is 75, which represents 75 seconds between + retries. */ +/* +#define NX_TCP_KEEPALIVE_RETRY 75 +*/ + +/* This define specifies the maximum packets that are out of order. The default value is 8. */ +/* +#define NX_TCP_MAX_OUT_OF_ORDER_PACKETS 8 +*/ + +/* This define specifies the maximum number of TCP server listen requests. The default value is 10. */ +/* +#define NX_MAX_LISTEN_REQUESTS 10 +*/ + +/* Defined, this option enables the optional TCP keepalive timer. */ +/* +#define NX_ENABLE_TCP_KEEPALIVE +*/ + +/* Defined, this option enables the optional TCP immediate ACK response processing. 
*/ +/* +#define NX_TCP_IMMEDIATE_ACK +*/ + +/* This define specifies the number of TCP packets to receive before sending an ACK. */ +/* The default value is 2: ack every 2 packets. */ +/* +#define NX_TCP_ACK_EVERY_N_PACKETS 2 +*/ + +/* Automatically define NX_TCP_ACK_EVERY_N_PACKETS to 1 if NX_TCP_IMMEDIATE_ACK is defined. + This is needed for backward compatibility. */ +#if(defined(NX_TCP_IMMEDIATE_ACK) && !defined(NX_TCP_ACK_EVERY_N_PACKETS)) +#define NX_TCP_ACK_EVERY_N_PACKETS 1 +#endif + +/* This define specifies how many transmit retires are allowed before the connection is deemed broken. + The default value is 10. */ +/* +#define NX_TCP_MAXIMUM_RETRIES 10 +*/ + +/* This define specifies the maximum depth of the TCP transmit queue before TCP send requests are + suspended or rejected. The default value is 20, which means that a maximum of 20 packets can be in + the transmit queue at any given time. */ +/* +#define NX_TCP_MAXIMUM_TX_QUEUE 20 +*/ + +/* This define specifies how the retransmit timeout period changes between successive retries. If this + value is 0, the initial retransmit timeout is the same as subsequent retransmit timeouts. If this + value is 1, each successive retransmit is twice as long. The default value is 0. */ +/* +#define NX_TCP_RETRY_SHIFT 0 +*/ + +/* This define specifies how many keepalive retries are allowed before the connection is deemed broken. + The default value is 10. */ +/* +#define NX_TCP_KEEPALIVE_RETRIES 10 +*/ + +/* Defined, this option enables the TCP window scaling feature. (RFC 1323). Default disabled. */ +/* +#define NX_ENABLE_TCP_WINDOW_SCALING +*/ + +/* Defined, this option disables the reset processing during disconnect when the timeout value is + specified as NX_NO_WAIT. */ +/* +#define NX_DISABLE_RESET_DISCONNECT +*/ + +/* If defined, the incoming SYN packet (connection request) is checked for a minimum acceptable + MSS for the host to accept the connection. 
The default minimum should be based on the host + application packet pool payload, socket transmit queue depth and relevant application specific parameters. */ +/* +#define NX_ENABLE_TCP_MSS_CHECK +#define NX_TCP_MSS_MINIMUM 128 +*/ + +/* If defined, NetX Duo has a notify callback for the transmit TCP socket queue decreased from + the maximum queue depth. */ +/* +#define NX_ENABLE_TCP_QUEUE_DEPTH_UPDATE_NOTIFY +*/ + +/* Defined, feature of low watermark is enabled. */ +/* +#define NX_ENABLE_LOW_WATERMARK +*/ + +/* Define the maximum receive queue for TCP socket. */ +/* +#ifdef NX_ENABLE_LOW_WATERMARK +#define NX_TCP_MAXIMUM_RX_QUEUE 20 +#endif +*/ + +/* Configuration options for fragmentation */ + +/* Defined, this option disables both IPv4 and IPv6 fragmentation and reassembly logic. */ +/* +#define NX_DISABLE_FRAGMENTATION +*/ + +/* Defined, this option process IP fragmentation immediately. */ +/* +#define NX_FRAGMENT_IMMEDIATE_ASSEMBLY +*/ + +/* This define specifies the maximum time of IP reassembly. The default value is 60. + By default this option is not defined. */ +/* +#define NX_IP_MAX_REASSEMBLY_TIME 60 +*/ + +/* This define specifies the maximum time of IPv4 reassembly. The default value is 15. + Note that if NX_IP_MAX_REASSEMBLY_TIME is defined, this option is automatically defined as 60. + By default this option is not defined. */ +/* +#define NX_IPV4_MAX_REASSEMBLY_TIME 15 +*/ + +/* This define specifies the maximum time of IPv6 reassembly. The default value is 60. + Note that if NX_IP_MAX_REASSEMBLY_TIME is defined, this option is automatically defined as 60. + By default this option is not defined. */ +/* +#define NX_IPV6_MAX_REASSEMBLY_TIME 60 +*/ + +/* Configuration options for checksum */ + +/* Defined, this option disables checksum logic on received ICMPv4 packets. + Note that if NX_DISABLE_ICMP_RX_CHECKSUM is defined, this option is + automatically defined. 
By default this option is not defined.*/ +/* +#define NX_DISABLE_ICMPV4_RX_CHECKSUM +*/ + +/* Defined, this option disables checksum logic on received ICMPv6 packets. + Note that if NX_DISABLE_ICMP_RX_CHECKSUM is defined, this option is + automatically defined. By default this option is not defined.*/ +/* +#define NX_DISABLE_ICMPV6_RX_CHECKSUM +*/ + +/* Defined, this option disables checksum logic on received ICMPv4 or ICMPv6 packets. + Note that if NX_DISABLE_ICMP_RX_CHECKSUM is defined, NX_DISABLE_ICMPV4_RX_CHECKSUM + and NX_DISABLE_ICMPV6_RX_CHECKSUM are automatically defined. */ +/* +#define NX_DISABLE_ICMP_RX_CHECKSUM +*/ + +/* Defined, this option disables checksum logic on transmitted ICMPv4 packets. + Note that if NX_DISABLE_ICMP_TX_CHECKSUM is defined, this option is + automatically defined. By default this option is not defined.*/ +/* +#define NX_DISABLE_ICMPV4_TX_CHECKSUM +*/ + +/* Defined, this option disables checksum logic on transmitted ICMPv6 packets. + Note that if NX_DISABLE_ICMP_TX_CHECKSUM is defined, this option is + automatically defined. By default this option is not defined.*/ +/* +#define NX_DISABLE_ICMPV6_TX_CHECKSUM +*/ + +/* Defined, this option disables checksum logic on transmitted ICMPv4 or ICMPv6 packets. + Note that if NX_DISABLE_ICMP_TX_CHECKSUM is defined, NX_DISABLE_ICMPV4_TX_CHECKSUM + and NX_DISABLE_ICMPV6_TX_CHECKSUM are automatically defined. */ +/* +#define NX_DISABLE_ICMP_TX_CHECKSUM +*/ + +/* Defined, this option disables checksum logic on received IP packets. This is useful if the link-layer + has reliable checksum or CRC logic. */ +/* +#define NX_DISABLE_IP_RX_CHECKSUM +*/ + +/* Defined, this option disables checksum logic on transmitted IP packets. */ +/* +#define NX_DISABLE_IP_TX_CHECKSUM +*/ + +/* Defined, this option disables checksum logic on received TCP packets. */ +/* +#define NX_DISABLE_TCP_RX_CHECKSUM +*/ + +/* Defined, this option disables checksum logic on transmitted TCP packets. 
*/ +/* +#define NX_DISABLE_TCP_TX_CHECKSUM +*/ + +/* Defined, this option disables checksum logic on received UDP packets. */ + +/* +#define NX_DISABLE_UDP_RX_CHECKSUM +*/ + +/* Defined, this option disables checksum logic on transmitted UDP packets. Note that + IPV6 requires the UDP checksum computed for outgoing packets. If this option is + defined, the IPv6 NetX Duo host must ensure the UDP checksum is computed elsewhere + before the packet is transmitted. */ +/* +#define NX_DISABLE_UDP_TX_CHECKSUM +*/ + +/* Configuration options for statistics. */ + +/* Defined, ARP information gathering is disabled. */ +/* +#define NX_DISABLE_ARP_INFO +*/ + +/* Defined, IP information gathering is disabled. */ +/* +#define NX_DISABLE_IP_INFO +*/ + +/* Defined, ICMP information gathering is disabled. */ +/* +#define NX_DISABLE_ICMP_INFO +*/ + +/* Defined, IGMP information gathering is disabled. */ +/* +#define NX_DISABLE_IGMP_INFO +*/ + +/* Defined, packet information gathering is disabled. */ +/* +#define NX_DISABLE_PACKET_INFO +*/ + +/* Defined, RARP information gathering is disabled. */ +/* +#define NX_DISABLE_RARP_INFO +*/ + +/* Defined, TCP information gathering is disabled. */ +/* +#define NX_DISABLE_TCP_INFO +*/ + +/* Defined, UDP information gathering is disabled. */ +/* +#define NX_DISABLE_UDP_INFO +*/ + +/* Configuration options for Packet Pool */ + +/* This define specifies the size of the physical packet header. The default value is 16 (based on + a typical 16-byte Ethernet header). */ +/* +#define NX_PHYSICAL_HEADER 16 +*/ + +/* This define specifies the size of the physical packet trailer and is typically used to reserve storage + for things like Ethernet CRCs, etc. */ +/* +#define NX_PHYSICAL_TRAILER 4 +*/ + +/* Defined, this option disables the addition size checking on received packets. */ +/* +#define NX_DISABLE_RX_SIZE_CHECKING +*/ + +/* Defined, packet debug infromation is enabled. 
*/ +/* +#define NX_ENABLE_PACKET_DEBUG_INFO +*/ + +/* Defined, NX_PACKET structure is padded for alignment purpose. The default is no padding. */ +/* +#define NX_PACKET_HEADER_PAD +#define NX_PACKET_HEADER_PAD_SIZE 1 +*/ + +/* Defined, packet header and payload are aligned automatically by the value. The default value is sizeof(ULONG). */ +/* +#define NX_PACKET_ALIGNMENT sizeof(ULONG) +*/ + +/* If defined, the packet chain feature is removed. */ +/* +#define NX_DISABLE_PACKET_CHAIN +*/ + +/* Defined, the IP instance manages two packet pools. */ +/* +#define NX_ENABLE_DUAL_PACKET_POOL +*/ + +/* Configuration options for Others */ + +/* Defined, this option bypasses the basic NetX error checking. This define is typically used + after the application is fully debugged. */ +/* +#define NX_DISABLE_ERROR_CHECKING +*/ + +/* Defined, this option enables deferred driver packet handling. This allows the driver to place a raw + packet on the IP instance and have the driver's real processing routine called from the NetX internal + IP helper thread. */ +/* +#define NX_DRIVER_DEFERRED_PROCESSING +*/ + +/* Defined, the source address of incoming packet is checked. The default is disabled. */ +/* +#define NX_ENABLE_SOURCE_ADDRESS_CHECK +*/ + +/* Defined, the extended notify support is enabled. This feature adds additional callback/notify services + to NetX Duo API for notifying the application of socket events, such as TCP connection and disconnect + completion. These extended notify functions are mainly used by the BSD wrapper. The default is this + feature is disabled. */ +/* +#define NX_ENABLE_EXTENDED_NOTIFY_SUPPORT +*/ + +/* Defined, ASSERT is disabled. The default is enabled. */ +/* +#define NX_DISABLE_ASSERT +*/ + +/* Define the process when assert fails. */ +/* +#define NX_ASSERT_FAIL while (1) tx_thread_sleep(NX_WAIT_FOREVER); +*/ + +/* Defined, the IPv4 feature is disabled. 
*/ +/* +#define NX_DISABLE_IPV4 +*/ + +/* Defined, the destination address of ICMP packet is checked. The default is disabled. + An ICMP Echo Request destined to an IP broadcast or IP multicast address will be silently discarded. +*/ +/* +#define NX_ENABLE_ICMP_ADDRESS_CHECK +*/ + +/* Define the max string length. The default value is 1024. */ +/* +#define NX_MAX_STRING_LENGTH 1024 +*/ + +/* Defined, the TCP/IP offload feature is enabled. + NX_ENABLE_INTERFACE_CAPABILITY must be defined to enable this feature. */ +/* +#define NX_ENABLE_TCPIP_OFFLOAD +*/ + +/* Defined, the VLAN feature is enabled. + Note: Require driver support to use APIs from this file. + A quick check in driver is to search for + NX_LINK_RAW_PACKET_SEND. VLAN APIs are not supported if not found. */ +/* +#define NX_ENABLE_VLAN +*/ + +#ifdef __cplusplus +extern "C" { +#endif + +int rand(void); +void srand(unsigned seed); + +#ifdef __cplusplus +} +#endif + +#define NX_RAND rand +#define NX_SRAND srand +#endif diff --git a/port/threadx/inc/tx_port.h b/port/threadx/inc/tx_port.h new file mode 100644 index 0000000..cee20a7 --- /dev/null +++ b/port/threadx/inc/tx_port.h @@ -0,0 +1,279 @@ +/*************************************************************************** + * Copyright (c) 2024 Microsoft Corporation + * + * This program and the accompanying materials are made available under the + * terms of the MIT License which is available at + * https://opensource.org/licenses/MIT. 
+ * + * SPDX-License-Identifier: MIT + **************************************************************************/ + +/**************************************************************************/ +/**************************************************************************/ +/** */ +/** ThreadX Component */ +/** */ +/** Port Specific */ +/** */ +/**************************************************************************/ +/**************************************************************************/ + +/**************************************************************************/ +/* */ +/* PORT SPECIFIC C INFORMATION RELEASE */ +/* */ +/* tx_port.h RISC-V64/GNU */ +/* 6.2.1 */ +/* */ +/* AUTHOR */ +/* */ +/* Scott Larson, Microsoft Corporation */ +/* */ +/* DESCRIPTION */ +/* */ +/* This file contains data type definitions that make the ThreadX */ +/* real-time kernel function identically on a variety of different */ +/* processor architectures. For example, the size or number of bits */ +/* in an "int" data type vary between microprocessor architectures and */ +/* even C compilers for the same microprocessor. ThreadX does not */ +/* directly use native C data types. Instead, ThreadX creates its */ +/* own special types that can be mapped to actual data types by this */ +/* file to guarantee consistency in the interface and functionality. 
*/ +/* */ +/* RELEASE HISTORY */ +/* */ +/* DATE NAME DESCRIPTION */ +/* */ +/* 03-08-2023 Scott Larson Initial Version 6.2.1 */ +/* */ +/**************************************************************************/ + +#ifndef TX_PORT_H +#define TX_PORT_H + +#ifdef __ASSEMBLER__ + +#if __riscv_xlen == 64 +#define SLL32 sllw +#define STORE sd +#define LOAD ld +#define LWU lwu +#define LOG_REGBYTES 3 +#else +#define SLL32 sll +#define STORE sw +#define LOAD lw +#define LWU lw +#define LOG_REGBYTES 2 +#endif +#define REGBYTES (1 << LOG_REGBYTES) +#define TX_THREAD_STACK_END_OFFSET 2*4 + 2*REGBYTES +#define TX_THREAD_TIME_SLICE_OFFSET 3*4+ 3*REGBYTES + +#else /*not __ASSEMBLER__ */ + +/* Include for memset. */ +#include +/* include for strtoul*/ +#include + +/* Determine if the optional ThreadX user define file should be used. */ + +#ifdef TX_INCLUDE_USER_DEFINE_FILE + +/* Yes, include the user defines in tx_user.h. The defines in this file may + alternately be defined on the command line. */ + +#include "nx_user.h" +#include "tx_user.h" +#endif + +/* Define compiler library include files. */ + +/* Define ThreadX basic types for this port. */ + +#define VOID void +typedef char CHAR; +typedef unsigned char UCHAR; +typedef int INT; +typedef unsigned int UINT; +typedef int LONG; +typedef unsigned int ULONG; +typedef unsigned long long ULONG64; +typedef short SHORT; +typedef unsigned short USHORT; +#define ULONG64_DEFINED +#define ALIGN_TYPE_DEFINED +#define ALIGN_TYPE ULONG64 + +/* Define the priority levels for ThreadX. Legal values range + from 32 to 1024 and MUST be evenly divisible by 32. */ + +#ifndef TX_MAX_PRIORITIES +#define TX_MAX_PRIORITIES 32 +#endif + +/* Define the minimum stack for a ThreadX thread on this processor. If the size supplied during + thread creation is less than this value, the thread create call will return an error. 
*/ + +#ifndef TX_MINIMUM_STACK +#define TX_MINIMUM_STACK 1024 /* Minimum stack size for this port */ +#endif + +/* Define the system timer thread's default stack size and priority. These are only applicable + if TX_TIMER_PROCESS_IN_ISR is not defined. */ + +#ifndef TX_TIMER_THREAD_STACK_SIZE +#define TX_TIMER_THREAD_STACK_SIZE 1024 /* Default timer thread stack size */ +#endif + +#ifndef TX_TIMER_THREAD_PRIORITY +#define TX_TIMER_THREAD_PRIORITY 0 /* Default timer thread priority */ +#endif + +/* Define various constants for the ThreadX RISC-V port. */ + +#define TX_INT_DISABLE 0x00000000 /* Disable interrupts value */ +#define TX_INT_ENABLE 0x00000008 /* Enable interrupt value */ + +/* Define the clock source for trace event entry time stamp. The following two item are port specific. + For example, if the time source is at the address 0x0a800024 and is 16-bits in size, the clock + source constants would be: + +#define TX_TRACE_TIME_SOURCE *((ULONG *) 0x0a800024) +#define TX_TRACE_TIME_MASK 0x0000FFFFUL + +*/ + +#ifndef TX_TRACE_TIME_SOURCE +#define TX_TRACE_TIME_SOURCE ++_tx_trace_simulated_time +#endif +#ifndef TX_TRACE_TIME_MASK +#define TX_TRACE_TIME_MASK 0xFFFFFFFFUL +#endif + +/* Define the port specific options for the _tx_build_options variable. This variable indicates + how the ThreadX library was built. */ + +#define TX_PORT_SPECIFIC_BUILD_OPTIONS 0 + +/* Define the in-line initialization constant so that modules with in-line + initialization capabilities can prevent their initialization from being + a function call. */ + +#define TX_INLINE_INITIALIZATION + +/* Determine whether or not stack checking is enabled. By default, ThreadX stack checking is + disabled. When the following is defined, ThreadX thread stack checking is enabled. If stack + checking is enabled (TX_ENABLE_STACK_CHECKING is defined), the TX_DISABLE_STACK_FILLING + define is negated, thereby forcing the stack fill which is necessary for the stack checking + logic. 
*/ + +#ifdef TX_ENABLE_STACK_CHECKING +#undef TX_DISABLE_STACK_FILLING +#endif + +/* Define the TX_THREAD control block extensions for this port. The main reason + for the multiple macros is so that backward compatibility can be maintained with + existing ThreadX kernel awareness modules. */ + +#define TX_THREAD_EXTENSION_0 +#define TX_THREAD_EXTENSION_1 +#define TX_THREAD_EXTENSION_2 +#define TX_THREAD_EXTENSION_3 + +/* Define the port extensions of the remaining ThreadX objects. */ + +#define TX_BLOCK_POOL_EXTENSION +#define TX_BYTE_POOL_EXTENSION +#define TX_EVENT_FLAGS_GROUP_EXTENSION +#define TX_MUTEX_EXTENSION +#define TX_QUEUE_EXTENSION +#define TX_SEMAPHORE_EXTENSION +#define TX_TIMER_EXTENSION + +/* Define the user extension field of the thread control block. Nothing + additional is needed for this port so it is defined as white space. */ + +#ifndef TX_THREAD_USER_EXTENSION +#define TX_THREAD_USER_EXTENSION +#endif + +/* Define the macros for processing extensions in tx_thread_create, tx_thread_delete, + tx_thread_shell_entry, and tx_thread_terminate. */ + +#define TX_THREAD_CREATE_EXTENSION(thread_ptr) +#define TX_THREAD_DELETE_EXTENSION(thread_ptr) +#define TX_THREAD_COMPLETED_EXTENSION(thread_ptr) +#define TX_THREAD_TERMINATED_EXTENSION(thread_ptr) + +/* Define the ThreadX object creation extensions for the remaining objects. */ + +#define TX_BLOCK_POOL_CREATE_EXTENSION(pool_ptr) +#define TX_BYTE_POOL_CREATE_EXTENSION(pool_ptr) +#define TX_EVENT_FLAGS_GROUP_CREATE_EXTENSION(group_ptr) +#define TX_MUTEX_CREATE_EXTENSION(mutex_ptr) +#define TX_QUEUE_CREATE_EXTENSION(queue_ptr) +#define TX_SEMAPHORE_CREATE_EXTENSION(semaphore_ptr) +#define TX_TIMER_CREATE_EXTENSION(timer_ptr) + +/* Define the ThreadX object deletion extensions for the remaining objects. 
*/ + +#define TX_BLOCK_POOL_DELETE_EXTENSION(pool_ptr) +#define TX_BYTE_POOL_DELETE_EXTENSION(pool_ptr) +#define TX_EVENT_FLAGS_GROUP_DELETE_EXTENSION(group_ptr) +#define TX_MUTEX_DELETE_EXTENSION(mutex_ptr) +#define TX_QUEUE_DELETE_EXTENSION(queue_ptr) +#define TX_SEMAPHORE_DELETE_EXTENSION(semaphore_ptr) +#define TX_TIMER_DELETE_EXTENSION(timer_ptr) + +/* Define ThreadX interrupt lockout and restore macros for protection on + access of critical kernel information. The restore interrupt macro must + restore the interrupt posture of the running thread prior to the value + present prior to the disable macro. In most cases, the save area macro + is used to define a local function save area for the disable and restore + macros. */ + +#ifdef TX_DISABLE_INLINE + +ULONG64 _tx_thread_interrupt_control(unsigned int new_posture); + +#define TX_INTERRUPT_SAVE_AREA register ULONG64 interrupt_save; + +#define TX_DISABLE interrupt_save = _tx_thread_interrupt_control(TX_INT_DISABLE); +#define TX_RESTORE _tx_thread_interrupt_control(interrupt_save); + +#else + +#define TX_INTERRUPT_SAVE_AREA ULONG64 interrupt_save; +/* Atomically read mstatus into interrupt_save and clear bit 3 of mstatus. */ +#define TX_DISABLE \ + { __asm__("csrrci %0, mstatus, 0x08" : "=r"(interrupt_save) :); }; +/* We only care about mstatus.mie (bit 3), so mask interrupt_save and write to mstatus. */ +#define TX_RESTORE \ + { \ + register ULONG64 __tempmask = interrupt_save & 0x08; \ + __asm__("csrrs x0, mstatus, %0 \n\t" : : "r"(__tempmask) :); \ + }; + +#endif + +/* Define the interrupt lockout macros for each ThreadX object. */ + +#define TX_BLOCK_POOL_DISABLE TX_DISABLE +#define TX_BYTE_POOL_DISABLE TX_DISABLE +#define TX_EVENT_FLAGS_GROUP_DISABLE TX_DISABLE +#define TX_MUTEX_DISABLE TX_DISABLE +#define TX_QUEUE_DISABLE TX_DISABLE +#define TX_SEMAPHORE_DISABLE TX_DISABLE + +/* Define the version ID of ThreadX. This may be utilized by the application. 
*/ + +#ifdef TX_THREAD_INIT +CHAR _tx_version_id[] = "Copyright (c) 2024 Microsoft Corporation. * ThreadX RISC-V64/GNU Version 6.4.2 *"; +#else +extern CHAR _tx_version_id[]; +#endif + +#endif /*not __ASSEMBLER__ */ +#endif diff --git a/port/threadx/src/tx_initialize_low_level.S b/port/threadx/src/tx_initialize_low_level.S new file mode 100644 index 0000000..4f8a776 --- /dev/null +++ b/port/threadx/src/tx_initialize_low_level.S @@ -0,0 +1,163 @@ +/*************************************************************************** + * Copyright (c) 2024 Microsoft Corporation + * + * This program and the accompanying materials are made available under the + * terms of the MIT License which is available at + * https://opensource.org/licenses/MIT. + * + * SPDX-License-Identifier: MIT + **************************************************************************/ + +#include "csr.h" +#include "tx_port.h" + + .section .text + .align 4 +/**************************************************************************/ +/* */ +/* FUNCTION RELEASE */ +/* */ +/* trap_entry RISC-V64/GNU */ +/* 6.2.1 */ +/* AUTHOR */ +/* */ +/* Jer6y , luojun@oerv.isrc.iscas.ac.cn */ +/* */ +/* DESCRIPTION */ +/* */ +/* This function is responsible for riscv processor trap handle */ +/* It will do the contex save and call c trap_handler and do contex */ +/* load */ +/* */ +/* INPUT */ +/* */ +/* None */ +/* */ +/* OUTPUT */ +/* */ +/* None */ +/* */ +/* CALLS */ +/* */ +/* trap_handler */ +/* */ +/* CALLED BY */ +/* */ +/* hardware exception */ +/* RELEASE HISTORY */ +/* */ +/* DATE NAME DESCRIPTION */ +/* */ +/* 10-25-2024 Jerry Luo */ +/* */ +/**************************************************************************/ + + +/**************************************************************************/ +/**************************************************************************/ +/** */ +/** ThreadX Component */ +/** */ +/** Initialize */ +/** */ 
+/**************************************************************************/ +/**************************************************************************/ + .global trap_entry + .extern _tx_thread_context_restore + trap_entry: +#if defined(__riscv_float_abi_single) || defined(__riscv_float_abi_double) + addi sp, sp, -65*REGBYTES // Allocate space for all registers - with floating point enabled +#else + addi sp, sp, -32*REGBYTES // Allocate space for all registers - without floating point enabled +#endif + + STORE x1, 28*REGBYTES(sp) // Store RA, 28*REGBYTES(because call will override ra [ra is a calle register in riscv]) + + call _tx_thread_context_save + + csrr a0, mcause + csrr a1, mepc + csrr a2, mtval + addi sp, sp, -8 + STORE ra, 0(sp) + call trap_handler + LOAD ra, 0(sp) + addi sp, sp, 8 + call _tx_thread_context_restore + // it will nerver return +.weak trap_handler +trap_handler: +1: + j 1b + .section .text +/**************************************************************************/ +/* */ +/* FUNCTION RELEASE */ +/* */ +/* _tx_initialize_low_level RISC-V64/GNU */ +/* 6.2.1 */ +/* AUTHOR */ +/* */ +/* Scott Larson, Microsoft Corporation */ +/* */ +/* DESCRIPTION */ +/* */ +/* This function is responsible for any low-level processor */ +/* initialization, including setting up interrupt vectors, setting */ +/* up a periodic timer interrupt source, saving the system stack */ +/* pointer for use in ISR processing later, and finding the first */ +/* available RAM memory address for tx_application_define. 
*/ +/* */ +/* INPUT */ +/* */ +/* None */ +/* */ +/* OUTPUT */ +/* */ +/* None */ +/* */ +/* CALLS */ +/* */ +/* None */ +/* */ +/* CALLED BY */ +/* */ +/* _tx_initialize_kernel_enter ThreadX entry function */ +/* */ +/* RELEASE HISTORY */ +/* */ +/* DATE NAME DESCRIPTION */ +/* */ +/* 03-08-2023 Scott Larson Initial Version 6.2.1 */ +/* */ +/**************************************************************************/ +/* VOID _tx_initialize_low_level(VOID) +*/ + .global _tx_initialize_low_level + .weak _tx_initialize_low_level + .extern _end + .extern board_init +_tx_initialize_low_level: + STORE sp, _tx_thread_system_stack_ptr, t0 // Save system stack pointer + + la t0, _end // Pickup first free address + STORE t0, _tx_initialize_unused_memory, t1 // Save unused memory address + li t0, MSTATUS_MIE + csrrc zero, mstatus, t0 // clear MSTATUS_MIE bit + li t0, (MSTATUS_MPP_M | MSTATUS_MPIE ) + csrrs zero, mstatus, t0 // set MSTATUS_MPP, MPIE bit + li t0, (MIE_MTIE | MIE_MSIE | MIE_MEIE) + csrrs zero, mie, t0 // set mie +#ifdef __riscv_flen + li t0, MSTATUS_FS + csrrs zero, mstatus, t0 // set MSTATUS_FS bit to open f/d isa in riscv + fscsr x0 +#endif + addi sp, sp, -8 + STORE ra, 0(sp) + call board_init + LOAD ra, 0(sp) + addi sp, sp, 8 + la t0, trap_entry + csrw mtvec, t0 + ret diff --git a/port/threadx/src/tx_thread_context_restore.S b/port/threadx/src/tx_thread_context_restore.S new file mode 100644 index 0000000..200f230 --- /dev/null +++ b/port/threadx/src/tx_thread_context_restore.S @@ -0,0 +1,382 @@ +/*************************************************************************** + * Copyright (c) 2024 Microsoft Corporation + * + * This program and the accompanying materials are made available under the + * terms of the MIT License which is available at + * https://opensource.org/licenses/MIT. 
+ * + * SPDX-License-Identifier: MIT + **************************************************************************/ + + +/**************************************************************************/ +/**************************************************************************/ +/** */ +/** ThreadX Component */ +/** */ +/** Thread */ +/** */ +/**************************************************************************/ +/**************************************************************************/ + +#include "tx_port.h" + + .section .text +/**************************************************************************/ +/* */ +/* FUNCTION RELEASE */ +/* */ +/* _tx_thread_context_restore RISC-V64/GNU */ +/* 6.2.1 */ +/* AUTHOR */ +/* */ +/* Scott Larson, Microsoft Corporation */ +/* */ +/* DESCRIPTION */ +/* */ +/* This function restores the interrupt context if it is processing a */ +/* nested interrupt. If not, it returns to the interrupt thread if no */ +/* preemption is necessary. Otherwise, if preemption is necessary or */ +/* if no thread was running, the function returns to the scheduler. */ +/* */ +/* INPUT */ +/* */ +/* None */ +/* */ +/* OUTPUT */ +/* */ +/* None */ +/* */ +/* CALLS */ +/* */ +/* _tx_thread_schedule Thread scheduling routine */ +/* */ +/* CALLED BY */ +/* */ +/* ISRs Interrupt Service Routines */ +/* */ +/* RELEASE HISTORY */ +/* */ +/* DATE NAME DESCRIPTION */ +/* */ +/* 03-08-2023 Scott Larson Initial Version 6.2.1 */ +/* */ +/**************************************************************************/ +/* VOID _tx_thread_context_restore(VOID) +{ */ + .global _tx_thread_context_restore +_tx_thread_context_restore: + + /* Lockout interrupts. */ + + csrci mstatus, 0x08 // Disable interrupts + +#ifdef TX_ENABLE_EXECUTION_CHANGE_NOTIFY + call _tx_execution_isr_exit // Call the ISR execution exit function +#endif + + /* Determine if interrupts are nested. 
*/ + /* if (--_tx_thread_system_state) + { */ + + la t0, _tx_thread_system_state // Pickup addr of nested interrupt count + lw t1, 0(t0) // Pickup nested interrupt count + addi t1, t1, -1 // Decrement the nested interrupt counter + sw t1, 0(t0) // Store new nested count + beqz t1, _tx_thread_not_nested_restore // If 0, not nested restore + + /* Interrupts are nested. */ + + /* Just recover the saved registers and return to the point of + interrupt. */ + + /* Recover floating point registers. */ +#if defined(__riscv_float_abi_single) + flw f0, 31*REGBYTES(sp) // Recover ft0 + flw f1, 32*REGBYTES(sp) // Recover ft1 + flw f2, 33*REGBYTES(sp) // Recover ft2 + flw f3, 34*REGBYTES(sp) // Recover ft3 + flw f4, 35*REGBYTES(sp) // Recover ft4 + flw f5, 36*REGBYTES(sp) // Recover ft5 + flw f6, 37*REGBYTES(sp) // Recover ft6 + flw f7, 38*REGBYTES(sp) // Recover ft7 + flw f10,41*REGBYTES(sp) // Recover fa0 + flw f11,42*REGBYTES(sp) // Recover fa1 + flw f12,43*REGBYTES(sp) // Recover fa2 + flw f13,44*REGBYTES(sp) // Recover fa3 + flw f14,45*REGBYTES(sp) // Recover fa4 + flw f15,46*REGBYTES(sp) // Recover fa5 + flw f16,47*REGBYTES(sp) // Recover fa6 + flw f17,48*REGBYTES(sp) // Recover fa7 + flw f28,59*REGBYTES(sp) // Recover ft8 + flw f29,60*REGBYTES(sp) // Recover ft9 + flw f30,61*REGBYTES(sp) // Recover ft10 + flw f31,62*REGBYTES(sp) // Recover ft11 + lw t0, 63*REGBYTES(sp) // Recover fcsr + csrw fcsr, t0 // +#elif defined(__riscv_float_abi_double) + fld f0, 31*REGBYTES(sp) // Recover ft0 + fld f1, 32*REGBYTES(sp) // Recover ft1 + fld f2, 33*REGBYTES(sp) // Recover ft2 + fld f3, 34*REGBYTES(sp) // Recover ft3 + fld f4, 35*REGBYTES(sp) // Recover ft4 + fld f5, 36*REGBYTES(sp) // Recover ft5 + fld f6, 37*REGBYTES(sp) // Recover ft6 + fld f7, 38*REGBYTES(sp) // Recover ft7 + fld f10,41*REGBYTES(sp) // Recover fa0 + fld f11,42*REGBYTES(sp) // Recover fa1 + fld f12,43*REGBYTES(sp) // Recover fa2 + fld f13,44*REGBYTES(sp) // Recover fa3 + fld f14,45*REGBYTES(sp) // Recover fa4 + 
fld f15,46*REGBYTES(sp) // Recover fa5 + fld f16,47*REGBYTES(sp) // Recover fa6 + fld f17,48*REGBYTES(sp) // Recover fa7 + fld f28,59*REGBYTES(sp) // Recover ft8 + fld f29,60*REGBYTES(sp) // Recover ft9 + fld f30,61*REGBYTES(sp) // Recover ft10 + fld f31,62*REGBYTES(sp) // Recover ft11 + LOAD t0, 63*REGBYTES(sp) // Recover fcsr + csrw fcsr, t0 // +#endif + + /* Recover standard registers. */ + + /* Restore registers, + Skip global pointer because that does not change. + Also skip the saved registers since they have been restored by any function we called, + except s0 since we use it ourselves. */ + + LOAD t0, 30*REGBYTES(sp) // Recover mepc + csrw mepc, t0 // Setup mepc + li t0, 0x1880 // Prepare MPIP +#if defined(__riscv_float_abi_single) || defined(__riscv_float_abi_double) + li t1, 1<<13 + or t0, t1, t0 +#endif + csrw mstatus, t0 // Enable MPIP + + LOAD x1, 28*REGBYTES(sp) // Recover RA + LOAD x5, 19*REGBYTES(sp) // Recover t0 + LOAD x6, 18*REGBYTES(sp) // Recover t1 + LOAD x7, 17*REGBYTES(sp) // Recover t2 + LOAD x8, 12*REGBYTES(sp) // Recover s0 + LOAD x10, 27*REGBYTES(sp) // Recover a0 + LOAD x11, 26*REGBYTES(sp) // Recover a1 + LOAD x12, 25*REGBYTES(sp) // Recover a2 + LOAD x13, 24*REGBYTES(sp) // Recover a3 + LOAD x14, 23*REGBYTES(sp) // Recover a4 + LOAD x15, 22*REGBYTES(sp) // Recover a5 + LOAD x16, 21*REGBYTES(sp) // Recover a6 + LOAD x17, 20*REGBYTES(sp) // Recover a7 + LOAD x28, 16*REGBYTES(sp) // Recover t3 + LOAD x29, 15*REGBYTES(sp) // Recover t4 + LOAD x30, 14*REGBYTES(sp) // Recover t5 + LOAD x31, 13*REGBYTES(sp) // Recover t6 + +#if defined(__riscv_float_abi_single) || defined(__riscv_float_abi_double) + addi sp, sp, 65*REGBYTES // Recover stack frame - with floating point enabled +#else + addi sp, sp, 32*REGBYTES // Recover stack frame - without floating point enabled +#endif + mret // Return to point of interrupt + + /* } */ +_tx_thread_not_nested_restore: + /* Determine if a thread was interrupted and no preemption is required. 
*/ + /* else if (((_tx_thread_current_ptr) && (_tx_thread_current_ptr == _tx_thread_execute_ptr) + || (_tx_thread_preempt_disable)) + { */ + + LOAD t1, _tx_thread_current_ptr // Pickup current thread pointer + beqz t1, _tx_thread_idle_system_restore // If NULL, idle system restore + + LOAD t2, _tx_thread_preempt_disable // Pickup preempt disable flag + bgtz t2, _tx_thread_no_preempt_restore // If set, restore interrupted thread + + LOAD t2, _tx_thread_execute_ptr // Pickup thread execute pointer + bne t1, t2, _tx_thread_preempt_restore // If higher-priority thread is ready, preempt + + +_tx_thread_no_preempt_restore: + /* Restore interrupted thread or ISR. */ + + /* Pickup the saved stack pointer. */ + /* SP = _tx_thread_current_ptr -> tx_thread_stack_ptr; */ + + LOAD sp, 2*REGBYTES(t1) // Switch back to thread's stack + + /* Recover floating point registers. */ +#if defined(__riscv_float_abi_single) + flw f0, 31*REGBYTES(sp) // Recover ft0 + flw f1, 32*REGBYTES(sp) // Recover ft1 + flw f2, 33*REGBYTES(sp) // Recover ft2 + flw f3, 34*REGBYTES(sp) // Recover ft3 + flw f4, 35*REGBYTES(sp) // Recover ft4 + flw f5, 36*REGBYTES(sp) // Recover ft5 + flw f6, 37*REGBYTES(sp) // Recover ft6 + flw f7, 38*REGBYTES(sp) // Recover ft7 + flw f10,41*REGBYTES(sp) // Recover fa0 + flw f11,42*REGBYTES(sp) // Recover fa1 + flw f12,43*REGBYTES(sp) // Recover fa2 + flw f13,44*REGBYTES(sp) // Recover fa3 + flw f14,45*REGBYTES(sp) // Recover fa4 + flw f15,46*REGBYTES(sp) // Recover fa5 + flw f16,47*REGBYTES(sp) // Recover fa6 + flw f17,48*REGBYTES(sp) // Recover fa7 + flw f28,59*REGBYTES(sp) // Recover ft8 + flw f29,60*REGBYTES(sp) // Recover ft9 + flw f30,61*REGBYTES(sp) // Recover ft10 + flw f31,62*REGBYTES(sp) // Recover ft11 + lw t0, 63*REGBYTES(sp) // Recover fcsr + csrw fcsr, t0 // +#elif defined(__riscv_float_abi_double) + fld f0, 31*REGBYTES(sp) // Recover ft0 + fld f1, 32*REGBYTES(sp) // Recover ft1 + fld f2, 33*REGBYTES(sp) // Recover ft2 + fld f3, 34*REGBYTES(sp) // Recover 
ft3 + fld f4, 35*REGBYTES(sp) // Recover ft4 + fld f5, 36*REGBYTES(sp) // Recover ft5 + fld f6, 37*REGBYTES(sp) // Recover ft6 + fld f7, 38*REGBYTES(sp) // Recover ft7 + fld f10,41*REGBYTES(sp) // Recover fa0 + fld f11,42*REGBYTES(sp) // Recover fa1 + fld f12,43*REGBYTES(sp) // Recover fa2 + fld f13,44*REGBYTES(sp) // Recover fa3 + fld f14,45*REGBYTES(sp) // Recover fa4 + fld f15,46*REGBYTES(sp) // Recover fa5 + fld f16,47*REGBYTES(sp) // Recover fa6 + fld f17,48*REGBYTES(sp) // Recover fa7 + fld f28,59*REGBYTES(sp) // Recover ft8 + fld f29,60*REGBYTES(sp) // Recover ft9 + fld f30,61*REGBYTES(sp) // Recover ft10 + fld f31,62*REGBYTES(sp) // Recover ft11 + LOAD t0, 63*REGBYTES(sp) // Recover fcsr + csrw fcsr, t0 // +#endif + + /* Recover the saved context and return to the point of interrupt. */ + + /* Recover standard registers. */ + /* Restore registers, + Skip global pointer because that does not change */ + + LOAD t0, 30*REGBYTES(sp) // Recover mepc + csrw mepc, t0 // Setup mepc + li t0, 0x1880 // Prepare MPIP +#if defined(__riscv_float_abi_single) || defined(__riscv_float_abi_double) + li t1, 1<<13 + or t0, t1, t0 +#endif + csrw mstatus, t0 // Enable MPIP + + LOAD x1, 28*REGBYTES(sp) // Recover RA + LOAD x5, 19*REGBYTES(sp) // Recover t0 + LOAD x6, 18*REGBYTES(sp) // Recover t1 + LOAD x7, 17*REGBYTES(sp) // Recover t2 + LOAD x8, 12*REGBYTES(sp) // Recover s0 + LOAD x10, 27*REGBYTES(sp) // Recover a0 + LOAD x11, 26*REGBYTES(sp) // Recover a1 + LOAD x12, 25*REGBYTES(sp) // Recover a2 + LOAD x13, 24*REGBYTES(sp) // Recover a3 + LOAD x14, 23*REGBYTES(sp) // Recover a4 + LOAD x15, 22*REGBYTES(sp) // Recover a5 + LOAD x16, 21*REGBYTES(sp) // Recover a6 + LOAD x17, 20*REGBYTES(sp) // Recover a7 + LOAD x28, 16*REGBYTES(sp) // Recover t3 + LOAD x29, 15*REGBYTES(sp) // Recover t4 + LOAD x30, 14*REGBYTES(sp) // Recover t5 + LOAD x31, 13*REGBYTES(sp) // Recover t6 + +#if defined(__riscv_float_abi_single) || defined(__riscv_float_abi_double) + addi sp, sp, 65*REGBYTES // 
Recover stack frame - with floating point enabled +#else + addi sp, sp, 32*REGBYTES // Recover stack frame - without floating point enabled +#endif + mret // Return to point of interrupt + + /* } + else + { */ +_tx_thread_preempt_restore: + /* Instead of directly activating the thread again, ensure we save the + entire stack frame by saving the remaining registers. */ + + LOAD t0, 2*REGBYTES(t1) // Pickup thread's stack pointer + ori t3, x0, 1 // Build interrupt stack type + STORE t3, 0(t0) // Store stack type + + /* Store floating point preserved registers. */ +#ifdef __riscv_float_abi_single + fsw f8, 39*REGBYTES(t0) // Store fs0 + fsw f9, 40*REGBYTES(t0) // Store fs1 + fsw f18, 49*REGBYTES(t0) // Store fs2 + fsw f19, 50*REGBYTES(t0) // Store fs3 + fsw f20, 51*REGBYTES(t0) // Store fs4 + fsw f21, 52*REGBYTES(t0) // Store fs5 + fsw f22, 53*REGBYTES(t0) // Store fs6 + fsw f23, 54*REGBYTES(t0) // Store fs7 + fsw f24, 55*REGBYTES(t0) // Store fs8 + fsw f25, 56*REGBYTES(t0) // Store fs9 + fsw f26, 57*REGBYTES(t0) // Store fs10 + fsw f27, 58*REGBYTES(t0) // Store fs11 +#elif defined(__riscv_float_abi_double) + fsd f8, 39*REGBYTES(t0) // Store fs0 + fsd f9, 40*REGBYTES(t0) // Store fs1 + fsd f18, 49*REGBYTES(t0) // Store fs2 + fsd f19, 50*REGBYTES(t0) // Store fs3 + fsd f20, 51*REGBYTES(t0) // Store fs4 + fsd f21, 52*REGBYTES(t0) // Store fs5 + fsd f22, 53*REGBYTES(t0) // Store fs6 + fsd f23, 54*REGBYTES(t0) // Store fs7 + fsd f24, 55*REGBYTES(t0) // Store fs8 + fsd f25, 56*REGBYTES(t0) // Store fs9 + fsd f26, 57*REGBYTES(t0) // Store fs10 + fsd f27, 58*REGBYTES(t0) // Store fs11 +#endif + + /* Store standard preserved registers. 
*/ + + STORE x9, 11*REGBYTES(t0) // Store s1 + STORE x18, 10*REGBYTES(t0) // Store s2 + STORE x19, 9*REGBYTES(t0) // Store s3 + STORE x20, 8*REGBYTES(t0) // Store s4 + STORE x21, 7*REGBYTES(t0) // Store s5 + STORE x22, 6*REGBYTES(t0) // Store s6 + STORE x23, 5*REGBYTES(t0) // Store s7 + STORE x24, 4*REGBYTES(t0) // Store s8 + STORE x25, 3*REGBYTES(t0) // Store s9 + STORE x26, 2*REGBYTES(t0) // Store s10 + STORE x27, 1*REGBYTES(t0) // Store s11 + // Note: s0 is already stored! + + /* Save the remaining time-slice and disable it. */ + /* if (_tx_timer_time_slice) + { */ + + la t0, _tx_timer_time_slice // Pickup time slice variable address + lw t2, 0(t0) // Pickup time slice + beqz t2, _tx_thread_dont_save_ts // If 0, skip time slice processing + + /* _tx_thread_current_ptr -> tx_thread_time_slice = _tx_timer_time_slice + _tx_timer_time_slice = 0; */ + + sw t2, TX_THREAD_TIME_SLICE_OFFSET(t1) // Save current time slice + sw x0, 0(t0) // Clear global time slice + + + /* } */ +_tx_thread_dont_save_ts: + /* Clear the current task pointer. */ + /* _tx_thread_current_ptr = TX_NULL; */ + + /* Return to the scheduler. */ + /* _tx_thread_schedule(); */ + + STORE x0, _tx_thread_current_ptr, t0 // Clear current thread pointer*/ + /* } */ + +_tx_thread_idle_system_restore: + /* Just return back to the scheduler! */ + j _tx_thread_schedule // Return to scheduler + +/* } */ diff --git a/port/threadx/src/tx_thread_context_save.S b/port/threadx/src/tx_thread_context_save.S new file mode 100644 index 0000000..0de69be --- /dev/null +++ b/port/threadx/src/tx_thread_context_save.S @@ -0,0 +1,283 @@ +/*************************************************************************** + * Copyright (c) 2024 Microsoft Corporation + * + * This program and the accompanying materials are made available under the + * terms of the MIT License which is available at + * https://opensource.org/licenses/MIT. 
+ * + * SPDX-License-Identifier: MIT + **************************************************************************/ + + +/**************************************************************************/ +/**************************************************************************/ +/** */ +/** ThreadX Component */ +/** */ +/** Thread */ +/** */ +/**************************************************************************/ +/**************************************************************************/ + +#include "tx_port.h" + + .section .text +/**************************************************************************/ +/* */ +/* FUNCTION RELEASE */ +/* */ +/* _tx_thread_context_save RISC-V64/GNU */ +/* 6.2.1 */ +/* AUTHOR */ +/* */ +/* Scott Larson, Microsoft Corporation */ +/* */ +/* DESCRIPTION */ +/* */ +/* This function saves the context of an executing thread in the */ +/* beginning of interrupt processing. The function also ensures that */ +/* the system stack is used upon return to the calling ISR. */ +/* */ +/* INPUT */ +/* */ +/* None */ +/* */ +/* OUTPUT */ +/* */ +/* None */ +/* */ +/* CALLS */ +/* */ +/* None */ +/* */ +/* CALLED BY */ +/* */ +/* ISRs */ +/* */ +/* RELEASE HISTORY */ +/* */ +/* DATE NAME DESCRIPTION */ +/* */ +/* 03-08-2023 Scott Larson Initial Version 6.2.1 */ +/* */ +/**************************************************************************/ +/* VOID _tx_thread_context_save(VOID) +{ */ + .global _tx_thread_context_save +_tx_thread_context_save: + + /* Upon entry to this routine, it is assumed that interrupts are locked + out and the interrupt stack fame has been allocated and x1 (ra) has + been saved on the stack. */ + + STORE t0, 19*REGBYTES(sp) // First store t0 and t1 + STORE t1, 18*REGBYTES(sp) + + la t0, _tx_thread_system_state // Pickup address of system state + lw t1, 0(t0) // Pickup system state + + /* Check for a nested interrupt condition. 
*/ + /* if (_tx_thread_system_state++) + { */ + beqz t1, _tx_thread_not_nested_save // If 0, first interrupt condition + addi t1, t1, 1 // Increment the interrupt counter + sw t1, 0(t0) // Store the interrupt counter + + /* Nested interrupt condition. + Save the rest of the scratch registers on the stack and return to the + calling ISR. */ + + STORE x7, 17*REGBYTES(sp) // Store t2 + STORE x8, 12*REGBYTES(sp) // Store s0 + STORE x10, 27*REGBYTES(sp) // Store a0 + STORE x11, 26*REGBYTES(sp) // Store a1 + STORE x12, 25*REGBYTES(sp) // Store a2 + STORE x13, 24*REGBYTES(sp) // Store a3 + STORE x14, 23*REGBYTES(sp) // Store a4 + STORE x15, 22*REGBYTES(sp) // Store a5 + STORE x16, 21*REGBYTES(sp) // Store a6 + STORE x17, 20*REGBYTES(sp) // Store a7 + STORE x28, 16*REGBYTES(sp) // Store t3 + STORE x29, 15*REGBYTES(sp) // Store t4 + STORE x30, 14*REGBYTES(sp) // Store t5 + STORE x31, 13*REGBYTES(sp) // Store t6 + csrr t0, mepc // Load exception program counter + STORE t0, 30*REGBYTES(sp) // Save it on the stack + + /* Save floating point scratch registers.
*/ +#if defined(__riscv_float_abi_single) + fsw f0, 31*REGBYTES(sp) // Store ft0 + fsw f1, 32*REGBYTES(sp) // Store ft1 + fsw f2, 33*REGBYTES(sp) // Store ft2 + fsw f3, 34*REGBYTES(sp) // Store ft3 + fsw f4, 35*REGBYTES(sp) // Store ft4 + fsw f5, 36*REGBYTES(sp) // Store ft5 + fsw f6, 37*REGBYTES(sp) // Store ft6 + fsw f7, 38*REGBYTES(sp) // Store ft7 + fsw f10,41*REGBYTES(sp) // Store fa0 + fsw f11,42*REGBYTES(sp) // Store fa1 + fsw f12,43*REGBYTES(sp) // Store fa2 + fsw f13,44*REGBYTES(sp) // Store fa3 + fsw f14,45*REGBYTES(sp) // Store fa4 + fsw f15,46*REGBYTES(sp) // Store fa5 + fsw f16,47*REGBYTES(sp) // Store fa6 + fsw f17,48*REGBYTES(sp) // Store fa7 + fsw f28,59*REGBYTES(sp) // Store ft8 + fsw f29,60*REGBYTES(sp) // Store ft9 + fsw f30,61*REGBYTES(sp) // Store ft10 + fsw f31,62*REGBYTES(sp) // Store ft11 + csrr t0, fcsr + STORE t0, 63*REGBYTES(sp) // Store fcsr +#elif defined(__riscv_float_abi_double) + fsd f0, 31*REGBYTES(sp) // Store ft0 + fsd f1, 32*REGBYTES(sp) // Store ft1 + fsd f2, 33*REGBYTES(sp) // Store ft2 + fsd f3, 34*REGBYTES(sp) // Store ft3 + fsd f4, 35*REGBYTES(sp) // Store ft4 + fsd f5, 36*REGBYTES(sp) // Store ft5 + fsd f6, 37*REGBYTES(sp) // Store ft6 + fsd f7, 38*REGBYTES(sp) // Store ft7 + fsd f10,41*REGBYTES(sp) // Store fa0 + fsd f11,42*REGBYTES(sp) // Store fa1 + fsd f12,43*REGBYTES(sp) // Store fa2 + fsd f13,44*REGBYTES(sp) // Store fa3 + fsd f14,45*REGBYTES(sp) // Store fa4 + fsd f15,46*REGBYTES(sp) // Store fa5 + fsd f16,47*REGBYTES(sp) // Store fa6 + fsd f17,48*REGBYTES(sp) // Store fa7 + fsd f28,59*REGBYTES(sp) // Store ft8 + fsd f29,60*REGBYTES(sp) // Store ft9 + fsd f30,61*REGBYTES(sp) // Store ft10 + fsd f31,62*REGBYTES(sp) // Store ft11 + csrr t0, fcsr + STORE t0, 63*REGBYTES(sp) // Store fcsr +#endif + +#ifdef TX_ENABLE_EXECUTION_CHANGE_NOTIFY + call _tx_execution_isr_enter // Call the ISR execution enter function +#endif + + ret // Return to calling ISR + +_tx_thread_not_nested_save: + /* } */ + + /* Otherwise, not nested, 
check to see if a thread was running. */ + /* else if (_tx_thread_current_ptr) + { */ + addi t1, t1, 1 // Increment the interrupt counter + sw t1, 0(t0) // Store the interrupt counter + + /* Not nested: Find the user thread that was running and load our SP */ + + LOAD t0, _tx_thread_current_ptr // Pickup current thread pointer + beqz t0, _tx_thread_idle_system_save // If NULL, idle system was interrupted + + /* Save the standard scratch registers. */ + + STORE x7, 17*REGBYTES(sp) // Store t2 + STORE x8, 12*REGBYTES(sp) // Store s0 + STORE x10, 27*REGBYTES(sp) // Store a0 + STORE x11, 26*REGBYTES(sp) // Store a1 + STORE x12, 25*REGBYTES(sp) // Store a2 + STORE x13, 24*REGBYTES(sp) // Store a3 + STORE x14, 23*REGBYTES(sp) // Store a4 + STORE x15, 22*REGBYTES(sp) // Store a5 + STORE x16, 21*REGBYTES(sp) // Store a6 + STORE x17, 20*REGBYTES(sp) // Store a7 + STORE x28, 16*REGBYTES(sp) // Store t3 + STORE x29, 15*REGBYTES(sp) // Store t4 + STORE x30, 14*REGBYTES(sp) // Store t5 + STORE x31, 13*REGBYTES(sp) // Store t6 + + csrr t0, mepc // Load exception program counter + STORE t0, 30*REGBYTES(sp) // Save it on the stack + + /* Save floating point scratch registers. 
*/ +#if defined(__riscv_float_abi_single) + fsw f0, 31*REGBYTES(sp) // Store ft0 + fsw f1, 32*REGBYTES(sp) // Store ft1 + fsw f2, 33*REGBYTES(sp) // Store ft2 + fsw f3, 34*REGBYTES(sp) // Store ft3 + fsw f4, 35*REGBYTES(sp) // Store ft4 + fsw f5, 36*REGBYTES(sp) // Store ft5 + fsw f6, 37*REGBYTES(sp) // Store ft6 + fsw f7, 38*REGBYTES(sp) // Store ft7 + fsw f10,41*REGBYTES(sp) // Store fa0 + fsw f11,42*REGBYTES(sp) // Store fa1 + fsw f12,43*REGBYTES(sp) // Store fa2 + fsw f13,44*REGBYTES(sp) // Store fa3 + fsw f14,45*REGBYTES(sp) // Store fa4 + fsw f15,46*REGBYTES(sp) // Store fa5 + fsw f16,47*REGBYTES(sp) // Store fa6 + fsw f17,48*REGBYTES(sp) // Store fa7 + fsw f28,59*REGBYTES(sp) // Store ft8 + fsw f29,60*REGBYTES(sp) // Store ft9 + fsw f30,61*REGBYTES(sp) // Store ft10 + fsw f31,62*REGBYTES(sp) // Store ft11 + csrr t0, fcsr + STORE t0, 63*REGBYTES(sp) // Store fcsr +#elif defined(__riscv_float_abi_double) + fsd f0, 31*REGBYTES(sp) // Store ft0 + fsd f1, 32*REGBYTES(sp) // Store ft1 + fsd f2, 33*REGBYTES(sp) // Store ft2 + fsd f3, 34*REGBYTES(sp) // Store ft3 + fsd f4, 35*REGBYTES(sp) // Store ft4 + fsd f5, 36*REGBYTES(sp) // Store ft5 + fsd f6, 37*REGBYTES(sp) // Store ft6 + fsd f7, 38*REGBYTES(sp) // Store ft7 + fsd f10,41*REGBYTES(sp) // Store fa0 + fsd f11,42*REGBYTES(sp) // Store fa1 + fsd f12,43*REGBYTES(sp) // Store fa2 + fsd f13,44*REGBYTES(sp) // Store fa3 + fsd f14,45*REGBYTES(sp) // Store fa4 + fsd f15,46*REGBYTES(sp) // Store fa5 + fsd f16,47*REGBYTES(sp) // Store fa6 + fsd f17,48*REGBYTES(sp) // Store fa7 + fsd f28,59*REGBYTES(sp) // Store ft8 + fsd f29,60*REGBYTES(sp) // Store ft9 + fsd f30,61*REGBYTES(sp) // Store ft10 + fsd f31,62*REGBYTES(sp) // Store ft11 + csrr t0, fcsr + STORE t0, 63*REGBYTES(sp) // Store fcsr +#endif + + /* Save the current stack pointer in the thread's control block. */ + /* _tx_thread_current_ptr -> tx_thread_stack_ptr = sp; */ + + /* Switch to the system stack. 
*/ + /* sp = _tx_thread_system_stack_ptr; */ + + LOAD t1, _tx_thread_current_ptr // Pickup current thread pointer + STORE sp, 2*REGBYTES(t1) // Save stack pointer + +#ifdef TX_ENABLE_EXECUTION_CHANGE_NOTIFY + /* _tx_execution_isr_enter is called with thread stack pointer */ + call _tx_execution_isr_enter // Call the ISR execution enter function +#endif + + + LOAD sp, _tx_thread_system_stack_ptr // Switch to system stack + ret // Return to calling ISR + + /* } + else + { */ + +_tx_thread_idle_system_save: + + +#ifdef TX_ENABLE_EXECUTION_CHANGE_NOTIFY + call _tx_execution_isr_enter // Call the ISR execution enter function +#endif + + /* Interrupt occurred in the scheduling loop. */ + + /* } +} */ +#if defined(__riscv_float_abi_single) || defined(__riscv_float_abi_double) + addi sp, sp, 65*REGBYTES // Recover stack frame - with floating point enabled +#else + addi sp, sp, 32*REGBYTES // Recover the reserved stack space +#endif + ret // Return to calling ISR diff --git a/port/threadx/src/tx_thread_interrupt_control.S b/port/threadx/src/tx_thread_interrupt_control.S new file mode 100644 index 0000000..c75c7c4 --- /dev/null +++ b/port/threadx/src/tx_thread_interrupt_control.S @@ -0,0 +1,81 @@ +/*************************************************************************** + * Copyright (c) 2024 Microsoft Corporation + * + * This program and the accompanying materials are made available under the + * terms of the MIT License which is available at + * https://opensource.org/licenses/MIT. 
+ * + * SPDX-License-Identifier: MIT + **************************************************************************/ + + +/**************************************************************************/ +/**************************************************************************/ +/** */ +/** ThreadX Component */ +/** */ +/** Thread */ +/** */ +/**************************************************************************/ +/**************************************************************************/ + + RETURN_MASK = 0x000000000000000F + SET_SR_MASK = 0xFFFFFFFFFFFFFFF0 + + .section .text +/**************************************************************************/ +/* */ +/* FUNCTION RELEASE */ +/* */ +/* _tx_thread_interrupt_control RISC-V64/GNU */ +/* 6.2.1 */ +/* AUTHOR */ +/* */ +/* Scott Larson, Microsoft Corporation */ +/* */ +/* DESCRIPTION */ +/* */ +/* This function is responsible for changing the interrupt lockout */ +/* posture of the system. */ +/* */ +/* INPUT */ +/* */ +/* new_posture New interrupt lockout posture */ +/* */ +/* OUTPUT */ +/* */ +/* old_posture Old interrupt lockout posture */ +/* */ +/* CALLS */ +/* */ +/* None */ +/* */ +/* CALLED BY */ +/* */ +/* Application Code */ +/* */ +/* RELEASE HISTORY */ +/* */ +/* DATE NAME DESCRIPTION */ +/* */ +/* 03-08-2023 Scott Larson Initial Version 6.2.1 */ +/* */ +/**************************************************************************/ +/* UINT _tx_thread_interrupt_control(UINT new_posture) +{ */ + .global _tx_thread_interrupt_control +_tx_thread_interrupt_control: + /* Pickup current interrupt lockout posture. */ + + csrr t0, mstatus + mv t1, t0 // Save original mstatus for return + + /* Apply the new interrupt posture. */ + + li t2, SET_SR_MASK // Build set SR mask + and t0, t0, t2 // Isolate interrupt lockout bits + or t0, t0, a0 // Put new lockout bits in + csrw mstatus, t0 + andi a0, t1, RETURN_MASK // Return original mstatus. 
+ ret +/* } */ diff --git a/port/threadx/src/tx_thread_schedule.S b/port/threadx/src/tx_thread_schedule.S new file mode 100644 index 0000000..c9be4c6 --- /dev/null +++ b/port/threadx/src/tx_thread_schedule.S @@ -0,0 +1,305 @@ +/*************************************************************************** + * Copyright (c) 2024 Microsoft Corporation + * + * This program and the accompanying materials are made available under the + * terms of the MIT License which is available at + * https://opensource.org/licenses/MIT. + * + * SPDX-License-Identifier: MIT + **************************************************************************/ + + +/**************************************************************************/ +/**************************************************************************/ +/** */ +/** ThreadX Component */ +/** */ +/** Thread */ +/** */ +/**************************************************************************/ +/**************************************************************************/ + +#include "tx_port.h" + + .section .text +/**************************************************************************/ +/* */ +/* FUNCTION RELEASE */ +/* */ +/* _tx_thread_schedule RISC-V64/GNU */ +/* 6.2.1 */ +/* AUTHOR */ +/* */ +/* Scott Larson, Microsoft Corporation */ +/* */ +/* DESCRIPTION */ +/* */ +/* This function waits for a thread control block pointer to appear in */ +/* the _tx_thread_execute_ptr variable. Once a thread pointer appears */ +/* in the variable, the corresponding thread is resumed. 
*/ +/* */ +/* INPUT */ +/* */ +/* None */ +/* */ +/* OUTPUT */ +/* */ +/* None */ +/* */ +/* CALLS */ +/* */ +/* None */ +/* */ +/* CALLED BY */ +/* */ +/* _tx_initialize_kernel_enter ThreadX entry function */ +/* _tx_thread_system_return Return to system from thread */ +/* _tx_thread_context_restore Restore thread's context */ +/* */ +/* RELEASE HISTORY */ +/* */ +/* DATE NAME DESCRIPTION */ +/* */ +/* 03-08-2023 Scott Larson Initial Version 6.2.1 */ +/* */ +/**************************************************************************/ +/* VOID _tx_thread_schedule(VOID) +{ */ + .global _tx_thread_schedule +_tx_thread_schedule: + + /* Enable interrupts. */ + csrsi mstatus, 0x08 // Enable interrupts + + /* Wait for a thread to execute. */ + /* do + { */ + + la t0, _tx_thread_execute_ptr // Pickup address of execute ptr +_tx_thread_schedule_loop: + LOAD t1, 0(t0) // Pickup next thread to execute + beqz t1, _tx_thread_schedule_loop // If NULL, wait for thread to execute + + /* } + while(_tx_thread_execute_ptr == TX_NULL); */ + + /* Yes! We have a thread to execute. Lockout interrupts and + transfer control to it. */ + csrci mstatus, 0x08 // Lockout interrupts + + /* Setup the current thread pointer. */ + /* _tx_thread_current_ptr = _tx_thread_execute_ptr; */ + + la t0, _tx_thread_current_ptr // Pickup current thread pointer address + STORE t1, 0(t0) // Set current thread pointer + + /* Increment the run count for this thread. */ + /* _tx_thread_current_ptr -> tx_thread_run_count++; */ + + LOAD t2, 1*REGBYTES(t1) // Pickup run count + LOAD t3, 6*REGBYTES(t1) // Pickup time slice value + addi t2, t2, 1 // Increment run count + STORE t2, 1*REGBYTES(t1) // Store new run count + + /* Setup time-slice, if present. */ + /* _tx_timer_time_slice = _tx_thread_current_ptr -> tx_thread_time_slice; */ + + la t2, _tx_timer_time_slice // Pickup time-slice variable address + + /* Switch to the thread's stack. 
*/ + /* SP = _tx_thread_execute_ptr -> tx_thread_stack_ptr; */ + + LOAD sp, 2*REGBYTES(t1) // Switch to thread's stack + STORE t3, 0(t2) // Store new time-slice*/ + +#ifdef TX_ENABLE_EXECUTION_CHANGE_NOTIFY + + call _tx_execution_thread_enter // Call the thread execution enter function +#endif + + /* Determine if an interrupt frame or a synchronous task suspension frame + is present. */ + + LOAD t2, 0(sp) // Pickup stack type + beqz t2, _tx_thread_synch_return // If 0, solicited thread return + + /* Determine if floating point registers need to be recovered. */ + +#if defined(__riscv_float_abi_single) + flw f0, 31*REGBYTES(sp) // Recover ft0 + flw f1, 32*REGBYTES(sp) // Recover ft1 + flw f2, 33*REGBYTES(sp) // Recover ft2 + flw f3, 34*REGBYTES(sp) // Recover ft3 + flw f4, 35*REGBYTES(sp) // Recover ft4 + flw f5, 36*REGBYTES(sp) // Recover ft5 + flw f6, 37*REGBYTES(sp) // Recover ft6 + flw f7, 38*REGBYTES(sp) // Recover ft7 + flw f8, 39*REGBYTES(sp) // Recover fs0 + flw f9, 40*REGBYTES(sp) // Recover fs1 + flw f10,41*REGBYTES(sp) // Recover fa0 + flw f11,42*REGBYTES(sp) // Recover fa1 + flw f12,43*REGBYTES(sp) // Recover fa2 + flw f13,44*REGBYTES(sp) // Recover fa3 + flw f14,45*REGBYTES(sp) // Recover fa4 + flw f15,46*REGBYTES(sp) // Recover fa5 + flw f16,47*REGBYTES(sp) // Recover fa6 + flw f17,48*REGBYTES(sp) // Recover fa7 + flw f18,49*REGBYTES(sp) // Recover fs2 + flw f19,50*REGBYTES(sp) // Recover fs3 + flw f20,51*REGBYTES(sp) // Recover fs4 + flw f21,52*REGBYTES(sp) // Recover fs5 + flw f22,53*REGBYTES(sp) // Recover fs6 + flw f23,54*REGBYTES(sp) // Recover fs7 + flw f24,55*REGBYTES(sp) // Recover fs8 + flw f25,56*REGBYTES(sp) // Recover fs9 + flw f26,57*REGBYTES(sp) // Recover fs10 + flw f27,58*REGBYTES(sp) // Recover fs11 + flw f28,59*REGBYTES(sp) // Recover ft8 + flw f29,60*REGBYTES(sp) // Recover ft9 + flw f30,61*REGBYTES(sp) // Recover ft10 + flw f31,62*REGBYTES(sp) // Recover ft11 + LOAD t0, 63*REGBYTES(sp) // Recover fcsr + csrw fcsr, t0 // +#elif 
defined(__riscv_float_abi_double) + fld f0, 31*REGBYTES(sp) // Recover ft0 + fld f1, 32*REGBYTES(sp) // Recover ft1 + fld f2, 33*REGBYTES(sp) // Recover ft2 + fld f3, 34*REGBYTES(sp) // Recover ft3 + fld f4, 35*REGBYTES(sp) // Recover ft4 + fld f5, 36*REGBYTES(sp) // Recover ft5 + fld f6, 37*REGBYTES(sp) // Recover ft6 + fld f7, 38*REGBYTES(sp) // Recover ft7 + fld f8, 39*REGBYTES(sp) // Recover fs0 + fld f9, 40*REGBYTES(sp) // Recover fs1 + fld f10,41*REGBYTES(sp) // Recover fa0 + fld f11,42*REGBYTES(sp) // Recover fa1 + fld f12,43*REGBYTES(sp) // Recover fa2 + fld f13,44*REGBYTES(sp) // Recover fa3 + fld f14,45*REGBYTES(sp) // Recover fa4 + fld f15,46*REGBYTES(sp) // Recover fa5 + fld f16,47*REGBYTES(sp) // Recover fa6 + fld f17,48*REGBYTES(sp) // Recover fa7 + fld f18,49*REGBYTES(sp) // Recover fs2 + fld f19,50*REGBYTES(sp) // Recover fs3 + fld f20,51*REGBYTES(sp) // Recover fs4 + fld f21,52*REGBYTES(sp) // Recover fs5 + fld f22,53*REGBYTES(sp) // Recover fs6 + fld f23,54*REGBYTES(sp) // Recover fs7 + fld f24,55*REGBYTES(sp) // Recover fs8 + fld f25,56*REGBYTES(sp) // Recover fs9 + fld f26,57*REGBYTES(sp) // Recover fs10 + fld f27,58*REGBYTES(sp) // Recover fs11 + fld f28,59*REGBYTES(sp) // Recover ft8 + fld f29,60*REGBYTES(sp) // Recover ft9 + fld f30,61*REGBYTES(sp) // Recover ft10 + fld f31,62*REGBYTES(sp) // Recover ft11 + LOAD t0, 63*REGBYTES(sp) // Recover fcsr +#endif + + /* Recover standard registers. 
*/ + + LOAD t0, 30*REGBYTES(sp) // Recover mepc + csrw mepc, t0 // Store mepc + li t0, 0x1880 // Prepare MPIP +#if defined(__riscv_float_abi_single) || defined(__riscv_float_abi_double) + li t1, 1<<13 + or t0, t1, t0 +#endif + csrw mstatus, t0 // Enable MPIP + + LOAD x1, 28*REGBYTES(sp) // Recover RA + LOAD x5, 19*REGBYTES(sp) // Recover t0 + LOAD x6, 18*REGBYTES(sp) // Recover t1 + LOAD x7, 17*REGBYTES(sp) // Recover t2 + LOAD x8, 12*REGBYTES(sp) // Recover s0 + LOAD x9, 11*REGBYTES(sp) // Recover s1 + LOAD x10, 27*REGBYTES(sp) // Recover a0 + LOAD x11, 26*REGBYTES(sp) // Recover a1 + LOAD x12, 25*REGBYTES(sp) // Recover a2 + LOAD x13, 24*REGBYTES(sp) // Recover a3 + LOAD x14, 23*REGBYTES(sp) // Recover a4 + LOAD x15, 22*REGBYTES(sp) // Recover a5 + LOAD x16, 21*REGBYTES(sp) // Recover a6 + LOAD x17, 20*REGBYTES(sp) // Recover a7 + LOAD x18, 10*REGBYTES(sp) // Recover s2 + LOAD x19, 9*REGBYTES(sp) // Recover s3 + LOAD x20, 8*REGBYTES(sp) // Recover s4 + LOAD x21, 7*REGBYTES(sp) // Recover s5 + LOAD x22, 6*REGBYTES(sp) // Recover s6 + LOAD x23, 5*REGBYTES(sp) // Recover s7 + LOAD x24, 4*REGBYTES(sp) // Recover s8 + LOAD x25, 3*REGBYTES(sp) // Recover s9 + LOAD x26, 2*REGBYTES(sp) // Recover s10 + LOAD x27, 1*REGBYTES(sp) // Recover s11 + LOAD x28, 16*REGBYTES(sp) // Recover t3 + LOAD x29, 15*REGBYTES(sp) // Recover t4 + LOAD x30, 14*REGBYTES(sp) // Recover t5 + LOAD x31, 13*REGBYTES(sp) // Recover t6 + +#if defined(__riscv_float_abi_single) || defined(__riscv_float_abi_double) + addi sp, sp, 65*REGBYTES // Recover stack frame - with floating point registers +#else + addi sp, sp, 32*REGBYTES // Recover stack frame - without floating point registers +#endif + mret // Return to point of interrupt + +_tx_thread_synch_return: + +#if defined(__riscv_float_abi_single) + flw f8, 15*REGBYTES(sp) // Recover fs0 + flw f9, 16*REGBYTES(sp) // Recover fs1 + flw f18,17*REGBYTES(sp) // Recover fs2 + flw f19,18*REGBYTES(sp) // Recover fs3 + flw f20,19*REGBYTES(sp) // Recover fs4 + 
flw f21,20*REGBYTES(sp) // Recover fs5 + flw f22,21*REGBYTES(sp) // Recover fs6 + flw f23,22*REGBYTES(sp) // Recover fs7 + flw f24,23*REGBYTES(sp) // Recover fs8 + flw f25,24*REGBYTES(sp) // Recover fs9 + flw f26,25*REGBYTES(sp) // Recover fs10 + flw f27,26*REGBYTES(sp) // Recover fs11 + LOAD t0, 27*REGBYTES(sp) // Recover fcsr + csrw fcsr, t0 // +#elif defined(__riscv_float_abi_double) + fld f8, 15*REGBYTES(sp) // Recover fs0 + fld f9, 16*REGBYTES(sp) // Recover fs1 + fld f18,17*REGBYTES(sp) // Recover fs2 + fld f19,18*REGBYTES(sp) // Recover fs3 + fld f20,19*REGBYTES(sp) // Recover fs4 + fld f21,20*REGBYTES(sp) // Recover fs5 + fld f22,21*REGBYTES(sp) // Recover fs6 + fld f23,22*REGBYTES(sp) // Recover fs7 + fld f24,23*REGBYTES(sp) // Recover fs8 + fld f25,24*REGBYTES(sp) // Recover fs9 + fld f26,25*REGBYTES(sp) // Recover fs10 + fld f27,26*REGBYTES(sp) // Recover fs11 + LOAD t0, 27*REGBYTES(sp) // Recover fcsr + csrw fcsr, t0 // +#endif + + /* Recover standard preserved registers. */ + /* Recover standard registers. 
*/ + + LOAD x1, 13*REGBYTES(sp) // Recover RA + LOAD x8, 12*REGBYTES(sp) // Recover s0 + LOAD x9, 11*REGBYTES(sp) // Recover s1 + LOAD x18, 10*REGBYTES(sp) // Recover s2 + LOAD x19, 9*REGBYTES(sp) // Recover s3 + LOAD x20, 8*REGBYTES(sp) // Recover s4 + LOAD x21, 7*REGBYTES(sp) // Recover s5 + LOAD x22, 6*REGBYTES(sp) // Recover s6 + LOAD x23, 5*REGBYTES(sp) // Recover s7 + LOAD x24, 4*REGBYTES(sp) // Recover s8 + LOAD x25, 3*REGBYTES(sp) // Recover s9 + LOAD x26, 2*REGBYTES(sp) // Recover s10 + LOAD x27, 1*REGBYTES(sp) // Recover s11 + LOAD t0, 14*REGBYTES(sp) // Recover mstatus + csrw mstatus, t0 // Store mstatus, enables interrupt +#if defined(__riscv_float_abi_single) || defined(__riscv_float_abi_double) + addi sp, sp, 29*REGBYTES // Recover stack frame +#else + addi sp, sp, 16*REGBYTES // Recover stack frame +#endif + ret // Return to thread + +/* } */ diff --git a/port/threadx/src/tx_thread_stack_build.S b/port/threadx/src/tx_thread_stack_build.S new file mode 100644 index 0000000..ec6f90b --- /dev/null +++ b/port/threadx/src/tx_thread_stack_build.S @@ -0,0 +1,227 @@ +/*************************************************************************** + * Copyright (c) 2024 Microsoft Corporation + * + * This program and the accompanying materials are made available under the + * terms of the MIT License which is available at + * https://opensource.org/licenses/MIT. 
+ * + * SPDX-License-Identifier: MIT + **************************************************************************/ + + +/**************************************************************************/ +/**************************************************************************/ +/** */ +/** ThreadX Component */ +/** */ +/** Thread */ +/** */ +/**************************************************************************/ +/**************************************************************************/ + +#include "tx_port.h" + + .section .text +/**************************************************************************/ +/* */ +/* FUNCTION RELEASE */ +/* */ +/* _tx_thread_stack_build RISC-V64/GNU */ +/* 6.2.1 */ +/* AUTHOR */ +/* */ +/* Scott Larson, Microsoft Corporation */ +/* */ +/* DESCRIPTION */ +/* */ +/* This function builds a stack frame on the supplied thread's stack. */ +/* The stack frame results in a fake interrupt return to the supplied */ +/* function pointer. */ +/* */ +/* INPUT */ +/* */ +/* thread_ptr Pointer to thread control blk */ +/* function_ptr Pointer to return function */ +/* */ +/* OUTPUT */ +/* */ +/* None */ +/* */ +/* CALLS */ +/* */ +/* None */ +/* */ +/* CALLED BY */ +/* */ +/* _tx_thread_create Create thread service */ +/* */ +/* RELEASE HISTORY */ +/* */ +/* DATE NAME DESCRIPTION */ +/* */ +/* 03-08-2023 Scott Larson Initial Version 6.2.1 */ +/* */ +/**************************************************************************/ +/* VOID _tx_thread_stack_build(TX_THREAD *thread_ptr, VOID (*function_ptr)(VOID)) +{ */ + .global _tx_thread_stack_build +_tx_thread_stack_build: + + /* Build a fake interrupt frame. 
The form of the fake interrupt stack + on the RISC-V should look like the following after it is built: + Reg Index + Stack Top: 1 0 Interrupt stack frame type + x27 1 Initial s11 + x26 2 Initial s10 + x25 3 Initial s9 + x24 4 Initial s8 + x23 5 Initial s7 + x22 6 Initial s6 + x21 7 Initial s5 + x20 8 Initial s4 + x19 9 Initial s3 + x18 10 Initial s2 + x9 11 Initial s1 + x8 12 Initial s0 + x31 13 Initial t6 + x30 14 Initial t5 + x29 15 Initial t4 + x28 16 Initial t3 + x7 17 Initial t2 + x6 18 Initial t1 + x5 19 Initial t0 + x17 20 Initial a7 + x16 21 Initial a6 + x15 22 Initial a5 + x14 23 Initial a4 + x13 24 Initial a3 + x12 25 Initial a2 + x11 26 Initial a1 + x10 27 Initial a0 + x1 28 Initial ra + -- 29 reserved + mepc 30 Initial mepc +If floating point support: + f0 31 Inital ft0 + f1 32 Inital ft1 + f2 33 Inital ft2 + f3 34 Inital ft3 + f4 35 Inital ft4 + f5 36 Inital ft5 + f6 37 Inital ft6 + f7 38 Inital ft7 + f8 39 Inital fs0 + f9 40 Inital fs1 + f10 41 Inital fa0 + f11 42 Inital fa1 + f12 43 Inital fa2 + f13 44 Inital fa3 + f14 45 Inital fa4 + f15 46 Inital fa5 + f16 47 Inital fa6 + f17 48 Inital fa7 + f18 49 Inital fs2 + f19 50 Inital fs3 + f20 51 Inital fs4 + f21 52 Inital fs5 + f22 53 Inital fs6 + f23 54 Inital fs7 + f24 55 Inital fs8 + f25 56 Inital fs9 + f26 57 Inital fs10 + f27 58 Inital fs11 + f28 59 Inital ft8 + f29 60 Inital ft9 + f30 61 Inital ft10 + f31 62 Inital ft11 + fscr 63 Inital fscr + + Stack Bottom: (higher memory address) */ + + LOAD t0, TX_THREAD_STACK_END_OFFSET(a0) // Pickup end of stack area + andi t0, t0, -4*REGBYTES // Ensure alignment (16-byte for RV32 & 32-byte for RV64) + + /* Actually build the stack frame. 
*/ + +#if defined(__riscv_float_abi_single) || defined(__riscv_float_abi_double) + addi t0, t0, -65*REGBYTES +#else + addi t0, t0, -32*REGBYTES // Allocate space for the stack frame +#endif + li t1, 1 // Build stack type + STORE t1, 0*REGBYTES(t0) // Place stack type on the top + STORE x0, 1*REGBYTES(t0) // Initial s11 + STORE x0, 2*REGBYTES(t0) // Initial s10 + STORE x0, 3*REGBYTES(t0) // Initial s9 + STORE x0, 4*REGBYTES(t0) // Initial s8 + STORE x0, 5*REGBYTES(t0) // Initial s7 + STORE x0, 6*REGBYTES(t0) // Initial s6 + STORE x0, 7*REGBYTES(t0) // Initial s5 + STORE x0, 8*REGBYTES(t0) // Initial s4 + STORE x0, 9*REGBYTES(t0) // Initial s3 + STORE x0, 10*REGBYTES(t0) // Initial s2 + STORE x0, 11*REGBYTES(t0) // Initial s1 + STORE x0, 12*REGBYTES(t0) // Initial s0 + STORE x0, 13*REGBYTES(t0) // Initial t6 + STORE x0, 14*REGBYTES(t0) // Initial t5 + STORE x0, 15*REGBYTES(t0) // Initial t4 + STORE x0, 16*REGBYTES(t0) // Initial t3 + STORE x0, 17*REGBYTES(t0) // Initial t2 + STORE x0, 18*REGBYTES(t0) // Initial t1 + STORE x0, 19*REGBYTES(t0) // Initial t0 + STORE x0, 20*REGBYTES(t0) // Initial a7 + STORE x0, 21*REGBYTES(t0) // Initial a6 + STORE x0, 22*REGBYTES(t0) // Initial a5 + STORE x0, 23*REGBYTES(t0) // Initial a4 + STORE x0, 24*REGBYTES(t0) // Initial a3 + STORE x0, 25*REGBYTES(t0) // Initial a2 + STORE x0, 26*REGBYTES(t0) // Initial a1 + STORE x0, 27*REGBYTES(t0) // Initial a0 + STORE x0, 28*REGBYTES(t0) // Initial ra + STORE a1, 30*REGBYTES(t0) // Initial mepc +#if defined(__riscv_float_abi_single) || defined(__riscv_float_abi_double) + STORE x0, 31*REGBYTES(t0) // Inital ft0 + STORE x0, 32*REGBYTES(t0) // Inital ft1 + STORE x0, 33*REGBYTES(t0) // Inital ft2 + STORE x0, 34*REGBYTES(t0) // Inital ft3 + STORE x0, 35*REGBYTES(t0) // Inital ft4 + STORE x0, 36*REGBYTES(t0) // Inital ft5 + STORE x0, 37*REGBYTES(t0) // Inital ft6 + STORE x0, 38*REGBYTES(t0) // Inital ft7 + STORE x0, 39*REGBYTES(t0) // Inital fs0 + STORE x0, 40*REGBYTES(t0) // Inital fs1 + STORE x0, 
41*REGBYTES(t0) // Inital fa0 + STORE x0, 42*REGBYTES(t0) // Inital fa1 + STORE x0, 43*REGBYTES(t0) // Inital fa2 + STORE x0, 44*REGBYTES(t0) // Inital fa3 + STORE x0, 45*REGBYTES(t0) // Inital fa4 + STORE x0, 46*REGBYTES(t0) // Inital fa5 + STORE x0, 47*REGBYTES(t0) // Inital fa6 + STORE x0, 48*REGBYTES(t0) // Inital fa7 + STORE x0, 49*REGBYTES(t0) // Inital fs2 + STORE x0, 50*REGBYTES(t0) // Inital fs3 + STORE x0, 51*REGBYTES(t0) // Inital fs4 + STORE x0, 52*REGBYTES(t0) // Inital fs5 + STORE x0, 53*REGBYTES(t0) // Inital fs6 + STORE x0, 54*REGBYTES(t0) // Inital fs7 + STORE x0, 55*REGBYTES(t0) // Inital fs8 + STORE x0, 56*REGBYTES(t0) // Inital fs9 + STORE x0, 57*REGBYTES(t0) // Inital fs10 + STORE x0, 58*REGBYTES(t0) // Inital fs11 + STORE x0, 59*REGBYTES(t0) // Inital ft8 + STORE x0, 60*REGBYTES(t0) // Inital ft9 + STORE x0, 61*REGBYTES(t0) // Inital ft10 + STORE x0, 62*REGBYTES(t0) // Inital ft11 + csrr a1, fcsr // Read fcsr and use it for initial value for each thread + STORE a1, 63*REGBYTES(t0) // Initial fscr + STORE x0, 64*REGBYTES(t0) // Reserved word (0) +#else + STORE x0, 31*REGBYTES(t0) // Reserved word (0) +#endif + + /* Setup stack pointer. */ + /* thread_ptr -> tx_thread_stack_ptr = t0; */ + + STORE t0, 2*REGBYTES(a0) // Save stack pointer in thread's + ret // control block and return +/* } */ diff --git a/port/threadx/src/tx_thread_system_return.S b/port/threadx/src/tx_thread_system_return.S new file mode 100644 index 0000000..ec6ff78 --- /dev/null +++ b/port/threadx/src/tx_thread_system_return.S @@ -0,0 +1,174 @@ +/*************************************************************************** + * Copyright (c) 2024 Microsoft Corporation + * + * This program and the accompanying materials are made available under the + * terms of the MIT License which is available at + * https://opensource.org/licenses/MIT. 
+ * + * SPDX-License-Identifier: MIT + **************************************************************************/ + + +/**************************************************************************/ +/**************************************************************************/ +/** */ +/** ThreadX Component */ +/** */ +/** Thread */ +/** */ +/**************************************************************************/ +/**************************************************************************/ + +#include "tx_port.h" + + .section .text +/**************************************************************************/ +/* */ +/* FUNCTION RELEASE */ +/* */ +/* _tx_thread_system_return RISC-V64/GNU */ +/* 6.2.1 */ +/* AUTHOR */ +/* */ +/* Scott Larson, Microsoft Corporation */ +/* */ +/* DESCRIPTION */ +/* */ +/* This function is target processor specific. It is used to transfer */ +/* control from a thread back to the system. Only a minimal context */ +/* is saved since the compiler assumes temp registers are going to get */ +/* slicked by a function call anyway. */ +/* */ +/* INPUT */ +/* */ +/* None */ +/* */ +/* OUTPUT */ +/* */ +/* None */ +/* */ +/* CALLS */ +/* */ +/* _tx_thread_schedule Thread scheduling loop */ +/* */ +/* CALLED BY */ +/* */ +/* ThreadX components */ +/* */ +/* RELEASE HISTORY */ +/* */ +/* DATE NAME DESCRIPTION */ +/* */ +/* 03-08-2023 Scott Larson Initial Version 6.2.1 */ +/* */ +/**************************************************************************/ +/* VOID _tx_thread_system_return(VOID) +{ */ + .global _tx_thread_system_return +_tx_thread_system_return: + + /* Save minimal context on the stack. */ + +#if defined(__riscv_float_abi_single) || defined(__riscv_float_abi_double) + addi sp, sp, -29*REGBYTES // Allocate space on the stack - with floating point enabled +#else + addi sp, sp, -16*REGBYTES // Allocate space on the stack - without floating point enabled +#endif + + /* Store floating point preserved registers. 
*/ +#if defined(__riscv_float_abi_single) + fsw f8, 15*REGBYTES(sp) // Store fs0 + fsw f9, 16*REGBYTES(sp) // Store fs1 + fsw f18, 17*REGBYTES(sp) // Store fs2 + fsw f19, 18*REGBYTES(sp) // Store fs3 + fsw f20, 19*REGBYTES(sp) // Store fs4 + fsw f21, 20*REGBYTES(sp) // Store fs5 + fsw f22, 21*REGBYTES(sp) // Store fs6 + fsw f23, 22*REGBYTES(sp) // Store fs7 + fsw f24, 23*REGBYTES(sp) // Store fs8 + fsw f25, 24*REGBYTES(sp) // Store fs9 + fsw f26, 25*REGBYTES(sp) // Store fs10 + fsw f27, 26*REGBYTES(sp) // Store fs11 + csrr t0, fcsr + STORE t0, 27*REGBYTES(sp) // Store fcsr +#elif defined(__riscv_float_abi_double) + fsd f8, 15*REGBYTES(sp) // Store fs0 + fsd f9, 16*REGBYTES(sp) // Store fs1 + fsd f18, 17*REGBYTES(sp) // Store fs2 + fsd f19, 18*REGBYTES(sp) // Store fs3 + fsd f20, 19*REGBYTES(sp) // Store fs4 + fsd f21, 20*REGBYTES(sp) // Store fs5 + fsd f22, 21*REGBYTES(sp) // Store fs6 + fsd f23, 22*REGBYTES(sp) // Store fs7 + fsd f24, 23*REGBYTES(sp) // Store fs8 + fsd f25, 24*REGBYTES(sp) // Store fs9 + fsd f26, 25*REGBYTES(sp) // Store fs10 + fsd f27, 26*REGBYTES(sp) // Store fs11 + csrr t0, fcsr + STORE t0, 27*REGBYTES(sp) // Store fcsr +#endif + + STORE x0, 0(sp) // Solicited stack type + STORE x1, 13*REGBYTES(sp) // Save RA + STORE x8, 12*REGBYTES(sp) // Save s0 + STORE x9, 11*REGBYTES(sp) // Save s1 + STORE x18, 10*REGBYTES(sp) // Save s2 + STORE x19, 9*REGBYTES(sp) // Save s3 + STORE x20, 8*REGBYTES(sp) // Save s4 + STORE x21, 7*REGBYTES(sp) // Save s5 + STORE x22, 6*REGBYTES(sp) // Save s6 + STORE x23, 5*REGBYTES(sp) // Save s7 + STORE x24, 4*REGBYTES(sp) // Save s8 + STORE x25, 3*REGBYTES(sp) // Save s9 + STORE x26, 2*REGBYTES(sp) // Save s10 + STORE x27, 1*REGBYTES(sp) // Save s11 + csrr t0, mstatus // Pickup mstatus + STORE t0, 14*REGBYTES(sp) // Save mstatus + + + /* Lockout interrupts. 
- will be enabled in _tx_thread_schedule */ + + csrci mstatus, 0xF + +#ifdef TX_ENABLE_EXECUTION_CHANGE_NOTIFY + + call _tx_execution_thread_exit // Call the thread execution exit function +#endif + + la t0, _tx_thread_current_ptr // Pickup address of pointer + LOAD t1, 0(t0) // Pickup current thread pointer + la t2,_tx_thread_system_stack_ptr // Pickup stack pointer address + + /* Save current stack and switch to system stack. */ + /* _tx_thread_current_ptr -> tx_thread_stack_ptr = SP; + SP = _tx_thread_system_stack_ptr; */ + + STORE sp, 2*REGBYTES(t1) // Save stack pointer + LOAD sp, 0(t2) // Switch to system stack + + /* Determine if the time-slice is active. */ + /* if (_tx_timer_time_slice) + { */ + + la t4, _tx_timer_time_slice // Pickup time slice variable addr + lw t3, 0(t4) // Pickup time slice value + la t2, _tx_thread_schedule // Pickup address of scheduling loop + beqz t3, _tx_thread_dont_save_ts // If no time-slice, don't save it + + /* Save time-slice for the thread and clear the current time-slice. */ + /* _tx_thread_current_ptr -> tx_thread_time_slice = _tx_timer_time_slice; + _tx_timer_time_slice = 0; */ + + sw t3, TX_THREAD_TIME_SLICE_OFFSET(t1) // Save current time-slice for thread + sw x0, 0(t4) // Clear time-slice variable + + /* } */ +_tx_thread_dont_save_ts: + + /* Clear the current thread pointer. 
*/ + /* _tx_thread_current_ptr = TX_NULL; */ + + STORE x0, 0(t0) // Clear current thread pointer + jr t2 // Return to thread scheduler + +/* } */ diff --git a/src/flash.lds b/src/flash.lds new file mode 100644 index 0000000..ecf3e89 --- /dev/null +++ b/src/flash.lds @@ -0,0 +1,12 @@ +OUTPUT_ARCH( "riscv" ) + +ENTRY( _start ) + +INCLUDE memory_map.ld + +REGION_ALIAS("REGION_TEXT", flash); +REGION_ALIAS("REGION_RODATA", flash); +REGION_ALIAS("REGION_DATA", ram); +REGION_ALIAS("REGION_BSS", ram); + +INCLUDE sections.ld \ No newline at end of file diff --git a/src/memory_map.ld b/src/memory_map.ld new file mode 100644 index 0000000..378d716 --- /dev/null +++ b/src/memory_map.ld @@ -0,0 +1,7 @@ +MEMORY +{ + ram (wxa!ri) : ORIGIN = 0x00000000, LENGTH = 128K + rom (rxai!w) : ORIGIN = 0x10080000, LENGTH = 8k + flash (rxai!w) : ORIGIN = 0x20000000, LENGTH = 16M + dram (wxa!ri) : ORIGIN = 0x40000000, LENGTH = 2048M +} diff --git a/src/ram.lds b/src/ram.lds new file mode 100644 index 0000000..d8158e5 --- /dev/null +++ b/src/ram.lds @@ -0,0 +1,12 @@ +OUTPUT_ARCH( "riscv" ) + +ENTRY( _start ) + +INCLUDE memory_map.ld + +REGION_ALIAS("REGION_TEXT", ram); +REGION_ALIAS("REGION_RODATA", ram); +REGION_ALIAS("REGION_DATA", ram); +REGION_ALIAS("REGION_BSS", ram); + +INCLUDE sections.ld \ No newline at end of file diff --git a/src/rom.lds b/src/rom.lds new file mode 100644 index 0000000..b730a16 --- /dev/null +++ b/src/rom.lds @@ -0,0 +1,12 @@ +OUTPUT_ARCH( "riscv" ) + +ENTRY( _start ) + +INCLUDE memory_map.ld + +REGION_ALIAS("REGION_TEXT", rom); +REGION_ALIAS("REGION_RODATA", rom); +REGION_ALIAS("REGION_DATA", ram); +REGION_ALIAS("REGION_BSS", ram); + +INCLUDE sections.ld \ No newline at end of file diff --git a/src/sections.ld b/src/sections.ld new file mode 100644 index 0000000..c87cea5 --- /dev/null +++ b/src/sections.ld @@ -0,0 +1,184 @@ +PHDRS +{ + flash PT_LOAD; + ram_init PT_LOAD; + tls PT_TLS; + ram PT_NULL; + dram PT_NULL; +} + +SECTIONS +{ + __stack_size = 
DEFINED(__stack_size) ? __stack_size : 2K; + __stack_segment_size = DEFINED(__stack_segment_size) ? __stack_segment_size : __stack_size; + .init ORIGIN(REGION_TEXT) : + { + KEEP (*(.text.init.enter)) + KEEP (*(.text.init.*)) + KEEP (*(.text.init)) + KEEP (*(SORT_NONE(.init))) + KEEP (*(.text.libgloss.start)) + } >REGION_TEXT AT>REGION_TEXT :flash + + .text : + { + *(.text.unlikely .text.unlikely.*) + *(.text.startup .text.startup.*) + *(.text .text.*) + *(.gnu.linkonce.t.*) + } >REGION_TEXT AT>REGION_TEXT :flash + + .fini : + { + KEEP (*(SORT_NONE(.fini))) + } >REGION_TEXT AT>REGION_TEXT :flash + + PROVIDE (__etext = .); + PROVIDE (_etext = .); + PROVIDE (etext = .); + + .rodata : + { + *(.rdata) + *(.rodata .rodata.*) + *(.gnu.linkonce.r.*) + } >REGION_RODATA AT>REGION_RODATA :flash + + .srodata : + { + *(.srodata.cst16) + *(.srodata.cst8) + *(.srodata.cst4) + *(.srodata.cst2) + *(.srodata .srodata.*) + } >REGION_RODATA AT>REGION_RODATA :flash + + . = ALIGN(4); + + .preinit_array : + { + PROVIDE_HIDDEN (__preinit_array_start = .); + KEEP (*(.preinit_array)) + PROVIDE_HIDDEN (__preinit_array_end = .); + } >REGION_RODATA AT>REGION_RODATA :flash + + .init_array : + { + PROVIDE_HIDDEN (__init_array_start = .); + KEEP (*(SORT_BY_INIT_PRIORITY(.init_array.*) SORT_BY_INIT_PRIORITY(.ctors.*))) + KEEP (*(.init_array EXCLUDE_FILE (*crtbegin.o *crtbegin?.o *crtend.o *crtend?.o ) .ctors)) + PROVIDE_HIDDEN (__init_array_end = .); + } >REGION_RODATA AT>REGION_RODATA :flash + + .fini_array : + { + PROVIDE_HIDDEN (__fini_array_start = .); + KEEP (*(SORT_BY_INIT_PRIORITY(.fini_array.*) SORT_BY_INIT_PRIORITY(.dtors.*))) + KEEP (*(.fini_array EXCLUDE_FILE (*crtbegin.o *crtbegin?.o *crtend.o *crtend?.o ) .dtors)) + PROVIDE_HIDDEN (__fini_array_end = .); + } >REGION_RODATA AT>REGION_RODATA :flash + + .ctors : + { + /* gcc uses crtbegin.o to find the start of + the constructors, so we make sure it is + first. 
Because this is a wildcard, it + doesn't matter if the user does not + actually link against crtbegin.o; the + linker won't look for a file to match a + wildcard. The wildcard also means that it + doesn't matter which directory crtbegin.o + is in. */ + KEEP (*crtbegin.o(.ctors)) + KEEP (*crtbegin?.o(.ctors)) + /* We don't want to include the .ctor section from + the crtend.o file until after the sorted ctors. + The .ctor section from the crtend file contains the + end of ctors marker and it must be last */ + KEEP (*(EXCLUDE_FILE (*crtend.o *crtend?.o ) .ctors)) + KEEP (*(SORT(.ctors.*))) + KEEP (*(.ctors)) + } >REGION_RODATA AT>REGION_RODATA :flash + + .dtors : + { + KEEP (*crtbegin.o(.dtors)) + KEEP (*crtbegin?.o(.dtors)) + KEEP (*(EXCLUDE_FILE (*crtend.o *crtend?.o ) .dtors)) + KEEP (*(SORT(.dtors.*))) + KEEP (*(.dtors)) + } >REGION_RODATA AT>REGION_RODATA :flash + + .lalign : + { + . = ALIGN(4); + PROVIDE( _data_lma = . ); + } >REGION_RODATA AT>REGION_RODATA :flash + + .data : ALIGN(4) + { + __DATA_BEGIN__ = .; + *(.data .data.*) + *(.gnu.linkonce.d.*) + __SDATA_BEGIN__ = .; + *(.sdata .sdata.*) + *(.gnu.linkonce.s.*) + } >REGION_DATA AT>REGION_RODATA :ram_init + + .tdata : ALIGN(8) { + PROVIDE( __tls_base = . ); + *(.tdata .tdata.* .gnu.linkonce.td.*) + } >REGION_DATA AT>REGION_RODATA :tls :ram_init + + PROVIDE( __data_source = LOADADDR(.data) ); + PROVIDE( __data_start = ADDR(.data) ); + PROVIDE( __data_end = ADDR(.tdata) + SIZEOF(.tdata) ); + + PROVIDE( __tdata_source = LOADADDR(.tdata) ); + PROVIDE( __tdata_size = SIZEOF(.tdata) ); + + .tbss : ALIGN(8) { + *(.tbss .tbss.* .gnu.linkonce.tb.*) + *(.tcommon .tcommon.*) + PROVIDE( __tls_end = . ); + } >REGION_DATA AT>REGION_DATA :tls :ram + + PROVIDE( __tbss_size = SIZEOF(.tbss) ); + PROVIDE( __tls_size = __tls_end - __tls_base ); + + .tbss_space : ALIGN(8) { + . = . 
+ __tbss_size; + } >REGION_DATA :ram + + .bss : + { + __BSS_BEGIN__ = .; + *(.sbss*) + *(.gnu.linkonce.sb.*) + *(.bss .bss.*) + *(.gnu.linkonce.b.*) + *(COMMON) + . = ALIGN(4); + __BSS_END__ = .; + + } >REGION_BSS AT>REGION_BSS :ram + + PROVIDE( __bss_start = ADDR(.tbss) ); + PROVIDE( __bss_end = ADDR(.bss) + SIZEOF(.bss) ); + + __global_pointer$ = MIN(__SDATA_BEGIN__ + 0x800, MAX(__DATA_BEGIN__ + 0x800, __BSS_END__ - 0x800)); + PROVIDE( _end = . ); + PROVIDE( end = . ); + + .stack ORIGIN(ram) + LENGTH(ram) - __stack_segment_size : + { + PROVIDE( _heap_end = . ); + . = __stack_segment_size; + PROVIDE( _sp = . ); + } >REGION_BSS AT>REGION_BSS :ram + + + + PROVIDE( tohost = . ); + PROVIDE( fromhost = . + 8 ); +} diff --git a/src/tcp_demo/main.c b/src/tcp_demo/main.c new file mode 100644 index 0000000..f324c4a --- /dev/null +++ b/src/tcp_demo/main.c @@ -0,0 +1,337 @@ +/* This is a small demo of the high-performance NetX Duo TCP/IP stack. + This program demonstrates ICMPv6 protocols Neighbor Discovery and + Stateless Address Configuration for IPv6, ARP for IPv4, and + TCP packet sending and receiving with a simulated Ethernet driver. */ + +#include "nx_api.h" +#include "tx_api.h" +#include +#define DEMO_STACK_SIZE 2048 +#define DEMO_DATA "ABCDEFGHIJKLMNOPQRSTUVWXYZ " +#define PACKET_SIZE 1536 +#define POOL_SIZE ((sizeof(NX_PACKET) + PACKET_SIZE) * 8) +#define NX_DISABLE_IPV6 + +/* Define the ThreadX and NetX object control blocks... */ + +TX_THREAD thread_0; +TX_THREAD thread_1; + +NX_PACKET_POOL pool_0; +NX_IP ip_0; +NX_IP ip_1; +NX_TCP_SOCKET client_socket; +NX_TCP_SOCKET server_socket; +UCHAR pool_buffer[POOL_SIZE]; + +/* Define the counters used in the demo application... */ +ULONG thread_0_counter; +ULONG thread_1_counter; +ULONG error_counter; + +/* Define thread prototypes. 
*/ + +void thread_0_entry(ULONG thread_input); +void thread_1_entry(ULONG thread_input); +void thread_1_connect_received(NX_TCP_SOCKET* server_socket, UINT port); +void thread_1_disconnect_received(NX_TCP_SOCKET* server_socket); + +void _nx_ram_network_driver(struct NX_IP_DRIVER_STRUCT* driver_req); +void _nx_mnrs_network_driver(struct NX_IP_DRIVER_STRUCT* driver_req); +#define NETWORK_DRIVER _nx_mnrs_network_driver +// alternative the ram driver can be used +// #define NETWORK_DRIVER _nx_ram_network_driver + +/* Define main entry point. */ + +int main() { + /* Enter the ThreadX kernel. */ + tx_kernel_enter(); +} + +/* Define what the initial system looks like. */ +void tx_application_define(void* first_unused_memory) { + CHAR* pointer; + UINT status; + puts("Setting up application"); + /* Setup the working pointer. */ + pointer = (CHAR*)first_unused_memory; + + /* Create the main thread. */ + tx_thread_create(&thread_0, "thread 0", thread_0_entry, 0, pointer, DEMO_STACK_SIZE, 4, 4, TX_NO_TIME_SLICE, TX_AUTO_START); + + pointer = pointer + DEMO_STACK_SIZE; + + /* Create the main thread. */ + tx_thread_create(&thread_1, "thread 1", thread_1_entry, 0, pointer, DEMO_STACK_SIZE, 3, 3, TX_NO_TIME_SLICE, TX_AUTO_START); + + pointer = pointer + DEMO_STACK_SIZE; + + /* Initialize the NetX system. */ + nx_system_initialize(); + + /* Create a packet pool. */ + status = nx_packet_pool_create(&pool_0, "packet_pool0", PACKET_SIZE, pool_buffer, POOL_SIZE); + + if(status) { + error_counter++; + } + + /* Create an IP instance. */ + status = nx_ip_create(&ip_0, "eth0", IP_ADDRESS(1, 2, 3, 4), 0xFFFFFF00UL, &pool_0, NETWORK_DRIVER, pointer, 2048, 1); + pointer = pointer + 2048; + + /* Create another IP instance. */ + status += nx_ip_create(&ip_1, "eth1", IP_ADDRESS(1, 2, 3, 5), 0xFFFFFF00UL, &pool_0, NETWORK_DRIVER, pointer, 2048, 1); + pointer = pointer + 2048; + + if(status) { + error_counter++; + } + + /* Enable ARP and supply ARP cache memory for IP Instance 0. 
*/ + status = nx_arp_enable(&ip_0, (void*)pointer, 1024); + pointer = pointer + 1024; + + /* Enable ARP and supply ARP cache memory for IP Instance 1. */ + status += nx_arp_enable(&ip_1, (void*)pointer, 1024); + pointer = pointer + 1024; + + /* Check ARP enable status. */ + if(status) { + error_counter++; + } + + /* Enable ICMP */ + status = nxd_icmp_enable(&ip_0); + if(status) { + error_counter++; + } + + status = nxd_icmp_enable(&ip_1); + if(status) { + error_counter++; + } + + /* Enable TCP processing for both IP instances. */ + status = nx_tcp_enable(&ip_0); + status += nx_tcp_enable(&ip_1); + + puts("Successfuly set up application"); +} + +/* Define the test threads. */ + +void thread_0_entry(ULONG thread_input) { + + UINT status; + NX_PACKET* my_packet; + ULONG length; + + NXD_ADDRESS server_ipv4_address; + NXD_ADDRESS peer_address; + ULONG peer_port; + + NX_PARAMETER_NOT_USED(thread_input); + + tx_thread_sleep(NX_IP_PERIODIC_RATE); + + /* set the TCP server addresses. */ + server_ipv4_address.nxd_ip_version = NX_IP_VERSION_V4; + server_ipv4_address.nxd_ip_address.v4 = IP_ADDRESS(1, 2, 3, 5); + /* Loop to repeat things over and over again! */ + puts("Entering client loop"); + while(1) { + + /* Create a socket. */ + status = nx_tcp_socket_create(&ip_0, &client_socket, "Client Socket", NX_IP_NORMAL, NX_FRAGMENT_OKAY, NX_IP_TIME_TO_LIVE, 200, + NX_NULL, NX_NULL); + + /* Check for error. */ + if(status) { + error_counter++; + } + + /* Bind the socket. */ + status = nx_tcp_client_socket_bind(&client_socket, 12, NX_WAIT_FOREVER); + + /* Check for error. */ + if(status) { + error_counter++; + } + + /* Attempt to connect the socket. */ + status = nxd_tcp_client_socket_connect(&client_socket, &server_ipv4_address, 12, NX_IP_PERIODIC_RATE); + + /* Check for error. */ + if(status) { + printf("Error with socket connect: 0x%x\n", status); + return; + } + + status = nxd_tcp_socket_peer_info_get(&client_socket, &peer_address, &peer_port); + + /* Allocate a packet. 
*/ + status = nx_packet_allocate(&pool_0, &my_packet, NX_TCP_PACKET, NX_WAIT_FOREVER); + + /* Check status. */ + if(status != NX_SUCCESS) { + break; + } + + /* Write ABCs into the packet payload! */ + nx_packet_data_append(my_packet, DEMO_DATA, sizeof(DEMO_DATA), &pool_0, TX_WAIT_FOREVER); + + status = nx_packet_length_get(my_packet, &length); + if((status) || (length != sizeof(DEMO_DATA))) { + error_counter++; + } + + /* Send the packet out! */ + status = nx_tcp_socket_send(&client_socket, my_packet, NX_IP_PERIODIC_RATE); + + /* Determine if the status is valid. */ + if(status) { + error_counter++; + nx_packet_release(my_packet); + } + + /* Disconnect this socket. */ + status = nx_tcp_socket_disconnect(&client_socket, NX_IP_PERIODIC_RATE); + + /* Determine if the status is valid. */ + if(status) { + error_counter++; + } + + /* Unbind the socket. */ + status = nx_tcp_client_socket_unbind(&client_socket); + + /* Check for error. */ + if(status) { + error_counter++; + } + + /* Delete the socket. */ + status = nx_tcp_socket_delete(&client_socket); + + /* Check for error. */ + if(status) { + error_counter++; + } + + /* Increment thread 0's counter. */ + thread_0_counter++; + } +} + +void thread_1_entry(ULONG thread_input) { + + UINT status; + NX_PACKET* packet_ptr; + ULONG actual_status; + + NX_PARAMETER_NOT_USED(thread_input); + + /* Wait 1 second for the IP thread to finish its initilization. */ + tx_thread_sleep(NX_IP_PERIODIC_RATE); + + /* Ensure the IP instance has been initialized. */ + status = nx_ip_status_check(&ip_1, NX_IP_INITIALIZE_DONE, &actual_status, NX_IP_PERIODIC_RATE); + + /* Check status... */ + if(status != NX_SUCCESS) { + + error_counter++; + return; + } + + /* Create a socket. */ + status = nx_tcp_socket_create(&ip_1, &server_socket, "Server Socket", NX_IP_NORMAL, NX_FRAGMENT_OKAY, NX_IP_TIME_TO_LIVE, 100, NX_NULL, + thread_1_disconnect_received); + + /* Check for error. */ + if(status) { + error_counter++; + } + + /* Setup this thread to listen. 
*/ + status = nx_tcp_server_socket_listen(&ip_1, 12, &server_socket, 5, thread_1_connect_received); + + /* Check for error. */ + if(status) { + error_counter++; + } + + /* Loop to create and establish server connections. */ + puts("Entering server loop"); + while(1) { + + /* Increment thread 1's counter. */ + thread_1_counter++; + + /* Accept a client socket connection. */ + status = nx_tcp_server_socket_accept(&server_socket, NX_WAIT_FOREVER); + + /* Check for error. */ + if(status) { + error_counter++; + } + + /* Receive a TCP message from the socket. */ + status = nx_tcp_socket_receive(&server_socket, &packet_ptr, NX_IP_PERIODIC_RATE); + + /* Check for error. */ + if(status) { + error_counter++; + } else { + char buffer[64]; + ULONG size; + nx_packet_data_extract_offset(packet_ptr, 0, buffer, 64, &size); + buffer[size] = 0; + printf("Received packet %lu with %s\n", thread_1_counter, buffer); + /* Release the packet. */ + nx_packet_release(packet_ptr); + } + + /* Disconnect the server socket. */ + status = nx_tcp_socket_disconnect(&server_socket, NX_IP_PERIODIC_RATE); + + /* Check for error. */ + if(status) { + error_counter++; + } + + /* Unaccept the server socket. */ + status = nx_tcp_server_socket_unaccept(&server_socket); + + /* Check for error. */ + if(status) { + error_counter++; + } + + /* Setup server socket for listening again. */ + status = nx_tcp_server_socket_relisten(&ip_1, 12, &server_socket); + + /* Check for error. */ + if(status) { + error_counter++; + } + } +} + +void thread_1_connect_received(NX_TCP_SOCKET* socket_ptr, UINT port) { + + /* Check for the proper socket and port. */ + if((socket_ptr != &server_socket) || (port != 12)) { + error_counter++; + } +} + +void thread_1_disconnect_received(NX_TCP_SOCKET* socket) { + + /* Check for proper disconnected socket. 
*/ + if(socket != &server_socket) { + error_counter++; + } +} diff --git a/src/thread_demo/main.c b/src/thread_demo/main.c new file mode 100644 index 0000000..6c12410 --- /dev/null +++ b/src/thread_demo/main.c @@ -0,0 +1,321 @@ +/* This is a small demo of the high-performance ThreadX kernel. It includes + examples of eight threads of different priorities, using a message queue, + semaphore, mutex, event flags group, byte pool, and block pool. */ + +#include "tx_api.h" +#include + +#define DEMO_STACK_SIZE 1024 +#define DEMO_BYTE_POOL_SIZE 9120 +#define DEMO_BLOCK_POOL_SIZE 100 +#define DEMO_QUEUE_SIZE 50 + +/* Define the ThreadX object control blocks... */ + +TX_THREAD thread_0; +TX_THREAD thread_1; +TX_THREAD thread_2; +TX_THREAD thread_3; +TX_THREAD thread_4; +TX_THREAD thread_5; +TX_THREAD thread_6; +TX_THREAD thread_7; +TX_QUEUE queue_0; +TX_SEMAPHORE semaphore_0; +TX_MUTEX mutex_0; +TX_EVENT_FLAGS_GROUP event_flags_0; +TX_BYTE_POOL byte_pool_0; +TX_BLOCK_POOL block_pool_0; +UCHAR memory_area[DEMO_BYTE_POOL_SIZE]; + +/* Define the counters used in the demo application... */ + +ULONG thread_0_counter; +ULONG thread_1_counter; +ULONG thread_1_messages_sent; +ULONG thread_2_counter; +ULONG thread_2_messages_received; +ULONG thread_3_counter; +ULONG thread_4_counter; +ULONG thread_5_counter; +ULONG thread_6_counter; +ULONG thread_7_counter; + +/* Define thread prototypes. */ + +void thread_0_entry(ULONG thread_input); +void thread_1_entry(ULONG thread_input); +void thread_2_entry(ULONG thread_input); +void thread_3_and_4_entry(ULONG thread_input); +void thread_5_entry(ULONG thread_input); +void thread_6_and_7_entry(ULONG thread_input); + +/* Define main entry point. */ +int main() { + /* Enter the ThreadX kernel. */ + tx_kernel_enter(); +} + +/* Define what the initial system looks like. */ + +void tx_application_define(void* first_unused_memory) { + + CHAR* pointer = TX_NULL; + + /* Create a byte memory pool from which to allocate the thread stacks. 
*/ + tx_byte_pool_create(&byte_pool_0, "byte pool 0", memory_area, DEMO_BYTE_POOL_SIZE); + + /* Put system definition stuff in here, e.g. thread creates and other assorted + create information. */ + + /* Allocate the stack for thread 0. */ + tx_byte_allocate(&byte_pool_0, (VOID**)&pointer, DEMO_STACK_SIZE, TX_NO_WAIT); + + /* Create the main thread. */ + tx_thread_create(&thread_0, "thread 0", thread_0_entry, 0, pointer, DEMO_STACK_SIZE, 1, 1, TX_NO_TIME_SLICE, TX_AUTO_START); + + /* Allocate the stack for thread 1. */ + tx_byte_allocate(&byte_pool_0, (VOID**)&pointer, DEMO_STACK_SIZE, TX_NO_WAIT); + + /* Create threads 1 and 2. These threads pass information through a ThreadX + message queue. It is also interesting to note that these threads have a + time slice. */ + tx_thread_create(&thread_1, "thread 1", thread_1_entry, 1, pointer, DEMO_STACK_SIZE, 16, 16, 4, TX_AUTO_START); + + /* Allocate the stack for thread 2. */ + tx_byte_allocate(&byte_pool_0, (VOID**)&pointer, DEMO_STACK_SIZE, TX_NO_WAIT); + + tx_thread_create(&thread_2, "thread 2", thread_2_entry, 2, pointer, DEMO_STACK_SIZE, 16, 16, 4, TX_AUTO_START); + + /* Allocate the stack for thread 3. */ + tx_byte_allocate(&byte_pool_0, (VOID**)&pointer, DEMO_STACK_SIZE, TX_NO_WAIT); + + /* Create threads 3 and 4. These threads compete for a ThreadX counting + semaphore. An interesting thing here is that both threads share the same + instruction area. */ + tx_thread_create(&thread_3, "thread 3", thread_3_and_4_entry, 3, pointer, DEMO_STACK_SIZE, 8, 8, TX_NO_TIME_SLICE, TX_AUTO_START); + + /* Allocate the stack for thread 4. */ + tx_byte_allocate(&byte_pool_0, (VOID**)&pointer, DEMO_STACK_SIZE, TX_NO_WAIT); + + tx_thread_create(&thread_4, "thread 4", thread_3_and_4_entry, 4, pointer, DEMO_STACK_SIZE, 8, 8, TX_NO_TIME_SLICE, TX_AUTO_START); + + /* Allocate the stack for thread 5. */ + tx_byte_allocate(&byte_pool_0, (VOID**)&pointer, DEMO_STACK_SIZE, TX_NO_WAIT); + + /* Create thread 5. 
This thread simply pends on an event flag which will be + set by thread_0. */ + tx_thread_create(&thread_5, "thread 5", thread_5_entry, 5, pointer, DEMO_STACK_SIZE, 4, 4, TX_NO_TIME_SLICE, TX_AUTO_START); + + /* Allocate the stack for thread 6. */ + tx_byte_allocate(&byte_pool_0, (VOID**)&pointer, DEMO_STACK_SIZE, TX_NO_WAIT); + + /* Create threads 6 and 7. These threads compete for a ThreadX mutex. */ + tx_thread_create(&thread_6, "thread 6", thread_6_and_7_entry, 6, pointer, DEMO_STACK_SIZE, 8, 8, TX_NO_TIME_SLICE, TX_AUTO_START); + + /* Allocate the stack for thread 7. */ + tx_byte_allocate(&byte_pool_0, (VOID**)&pointer, DEMO_STACK_SIZE, TX_NO_WAIT); + + tx_thread_create(&thread_7, "thread 7", thread_6_and_7_entry, 7, pointer, DEMO_STACK_SIZE, 8, 8, TX_NO_TIME_SLICE, TX_AUTO_START); + + /* Allocate the message queue. */ + tx_byte_allocate(&byte_pool_0, (VOID**)&pointer, DEMO_QUEUE_SIZE * sizeof(ULONG), TX_NO_WAIT); + + /* Create the message queue shared by threads 1 and 2. */ + tx_queue_create(&queue_0, "queue 0", TX_1_ULONG, pointer, DEMO_QUEUE_SIZE * sizeof(ULONG)); + + /* Create the semaphore used by threads 3 and 4. */ + tx_semaphore_create(&semaphore_0, "semaphore 0", 1); + + /* Create the event flags group used by threads 1 and 5. */ + tx_event_flags_create(&event_flags_0, "event flags 0"); + + /* Create the mutex used by thread 6 and 7 without priority inheritance. */ + tx_mutex_create(&mutex_0, "mutex 0", TX_NO_INHERIT); + + /* Allocate the memory for a small block pool. */ + tx_byte_allocate(&byte_pool_0, (VOID**)&pointer, DEMO_BLOCK_POOL_SIZE, TX_NO_WAIT); + + /* Create a block memory pool to allocate a message buffer from. */ + tx_block_pool_create(&block_pool_0, "block pool 0", sizeof(ULONG), pointer, DEMO_BLOCK_POOL_SIZE); + + /* Allocate a block and release the block memory. */ + tx_block_allocate(&block_pool_0, (VOID**)&pointer, TX_NO_WAIT); + + /* Release the block back to the pool. 
*/ + tx_block_release(pointer); +} + +/* Define the test threads. */ + +void thread_0_entry(ULONG thread_input) { + + UINT status; + + /* This thread simply sits in while-forever-sleep loop. */ + while(1) { + puts("[Thread] : thread_0_entry is here!"); + /* Increment the thread counter. */ + thread_0_counter++; + + /* Sleep for 10 ticks. */ + tx_thread_sleep(10); + + /* Set event flag 0 to wakeup thread 5. */ + status = tx_event_flags_set(&event_flags_0, 0x1, TX_OR); + + /* Check status. */ + if(status != TX_SUCCESS) + break; + } +} + +void thread_1_entry(ULONG thread_input) { + + UINT status; + + /* This thread simply sends messages to a queue shared by thread 2. */ + while(1) { + /* Increment the thread counter. */ + thread_1_counter++; + + /* Send message to queue 0. */ + status = tx_queue_send(&queue_0, &thread_1_messages_sent, TX_WAIT_FOREVER); + + /* Check completion status. */ + if(status != TX_SUCCESS) + break; + + /* Increment the message sent. */ + thread_1_messages_sent++; + printf("Sent message %i\n", thread_1_messages_sent); + } +} + +void thread_2_entry(ULONG thread_input) { + + ULONG received_message; + UINT status; + + /* This thread retrieves messages placed on the queue by thread 1. */ + while(1) { + /* Increment the thread counter. */ + thread_2_counter++; + + /* Retrieve a message from the queue. */ + status = tx_queue_receive(&queue_0, &received_message, TX_WAIT_FOREVER); + printf("Received message %i\n", received_message); + + /* Check completion status and make sure the message is what we + expected. */ + if((status != TX_SUCCESS) || (received_message != thread_2_messages_received)) + break; + + /* Otherwise, all is okay. Increment the received message count. */ + thread_2_messages_received++; + } +} + +void thread_3_and_4_entry(ULONG thread_input) { + + UINT status; + + /* This function is executed from thread 3 and thread 4. As the loop + below shows, these function compete for ownership of semaphore_0. 
*/
+    while(1) {
+        puts("[Thread] : thread_3_and_4_entry is here!");
+
+        /* Increment the thread counter. */
+        if(thread_input == 3)
+            thread_3_counter++;
+        else
+            thread_4_counter++;
+
+        /* Get the semaphore with suspension. */
+        status = tx_semaphore_get(&semaphore_0, TX_WAIT_FOREVER);
+
+        /* Check status. */
+        if(status != TX_SUCCESS)
+            break;
+
+        /* Sleep for 2 ticks to hold the semaphore. */
+        tx_thread_sleep(2);
+
+        /* Release the semaphore. */
+        status = tx_semaphore_put(&semaphore_0);
+
+        /* Check status. */
+        if(status != TX_SUCCESS)
+            break;
+    }
+}
+
+/* Thread 5: blocks on event flag 0, which thread 0 sets every 10 ticks.
+   TX_OR_CLEAR consumes the flag on each wakeup, so every iteration waits
+   for a fresh set. thread_input is unused for this thread. */
+void thread_5_entry(ULONG thread_input) {
+
+    UINT status;
+    ULONG actual_flags;
+
+    /* This thread simply waits for an event in a forever loop. */
+    while(1) {
+        puts("[Thread] : thread_5_entry is here!");
+        /* Increment the thread counter. */
+        thread_5_counter++;
+
+        /* Wait for event flag 0. */
+        status = tx_event_flags_get(&event_flags_0, 0x1, TX_OR_CLEAR, &actual_flags, TX_WAIT_FOREVER);
+
+        /* Check status. */
+        if((status != TX_SUCCESS) || (actual_flags != 0x1))
+            break;
+    }
+}
+
+/* Threads 6 and 7: shared entry point; thread_input (6 or 7) selects which
+   counter to bump. Demonstrates recursive ownership of mutex_0: the mutex
+   is acquired twice by the same thread, so it must also be released twice
+   before the peer thread can obtain it. */
+void thread_6_and_7_entry(ULONG thread_input) {
+
+    UINT status;
+
+    /* This function is executed from thread 6 and thread 7. As the loop
+       below shows, these function compete for ownership of mutex_0. */
+    while(1) {
+        puts("[Thread] : thread_6_and_7_entry is here!");
+        /* Increment the thread counter. */
+        if(thread_input == 6)
+            thread_6_counter++;
+        else
+            thread_7_counter++;
+
+        /* Get the mutex with suspension. */
+        status = tx_mutex_get(&mutex_0, TX_WAIT_FOREVER);
+
+        /* Check status. */
+        if(status != TX_SUCCESS)
+            break;
+
+        /* Get the mutex again with suspension. This shows
+           that an owning thread may retrieve the mutex it
+           owns multiple times. */
+        status = tx_mutex_get(&mutex_0, TX_WAIT_FOREVER);
+
+        /* Check status. */
+        if(status != TX_SUCCESS)
+            break;
+
+        /* Sleep for 2 ticks to hold the mutex. */
+        tx_thread_sleep(2);
+
+        /* Release the mutex. */
+        status = tx_mutex_put(&mutex_0);
+
+        /* Check status. */
+        if(status != TX_SUCCESS)
+            break;
+
+        /* Release the mutex again. This will actually
+           release ownership since it was obtained twice. */
+        status = tx_mutex_put(&mutex_0);
+
+        /* Check status. */
+        if(status != TX_SUCCESS)
+            break;
+    }
+}
diff --git a/third-party/netxduo b/third-party/netxduo
new file mode 160000
index 0000000..b54bf01
--- /dev/null
+++ b/third-party/netxduo
@@ -0,0 +1 @@
+Subproject commit b54bf0180199be4a9677d55113d6c5f5ed1879bc
diff --git a/third-party/picolibc b/third-party/picolibc
new file mode 160000
index 0000000..f7edb08
--- /dev/null
+++ b/third-party/picolibc
@@ -0,0 +1 @@
+Subproject commit f7edb0869e64b9d687f46961adeef7154921fe92
diff --git a/third-party/threadx b/third-party/threadx
new file mode 160000
index 0000000..4b6e810
--- /dev/null
+++ b/third-party/threadx
@@ -0,0 +1 @@
+Subproject commit 4b6e8100d932a3a67b34c6eb17f84f3bffb9e2ae