Compare commits

...

4 Commits

Author SHA1 Message Date
alex a6a6a034d6 removes convoluted generation of offsets
As ULONG is now 8 bytes on RV64, the SMP offset is the only one left; hardcoding it as a magic number is simpler
2026-04-02 11:35:37 +02:00
alex 9212d40d86 adds newest version of patched threadx tests 2026-04-02 11:00:50 +02:00
alex c16e6b0446 adjust SMP port to also use ULONG = 8 bytes 2026-04-02 10:48:43 +02:00
alex eda2a85dc1 makes ULONG 64 bit on RV64 2026-04-02 10:31:12 +02:00
20 changed files with 80 additions and 175 deletions
+2 -7
View File
@@ -66,9 +66,7 @@
#define LWU lw
#define LOG_REGBYTES 2
#endif
#define REGBYTES (1 << LOG_REGBYTES)
#define TX_THREAD_STACK_END_OFFSET 2 * 4 + 2 * REGBYTES
#define TX_THREAD_TIME_SLICE_OFFSET 3 * 4 + 3 * REGBYTES
#define REGBYTES (1 << LOG_REGBYTES)
#else /*not __ASSEMBLER__ */
@@ -96,14 +94,11 @@ typedef unsigned char UCHAR;
typedef int INT;
typedef unsigned int UINT;
typedef int LONG;
typedef unsigned int ULONG; // ThreadX expects ULONG to be 32 bit
typedef unsigned long ULONG;
typedef unsigned long long ULONG64;
typedef short SHORT;
typedef unsigned short USHORT;
#define ULONG64_DEFINED
#define ALIGN_TYPE_DEFINED
// Since ULONG is not actually unsigned long, it is to small to hold pointers for 64-bit systems
#define ALIGN_TYPE unsigned long
/* Define the priority levels for ThreadX. Legal values range
from 32 to 1024 and MUST be evenly divisible by 32. */
+6 -6
View File
@@ -80,9 +80,9 @@ _tx_thread_context_restore:
{ */
la t0, _tx_thread_system_state // Pickup addr of nested interrupt count
lw t1, 0(t0) // Pickup nested interrupt count
LOAD t1, 0(t0) // Pickup nested interrupt count
addi t1, t1, -1 // Decrement the nested interrupt counter
sw t1, 0(t0) // Store new nested count
STORE t1, 0(t0) // Store new nested count
beqz t1, _tx_thread_not_nested_restore // If 0, not nested restore
/* Interrupts are nested. */
@@ -190,7 +190,7 @@ _tx_thread_not_nested_restore:
LOAD t1, _tx_thread_current_ptr // Pickup current thread pointer
beqz t1, _tx_thread_idle_system_restore // If NULL, idle system restore
LOAD t2, _tx_thread_preempt_disable // Pickup preempt disable flag
LWU t2, _tx_thread_preempt_disable // Pickup preempt disable flag
bgtz t2, _tx_thread_no_preempt_restore // If set, restore interrupted thread
LOAD t2, _tx_thread_execute_ptr // Pickup thread execute pointer
@@ -354,14 +354,14 @@ _tx_thread_preempt_restore:
{ */
la t0, _tx_timer_time_slice // Pickup time slice variable address
lw t2, 0(t0) // Pickup time slice
LOAD t2, 0(t0) // Pickup time slice
beqz t2, _tx_thread_dont_save_ts // If 0, skip time slice processing
/* _tx_thread_current_ptr -> tx_thread_time_slice = _tx_timer_time_slice
_tx_timer_time_slice = 0; */
sw t2, TX_THREAD_TIME_SLICE_OFFSET(t1) // Save current time slice
sw x0, 0(t0) // Clear global time slice
STORE t2, 6*REGBYTES(t1) // Save current time slice
STORE x0, 0(t0) // Clear global time slice
/* } */
+2 -2
View File
@@ -74,14 +74,14 @@ _tx_thread_context_save:
STORE t1, 18*REGBYTES(sp)
la t0, _tx_thread_system_state // Pickup address of system state
lw t1, 0(t0) // Pickup system state
LOAD t1, 0(t0) // Pickup system state
/* Check for a nested interrupt condition. */
/* if (_tx_thread_system_state++)
{ */
beqz t1, _tx_thread_not_nested_save // If 0, first interrupt condition
addi t1, t1, 1 // Increment the interrupt counter
sw t1, 0(t0) // Store the interrupt counter
STORE t1, 0(t0) // Store the interrupt counter
/* Nested interrupt condition.
Save the rest of the scratch registers on the stack and return to the
+1 -1
View File
@@ -105,12 +105,12 @@ _tx_thread_schedule_loop:
/* _tx_timer_time_slice = _tx_thread_current_ptr -> tx_thread_time_slice; */
la t2, _tx_timer_time_slice // Pickup time-slice variable address
STORE t3, 0(t2) // Store new time-slice*/
/* Switch to the thread's stack. */
/* SP = _tx_thread_execute_ptr -> tx_thread_stack_ptr; */
LOAD sp, 2*REGBYTES(t1) // Switch to thread's stack
STORE t3, 0(t2) // Store new time-slice*/
#ifdef TX_ENABLE_EXECUTION_CHANGE_NOTIFY
+1 -1
View File
@@ -138,7 +138,7 @@ If floating point support:
Stack Bottom: (higher memory address) */
LOAD t0, TX_THREAD_STACK_END_OFFSET(a0) // Pickup end of stack area
LOAD t0, 4*REGBYTES(a0) // Pickup end of stack area
andi t0, t0, -16 // Ensure 16-byte alignment
/* Actually build the stack frame. */
+3 -3
View File
@@ -151,7 +151,7 @@ _tx_thread_system_return:
{ */
la t4, _tx_timer_time_slice // Pickup time slice variable addr
lw t3, 0(t4) // Pickup time slice value
LOAD t3, 0(t4) // Pickup time slice value
la t2, _tx_thread_schedule // Pickup address of scheduling loop
beqz t3, _tx_thread_dont_save_ts // If no time-slice, don't save it
@@ -159,8 +159,8 @@ _tx_thread_system_return:
/* _tx_thread_current_ptr -> tx_thread_time_slice = _tx_timer_time_slice;
_tx_timer_time_slice = 0; */
sw t3, TX_THREAD_TIME_SLICE_OFFSET(t1) // Save current time-slice for thread
sw x0, 0(t4) // Clear time-slice variable
STORE t3, 6*REGBYTES(t1) // Save current time-slice for thread
STORE x0, 0(t4) // Clear time-slice variable
/* } */
_tx_thread_dont_save_ts:
-19
View File
@@ -1,8 +1,6 @@
cmake_minimum_required(VERSION 3.24)
project(smp_demo LANGUAGES C ASM)
include(${CMAKE_CURRENT_SOURCE_DIR}/cmake/ThreadXSmpOffsets.cmake)
set(THREADX_COMMON_SMP_DIR ${CMAKE_CURRENT_SOURCE_DIR}/../../third-party/threadx/common_smp)
if(NOT EXISTS "${THREADX_COMMON_SMP_DIR}")
message(FATAL_ERROR "could not find ThreadX SMP sources, is the submodule checked out?")
@@ -31,19 +29,6 @@ set(THREADX_SMP_CUSTOM_SRC
src/tx_timer_interrupt.c
)
threadx_smp_add_offsets(
TARGET threadx_smp_offsets
OUTPUT_DIR ${CMAKE_CURRENT_BINARY_DIR}/generated
SOURCE ${CMAKE_CURRENT_SOURCE_DIR}/src/tx_asm_offsets.c
INCLUDE_DIRS
${THREADX_COMMON_SMP_DIR}/inc
${THREADX_SMP_CUSTOM_INC}
DEPENDS
${CMAKE_CURRENT_SOURCE_DIR}/inc/tx_port.h
${THREADX_COMMON_SMP_DIR}/inc/tx_api.h
OUT_INCLUDE_DIR THREADX_SMP_GENERATED_INC_DIR
)
set(THREADX_SMP_SOURCES
${THREADX_COMMON_SMP_DIR}/src/tx_block_allocate.c
${THREADX_COMMON_SMP_DIR}/src/tx_block_pool_cleanup.c
@@ -250,8 +235,4 @@ target_include_directories(threadx_smp PUBLIC
${THREADX_COMMON_SMP_DIR}/inc
${THREADX_SMP_CUSTOM_INC}
)
target_include_directories(threadx_smp PRIVATE
${THREADX_SMP_GENERATED_INC_DIR}
)
target_compile_definitions(threadx_smp PRIVATE TX_QUEUE_MESSAGE_MAX_SIZE=16) #This is addressed in PR #503
add_dependencies(threadx_smp threadx_smp_offsets)
@@ -1,67 +0,0 @@
# threadx_smp_add_offsets
#
# Generates an assembler include file (tx_asm_offsets.inc) holding ThreadX
# structure offsets. It compiles SOURCE to assembly with -S, then runs
# GenerateAsmOffsets.cmake to extract the embedded offset markers into the
# .inc file consumed by the port's assembly sources.
#
# Keyword arguments:
#   TARGET          (required) name of the custom target that drives generation
#   OUTPUT_DIR      (required) directory receiving tx_asm_offsets.s/.inc
#   SOURCE          (required) C file that emits the offset markers
#   OUT_INCLUDE_DIR (required) variable name set in the caller's scope to the
#                   directory containing the generated .inc file
#   INCLUDE_DIRS            extra -I paths for the offset compile
#   COMPILE_DEFINITIONS     extra -D definitions for the offset compile
#   DEPENDS                 additional files that should retrigger generation
#
# Example:
#   threadx_smp_add_offsets(
#     TARGET threadx_smp_offsets
#     OUTPUT_DIR ${CMAKE_CURRENT_BINARY_DIR}/generated
#     SOURCE src/tx_asm_offsets.c
#     OUT_INCLUDE_DIR THREADX_SMP_GENERATED_INC_DIR)
function(threadx_smp_add_offsets)
set(options)
set(oneValueArgs TARGET OUTPUT_DIR SOURCE OUT_INCLUDE_DIR)
set(multiValueArgs INCLUDE_DIRS COMPILE_DEFINITIONS DEPENDS)
cmake_parse_arguments(THREADX_SMP_OFFSETS "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
# Fail fast on missing required keywords instead of producing a broken rule.
if(NOT THREADX_SMP_OFFSETS_TARGET)
message(FATAL_ERROR "threadx_smp_add_offsets requires TARGET")
endif()
if(NOT THREADX_SMP_OFFSETS_OUTPUT_DIR)
message(FATAL_ERROR "threadx_smp_add_offsets requires OUTPUT_DIR")
endif()
if(NOT THREADX_SMP_OFFSETS_SOURCE)
message(FATAL_ERROR "threadx_smp_add_offsets requires SOURCE")
endif()
if(NOT THREADX_SMP_OFFSETS_OUT_INCLUDE_DIR)
message(FATAL_ERROR "threadx_smp_add_offsets requires OUT_INCLUDE_DIR")
endif()
# Post-processing script lives next to this module file.
set(threadx_smp_generate_script "${CMAKE_CURRENT_FUNCTION_LIST_DIR}/GenerateAsmOffsets.cmake")
set(threadx_smp_offset_asm "${THREADX_SMP_OFFSETS_OUTPUT_DIR}/tx_asm_offsets.s")
set(threadx_smp_offset_inc "${THREADX_SMP_OFFSETS_OUTPUT_DIR}/tx_asm_offsets.inc")
# Turn list arguments into compiler flags (-Ipath, -Dname).
set(threadx_smp_offset_include_args ${THREADX_SMP_OFFSETS_INCLUDE_DIRS})
list(TRANSFORM threadx_smp_offset_include_args PREPEND -I)
set(threadx_smp_offset_define_args ${THREADX_SMP_OFFSETS_COMPILE_DEFINITIONS})
list(TRANSFORM threadx_smp_offset_define_args PREPEND -D)
# Mirror the project's C flags so offsets match the real build (ABI, -march,
# config-specific defines). NOTE(review): CMAKE_BUILD_TYPE is empty under
# multi-config generators, so config flags are only picked up when set.
set(threadx_smp_offset_cflags ${CMAKE_C_FLAGS})
if(CMAKE_BUILD_TYPE)
string(TOUPPER "${CMAKE_BUILD_TYPE}" threadx_smp_build_type_upper)
list(APPEND threadx_smp_offset_cflags ${CMAKE_C_FLAGS_${threadx_smp_build_type_upper}})
endif()
# CMAKE_C_FLAGS is a single space-separated string; split it into list items.
separate_arguments(threadx_smp_offset_cflags)
# Two-step rule: (1) compile SOURCE to assembly, (2) scrape the offset marker
# lines out of the assembly into the .inc file. The .s is a BYPRODUCT so
# Ninja can track and clean it.
add_custom_command(
OUTPUT ${threadx_smp_offset_inc}
BYPRODUCTS ${threadx_smp_offset_asm}
COMMAND ${CMAKE_COMMAND} -E make_directory ${THREADX_SMP_OFFSETS_OUTPUT_DIR}
COMMAND ${CMAKE_C_COMPILER}
${CMAKE_C_COMPILER_ARG1}
${threadx_smp_offset_cflags}
${threadx_smp_offset_include_args}
${threadx_smp_offset_define_args}
-S
-o ${threadx_smp_offset_asm}
${THREADX_SMP_OFFSETS_SOURCE}
COMMAND ${CMAKE_COMMAND}
-DINPUT=${threadx_smp_offset_asm}
-DOUTPUT=${threadx_smp_offset_inc}
-P ${threadx_smp_generate_script}
DEPENDS
${THREADX_SMP_OFFSETS_SOURCE}
${threadx_smp_generate_script}
${THREADX_SMP_OFFSETS_DEPENDS}
COMMAND_EXPAND_LISTS
VERBATIM
)
# Named target other targets can add_dependencies() on to order generation.
add_custom_target(${THREADX_SMP_OFFSETS_TARGET} DEPENDS ${threadx_smp_offset_inc})
# Return the include dir to the caller through the requested variable name.
set(${THREADX_SMP_OFFSETS_OUT_INCLUDE_DIR} ${THREADX_SMP_OFFSETS_OUTPUT_DIR} PARENT_SCOPE)
endfunction()
+17 -13
View File
@@ -57,22 +57,26 @@
#ifdef __ASSEMBLER__
#if __riscv_xlen == 64
#define SLL32 sllw
#define STORE sd
#define LOAD ld
#define LWU lwu
#define LOG_REGBYTES 3
#define SLL32 sllw
#define STORE sd
#define LOAD ld
#define LWU lwu
#define AMOSWAP_AQ amoswap.d.aq
#define AMOSWAP_RL amoswap.d.rl
#define LOG_REGBYTES 3
#define TX_THREAD_SMP_LOCK_READY_BIT_OFFSET 312 // This changes if thread or timer internal extensions are used
#else
#define SLL32 sll
#define STORE sw
#define LOAD lw
#define LWU lw
#define LOG_REGBYTES 2
#define SLL32 sll
#define STORE sw
#define LOAD lw
#define LWU lw
#define AMOSWAP_AQ amoswap.w.aq
#define AMOSWAP_RL amoswap.w.rl
#define LOG_REGBYTES 2
#define TX_THREAD_SMP_LOCK_READY_BIT_OFFSET 168 // This changes if thread or timer internal extensions are used
#endif
#define REGBYTES (1 << LOG_REGBYTES)
#include "tx_asm_offsets.inc"
#else /*not __ASSEMBLER__ */
/************* Define ThreadX SMP constants. *************/
@@ -166,7 +170,7 @@ typedef unsigned char UCHAR;
typedef int INT;
typedef unsigned int UINT;
typedef int LONG;
typedef unsigned int ULONG;
typedef unsigned long ULONG;
typedef unsigned long long ULONG64;
typedef short SHORT;
typedef unsigned short USHORT;
-12
View File
@@ -1,12 +0,0 @@
#include <stddef.h>
#include "tx_api.h"
/* Emits a "-->SYMBOL value" text marker into the compiler's assembly output.
   The %c0 operand prints the constant without assembler punctuation, so a
   post-processing script can grep the generated .s file for "-->" lines and
   turn them into assembler #define-style entries. */
#define TX_ASM_OFFSET(symbol, value) __asm__ volatile("\n.ascii \"-->" #symbol " %c0\\n\"" : : "i"(value))
/* Never executed at runtime. Compiling this translation unit with -S embeds
   the TX_THREAD field offsets below as text in the assembly output; the build
   system extracts them into tx_asm_offsets.inc for the assembly port files. */
void tx_asm_offsets_generate(void)
{
TX_ASM_OFFSET(TX_THREAD_STACK_END_OFFSET, offsetof(TX_THREAD, tx_thread_stack_end));
TX_ASM_OFFSET(TX_THREAD_TIME_SLICE_OFFSET, offsetof(TX_THREAD, tx_thread_time_slice));
TX_ASM_OFFSET(TX_THREAD_SMP_LOCK_READY_BIT_OFFSET, offsetof(TX_THREAD, tx_thread_smp_lock_ready_bit));
}
@@ -80,13 +80,13 @@ _tx_thread_context_restore:
{ */
csrr t3, mhartid // Pickup current hart ID
slli t4, t3, 2 // Build per-hart ULONG offset
slli t4, t3, LOG_REGBYTES // Build per-hart ULONG offset
slli t5, t3, LOG_REGBYTES // Build per-hart pointer offset
la t0, _tx_thread_system_state // Pickup base of system-state array
add t0, t0, t4 // Select this hart's system-state slot
lw t1, 0(t0) // Pickup nested interrupt count
LOAD t1, 0(t0) // Pickup nested interrupt count
addi t1, t1, -1 // Decrement the nested interrupt counter
sw t1, 0(t0) // Store new nested count
STORE t1, 0(t0) // Store new nested count
beqz t1, _tx_thread_not_nested_restore // If 0, not nested restore
/* Interrupts are nested. */
@@ -202,10 +202,10 @@ _tx_thread_not_nested_restore:
beq t1, t2, _tx_thread_no_preempt_restore // Same thread selected, no preemption
la t0, _tx_thread_smp_protection // Pickup protection structure
lw t2, 4(t0) // Pickup owning hart
LOAD t2, 1*REGBYTES(t0) // Pickup owning hart
bne t2, t3, _tx_thread_preempt_restore // If owned by another hart, preempt
LOAD t2, _tx_thread_preempt_disable // Pickup preempt disable flag
LWU t2, _tx_thread_preempt_disable // Pickup preempt disable flag
bgtz t2, _tx_thread_no_preempt_restore // If set, restore interrupted thread
@@ -367,14 +367,14 @@ _tx_thread_preempt_restore:
la t0, _tx_timer_time_slice // Pickup base of time-slice array
add t0, t0, t4 // Select this hart's time-slice slot
lw t2, 0(t0) // Pickup time slice
LOAD t2, 0(t0) // Pickup time slice
beqz t2, _tx_thread_dont_save_ts // If 0, skip time slice processing
/* _tx_thread_current_ptr -> tx_thread_time_slice = _tx_timer_time_slice
_tx_timer_time_slice = 0; */
sw t2, TX_THREAD_TIME_SLICE_OFFSET(t1) // Save current time slice
sw x0, 0(t0) // Clear global time slice
STORE t2, 6*REGBYTES(t1) // Save current time slice
STORE x0, 0(t0) // Clear global time slice
/* } */
@@ -392,7 +392,7 @@ _tx_thread_dont_save_ts:
fence rw, rw // Publish current-thread clear before ready token
addi t0, t1, TX_THREAD_SMP_LOCK_READY_BIT_OFFSET // Pickup lock/ready-bit address
li t2, 1 // Rebuild ready token
amoswap.w.rl x0, t2, (t0) // Set thread ready token for reschedule
AMOSWAP_RL x0, t2, (t0) // Set thread ready token for reschedule
/* } */
_tx_thread_idle_system_restore:
@@ -77,18 +77,18 @@ _tx_thread_context_save:
STORE x29, 15*REGBYTES(sp) // Save t4 before reusing it
csrr t2, mhartid // Pickup current hart ID
slli t3, t2, 2 // Build per-hart ULONG offset
slli t3, t2, LOG_REGBYTES // Build per-hart ULONG offset
slli t4, t2, LOG_REGBYTES // Build per-hart pointer offset
la t1, _tx_thread_system_state // Pickup base of system state array
add t0, t1, t3 // Select this hart's system-state slot
lw t1, 0(t0) // Pickup system state
LOAD t1, 0(t0) // Pickup system state
/* Check for a nested interrupt condition. */
/* if (_tx_thread_system_state++)
{ */
beqz t1, _tx_thread_not_nested_save // If 0, first interrupt condition
addi t1, t1, 1 // Increment the interrupt counter
sw t1, 0(t0) // Store the interrupt counter
STORE t1, 0(t0) // Store the interrupt counter
/* Nested interrupt condition.
Save the rest of the scratch registers on the stack and return to the
@@ -170,7 +170,7 @@ _tx_thread_not_nested_save:
/* else if (_tx_thread_current_ptr)
{ */
addi t1, t1, 1 // Increment the interrupt counter
sw t1, 0(t0) // Store the interrupt counter
STORE t1, 0(t0) // Store the interrupt counter
/* Not nested: Find the user thread that was running and load our SP */
+2 -2
View File
@@ -94,7 +94,7 @@ _tx_thread_schedule_thread:
/* Atomically claim the thread's ready token so only one hart can
dispatch this TCB at a time. */
addi t2, t1, TX_THREAD_SMP_LOCK_READY_BIT_OFFSET // Pickup lock/ready-bit address
amoswap.w.aq t3, x0, (t2) // Clear it and fetch prior state
AMOSWAP_AQ t3, x0, (t2) // Clear it and fetch prior state
beqz t3, _tx_thread_schedule // If not ready, retry scheduling
/* }
@@ -115,7 +115,7 @@ _tx_thread_schedule_thread:
and restart so the new selection is not missed. */
STORE x0, 0(t5) // Clear current thread pointer
li t3, 1 // Rebuild ready token
amoswap.w.rl x0, t3, (t2) // Restore ready token with release ordering
AMOSWAP_RL x0, t3, (t2) // Restore ready token with release ordering
j _tx_thread_schedule_loop // Restart scheduling
_execute_pointer_did_not_change:
@@ -8,7 +8,7 @@
_tx_thread_smp_current_state_get:
csrr t0, mhartid // Pickup current hart ID
la t1, _tx_thread_system_state // Base of per-hart system-state array
slli t0, t0, 2 // Build offset into array
slli t0, t0, LOG_REGBYTES // Build offset into array
add t1, t1, t0 // Select this hart's slot
LWU a0, 0(t1) // Return current system state
LOAD a0, 0(t1) // Return current system state
ret
@@ -16,17 +16,21 @@ _tx_thread_smp_initialize_wait:
beqz t0, _tx_thread_smp_initialize_done // Core 0 does not wait
/* Build per-hart offsets for ULONG and pointer arrays. */
slli t1, t0, 2 // ULONG array offset
slli t1, t0, LOG_REGBYTES // ULONG array offset
slli t2, t0, LOG_REGBYTES // Pointer array offset
/* Wait until ThreadX has acknowledged this hart by setting its
system state to TX_INITIALIZE_IN_PROGRESS. */
li t3, 0xF0F0F0F0 // TX_INITIALIZE_IN_PROGRESS
#if __riscv_xlen == 64
slli t3, t3, 32
srli t3, t3, 32
#endif
la t4, _tx_thread_system_state // Base of system state array
add t5, t4, t1 // This hart's system state slot
_tx_thread_smp_wait_for_initialize:
LWU t6, 0(t5) // Pickup current hart's system state
LOAD t6, 0(t5) // Pickup current hart's system state
bne t6, t3, _tx_thread_smp_wait_for_initialize
/* Save the system stack pointer for this hart. */
@@ -38,15 +42,15 @@ _tx_thread_smp_wait_for_initialize:
la t3, _tx_thread_smp_release_cores_flag // Release flag address
_tx_thread_smp_wait_for_release:
LWU t6, 0(t3) // Pickup release flag
LOAD t6, 0(t3) // Pickup release flag
beqz t6, _tx_thread_smp_wait_for_release
/* Acknowledge the release by clearing this hart's system state. */
sw x0, 0(t5) // Set this hart's system state to zero
STORE x0, 0(t5) // Set this hart's system state to zero
/* Wait for core 0 to finish initialization. */
_tx_thread_smp_wait_for_core0:
LWU t6, 0(t4) // Pickup core 0 system state
LOAD t6, 0(t4) // Pickup core 0 system state
bnez t6, _tx_thread_smp_wait_for_core0
/* Prepare interrupt state */
+6 -6
View File
@@ -18,23 +18,23 @@ _tx_thread_smp_protect:
la t1, _tx_thread_smp_protection
/* If this hart already owns protection, just nest the count. */
LWU t3, 4(t1) // Pickup owning hart
LOAD t3, 1*REGBYTES(t1) // Pickup owning hart
beq t3, t2, _owned // Already owned by this hart
/* Try to get the protection. */
LWU t4, 0(t1) // Pickup protection flag
LOAD t4, 0(t1) // Pickup protection flag
bnez t4, _protection_busy // If set, protection is busy
li t4, 1 // Build lock value
amoswap.w.aq t5, t4, (t1) // Attempt to get protection
AMOSWAP_AQ t5, t4, (t1) // Attempt to get protection
bnez t5, _protection_busy // If old value != 0, retry
fence rw, rw // Ensure lock acquisition is visible
sw t2, 4(t1) // Save owning hart
STORE t2, 1*REGBYTES(t1) // Save owning hart
_owned:
LWU t5, 8(t1) // Pickup ownership count
LOAD t5, 2*REGBYTES(t1) // Pickup ownership count
addi t5, t5, 1 // Increment ownership count
sw t5, 8(t1) // Store ownership count
STORE t5, 2*REGBYTES(t1) // Store ownership count
fence rw, rw // Publish owner/count before return
ret
@@ -17,14 +17,14 @@ _tx_thread_smp_unprotect:
la t2, _tx_thread_smp_protection
/* Only the owning hart may release the protection. */
LWU t3, 4(t2) // Pickup owning hart
LOAD t3, 1*REGBYTES(t2) // Pickup owning hart
bne t1, t3, _still_protected // Not owner, skip release
/* Pickup and decrement the protection count. */
LWU t3, 8(t2) // Pickup protection count
LOAD t3, 2*REGBYTES(t2) // Pickup protection count
beqz t3, _still_protected // Already cleared
addi t3, t3, -1 // Decrement protection count
sw t3, 8(t2) // Store new count
STORE t3, 2*REGBYTES(t2) // Store new count
bnez t3, _still_protected // Still nested, stay protected
/* If preemption is disabled, keep protection in force. */
@@ -34,8 +34,8 @@ _tx_thread_smp_unprotect:
/* Release the protection. */
li t3, -1 // Invalid owner value
sw t3, 4(t2) // Mark owning hart invalid
amoswap.w.rl x0, x0, (t2) // Release protection flag
STORE t3, 1*REGBYTES(t2) // Mark owning hart invalid
AMOSWAP_RL x0, x0, (t2) // Release protection flag
_still_protected:
csrw mstatus, a0 // Restore interrupt posture
+2 -2
View File
@@ -138,7 +138,7 @@ If floating point support:
Stack Bottom: (higher memory address) */
LOAD t0, TX_THREAD_STACK_END_OFFSET(a0) // Pickup end of stack area
LOAD t0, 4*REGBYTES(a0) // Pickup end of stack area
andi t0, t0, -16 // Ensure 16-byte alignment
/* Actually build the stack frame. */
@@ -224,6 +224,6 @@ If floating point support:
STORE t0, 2*REGBYTES(a0) // Save stack pointer in thread's
addi t1, x0, 1 // Build ready flag
sw t1, TX_THREAD_SMP_LOCK_READY_BIT_OFFSET(a0) // Set ready flag
STORE t1, TX_THREAD_SMP_LOCK_READY_BIT_OFFSET(a0) // Set ready flag
ret // control block and return
/* } */
@@ -136,7 +136,7 @@ _tx_thread_system_return:
#endif
csrr t5, mhartid // Pickup current hart ID
slli t6, t5, 2 // Build per-hart ULONG offset
slli t6, t5, LOG_REGBYTES // Build per-hart ULONG offset
slli t5, t5, LOG_REGBYTES // Build per-hart pointer offset
la t0, _tx_thread_current_ptr // Pickup base of current-thread array
@@ -158,7 +158,7 @@ _tx_thread_system_return:
la t4, _tx_timer_time_slice // Pickup base of time-slice array
add t4, t4, t6 // Select this hart's time-slice slot
lw t3, 0(t4) // Pickup time slice value
LOAD t3, 0(t4) // Pickup time slice value
la t2, _tx_thread_schedule // Pickup address of scheduling loop
beqz t3, _tx_thread_dont_save_ts // If no time-slice, don't save it
@@ -166,8 +166,8 @@ _tx_thread_system_return:
/* _tx_thread_current_ptr -> tx_thread_time_slice = _tx_timer_time_slice;
_tx_timer_time_slice = 0; */
sw t3, TX_THREAD_TIME_SLICE_OFFSET(t1) // Save current time-slice for thread
sw x0, 0(t4) // Clear time-slice variable
STORE t3, 6*REGBYTES(t1) // Save current time-slice for thread
STORE x0, 0(t4) // Clear time-slice variable
/* } */
_tx_thread_dont_save_ts:
@@ -181,18 +181,18 @@ _tx_thread_dont_save_ts:
fence rw, rw // Publish current-thread clear before ready token
addi t3, t1, TX_THREAD_SMP_LOCK_READY_BIT_OFFSET // Pickup lock/ready-bit address
li t4, 1 // Build ready token
amoswap.w.rl x0, t4, (t3) // Restore ready token
AMOSWAP_RL x0, t4, (t3) // Restore ready token
/* Clear protection state. */
la t3, _tx_thread_preempt_disable // Pickup preempt-disable address
sw x0, 0(t3) // Clear preempt disable flag
la t3, _tx_thread_smp_protection // Pickup protection structure
sw x0, 8(t3) // Clear protection count
STORE x0, 2*REGBYTES(t3) // Clear protection count
li t4, -1 // Build invalid owner value
sw t4, 4(t3) // Invalidate owning hart
STORE t4, 1*REGBYTES(t3) // Invalidate owning hart
fence rw, rw // Ensure shared accesses complete before unlock
sw x0, 0(t3) // Clear protection in-force flag
STORE x0, 0(t3) // Clear protection in-force flag
jr t2 // Return to thread scheduler
/* } */