diff --git a/firmware/payloads/test_head.S b/firmware/payloads/test_head.S
index 9a87e56f..e4fcaaca 100644
--- a/firmware/payloads/test_head.S
+++ b/firmware/payloads/test_head.S
@@ -22,6 +22,7 @@
 
 #define REG_L	__REG_SEL(ld, lw)
 #define REG_S	__REG_SEL(sd, sw)
+#define REG_ADDW	__REG_SEL(addw, add)
 
 	.section .entry, "ax", %progbits
 	.align 3
@@ -35,7 +36,7 @@ _start:
 #elif defined(__riscv_zalrsc)
 _sc_fail:
 	lr.w	t0, (a3)
-	addw	t1, t0, a2
+	REG_ADDW	t1, t0, a2
 	sc.w	t1, t1, (a3)
 	bnez	t1, _sc_fail
 	move	a3, t0
diff --git a/lib/sbi/riscv_atomic.c b/lib/sbi/riscv_atomic.c
index fcf07f27..d9abe343 100644
--- a/lib/sbi/riscv_atomic.c
+++ b/lib/sbi/riscv_atomic.c
@@ -48,7 +48,7 @@ long atomic_add_return(atomic_t *atom, long value)
 	long ret, temp;
 #if __SIZEOF_LONG__ == 4
 	__asm__ __volatile__("1:	lr.w.aqrl	%1,%0\n"
-			     "	addw	%2,%1,%3\n"
+			     "	add	%2,%1,%3\n"
 			     "	sc.w.aqrl	%2,%2,%0\n"
 			     "	bnez	%2,1b"
 			     : "+A"(atom->counter), "=&r"(ret), "=&r"(temp)
diff --git a/lib/sbi/riscv_locks.c b/lib/sbi/riscv_locks.c
index c29a9659..e253b1b7 100644
--- a/lib/sbi/riscv_locks.c
+++ b/lib/sbi/riscv_locks.c
@@ -57,7 +57,11 @@ void spin_lock(spinlock_t *lock)
 		"	amoadd.w.aqrl	%0, %4, %3\n"
 #elif defined(__riscv_zalrsc)
 		"3:	lr.w.aqrl	%0, %3\n"
+#if __riscv_xlen == 64
 		"	addw	%1, %0, %4\n"
+#elif __riscv_xlen == 32
+		"	add	%1, %0, %4\n"
+#endif
 		"	sc.w.aqrl	%1, %1, %3\n"
 		"	bnez	%1, 3b\n"
 #else