lib: sbi: fix atomic_add_return

A long may be 4 or 8 bytes wide depending on the target (RV32 vs.
RV64), but amoadd.w only operates on 4 bytes, hence this patch: select
amoadd.w or amoadd.d based on __SIZEOF_LONG__. atomic_sub_return() is
also rewritten to delegate to atomic_add_return() so it picks up the
same fix.
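
For reference, the fixed routine is semantically equivalent to the
GCC/Clang __atomic builtin form below; this is an illustrative sketch,
not part of the patch:

	/* Illustrative only: same semantics via compiler builtins, which
	 * emit the correctly sized AMO (amoadd.w on RV32, amoadd.d on
	 * RV64) for the operand type automatically. */
	static inline long atomic_add_return_sketch(long *counter, long value)
	{
		/* __atomic_fetch_add returns the old value, so add `value`
		 * to return the new one, matching `return ret + value;`. */
		return __atomic_fetch_add(counter, value, __ATOMIC_ACQ_REL) + value;
	}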

Signed-off-by: Xiang W <wxjstz@126.com>
Reviewed-by: Anup Patel <anup.patel@wdc.com>
Author:    Xiang W <wxjstz@126.com>
Date:      2021-04-06 11:34:38 +08:00
Committer: Anup Patel
Commit:    70ffc3e2e6
Parent:    4d8e2f135d

@@ -28,25 +28,23 @@ void atomic_write(atomic_t *atom, long value)
 long atomic_add_return(atomic_t *atom, long value)
 {
 	long ret;
+#if __SIZEOF_LONG__ == 4
 	__asm__ __volatile__("	amoadd.w.aqrl	%1, %2, %0"
 			     : "+A"(atom->counter), "=r"(ret)
 			     : "r"(value)
 			     : "memory");
+#elif __SIZEOF_LONG__ == 8
+	__asm__ __volatile__("	amoadd.d.aqrl	%1, %2, %0"
+			     : "+A"(atom->counter), "=r"(ret)
+			     : "r"(value)
+			     : "memory");
+#endif
 	return ret + value;
 }
 
 long atomic_sub_return(atomic_t *atom, long value)
 {
-	long ret;
-	__asm__ __volatile__("	amoadd.w.aqrl	%1, %2, %0"
-			     : "+A"(atom->counter), "=r"(ret)
-			     : "r"(-value)
-			     : "memory");
-	return ret - value;
+	return atomic_add_return(atom, -value);
 }
 
 #define __axchg(ptr, new, size) \
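
A hypothetical usage sketch (names invented for illustration), relying
only on the counter field visible in the diff above:

	/* Assumes atomic_t is a struct with a long `counter` member,
	 * as implied by atom->counter in the patched code. */
	atomic_t refcount = { .counter = 0 };

	long n;
	n = atomic_add_return(&refcount, 1);	/* counter = 1, returns 1 */
	n = atomic_sub_return(&refcount, 1);	/* now calls
						 * atomic_add_return(&refcount, -1);
						 * counter = 0, returns 0 */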