lib: sbi: Remove xchg/cmpxchg implemented via lr/sc

lr/sc is part of the A extension. If the A extension is not supported,
lr/sc cannot be used, so remove the lr/sc-based xchg/cmpxchg
implementations.

Signed-off-by: Xiang W <wxjstz@126.com>
Reviewed-by: Anup Patel <anup@brainfault.org>
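For context (not part of the commit message): the compiler builtins used by the retained code path also require the A extension on RISC-V, so the removed lr/sc macros never provided a real fallback for targets without it. A minimal sketch in C, assuming a GCC/Clang RISC-V target with the A extension enabled (sketch_xchg/sketch_cmpxchg are hypothetical names, not OpenSBI functions):

/* Sketch only: the builtin-based forms the kept implementation relies on. */
static inline long sketch_xchg(volatile long *ptr, long newval)
{
	/* GCC/Clang builtin; typically lowered to amoswap.d (amoswap.w on RV32). */
	return __atomic_exchange_n(ptr, newval, __ATOMIC_ACQ_REL);
}

static inline long sketch_cmpxchg(volatile long *ptr, long oldval, long newval)
{
	/* Same legacy __sync builtin as the retained atomic_cmpxchg() below. */
	return __sync_val_compare_and_swap(ptr, oldval, newval);
}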
@@ -12,6 +12,10 @@
 #include <sbi/riscv_atomic.h>
 #include <sbi/riscv_barrier.h>
 
+#ifndef __riscv_atomic
+#error "opensbi strongly relies on the A extension of RISC-V"
+#endif
+
 long atomic_read(atomic_t *atom)
 {
 	long ret = atom->counter;
@@ -79,131 +83,29 @@ long atomic_sub_return(atomic_t *atom, long value)
 		(__typeof__(*(ptr))) __axchg((ptr), _x_, sizeof(*(ptr)));	\
 	})
 
-
-#define __xchg(ptr, new, size)                                            \
-	({                                                                \
-		__typeof__(ptr) __ptr	 = (ptr);                         \
-		__typeof__(*(ptr)) __new = (new);                         \
-		__typeof__(*(ptr)) __ret;                                 \
-		register unsigned int __rc;                               \
-		switch (size) {                                           \
-		case 4:                                                   \
-			__asm__ __volatile__("0:	lr.w %0, %2\n"    \
-					     "	sc.w.rl %1, %z3, %2\n"    \
-					     "	bnez %1, 0b\n"            \
-					     "	fence rw, rw\n"           \
-					     : "=&r"(__ret), "=&r"(__rc), \
-					       "+A"(*__ptr)               \
-					     : "rJ"(__new)                \
-					     : "memory");                 \
-			break;                                            \
-		case 8:                                                   \
-			__asm__ __volatile__("0:	lr.d %0, %2\n"    \
-					     "	sc.d.rl %1, %z3, %2\n"    \
-					     "	bnez %1, 0b\n"            \
-					     "	fence rw, rw\n"           \
-					     : "=&r"(__ret), "=&r"(__rc), \
-					       "+A"(*__ptr)               \
-					     : "rJ"(__new)                \
-					     : "memory");                 \
-			break;                                            \
-		default:                                                  \
-			break;                                            \
-		}                                                         \
-		__ret;                                                    \
-	})
-
-#define xchg(ptr, n)                                                     \
-	({                                                               \
-		__typeof__(*(ptr)) _n_ = (n);                            \
-		(__typeof__(*(ptr))) __xchg((ptr), _n_, sizeof(*(ptr))); \
-	})
-
-#define __cmpxchg(ptr, old, new, size)                                    \
-	({                                                                \
-		__typeof__(ptr) __ptr	 = (ptr);                         \
-		__typeof__(*(ptr)) __old = (old);                         \
-		__typeof__(*(ptr)) __new = (new);                         \
-		__typeof__(*(ptr)) __ret;                                 \
-		register unsigned int __rc;                               \
-		switch (size) {                                           \
-		case 4:                                                   \
-			__asm__ __volatile__("0:	lr.w %0, %2\n"    \
-					     "	bne  %0, %z3, 1f\n"       \
-					     "	sc.w.rl %1, %z4, %2\n"    \
-					     "	bnez %1, 0b\n"            \
-					     "	fence rw, rw\n"           \
-					     "1:\n"                       \
-					     : "=&r"(__ret), "=&r"(__rc), \
-					       "+A"(*__ptr)               \
-					     : "rJ"(__old), "rJ"(__new)   \
-					     : "memory");                 \
-			break;                                            \
-		case 8:                                                   \
-			__asm__ __volatile__("0:	lr.d %0, %2\n"    \
-					     "	bne %0, %z3, 1f\n"        \
-					     "	sc.d.rl %1, %z4, %2\n"    \
-					     "	bnez %1, 0b\n"            \
-					     "	fence rw, rw\n"           \
-					     "1:\n"                       \
-					     : "=&r"(__ret), "=&r"(__rc), \
-					       "+A"(*__ptr)               \
-					     : "rJ"(__old), "rJ"(__new)   \
-					     : "memory");                 \
-			break;                                            \
-		default:                                                  \
-			break;                                            \
-		}                                                         \
-		__ret;                                                    \
-	})
-
-#define cmpxchg(ptr, o, n)                                          \
-	({                                                          \
-		__typeof__(*(ptr)) _o_ = (o);                       \
-		__typeof__(*(ptr)) _n_ = (n);                       \
-		(__typeof__(*(ptr)))                                \
-			__cmpxchg((ptr), _o_, _n_, sizeof(*(ptr))); \
-	})
-
 long atomic_cmpxchg(atomic_t *atom, long oldval, long newval)
 {
-#ifdef __riscv_atomic
 	return __sync_val_compare_and_swap(&atom->counter, oldval, newval);
-#else
-	return cmpxchg(&atom->counter, oldval, newval);
-#endif
 }
 
 long atomic_xchg(atomic_t *atom, long newval)
 {
 	/* Atomically set new value and return old value. */
-#ifdef __riscv_atomic
 	return axchg(&atom->counter, newval);
-#else
-	return xchg(&atom->counter, newval);
-#endif
 }
 
 unsigned int atomic_raw_xchg_uint(volatile unsigned int *ptr,
 				  unsigned int newval)
 {
 	/* Atomically set new value and return old value. */
-#ifdef __riscv_atomic
 	return axchg(ptr, newval);
-#else
-	return xchg(ptr, newval);
-#endif
 }
 
 unsigned long atomic_raw_xchg_ulong(volatile unsigned long *ptr,
 				    unsigned long newval)
 {
 	/* Atomically set new value and return old value. */
-#ifdef __riscv_atomic
 	return axchg(ptr, newval);
-#else
-	return xchg(ptr, newval);
-#endif
 }
 
 #if (__SIZEOF_POINTER__ == 8)
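Usage note (hypothetical example, not from this commit): callers keep using the retained helpers unchanged; for instance, a crude test-and-set lock built on atomic_raw_xchg_uint() could look like this (demo_lock and the demo_* functions are made-up names for illustration):

#include <sbi/riscv_atomic.h>

/* Hypothetical caller, for illustration only. */
static volatile unsigned int demo_lock;

static void demo_lock_acquire(void)
{
	/* Spin until the previous value was 0, i.e. this hart flipped 0 -> 1. */
	while (atomic_raw_xchg_uint(&demo_lock, 1))
		;
}

static void demo_lock_release(void)
{
	atomic_raw_xchg_uint(&demo_lock, 0);
}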