[Linux-Xtensa] [PATCH] xtensa: add exclusive access support

Max Filippov jcmvbkbc at gmail.com
Sun Aug 9 06:28:17 UTC 2020


Add XCHAL definitions for S32C1I and EXCLUSIVE options to
xtensa-config.h, include it in places that implement atomic operations
and add implementations with exclusive access option opcodes.

Signed-off-by: Max Filippov <jcmvbkbc at gmail.com>
---
 libc/sysdeps/linux/xtensa/bits/atomic.h       | 128 ++++++++++++++++++
 .../sysdeps/linux/xtensa/bits/xtensa-config.h |   6 +
 .../linuxthreads/sysdeps/xtensa/pt-machine.h  |  56 ++++++++
 .../nptl/sysdeps/xtensa/pthread_spin_lock.S   |  16 +++
 .../sysdeps/xtensa/pthread_spin_trylock.S     |  17 +++
 5 files changed, 223 insertions(+)

diff --git a/libc/sysdeps/linux/xtensa/bits/atomic.h b/libc/sysdeps/linux/xtensa/bits/atomic.h
index efc027d1ac25..18b809998054 100644
--- a/libc/sysdeps/linux/xtensa/bits/atomic.h
+++ b/libc/sysdeps/linux/xtensa/bits/atomic.h
@@ -18,6 +18,7 @@
 #ifndef _BITS_ATOMIC_H
 #define _BITS_ATOMIC_H  1
 
+#include <bits/xtensa-config.h>
 #include <inttypes.h>
 
 typedef int32_t atomic32_t;
@@ -50,6 +51,128 @@ typedef uintmax_t uatomic_max_t;
 #define __arch_compare_and_exchange_bool_16_rel(mem, newval, oldval) \
       (abort (), 0)
 
+#if XCHAL_HAVE_EXCLUSIVE
+
+/* Atomically store NEWVAL in *MEM if *MEM is equal to OLDVAL.
+   Return the old *MEM value.  */
+
+#define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval)  \
+  ({__typeof__(*(mem)) __tmp, __value;                               \
+    __asm__ __volatile__(                                            \
+      "       memw                            \n"                    \
+      "1:     l32ex   %0, %2                  \n"                    \
+      "       bne     %0, %4, 2f              \n"                    \
+      "       mov     %1, %3                  \n"                    \
+      "       s32ex   %1, %2                  \n"                    \
+      "       getex   %1                      \n"                    \
+      "       beqz    %1, 1b                  \n"                    \
+      "       memw                            \n"                    \
+      "2:                                     \n"                    \
+      : "=&a" (__value), "=&a" (__tmp)                               \
+      : "a" (mem), "a" (newval), "a" (oldval)                        \
+      : "memory" );                                                  \
+    __value;                                                         \
+  })
+
+/* Atomically store NEWVAL in *MEM if *MEM is equal to OLDVAL.
+   Return zero if *MEM was changed or non-zero if no exchange happened.  */
+
+#define __arch_compare_and_exchange_bool_32_acq(mem, newval, oldval) \
+  ({__typeof__(*(mem)) __tmp, __value;                               \
+    __asm__ __volatile__(                                            \
+      "       memw                            \n"                    \
+      "1:     l32ex   %0, %2                  \n"                    \
+      "       sub     %0, %4, %0              \n"                    \
+      "       bnez    %0, 2f                  \n"                    \
+      "       mov     %1, %3                  \n"                    \
+      "       s32ex   %1, %2                  \n"                    \
+      "       getex   %1                      \n"                    \
+      "       beqz    %1, 1b                  \n"                    \
+      "       movi    %0, 0                   \n"                    \
+      "       memw                            \n"                    \
+      "2:                                     \n"                    \
+      : "=&a" (__value), "=&a" (__tmp)                               \
+      : "a" (mem), "a" (newval), "a" (oldval)                        \
+      : "memory" );                                                  \
+    __value != 0;                                                    \
+  })
+
+/* Store NEWVALUE in *MEM and return the old value.  */
+
+#define __arch_exchange_32_acq(mem, newval)                          \
+  ({__typeof__(*(mem)) __tmp, __value;                               \
+    __asm__ __volatile__(                                            \
+      "       memw                            \n"                    \
+      "1:     l32ex   %0, %2                  \n"                    \
+      "       mov     %1, %3                  \n"                    \
+      "       s32ex   %1, %2                  \n"                    \
+      "       getex   %1                      \n"                    \
+      "       beqz    %1, 1b                  \n"                    \
+      "       memw                            \n"                    \
+      : "=&a" (__value), "=&a" (__tmp)                               \
+      : "a" (mem), "a" (newval)                                      \
+      : "memory" );                                                  \
+    __value;                                                         \
+  })
+
+/* Add VALUE to *MEM and return the old value of *MEM.  */
+
+#define __arch_atomic_exchange_and_add_32(mem, value)                \
+  ({__typeof__(*(mem)) __tmp, __value;                               \
+    __asm__ __volatile__(                                            \
+      "       memw                            \n"                    \
+      "1:     l32ex   %0, %2                  \n"                    \
+      "       add     %1, %0, %3              \n"                    \
+      "       s32ex   %1, %2                  \n"                    \
+      "       getex   %1                      \n"                    \
+      "       beqz    %1, 1b                  \n"                    \
+      "       memw                            \n"                    \
+      : "=&a" (__value), "=&a" (__tmp)                               \
+      : "a" (mem), "a" (value)                                       \
+      : "memory" );                                                  \
+    __value;                                                         \
+  })
+
+/* Subtract VALUE from *MEM and return the old value of *MEM.
+   (Old value is in %0: GETEX overwrites %1 with the S32EX result.)  */
+#define __arch_atomic_exchange_and_sub_32(mem, value)                \
+  ({__typeof__(*(mem)) __tmp, __value;                               \
+    __asm__ __volatile__(                                            \
+      "       memw                            \n"                    \
+      "1:     l32ex   %0, %2                  \n"                    \
+      "       sub     %1, %0, %3              \n"                    \
+      "       s32ex   %1, %2                  \n"                    \
+      "       getex   %1                      \n"                    \
+      "       beqz    %1, 1b                  \n"                    \
+      "       memw                            \n"                    \
+      : "=&a" (__value), "=&a" (__tmp)                               \
+      : "a" (mem), "a" (value)                                       \
+      : "memory" );                                                  \
+    __value;                                                         \
+  })
+
+/* Decrement *MEM if it is > 0, and return the old value.  */
+
+#define __arch_atomic_decrement_if_positive_32(mem)                  \
+  ({__typeof__(*(mem)) __tmp, __value;                               \
+    __asm__ __volatile__(                                            \
+      "       memw                            \n"                    \
+      "1:     l32ex   %0, %2                  \n"                    \
+      "       blti    %0, 1, 2f               \n"                    \
+      "       addi    %1, %0, -1              \n"                    \
+      "       s32ex   %1, %2                  \n"                    \
+      "       getex   %1                      \n"                    \
+      "       beqz    %1, 1b                  \n"                    \
+      "       memw                            \n"                    \
+      "2:                                     \n"                    \
+      : "=&a" (__value), "=&a" (__tmp)                               \
+      : "a" (mem)                                                    \
+      : "memory" );                                                  \
+    __value;                                                         \
+  })
+
+#elif XCHAL_HAVE_S32C1I
+
 /* Atomically store NEWVAL in *MEM if *MEM is equal to OLDVAL.
    Return the old *MEM value.  */
 
@@ -156,6 +279,11 @@ typedef uintmax_t uatomic_max_t;
     __value;                                                         \
   })
 
+#else
+
+#error No hardware atomic operations
+
+#endif
 
 /* These are the preferred public interfaces: */
 
diff --git a/libc/sysdeps/linux/xtensa/bits/xtensa-config.h b/libc/sysdeps/linux/xtensa/bits/xtensa-config.h
index 2e60af936e6e..b99928b1e058 100644
--- a/libc/sysdeps/linux/xtensa/bits/xtensa-config.h
+++ b/libc/sysdeps/linux/xtensa/bits/xtensa-config.h
@@ -43,4 +43,10 @@
 #undef XCHAL_NUM_AREGS
 #define XCHAL_NUM_AREGS			64
 
+#undef XCHAL_HAVE_S32C1I
+#define XCHAL_HAVE_S32C1I		1
+
+#undef XCHAL_HAVE_EXCLUSIVE
+#define XCHAL_HAVE_EXCLUSIVE		0
+
 #endif /* !XTENSA_CONFIG_H */
diff --git a/libpthread/linuxthreads/sysdeps/xtensa/pt-machine.h b/libpthread/linuxthreads/sysdeps/xtensa/pt-machine.h
index 82d9b540c611..0b7f58b635f3 100644
--- a/libpthread/linuxthreads/sysdeps/xtensa/pt-machine.h
+++ b/libpthread/linuxthreads/sysdeps/xtensa/pt-machine.h
@@ -21,6 +21,7 @@
 #ifndef _PT_MACHINE_H
 #define _PT_MACHINE_H   1
 
+#include <bits/xtensa-config.h>
 #include <sys/syscall.h>
 #include <asm/unistd.h>
 
@@ -34,6 +35,55 @@
 extern long int testandset (int *spinlock);
 extern int __compare_and_swap (long int *p, long int oldval, long int newval);
 
+#if XCHAL_HAVE_EXCLUSIVE
+
+/* Spinlock implementation; required.  */
+PT_EI long int
+testandset (int *spinlock)
+{
+	unsigned long tmp;
+	__asm__ volatile (
+"	memw				\n"
+"1:	l32ex	%0, %1			\n"
+"	bnez	%0, 2f			\n"
+"	movi	%0, 1			\n"
+"	s32ex	%0, %1			\n"
+"	getex	%0			\n"
+"	beqz	%0, 1b			\n"
+"	movi	%0, 0			\n"
+"	memw				\n"
+"2:					\n"
+	: "=&a" (tmp)
+	: "a" (spinlock)
+	: "memory"
+	);
+	return tmp;
+}
+
+PT_EI int
+__compare_and_swap (long int *p, long int oldval, long int newval)
+{
+        unsigned long tmp;
+        unsigned long value;
+        __asm__ volatile (
+"       memw                         \n"
+"1:     l32ex   %0, %2               \n"
+"       bne     %0, %4, 2f           \n"
+"       mov     %1, %3               \n"
+"       s32ex   %1, %2               \n"
+"       getex   %1                   \n"
+"       beqz    %1, 1b               \n"
+"       memw                         \n"
+"2:                                  \n"
+          : "=&a" (tmp), "=&a" (value)
+          : "a" (p), "a" (newval), "a" (oldval)
+          : "memory" );
+
+        return tmp == oldval;
+}
+
+#elif XCHAL_HAVE_S32C1I
+
 /* Spinlock implementation; required.  */
 PT_EI long int
 testandset (int *spinlock)
@@ -71,6 +121,12 @@ __compare_and_swap (long int *p, long int oldval, long int newval)
         return tmp == oldval;
 }
 
+#else
+
+#error No hardware atomic operations
+
+#endif
+
 /* Get some notion of the current stack.  Need not be exactly the top
    of the stack, just something somewhere in the current frame.  */
 #define CURRENT_STACK_FRAME __builtin_frame_address (0)
diff --git a/libpthread/nptl/sysdeps/xtensa/pthread_spin_lock.S b/libpthread/nptl/sysdeps/xtensa/pthread_spin_lock.S
index 3386afae9e58..3faac36da12b 100644
--- a/libpthread/nptl/sysdeps/xtensa/pthread_spin_lock.S
+++ b/libpthread/nptl/sysdeps/xtensa/pthread_spin_lock.S
@@ -15,16 +15,32 @@
    License along with the GNU C Library; see the file COPYING.LIB.  If
    not, see <http://www.gnu.org/licenses/>.  */
 
+#include <bits/xtensa-config.h>
 #include <sysdep.h>
 
 	.text
 ENTRY (pthread_spin_lock)
 
+#if XCHAL_HAVE_EXCLUSIVE
+	memw
+1:	l32ex	a3, a2
+	bnez	a3, 1b
+	movi	a3, 1
+	s32ex	a3, a2
+	getex	a3
+	beqz	a3, 1b
+	memw
+#elif XCHAL_HAVE_S32C1I
 	movi	a3, 0
 	wsr 	a3, scompare1
 	movi	a3, 1
 1:	s32c1i	a3, a2, 0
 	bnez	a3, 1b
+#else
+
+#error No hardware atomic operations
+
+#endif
 	movi	a2, 0
 
 	abi_ret
diff --git a/libpthread/nptl/sysdeps/xtensa/pthread_spin_trylock.S b/libpthread/nptl/sysdeps/xtensa/pthread_spin_trylock.S
index 72b2dda92310..0669682ec9bd 100644
--- a/libpthread/nptl/sysdeps/xtensa/pthread_spin_trylock.S
+++ b/libpthread/nptl/sysdeps/xtensa/pthread_spin_trylock.S
@@ -17,15 +17,32 @@
 
 #define _ERRNO_H 1
 #include <bits/errno.h>
+#include <bits/xtensa-config.h>
 #include <sysdep.h>
 
 	.text
 ENTRY (pthread_spin_trylock)
 
+#if XCHAL_HAVE_EXCLUSIVE
+	memw
+	l32ex	a3, a2
+	bnez	a3, 1f
+	movi	a3, 1
+	s32ex	a3, a2
+	getex	a3
+	addi	a3, a3, -1
+	memw
+1:
+#elif XCHAL_HAVE_S32C1I
 	movi	a3, 0
 	wsr 	a3, scompare1
 	movi	a3, 1
 	s32c1i	a3, a2, 0
+#else
+
+#error No hardware atomic operations
+
+#endif
 	movi	a2, EBUSY
 	moveqz	a2, a3, a3
 
-- 
2.20.1



More information about the linux-xtensa mailing list