Diffstat (limited to 'recipes-extended/xen/files/xen-arm64-implement-atomic-fetch-add.patch')
-rw-r--r--  recipes-extended/xen/files/xen-arm64-implement-atomic-fetch-add.patch  50
1 file changed, 0 insertions(+), 50 deletions(-)
diff --git a/recipes-extended/xen/files/xen-arm64-implement-atomic-fetch-add.patch b/recipes-extended/xen/files/xen-arm64-implement-atomic-fetch-add.patch
deleted file mode 100644
index 6e957a50..00000000
--- a/recipes-extended/xen/files/xen-arm64-implement-atomic-fetch-add.patch
+++ /dev/null
@@ -1,50 +0,0 @@
-From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
-From: Christopher Clark <christopher.w.clark@gmail.com>
-Date: Fri, 26 June 2020 16:34:00 -0800
-Subject: [PATCH] xen: implement atomic op to fix arm64 compilation
-
-Xen's ARM implementation of arch_fetch_and_add since f9cc3cd9
-uses a builtin, despite the build being performed with -fno-builtin.
-With gcc 10.1.0, this now breaks prelinking spinlock.c, so
-implement the one atomic operation that is required with logic
-derived from Linux's atomic_ll_sc.h: ATOMIC_FETCH_OP and comparison with
-the binary produced with and without the patch with gcc 9.2.0.
-
-Signed-off-by: Christopher Clark <christopher.w.clark@gmail.com>
-diff --git a/xen/include/asm-arm/system.h b/xen/include/asm-arm/system.h
-index e5d062667d..c46dd3ac71 100644
---- a/xen/include/asm-arm/system.h
-+++ b/xen/include/asm-arm/system.h
-@@ -55,7 +55,32 @@ static inline int local_abort_is_enabled(void)
-     return !(flags & PSR_ABT_MASK);
- }
- 
-+#ifdef CONFIG_ARM_64
-+
-+/* see atomic_ll_sc.h: ATOMIC_FETCH_OP(name, mb, acq, rel, cl, op, asm_op, constraint) */
-+static inline int arch_fetch_and_add(unsigned int *ptr, unsigned long i)
-+{
-+    int register lptr asm("x0");
-+    int register result asm("w1");
-+    int register newval asm("w2");
-+    int register status asm("w3");
-+
-+    asm volatile(
-+    "   mov     %[lptr], %[ptr]\n"
-+    "1: ldxr    %w[result], [%[lptr]]\n"
-+    "   add     %w[newval], %w[result], %w[i]\n"
-+    "   stlxr   %w[status], %w[newval], [%[lptr]]\n"
-+    "   cbnz    %w[status], 1b\n"
-+    "   dmb     ish\n"
-+    : [result] "=&r" (result), [lptr] "=&r" (lptr), [newval] "=&r" (newval), [status] "=&r" (status), [i] "+r" (i), "+Q" (*ptr)
-+    : [ptr] "r" (ptr), "r" (i)
-+    : "memory");
-+
-+    return result;
-+}
-+#else
- #define arch_fetch_and_add(x, v) __sync_fetch_and_add(x, v)
-+#endif
- 
- extern struct vcpu *__context_switch(struct vcpu *prev, struct vcpu *next);
- 
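The deleted patch open-codes a classic AArch64 load-exclusive/store-exclusive (LL/SC) loop so that Xen's spinlock code no longer depends on the __sync_fetch_and_add builtin, which broke prelinking of spinlock.c with gcc 10.1.0 under -fno-builtin. The sketch below shows the same fetch-and-add pattern as a standalone program. It is a minimal illustration, not the Xen code: the function name fetch_and_add, the simplified operand list, and the main() demo are assumptions for this example, and it builds only with a GCC-compatible compiler targeting AArch64.

/* Minimal standalone sketch of the LL/SC fetch-and-add loop that the
 * removed patch open-coded for arm64. Names are illustrative, not Xen's.
 * Build (assumption): gcc -O2 on an AArch64 target.
 */
#include <stdio.h>

static inline unsigned int fetch_and_add(unsigned int *ptr, unsigned int i)
{
    unsigned int result;  /* value of *ptr before the add */
    unsigned int newval;  /* value written back */
    unsigned int status;  /* 0 if the store-exclusive succeeded */

    asm volatile(
        "1: ldxr    %w[result], %[mem]\n"             /* load-exclusive old value */
        "   add     %w[newval], %w[result], %w[i]\n"
        "   stlxr   %w[status], %w[newval], %[mem]\n" /* store-exclusive, release */
        "   cbnz    %w[status], 1b\n"                 /* reservation lost: retry  */
        "   dmb     ish\n"                            /* full barrier, as in the patch */
        : [result] "=&r" (result), [newval] "=&r" (newval),
          [status] "=&r" (status), [mem] "+Q" (*ptr)
        : [i] "r" (i)
        : "memory");

    return result;
}

int main(void)
{
    unsigned int counter = 40;
    unsigned int old = fetch_and_add(&counter, 2);
    printf("old=%u new=%u\n", old, counter); /* prints: old=40 new=42 */
    return 0;
}

The trailing dmb ish matches the full-barrier flavour of Linux's ATOMIC_FETCH_OP that the patch comment cites: ldxr has no acquire semantics and stlxr orders only the store, so the explicit barrier makes the whole operation fully ordered. On toolchains where builtins are usable, the same operation is simply __atomic_fetch_add(ptr, i, __ATOMIC_SEQ_CST) or the __sync_fetch_and_add() that the non-arm64 branch of the hunk keeps.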