Subject: [PATCH 12/45] C++: x86: Turn cmpxchg_double() & co. into inline template functions

Turn cmpxchg_double() and similar functions into inline C++ template
functions. This produces more robust source, as all the casting that the C
macros require is then unnecessary.
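
For illustration, a hypothetical caller (not part of this patch) might look
like this; the template parameters P1/P2 are deduced from the pointer
arguments and size-checked against long at compile time, with no casting at
the call site:

	struct pair {
		unsigned long lo;
		unsigned long hi;	/* must directly follow 'lo' */
	};

	static bool pair_replace(struct pair *p,
				 unsigned long old_lo, unsigned long old_hi,
				 unsigned long new_lo, unsigned long new_hi)
	{
		/* P1 and P2 are deduced as unsigned long; a pointer to a
		 * differently-sized type would hit __cmpxchg_wrong_size()
		 * instead of being silently cast.
		 */
		return cmpxchg_double(&p->lo, &p->hi,
				      old_lo, old_hi, new_lo, new_hi);
	}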

Signed-off-by: David Howells <dhowells@redhat.com>
---

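Note for reviewers: the explicit "sete %0" in the old macro becomes
CC_SET(z)/CC_OUT(z) from asm.h, which expand roughly as follows, so a
compiler that supports asm flag outputs can consume ZF directly instead of
emitting a separate sete:

	#ifdef __GCC_ASM_FLAG_OUTPUTS__
	# define CC_SET(c)	"\n\t/* output condition code " #c "*/\n"
	# define CC_OUT(c)	"=@cc" #c
	#else
	# define CC_SET(c)	"\n\tset" #c " %[_cc_" #c "]\n"
	# define CC_OUT(c)	[_cc_ ## c] "=qm"
	#endif

The "%c4" in the asm template prints operand 4 (the "i" constraint,
2 * sizeof(long)) as a bare constant, so the instruction assembles as
cmpxchg16b on x86_64 and cmpxchg8b on 32-bit, as before.
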
arch/x86/include/asm/cmpxchg.h | 93 +++++++++++++++++++++++++++++++---------
1 file changed, 71 insertions(+), 22 deletions(-)

diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
index 4bbf947c88a2..2ffe1c621eb1 100644
--- a/arch/x86/include/asm/cmpxchg.h
+++ b/arch/x86/include/asm/cmpxchg.h
@@ -180,27 +180,76 @@ static inline P sync_cmpxchg(P *ptr, P old, N rep)
# include <asm/cmpxchg_64.h>
#endif

-#define __cmpxchg_double(pfx, p1, p2, o1, o2, n1, n2)			\
-({									\
-	bool __ret;							\
-	__typeof__(*(p1)) __old1 = (o1), __new1 = (n1);			\
-	__typeof__(*(p2)) __old2 = (o2), __new2 = (n2);			\
-	BUILD_BUG_ON(sizeof(*(p1)) != sizeof(long));			\
-	BUILD_BUG_ON(sizeof(*(p2)) != sizeof(long));			\
-	VM_BUG_ON((unsigned long)(p1) % (2 * sizeof(long)));		\
-	VM_BUG_ON((unsigned long)((p1) + 1) != (unsigned long)(p2));	\
-	asm volatile(pfx "cmpxchg%c4b %2; sete %0"			\
-		     : "=a" (__ret), "+d" (__old2),			\
-		       "+m" (*(p1)), "+m" (*(p2))			\
-		     : "i" (2 * sizeof(long)), "a" (__old1),		\
-		       "b" (__new1), "c" (__new2));			\
-	__ret;								\
-})
-
-#define cmpxchg_double(p1, p2, o1, o2, n1, n2)				\
-	__cmpxchg_double(LOCK_PREFIX, p1, p2, o1, o2, n1, n2)
-
-#define cmpxchg_double_local(p1, p2, o1, o2, n1, n2)			\
-	__cmpxchg_double(, p1, p2, o1, o2, n1, n2)
+template <typename P1, typename P2, typename N1, typename N2>
+static inline bool __cmpxchg_double(P1 *p1, P2 *p2,
+				    P1 old1, P2 old2,
+				    N1 rep1, N2 rep2,
+				    enum cmpxchg_lock lock = __lock_always)
+{
+	bool ret;
+
+	if (sizeof(P1) != sizeof(long)) __cmpxchg_wrong_size();
+	if (sizeof(P2) != sizeof(long)) __cmpxchg_wrong_size();
+
+	//VM_BUG_ON((unsigned long)(p1) % (2 * sizeof(long)));
+	//VM_BUG_ON((unsigned long)((p1) + 1) != (unsigned long)(p2));
+
+	switch (lock) {
+	case __lock_local:
+		asm volatile("cmpxchg%c4b %[ptr1]"
+			     CC_SET(z)
+			     : CC_OUT(z) (ret),
+			       [old2] "+d" (old2),
+			       [ptr1] "+m" (*p1),
+			       [ptr2] "+m" (*p2)
+			     : "i" (2 * sizeof(long)),
+			       [old1] "a" (old1),
+			       [rep1] "b" (rep1),
+			       [rep2] "c" (rep2));
+		break;
+	case __lock_smp:
+		asm volatile(LOCK_PREFIX "cmpxchg%c4b %[ptr1]"
+			     CC_SET(z)
+			     : CC_OUT(z) (ret),
+			       [old2] "+d" (old2),
+			       [ptr1] "+m" (*p1),
+			       [ptr2] "+m" (*p2)
+			     : "i" (2 * sizeof(long)),
+			       [old1] "a" (old1),
+			       [rep1] "b" (rep1),
+			       [rep2] "c" (rep2));
+		break;
+	case __lock_always:
+		asm volatile("lock; cmpxchg%c4b %[ptr1]"
+			     CC_SET(z)
+			     : CC_OUT(z) (ret),
+			       [old2] "+d" (old2),
+			       [ptr1] "+m" (*p1),
+			       [ptr2] "+m" (*p2)
+			     : "i" (2 * sizeof(long)),
+			       [old1] "a" (old1),
+			       [rep1] "b" (rep1),
+			       [rep2] "c" (rep2));
+		break;
+	}
+
+	return ret;
+}
+
+template <typename P1, typename P2, typename N1, typename N2>
+static inline bool cmpxchg_double(P1 *p1, P2 *p2,
+				  P1 old1, P2 old2,
+				  N1 rep1, N2 rep2)
+{
+	return __cmpxchg_double(p1, p2, old1, old2, rep1, rep2, __lock_smp);
+}
+
+template <typename P1, typename P2, typename N1, typename N2>
+static inline bool cmpxchg_double_local(P1 *p1, P2 *p2,
+					P1 old1, P2 old2,
+					N1 rep1, N2 rep2)
+{
+	return __cmpxchg_double(p1, p2, old1, old2, rep1, rep2, __lock_local);
+}

#endif /* ASM_X86_CMPXCHG_H */