From: Mark Rutland <mark.rutland@arm.com>
Subject: [PATCH 08/18] locking/atomic: ia64: use s64 for atomic64
Date: 2019-05-22
As a step towards making the atomic64 API use consistent types treewide,
let's have the ia64 atomic64 implementation use s64 as the underlying
type for atomic64_t, rather than long or __s64, matching the generated
headers.
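
For reference, the corresponding declarations in the generated headers
are written in terms of s64 throughout; a representative pair of
prototypes (paraphrased here for illustration, not copied verbatim
from the generated files) looks like:

  /* the 64-bit ops take and return s64, never bare long or __s64 */
  static inline s64 atomic64_add_return(s64 i, atomic64_t *v);
  static inline s64 atomic64_fetch_add(s64 i, atomic64_t *v);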

As atomic64_read() depends on the generic definition of atomic64_t,
it still returns long rather than s64. This will be converted in a
subsequent patch.
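
That is, the generic 64-bit definition (sketched here from
include/linux/types.h, trimmed for illustration) still uses long for
the counter, and ia64's atomic64_read() simply returns it:

  typedef struct {
          long counter;
  } atomic64_t;

  #define atomic64_read(v)  READ_ONCE((v)->counter)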

Otherwise, there should be no functional change as a result of this
patch.
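
ia64 is LP64, and kernel-side s64 is long long (via
asm-generic/int-ll64.h), so the new type has the same 64-bit size and
representation as the long/__s64 it replaces. A build-time sanity
check along these lines (illustrative only, not part of this patch)
would confirm that:

  BUILD_BUG_ON(sizeof(s64) != sizeof(long));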

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Will Deacon <will.deacon@arm.com>
---
arch/ia64/include/asm/atomic.h | 20 ++++++++++----------
1 file changed, 10 insertions(+), 10 deletions(-)

diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
index 206530d0751b..50440f3ddc43 100644
--- a/arch/ia64/include/asm/atomic.h
+++ b/arch/ia64/include/asm/atomic.h
@@ -124,10 +124,10 @@ ATOMIC_FETCH_OP(xor, ^)
#undef ATOMIC_OP

#define ATOMIC64_OP(op, c_op) \
-static __inline__ long \
-ia64_atomic64_##op (__s64 i, atomic64_t *v) \
+static __inline__ s64 \
+ia64_atomic64_##op (s64 i, atomic64_t *v) \
{ \
- __s64 old, new; \
+ s64 old, new; \
CMPXCHG_BUGCHECK_DECL \
\
do { \
@@ -139,10 +139,10 @@ ia64_atomic64_##op (__s64 i, atomic64_t *v) \
}

#define ATOMIC64_FETCH_OP(op, c_op) \
-static __inline__ long \
-ia64_atomic64_fetch_##op (__s64 i, atomic64_t *v) \
+static __inline__ s64 \
+ia64_atomic64_fetch_##op (s64 i, atomic64_t *v) \
{ \
- __s64 old, new; \
+ s64 old, new; \
CMPXCHG_BUGCHECK_DECL \
\
do { \
@@ -162,7 +162,7 @@ ATOMIC64_OPS(sub, -)

#define atomic64_add_return(i,v) \
({ \
- long __ia64_aar_i = (i); \
+ s64 __ia64_aar_i = (i); \
__ia64_atomic_const(i) \
? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter) \
: ia64_atomic64_add(__ia64_aar_i, v); \
@@ -170,7 +170,7 @@ ATOMIC64_OPS(sub, -)

#define atomic64_sub_return(i,v) \
({ \
- long __ia64_asr_i = (i); \
+ s64 __ia64_asr_i = (i); \
__ia64_atomic_const(i) \
? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter) \
: ia64_atomic64_sub(__ia64_asr_i, v); \
@@ -178,7 +178,7 @@ ATOMIC64_OPS(sub, -)

#define atomic64_fetch_add(i,v) \
({ \
- long __ia64_aar_i = (i); \
+ s64 __ia64_aar_i = (i); \
__ia64_atomic_const(i) \
? ia64_fetchadd(__ia64_aar_i, &(v)->counter, acq) \
: ia64_atomic64_fetch_add(__ia64_aar_i, v); \
@@ -186,7 +186,7 @@ ATOMIC64_OPS(sub, -)

#define atomic64_fetch_sub(i,v) \
({ \
- long __ia64_asr_i = (i); \
+ s64 __ia64_asr_i = (i); \
__ia64_atomic_const(i) \
? ia64_fetchadd(-__ia64_asr_i, &(v)->counter, acq) \
: ia64_atomic64_fetch_sub(__ia64_asr_i, v); \
--
2.11.0