Mirror of https://github.com/torvalds/linux.git
s390/atomic: Provide arch_atomic_*_and_test() implementations
Provide arch_atomic_*_and_test() implementations which make use of flag
output constraints, and allow the compiler to generate slightly better code.

Reviewed-by: Juergen Christ <jchrist@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
Parent: 7c7f32c9ee
Commit: a53f5d247e
2 changed files with 109 additions and 0 deletions
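Background for the change: with a compiler that defines __HAVE_ASM_FLAG_OUTPUTS__, the "=@cc" flag output constraint hands the s390 condition code (a value 0-3) straight to the compiler, which can branch on it directly instead of extracting it with an ipm/shift sequence; the _barrier variants additionally append "bcr 14,0", the z196 fast serialization. A minimal standalone sketch of the pattern (illustrative only, not the kernel macro; the function name is invented, and it builds only with an s390 compiler that supports flag outputs):

#include <stdbool.h>

/* Illustrative sketch: LAALG atomically adds val to *ptr and sets the
 * condition code; "=@cc" exposes it as cc. For the logical-add
 * instructions cc 0 means "result zero, no carry" and cc 2 means
 * "result zero, carry", hence the zero test below. */
static inline bool add_and_test_zero(long val, long *ptr)
{
	long tmp;
	int cc;

	asm volatile(
		"laalg	%[tmp],%[val],%[ptr]\n"
		: "=@cc" (cc), [tmp] "=d" (tmp), [ptr] "+QS" (*ptr)
		: [val] "d" (val)
		: "memory");
	return (cc == 0) || (cc == 2);
}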
arch/s390/include/asm/atomic.h
@@ -57,6 +57,24 @@ static __always_inline void arch_atomic_dec(atomic_t *v)
 }
 #define arch_atomic_dec arch_atomic_dec
 
+static __always_inline bool arch_atomic_sub_and_test(int i, atomic_t *v)
+{
+	return __atomic_add_and_test_barrier(-i, &v->counter);
+}
+#define arch_atomic_sub_and_test arch_atomic_sub_and_test
+
+static __always_inline bool arch_atomic_dec_and_test(atomic_t *v)
+{
+	return __atomic_add_const_and_test_barrier(-1, &v->counter);
+}
+#define arch_atomic_dec_and_test arch_atomic_dec_and_test
+
+static __always_inline bool arch_atomic_inc_and_test(atomic_t *v)
+{
+	return __atomic_add_const_and_test_barrier(1, &v->counter);
+}
+#define arch_atomic_inc_and_test arch_atomic_inc_and_test
+
 #define arch_atomic_sub(_i, _v)		arch_atomic_add(-(int)(_i), _v)
 #define arch_atomic_sub_return(_i, _v)	arch_atomic_add_return(-(int)(_i), _v)
 #define arch_atomic_fetch_sub(_i, _v)	arch_atomic_fetch_add(-(int)(_i), _v)
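These arch hooks are picked up by the generic atomic API; a typical caller looks like this (hypothetical object and release function, shown only to illustrate where the improved code generation pays off):

/* With the flag-output implementation the compiler can branch on the
 * condition code of the add instruction itself. */
if (atomic_dec_and_test(&obj->refcount))
	free_object(obj);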
@@ -146,6 +164,24 @@ static __always_inline void arch_atomic64_dec(atomic64_t *v)
 }
 #define arch_atomic64_dec arch_atomic64_dec
 
+static __always_inline bool arch_atomic64_sub_and_test(s64 i, atomic64_t *v)
+{
+	return __atomic64_add_and_test_barrier(-i, (long *)&v->counter);
+}
+#define arch_atomic64_sub_and_test arch_atomic64_sub_and_test
+
+static __always_inline bool arch_atomic64_dec_and_test(atomic64_t *v)
+{
+	return __atomic64_add_const_and_test_barrier(-1, (long *)&v->counter);
+}
+#define arch_atomic64_dec_and_test arch_atomic64_dec_and_test
+
+static __always_inline bool arch_atomic64_inc_and_test(atomic64_t *v)
+{
+	return __atomic64_add_const_and_test_barrier(1, (long *)&v->counter);
+}
+#define arch_atomic64_inc_and_test arch_atomic64_inc_and_test
+
 static __always_inline s64 arch_atomic64_xchg(atomic64_t *v, s64 new)
 {
 	return arch_xchg(&v->counter, new);
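Note that the 64-bit variants cast &v->counter to long *: atomic64_t stores its counter as s64, while the __atomic64_* primitives take long. Both types are 64 bits wide on s390, so the cast only reconciles the pointer types.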
arch/s390/include/asm/atomic_ops.h
@@ -10,6 +10,7 @@
 
 #include <linux/limits.h>
 #include <asm/march.h>
+#include <asm/asm.h>
 
 static __always_inline int __atomic_read(const int *ptr)
 {
@@ -169,4 +170,76 @@ __ATOMIC64_OPS(__atomic64_xor, "xgr")
 
 #endif /* MARCH_HAS_Z196_FEATURES */
 
+#if defined(MARCH_HAS_Z196_FEATURES) && defined(__HAVE_ASM_FLAG_OUTPUTS__)
+
+#define __ATOMIC_TEST_OP(op_name, op_type, op_string, op_barrier)	\
+static __always_inline bool op_name(op_type val, op_type *ptr)		\
+{									\
+	op_type tmp;							\
+	int cc;								\
+									\
+	asm volatile(							\
+		op_string "	%[tmp],%[val],%[ptr]\n"			\
+		op_barrier						\
+		: "=@cc" (cc), [tmp] "=d" (tmp), [ptr] "+QS" (*ptr)	\
+		: [val] "d" (val)					\
+		: "memory");						\
+	return (cc == 0) || (cc == 2);					\
+}									\
+
+#define __ATOMIC_TEST_OPS(op_name, op_type, op_string)			\
+	__ATOMIC_TEST_OP(op_name, op_type, op_string, "")		\
+	__ATOMIC_TEST_OP(op_name##_barrier, op_type, op_string, "bcr 14,0\n")
+
+__ATOMIC_TEST_OPS(__atomic_add_and_test, int, "laal")
+__ATOMIC_TEST_OPS(__atomic64_add_and_test, long, "laalg")
+
+#undef __ATOMIC_TEST_OPS
+#undef __ATOMIC_TEST_OP
+
+#define __ATOMIC_CONST_TEST_OP(op_name, op_type, op_string, op_barrier)	\
+static __always_inline bool op_name(op_type val, op_type *ptr)		\
+{									\
+	int cc;								\
+									\
+	asm volatile(							\
+		op_string "	%[ptr],%[val]\n"			\
+		op_barrier						\
+		: "=@cc" (cc), [ptr] "+QS" (*ptr)			\
+		: [val] "i" (val)					\
+		: "memory");						\
+	return (cc == 0) || (cc == 2);					\
+}
+
+#define __ATOMIC_CONST_TEST_OPS(op_name, op_type, op_string)		\
+	__ATOMIC_CONST_TEST_OP(op_name, op_type, op_string, "")	\
+	__ATOMIC_CONST_TEST_OP(op_name##_barrier, op_type, op_string, "bcr 14,0\n")
+
+__ATOMIC_CONST_TEST_OPS(__atomic_add_const_and_test, int, "alsi")
+__ATOMIC_CONST_TEST_OPS(__atomic64_add_const_and_test, long, "algsi")
+
+#undef __ATOMIC_CONST_TEST_OPS
+#undef __ATOMIC_CONST_TEST_OP
+
+#else /* defined(MARCH_HAS_Z196_FEATURES) && defined(__HAVE_ASM_FLAG_OUTPUTS__) */
+
+#define __ATOMIC_TEST_OP(op_name, op_func, op_type)			\
+static __always_inline bool op_name(op_type val, op_type *ptr)		\
+{									\
+	return op_func(val, ptr) == -val;				\
+}
+
+__ATOMIC_TEST_OP(__atomic_add_and_test, __atomic_add, int)
+__ATOMIC_TEST_OP(__atomic_add_and_test_barrier, __atomic_add_barrier, int)
+__ATOMIC_TEST_OP(__atomic_add_const_and_test, __atomic_add, int)
+__ATOMIC_TEST_OP(__atomic_add_const_and_test_barrier, __atomic_add_barrier, int)
+__ATOMIC_TEST_OP(__atomic64_add_and_test, __atomic64_add, long)
+__ATOMIC_TEST_OP(__atomic64_add_and_test_barrier, __atomic64_add_barrier, long)
+__ATOMIC_TEST_OP(__atomic64_add_const_and_test, __atomic64_add, long)
+__ATOMIC_TEST_OP(__atomic64_add_const_and_test_barrier, __atomic64_add_barrier, long)
+
+#undef __ATOMIC_TEST_OP
+
+#endif /* defined(MARCH_HAS_Z196_FEATURES) && defined(__HAVE_ASM_FLAG_OUTPUTS__) */
+
 #endif /* __ARCH_S390_ATOMIC_OPS__ */
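On the fallback path the test is derived from the returned old value: __atomic_add()/__atomic64_add() return the value held before the addition, and old == -val holds exactly when old + val == 0, i.e. when the new value is zero (the _const_ variants simply reuse the plain add here). A self-contained model of that identity, with an invented non-atomic fetch_add standing in for the kernel primitive:

#include <assert.h>

/* Models the fetch-and-add return convention used by the fallback:
 * returns the value *p held before the addition. */
static long fetch_add(long *p, long v)
{
	long old = *p;
	*p += v;
	return old;
}

int main(void)
{
	long x = 5;
	/* old value is 5 == -(-5), so the zero test fires; new value is 0 */
	assert(fetch_add(&x, -5) == -(-5));
	assert(x == 0);
	return 0;
}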