s390/lib: Use exrl instead of ex in xor functions

exrl is present in all machines currently supported, therefore prefer
it over ex. This saves one instruction and doesn't need an additional
register to hold the address of the target instruction.

Signed-off-by: Sven Schnelle <svens@linux.ibm.com>
Reviewed-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
This commit is contained in:
Sven Schnelle 2025-01-08 15:27:01 +01:00 committed by Alexander Gordeev
parent 9988df07db
commit 745600ed69

View file

@@ -15,7 +15,6 @@ static void xor_xc_2(unsigned long bytes, unsigned long * __restrict p1,
const unsigned long * __restrict p2)
{
asm volatile(
" larl 1,2f\n"
" aghi %0,-1\n"
" jm 3f\n"
" srlg 0,%0,8\n"
@@ -25,12 +24,12 @@ static void xor_xc_2(unsigned long bytes, unsigned long * __restrict p1,
" la %1,256(%1)\n"
" la %2,256(%2)\n"
" brctg 0,0b\n"
"1: ex %0,0(1)\n"
"1: exrl %0,2f\n"
" j 3f\n"
"2: xc 0(1,%1),0(%2)\n"
"3:\n"
: : "d" (bytes), "a" (p1), "a" (p2)
: "0", "1", "cc", "memory");
: "0", "cc", "memory");
}
static void xor_xc_3(unsigned long bytes, unsigned long * __restrict p1,
@@ -38,9 +37,8 @@ static void xor_xc_3(unsigned long bytes, unsigned long * __restrict p1,
const unsigned long * __restrict p3)
{
asm volatile(
" larl 1,2f\n"
" aghi %0,-1\n"
" jm 3f\n"
" jm 4f\n"
" srlg 0,%0,8\n"
" ltgr 0,0\n"
" jz 1f\n"
@@ -50,14 +48,14 @@ static void xor_xc_3(unsigned long bytes, unsigned long * __restrict p1,
" la %2,256(%2)\n"
" la %3,256(%3)\n"
" brctg 0,0b\n"
"1: ex %0,0(1)\n"
" ex %0,6(1)\n"
" j 3f\n"
"1: exrl %0,2f\n"
" exrl %0,3f\n"
" j 4f\n"
"2: xc 0(1,%1),0(%2)\n"
" xc 0(1,%1),0(%3)\n"
"3:\n"
"3: xc 0(1,%1),0(%3)\n"
"4:\n"
: "+d" (bytes), "+a" (p1), "+a" (p2), "+a" (p3)
: : "0", "1", "cc", "memory");
: : "0", "cc", "memory");
}
static void xor_xc_4(unsigned long bytes, unsigned long * __restrict p1,
@@ -66,9 +64,8 @@ static void xor_xc_4(unsigned long bytes, unsigned long * __restrict p1,
const unsigned long * __restrict p4)
{
asm volatile(
" larl 1,2f\n"
" aghi %0,-1\n"
" jm 3f\n"
" jm 5f\n"
" srlg 0,%0,8\n"
" ltgr 0,0\n"
" jz 1f\n"
@@ -80,16 +77,16 @@ static void xor_xc_4(unsigned long bytes, unsigned long * __restrict p1,
" la %3,256(%3)\n"
" la %4,256(%4)\n"
" brctg 0,0b\n"
"1: ex %0,0(1)\n"
" ex %0,6(1)\n"
" ex %0,12(1)\n"
" j 3f\n"
"1: exrl %0,2f\n"
" exrl %0,3f\n"
" exrl %0,4f\n"
" j 5f\n"
"2: xc 0(1,%1),0(%2)\n"
" xc 0(1,%1),0(%3)\n"
" xc 0(1,%1),0(%4)\n"
"3:\n"
"3: xc 0(1,%1),0(%3)\n"
"4: xc 0(1,%1),0(%4)\n"
"5:\n"
: "+d" (bytes), "+a" (p1), "+a" (p2), "+a" (p3), "+a" (p4)
: : "0", "1", "cc", "memory");
: : "0", "cc", "memory");
}
static void xor_xc_5(unsigned long bytes, unsigned long * __restrict p1,
@@ -101,7 +98,7 @@ static void xor_xc_5(unsigned long bytes, unsigned long * __restrict p1,
asm volatile(
" larl 1,2f\n"
" aghi %0,-1\n"
" jm 3f\n"
" jm 6f\n"
" srlg 0,%0,8\n"
" ltgr 0,0\n"
" jz 1f\n"
@@ -115,19 +112,19 @@ static void xor_xc_5(unsigned long bytes, unsigned long * __restrict p1,
" la %4,256(%4)\n"
" la %5,256(%5)\n"
" brctg 0,0b\n"
"1: ex %0,0(1)\n"
" ex %0,6(1)\n"
" ex %0,12(1)\n"
" ex %0,18(1)\n"
" j 3f\n"
"1: exrl %0,2f\n"
" exrl %0,3f\n"
" exrl %0,4f\n"
" exrl %0,5f\n"
" j 6f\n"
"2: xc 0(1,%1),0(%2)\n"
" xc 0(1,%1),0(%3)\n"
" xc 0(1,%1),0(%4)\n"
" xc 0(1,%1),0(%5)\n"
"3:\n"
"3: xc 0(1,%1),0(%3)\n"
"4: xc 0(1,%1),0(%4)\n"
"5: xc 0(1,%1),0(%5)\n"
"6:\n"
: "+d" (bytes), "+a" (p1), "+a" (p2), "+a" (p3), "+a" (p4),
"+a" (p5)
: : "0", "1", "cc", "memory");
: : "0", "cc", "memory");
}
struct xor_block_template xor_block_xc = {