author    Jason A. Donenfeld <Jason@zx2c4.com>    2018-09-25 18:54:04 +0200
committer Jason A. Donenfeld <Jason@zx2c4.com>    2018-09-25 19:35:40 +0200
commit    b2eff7a7fd18afe5f15250ebda36b3e70b5172b1 (patch)
tree      aae4bf69631affab4af49eb5fd66f5868f77050e /src
parent    c2aa4b7e7e35974a8740f189c20b767e795bddbe (diff)
chacha20-arm,poly1305-arm: fix big-endian aarch64
Suggested-by: Andy Polyakov <appro@openssl.org>
Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
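In short: GCC and Clang predefine __ARMEB__ only when targeting big-endian 32-bit ARM; on big-endian AArch64 the predefined macro is __AARCH64EB__ instead. Guards written as "#ifdef __ARMEB__" therefore never fire when these arm64 sources are built, and the byte-order fixups they protect were silently compiled out. A minimal sketch of the distinction (the program itself is illustrative, not from the tree):

/*
 * Illustrative only: shows which endianness macro each target
 * actually predefines.
 */
#include <stdio.h>

int main(void)
{
#if defined(__AARCH64EB__)
	puts("big-endian AArch64: byte-order fixups enabled");
#elif defined(__ARMEB__)
	puts("big-endian 32-bit ARM: byte-order fixups enabled");
#else
	puts("little-endian target: fixups compiled out");
#endif
	return 0;
}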
Diffstat (limited to 'src')
-rw-r--r--  src/crypto/zinc/chacha20/chacha20-arm64.S  18
-rw-r--r--  src/crypto/zinc/poly1305/poly1305-arm64.S  24
2 files changed, 21 insertions, 21 deletions
diff --git a/src/crypto/zinc/chacha20/chacha20-arm64.S b/src/crypto/zinc/chacha20/chacha20-arm64.S
index 5037510..1ae11a5 100644
--- a/src/crypto/zinc/chacha20/chacha20-arm64.S
+++ b/src/crypto/zinc/chacha20/chacha20-arm64.S
@@ -34,7 +34,7 @@ ENTRY(chacha20_arm)
ldp x24,x25,[x3] // load key
ldp x26,x27,[x3,#16]
ldp x28,x30,[x4] // load counter
-#ifdef __ARMEB__
+#ifdef __AARCH64EB__
ror x24,x24,#32
ror x25,x25,#32
ror x26,x26,#32
@@ -195,7 +195,7 @@ ENTRY(chacha20_arm)
add x20,x20,x21,lsl#32
ldp x19,x21,[x1,#48]
add x1,x1,#64
-#ifdef __ARMEB__
+#ifdef __AARCH64EB__
rev x5,x5
rev x7,x7
rev x9,x9
@@ -251,7 +251,7 @@ ENTRY(chacha20_arm)
add x15,x15,x16,lsl#32
add x17,x17,x19,lsl#32
add x20,x20,x21,lsl#32
-#ifdef __ARMEB__
+#ifdef __AARCH64EB__
rev x5,x5
rev x7,x7
rev x9,x9
@@ -316,7 +316,7 @@ ENTRY(chacha20_neon)
ldp x28,x30,[x4] // load counter
ld1 {v27.4s},[x4]
ld1 {v31.4s},[x5]
-#ifdef __ARMEB__
+#ifdef __AARCH64EB__
rev64 v24.4s,v24.4s
ror x24,x24,#32
ror x25,x25,#32
@@ -614,7 +614,7 @@ ENTRY(chacha20_neon)
add x20,x20,x21,lsl#32
ldp x19,x21,[x1,#48]
add x1,x1,#64
-#ifdef __ARMEB__
+#ifdef __AARCH64EB__
rev x5,x5
rev x7,x7
rev x9,x9
@@ -693,7 +693,7 @@ ENTRY(chacha20_neon)
add x20,x20,x21,lsl#32
ldp x19,x21,[x1,#48]
add x1,x1,#64
-#ifdef __ARMEB__
+#ifdef __AARCH64EB__
rev x5,x5
rev x7,x7
rev x9,x9
@@ -795,7 +795,7 @@ ENTRY(chacha20_neon)
ldp x28,x30,[x4] // load counter
ld1 {v27.4s},[x4]
ld1 {v31.4s},[x5]
-#ifdef __ARMEB__
+#ifdef __AARCH64EB__
rev64 v24.4s,v24.4s
ror x24,x24,#32
ror x25,x25,#32
@@ -1308,7 +1308,7 @@ ENTRY(chacha20_neon)
add x20,x20,x21,lsl#32
ldp x19,x21,[x1,#48]
add x1,x1,#64
-#ifdef __ARMEB__
+#ifdef __AARCH64EB__
rev x5,x5
rev x7,x7
rev x9,x9
@@ -1822,7 +1822,7 @@ ENTRY(chacha20_neon)
add x1,x1,#64
add v21.4s,v21.4s,v25.4s
-#ifdef __ARMEB__
+#ifdef __AARCH64EB__
rev x5,x5
rev x7,x7
rev x9,x9
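The pattern gated by these hunks is the usual big-endian fixup: "rev xN,xN" byte-reverses a 64-bit register so stored output matches ChaCha20's little-endian wire format, while "ror xN,xN,#32" exchanges the two 32-bit halves of a register after an ldp of packed 32-bit words. A C sketch of the two operations, under hypothetical helper names that are not part of this tree:

#include <stdint.h>

/* Hypothetical helpers, illustrating the two fixups above. */
static inline uint64_t swap_halves(uint64_t v)
{
	return (v >> 32) | (v << 32);	/* same effect as "ror xN,xN,#32" */
}

static inline uint64_t swap_bytes(uint64_t v)
{
	return __builtin_bswap64(v);	/* same effect as "rev xN,xN" */
}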
diff --git a/src/crypto/zinc/poly1305/poly1305-arm64.S b/src/crypto/zinc/poly1305/poly1305-arm64.S
index 84a6544..5f4e7fb 100644
--- a/src/crypto/zinc/poly1305/poly1305-arm64.S
+++ b/src/crypto/zinc/poly1305/poly1305-arm64.S
@@ -21,7 +21,7 @@ ENTRY(poly1305_init_arm)
ldp x7,x8,[x1] // load key
mov x9,#0xfffffffc0fffffff
movk x9,#0x0fff,lsl#48
-#ifdef __ARMEB__
+#ifdef __AARCH64EB__
rev x7,x7 // flip bytes
rev x8,x8
#endif
@@ -49,7 +49,7 @@ ENTRY(poly1305_blocks_arm)
.Loop:
ldp x10,x11,[x1],#16 // load input
sub x2,x2,#16
-#ifdef __ARMEB__
+#ifdef __AARCH64EB__
rev x10,x10
rev x11,x11
#endif
@@ -112,13 +112,13 @@ ENTRY(poly1305_emit_arm)
csel x4,x4,x12,eq
csel x5,x5,x13,eq
-#ifdef __ARMEB__
+#ifdef __AARCH64EB__
ror x10,x10,#32 // flip nonce words
ror x11,x11,#32
#endif
adds x4,x4,x10 // accumulate nonce
adc x5,x5,x11
-#ifdef __ARMEB__
+#ifdef __AARCH64EB__
rev x4,x4 // flip output bytes
rev x5,x5
#endif
@@ -232,7 +232,7 @@ ENTRY(poly1305_blocks_neon)
adcs x5,x5,xzr
adc x6,x6,xzr
-#ifdef __ARMEB__
+#ifdef __AARCH64EB__
rev x12,x12
rev x13,x13
#endif
@@ -278,7 +278,7 @@ ENTRY(poly1305_blocks_neon)
ldp x12,x13,[x1],#16 // load input
sub x2,x2,#16
add x9,x8,x8,lsr#2 // s1 = r1 + (r1 >> 2)
-#ifdef __ARMEB__
+#ifdef __AARCH64EB__
rev x12,x12
rev x13,x13
#endif
@@ -363,7 +363,7 @@ ENTRY(poly1305_blocks_neon)
lsl x3,x3,#24
add x15,x0,#48
-#ifdef __ARMEB__
+#ifdef __AARCH64EB__
rev x8,x8
rev x12,x12
rev x9,x9
@@ -399,7 +399,7 @@ ENTRY(poly1305_blocks_neon)
ld1 {v4.4s,v5.4s,v6.4s,v7.4s},[x15],#64
ld1 {v8.4s},[x15]
-#ifdef __ARMEB__
+#ifdef __AARCH64EB__
rev x8,x8
rev x12,x12
rev x9,x9
@@ -460,7 +460,7 @@ ENTRY(poly1305_blocks_neon)
umull v20.2d,v14.2s,v1.s[2]
ldp x9,x13,[x16],#48
umull v19.2d,v14.2s,v0.s[2]
-#ifdef __ARMEB__
+#ifdef __AARCH64EB__
rev x8,x8
rev x12,x12
rev x9,x9
@@ -525,7 +525,7 @@ ENTRY(poly1305_blocks_neon)
umlal v23.2d,v11.2s,v3.s[0]
umlal v20.2d,v11.2s,v8.s[0]
umlal v21.2d,v11.2s,v0.s[0]
-#ifdef __ARMEB__
+#ifdef __AARCH64EB__
rev x8,x8
rev x12,x12
rev x9,x9
@@ -803,13 +803,13 @@ ENTRY(poly1305_emit_neon)
csel x4,x4,x12,eq
csel x5,x5,x13,eq
-#ifdef __ARMEB__
+#ifdef __AARCH64EB__
ror x10,x10,#32 // flip nonce words
ror x11,x11,#32
#endif
adds x4,x4,x10 // accumulate nonce
adc x5,x5,x11
-#ifdef __ARMEB__
+#ifdef __AARCH64EB__
rev x4,x4 // flip output bytes
rev x5,x5
#endif
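The Poly1305 hunks gate the same idea on the input path: the message is consumed as little-endian 64-bit limbs, so a value fetched with a big-endian load must be byte-reversed before it enters the accumulator. Roughly, in C (load_le64 is a hypothetical name, not from this tree):

#include <stdint.h>
#include <string.h>

/*
 * Hypothetical helper: load a little-endian 64-bit word regardless of
 * host byte order, mirroring the "ldp ...; rev ..." sequences in the
 * hunks above.
 */
static uint64_t load_le64(const unsigned char *p)
{
	uint64_t v;
	memcpy(&v, p, sizeof v);	/* native-endian load, like ldp */
#ifdef __AARCH64EB__
	v = __builtin_bswap64(v);	/* same effect as "rev xN,xN" */
#endif
	return v;
}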