path: root/src/crypto/curve25519-hacl64.h
author    Jason A. Donenfeld <Jason@zx2c4.com>  2018-08-23 18:08:03 -0700
committer Jason A. Donenfeld <Jason@zx2c4.com>  2018-08-28 23:20:13 -0600
commit    470a0a36d579980431361f23e8f319d5c68aa4af (patch)
tree      624317ee7c194f1a8ec61137726adb1215ff276a /src/crypto/curve25519-hacl64.h
parent    4e71a11616a7763219e23bd34708751a702c80c7 (diff)
crypto: use unaligned helpers
This is not useful for WireGuard, but for the general use case we probably want it this way, and the speed difference is mostly lost in the noise.

Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
Diffstat (limited to 'src/crypto/curve25519-hacl64.h')
-rw-r--r--  src/crypto/curve25519-hacl64.h | 18
1 file changed, 9 insertions(+), 9 deletions(-)
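For context, the kernel's get_unaligned_le64() and put_unaligned_le64() helpers (from <asm/unaligned.h>) read and write little-endian 64-bit values without assuming the pointer is 8-byte aligned, which the old (__force __le64 *) casts implicitly did. Below is a minimal userspace sketch of the same pattern, using memcpy so the compiler emits safe accesses on strict-alignment architectures; the function names load_le64/store_le64 are illustrative, not the kernel API.

	#include <stdint.h>
	#include <string.h>

	/* Read a little-endian u64 from a possibly unaligned buffer. */
	static inline uint64_t load_le64(const uint8_t *p)
	{
		uint64_t v;
		memcpy(&v, p, sizeof(v));   /* avoids an unaligned dereference */
	#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
		v = __builtin_bswap64(v);   /* convert from little-endian storage */
	#endif
		return v;
	}

	/* Write a u64 to a possibly unaligned buffer in little-endian order. */
	static inline void store_le64(uint8_t *p, uint64_t v)
	{
	#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
		v = __builtin_bswap64(v);
	#endif
		memcpy(p, &v, sizeof(v));
	}

On architectures with cheap unaligned loads the compiler typically reduces the memcpy to a single mov, which is why the commit message notes the speed difference is lost in the noise.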
diff --git a/src/crypto/curve25519-hacl64.h b/src/crypto/curve25519-hacl64.h
index d2637ac..7d9d734 100644
--- a/src/crypto/curve25519-hacl64.h
+++ b/src/crypto/curve25519-hacl64.h
@@ -565,11 +565,11 @@ static __always_inline void format_fexpand(u64 *output, const u8 *input)
const u8 *x02 = input + 19;
const u8 *x0 = input + 24;
u64 i0, i1, i2, i3, i4, output0, output1, output2, output3, output4;
- i0 = le64_to_cpup((__force __le64 *)input);
- i1 = le64_to_cpup((__force __le64 *)x00);
- i2 = le64_to_cpup((__force __le64 *)x01);
- i3 = le64_to_cpup((__force __le64 *)x02);
- i4 = le64_to_cpup((__force __le64 *)x0);
+ i0 = get_unaligned_le64(input);
+ i1 = get_unaligned_le64(x00);
+ i2 = get_unaligned_le64(x01);
+ i3 = get_unaligned_le64(x02);
+ i4 = get_unaligned_le64(x0);
output0 = i0 & 0x7ffffffffffffLLU;
output1 = i1 >> 3 & 0x7ffffffffffffLLU;
output2 = i2 >> 6 & 0x7ffffffffffffLLU;
@@ -688,10 +688,10 @@ static __always_inline void format_fcontract_store(u8 *output, u64 *input)
u8 *b1 = output + 8;
u8 *b2 = output + 16;
u8 *b3 = output + 24;
- *(__force __le64 *)b0 = cpu_to_le64(o0);
- *(__force __le64 *)b1 = cpu_to_le64(o1);
- *(__force __le64 *)b2 = cpu_to_le64(o2);
- *(__force __le64 *)b3 = cpu_to_le64(o3);
+ put_unaligned_le64(o0, b0);
+ put_unaligned_le64(o1, b1);
+ put_unaligned_le64(o2, b2);
+ put_unaligned_le64(o3, b3);
}
static __always_inline void format_fcontract(u8 *output, u64 *input)