-rw-r--r--  src/crypto/curve25519-generic.h (renamed from src/crypto/curve25519-fiat32.h) | 596
-rw-r--r--  src/crypto/curve25519-hacl64.h | 739
-rw-r--r--  src/crypto/curve25519.c | 6
-rw-r--r--  src/tests/qemu/Makefile | 2
4 files changed, 471 insertions(+), 872 deletions(-)
diff --git a/src/crypto/curve25519-fiat32.h b/src/crypto/curve25519-generic.h
index f1e21a4..bbb19cb 100644
--- a/src/crypto/curve25519-fiat32.h
+++ b/src/crypto/curve25519-generic.h
@@ -7,6 +7,347 @@
* https://github.com/mit-plv/fiat-crypto
*/
+#if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__)
+typedef __uint128_t u128;
+
+/* fe means field element. Here the field is \Z/(2^255-19). An element t,
+ * entries t[0]...t[4], represents the integer t[0]+2^51 t[1]+2^102 t[2]+2^153
+ * t[3]+2^204 t[4].
+ * fe limbs are bounded by 1.125*2^51.
+ * Multiplication and carrying produce fe from fe_loose.
+ */
+typedef struct fe { u64 v[5]; } fe;
+
+/* fe_loose limbs are bounded by 3.375*2^51.
+ * Addition and subtraction produce fe_loose from (fe, fe).
+ */
+typedef struct fe_loose { u64 v[5]; } fe_loose;
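+
+/* Illustrative example (not part of the generated fiat-crypto code): limb i
+ * carries weight 2^(51*i), so the integer 7 + 5*2^51 + 2^102 is stored as
+ * v = { 7, 5, 1, 0, 0 }.
+ */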
+
+static __always_inline void fe_frombytes_impl(u64 h[5], const u8 *s)
+{
+ // Ignores top bit of s.
+ u64 a0 = le64_to_cpup((__force __le64 *)(s));
+ u64 a1 = le64_to_cpup((__force __le64 *)(s+8));
+ u64 a2 = le64_to_cpup((__force __le64 *)(s+16));
+ u64 a3 = le64_to_cpup((__force __le64 *)(s+24));
+ // Use 51 bits, 64-51 = 13 left.
+ h[0] = a0 & ((1ULL << 51) - 1);
+ // (64-51) + 38 = 13 + 38 = 51
+ h[1] = (a0 >> 51) | ((a1 & ((1ULL << 38) - 1)) << 13);
+ // (64-38) + 25 = 26 + 25 = 51
+ h[2] = (a1 >> 38) | ((a2 & ((1ULL << 25) - 1)) << 26);
+ // (64-25) + 12 = 39 + 12 = 51
+ h[3] = (a2 >> 25) | ((a3 & ((1ULL << 12) - 1)) << 39);
+ // (64-12) = 52, ignore top bit
+ h[4] = (a3 >> 12) & ((1ULL << 51) - 1);
+}
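+
+/* Illustrative check: the five limbs consume 51*5 = 255 bits, so bit 255 (the
+ * high bit of s[31]) is discarded, matching X25519's rule that the top bit of
+ * a u-coordinate is ignored.
+ */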
+
+static __always_inline u8 /*bool*/ addcarryx_u51(u8 /*bool*/ c, u64 a, u64 b, u64 *low)
+{
+ /* This function extracts 51 bits of result and 1 bit of carry (52 total), so
+ * a 64-bit intermediate is sufficient.
+ */
+ u64 x = a + b + c;
+ *low = x & ((1ULL << 51) - 1);
+ return (x >> 51) & 1;
+}
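+
+/* Usage sketch (illustrative): carries chain through successive limbs, e.g.
+ *
+ *   u64 r0, r1;
+ *   u8 c = addcarryx_u51(0, a0, b0, &r0);
+ *   c = addcarryx_u51(c, a1, b1, &r1);
+ *
+ * leaves the low 102 bits of the radix-2^51 sum in (r0, r1) and the carry
+ * out of limb 1 in c.
+ */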
+
+static __always_inline u8 /*bool*/ subborrow_u51(u8 /*bool*/ c, u64 a, u64 b, u64 *low)
+{
+ /* This function extracts 51 bits of result and 1 bit of borrow (52 total), so
+ * a 64-bit intermediate is sufficient.
+ */
+ u64 x = a - b - c;
+ *low = x & ((1ULL << 51) - 1);
+ return x >> 63;
+}
+
+static __always_inline u64 cmovznz64(u64 t, u64 z, u64 nz)
+{
+ /* all set if nonzero, 0 if 0 */
+ t = -!!t;
+ return (t&nz) | ((~t)&z);
+}
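+
+/* Illustrative: cmovznz64(0, z, nz) == z, and cmovznz64(t, z, nz) == nz for
+ * any t != 0, selected by mask rather than by a secret-dependent branch.
+ */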
+
+static __always_inline void fe_freeze(u64 out[5], const u64 in1[5])
+{
+ { const u64 x7 = in1[4];
+ { const u64 x8 = in1[3];
+ { const u64 x6 = in1[2];
+ { const u64 x4 = in1[1];
+ { const u64 x2 = in1[0];
+ { u64 x10; u8/*bool*/ x11 = subborrow_u51(0x0, x2, 0x7ffffffffffed, &x10);
+ { u64 x13; u8/*bool*/ x14 = subborrow_u51(x11, x4, 0x7ffffffffffff, &x13);
+ { u64 x16; u8/*bool*/ x17 = subborrow_u51(x14, x6, 0x7ffffffffffff, &x16);
+ { u64 x19; u8/*bool*/ x20 = subborrow_u51(x17, x8, 0x7ffffffffffff, &x19);
+ { u64 x22; u8/*bool*/ x23 = subborrow_u51(x20, x7, 0x7ffffffffffff, &x22);
+ { u64 x24 = cmovznz64(x23, 0x0, 0xffffffffffffffffL);
+ { u64 x25 = (x24 & 0x7ffffffffffed);
+ { u64 x27; u8/*bool*/ x28 = addcarryx_u51(0x0, x10, x25, &x27);
+ { u64 x29 = (x24 & 0x7ffffffffffff);
+ { u64 x31; u8/*bool*/ x32 = addcarryx_u51(x28, x13, x29, &x31);
+ { u64 x33 = (x24 & 0x7ffffffffffff);
+ { u64 x35; u8/*bool*/ x36 = addcarryx_u51(x32, x16, x33, &x35);
+ { u64 x37 = (x24 & 0x7ffffffffffff);
+ { u64 x39; u8/*bool*/ x40 = addcarryx_u51(x36, x19, x37, &x39);
+ { u64 x41 = (x24 & 0x7ffffffffffff);
+ { u64 x43; addcarryx_u51(x40, x22, x41, &x43);
+ out[0] = x27;
+ out[1] = x31;
+ out[2] = x35;
+ out[3] = x39;
+ out[4] = x43;
+ }}}}}}}}}}}}}}}}}}}}}
+}
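+
+/* What fe_freeze does (informal summary): subtract p = 2^255 - 19, whose
+ * radix-2^51 limbs are 0x7ffffffffffed followed by four limbs of
+ * 0x7ffffffffffff; if the subtraction borrows, the input was already < p,
+ * and p is added back under an all-ones mask. Either way the output is the
+ * canonical representative in [0, p).
+ */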
+
+static __always_inline void fe_tobytes(u8 s[32], const fe *f)
+{
+ u64 h[5];
+ fe_freeze(h, f->v);
+
+ s[0] = h[0] >> 0;
+ s[1] = h[0] >> 8;
+ s[2] = h[0] >> 16;
+ s[3] = h[0] >> 24;
+ s[4] = h[0] >> 32;
+ s[5] = h[0] >> 40;
+ s[6] = (h[0] >> 48) | (h[1] << 3);
+ s[7] = h[1] >> 5;
+ s[8] = h[1] >> 13;
+ s[9] = h[1] >> 21;
+ s[10] = h[1] >> 29;
+ s[11] = h[1] >> 37;
+ s[12] = (h[1] >> 45) | (h[2] << 6);
+ s[13] = h[2] >> 2;
+ s[14] = h[2] >> 10;
+ s[15] = h[2] >> 18;
+ s[16] = h[2] >> 26;
+ s[17] = h[2] >> 34;
+ s[18] = h[2] >> 42;
+ s[19] = (h[2] >> 50) | (h[3] << 1);
+ s[20] = h[3] >> 7;
+ s[21] = h[3] >> 15;
+ s[22] = h[3] >> 23;
+ s[23] = h[3] >> 31;
+ s[24] = h[3] >> 39;
+ s[25] = (h[3] >> 47) | (h[4] << 4);
+ s[26] = h[4] >> 4;
+ s[27] = h[4] >> 12;
+ s[28] = h[4] >> 20;
+ s[29] = h[4] >> 28;
+ s[30] = h[4] >> 36;
+ s[31] = h[4] >> 44;
+}
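+
+/* Illustrative: limb boundaries fall at bits 51, 102, 153 and 204, so bytes
+ * that straddle a boundary splice two limbs, e.g. s[6] combines the top 3
+ * bits of h[0] with the low 5 bits of h[1].
+ */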
+
+static __always_inline void fe_add_impl(u64 out[5], const u64 in1[5], const u64 in2[5])
+{
+ { const u64 x10 = in1[4];
+ { const u64 x11 = in1[3];
+ { const u64 x9 = in1[2];
+ { const u64 x7 = in1[1];
+ { const u64 x5 = in1[0];
+ { const u64 x18 = in2[4];
+ { const u64 x19 = in2[3];
+ { const u64 x17 = in2[2];
+ { const u64 x15 = in2[1];
+ { const u64 x13 = in2[0];
+ out[0] = (x5 + x13);
+ out[1] = (x7 + x15);
+ out[2] = (x9 + x17);
+ out[3] = (x11 + x19);
+ out[4] = (x10 + x18);
+ }}}}}}}}}}
+}
+
+static __always_inline void fe_sub_impl(u64 out[5], const u64 in1[5], const u64 in2[5])
+{
+ { const u64 x10 = in1[4];
+ { const u64 x11 = in1[3];
+ { const u64 x9 = in1[2];
+ { const u64 x7 = in1[1];
+ { const u64 x5 = in1[0];
+ { const u64 x18 = in2[4];
+ { const u64 x19 = in2[3];
+ { const u64 x17 = in2[2];
+ { const u64 x15 = in2[1];
+ { const u64 x13 = in2[0];
+ out[0] = ((0xfffffffffffda + x5) - x13);
+ out[1] = ((0xffffffffffffe + x7) - x15);
+ out[2] = ((0xffffffffffffe + x9) - x17);
+ out[3] = ((0xffffffffffffe + x11) - x19);
+ out[4] = ((0xffffffffffffe + x10) - x18);
+ }}}}}}}}}}
+}
+
+static __always_inline void fe_mul_impl(u64 out[5], const u64 in1[5], const u64 in2[5])
+{
+ { const u64 x10 = in1[4];
+ { const u64 x11 = in1[3];
+ { const u64 x9 = in1[2];
+ { const u64 x7 = in1[1];
+ { const u64 x5 = in1[0];
+ { const u64 x18 = in2[4];
+ { const u64 x19 = in2[3];
+ { const u64 x17 = in2[2];
+ { const u64 x15 = in2[1];
+ { const u64 x13 = in2[0];
+ { u128 x20 = ((u128)x5 * x13);
+ { u128 x21 = (((u128)x5 * x15) + ((u128)x7 * x13));
+ { u128 x22 = ((((u128)x5 * x17) + ((u128)x9 * x13)) + ((u128)x7 * x15));
+ { u128 x23 = (((((u128)x5 * x19) + ((u128)x11 * x13)) + ((u128)x7 * x17)) + ((u128)x9 * x15));
+ { u128 x24 = ((((((u128)x5 * x18) + ((u128)x10 * x13)) + ((u128)x11 * x15)) + ((u128)x7 * x19)) + ((u128)x9 * x17));
+ { u64 x25 = (x10 * 0x13);
+ { u64 x26 = (x7 * 0x13);
+ { u64 x27 = (x9 * 0x13);
+ { u64 x28 = (x11 * 0x13);
+ { u128 x29 = ((((x20 + ((u128)x25 * x15)) + ((u128)x26 * x18)) + ((u128)x27 * x19)) + ((u128)x28 * x17));
+ { u128 x30 = (((x21 + ((u128)x25 * x17)) + ((u128)x27 * x18)) + ((u128)x28 * x19));
+ { u128 x31 = ((x22 + ((u128)x25 * x19)) + ((u128)x28 * x18));
+ { u128 x32 = (x23 + ((u128)x25 * x18));
+ { u64 x33 = (u64) (x29 >> 0x33);
+ { u64 x34 = ((u64)x29 & 0x7ffffffffffff);
+ { u128 x35 = (x33 + x30);
+ { u64 x36 = (u64) (x35 >> 0x33);
+ { u64 x37 = ((u64)x35 & 0x7ffffffffffff);
+ { u128 x38 = (x36 + x31);
+ { u64 x39 = (u64) (x38 >> 0x33);
+ { u64 x40 = ((u64)x38 & 0x7ffffffffffff);
+ { u128 x41 = (x39 + x32);
+ { u64 x42 = (u64) (x41 >> 0x33);
+ { u64 x43 = ((u64)x41 & 0x7ffffffffffff);
+ { u128 x44 = (x42 + x24);
+ { u64 x45 = (u64) (x44 >> 0x33);
+ { u64 x46 = ((u64)x44 & 0x7ffffffffffff);
+ { u64 x47 = (x34 + (0x13 * x45));
+ { u64 x48 = (x47 >> 0x33);
+ { u64 x49 = (x47 & 0x7ffffffffffff);
+ { u64 x50 = (x48 + x37);
+ { u64 x51 = (x50 >> 0x33);
+ { u64 x52 = (x50 & 0x7ffffffffffff);
+ out[0] = x49;
+ out[1] = x52;
+ out[2] = (x51 + x40);
+ out[3] = x43;
+ out[4] = x46;
+ }}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}
+}
+
+static __always_inline void fe_sqr_impl(u64 out[5], const u64 in1[5])
+{
+ { const u64 x7 = in1[4];
+ { const u64 x8 = in1[3];
+ { const u64 x6 = in1[2];
+ { const u64 x4 = in1[1];
+ { const u64 x2 = in1[0];
+ { u64 x9 = (x2 * 0x2);
+ { u64 x10 = (x4 * 0x2);
+ { u64 x11 = ((x6 * 0x2) * 0x13);
+ { u64 x12 = (x7 * 0x13);
+ { u64 x13 = (x12 * 0x2);
+ { u128 x14 = ((((u128)x2 * x2) + ((u128)x13 * x4)) + ((u128)x11 * x8));
+ { u128 x15 = ((((u128)x9 * x4) + ((u128)x13 * x6)) + ((u128)x8 * (x8 * 0x13)));
+ { u128 x16 = ((((u128)x9 * x6) + ((u128)x4 * x4)) + ((u128)x13 * x8));
+ { u128 x17 = ((((u128)x9 * x8) + ((u128)x10 * x6)) + ((u128)x7 * x12));
+ { u128 x18 = ((((u128)x9 * x7) + ((u128)x10 * x8)) + ((u128)x6 * x6));
+ { u64 x19 = (u64) (x14 >> 0x33);
+ { u64 x20 = ((u64)x14 & 0x7ffffffffffff);
+ { u128 x21 = (x19 + x15);
+ { u64 x22 = (u64) (x21 >> 0x33);
+ { u64 x23 = ((u64)x21 & 0x7ffffffffffff);
+ { u128 x24 = (x22 + x16);
+ { u64 x25 = (u64) (x24 >> 0x33);
+ { u64 x26 = ((u64)x24 & 0x7ffffffffffff);
+ { u128 x27 = (x25 + x17);
+ { u64 x28 = (u64) (x27 >> 0x33);
+ { u64 x29 = ((u64)x27 & 0x7ffffffffffff);
+ { u128 x30 = (x28 + x18);
+ { u64 x31 = (u64) (x30 >> 0x33);
+ { u64 x32 = ((u64)x30 & 0x7ffffffffffff);
+ { u64 x33 = (x20 + (0x13 * x31));
+ { u64 x34 = (x33 >> 0x33);
+ { u64 x35 = (x33 & 0x7ffffffffffff);
+ { u64 x36 = (x34 + x23);
+ { u64 x37 = (x36 >> 0x33);
+ { u64 x38 = (x36 & 0x7ffffffffffff);
+ out[0] = x35;
+ out[1] = x38;
+ out[2] = (x37 + x26);
+ out[3] = x29;
+ out[4] = x32;
+ }}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}
+}
+
+/* Replace (f,g) with (g,f) if b == 1;
+ * replace (f,g) with (f,g) if b == 0.
+ *
+ * Preconditions: b in {0,1}
+ */
+static __always_inline void fe_cswap(fe *f, fe *g, u64 b)
+{
+ unsigned i;
+ b = 0-b;
+ for (i = 0; i < 5; i++) {
+ u64 x = f->v[i] ^ g->v[i];
+ x &= b;
+ f->v[i] ^= x;
+ g->v[i] ^= x;
+ }
+}
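+
+/* Illustrative: with b == 1 the negation yields an all-ones mask, so x holds
+ * the full XOR difference and the limbs swap; with b == 0, x is 0 and nothing
+ * changes. No secret-dependent branch is taken in either case.
+ */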
+
+/* NOTE: based on fiat-crypto fe_mul, edited for in2=121666, 0, 0. */
+static __always_inline void fe_mul_121666_impl(u64 out[5], const u64 in1[5])
+{
+ { const u64 x10 = in1[4];
+ { const u64 x11 = in1[3];
+ { const u64 x9 = in1[2];
+ { const u64 x7 = in1[1];
+ { const u64 x5 = in1[0];
+ { const u64 x18 = 0;
+ { const u64 x19 = 0;
+ { const u64 x17 = 0;
+ { const u64 x15 = 0;
+ { const u64 x13 = 121666;
+ { u128 x20 = ((u128)x5 * x13);
+ { u128 x21 = (((u128)x5 * x15) + ((u128)x7 * x13));
+ { u128 x22 = ((((u128)x5 * x17) + ((u128)x9 * x13)) + ((u128)x7 * x15));
+ { u128 x23 = (((((u128)x5 * x19) + ((u128)x11 * x13)) + ((u128)x7 * x17)) + ((u128)x9 * x15));
+ { u128 x24 = ((((((u128)x5 * x18) + ((u128)x10 * x13)) + ((u128)x11 * x15)) + ((u128)x7 * x19)) + ((u128)x9 * x17));
+ { u64 x25 = (x10 * 0x13);
+ { u64 x26 = (x7 * 0x13);
+ { u64 x27 = (x9 * 0x13);
+ { u64 x28 = (x11 * 0x13);
+ { u128 x29 = ((((x20 + ((u128)x25 * x15)) + ((u128)x26 * x18)) + ((u128)x27 * x19)) + ((u128)x28 * x17));
+ { u128 x30 = (((x21 + ((u128)x25 * x17)) + ((u128)x27 * x18)) + ((u128)x28 * x19));
+ { u128 x31 = ((x22 + ((u128)x25 * x19)) + ((u128)x28 * x18));
+ { u128 x32 = (x23 + ((u128)x25 * x18));
+ { u64 x33 = (u64) (x29 >> 0x33);
+ { u64 x34 = ((u64)x29 & 0x7ffffffffffff);
+ { u128 x35 = (x33 + x30);
+ { u64 x36 = (u64) (x35 >> 0x33);
+ { u64 x37 = ((u64)x35 & 0x7ffffffffffff);
+ { u128 x38 = (x36 + x31);
+ { u64 x39 = (u64) (x38 >> 0x33);
+ { u64 x40 = ((u64)x38 & 0x7ffffffffffff);
+ { u128 x41 = (x39 + x32);
+ { u64 x42 = (u64) (x41 >> 0x33);
+ { u64 x43 = ((u64)x41 & 0x7ffffffffffff);
+ { u128 x44 = (x42 + x24);
+ { u64 x45 = (u64) (x44 >> 0x33);
+ { u64 x46 = ((u64)x44 & 0x7ffffffffffff);
+ { u64 x47 = (x34 + (0x13 * x45));
+ { u64 x48 = (x47 >> 0x33);
+ { u64 x49 = (x47 & 0x7ffffffffffff);
+ { u64 x50 = (x48 + x37);
+ { u64 x51 = (x50 >> 0x33);
+ { u64 x52 = (x50 & 0x7ffffffffffff);
+ out[0] = x49;
+ out[1] = x52;
+ out[2] = (x51 + x40);
+ out[3] = x43;
+ out[4] = x46;
+ }}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}
+}
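+
+/* Background note (informal): 121666 = (486662 + 2) / 4, i.e. (A + 2) / 4 for
+ * the curve25519 constant A = 486662, which is the multiplier the Montgomery
+ * ladder needs.
+ */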
+#else
/* fe means field element. Here the field is \Z/(2^255-19). An element t,
* entries t[0]...t[9], represents the integer t[0]+2^26 t[1]+2^51 t[2]+2^77
* t[3]+2^102 t[4]+...+2^230 t[9].
@@ -43,11 +384,6 @@ static __always_inline void fe_frombytes_impl(u32 h[10], const u8 *s)
h[9] = (a7>> 6)&((1<<25)-1); /* 25 */
}
-static __always_inline void fe_frombytes(fe *h, const u8 *s)
-{
- fe_frombytes_impl(h->v, s);
-}
-
static __always_inline u8 /*bool*/ addcarryx_u25(u8 /*bool*/ c, u32 a, u32 b, u32 *low)
{
/* This function extracts 25 bits of result and 1 bit of carry (26 total), so
@@ -188,30 +524,6 @@ static __always_inline void fe_tobytes(u8 s[32], const fe *f)
s[31] = h[9] >> 18;
}
-/* h = f */
-static __always_inline void fe_copy(fe *h, const fe *f)
-{
- memmove(h, f, sizeof(u32) * 10);
-}
-
-static __always_inline void fe_copy_lt(fe_loose *h, const fe *f)
-{
- memmove(h, f, sizeof(u32) * 10);
-}
-
-/* h = 0 */
-static __always_inline void fe_0(fe *h)
-{
- memset(h, 0, sizeof(u32) * 10);
-}
-
-/* h = 1 */
-static __always_inline void fe_1(fe *h)
-{
- memset(h, 0, sizeof(u32) * 10);
- h->v[0] = 1;
-}
-
static void fe_add_impl(u32 out[10], const u32 in1[10], const u32 in2[10])
{
{ const u32 x20 = in1[9];
@@ -247,14 +559,6 @@ static void fe_add_impl(u32 out[10], const u32 in1[10], const u32 in2[10])
}}}}}}}}}}}}}}}}}}}}
}
-/* h = f + g
- * Can overlap h with f or g.
- */
-static __always_inline void fe_add(fe_loose *h, const fe *f, const fe *g)
-{
- fe_add_impl(h->v, f->v, g->v);
-}
-
static void fe_sub_impl(u32 out[10], const u32 in1[10], const u32 in2[10])
{
{ const u32 x20 = in1[9];
@@ -290,14 +594,6 @@ static void fe_sub_impl(u32 out[10], const u32 in1[10], const u32 in2[10])
}}}}}}}}}}}}}}}}}}}}
}
-/* h = f - g
- * Can overlap h with f or g.
- */
-static __always_inline void fe_sub(fe_loose *h, const fe *f, const fe *g)
-{
- fe_sub_impl(h->v, f->v, g->v);
-}
-
static void fe_mul_impl(u32 out[10], const u32 in1[10], const u32 in2[10])
{
{ const u32 x20 = in1[9];
@@ -414,21 +710,6 @@ static void fe_mul_impl(u32 out[10], const u32 in1[10], const u32 in2[10])
}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}
}
-static __always_inline void fe_mul_ttt(fe *h, const fe *f, const fe *g)
-{
- fe_mul_impl(h->v, f->v, g->v);
-}
-
-static __always_inline void fe_mul_tlt(fe *h, const fe_loose *f, const fe *g)
-{
- fe_mul_impl(h->v, f->v, g->v);
-}
-
-static __always_inline void fe_mul_tll(fe *h, const fe_loose *f, const fe_loose *g)
-{
- fe_mul_impl(h->v, f->v, g->v);
-}
-
static void fe_sqr_impl(u32 out[10], const u32 in1[10])
{
{ const u32 x17 = in1[9];
@@ -535,73 +816,6 @@ static void fe_sqr_impl(u32 out[10], const u32 in1[10])
}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}
}
-static __always_inline void fe_sq_tl(fe *h, const fe_loose *f)
-{
- fe_sqr_impl(h->v, f->v);
-}
-
-static __always_inline void fe_sq_tt(fe *h, const fe *f)
-{
- fe_sqr_impl(h->v, f->v);
-}
-
-static __always_inline void fe_loose_invert(fe *out, const fe_loose *z)
-{
- fe t0;
- fe t1;
- fe t2;
- fe t3;
- int i;
-
- fe_sq_tl(&t0, z);
- fe_sq_tt(&t1, &t0);
- for (i = 1; i < 2; ++i)
- fe_sq_tt(&t1, &t1);
- fe_mul_tlt(&t1, z, &t1);
- fe_mul_ttt(&t0, &t0, &t1);
- fe_sq_tt(&t2, &t0);
- fe_mul_ttt(&t1, &t1, &t2);
- fe_sq_tt(&t2, &t1);
- for (i = 1; i < 5; ++i)
- fe_sq_tt(&t2, &t2);
- fe_mul_ttt(&t1, &t2, &t1);
- fe_sq_tt(&t2, &t1);
- for (i = 1; i < 10; ++i)
- fe_sq_tt(&t2, &t2);
- fe_mul_ttt(&t2, &t2, &t1);
- fe_sq_tt(&t3, &t2);
- for (i = 1; i < 20; ++i)
- fe_sq_tt(&t3, &t3);
- fe_mul_ttt(&t2, &t3, &t2);
- fe_sq_tt(&t2, &t2);
- for (i = 1; i < 10; ++i)
- fe_sq_tt(&t2, &t2);
- fe_mul_ttt(&t1, &t2, &t1);
- fe_sq_tt(&t2, &t1);
- for (i = 1; i < 50; ++i)
- fe_sq_tt(&t2, &t2);
- fe_mul_ttt(&t2, &t2, &t1);
- fe_sq_tt(&t3, &t2);
- for (i = 1; i < 100; ++i)
- fe_sq_tt(&t3, &t3);
- fe_mul_ttt(&t2, &t3, &t2);
- fe_sq_tt(&t2, &t2);
- for (i = 1; i < 50; ++i)
- fe_sq_tt(&t2, &t2);
- fe_mul_ttt(&t1, &t2, &t1);
- fe_sq_tt(&t1, &t1);
- for (i = 1; i < 5; ++i)
- fe_sq_tt(&t1, &t1);
- fe_mul_ttt(out, &t1, &t0);
-}
-
-static __always_inline void fe_invert(fe *out, const fe *z)
-{
- fe_loose l;
- fe_copy_lt(&l, z);
- fe_loose_invert(out, &l);
-}
-
/* Replace (f,g) with (g,f) if b == 1;
* replace (f,g) with (f,g) if b == 0.
*
@@ -735,6 +949,134 @@ static __always_inline void fe_mul_121666_impl(u32 out[10], const u32 in1[10])
out[9] = x114;
}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}
}
+#endif
+
+static __always_inline void fe_frombytes(fe *h, const u8 *s)
+{
+ fe_frombytes_impl(h->v, s);
+}
+
+/* h = f */
+static __always_inline void fe_copy(fe *h, const fe *f)
+{
+ memmove(h, f, sizeof(fe));
+}
+
+static __always_inline void fe_copy_lt(fe_loose *h, const fe *f)
+{
+ memmove(h, f, sizeof(fe));
+}
+
+/* h = 0 */
+static __always_inline void fe_0(fe *h)
+{
+ memset(h, 0, sizeof(fe));
+}
+
+/* h = 1 */
+static __always_inline void fe_1(fe *h)
+{
+ memset(h, 0, sizeof(fe));
+ h->v[0] = 1;
+}
+
+/* h = f + g
+ * Can overlap h with f or g.
+ */
+static __always_inline void fe_add(fe_loose *h, const fe *f, const fe *g)
+{
+ fe_add_impl(h->v, f->v, g->v);
+}
+
+/* h = f - g
+ * Can overlap h with f or g.
+ */
+static __always_inline void fe_sub(fe_loose *h, const fe *f, const fe *g)
+{
+ fe_sub_impl(h->v, f->v, g->v);
+}
+
+static __always_inline void fe_mul_ttt(fe *h, const fe *f, const fe *g)
+{
+ fe_mul_impl(h->v, f->v, g->v);
+}
+
+static __always_inline void fe_mul_tlt(fe *h, const fe_loose *f, const fe *g)
+{
+ fe_mul_impl(h->v, f->v, g->v);
+}
+
+static __always_inline void fe_mul_tll(fe *h, const fe_loose *f, const fe_loose *g)
+{
+ fe_mul_impl(h->v, f->v, g->v);
+}
+
+static __always_inline void fe_sq_tl(fe *h, const fe_loose *f)
+{
+ fe_sqr_impl(h->v, f->v);
+}
+
+static __always_inline void fe_sq_tt(fe *h, const fe *f)
+{
+ fe_sqr_impl(h->v, f->v);
+}
+
+static __always_inline void fe_loose_invert(fe *out, const fe_loose *z)
+{
+ fe t0;
+ fe t1;
+ fe t2;
+ fe t3;
+ int i;
+
+ fe_sq_tl(&t0, z);
+ fe_sq_tt(&t1, &t0);
+ for (i = 1; i < 2; ++i)
+ fe_sq_tt(&t1, &t1);
+ fe_mul_tlt(&t1, z, &t1);
+ fe_mul_ttt(&t0, &t0, &t1);
+ fe_sq_tt(&t2, &t0);
+ fe_mul_ttt(&t1, &t1, &t2);
+ fe_sq_tt(&t2, &t1);
+ for (i = 1; i < 5; ++i)
+ fe_sq_tt(&t2, &t2);
+ fe_mul_ttt(&t1, &t2, &t1);
+ fe_sq_tt(&t2, &t1);
+ for (i = 1; i < 10; ++i)
+ fe_sq_tt(&t2, &t2);
+ fe_mul_ttt(&t2, &t2, &t1);
+ fe_sq_tt(&t3, &t2);
+ for (i = 1; i < 20; ++i)
+ fe_sq_tt(&t3, &t3);
+ fe_mul_ttt(&t2, &t3, &t2);
+ fe_sq_tt(&t2, &t2);
+ for (i = 1; i < 10; ++i)
+ fe_sq_tt(&t2, &t2);
+ fe_mul_ttt(&t1, &t2, &t1);
+ fe_sq_tt(&t2, &t1);
+ for (i = 1; i < 50; ++i)
+ fe_sq_tt(&t2, &t2);
+ fe_mul_ttt(&t2, &t2, &t1);
+ fe_sq_tt(&t3, &t2);
+ for (i = 1; i < 100; ++i)
+ fe_sq_tt(&t3, &t3);
+ fe_mul_ttt(&t2, &t3, &t2);
+ fe_sq_tt(&t2, &t2);
+ for (i = 1; i < 50; ++i)
+ fe_sq_tt(&t2, &t2);
+ fe_mul_ttt(&t1, &t2, &t1);
+ fe_sq_tt(&t1, &t1);
+ for (i = 1; i < 5; ++i)
+ fe_sq_tt(&t1, &t1);
+ fe_mul_ttt(out, &t1, &t0);
+}
+
+static __always_inline void fe_invert(fe *out, const fe *z)
+{
+ fe_loose l;
+ fe_copy_lt(&l, z);
+ fe_loose_invert(out, &l);
+}

static __always_inline void fe_mul121666(fe *h, const fe_loose *f)
{
 fe_mul_121666_impl(h->v, f->v);
}
diff --git a/src/crypto/curve25519-hacl64.h b/src/crypto/curve25519-hacl64.h
deleted file mode 100644
index e3ddea9..0000000
--- a/src/crypto/curve25519-hacl64.h
+++ /dev/null
@@ -1,739 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0
- *
- * Copyright (C) 2016-2017 INRIA and Microsoft Corporation.
- * Copyright (C) 2018 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
- *
- * This is a machine-generated formally verified implementation of curve25519 DH from:
- * https://github.com/mitls/hacl-star
- */
-
-typedef __uint128_t u128;
-static __always_inline u64 u64_eq_mask(u64 x, u64 y)
-{
- x = ~(x ^ y);
- x &= x << 32;
- x &= x << 16;
- x &= x << 8;
- x &= x << 4;
- x &= x << 2;
- x &= x << 1;
- return ((s64)x) >> 63;
-}
-
-static __always_inline u64 u64_gte_mask(u64 x, u64 y)
-{
- u64 low63 = ~((u64)((s64)((s64)(x & 0x7fffffffffffffffLLU) - (s64)(y & 0x7fffffffffffffffLLU)) >> 63));
- u64 high_bit = ~((u64)((s64)((s64)(x & 0x8000000000000000LLU) - (s64)(y & 0x8000000000000000LLU)) >> 63));
- return low63 & high_bit;
-}
-
-static __always_inline void modulo_carry_top(u64 *b)
-{
- u64 b4 = b[4];
- u64 b0 = b[0];
- u64 b4_ = b4 & 0x7ffffffffffffLLU;
- u64 b0_ = b0 + 19 * (b4 >> 51);
- b[4] = b4_;
- b[0] = b0_;
-}
-
-static __always_inline void fproduct_copy_from_wide_(u64 *output, u128 *input)
-{
- {
- u128 xi = input[0];
- output[0] = ((u64)(xi));
- }
- {
- u128 xi = input[1];
- output[1] = ((u64)(xi));
- }
- {
- u128 xi = input[2];
- output[2] = ((u64)(xi));
- }
- {
- u128 xi = input[3];
- output[3] = ((u64)(xi));
- }
- {
- u128 xi = input[4];
- output[4] = ((u64)(xi));
- }
-}
-
-static __always_inline void fproduct_sum_scalar_multiplication_(u128 *output, u64 *input, u64 s)
-{
- u32 i;
- for (i = 0; i < 5; ++i) {
- u128 xi = output[i];
- u64 yi = input[i];
- output[i] = ((xi) + (((u128)(yi) * (s))));
- }
-}
-
-static __always_inline void fproduct_carry_wide_(u128 *tmp)
-{
- u32 i;
- for (i = 0; i < 4; ++i) {
- u32 ctr = i;
- u128 tctr = tmp[ctr];
- u128 tctrp1 = tmp[ctr + 1];
- u64 r0 = ((u64)(tctr)) & 0x7ffffffffffffLLU;
- u128 c = ((tctr) >> (51));
- tmp[ctr] = ((u128)(r0));
- tmp[ctr + 1] = ((tctrp1) + (c));
- }
-}
-
-static __always_inline void fmul_shift_reduce(u64 *output)
-{
- u64 tmp = output[4];
- u64 b0;
- {
- u32 ctr = 5 - 0 - 1;
- u64 z = output[ctr - 1];
- output[ctr] = z;
- }
- {
- u32 ctr = 5 - 1 - 1;
- u64 z = output[ctr - 1];
- output[ctr] = z;
- }
- {
- u32 ctr = 5 - 2 - 1;
- u64 z = output[ctr - 1];
- output[ctr] = z;
- }
- {
- u32 ctr = 5 - 3 - 1;
- u64 z = output[ctr - 1];
- output[ctr] = z;
- }
- output[0] = tmp;
- b0 = output[0];
- output[0] = 19 * b0;
-}
-
-static __always_inline void fmul_mul_shift_reduce_(u128 *output, u64 *input, u64 *input21)
-{
- u32 i;
- u64 input2i;
- {
- u64 input2i = input21[0];
- fproduct_sum_scalar_multiplication_(output, input, input2i);
- fmul_shift_reduce(input);
- }
- {
- u64 input2i = input21[1];
- fproduct_sum_scalar_multiplication_(output, input, input2i);
- fmul_shift_reduce(input);
- }
- {
- u64 input2i = input21[2];
- fproduct_sum_scalar_multiplication_(output, input, input2i);
- fmul_shift_reduce(input);
- }
- {
- u64 input2i = input21[3];
- fproduct_sum_scalar_multiplication_(output, input, input2i);
- fmul_shift_reduce(input);
- }
- i = 4;
- input2i = input21[i];
- fproduct_sum_scalar_multiplication_(output, input, input2i);
-}
-
-static __always_inline void fmul_fmul(u64 *output, u64 *input, u64 *input21)
-{
- u64 tmp[5];
- memcpy(tmp, input, 5 * sizeof(*input));
- {
- u128 b4;
- u128 b0;
- u128 b4_;
- u128 b0_;
- u64 i0;
- u64 i1;
- u64 i0_;
- u64 i1_;
- u128 t[5];
- {
- u32 _i;
- for (_i = 0; _i < 5; ++_i)
- t[_i] = ((u128)(0));
- }
- fmul_mul_shift_reduce_(t, tmp, input21);
- fproduct_carry_wide_(t);
- b4 = t[4];
- b0 = t[0];
- b4_ = ((b4) & (((u128)(0x7ffffffffffffLLU))));
- b0_ = ((b0) + (((u128)(19) * (((u64)(((b4) >> (51))))))));
- t[4] = b4_;
- t[0] = b0_;
- fproduct_copy_from_wide_(output, t);
- i0 = output[0];
- i1 = output[1];
- i0_ = i0 & 0x7ffffffffffffLLU;
- i1_ = i1 + (i0 >> 51);
- output[0] = i0_;
- output[1] = i1_;
- }
-}
-
-static __always_inline void fsquare_fsquare__(u128 *tmp, u64 *output)
-{
- u64 r0 = output[0];
- u64 r1 = output[1];
- u64 r2 = output[2];
- u64 r3 = output[3];
- u64 r4 = output[4];
- u64 d0 = r0 * 2;
- u64 d1 = r1 * 2;
- u64 d2 = r2 * 2 * 19;
- u64 d419 = r4 * 19;
- u64 d4 = d419 * 2;
- u128 s0 = ((((((u128)(r0) * (r0))) + (((u128)(d4) * (r1))))) + (((u128)(d2) * (r3))));
- u128 s1 = ((((((u128)(d0) * (r1))) + (((u128)(d4) * (r2))))) + (((u128)(r3 * 19) * (r3))));
- u128 s2 = ((((((u128)(d0) * (r2))) + (((u128)(r1) * (r1))))) + (((u128)(d4) * (r3))));
- u128 s3 = ((((((u128)(d0) * (r3))) + (((u128)(d1) * (r2))))) + (((u128)(r4) * (d419))));
- u128 s4 = ((((((u128)(d0) * (r4))) + (((u128)(d1) * (r3))))) + (((u128)(r2) * (r2))));
- tmp[0] = s0;
- tmp[1] = s1;
- tmp[2] = s2;
- tmp[3] = s3;
- tmp[4] = s4;
-}
-
-static __always_inline void fsquare_fsquare_(u128 *tmp, u64 *output)
-{
- u128 b4;
- u128 b0;
- u128 b4_;
- u128 b0_;
- u64 i0;
- u64 i1;
- u64 i0_;
- u64 i1_;
- fsquare_fsquare__(tmp, output);
- fproduct_carry_wide_(tmp);
- b4 = tmp[4];
- b0 = tmp[0];
- b4_ = ((b4) & (((u128)(0x7ffffffffffffLLU))));
- b0_ = ((b0) + (((u128)(19) * (((u64)(((b4) >> (51))))))));
- tmp[4] = b4_;
- tmp[0] = b0_;
- fproduct_copy_from_wide_(output, tmp);
- i0 = output[0];
- i1 = output[1];
- i0_ = i0 & 0x7ffffffffffffLLU;
- i1_ = i1 + (i0 >> 51);
- output[0] = i0_;
- output[1] = i1_;
-}
-
-static __always_inline void fsquare_fsquare_times_(u64 *input, u128 *tmp, u32 count1)
-{
- u32 i;
- fsquare_fsquare_(tmp, input);
- for (i = 1; i < count1; ++i)
- fsquare_fsquare_(tmp, input);
-}
-
-static __always_inline void fsquare_fsquare_times(u64 *output, u64 *input, u32 count1)
-{
- u128 t[5];
- {
- u32 _i;
- for (_i = 0; _i < 5; ++_i)
- t[_i] = ((u128)(0));
- }
- memcpy(output, input, 5 * sizeof(*input));
- fsquare_fsquare_times_(output, t, count1);
-}
-
-static __always_inline void fsquare_fsquare_times_inplace(u64 *output, u32 count1)
-{
- u128 t[5];
- {
- u32 _i;
- for (_i = 0; _i < 5; ++_i)
- t[_i] = ((u128)(0));
- }
- fsquare_fsquare_times_(output, t, count1);
-}
-
-static __always_inline void crecip_crecip(u64 *out, u64 *z)
-{
- u64 buf[20] = { 0 };
- u64 *a0 = buf;
- u64 *t00 = buf + 5;
- u64 *b0 = buf + 10;
- u64 *t01;
- u64 *b1;
- u64 *c0;
- u64 *a;
- u64 *t0;
- u64 *b;
- u64 *c;
- fsquare_fsquare_times(a0, z, 1);
- fsquare_fsquare_times(t00, a0, 2);
- fmul_fmul(b0, t00, z);
- fmul_fmul(a0, b0, a0);
- fsquare_fsquare_times(t00, a0, 1);
- fmul_fmul(b0, t00, b0);
- fsquare_fsquare_times(t00, b0, 5);
- t01 = buf + 5;
- b1 = buf + 10;
- c0 = buf + 15;
- fmul_fmul(b1, t01, b1);
- fsquare_fsquare_times(t01, b1, 10);
- fmul_fmul(c0, t01, b1);
- fsquare_fsquare_times(t01, c0, 20);
- fmul_fmul(t01, t01, c0);
- fsquare_fsquare_times_inplace(t01, 10);
- fmul_fmul(b1, t01, b1);
- fsquare_fsquare_times(t01, b1, 50);
- a = buf;
- t0 = buf + 5;
- b = buf + 10;
- c = buf + 15;
- fmul_fmul(c, t0, b);
- fsquare_fsquare_times(t0, c, 100);
- fmul_fmul(t0, t0, c);
- fsquare_fsquare_times_inplace(t0, 50);
- fmul_fmul(t0, t0, b);
- fsquare_fsquare_times_inplace(t0, 5);
- fmul_fmul(out, t0, a);
-}
-
-static __always_inline void fsum(u64 *a, u64 *b)
-{
- u32 i;
- for (i = 0; i < 5; ++i) {
- u64 xi = a[i];
- u64 yi = b[i];
- a[i] = xi + yi;
- }
-}
-
-static __always_inline void fdifference(u64 *a, u64 *b)
-{
- u64 tmp[5] = { 0 };
- u64 b0;
- u64 b1;
- u64 b2;
- u64 b3;
- u64 b4;
- memcpy(tmp, b, 5 * sizeof(*b));
- b0 = tmp[0];
- b1 = tmp[1];
- b2 = tmp[2];
- b3 = tmp[3];
- b4 = tmp[4];
- tmp[0] = b0 + 0x3fffffffffff68LLU;
- tmp[1] = b1 + 0x3ffffffffffff8LLU;
- tmp[2] = b2 + 0x3ffffffffffff8LLU;
- tmp[3] = b3 + 0x3ffffffffffff8LLU;
- tmp[4] = b4 + 0x3ffffffffffff8LLU;
- {
- u64 xi = a[0];
- u64 yi = tmp[0];
- a[0] = yi - xi;
- }
- {
- u64 xi = a[1];
- u64 yi = tmp[1];
- a[1] = yi - xi;
- }
- {
- u64 xi = a[2];
- u64 yi = tmp[2];
- a[2] = yi - xi;
- }
- {
- u64 xi = a[3];
- u64 yi = tmp[3];
- a[3] = yi - xi;
- }
- {
- u64 xi = a[4];
- u64 yi = tmp[4];
- a[4] = yi - xi;
- }
-}
-
-static __always_inline void fscalar(u64 *output, u64 *b, u64 s)
-{
- u128 tmp[5];
- u128 b4;
- u128 b0;
- u128 b4_;
- u128 b0_;
- {
- u64 xi = b[0];
- tmp[0] = ((u128)(xi) * (s));
- }
- {
- u64 xi = b[1];
- tmp[1] = ((u128)(xi) * (s));
- }
- {
- u64 xi = b[2];
- tmp[2] = ((u128)(xi) * (s));
- }
- {
- u64 xi = b[3];
- tmp[3] = ((u128)(xi) * (s));
- }
- {
- u64 xi = b[4];
- tmp[4] = ((u128)(xi) * (s));
- }
- fproduct_carry_wide_(tmp);
- b4 = tmp[4];
- b0 = tmp[0];
- b4_ = ((b4) & (((u128)(0x7ffffffffffffLLU))));
- b0_ = ((b0) + (((u128)(19) * (((u64)(((b4) >> (51))))))));
- tmp[4] = b4_;
- tmp[0] = b0_;
- fproduct_copy_from_wide_(output, tmp);
-}
-
-static __always_inline void fmul(u64 *output, u64 *a, u64 *b)
-{
- fmul_fmul(output, a, b);
-}
-
-static __always_inline void crecip(u64 *output, u64 *input)
-{
- crecip_crecip(output, input);
-}
-
-static __always_inline void point_swap_conditional_step(u64 *a, u64 *b, u64 swap1, u32 ctr)
-{
- u32 i = ctr - 1;
- u64 ai = a[i];
- u64 bi = b[i];
- u64 x = swap1 & (ai ^ bi);
- u64 ai1 = ai ^ x;
- u64 bi1 = bi ^ x;
- a[i] = ai1;
- b[i] = bi1;
-}
-
-static __always_inline void point_swap_conditional_(u64 *a, u64 *b, u64 swap1, u32 ctr)
-{
- u32 i;
- for (i = ctr; i > 0; --i)
- point_swap_conditional_step(a, b, swap1, i);
-}
-
-static __always_inline void point_swap_conditional(u64 *a, u64 *b, u64 iswap)
-{
- u64 swap1 = 0 - iswap;
- point_swap_conditional_(a, b, swap1, 5);
- point_swap_conditional_(a + 5, b + 5, swap1, 5);
-}
-
-static __always_inline void point_copy(u64 *output, u64 *input)
-{
- memcpy(output, input, 5 * sizeof(*input));
- memcpy(output + 5, input + 5, 5 * sizeof(*input));
-}
-
-static __always_inline void addanddouble_fmonty(u64 *pp, u64 *ppq, u64 *p, u64 *pq, u64 *qmqp)
-{
- u64 *qx = qmqp;
- u64 *x2 = pp;
- u64 *z2 = pp + 5;
- u64 *x3 = ppq;
- u64 *z3 = ppq + 5;
- u64 *x = p;
- u64 *z = p + 5;
- u64 *xprime = pq;
- u64 *zprime = pq + 5;
- u64 buf[40] = { 0 };
- u64 *origx = buf;
- u64 *origxprime0 = buf + 5;
- u64 *xxprime0;
- u64 *zzprime0;
- u64 *origxprime;
- xxprime0 = buf + 25;
- zzprime0 = buf + 30;
- memcpy(origx, x, 5 * sizeof(*x));
- fsum(x, z);
- fdifference(z, origx);
- memcpy(origxprime0, xprime, 5 * sizeof(*xprime));
- fsum(xprime, zprime);
- fdifference(zprime, origxprime0);
- fmul(xxprime0, xprime, z);
- fmul(zzprime0, x, zprime);
- origxprime = buf + 5;
- {
- u64 *xx0;
- u64 *zz0;
- u64 *xxprime;
- u64 *zzprime;
- u64 *zzzprime;
- xx0 = buf + 15;
- zz0 = buf + 20;
- xxprime = buf + 25;
- zzprime = buf + 30;
- zzzprime = buf + 35;
- memcpy(origxprime, xxprime, 5 * sizeof(*xxprime));
- fsum(xxprime, zzprime);
- fdifference(zzprime, origxprime);
- fsquare_fsquare_times(x3, xxprime, 1);
- fsquare_fsquare_times(zzzprime, zzprime, 1);
- fmul(z3, zzzprime, qx);
- fsquare_fsquare_times(xx0, x, 1);
- fsquare_fsquare_times(zz0, z, 1);
- {
- u64 *zzz;
- u64 *xx;
- u64 *zz;
- u64 scalar;
- zzz = buf + 10;
- xx = buf + 15;
- zz = buf + 20;
- fmul(x2, xx, zz);
- fdifference(zz, xx);
- scalar = 121665;
- fscalar(zzz, zz, scalar);
- fsum(zzz, xx);
- fmul(z2, zzz, zz);
- }
- }
-}
-
-static __always_inline void ladder_smallloop_cmult_small_loop_step(u64 *nq, u64 *nqpq, u64 *nq2, u64 *nqpq2, u64 *q, u8 byt)
-{
- u64 bit0 = (u64)(byt >> 7);
- u64 bit;
- point_swap_conditional(nq, nqpq, bit0);
- addanddouble_fmonty(nq2, nqpq2, nq, nqpq, q);
- bit = (u64)(byt >> 7);
- point_swap_conditional(nq2, nqpq2, bit);
-}
-
-static __always_inline void ladder_smallloop_cmult_small_loop_double_step(u64 *nq, u64 *nqpq, u64 *nq2, u64 *nqpq2, u64 *q, u8 byt)
-{
- u8 byt1;
- ladder_smallloop_cmult_small_loop_step(nq, nqpq, nq2, nqpq2, q, byt);
- byt1 = byt << 1;
- ladder_smallloop_cmult_small_loop_step(nq2, nqpq2, nq, nqpq, q, byt1);
-}
-
-static __always_inline void ladder_smallloop_cmult_small_loop(u64 *nq, u64 *nqpq, u64 *nq2, u64 *nqpq2, u64 *q, u8 byt, u32 i)
-{
- while (i--) {
- ladder_smallloop_cmult_small_loop_double_step(nq, nqpq, nq2, nqpq2, q, byt);
- byt <<= 2;
- }
-}
-
-static __always_inline void ladder_bigloop_cmult_big_loop(u8 *n1, u64 *nq, u64 *nqpq, u64 *nq2, u64 *nqpq2, u64 *q, u32 i)
-{
- while (i--) {
- u8 byte = n1[i];
- ladder_smallloop_cmult_small_loop(nq, nqpq, nq2, nqpq2, q, byte, 4);
- }
-}
-
-static __always_inline void ladder_cmult(u64 *result, u8 *n1, u64 *q)
-{
- u64 point_buf[40] = { 0 };
- u64 *nq = point_buf;
- u64 *nqpq = point_buf + 10;
- u64 *nq2 = point_buf + 20;
- u64 *nqpq2 = point_buf + 30;
- point_copy(nqpq, q);
- nq[0] = 1;
- ladder_bigloop_cmult_big_loop(n1, nq, nqpq, nq2, nqpq2, q, 32);
- point_copy(result, nq);
-}
-
-static __always_inline void format_fexpand(u64 *output, const u8 *input)
-{
- const u8 *x00 = input + 6;
- const u8 *x01 = input + 12;
- const u8 *x02 = input + 19;
- const u8 *x0 = input + 24;
- u64 i0, i1, i2, i3, i4, output0, output1, output2, output3, output4;
- i0 = le64_to_cpup((__force __le64 *)input);
- i1 = le64_to_cpup((__force __le64 *)x00);
- i2 = le64_to_cpup((__force __le64 *)x01);
- i3 = le64_to_cpup((__force __le64 *)x02);
- i4 = le64_to_cpup((__force __le64 *)x0);
- output0 = i0 & 0x7ffffffffffffLLU;
- output1 = i1 >> 3 & 0x7ffffffffffffLLU;
- output2 = i2 >> 6 & 0x7ffffffffffffLLU;
- output3 = i3 >> 1 & 0x7ffffffffffffLLU;
- output4 = i4 >> 12 & 0x7ffffffffffffLLU;
- output[0] = output0;
- output[1] = output1;
- output[2] = output2;
- output[3] = output3;
- output[4] = output4;
-}
-
-static __always_inline void format_fcontract_first_carry_pass(u64 *input)
-{
- u64 t0 = input[0];
- u64 t1 = input[1];
- u64 t2 = input[2];
- u64 t3 = input[3];
- u64 t4 = input[4];
- u64 t1_ = t1 + (t0 >> 51);
- u64 t0_ = t0 & 0x7ffffffffffffLLU;
- u64 t2_ = t2 + (t1_ >> 51);
- u64 t1__ = t1_ & 0x7ffffffffffffLLU;
- u64 t3_ = t3 + (t2_ >> 51);
- u64 t2__ = t2_ & 0x7ffffffffffffLLU;
- u64 t4_ = t4 + (t3_ >> 51);
- u64 t3__ = t3_ & 0x7ffffffffffffLLU;
- input[0] = t0_;
- input[1] = t1__;
- input[2] = t2__;
- input[3] = t3__;
- input[4] = t4_;
-}
-
-static __always_inline void format_fcontract_first_carry_full(u64 *input)
-{
- format_fcontract_first_carry_pass(input);
- modulo_carry_top(input);
-}
-
-static __always_inline void format_fcontract_second_carry_pass(u64 *input)
-{
- u64 t0 = input[0];
- u64 t1 = input[1];
- u64 t2 = input[2];
- u64 t3 = input[3];
- u64 t4 = input[4];
- u64 t1_ = t1 + (t0 >> 51);
- u64 t0_ = t0 & 0x7ffffffffffffLLU;
- u64 t2_ = t2 + (t1_ >> 51);
- u64 t1__ = t1_ & 0x7ffffffffffffLLU;
- u64 t3_ = t3 + (t2_ >> 51);
- u64 t2__ = t2_ & 0x7ffffffffffffLLU;
- u64 t4_ = t4 + (t3_ >> 51);
- u64 t3__ = t3_ & 0x7ffffffffffffLLU;
- input[0] = t0_;
- input[1] = t1__;
- input[2] = t2__;
- input[3] = t3__;
- input[4] = t4_;
-}
-
-static __always_inline void format_fcontract_second_carry_full(u64 *input)
-{
- u64 i0;
- u64 i1;
- u64 i0_;
- u64 i1_;
- format_fcontract_second_carry_pass(input);
- modulo_carry_top(input);
- i0 = input[0];
- i1 = input[1];
- i0_ = i0 & 0x7ffffffffffffLLU;
- i1_ = i1 + (i0 >> 51);
- input[0] = i0_;
- input[1] = i1_;
-}
-
-static __always_inline void format_fcontract_trim(u64 *input)
-{
- u64 a0 = input[0];
- u64 a1 = input[1];
- u64 a2 = input[2];
- u64 a3 = input[3];
- u64 a4 = input[4];
- u64 mask0 = u64_gte_mask(a0, 0x7ffffffffffedLLU);
- u64 mask1 = u64_eq_mask(a1, 0x7ffffffffffffLLU);
- u64 mask2 = u64_eq_mask(a2, 0x7ffffffffffffLLU);
- u64 mask3 = u64_eq_mask(a3, 0x7ffffffffffffLLU);
- u64 mask4 = u64_eq_mask(a4, 0x7ffffffffffffLLU);
- u64 mask = (((mask0 & mask1) & mask2) & mask3) & mask4;
- u64 a0_ = a0 - (0x7ffffffffffedLLU & mask);
- u64 a1_ = a1 - (0x7ffffffffffffLLU & mask);
- u64 a2_ = a2 - (0x7ffffffffffffLLU & mask);
- u64 a3_ = a3 - (0x7ffffffffffffLLU & mask);
- u64 a4_ = a4 - (0x7ffffffffffffLLU & mask);
- input[0] = a0_;
- input[1] = a1_;
- input[2] = a2_;
- input[3] = a3_;
- input[4] = a4_;
-}
-
-static __always_inline void format_fcontract_store(u8 *output, u64 *input)
-{
- u64 t0 = input[0];
- u64 t1 = input[1];
- u64 t2 = input[2];
- u64 t3 = input[3];
- u64 t4 = input[4];
- u64 o0 = t1 << 51 | t0;
- u64 o1 = t2 << 38 | t1 >> 13;
- u64 o2 = t3 << 25 | t2 >> 26;
- u64 o3 = t4 << 12 | t3 >> 39;
- u8 *b0 = output;
- u8 *b1 = output + 8;
- u8 *b2 = output + 16;
- u8 *b3 = output + 24;
- *(__force __le64 *)b0 = cpu_to_le64(o0);
- *(__force __le64 *)b1 = cpu_to_le64(o1);
- *(__force __le64 *)b2 = cpu_to_le64(o2);
- *(__force __le64 *)b3 = cpu_to_le64(o3);
-}
-
-static __always_inline void format_fcontract(u8 *output, u64 *input)
-{
- format_fcontract_first_carry_full(input);
- format_fcontract_second_carry_full(input);
- format_fcontract_trim(input);
- format_fcontract_store(output, input);
-}
-
-static __always_inline void format_scalar_of_point(u8 *scalar, u64 *point)
-{
- u64 *x = point;
- u64 *z = point + 5;
- u64 buf[10] __aligned(32) = { 0 };
- u64 *zmone = buf;
- u64 *sc = buf + 5;
- crecip(zmone, z);
- fmul(sc, x, zmone);
- format_fcontract(scalar, sc);
-}
-
-static void curve25519_generic(u8 mypublic[CURVE25519_POINT_SIZE], const u8 secret[CURVE25519_POINT_SIZE], const u8 basepoint[CURVE25519_POINT_SIZE])
-{
- u64 buf0[10] __aligned(32) = { 0 };
- u64 *x0 = buf0;
- u64 *z = buf0 + 5;
- u64 *q;
- format_fexpand(x0, basepoint);
- z[0] = 1;
- q = buf0;
- {
- u8 e[32] __aligned(32) = { 0 };
- u8 *scalar;
- memcpy(e, secret, 32);
- normalize_secret(e);
- scalar = e;
- {
- u64 buf[15] = { 0 };
- u64 *nq = buf;
- u64 *x = nq;
- x[0] = 1;
- ladder_cmult(nq, scalar, q);
- format_scalar_of_point(mypublic, nq);
- memzero_explicit(buf, sizeof(buf));
- }
- memzero_explicit(e, sizeof(e));
- }
- memzero_explicit(buf0, sizeof(buf0));
-}
diff --git a/src/crypto/curve25519.c b/src/crypto/curve25519.c
index eba94cd..38020be 100644
--- a/src/crypto/curve25519.c
+++ b/src/crypto/curve25519.c
@@ -25,11 +25,7 @@ static __always_inline void normalize_secret(u8 secret[CURVE25519_POINT_SIZE])
void __init curve25519_fpu_init(void) { }
#endif
-#if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__)
-#include "curve25519-hacl64.h"
-#else
-#include "curve25519-fiat32.h"
-#endif
+#include "curve25519-generic.h"
static const u8 null_point[CURVE25519_POINT_SIZE] = { 0 };
diff --git a/src/tests/qemu/Makefile b/src/tests/qemu/Makefile
index 97cd80d..7c29955 100644
--- a/src/tests/qemu/Makefile
+++ b/src/tests/qemu/Makefile
@@ -25,7 +25,7 @@ DOWNLOAD := wget -O
MIRROR := https://download.wireguard.com/qemu-test/distfiles/
WIREGUARD_SOURCES := $(wildcard ../../*.c ../../*.h ../../selftest/*.h ../../crypto/*.c ../../crypto/*.h ../../crypto/*.S ../../compat/*.h)
-TOOLS_SOURCES := $(wildcard ../../tools/*.c ../../tools/*.h ../../uapi/*.h ../../crypto/curve25519-hacl64.h ../../crypto/curve25519-fiat32.h)
+TOOLS_SOURCES := $(wildcard ../../tools/*.c ../../tools/*.h ../../uapi/*.h ../../crypto/curve25519-generic.h)
default: qemu