author    Jason A. Donenfeld <Jason@zx2c4.com>    2018-05-29 16:06:57 +0200
committer Jason A. Donenfeld <Jason@zx2c4.com>    2018-05-31 01:24:51 +0200
commit    101b71c192047d5966b3c59dba088957ca51e588 (patch)
tree      1cb72ed6ff1916eb71d038b6b426eef1962f7350 /src
parent    520af047f5b84c92202e3f42f49281f37034d1c0 (diff)
chacha20poly1305: split up into separate files
Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
Diffstat (limited to 'src')
-rw-r--r--  src/Kbuild                       |   2
-rw-r--r--  src/crypto/chacha20.c            | 239
-rw-r--r--  src/crypto/chacha20.h            |  45
-rw-r--r--  src/crypto/chacha20poly1305.c    | 642
-rw-r--r--  src/crypto/chacha20poly1305.h    |   3
-rw-r--r--  src/crypto/poly1305.c            | 375
-rw-r--r--  src/crypto/poly1305.h            |  34
-rw-r--r--  src/main.c                       |   6
-rw-r--r--  src/selftest/chacha20poly1305.h  |  19
-rw-r--r--  src/selftest/poly1305.h          |   2
10 files changed, 740 insertions(+), 627 deletions(-)
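
With the split, the combined chacha20poly1305_fpu_init() disappears and each primitive registers its own CPU-feature detection. A sketch of the resulting module-init sequence, as updated in the src/main.c hunk below:

	chacha20_fpu_init();   /* SSSE3/AVX2/AVX-512 or NEON detection for ChaCha20 */
	poly1305_fpu_init();   /* AVX/AVX2/AVX-512 or NEON detection for Poly1305 */
	blake2s_fpu_init();
	curve25519_fpu_init();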
diff --git a/src/Kbuild b/src/Kbuild
index feb7730..2bfe2bb 100644
--- a/src/Kbuild
+++ b/src/Kbuild
@@ -7,7 +7,7 @@ ccflags-$(CONFIG_WIREGUARD_DEBUG) += -DDEBUG -g
ccflags-y += -Wframe-larger-than=8192
ccflags-y += -D'pr_fmt(fmt)=KBUILD_MODNAME ": " fmt'
wireguard-y := main.o noise.o device.o peer.o timers.o queueing.o send.o receive.o socket.o hashtables.o allowedips.o ratelimiter.o cookie.o netlink.o
-wireguard-y += crypto/curve25519.o crypto/chacha20poly1305.o crypto/blake2s.o
+wireguard-y += crypto/chacha20.o crypto/poly1305.o crypto/chacha20poly1305.o crypto/curve25519.o crypto/blake2s.o
wireguard-$(CONFIG_X86_64) += crypto/chacha20-x86_64.o crypto/poly1305-x86_64.o crypto/blake2s-x86_64.o
wireguard-$(CONFIG_ARM) += crypto/chacha20-arm.o crypto/poly1305-arm.o crypto/curve25519-arm.o
diff --git a/src/crypto/chacha20.c b/src/crypto/chacha20.c
new file mode 100644
index 0000000..0444d01
--- /dev/null
+++ b/src/crypto/chacha20.c
@@ -0,0 +1,239 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2015-2018 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#include "chacha20.h"
+
+#include <linux/kernel.h>
+#include <crypto/algapi.h>
+
+#if defined(CONFIG_X86_64)
+#include <asm/fpu/api.h>
+#include <asm/cpufeature.h>
+#include <asm/processor.h>
+#include <asm/intel-family.h>
+#ifdef CONFIG_AS_SSSE3
+asmlinkage void hchacha20_ssse3(u8 *derived_key, const u8 *nonce, const u8 *key);
+asmlinkage void chacha20_ssse3(u8 *out, const u8 *in, size_t len, const u32 key[8], const u32 counter[4]);
+#endif
+#ifdef CONFIG_AS_AVX2
+asmlinkage void chacha20_avx2(u8 *out, const u8 *in, size_t len, const u32 key[8], const u32 counter[4]);
+#endif
+#ifdef CONFIG_AS_AVX512
+asmlinkage void chacha20_avx512(u8 *out, const u8 *in, size_t len, const u32 key[8], const u32 counter[4]);
+asmlinkage void chacha20_avx512vl(u8 *out, const u8 *in, size_t len, const u32 key[8], const u32 counter[4]);
+#endif
+
+static bool chacha20_use_ssse3 __ro_after_init;
+static bool chacha20_use_avx2 __ro_after_init;
+static bool chacha20_use_avx512 __ro_after_init;
+static bool chacha20_use_avx512vl __ro_after_init;
+
+void __init chacha20_fpu_init(void)
+{
+#ifndef CONFIG_UML
+ chacha20_use_ssse3 = boot_cpu_has(X86_FEATURE_SSSE3);
+ chacha20_use_avx2 = boot_cpu_has(X86_FEATURE_AVX) && boot_cpu_has(X86_FEATURE_AVX2) &&
+ cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL);
+#ifndef COMPAT_CANNOT_USE_AVX512
+ chacha20_use_avx512 = boot_cpu_has(X86_FEATURE_AVX) && boot_cpu_has(X86_FEATURE_AVX2) && boot_cpu_has(X86_FEATURE_AVX512F) &&
+ cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM | XFEATURE_MASK_AVX512, NULL) &&
+ boot_cpu_data.x86_model != INTEL_FAM6_SKYLAKE_X;
+ chacha20_use_avx512vl = boot_cpu_has(X86_FEATURE_AVX) && boot_cpu_has(X86_FEATURE_AVX2) && boot_cpu_has(X86_FEATURE_AVX512F) && boot_cpu_has(X86_FEATURE_AVX512VL) &&
+ cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM | XFEATURE_MASK_AVX512, NULL);
+#endif
+#endif
+}
+#elif defined(CONFIG_ARM) || defined(CONFIG_ARM64)
+asmlinkage void chacha20_arm(u8 *out, const u8 *in, size_t len, const u32 key[8], const u32 counter[4]);
+#if IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && (!defined(__LINUX_ARM_ARCH__) || __LINUX_ARM_ARCH__ >= 7)
+#define ARM_USE_NEON
+#include <asm/hwcap.h>
+#include <asm/neon.h>
+asmlinkage void chacha20_neon(u8 *out, const u8 *in, size_t len, const u32 key[8], const u32 counter[4]);
+#endif
+static bool chacha20_use_neon __ro_after_init;
+void __init chacha20_fpu_init(void)
+{
+#if defined(CONFIG_ARM64)
+ chacha20_use_neon = elf_hwcap & HWCAP_ASIMD;
+#elif defined(CONFIG_ARM)
+ chacha20_use_neon = elf_hwcap & HWCAP_NEON;
+#endif
+}
+#elif defined(CONFIG_MIPS) && defined(CONFIG_CPU_MIPS32_R2)
+asmlinkage void chacha20_mips(u8 *out, const u8 *in, size_t len, const u32 key[8], const u32 counter[4]);
+void __init chacha20_fpu_init(void) { }
+#else
+void __init chacha20_fpu_init(void) { }
+#endif
+
+#define EXPAND_32_BYTE_K 0x61707865U, 0x3320646eU, 0x79622d32U, 0x6b206574U
+
+#define QUARTER_ROUND(x, a, b, c, d) ( \
+ x[a] += x[b], \
+ x[d] = rol32((x[d] ^ x[a]), 16), \
+ x[c] += x[d], \
+ x[b] = rol32((x[b] ^ x[c]), 12), \
+ x[a] += x[b], \
+ x[d] = rol32((x[d] ^ x[a]), 8), \
+ x[c] += x[d], \
+ x[b] = rol32((x[b] ^ x[c]), 7) \
+)
+
+#define C(i, j) (i * 4 + j)
+
+#define DOUBLE_ROUND(x) ( \
+ /* Column Round */ \
+ QUARTER_ROUND(x, C(0, 0), C(1, 0), C(2, 0), C(3, 0)), \
+ QUARTER_ROUND(x, C(0, 1), C(1, 1), C(2, 1), C(3, 1)), \
+ QUARTER_ROUND(x, C(0, 2), C(1, 2), C(2, 2), C(3, 2)), \
+ QUARTER_ROUND(x, C(0, 3), C(1, 3), C(2, 3), C(3, 3)), \
+ /* Diagonal Round */ \
+ QUARTER_ROUND(x, C(0, 0), C(1, 1), C(2, 2), C(3, 3)), \
+ QUARTER_ROUND(x, C(0, 1), C(1, 2), C(2, 3), C(3, 0)), \
+ QUARTER_ROUND(x, C(0, 2), C(1, 3), C(2, 0), C(3, 1)), \
+ QUARTER_ROUND(x, C(0, 3), C(1, 0), C(2, 1), C(3, 2)) \
+)
+
+#define TWENTY_ROUNDS(x) ( \
+ DOUBLE_ROUND(x), \
+ DOUBLE_ROUND(x), \
+ DOUBLE_ROUND(x), \
+ DOUBLE_ROUND(x), \
+ DOUBLE_ROUND(x), \
+ DOUBLE_ROUND(x), \
+ DOUBLE_ROUND(x), \
+ DOUBLE_ROUND(x), \
+ DOUBLE_ROUND(x), \
+ DOUBLE_ROUND(x) \
+)
+
+static void chacha20_block_generic(__le32 *stream, u32 *state)
+{
+ u32 x[CHACHA20_BLOCK_SIZE / sizeof(u32)];
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(x); ++i)
+ x[i] = state[i];
+
+ TWENTY_ROUNDS(x);
+
+ for (i = 0; i < ARRAY_SIZE(x); ++i)
+ stream[i] = cpu_to_le32(x[i] + state[i]);
+
+ ++state[12];
+}
+
+static void chacha20_generic(u8 *out, const u8 *in, u32 len, const u32 key[8], const u32 counter[4])
+{
+ __le32 buf[CHACHA20_BLOCK_SIZE / sizeof(__le32)];
+ u32 x[] = {
+ EXPAND_32_BYTE_K,
+ key[0], key[1], key[2], key[3],
+ key[4], key[5], key[6], key[7],
+ counter[0], counter[1], counter[2], counter[3]
+ };
+
+ if (out != in)
+ memcpy(out, in, len);
+
+ while (len >= CHACHA20_BLOCK_SIZE) {
+ chacha20_block_generic(buf, x);
+ crypto_xor(out, (u8 *)buf, CHACHA20_BLOCK_SIZE);
+ len -= CHACHA20_BLOCK_SIZE;
+ out += CHACHA20_BLOCK_SIZE;
+ }
+ if (len) {
+ chacha20_block_generic(buf, x);
+ crypto_xor(out, (u8 *)buf, len);
+ }
+}
+
+void chacha20(struct chacha20_ctx *state, u8 *dst, const u8 *src, u32 len, bool have_simd)
+{
+ if (!have_simd
+#if defined(CONFIG_X86_64)
+ || !chacha20_use_ssse3
+
+#elif defined(ARM_USE_NEON)
+ || !chacha20_use_neon
+#endif
+ )
+ goto no_simd;
+
+#if defined(CONFIG_X86_64)
+#ifdef CONFIG_AS_AVX512
+ if (chacha20_use_avx512) {
+ chacha20_avx512(dst, src, len, state->key, state->counter);
+ goto out;
+ }
+ if (chacha20_use_avx512vl) {
+ chacha20_avx512vl(dst, src, len, state->key, state->counter);
+ goto out;
+ }
+#endif
+#ifdef CONFIG_AS_AVX2
+ if (chacha20_use_avx2) {
+ chacha20_avx2(dst, src, len, state->key, state->counter);
+ goto out;
+ }
+#endif
+#ifdef CONFIG_AS_SSSE3
+ chacha20_ssse3(dst, src, len, state->key, state->counter);
+ goto out;
+#endif
+#elif defined(ARM_USE_NEON)
+ chacha20_neon(dst, src, len, state->key, state->counter);
+ goto out;
+#endif
+
+no_simd:
+#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
+ chacha20_arm(dst, src, len, state->key, state->counter);
+ goto out;
+#elif defined(CONFIG_MIPS) && defined(CONFIG_CPU_MIPS32_R2)
+ chacha20_mips(dst, src, len, state->key, state->counter);
+ goto out;
+#endif
+
+ chacha20_generic(dst, src, len, state->key, state->counter);
+ goto out;
+
+out:
+ state->counter[0] += (len + 63) / 64;
+}
+
+static void hchacha20_generic(u8 derived_key[CHACHA20_KEY_SIZE], const u8 nonce[HCHACHA20_NONCE_SIZE], const u8 key[HCHACHA20_KEY_SIZE])
+{
+ __le32 *out = (__force __le32 *)derived_key;
+ u32 x[] = {
+ EXPAND_32_BYTE_K,
+ le32_to_cpup((__le32 *)(key + 0)), le32_to_cpup((__le32 *)(key + 4)), le32_to_cpup((__le32 *)(key + 8)), le32_to_cpup((__le32 *)(key + 12)),
+ le32_to_cpup((__le32 *)(key + 16)), le32_to_cpup((__le32 *)(key + 20)), le32_to_cpup((__le32 *)(key + 24)), le32_to_cpup((__le32 *)(key + 28)),
+ le32_to_cpup((__le32 *)(nonce + 0)), le32_to_cpup((__le32 *)(nonce + 4)), le32_to_cpup((__le32 *)(nonce + 8)), le32_to_cpup((__le32 *)(nonce + 12))
+ };
+
+ TWENTY_ROUNDS(x);
+
+ out[0] = cpu_to_le32(x[0]);
+ out[1] = cpu_to_le32(x[1]);
+ out[2] = cpu_to_le32(x[2]);
+ out[3] = cpu_to_le32(x[3]);
+ out[4] = cpu_to_le32(x[12]);
+ out[5] = cpu_to_le32(x[13]);
+ out[6] = cpu_to_le32(x[14]);
+ out[7] = cpu_to_le32(x[15]);
+}
+
+void hchacha20(u8 derived_key[CHACHA20_KEY_SIZE], const u8 nonce[HCHACHA20_NONCE_SIZE], const u8 key[HCHACHA20_KEY_SIZE], bool have_simd)
+{
+#if defined(CONFIG_X86_64) && defined(CONFIG_AS_SSSE3)
+ if (have_simd && chacha20_use_ssse3) {
+ hchacha20_ssse3(derived_key, nonce, key);
+ return;
+ }
+#endif
+ hchacha20_generic(derived_key, nonce, key);
+}
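
Note the counter handling at the end of chacha20(): the 64-bit block counter advances by (len + 63) / 64 on every call, so a partial trailing block still consumes a full counter step. Streaming across calls therefore only yields contiguous keystream when every chunk except the last is a multiple of CHACHA20_BLOCK_SIZE, which the scatterwalk loops in chacha20poly1305.c guarantee via rounddown(). A minimal usage sketch (buf, key and nonce are hypothetical; false takes the generic path):

	struct chacha20_ctx ctx;

	chacha20_init(&ctx, key, nonce);
	chacha20(&ctx, buf, buf, 128, false);             /* keystream blocks 0-1 */
	chacha20(&ctx, buf + 128, buf + 128, 100, false); /* continues at block 2 */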
diff --git a/src/crypto/chacha20.h b/src/crypto/chacha20.h
new file mode 100644
index 0000000..e3b6b69
--- /dev/null
+++ b/src/crypto/chacha20.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2015-2018 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#ifndef _WG_CHACHA20_H
+#define _WG_CHACHA20_H
+
+#include <linux/types.h>
+
+enum {
+ CHACHA20_IV_SIZE = 16,
+ CHACHA20_KEY_SIZE = 32,
+ CHACHA20_BLOCK_SIZE = 64,
+ HCHACHA20_KEY_SIZE = 32,
+ HCHACHA20_NONCE_SIZE = 16
+};
+
+struct chacha20_ctx {
+ u32 key[8];
+ u32 counter[4];
+} __aligned(32);
+
+void chacha20_fpu_init(void);
+
+static inline void chacha20_init(struct chacha20_ctx *state, const u8 key[CHACHA20_KEY_SIZE], u64 nonce)
+{
+ __le32 *le_key = (__le32 *)key;
+ state->key[0] = le32_to_cpu(le_key[0]);
+ state->key[1] = le32_to_cpu(le_key[1]);
+ state->key[2] = le32_to_cpu(le_key[2]);
+ state->key[3] = le32_to_cpu(le_key[3]);
+ state->key[4] = le32_to_cpu(le_key[4]);
+ state->key[5] = le32_to_cpu(le_key[5]);
+ state->key[6] = le32_to_cpu(le_key[6]);
+ state->key[7] = le32_to_cpu(le_key[7]);
+ state->counter[0] = state->counter[1] = 0;
+ state->counter[2] = nonce & U32_MAX;
+ state->counter[3] = nonce >> 32;
+}
+void chacha20(struct chacha20_ctx *state, u8 *dst, const u8 *src, u32 len, bool have_simd);
+
+void hchacha20(u8 derived_key[CHACHA20_KEY_SIZE], const u8 nonce[HCHACHA20_NONCE_SIZE], const u8 key[HCHACHA20_KEY_SIZE], bool have_simd);
+
+#endif /* _WG_CHACHA20_H */
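
For orientation, the state that chacha20_init() lays down, seen as the 4x4 matrix of u32 words that QUARTER_ROUND/DOUBLE_ROUND operate on (this is the original 64-bit-counter/64-bit-nonce ChaCha20 layout, not the RFC 7539 32/96 split):

	/* words  0-3   EXPAND_32_BYTE_K constants ("expand 32-byte k")      */
	/* words  4-11  key[0..7], little-endian words of the 256-bit key    */
	/* words 12-13  counter[0..1], 64-bit block counter, starts at zero  */
	/* words 14-15  counter[2..3], the 64-bit nonce                      */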
diff --git a/src/crypto/chacha20poly1305.c b/src/crypto/chacha20poly1305.c
index 13b5ec6..95902fd 100644
--- a/src/crypto/chacha20poly1305.c
+++ b/src/crypto/chacha20poly1305.c
@@ -1,597 +1,14 @@
-/* SPDX-License-Identifier: OpenSSL OR (BSD-3-Clause OR GPL-2.0)
+/* SPDX-License-Identifier: GPL-2.0
*
* Copyright (C) 2015-2018 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
- * Copyright 2016 The OpenSSL Project Authors. All Rights Reserved.
*/
#include "chacha20poly1305.h"
+#include "chacha20.h"
+#include "poly1305.h"
#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/version.h>
-#include <crypto/algapi.h>
#include <crypto/scatterwalk.h>
-#include <asm/unaligned.h>
-
-#if defined(CONFIG_X86_64)
-#include <asm/cpufeature.h>
-#include <asm/processor.h>
-#include <asm/intel-family.h>
-asmlinkage void poly1305_init_x86_64(void *ctx, const u8 key[16]);
-asmlinkage void poly1305_blocks_x86_64(void *ctx, const u8 *inp, size_t len, u32 padbit);
-asmlinkage void poly1305_emit_x86_64(void *ctx, u8 mac[16], const u32 nonce[4]);
-#ifdef CONFIG_AS_SSSE3
-asmlinkage void hchacha20_ssse3(u8 *derived_key, const u8 *nonce, const u8 *key);
-asmlinkage void chacha20_ssse3(u8 *out, const u8 *in, size_t len, const u32 key[8], const u32 counter[4]);
-#endif
-#ifdef CONFIG_AS_AVX
-asmlinkage void poly1305_emit_avx(void *ctx, u8 mac[16], const u32 nonce[4]);
-asmlinkage void poly1305_blocks_avx(void *ctx, const u8 *inp, size_t len, u32 padbit);
-#endif
-#ifdef CONFIG_AS_AVX2
-asmlinkage void chacha20_avx2(u8 *out, const u8 *in, size_t len, const u32 key[8], const u32 counter[4]);
-asmlinkage void poly1305_blocks_avx2(void *ctx, const u8 *inp, size_t len, u32 padbit);
-#endif
-#ifdef CONFIG_AS_AVX512
-asmlinkage void chacha20_avx512(u8 *out, const u8 *in, size_t len, const u32 key[8], const u32 counter[4]);
-asmlinkage void chacha20_avx512vl(u8 *out, const u8 *in, size_t len, const u32 key[8], const u32 counter[4]);
-asmlinkage void poly1305_blocks_avx512(void *ctx, const u8 *inp, size_t len, u32 padbit);
-#endif
-
-static bool chacha20poly1305_use_ssse3 __ro_after_init;
-static bool chacha20poly1305_use_avx __ro_after_init;
-static bool chacha20poly1305_use_avx2 __ro_after_init;
-static bool chacha20poly1305_use_avx512 __ro_after_init;
-static bool chacha20poly1305_use_avx512vl __ro_after_init;
-
-void __init chacha20poly1305_fpu_init(void)
-{
-#ifndef CONFIG_UML
- chacha20poly1305_use_ssse3 = boot_cpu_has(X86_FEATURE_SSSE3);
- chacha20poly1305_use_avx = boot_cpu_has(X86_FEATURE_AVX) &&
- cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL);
- chacha20poly1305_use_avx2 = boot_cpu_has(X86_FEATURE_AVX) && boot_cpu_has(X86_FEATURE_AVX2) &&
- cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL);
-#ifndef COMPAT_CANNOT_USE_AVX512
- chacha20poly1305_use_avx512 = boot_cpu_has(X86_FEATURE_AVX) && boot_cpu_has(X86_FEATURE_AVX2) && boot_cpu_has(X86_FEATURE_AVX512F) &&
- cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM | XFEATURE_MASK_AVX512, NULL) &&
- boot_cpu_data.x86_model != INTEL_FAM6_SKYLAKE_X;
- chacha20poly1305_use_avx512vl = boot_cpu_has(X86_FEATURE_AVX) && boot_cpu_has(X86_FEATURE_AVX2) && boot_cpu_has(X86_FEATURE_AVX512F) && boot_cpu_has(X86_FEATURE_AVX512VL) &&
- cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM | XFEATURE_MASK_AVX512, NULL);
-#endif
-#endif
-}
-#elif defined(CONFIG_ARM) || defined(CONFIG_ARM64)
-asmlinkage void poly1305_init_arm(void *ctx, const u8 key[16]);
-asmlinkage void poly1305_blocks_arm(void *ctx, const u8 *inp, size_t len, u32 padbit);
-asmlinkage void poly1305_emit_arm(void *ctx, u8 mac[16], const u32 nonce[4]);
-asmlinkage void chacha20_arm(u8 *out, const u8 *in, size_t len, const u32 key[8], const u32 counter[4]);
-#if IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && (!defined(__LINUX_ARM_ARCH__) || __LINUX_ARM_ARCH__ >= 7)
-#define ARM_USE_NEON
-#include <asm/hwcap.h>
-#include <asm/neon.h>
-asmlinkage void poly1305_blocks_neon(void *ctx, const u8 *inp, size_t len, u32 padbit);
-asmlinkage void poly1305_emit_neon(void *ctx, u8 mac[16], const u32 nonce[4]);
-asmlinkage void chacha20_neon(u8 *out, const u8 *in, size_t len, const u32 key[8], const u32 counter[4]);
-#endif
-static bool chacha20poly1305_use_neon __ro_after_init;
-void __init chacha20poly1305_fpu_init(void)
-{
-#if defined(CONFIG_ARM64)
- chacha20poly1305_use_neon = elf_hwcap & HWCAP_ASIMD;
-#elif defined(CONFIG_ARM)
- chacha20poly1305_use_neon = elf_hwcap & HWCAP_NEON;
-#endif
-}
-#elif defined(CONFIG_MIPS) && (defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2))
-asmlinkage void poly1305_init_mips(void *ctx, const u8 key[16]);
-asmlinkage void poly1305_blocks_mips(void *ctx, const u8 *inp, size_t len, u32 padbit);
-asmlinkage void poly1305_emit_mips(void *ctx, u8 mac[16], const u32 nonce[4]);
-#if defined(CONFIG_CPU_MIPS32_R2)
-asmlinkage void chacha20_mips(u8 *out, const u8 *in, size_t len, const u32 key[8], const u32 counter[4]);
-#endif
-void __init chacha20poly1305_fpu_init(void) { }
-#else
-void __init chacha20poly1305_fpu_init(void) { }
-#endif
-
-enum {
- CHACHA20_IV_SIZE = 16,
- CHACHA20_KEY_SIZE = 32,
- CHACHA20_BLOCK_SIZE = 64,
- POLY1305_BLOCK_SIZE = 16,
- POLY1305_KEY_SIZE = 32,
- POLY1305_MAC_SIZE = 16
-};
-
-static inline u32 le32_to_cpuvp(const void *p)
-{
- return le32_to_cpup(p);
-}
-
-static inline u64 le64_to_cpuvp(const void *p)
-{
- return le64_to_cpup(p);
-}
-
-struct chacha20_ctx {
- u32 state[CHACHA20_BLOCK_SIZE / sizeof(u32)];
-} __aligned(32);
-
-#define QUARTER_ROUND(x, a, b, c, d) ( \
- x[a] += x[b], \
- x[d] = rol32((x[d] ^ x[a]), 16), \
- x[c] += x[d], \
- x[b] = rol32((x[b] ^ x[c]), 12), \
- x[a] += x[b], \
- x[d] = rol32((x[d] ^ x[a]), 8), \
- x[c] += x[d], \
- x[b] = rol32((x[b] ^ x[c]), 7) \
-)
-
-#define C(i, j) (i * 4 + j)
-
-#define DOUBLE_ROUND(x) ( \
- /* Column Round */ \
- QUARTER_ROUND(x, C(0, 0), C(1, 0), C(2, 0), C(3, 0)), \
- QUARTER_ROUND(x, C(0, 1), C(1, 1), C(2, 1), C(3, 1)), \
- QUARTER_ROUND(x, C(0, 2), C(1, 2), C(2, 2), C(3, 2)), \
- QUARTER_ROUND(x, C(0, 3), C(1, 3), C(2, 3), C(3, 3)), \
- /* Diagonal Round */ \
- QUARTER_ROUND(x, C(0, 0), C(1, 1), C(2, 2), C(3, 3)), \
- QUARTER_ROUND(x, C(0, 1), C(1, 2), C(2, 3), C(3, 0)), \
- QUARTER_ROUND(x, C(0, 2), C(1, 3), C(2, 0), C(3, 1)), \
- QUARTER_ROUND(x, C(0, 3), C(1, 0), C(2, 1), C(3, 2)) \
-)
-
-#define TWENTY_ROUNDS(x) ( \
- DOUBLE_ROUND(x), \
- DOUBLE_ROUND(x), \
- DOUBLE_ROUND(x), \
- DOUBLE_ROUND(x), \
- DOUBLE_ROUND(x), \
- DOUBLE_ROUND(x), \
- DOUBLE_ROUND(x), \
- DOUBLE_ROUND(x), \
- DOUBLE_ROUND(x), \
- DOUBLE_ROUND(x) \
-)
-
-#define EXPAND_32_BYTE_K 0x61707865, 0x3320646e, 0x79622d32, 0x6b206574
-
-static void chacha20_block_generic(struct chacha20_ctx *ctx, __le32 *stream)
-{
- u32 x[CHACHA20_BLOCK_SIZE / sizeof(u32)];
- int i;
-
- for (i = 0; i < ARRAY_SIZE(x); ++i)
- x[i] = ctx->state[i];
-
- TWENTY_ROUNDS(x);
-
- for (i = 0; i < ARRAY_SIZE(x); ++i)
- stream[i] = cpu_to_le32(x[i] + ctx->state[i]);
-
- ++ctx->state[12];
-}
-
-static void hchacha20_generic(u8 derived_key[CHACHA20POLY1305_KEYLEN], const u8 nonce[16], const u8 key[CHACHA20POLY1305_KEYLEN])
-{
- __le32 *out = (__force __le32 *)derived_key;
- u32 x[] = {
- EXPAND_32_BYTE_K,
- le32_to_cpuvp(key + 0), le32_to_cpuvp(key + 4), le32_to_cpuvp(key + 8), le32_to_cpuvp(key + 12),
- le32_to_cpuvp(key + 16), le32_to_cpuvp(key + 20), le32_to_cpuvp(key + 24), le32_to_cpuvp(key + 28),
- le32_to_cpuvp(nonce + 0), le32_to_cpuvp(nonce + 4), le32_to_cpuvp(nonce + 8), le32_to_cpuvp(nonce + 12)
- };
-
- TWENTY_ROUNDS(x);
-
- out[0] = cpu_to_le32(x[0]);
- out[1] = cpu_to_le32(x[1]);
- out[2] = cpu_to_le32(x[2]);
- out[3] = cpu_to_le32(x[3]);
- out[4] = cpu_to_le32(x[12]);
- out[5] = cpu_to_le32(x[13]);
- out[6] = cpu_to_le32(x[14]);
- out[7] = cpu_to_le32(x[15]);
-}
-
-static inline void hchacha20(u8 derived_key[CHACHA20POLY1305_KEYLEN], const u8 nonce[16], const u8 key[CHACHA20POLY1305_KEYLEN], bool have_simd)
-{
-#if defined(CONFIG_X86_64) && defined(CONFIG_AS_SSSE3)
- if (have_simd && chacha20poly1305_use_ssse3) {
- hchacha20_ssse3(derived_key, nonce, key);
- return;
- }
-#endif
-
- hchacha20_generic(derived_key, nonce, key);
-}
-
-#define chacha20_initial_state(key, nonce) {{ \
- EXPAND_32_BYTE_K, \
- le32_to_cpuvp((key) + 0), le32_to_cpuvp((key) + 4), le32_to_cpuvp((key) + 8), le32_to_cpuvp((key) + 12), \
- le32_to_cpuvp((key) + 16), le32_to_cpuvp((key) + 20), le32_to_cpuvp((key) + 24), le32_to_cpuvp((key) + 28), \
- 0, 0, le32_to_cpuvp((nonce) + 0), le32_to_cpuvp((nonce) + 4) \
-}}
-
-static void chacha20_crypt(struct chacha20_ctx *ctx, u8 *dst, const u8 *src, u32 bytes, bool have_simd)
-{
- __le32 buf[CHACHA20_BLOCK_SIZE / sizeof(__le32)];
-
- if (!have_simd
-#if defined(CONFIG_X86_64)
- || !chacha20poly1305_use_ssse3
-
-#elif defined(ARM_USE_NEON)
- || !chacha20poly1305_use_neon
-#endif
- )
- goto no_simd;
-
-#if defined(CONFIG_X86_64)
-#ifdef CONFIG_AS_AVX512
- if (chacha20poly1305_use_avx512) {
- chacha20_avx512(dst, src, bytes, &ctx->state[4], &ctx->state[12]);
- ctx->state[12] += (bytes + 63) / 64;
- return;
- }
- if (chacha20poly1305_use_avx512vl) {
- chacha20_avx512vl(dst, src, bytes, &ctx->state[4], &ctx->state[12]);
- ctx->state[12] += (bytes + 63) / 64;
- return;
- }
-#endif
-#ifdef CONFIG_AS_AVX2
- if (chacha20poly1305_use_avx2) {
- chacha20_avx2(dst, src, bytes, &ctx->state[4], &ctx->state[12]);
- ctx->state[12] += (bytes + 63) / 64;
- return;
- }
-#endif
-#ifdef CONFIG_AS_SSSE3
- chacha20_ssse3(dst, src, bytes, &ctx->state[4], &ctx->state[12]);
- ctx->state[12] += (bytes + 63) / 64;
- return;
-#endif
-#elif defined(ARM_USE_NEON)
- chacha20_neon(dst, src, bytes, &ctx->state[4], &ctx->state[12]);
- ctx->state[12] += (bytes + 63) / 64;
- return;
-#endif
-
-no_simd:
-#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
- chacha20_arm(dst, src, bytes, &ctx->state[4], &ctx->state[12]);
- ctx->state[12] += (bytes + 63) / 64;
- return;
-#elif defined(CONFIG_MIPS) && defined(CONFIG_CPU_MIPS32_R2)
- chacha20_mips(dst, src, bytes, &ctx->state[4], &ctx->state[12]);
- ctx->state[12] += (bytes + 63) / 64;
- return;
-#endif
-
- if (dst != src)
- memcpy(dst, src, bytes);
-
- while (bytes >= CHACHA20_BLOCK_SIZE) {
- chacha20_block_generic(ctx, buf);
- crypto_xor(dst, (u8 *)buf, CHACHA20_BLOCK_SIZE);
- bytes -= CHACHA20_BLOCK_SIZE;
- dst += CHACHA20_BLOCK_SIZE;
- }
- if (bytes) {
- chacha20_block_generic(ctx, buf);
- crypto_xor(dst, (u8 *)buf, bytes);
- }
-}
-
-struct poly1305_ctx {
- u8 opaque[24 * sizeof(u64)];
- u32 nonce[4];
- u8 data[POLY1305_BLOCK_SIZE];
- size_t num;
-} __aligned(8);
-
-#if !(defined(CONFIG_X86_64) || defined(CONFIG_ARM) || defined(CONFIG_ARM64) || (defined(CONFIG_MIPS) && (defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2))))
-struct poly1305_internal {
- u32 h[5];
- u32 r[4];
-};
-
-static void poly1305_init_generic(void *ctx, const u8 key[16])
-{
- struct poly1305_internal *st = (struct poly1305_internal *)ctx;
-
- /* h = 0 */
- st->h[0] = 0;
- st->h[1] = 0;
- st->h[2] = 0;
- st->h[3] = 0;
- st->h[4] = 0;
-
- /* r &= 0xffffffc0ffffffc0ffffffc0fffffff */
- st->r[0] = le32_to_cpuvp(&key[ 0]) & 0x0fffffff;
- st->r[1] = le32_to_cpuvp(&key[ 4]) & 0x0ffffffc;
- st->r[2] = le32_to_cpuvp(&key[ 8]) & 0x0ffffffc;
- st->r[3] = le32_to_cpuvp(&key[12]) & 0x0ffffffc;
-}
-
-static void poly1305_blocks_generic(void *ctx, const u8 *inp, size_t len, u32 padbit)
-{
-#define CONSTANT_TIME_CARRY(a,b) ((a ^ ((a ^ b) | ((a - b) ^ b))) >> (sizeof(a) * 8 - 1))
- struct poly1305_internal *st = (struct poly1305_internal *)ctx;
- u32 r0, r1, r2, r3;
- u32 s1, s2, s3;
- u32 h0, h1, h2, h3, h4, c;
- u64 d0, d1, d2, d3;
-
- r0 = st->r[0];
- r1 = st->r[1];
- r2 = st->r[2];
- r3 = st->r[3];
-
- s1 = r1 + (r1 >> 2);
- s2 = r2 + (r2 >> 2);
- s3 = r3 + (r3 >> 2);
-
- h0 = st->h[0];
- h1 = st->h[1];
- h2 = st->h[2];
- h3 = st->h[3];
- h4 = st->h[4];
-
- while (len >= POLY1305_BLOCK_SIZE) {
- /* h += m[i] */
- h0 = (u32)(d0 = (u64)h0 + le32_to_cpuvp(inp + 0));
- h1 = (u32)(d1 = (u64)h1 + (d0 >> 32) + le32_to_cpuvp(inp + 4));
- h2 = (u32)(d2 = (u64)h2 + (d1 >> 32) + le32_to_cpuvp(inp + 8));
- h3 = (u32)(d3 = (u64)h3 + (d2 >> 32) + le32_to_cpuvp(inp + 12));
- h4 += (u32)(d3 >> 32) + padbit;
-
- /* h *= r "%" p, where "%" stands for "partial remainder" */
- d0 = ((u64)h0 * r0) +
- ((u64)h1 * s3) +
- ((u64)h2 * s2) +
- ((u64)h3 * s1);
- d1 = ((u64)h0 * r1) +
- ((u64)h1 * r0) +
- ((u64)h2 * s3) +
- ((u64)h3 * s2) +
- (h4 * s1);
- d2 = ((u64)h0 * r2) +
- ((u64)h1 * r1) +
- ((u64)h2 * r0) +
- ((u64)h3 * s3) +
- (h4 * s2);
- d3 = ((u64)h0 * r3) +
- ((u64)h1 * r2) +
- ((u64)h2 * r1) +
- ((u64)h3 * r0) +
- (h4 * s3);
- h4 = (h4 * r0);
-
- /* last reduction step: */
- /* a) h4:h0 = h4<<128 + d3<<96 + d2<<64 + d1<<32 + d0 */
- h0 = (u32)d0;
- h1 = (u32)(d1 += d0 >> 32);
- h2 = (u32)(d2 += d1 >> 32);
- h3 = (u32)(d3 += d2 >> 32);
- h4 += (u32)(d3 >> 32);
- /* b) (h4:h0 += (h4:h0>>130) * 5) %= 2^130 */
- c = (h4 >> 2) + (h4 & ~3U);
- h4 &= 3;
- h0 += c;
- h1 += (c = CONSTANT_TIME_CARRY(h0,c));
- h2 += (c = CONSTANT_TIME_CARRY(h1,c));
- h3 += (c = CONSTANT_TIME_CARRY(h2,c));
- h4 += CONSTANT_TIME_CARRY(h3,c);
- /*
- * Occasional overflows to 3rd bit of h4 are taken care of
- * "naturally". If after this point we end up at the top of
- * this loop, then the overflow bit will be accounted for
- * in next iteration. If we end up in poly1305_emit, then
- * comparison to modulus below will still count as "carry
- * into 131st bit", so that properly reduced value will be
- * picked in conditional move.
- */
-
- inp += POLY1305_BLOCK_SIZE;
- len -= POLY1305_BLOCK_SIZE;
- }
-
- st->h[0] = h0;
- st->h[1] = h1;
- st->h[2] = h2;
- st->h[3] = h3;
- st->h[4] = h4;
-#undef CONSTANT_TIME_CARRY
-}
-
-static void poly1305_emit_generic(void *ctx, u8 mac[16], const u32 nonce[4])
-{
- struct poly1305_internal *st = (struct poly1305_internal *)ctx;
- __le32 *omac = (__force __le32 *)mac;
- u32 h0, h1, h2, h3, h4;
- u32 g0, g1, g2, g3, g4;
- u64 t;
- u32 mask;
-
- h0 = st->h[0];
- h1 = st->h[1];
- h2 = st->h[2];
- h3 = st->h[3];
- h4 = st->h[4];
-
- /* compare to modulus by computing h + -p */
- g0 = (u32)(t = (u64)h0 + 5);
- g1 = (u32)(t = (u64)h1 + (t >> 32));
- g2 = (u32)(t = (u64)h2 + (t >> 32));
- g3 = (u32)(t = (u64)h3 + (t >> 32));
- g4 = h4 + (u32)(t >> 32);
-
- /* if there was carry into 131st bit, h3:h0 = g3:g0 */
- mask = 0 - (g4 >> 2);
- g0 &= mask;
- g1 &= mask;
- g2 &= mask;
- g3 &= mask;
- mask = ~mask;
- h0 = (h0 & mask) | g0;
- h1 = (h1 & mask) | g1;
- h2 = (h2 & mask) | g2;
- h3 = (h3 & mask) | g3;
-
- /* mac = (h + nonce) % (2^128) */
- h0 = (u32)(t = (u64)h0 + nonce[0]);
- h1 = (u32)(t = (u64)h1 + (t >> 32) + nonce[1]);
- h2 = (u32)(t = (u64)h2 + (t >> 32) + nonce[2]);
- h3 = (u32)(t = (u64)h3 + (t >> 32) + nonce[3]);
-
- omac[0] = cpu_to_le32(h0);
- omac[1] = cpu_to_le32(h1);
- omac[2] = cpu_to_le32(h2);
- omac[3] = cpu_to_le32(h3);
-}
-#endif
-
-static void poly1305_init(struct poly1305_ctx *ctx, const u8 key[POLY1305_KEY_SIZE], bool have_simd)
-{
- ctx->nonce[0] = le32_to_cpuvp(&key[16]);
- ctx->nonce[1] = le32_to_cpuvp(&key[20]);
- ctx->nonce[2] = le32_to_cpuvp(&key[24]);
- ctx->nonce[3] = le32_to_cpuvp(&key[28]);
-
-#if defined(CONFIG_X86_64)
- poly1305_init_x86_64(ctx->opaque, key);
-#elif defined(CONFIG_ARM) || defined(CONFIG_ARM64)
- poly1305_init_arm(ctx->opaque, key);
-#elif defined(CONFIG_MIPS) && (defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2))
- poly1305_init_mips(ctx->opaque, key);
-#else
- poly1305_init_generic(ctx->opaque, key);
-#endif
- ctx->num = 0;
-}
-
-static inline void poly1305_blocks(void *ctx, const u8 *inp, size_t len, u32 padbit, bool have_simd)
-{
-#if defined(CONFIG_X86_64)
-#ifdef CONFIG_AS_AVX512
- if(chacha20poly1305_use_avx512 && have_simd)
- poly1305_blocks_avx512(ctx, inp, len, padbit);
- else
-#endif
-#ifdef CONFIG_AS_AVX2
- if (chacha20poly1305_use_avx2 && have_simd)
- poly1305_blocks_avx2(ctx, inp, len, padbit);
- else
-#endif
-#ifdef CONFIG_AS_AVX
- if (chacha20poly1305_use_avx && have_simd)
- poly1305_blocks_avx(ctx, inp, len, padbit);
- else
-#endif
- poly1305_blocks_x86_64(ctx, inp, len, padbit);
-#elif defined(CONFIG_ARM) || defined(CONFIG_ARM64)
-#if defined(ARM_USE_NEON)
- if (chacha20poly1305_use_neon && have_simd)
- poly1305_blocks_neon(ctx, inp, len, padbit);
- else
-#endif
- poly1305_blocks_arm(ctx, inp, len, padbit);
-#elif defined(CONFIG_MIPS) && (defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2))
- poly1305_blocks_mips(ctx, inp, len, padbit);
-#else
- poly1305_blocks_generic(ctx, inp, len, padbit);
-#endif
-}
-
-static inline void poly1305_emit(void *ctx, u8 mac[16], const u32 nonce[4], bool have_simd)
-{
-#if defined(CONFIG_X86_64)
-#ifdef CONFIG_AS_AVX512
- if(chacha20poly1305_use_avx512 && have_simd)
- poly1305_emit_avx(ctx, mac, nonce);
- else
-#endif
-#ifdef CONFIG_AS_AVX2
- if (chacha20poly1305_use_avx2 && have_simd)
- poly1305_emit_avx(ctx, mac, nonce);
- else
-#endif
-#ifdef CONFIG_AS_AVX
- if (chacha20poly1305_use_avx && have_simd)
- poly1305_emit_avx(ctx, mac, nonce);
- else
-#endif
- poly1305_emit_x86_64(ctx, mac, nonce);
-#elif defined(CONFIG_ARM) || defined(CONFIG_ARM64)
-#if defined(ARM_USE_NEON)
- if (chacha20poly1305_use_neon && have_simd)
- poly1305_emit_neon(ctx, mac, nonce);
- else
-#endif
- poly1305_emit_arm(ctx, mac, nonce);
-#elif defined(CONFIG_MIPS) && (defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2))
- poly1305_emit_mips(ctx, mac, nonce);
-#else
- poly1305_emit_generic(ctx, mac, nonce);
-#endif
-}
-
-static void poly1305_update(struct poly1305_ctx *ctx, const u8 *inp, size_t len, bool have_simd)
-{
- const size_t num = ctx->num % POLY1305_BLOCK_SIZE;
- size_t rem;
-
- if (num) {
- rem = POLY1305_BLOCK_SIZE - num;
- if (len >= rem) {
- memcpy(ctx->data + num, inp, rem);
- poly1305_blocks(ctx->opaque, ctx->data, POLY1305_BLOCK_SIZE, 1, have_simd);
- inp += rem;
- len -= rem;
- } else {
- /* Still not enough data to process a block. */
- memcpy(ctx->data + num, inp, len);
- ctx->num = num + len;
- return;
- }
- }
-
- rem = len % POLY1305_BLOCK_SIZE;
- len -= rem;
-
- if (len >= POLY1305_BLOCK_SIZE) {
- poly1305_blocks(ctx->opaque, inp, len, 1, have_simd);
- inp += len;
- }
-
- if (rem)
- memcpy(ctx->data, inp, rem);
-
- ctx->num = rem;
-}
-
-static void poly1305_finish(struct poly1305_ctx *ctx, u8 mac[16], bool have_simd)
-{
- size_t num = ctx->num % POLY1305_BLOCK_SIZE;
-
- if (num) {
- ctx->data[num++] = 1; /* pad bit */
- while (num < POLY1305_BLOCK_SIZE)
- ctx->data[num++] = 0;
- poly1305_blocks(ctx->opaque, ctx->data, POLY1305_BLOCK_SIZE, 0, have_simd);
- }
-
- poly1305_emit(ctx->opaque, mac, ctx->nonce, have_simd);
-
- /* zero out the state */
- memzero_explicit(ctx, sizeof(*ctx));
-}
-
static const u8 pad0[16] = { 0 };
@@ -613,19 +30,20 @@ static inline void __chacha20poly1305_encrypt(u8 *dst, const u8 *src, const size_t src_len, const u8 *ad, const size_t ad_len,
const u64 nonce, const u8 key[CHACHA20POLY1305_KEYLEN],
bool have_simd)
{
- __le64 len, le_nonce = cpu_to_le64(nonce);
+ __le64 len;
struct poly1305_ctx poly1305_state;
- struct chacha20_ctx chacha20_state = chacha20_initial_state(key, (u8 *)&le_nonce);
- u8 block0[CHACHA20_BLOCK_SIZE] = { 0 };
+ struct chacha20_ctx chacha20_state;
+ u8 block0[POLY1305_KEY_SIZE] = { 0 };
- chacha20_crypt(&chacha20_state, block0, block0, sizeof(block0), have_simd);
+ chacha20_init(&chacha20_state, key, nonce);
+ chacha20(&chacha20_state, block0, block0, sizeof(block0), have_simd);
poly1305_init(&poly1305_state, block0, have_simd);
memzero_explicit(block0, sizeof(block0));
poly1305_update(&poly1305_state, ad, ad_len, have_simd);
poly1305_update(&poly1305_state, pad0, (0x10 - ad_len) & 0xf, have_simd);
- chacha20_crypt(&chacha20_state, dst, src, src_len, have_simd);
+ chacha20(&chacha20_state, dst, src, src_len, have_simd);
poly1305_update(&poly1305_state, dst, src_len, have_simd);
poly1305_update(&poly1305_state, pad0, (0x10 - src_len) & 0xf, have_simd);
@@ -657,15 +75,16 @@ bool chacha20poly1305_encrypt_sg(struct scatterlist *dst, struct scatterlist *src, const size_t src_len, const u8 *ad, const size_t ad_len,
const u64 nonce, const u8 key[CHACHA20POLY1305_KEYLEN],
bool have_simd)
{
- __le64 len, le_nonce = cpu_to_le64(nonce);
+ __le64 len;
struct poly1305_ctx poly1305_state;
- struct chacha20_ctx chacha20_state = chacha20_initial_state(key, (u8 *)&le_nonce);
+ struct chacha20_ctx chacha20_state;
int ret = 0;
struct blkcipher_walk walk;
- u8 block0[CHACHA20_BLOCK_SIZE] = { 0 };
+ u8 block0[POLY1305_KEY_SIZE] = { 0 };
u8 mac[POLY1305_MAC_SIZE];
- chacha20_crypt(&chacha20_state, block0, block0, sizeof(block0), have_simd);
+ chacha20_init(&chacha20_state, key, nonce);
+ chacha20(&chacha20_state, block0, block0, sizeof(block0), have_simd);
poly1305_init(&poly1305_state, block0, have_simd);
memzero_explicit(block0, sizeof(block0));
@@ -678,12 +97,12 @@ bool chacha20poly1305_encrypt_sg(struct scatterlist *dst, struct scatterlist *src, const size_t src_len, const u8 *ad, const size_t ad_len,
while (walk.nbytes >= CHACHA20_BLOCK_SIZE) {
size_t chunk_len = rounddown(walk.nbytes, CHACHA20_BLOCK_SIZE);
- chacha20_crypt(&chacha20_state, walk.dst.virt.addr, walk.src.virt.addr, chunk_len, have_simd);
+ chacha20(&chacha20_state, walk.dst.virt.addr, walk.src.virt.addr, chunk_len, have_simd);
poly1305_update(&poly1305_state, walk.dst.virt.addr, chunk_len, have_simd);
ret = blkcipher_walk_done(&chacha20_desc, &walk, walk.nbytes % CHACHA20_BLOCK_SIZE);
}
if (walk.nbytes) {
- chacha20_crypt(&chacha20_state, walk.dst.virt.addr, walk.src.virt.addr, walk.nbytes, have_simd);
+ chacha20(&chacha20_state, walk.dst.virt.addr, walk.src.virt.addr, walk.nbytes, have_simd);
poly1305_update(&poly1305_state, walk.dst.virt.addr, walk.nbytes, have_simd);
ret = blkcipher_walk_done(&chacha20_desc, &walk, 0);
}
@@ -712,18 +131,19 @@ static inline bool __chacha20poly1305_decrypt(u8 *dst, const u8 *src, const size_t src_len, const u8 *ad, const size_t ad_len,
const u64 nonce, const u8 key[CHACHA20POLY1305_KEYLEN],
bool have_simd)
{
- __le64 len, le_nonce = cpu_to_le64(nonce);
+ __le64 len;
struct poly1305_ctx poly1305_state;
- struct chacha20_ctx chacha20_state = chacha20_initial_state(key, (u8 *)&le_nonce);
+ struct chacha20_ctx chacha20_state;
int ret;
- u8 block0[CHACHA20_BLOCK_SIZE] = { 0 };
+ u8 block0[POLY1305_KEY_SIZE] = { 0 };
u8 mac[POLY1305_MAC_SIZE];
size_t dst_len;
if (unlikely(src_len < POLY1305_MAC_SIZE))
return false;
- chacha20_crypt(&chacha20_state, block0, block0, sizeof(block0), have_simd);
+ chacha20_init(&chacha20_state, key, nonce);
+ chacha20(&chacha20_state, block0, block0, sizeof(block0), have_simd);
poly1305_init(&poly1305_state, block0, have_simd);
memzero_explicit(block0, sizeof(block0));
@@ -745,7 +165,7 @@ static inline bool __chacha20poly1305_decrypt(u8 *dst, const u8 *src, const size_t src_len, const u8 *ad, const size_t ad_len,
ret = crypto_memneq(mac, src + dst_len, POLY1305_MAC_SIZE);
memzero_explicit(mac, POLY1305_MAC_SIZE);
if (likely(!ret))
- chacha20_crypt(&chacha20_state, dst, src, dst_len, have_simd);
+ chacha20(&chacha20_state, dst, src, dst_len, have_simd);
memzero_explicit(&chacha20_state, sizeof(chacha20_state));
@@ -769,19 +189,20 @@ bool chacha20poly1305_decrypt_sg(struct scatterlist *dst, struct scatterlist *src, const size_t src_len, const u8 *ad, const size_t ad_len,
const u64 nonce, const u8 key[CHACHA20POLY1305_KEYLEN],
bool have_simd)
{
- __le64 len, le_nonce = cpu_to_le64(nonce);
+ __le64 len;
struct poly1305_ctx poly1305_state;
- struct chacha20_ctx chacha20_state = chacha20_initial_state(key, (u8 *)&le_nonce);
+ struct chacha20_ctx chacha20_state;
struct blkcipher_walk walk;
int ret = 0;
- u8 block0[CHACHA20_BLOCK_SIZE] = { 0 };
+ u8 block0[POLY1305_KEY_SIZE] = { 0 };
u8 read_mac[POLY1305_MAC_SIZE], computed_mac[POLY1305_MAC_SIZE];
size_t dst_len;
if (unlikely(src_len < POLY1305_MAC_SIZE))
return false;
- chacha20_crypt(&chacha20_state, block0, block0, sizeof(block0), have_simd);
+ chacha20_init(&chacha20_state, key, nonce);
+ chacha20(&chacha20_state, block0, block0, sizeof(block0), have_simd);
poly1305_init(&poly1305_state, block0, have_simd);
memzero_explicit(block0, sizeof(block0));
@@ -796,12 +217,12 @@ bool chacha20poly1305_decrypt_sg(struct scatterlist *dst, struct scatterlist *src, const size_t src_len, const u8 *ad, const size_t ad_len,
size_t chunk_len = rounddown(walk.nbytes, CHACHA20_BLOCK_SIZE);
poly1305_update(&poly1305_state, walk.src.virt.addr, chunk_len, have_simd);
- chacha20_crypt(&chacha20_state, walk.dst.virt.addr, walk.src.virt.addr, chunk_len, have_simd);
+ chacha20(&chacha20_state, walk.dst.virt.addr, walk.src.virt.addr, chunk_len, have_simd);
ret = blkcipher_walk_done(&chacha20_desc, &walk, walk.nbytes % CHACHA20_BLOCK_SIZE);
}
if (walk.nbytes) {
poly1305_update(&poly1305_state, walk.src.virt.addr, walk.nbytes, have_simd);
- chacha20_crypt(&chacha20_state, walk.dst.virt.addr, walk.src.virt.addr, walk.nbytes, have_simd);
+ chacha20(&chacha20_state, walk.dst.virt.addr, walk.src.virt.addr, walk.nbytes, have_simd);
ret = blkcipher_walk_done(&chacha20_desc, &walk, 0);
}
}
@@ -837,7 +258,7 @@ void xchacha20poly1305_encrypt(u8 *dst, const u8 *src, const size_t src_len,
u8 derived_key[CHACHA20POLY1305_KEYLEN] __aligned(16);
hchacha20(derived_key, nonce, key, have_simd);
- __chacha20poly1305_encrypt(dst, src, src_len, ad, ad_len, le64_to_cpuvp(nonce + 16), derived_key, have_simd);
+ __chacha20poly1305_encrypt(dst, src, src_len, ad, ad_len, le64_to_cpup((__le64 *)(nonce + 16)), derived_key, have_simd);
memzero_explicit(derived_key, CHACHA20POLY1305_KEYLEN);
chacha20poly1305_deinit_simd(have_simd);
}
@@ -851,11 +272,10 @@ bool xchacha20poly1305_decrypt(u8 *dst, const u8 *src, const size_t src_len,
u8 derived_key[CHACHA20POLY1305_KEYLEN] __aligned(16);
hchacha20(derived_key, nonce, key, have_simd);
- ret = __chacha20poly1305_decrypt(dst, src, src_len, ad, ad_len, le64_to_cpuvp(nonce + 16), derived_key, have_simd);
+ ret = __chacha20poly1305_decrypt(dst, src, src_len, ad, ad_len, le64_to_cpup((__le64 *)(nonce + 16)), derived_key, have_simd);
memzero_explicit(derived_key, CHACHA20POLY1305_KEYLEN);
chacha20poly1305_deinit_simd(have_simd);
return ret;
}
#include "../selftest/chacha20poly1305.h"
-#include "../selftest/poly1305.h"
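
The AEAD composition itself is untouched by this refactor; for orientation, the Poly1305 input that __chacha20poly1305_encrypt() assembles through the calls kept above is ad || pad16 || ciphertext || pad16 || le64(ad_len) || le64(ct_len). As a sketch, with st, ct and simd as stand-in names (pad0 is the existing 16-byte zero pad):

	poly1305_update(&st, ad, ad_len, simd);
	poly1305_update(&st, pad0, (0x10 - ad_len) & 0xf, simd);  /* pad AD to 16  */
	poly1305_update(&st, ct, ct_len, simd);
	poly1305_update(&st, pad0, (0x10 - ct_len) & 0xf, simd);  /* pad CT to 16  */
	len = cpu_to_le64(ad_len);
	poly1305_update(&st, (u8 *)&len, sizeof(len), simd);
	len = cpu_to_le64(ct_len);
	poly1305_update(&st, (u8 *)&len, sizeof(len), simd);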
diff --git a/src/crypto/chacha20poly1305.h b/src/crypto/chacha20poly1305.h
index 62b48b9..39919cd 100644
--- a/src/crypto/chacha20poly1305.h
+++ b/src/crypto/chacha20poly1305.h
@@ -16,8 +16,6 @@ enum chacha20poly1305_lengths {
CHACHA20POLY1305_AUTHTAGLEN = 16
};
-void chacha20poly1305_fpu_init(void);
-
void chacha20poly1305_encrypt(u8 *dst, const u8 *src, const size_t src_len,
const u8 *ad, const size_t ad_len,
const u64 nonce, const u8 key[CHACHA20POLY1305_KEYLEN]);
@@ -87,7 +85,6 @@ static inline void chacha20poly1305_deinit_simd(bool was_on)
#ifdef DEBUG
bool chacha20poly1305_selftest(void);
-bool poly1305_selftest(void);
#endif
#endif /* _WG_CHACHA20POLY1305_H */
diff --git a/src/crypto/poly1305.c b/src/crypto/poly1305.c
new file mode 100644
index 0000000..e405141
--- /dev/null
+++ b/src/crypto/poly1305.c
@@ -0,0 +1,375 @@
+/* SPDX-License-Identifier: OpenSSL OR (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright (C) 2015-2018 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ * Copyright 2016 The OpenSSL Project Authors. All Rights Reserved.
+ */
+
+#include "poly1305.h"
+
+#include <linux/kernel.h>
+
+#if defined(CONFIG_X86_64)
+#include <asm/fpu/api.h>
+#include <asm/cpufeature.h>
+#include <asm/processor.h>
+#include <asm/intel-family.h>
+asmlinkage void poly1305_init_x86_64(void *ctx, const u8 key[POLY1305_KEY_SIZE]);
+asmlinkage void poly1305_blocks_x86_64(void *ctx, const u8 *inp, size_t len, u32 padbit);
+asmlinkage void poly1305_emit_x86_64(void *ctx, u8 mac[POLY1305_MAC_SIZE], const u32 nonce[4]);
+#ifdef CONFIG_AS_AVX
+asmlinkage void poly1305_emit_avx(void *ctx, u8 mac[POLY1305_MAC_SIZE], const u32 nonce[4]);
+asmlinkage void poly1305_blocks_avx(void *ctx, const u8 *inp, size_t len, u32 padbit);
+#endif
+#ifdef CONFIG_AS_AVX2
+asmlinkage void poly1305_blocks_avx2(void *ctx, const u8 *inp, size_t len, u32 padbit);
+#endif
+#ifdef CONFIG_AS_AVX512
+asmlinkage void poly1305_blocks_avx512(void *ctx, const u8 *inp, size_t len, u32 padbit);
+#endif
+
+static bool poly1305_use_avx __ro_after_init;
+static bool poly1305_use_avx2 __ro_after_init;
+static bool poly1305_use_avx512 __ro_after_init;
+
+void __init poly1305_fpu_init(void)
+{
+#ifndef CONFIG_UML
+ poly1305_use_avx = boot_cpu_has(X86_FEATURE_AVX) &&
+ cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL);
+ poly1305_use_avx2 = boot_cpu_has(X86_FEATURE_AVX) && boot_cpu_has(X86_FEATURE_AVX2) &&
+ cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL);
+#ifndef COMPAT_CANNOT_USE_AVX512
+ poly1305_use_avx512 = boot_cpu_has(X86_FEATURE_AVX) && boot_cpu_has(X86_FEATURE_AVX2) && boot_cpu_has(X86_FEATURE_AVX512F) &&
+ cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM | XFEATURE_MASK_AVX512, NULL) &&
+ boot_cpu_data.x86_model != INTEL_FAM6_SKYLAKE_X;
+#endif
+#endif
+}
+#elif defined(CONFIG_ARM) || defined(CONFIG_ARM64)
+asmlinkage void poly1305_init_arm(void *ctx, const u8 key[16]);
+asmlinkage void poly1305_blocks_arm(void *ctx, const u8 *inp, size_t len, u32 padbit);
+asmlinkage void poly1305_emit_arm(void *ctx, u8 mac[16], const u32 nonce[4]);
+#if IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && (!defined(__LINUX_ARM_ARCH__) || __LINUX_ARM_ARCH__ >= 7)
+#define ARM_USE_NEON
+#include <asm/hwcap.h>
+#include <asm/neon.h>
+asmlinkage void poly1305_blocks_neon(void *ctx, const u8 *inp, size_t len, u32 padbit);
+asmlinkage void poly1305_emit_neon(void *ctx, u8 mac[16], const u32 nonce[4]);
+#endif
+static bool poly1305_use_neon __ro_after_init;
+void __init poly1305_fpu_init(void)
+{
+#if defined(CONFIG_ARM64)
+ poly1305_use_neon = elf_hwcap & HWCAP_ASIMD;
+#elif defined(CONFIG_ARM)
+ poly1305_use_neon = elf_hwcap & HWCAP_NEON;
+#endif
+}
+#elif defined(CONFIG_MIPS) && (defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2))
+asmlinkage void poly1305_init_mips(void *ctx, const u8 key[16]);
+asmlinkage void poly1305_blocks_mips(void *ctx, const u8 *inp, size_t len, u32 padbit);
+asmlinkage void poly1305_emit_mips(void *ctx, u8 mac[16], const u32 nonce[4]);
+void __init poly1305_fpu_init(void) { }
+#else
+void __init poly1305_fpu_init(void) { }
+#endif
+
+#if !(defined(CONFIG_X86_64) || defined(CONFIG_ARM) || defined(CONFIG_ARM64) || (defined(CONFIG_MIPS) && (defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2))))
+struct poly1305_internal {
+ u32 h[5];
+ u32 r[4];
+};
+
+static void poly1305_init_generic(void *ctx, const u8 key[16])
+{
+ struct poly1305_internal *st = (struct poly1305_internal *)ctx;
+
+ /* h = 0 */
+ st->h[0] = 0;
+ st->h[1] = 0;
+ st->h[2] = 0;
+ st->h[3] = 0;
+ st->h[4] = 0;
+
+ /* r &= 0xffffffc0ffffffc0ffffffc0fffffff */
+ st->r[0] = le32_to_cpup((__le32 *)&key[ 0]) & 0x0fffffff;
+ st->r[1] = le32_to_cpup((__le32 *)&key[ 4]) & 0x0ffffffc;
+ st->r[2] = le32_to_cpup((__le32 *)&key[ 8]) & 0x0ffffffc;
+ st->r[3] = le32_to_cpup((__le32 *)&key[12]) & 0x0ffffffc;
+}
+
+static void poly1305_blocks_generic(void *ctx, const u8 *inp, size_t len, u32 padbit)
+{
+#define CONSTANT_TIME_CARRY(a,b) ((a ^ ((a ^ b) | ((a - b) ^ b))) >> (sizeof(a) * 8 - 1))
+ struct poly1305_internal *st = (struct poly1305_internal *)ctx;
+ u32 r0, r1, r2, r3;
+ u32 s1, s2, s3;
+ u32 h0, h1, h2, h3, h4, c;
+ u64 d0, d1, d2, d3;
+
+ r0 = st->r[0];
+ r1 = st->r[1];
+ r2 = st->r[2];
+ r3 = st->r[3];
+
+ s1 = r1 + (r1 >> 2);
+ s2 = r2 + (r2 >> 2);
+ s3 = r3 + (r3 >> 2);
+
+ h0 = st->h[0];
+ h1 = st->h[1];
+ h2 = st->h[2];
+ h3 = st->h[3];
+ h4 = st->h[4];
+
+ while (len >= POLY1305_BLOCK_SIZE) {
+ /* h += m[i] */
+ h0 = (u32)(d0 = (u64)h0 + le32_to_cpup((__le32 *)(inp + 0)));
+ h1 = (u32)(d1 = (u64)h1 + (d0 >> 32) + le32_to_cpup((__le32 *)(inp + 4)));
+ h2 = (u32)(d2 = (u64)h2 + (d1 >> 32) + le32_to_cpup((__le32 *)(inp + 8)));
+ h3 = (u32)(d3 = (u64)h3 + (d2 >> 32) + le32_to_cpup((__le32 *)(inp + 12)));
+ h4 += (u32)(d3 >> 32) + padbit;
+
+ /* h *= r "%" p, where "%" stands for "partial remainder" */
+ d0 = ((u64)h0 * r0) +
+ ((u64)h1 * s3) +
+ ((u64)h2 * s2) +
+ ((u64)h3 * s1);
+ d1 = ((u64)h0 * r1) +
+ ((u64)h1 * r0) +
+ ((u64)h2 * s3) +
+ ((u64)h3 * s2) +
+ (h4 * s1);
+ d2 = ((u64)h0 * r2) +
+ ((u64)h1 * r1) +
+ ((u64)h2 * r0) +
+ ((u64)h3 * s3) +
+ (h4 * s2);
+ d3 = ((u64)h0 * r3) +
+ ((u64)h1 * r2) +
+ ((u64)h2 * r1) +
+ ((u64)h3 * r0) +
+ (h4 * s3);
+ h4 = (h4 * r0);
+
+ /* last reduction step: */
+ /* a) h4:h0 = h4<<128 + d3<<96 + d2<<64 + d1<<32 + d0 */
+ h0 = (u32)d0;
+ h1 = (u32)(d1 += d0 >> 32);
+ h2 = (u32)(d2 += d1 >> 32);
+ h3 = (u32)(d3 += d2 >> 32);
+ h4 += (u32)(d3 >> 32);
+ /* b) (h4:h0 += (h4:h0>>130) * 5) %= 2^130 */
+ c = (h4 >> 2) + (h4 & ~3U);
+ h4 &= 3;
+ h0 += c;
+ h1 += (c = CONSTANT_TIME_CARRY(h0, c));
+ h2 += (c = CONSTANT_TIME_CARRY(h1, c));
+ h3 += (c = CONSTANT_TIME_CARRY(h2, c));
+ h4 += CONSTANT_TIME_CARRY(h3, c);
+ /*
+ * Occasional overflows to 3rd bit of h4 are taken care of
+ * "naturally". If after this point we end up at the top of
+ * this loop, then the overflow bit will be accounted for
+ * in next iteration. If we end up in poly1305_emit, then
+ * comparison to modulus below will still count as "carry
+ * into 131st bit", so that properly reduced value will be
+ * picked in conditional move.
+ */
+
+ inp += POLY1305_BLOCK_SIZE;
+ len -= POLY1305_BLOCK_SIZE;
+ }
+
+ st->h[0] = h0;
+ st->h[1] = h1;
+ st->h[2] = h2;
+ st->h[3] = h3;
+ st->h[4] = h4;
+#undef CONSTANT_TIME_CARRY
+}
+
+static void poly1305_emit_generic(void *ctx, u8 mac[16], const u32 nonce[4])
+{
+ struct poly1305_internal *st = (struct poly1305_internal *)ctx;
+ __le32 *omac = (__force __le32 *)mac;
+ u32 h0, h1, h2, h3, h4;
+ u32 g0, g1, g2, g3, g4;
+ u64 t;
+ u32 mask;
+
+ h0 = st->h[0];
+ h1 = st->h[1];
+ h2 = st->h[2];
+ h3 = st->h[3];
+ h4 = st->h[4];
+
+ /* compare to modulus by computing h + -p */
+ g0 = (u32)(t = (u64)h0 + 5);
+ g1 = (u32)(t = (u64)h1 + (t >> 32));
+ g2 = (u32)(t = (u64)h2 + (t >> 32));
+ g3 = (u32)(t = (u64)h3 + (t >> 32));
+ g4 = h4 + (u32)(t >> 32);
+
+ /* if there was carry into 131st bit, h3:h0 = g3:g0 */
+ mask = 0 - (g4 >> 2);
+ g0 &= mask;
+ g1 &= mask;
+ g2 &= mask;
+ g3 &= mask;
+ mask = ~mask;
+ h0 = (h0 & mask) | g0;
+ h1 = (h1 & mask) | g1;
+ h2 = (h2 & mask) | g2;
+ h3 = (h3 & mask) | g3;
+
+ /* mac = (h + nonce) % (2^128) */
+ h0 = (u32)(t = (u64)h0 + nonce[0]);
+ h1 = (u32)(t = (u64)h1 + (t >> 32) + nonce[1]);
+ h2 = (u32)(t = (u64)h2 + (t >> 32) + nonce[2]);
+ h3 = (u32)(t = (u64)h3 + (t >> 32) + nonce[3]);
+
+ omac[0] = cpu_to_le32(h0);
+ omac[1] = cpu_to_le32(h1);
+ omac[2] = cpu_to_le32(h2);
+ omac[3] = cpu_to_le32(h3);
+}
+#endif
+
+void poly1305_init(struct poly1305_ctx *ctx, const u8 key[POLY1305_KEY_SIZE], bool have_simd)
+{
+ ctx->nonce[0] = le32_to_cpup((__le32 *)&key[16]);
+ ctx->nonce[1] = le32_to_cpup((__le32 *)&key[20]);
+ ctx->nonce[2] = le32_to_cpup((__le32 *)&key[24]);
+ ctx->nonce[3] = le32_to_cpup((__le32 *)&key[28]);
+
+#if defined(CONFIG_X86_64)
+ poly1305_init_x86_64(ctx->opaque, key);
+#elif defined(CONFIG_ARM) || defined(CONFIG_ARM64)
+ poly1305_init_arm(ctx->opaque, key);
+#elif defined(CONFIG_MIPS) && (defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2))
+ poly1305_init_mips(ctx->opaque, key);
+#else
+ poly1305_init_generic(ctx->opaque, key);
+#endif
+ ctx->num = 0;
+}
+
+static inline void poly1305_blocks(void *ctx, const u8 *inp, size_t len, u32 padbit, bool have_simd)
+{
+#if defined(CONFIG_X86_64)
+#ifdef CONFIG_AS_AVX512
+ if (poly1305_use_avx512 && have_simd)
+ poly1305_blocks_avx512(ctx, inp, len, padbit);
+ else
+#endif
+#ifdef CONFIG_AS_AVX2
+ if (poly1305_use_avx2 && have_simd)
+ poly1305_blocks_avx2(ctx, inp, len, padbit);
+ else
+#endif
+#ifdef CONFIG_AS_AVX
+ if (poly1305_use_avx && have_simd)
+ poly1305_blocks_avx(ctx, inp, len, padbit);
+ else
+#endif
+ poly1305_blocks_x86_64(ctx, inp, len, padbit);
+#elif defined(CONFIG_ARM) || defined(CONFIG_ARM64)
+#if defined(ARM_USE_NEON)
+ if (poly1305_use_neon && have_simd)
+ poly1305_blocks_neon(ctx, inp, len, padbit);
+ else
+#endif
+ poly1305_blocks_arm(ctx, inp, len, padbit);
+#elif defined(CONFIG_MIPS) && (defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2))
+ poly1305_blocks_mips(ctx, inp, len, padbit);
+#else
+ poly1305_blocks_generic(ctx, inp, len, padbit);
+#endif
+}
+
+static inline void poly1305_emit(void *ctx, u8 mac[POLY1305_MAC_SIZE], const u32 nonce[4], bool have_simd)
+{
+#if defined(CONFIG_X86_64)
+#ifdef CONFIG_AS_AVX512
+ if (poly1305_use_avx512 && have_simd)
+ poly1305_emit_avx(ctx, mac, nonce);
+ else
+#endif
+#ifdef CONFIG_AS_AVX2
+ if (poly1305_use_avx2 && have_simd)
+ poly1305_emit_avx(ctx, mac, nonce);
+ else
+#endif
+#ifdef CONFIG_AS_AVX
+ if (poly1305_use_avx && have_simd)
+ poly1305_emit_avx(ctx, mac, nonce);
+ else
+#endif
+ poly1305_emit_x86_64(ctx, mac, nonce);
+#elif defined(CONFIG_ARM) || defined(CONFIG_ARM64)
+#if defined(ARM_USE_NEON)
+ if (poly1305_use_neon && have_simd)
+ poly1305_emit_neon(ctx, mac, nonce);
+ else
+#endif
+ poly1305_emit_arm(ctx, mac, nonce);
+#elif defined(CONFIG_MIPS) && (defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2))
+ poly1305_emit_mips(ctx, mac, nonce);
+#else
+ poly1305_emit_generic(ctx, mac, nonce);
+#endif
+}
+
+void poly1305_update(struct poly1305_ctx *ctx, const u8 *inp, size_t len, bool have_simd)
+{
+ const size_t num = ctx->num % POLY1305_BLOCK_SIZE;
+ size_t rem;
+
+ if (num) {
+ rem = POLY1305_BLOCK_SIZE - num;
+ if (len >= rem) {
+ memcpy(ctx->data + num, inp, rem);
+ poly1305_blocks(ctx->opaque, ctx->data, POLY1305_BLOCK_SIZE, 1, have_simd);
+ inp += rem;
+ len -= rem;
+ } else {
+ /* Still not enough data to process a block. */
+ memcpy(ctx->data + num, inp, len);
+ ctx->num = num + len;
+ return;
+ }
+ }
+
+ rem = len % POLY1305_BLOCK_SIZE;
+ len -= rem;
+
+ if (len >= POLY1305_BLOCK_SIZE) {
+ poly1305_blocks(ctx->opaque, inp, len, 1, have_simd);
+ inp += len;
+ }
+
+ if (rem)
+ memcpy(ctx->data, inp, rem);
+
+ ctx->num = rem;
+}
+
+void poly1305_finish(struct poly1305_ctx *ctx, u8 mac[POLY1305_MAC_SIZE], bool have_simd)
+{
+ size_t num = ctx->num % POLY1305_BLOCK_SIZE;
+
+ if (num) {
+ ctx->data[num++] = 1; /* pad bit */
+ while (num < POLY1305_BLOCK_SIZE)
+ ctx->data[num++] = 0;
+ poly1305_blocks(ctx->opaque, ctx->data, POLY1305_BLOCK_SIZE, 0, have_simd);
+ }
+
+ poly1305_emit(ctx->opaque, mac, ctx->nonce, have_simd);
+
+ /* zero out the state */
+ memzero_explicit(ctx, sizeof(*ctx));
+}
+
+#include "../selftest/poly1305.h"
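
A note on the branch-free carry chain in poly1305_blocks_generic(): after h0 += c, CONSTANT_TIME_CARRY(h0, c) is 1 exactly when that addition wrapped, i.e. when the new h0 is less than c as an unsigned value, computed without a comparison the compiler could lower to a branch. A worked check of (a ^ ((a ^ b) | ((a - b) ^ b))) >> 31 with a = 2, b = 5 (a wrapped sum, so a carry is expected):

	a ^ b           = 7
	(a - b) ^ b     = 0xfffffffd ^ 5 = 0xfffffff8
	7 | 0xfffffff8  = 0xffffffff
	2 ^ 0xffffffff  = 0xfffffffd    ->   >> 31 gives 1  /* carry out */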
diff --git a/src/crypto/poly1305.h b/src/crypto/poly1305.h
new file mode 100644
index 0000000..21833be
--- /dev/null
+++ b/src/crypto/poly1305.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2015-2018 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#ifndef _WG_POLY1305_H
+#define _WG_POLY1305_H
+
+#include <linux/types.h>
+
+enum poly1305_lengths {
+ POLY1305_BLOCK_SIZE = 16,
+ POLY1305_KEY_SIZE = 32,
+ POLY1305_MAC_SIZE = 16
+};
+
+struct poly1305_ctx {
+ u8 opaque[24 * sizeof(u64)];
+ u32 nonce[4];
+ u8 data[POLY1305_BLOCK_SIZE];
+ size_t num;
+} __aligned(8);
+
+void poly1305_fpu_init(void);
+
+void poly1305_init(struct poly1305_ctx *ctx, const u8 key[POLY1305_KEY_SIZE], bool have_simd);
+void poly1305_update(struct poly1305_ctx *ctx, const u8 *inp, size_t len, bool have_simd);
+void poly1305_finish(struct poly1305_ctx *ctx, u8 mac[POLY1305_MAC_SIZE], bool have_simd);
+
+#ifdef DEBUG
+bool poly1305_selftest(void);
+#endif
+
+#endif /* _WG_POLY1305_H */
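
With init/update/finish now exported from their own header, a one-shot MAC reads as follows (a sketch; one_time_key, msg, msg_len and have_simd are hypothetical — in the AEAD code the 32-byte key is taken from the first ChaCha20 keystream block):

	struct poly1305_ctx mac;
	u8 tag[POLY1305_MAC_SIZE];

	poly1305_init(&mac, one_time_key, have_simd);   /* key = r (clamped) || s */
	poly1305_update(&mac, msg, msg_len, have_simd);
	poly1305_finish(&mac, tag, have_simd);          /* emits tag, zeroes ctx  */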
diff --git a/src/main.c b/src/main.c
index ebda16a..f1db64b 100644
--- a/src/main.c
+++ b/src/main.c
@@ -9,7 +9,8 @@
#include "queueing.h"
#include "ratelimiter.h"
#include "netlink.h"
-#include "crypto/chacha20poly1305.h"
+#include "crypto/chacha20.h"
+#include "crypto/poly1305.h"
#include "crypto/blake2s.h"
#include "crypto/curve25519.h"
#include "uapi/wireguard.h"
@@ -24,7 +25,8 @@ static int __init mod_init(void)
{
int ret;
- chacha20poly1305_fpu_init();
+ chacha20_fpu_init();
+ poly1305_fpu_init();
blake2s_fpu_init();
curve25519_fpu_init();
#ifdef DEBUG
diff --git a/src/selftest/chacha20poly1305.h b/src/selftest/chacha20poly1305.h
index a6a5598..efbb76b 100644
--- a/src/selftest/chacha20poly1305.h
+++ b/src/selftest/chacha20poly1305.h
@@ -1278,19 +1278,18 @@ static inline void chacha20poly1305_selftest_encrypt_bignonce(u8 *dst, const u8 *src, const size_t src_len, const u8 *ad, const size_t ad_len, const u8 nonce[12], const u8 key[CHACHA20POLY1305_KEYLEN])
bool have_simd = chacha20poly1305_init_simd();
__le64 len;
struct poly1305_ctx poly1305_state;
- struct chacha20_ctx chacha20_state = {{
- EXPAND_32_BYTE_K,
- le32_to_cpuvp(key + 0), le32_to_cpuvp(key + 4), le32_to_cpuvp(key + 8), le32_to_cpuvp(key + 12),
- le32_to_cpuvp(key + 16), le32_to_cpuvp(key + 20), le32_to_cpuvp(key + 24), le32_to_cpuvp(key + 28),
- 0, le32_to_cpuvp(nonce + 0), le32_to_cpuvp(nonce + 4), le32_to_cpuvp(nonce + 8)
- }};
- u8 block0[CHACHA20_BLOCK_SIZE] = { 0 };
+ struct chacha20_ctx chacha20_state;
+ u8 block0[POLY1305_KEY_SIZE] = { 0 };
- chacha20_crypt(&chacha20_state, block0, block0, sizeof(block0), have_simd);
+ chacha20_init(&chacha20_state, key, 0);
+ chacha20_state.counter[1] = le32_to_cpu(*(__le32 *)(nonce + 0));
+ chacha20_state.counter[2] = le32_to_cpu(*(__le32 *)(nonce + 4));
+ chacha20_state.counter[3] = le32_to_cpu(*(__le32 *)(nonce + 8));
+ chacha20(&chacha20_state, block0, block0, sizeof(block0), have_simd);
poly1305_init(&poly1305_state, block0, have_simd);
poly1305_update(&poly1305_state, ad, ad_len, have_simd);
poly1305_update(&poly1305_state, pad0, (0x10 - ad_len) & 0xf, have_simd);
- chacha20_crypt(&chacha20_state, dst, src, src_len, have_simd);
+ chacha20(&chacha20_state, dst, src, src_len, have_simd);
poly1305_update(&poly1305_state, dst, src_len, have_simd);
poly1305_update(&poly1305_state, pad0, (0x10 - src_len) & 0xf, have_simd);
len = cpu_to_le64(ad_len);
@@ -1304,7 +1303,7 @@ static inline void chacha20poly1305_selftest_encrypt_bignonce(u8 *dst, const u8
static inline void chacha20poly1305_selftest_encrypt(u8 *dst, const u8 *src, const size_t src_len, const u8 *ad, const size_t ad_len, const u8 *nonce, const size_t nonce_len, const u8 key[CHACHA20POLY1305_KEYLEN])
{
if (nonce_len == 8)
- chacha20poly1305_encrypt(dst, src, src_len, ad, ad_len, le64_to_cpu(*(__force __le64 *)nonce), key);
+ chacha20poly1305_encrypt(dst, src, src_len, ad, ad_len, le64_to_cpup((__force __le64 *)nonce), key);
else if (nonce_len == 12)
chacha20poly1305_selftest_encrypt_bignonce(dst, src, src_len, ad, ad_len, nonce, key);
else
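
The bignonce path above also documents how a 96-bit nonce maps onto the 64-bit-nonce chacha20_init(): counter words 1-3 are simply overwritten after init, leaving counter[0] as a 32-bit block counter. Expressed as a hypothetical helper (not part of this commit):

	static void chacha20_init_96bit_nonce(struct chacha20_ctx *st, const u8 key[CHACHA20_KEY_SIZE], const u8 nonce[12])
	{
		chacha20_init(st, key, 0);  /* counter and nonce words start at zero */
		st->counter[1] = le32_to_cpup((__le32 *)(nonce + 0));
		st->counter[2] = le32_to_cpup((__le32 *)(nonce + 4));
		st->counter[3] = le32_to_cpup((__le32 *)(nonce + 8));
	}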
diff --git a/src/selftest/poly1305.h b/src/selftest/poly1305.h
index 9428eba..41acf7c 100644
--- a/src/selftest/poly1305.h
+++ b/src/selftest/poly1305.h
@@ -6,6 +6,8 @@
#ifdef DEBUG
+#include "../crypto/chacha20poly1305.h"
+
struct poly1305_testdata {
size_t size;
const u8 data[1024];