author    | Jason A. Donenfeld <Jason@zx2c4.com> | 2019-02-03 01:43:52 +0100
committer | Jason A. Donenfeld <Jason@zx2c4.com> | 2019-02-03 18:27:33 +0100
commit    | a3008c17fc41e1d6c1b95d518ff9c770be89c333 (patch)
tree      | 326155c4d56cbd17338fa5b8be36b82f5b8946cf /src/crypto/zinc
parent    | 75f476905a1bcd013c8bee07282fd7fb1dca738e (diff)
chacha20poly1305: permit unaligned strides on certain platforms
The map allocations that would be required to fix up unaligned strides are
mostly slower than the unaligned access paths themselves.
Reported-by: Louis Sautier <sbraz@gentoo.org>
Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
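
For readers unfamiliar with the idiom used in the patch: the change collapses three separately named static structures into a single blkcipher_desc built from nested compound literals, and only sets cra_alignmask on platforms without efficient unaligned access. The following standalone sketch shows the same construction, using hypothetical stand-in types (alg, cipher, cipher_desc) and a stand-in macro in place of CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS, rather than the kernel's real definitions:

```c
#include <stdio.h>

/* Hypothetical stand-ins that only mirror the shape of the kernel structs. */
struct alg         { unsigned int blocksize; unsigned int alignmask; };
struct cipher      { struct alg *alg; };
struct cipher_desc { struct cipher *tfm; };

/*
 * One static descriptor built from nested compound literals (C99 compound
 * literals at file scope have static storage duration).  The alignmask is
 * set only when unaligned access is assumed to be slow.
 */
static struct cipher_desc desc = { .tfm = &(struct cipher){
        .alg = &(struct alg){
                .blocksize = 1,
#ifndef HAVE_EFFICIENT_UNALIGNED_ACCESS /* stand-in for the kernel config symbol */
                .alignmask = sizeof(unsigned int) - 1
#endif
        }
} };

int main(void)
{
        printf("blocksize=%u alignmask=%u\n",
               desc.tfm->alg->blocksize, desc.tfm->alg->alignmask);
        return 0;
}
```

Keeping everything in one initializer removes the intermediate names (chacha20_alg, chacha20_cipher) that existed only to wire the three structs together, which is why the diff nets out at fewer lines.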
Diffstat (limited to 'src/crypto/zinc')
-rw-r--r-- | src/crypto/zinc/chacha20poly1305.c | 32
1 file changed, 14 insertions(+), 18 deletions(-)
```diff
diff --git a/src/crypto/zinc/chacha20poly1305.c b/src/crypto/zinc/chacha20poly1305.c
index 28b9880..0001c92 100644
--- a/src/crypto/zinc/chacha20poly1305.c
+++ b/src/crypto/zinc/chacha20poly1305.c
@@ -20,18 +20,14 @@
 static const u8 pad0[16] = { 0 };
 
-static struct crypto_alg chacha20_alg = {
-        .cra_blocksize = 1,
-        .cra_alignmask = sizeof(u32) - 1
-};
-static struct crypto_blkcipher chacha20_cipher = {
-        .base = {
-                .__crt_alg = &chacha20_alg
-        }
-};
-static struct blkcipher_desc chacha20_desc = {
-        .tfm = &chacha20_cipher
-};
+static struct blkcipher_desc desc = { .tfm = &(struct crypto_blkcipher){
+        .base = { .__crt_alg = &(struct crypto_alg){
+                .cra_blocksize = 1,
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+                .cra_alignmask = sizeof(u32) - 1
+#endif
+        } }
+} };
 
 static inline void
 __chacha20poly1305_encrypt(u8 *dst, const u8 *src, const size_t src_len,
@@ -114,7 +110,7 @@ bool chacha20poly1305_encrypt_sg(struct scatterlist *dst,
 
         if (likely(src_len)) {
                 blkcipher_walk_init(&walk, dst, src, src_len);
-                ret = blkcipher_walk_virt_block(&chacha20_desc, &walk,
+                ret = blkcipher_walk_virt_block(&desc, &walk,
                                                 CHACHA20_BLOCK_SIZE);
                 while (walk.nbytes >= CHACHA20_BLOCK_SIZE) {
                         size_t chunk_len =
@@ -125,7 +121,7 @@ bool chacha20poly1305_encrypt_sg(struct scatterlist *dst,
                         poly1305_update(&poly1305_state, walk.dst.virt.addr,
                                         chunk_len, simd_context);
                         simd_relax(simd_context);
-                        ret = blkcipher_walk_done(&chacha20_desc, &walk,
+                        ret = blkcipher_walk_done(&desc, &walk,
                                                   walk.nbytes % CHACHA20_BLOCK_SIZE);
                 }
                 if (walk.nbytes) {
@@ -133,7 +129,7 @@ bool chacha20poly1305_encrypt_sg(struct scatterlist *dst,
                                  walk.src.virt.addr, walk.nbytes, simd_context);
                         poly1305_update(&poly1305_state, walk.dst.virt.addr,
                                         walk.nbytes, simd_context);
-                        ret = blkcipher_walk_done(&chacha20_desc, &walk, 0);
+                        ret = blkcipher_walk_done(&desc, &walk, 0);
                 }
         }
         if (unlikely(ret))
@@ -257,7 +253,7 @@ bool chacha20poly1305_decrypt_sg(struct scatterlist *dst,
         dst_len = src_len - POLY1305_MAC_SIZE;
         if (likely(dst_len)) {
                 blkcipher_walk_init(&walk, dst, src, dst_len);
-                ret = blkcipher_walk_virt_block(&chacha20_desc, &walk,
+                ret = blkcipher_walk_virt_block(&desc, &walk,
                                                 CHACHA20_BLOCK_SIZE);
                 while (walk.nbytes >= CHACHA20_BLOCK_SIZE) {
                         size_t chunk_len =
@@ -268,7 +264,7 @@ bool chacha20poly1305_decrypt_sg(struct scatterlist *dst,
                         chacha20(&chacha20_state, walk.dst.virt.addr,
                                  walk.src.virt.addr, chunk_len, simd_context);
                         simd_relax(simd_context);
-                        ret = blkcipher_walk_done(&chacha20_desc, &walk,
+                        ret = blkcipher_walk_done(&desc, &walk,
                                                   walk.nbytes % CHACHA20_BLOCK_SIZE);
                 }
                 if (walk.nbytes) {
@@ -276,7 +272,7 @@ bool chacha20poly1305_decrypt_sg(struct scatterlist *dst,
                                  walk.nbytes, simd_context);
                         chacha20(&chacha20_state, walk.dst.virt.addr,
                                  walk.src.virt.addr, walk.nbytes, simd_context);
-                        ret = blkcipher_walk_done(&chacha20_desc, &walk, 0);
+                        ret = blkcipher_walk_done(&desc, &walk, 0);
                 }
         }
         if (unlikely(ret))
```
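
To illustrate why dropping cra_alignmask helps on platforms with fast unaligned access, here is a simplified, hypothetical sketch of the kind of decision an alignmask drives in a block walk. It is not the kernel's blkcipher_walk implementation; process_block() and walk_step() are invented stand-ins. The point is only that a non-zero alignmask can force misaligned buffers through an aligned bounce copy, whereas an alignmask of zero keeps the data on the direct, copy-free path:

```c
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical stand-in for the cipher core: XOR with a constant. */
static void process_block(uint8_t *dst, const uint8_t *src, size_t len)
{
        for (size_t i = 0; i < len; ++i)
                dst[i] = src[i] ^ 0x5a;
}

/*
 * One walk step under an alignmask: misaligned buffers are bounced through
 * an aligned scratch allocation before and after processing.  With an
 * alignmask of zero the fast, in-place path is always taken.
 */
static void walk_step(uint8_t *dst, const uint8_t *src, size_t len,
                      uintptr_t alignmask)
{
        if (((uintptr_t)src | (uintptr_t)dst) & alignmask) {
                size_t alloc_len = (len + alignmask) & ~alignmask;
                uint8_t *bounce = aligned_alloc(alignmask + 1, alloc_len);

                if (!bounce)
                        return;
                memcpy(bounce, src, len);          /* slow path: copy in   */
                process_block(bounce, bounce, len);
                memcpy(dst, bounce, len);          /* slow path: copy out  */
                free(bounce);
        } else {
                process_block(dst, src, len);      /* fast path: no copies */
        }
}

int main(void)
{
        _Alignas(uint32_t) uint8_t buf[32] = "hello, unaligned world";

        /* Offset by one byte so the buffer is misaligned for alignmask = 3. */
        walk_step(buf + 1, buf + 1, 16, sizeof(uint32_t) - 1); /* bounce copy */
        walk_step(buf + 1, buf + 1, 16, 0);                    /* direct path */
        printf("%s\n", buf);
        return 0;
}
```

With the patch applied, platforms that define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS leave cra_alignmask at zero, so the walk never needs the extra mappings and copies the commit message refers to; platforms without it keep the old, aligned behaviour.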