author     Samuel Neves <sneves@dei.uc.pt>          2019-05-04 17:14:09 +0100
committer  Jason A. Donenfeld <Jason@zx2c4.com>     2019-05-29 01:23:24 +0200
commit     377c3938c67b9f6515c12678e9a0ade703acf500 (patch)
tree       6e2da59fef4d4e83f9869bdaab73504696b67603 /src/crypto/zinc/chacha20
parent     1de337ad9d8ce26cae57281b92033e7e07aff895 (diff)
blake2s,chacha: latency tweak
In every odd-numbered round, instead of operating over the state
    x00 x01 x02 x03
    x05 x06 x07 x04
    x10 x11 x08 x09
    x15 x12 x13 x14
we operate over the rotated state
    x03 x00 x01 x02
    x04 x05 x06 x07
    x09 x10 x11 x08
    x14 x15 x12 x13
The advantage here is that this requires no changes to the
'x04 x05 x06 x07' row, which is in the critical path. This
results in a noticeable latency improvement of roughly R
cycles, for R diagonal rounds in the primitive.
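
To make the equivalence concrete, here is a small portable C sketch; it is
not taken from this patch, and every name in it (qround, rotate_row,
double_round_old/new) is illustrative only. It runs one column round plus
one diagonal round with each convention, shuffling rows b/c/d in the old
one and rows a/c/d in the new one while leaving the 'x04 x05 x06 x07' row
alone, and asserts that both conventions produce the same state once the
rows are rotated back:

#include <assert.h>
#include <stdint.h>
#include <string.h>

static uint32_t rotl32(uint32_t v, int n)
{
	return (v << n) | (v >> (32 - n));
}

/* One ChaCha quarter-round applied lane-wise to four whole rows. */
static void qround(uint32_t a[4], uint32_t b[4], uint32_t c[4], uint32_t d[4])
{
	for (int i = 0; i < 4; i++) {
		a[i] += b[i]; d[i] = rotl32(d[i] ^ a[i], 16);
		c[i] += d[i]; b[i] = rotl32(b[i] ^ c[i], 12);
		a[i] += b[i]; d[i] = rotl32(d[i] ^ a[i], 8);
		c[i] += d[i]; b[i] = rotl32(b[i] ^ c[i], 7);
	}
}

/* Rotate a row by n lanes: lane i takes the value from lane (i+n) % 4,
 * which is what the vext/pshufd shuffles do to a vector register. */
static void rotate_row(uint32_t r[4], int n)
{
	uint32_t t[4];
	for (int i = 0; i < 4; i++)
		t[i] = r[(i + n) & 3];
	memcpy(r, t, sizeof(t));
}

/* Column round + diagonal round, old convention: shuffle rows b, c, d. */
static void double_round_old(uint32_t x[4][4])
{
	qround(x[0], x[1], x[2], x[3]);
	rotate_row(x[1], 1); rotate_row(x[2], 2); rotate_row(x[3], 3);
	qround(x[0], x[1], x[2], x[3]);
	rotate_row(x[1], 3); rotate_row(x[2], 2); rotate_row(x[3], 1);
}

/* Same thing, new convention: shuffle rows a, c, d; row b never moves. */
static void double_round_new(uint32_t x[4][4])
{
	qround(x[0], x[1], x[2], x[3]);
	rotate_row(x[0], 3); rotate_row(x[2], 1); rotate_row(x[3], 2);
	qround(x[0], x[1], x[2], x[3]);
	rotate_row(x[0], 1); rotate_row(x[2], 3); rotate_row(x[3], 2);
}

int main(void)
{
	uint32_t s[4][4], t[4][4];

	for (int i = 0; i < 16; i++)
		s[i / 4][i % 4] = t[i / 4][i % 4] = 0x9e3779b9u * (i + 1);

	double_round_old(s);
	double_round_new(t);
	assert(memcmp(s, t, sizeof(s)) == 0);  /* both conventions agree */
	return 0;
}

In the actual SIMD code these row rotations are the vext/pshufd shuffles
changed by this patch; dropping the shuffle on row b shortens the
dependency chain, since b is the last word each quarter-round produces and
the first one the next round consumes.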
In the case of BLAKE2s, which I also moved from requiring AVX
to only requiring SSSE3, we save approximately 30 cycles per
compression function call on Haswell and Skylake. In other
words, this is an improvement of ~0.6 cpb.
This idea was pointed out to me by Shunsuke Shimizu, though
it appears to have been around for longer.
Signed-off-by: Samuel Neves <sneves@dei.uc.pt>
Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
Diffstat (limited to 'src/crypto/zinc/chacha20')
-rw-r--r--  src/crypto/zinc/chacha20/chacha20-arm.pl    |  6
-rw-r--r--  src/crypto/zinc/chacha20/chacha20-arm64.pl  |  6
-rw-r--r--  src/crypto/zinc/chacha20/chacha20-x86_64.pl | 48
3 files changed, 30 insertions, 30 deletions
diff --git a/src/crypto/zinc/chacha20/chacha20-arm.pl b/src/crypto/zinc/chacha20/chacha20-arm.pl
index 6a7d62c..6785383 100644
--- a/src/crypto/zinc/chacha20/chacha20-arm.pl
+++ b/src/crypto/zinc/chacha20/chacha20-arm.pl
@@ -686,9 +686,9 @@ my ($a,$b,$c,$d,$t)=@_;
 	"&vshr_u32 ($b,$t,25)",
 	"&vsli_32 ($b,$t,7)",
-	"&vext_8 ($c,$c,$c,8)",
-	"&vext_8 ($b,$b,$b,$odd?12:4)",
-	"&vext_8 ($d,$d,$d,$odd?4:12)"
+	"&vext_8 ($a,$a,$a,$odd?4:12)",
+	"&vext_8 ($d,$d,$d,8)",
+	"&vext_8 ($c,$c,$c,$odd?12:4)"
 	);
 }
diff --git a/src/crypto/zinc/chacha20/chacha20-arm64.pl b/src/crypto/zinc/chacha20/chacha20-arm64.pl
index fc63cc8..ac14a99 100644
--- a/src/crypto/zinc/chacha20/chacha20-arm64.pl
+++ b/src/crypto/zinc/chacha20/chacha20-arm64.pl
@@ -378,9 +378,9 @@ my ($a,$b,$c,$d,$t)=@_;
 	"&ushr ('$b','$t',25)",
 	"&sli ('$b','$t',7)",
-	"&ext ('$c','$c','$c',8)",
-	"&ext ('$d','$d','$d',$odd?4:12)",
-	"&ext ('$b','$b','$b',$odd?12:4)"
+	"&ext ('$a','$a','$a',$odd?4:12)",
+	"&ext ('$d','$d','$d',8)",
+	"&ext ('$c','$c','$c',$odd?12:4)"
 	);
 }
diff --git a/src/crypto/zinc/chacha20/chacha20-x86_64.pl b/src/crypto/zinc/chacha20/chacha20-x86_64.pl
index 38532f8..116c16e 100644
--- a/src/crypto/zinc/chacha20/chacha20-x86_64.pl
+++ b/src/crypto/zinc/chacha20/chacha20-x86_64.pl
@@ -525,15 +525,15 @@ $code.=<<___;
 1:
 ___
 	&SSSE3ROUND();
-	&pshufd ($c,$c,0b01001110);
-	&pshufd ($b,$b,0b00111001);
-	&pshufd ($d,$d,0b10010011);
+	&pshufd ($a,$a,0b10010011);
+	&pshufd ($d,$d,0b01001110);
+	&pshufd ($c,$c,0b00111001);
 	&nop ();
 	&SSSE3ROUND();
-	&pshufd ($c,$c,0b01001110);
-	&pshufd ($b,$b,0b10010011);
-	&pshufd ($d,$d,0b00111001);
+	&pshufd ($a,$a,0b00111001);
+	&pshufd ($d,$d,0b01001110);
+	&pshufd ($c,$c,0b10010011);
 	&dec ($counter);
 	&jnz ("1b");
@@ -600,15 +600,15 @@ $code.=<<___;
 .Loop_ssse3:
 ___
 	&SSSE3ROUND();
-	&pshufd ($c,$c,0b01001110);
-	&pshufd ($b,$b,0b00111001);
-	&pshufd ($d,$d,0b10010011);
+	&pshufd ($a,$a,0b10010011);
+	&pshufd ($d,$d,0b01001110);
+	&pshufd ($c,$c,0b00111001);
 	&nop ();
 	&SSSE3ROUND();
-	&pshufd ($c,$c,0b01001110);
-	&pshufd ($b,$b,0b10010011);
-	&pshufd ($d,$d,0b00111001);
+	&pshufd ($a,$a,0b00111001);
+	&pshufd ($d,$d,0b01001110);
+	&pshufd ($c,$c,0b10010011);
 	&dec ($counter);
 	&jnz (".Loop_ssse3");
@@ -770,20 +770,20 @@ $code.=<<___;
 .Loop_128:
 ___
 	&SSSE3ROUND_2x();
-	&pshufd ($c,$c,0b01001110);
-	&pshufd ($b,$b,0b00111001);
-	&pshufd ($d,$d,0b10010011);
-	&pshufd ($c1,$c1,0b01001110);
-	&pshufd ($b1,$b1,0b00111001);
-	&pshufd ($d1,$d1,0b10010011);
+	&pshufd ($a,$a,0b10010011);
+	&pshufd ($d,$d,0b01001110);
+	&pshufd ($c,$c,0b00111001);
+	&pshufd ($a1,$a1,0b10010011);
+	&pshufd ($d1,$d1,0b01001110);
+	&pshufd ($c1,$c1,0b00111001);
 	&SSSE3ROUND_2x();
-	&pshufd ($c,$c,0b01001110);
-	&pshufd ($b,$b,0b10010011);
-	&pshufd ($d,$d,0b00111001);
-	&pshufd ($c1,$c1,0b01001110);
-	&pshufd ($b1,$b1,0b10010011);
-	&pshufd ($d1,$d1,0b00111001);
+	&pshufd ($a,$a,0b00111001);
+	&pshufd ($d,$d,0b01001110);
+	&pshufd ($c,$c,0b10010011);
+	&pshufd ($a1,$a1,0b00111001);
+	&pshufd ($d1,$d1,0b01001110);
+	&pshufd ($c1,$c1,0b10010011);
 	&dec ($counter);
 	&jnz (".Loop_128");
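
For reference, a minimal SSE2 sketch of what the new &pshufd pattern above
computes; the helper names (diagonalize/undiagonalize) and the check in
main are mine, nothing here is taken verbatim from the patch. Immediate
0b10010011 (0x93) maps a row (x0,x1,x2,x3) to (x3,x0,x1,x2), 0b01001110
(0x4e) swaps the two 64-bit halves, and 0b00111001 (0x39) maps
(x0,x1,x2,x3) to (x1,x2,x3,x0), so the shuffle pair only ever touches rows
a, c and d:

#include <assert.h>
#include <emmintrin.h>

/* Align the diagonals into columns; only rows a, c, d are shuffled. */
static void diagonalize(__m128i *a, __m128i *c, __m128i *d)
{
	*a = _mm_shuffle_epi32(*a, 0x93); /* x00 x01 x02 x03 -> x03 x00 x01 x02 */
	*d = _mm_shuffle_epi32(*d, 0x4e); /* x12 x13 x14 x15 -> x14 x15 x12 x13 */
	*c = _mm_shuffle_epi32(*c, 0x39); /* x08 x09 x10 x11 -> x09 x10 x11 x08 */
}

/* Inverse shuffles, run after the diagonal quarter-rounds. */
static void undiagonalize(__m128i *a, __m128i *c, __m128i *d)
{
	*a = _mm_shuffle_epi32(*a, 0x39);
	*d = _mm_shuffle_epi32(*d, 0x4e);
	*c = _mm_shuffle_epi32(*c, 0x93);
}

int main(void)
{
	/* _mm_set_epi32 takes lane 3 first, so lanes read x00, x01, x02, x03. */
	__m128i a = _mm_set_epi32(3, 2, 1, 0);
	__m128i c = _mm_set_epi32(11, 10, 9, 8);
	__m128i d = _mm_set_epi32(15, 14, 13, 12);
	__m128i a0 = a, c0 = c, d0 = d;

	diagonalize(&a, &c, &d);
	undiagonalize(&a, &c, &d);

	/* The two shuffle sets are exact inverses of each other. */
	assert(_mm_movemask_epi8(_mm_cmpeq_epi32(a, a0)) == 0xffff);
	assert(_mm_movemask_epi8(_mm_cmpeq_epi32(c, c0)) == 0xffff);
	assert(_mm_movemask_epi8(_mm_cmpeq_epi32(d, d0)) == 0xffff);
	return 0;
}

On ARM, the vext_8/ext calls with byte offsets 4, 8 and 12 perform the same
one-, two- and three-lane rotations on NEON registers.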