author     Ard Biesheuvel <ard.biesheuvel@arm.com>   2020-11-24 17:33:31 +0100
committer  Tomas Mraz <tmraz@fedoraproject.org>      2020-12-01 14:27:48 +0100
commit     409c59e8f44ae56f2587cdd8a7ce611d0e3d91d9 (patch)
tree       5cfe476a930af0e4d35304facdcc94b01d11115d
parent     0c60676338f1e25faaa44117238d8e35e507feee (diff)
aes/asm/aesv8-armx.pl: avoid 32-bit lane assignment in CTR mode
ARM Cortex-A57 and Cortex-A72 cores running in 32-bit mode are affected
by silicon errata #1742098 [0] and #1655431 [1], respectively, where the
second instruction of an AES instruction pair may execute twice if an
interrupt is taken right after the first instruction consumes an input
register of which a single 32-bit lane has been updated the last time it
was modified.

This is not as rare an occurrence as it may seem: in counter mode, only
the least significant 32-bit word is incremented in the absence of a
carry, which makes our counter mode implementation susceptible to these
errata.

So let's shuffle the counter assignments around a bit so that the most
recent updates when the AES instruction pair executes are 128-bit wide.

[0] ARM-EPM-049219 v23 Cortex-A57 MPCore Software Developers Errata Notice
[1] ARM-EPM-012079 v11.0 Cortex-A72 MPCore Software Developers Errata Notice

Signed-off-by: Ard Biesheuvel <ard.biesheuvel@arm.com>
Reviewed-by: Paul Dale <paul.dale@oracle.com>
Reviewed-by: Tomas Mraz <tmraz@fedoraproject.org>
(Merged from https://github.com/openssl/openssl/pull/13571)
(cherry picked from commit 26217510d21cd4d5928db8bff41c6756a7c7a636)
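To make the hazard concrete, here is a minimal C sketch of the two
register-write shapes, using the ARM crypto intrinsics (vaeseq_u8 and
vaesmcq_u8 map to the AESE/AESMC pair, vsetq_lane_u32 to the vmov.32
lane move). It is illustrative only: the fix itself lives in the
generated assembly in the diff below, where instruction scheduling and
register allocation are explicit, whereas a compiler is free to reorder
intrinsics. The function names are hypothetical, __builtin_bswap32
stands in for the rev instruction (the counter is stored big-endian in
the block), and the build flag shown is one plausible choice, e.g.
-march=armv8-a+crypto on AArch64.

    /* Illustrative sketch only; not the patched code. */
    #include <arm_neon.h>
    #include <stdint.h>

    /* Errata-prone shape: the register consumed by the AESE/AESMC pair
     * was last written by a single 32-bit lane move
     * (vmov.32 ${dat}[3],$tctr). */
    static uint8x16_t ctr_block_lane_write(uint8x16_t iv, uint32_t ctr,
                                           uint8x16_t rk)
    {
        uint32x4_t v = vreinterpretq_u32_u8(iv);
        v = vsetq_lane_u32(__builtin_bswap32(ctr), v, 3); /* 32-bit lane update */
        uint8x16_t dat = vreinterpretq_u8_u32(v);
        return vaesmcq_u8(vaeseq_u8(dat, rk)); /* dat's last write was 32-bit */
    }

    /* Patched shape: patch the lane in a staging register ($ivec), then
     * copy all 128 bits (vorr) into the register the AES pair consumes,
     * so its most recent write is full-width. */
    static uint8x16_t ctr_block_full_copy(uint8x16_t ivec, uint32_t ctr,
                                          uint8x16_t rk)
    {
        uint32x4_t v = vreinterpretq_u32_u8(ivec);
        v = vsetq_lane_u32(__builtin_bswap32(ctr), v, 3);
        uint8x16_t dat = vorrq_u8(vreinterpretq_u8_u32(v),
                                  vreinterpretq_u8_u32(v)); /* 128-bit copy */
        return vaesmcq_u8(vaeseq_u8(dat, rk));
    }

In the patch itself this is exactly the transformation applied: the
vmov.32 lane writes are retargeted at $ivec, and each former
vmov.32 ${datN}[3] is replaced by a full-register vorr $datN,$ivec,$ivec
copy just before the AES rounds consume $datN.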
-rwxr-xr-x  crypto/aes/asm/aesv8-armx.pl  28
1 file changed, 14 insertions(+), 14 deletions(-)
diff --git a/crypto/aes/asm/aesv8-armx.pl b/crypto/aes/asm/aesv8-armx.pl
index f42f7bd1df..2b0e982996 100755
--- a/crypto/aes/asm/aesv8-armx.pl
+++ b/crypto/aes/asm/aesv8-armx.pl
@@ -740,17 +740,17 @@ $code.=<<___;
#ifndef __ARMEB__
rev $ctr, $ctr
#endif
- vorr $dat1,$dat0,$dat0
add $tctr1, $ctr, #1
- vorr $dat2,$dat0,$dat0
- add $ctr, $ctr, #2
vorr $ivec,$dat0,$dat0
rev $tctr1, $tctr1
- vmov.32 ${dat1}[3],$tctr1
+ vmov.32 ${ivec}[3],$tctr1
+ add $ctr, $ctr, #2
+ vorr $dat1,$ivec,$ivec
b.ls .Lctr32_tail
rev $tctr2, $ctr
+ vmov.32 ${ivec}[3],$tctr2
sub $len,$len,#3 // bias
- vmov.32 ${dat2}[3],$tctr2
+ vorr $dat2,$ivec,$ivec
b .Loop3x_ctr32
.align 4
@@ -777,11 +777,11 @@ $code.=<<___;
aese $dat1,q8
aesmc $tmp1,$dat1
vld1.8 {$in0},[$inp],#16
- vorr $dat0,$ivec,$ivec
+ add $tctr0,$ctr,#1
aese $dat2,q8
aesmc $dat2,$dat2
vld1.8 {$in1},[$inp],#16
- vorr $dat1,$ivec,$ivec
+ rev $tctr0,$tctr0
aese $tmp0,q9
aesmc $tmp0,$tmp0
aese $tmp1,q9
@@ -790,8 +790,6 @@ $code.=<<___;
mov $key_,$key
aese $dat2,q9
aesmc $tmp2,$dat2
- vorr $dat2,$ivec,$ivec
- add $tctr0,$ctr,#1
aese $tmp0,q12
aesmc $tmp0,$tmp0
aese $tmp1,q12
@@ -807,20 +805,22 @@ $code.=<<___;
aese $tmp1,q13
aesmc $tmp1,$tmp1
veor $in2,$in2,$rndlast
- rev $tctr0,$tctr0
+ vmov.32 ${ivec}[3], $tctr0
aese $tmp2,q13
aesmc $tmp2,$tmp2
- vmov.32 ${dat0}[3], $tctr0
+ vorr $dat0,$ivec,$ivec
rev $tctr1,$tctr1
aese $tmp0,q14
aesmc $tmp0,$tmp0
+ vmov.32 ${ivec}[3], $tctr1
+ rev $tctr2,$ctr
aese $tmp1,q14
aesmc $tmp1,$tmp1
- vmov.32 ${dat1}[3], $tctr1
- rev $tctr2,$ctr
+ vorr $dat1,$ivec,$ivec
+ vmov.32 ${ivec}[3], $tctr2
aese $tmp2,q14
aesmc $tmp2,$tmp2
- vmov.32 ${dat2}[3], $tctr2
+ vorr $dat2,$ivec,$ivec
subs $len,$len,#3
aese $tmp0,q15
aese $tmp1,q15