author    Josh Soref <jsoref@users.noreply.github.com>    2017-11-11 19:03:10 -0500
committer Rich Salz <rsalz@openssl.org>    2017-11-11 19:03:10 -0500
commit    46f4e1bec51dc96fa275c168752aa34359d9ee51 (patch)
tree      c80b737d1fff479fd88f6c41175187ebad868299 /crypto
parent    b4d0fa49d9d1a43792e58b0c8066bb23b9e53ef4 (diff)
download  openssl-46f4e1bec51dc96fa275c168752aa34359d9ee51.tar.gz
Many spelling fixes/typos corrected.
Around 138 distinct errors found and fixed; thanks!

Reviewed-by: Kurt Roeckx <kurt@roeckx.be>
Reviewed-by: Tim Hudson <tjh@openssl.org>
Reviewed-by: Rich Salz <rsalz@openssl.org>
(Merged from https://github.com/openssl/openssl/pull/3459)
Diffstat (limited to 'crypto')
-rwxr-xr-x  crypto/aes/asm/aes-586.pl            16
-rw-r--r--  crypto/aes/asm/aes-mips.pl            2
-rw-r--r--  crypto/aes/asm/aes-parisc.pl          2
-rw-r--r--  crypto/aes/asm/aes-s390x.pl           2
-rwxr-xr-x  crypto/aes/asm/aes-x86_64.pl          2
-rw-r--r--  crypto/aes/asm/aesfx-sparcv9.pl       2
-rw-r--r--  crypto/aes/asm/aesni-mb-x86_64.pl     8
-rw-r--r--  crypto/aes/asm/aesni-x86.pl           2
-rw-r--r--  crypto/aes/asm/aesni-x86_64.pl       16
-rw-r--r--  crypto/aes/asm/aest4-sparcv9.pl       2
-rwxr-xr-x  crypto/aes/asm/aesv8-armx.pl          4
-rwxr-xr-x  crypto/aes/asm/vpaes-armv8.pl         4
-rw-r--r--  crypto/arm_arch.h                     2
-rw-r--r--  crypto/asn1/standard_methods.h        2
-rw-r--r--  crypto/bn/asm/c64xplus-gf2m.pl        2
-rw-r--r--  crypto/bn/asm/ia64.S                 10
-rw-r--r--  crypto/bn/asm/parisc-mont.pl          2
-rw-r--r--  crypto/bn/asm/ppc-mont.pl             2
-rw-r--r--  crypto/bn/asm/ppc64-mont.pl           4
-rw-r--r--  crypto/bn/asm/s390x-gf2m.pl           2
-rw-r--r--  crypto/bn/asm/sparcv8.S              10
-rw-r--r--  crypto/bn/asm/sparcv8plus.S          10
-rw-r--r--  crypto/bn/asm/sparcv9-mont.pl         2
-rwxr-xr-x  crypto/bn/asm/sparcv9a-mont.pl        2
-rw-r--r--  crypto/bn/asm/vis3-mont.pl            2
-rw-r--r--  crypto/bn/asm/x86-gf2m.pl             2
-rwxr-xr-x  crypto/bn/asm/x86-mont.pl             2
-rw-r--r--  crypto/bn/asm/x86_64-gf2m.pl          2
-rwxr-xr-x  crypto/bn/asm/x86_64-mont.pl          4
-rwxr-xr-x  crypto/bn/asm/x86_64-mont5.pl         4
-rw-r--r--  crypto/c64xpluscpuid.pl               2
-rw-r--r--  crypto/camellia/asm/cmllt4-sparcv9.pl 4
-rwxr-xr-x  crypto/chacha/asm/chacha-c64xplus.pl  2
-rw-r--r--  crypto/des/asm/des-586.pl             2
-rw-r--r--  crypto/des/asm/des_enc.m4            16
-rw-r--r--  crypto/des/cfb_enc.c                  2
-rwxr-xr-x  crypto/ec/asm/ecp_nistz256-armv4.pl   4
-rwxr-xr-x  crypto/ec/asm/ecp_nistz256-avx2.pl    4
-rwxr-xr-x  crypto/ec/asm/ecp_nistz256-ppc64.pl   2
-rwxr-xr-x  crypto/ec/asm/ecp_nistz256-sparcv9.pl 6
-rw-r--r--  crypto/ec/ecdsa_ossl.c                2
-rw-r--r--  crypto/ec/ecp_nistp224.c              2
-rw-r--r--  crypto/ec/ecp_oct.c                   2
-rw-r--r--  crypto/engine/README                  6
-rw-r--r--  crypto/include/internal/aria.h        4
-rw-r--r--  crypto/include/internal/evp_int.h     2
-rwxr-xr-x  crypto/modes/asm/ghash-ia64.pl        2
-rw-r--r--  crypto/modes/asm/ghash-parisc.pl      2
-rw-r--r--  crypto/modes/asm/ghash-x86.pl         2
-rwxr-xr-x  crypto/modes/asm/ghashp8-ppc.pl       2
-rw-r--r--  crypto/modes/asm/ghashv8-armx.pl      8
-rw-r--r--  crypto/objects/README                 2
-rw-r--r--  crypto/objects/objects.txt            4
-rwxr-xr-x  crypto/perlasm/x86_64-xlate.pl        6
-rwxr-xr-x  crypto/poly1305/asm/poly1305-x86.pl   2
-rwxr-xr-x  crypto/poly1305/asm/poly1305-x86_64.pl 6
-rw-r--r--  crypto/rc4/asm/rc4-586.pl             2
-rwxr-xr-x  crypto/rc4/asm/rc4-x86_64.pl          2
-rw-r--r--  crypto/rsa/rsa_pmeth.c                2
-rw-r--r--  crypto/sha/asm/sha1-586.pl            4
-rw-r--r--  crypto/sha/asm/sha1-mb-x86_64.pl     10
-rwxr-xr-x  crypto/sha/asm/sha1-x86_64.pl         6
-rw-r--r--  crypto/sha/asm/sha256-586.pl          2
-rw-r--r--  crypto/sha/asm/sha256-mb-x86_64.pl    8
-rw-r--r--  crypto/sha/asm/sha512-586.pl          2
-rw-r--r--  crypto/sha/asm/sha512-armv8.pl        2
-rwxr-xr-x  crypto/sha/asm/sha512-parisc.pl       2
-rwxr-xr-x  crypto/sha/asm/sha512-ppc.pl          2
-rwxr-xr-x  crypto/sha/asm/sha512p8-ppc.pl        2
-rw-r--r--  crypto/sha/keccak1600.c               2
-rw-r--r--  crypto/sha/sha_locl.h                 2
-rw-r--r--  crypto/x86cpuid.pl                    2
72 files changed, 139 insertions, 139 deletions
diff --git a/crypto/aes/asm/aes-586.pl b/crypto/aes/asm/aes-586.pl
index 20c19e98bf..29059edf8b 100755
--- a/crypto/aes/asm/aes-586.pl
+++ b/crypto/aes/asm/aes-586.pl
@@ -55,8 +55,8 @@
# better performance on most recent µ-archs...
#
# Third version adds AES_cbc_encrypt implementation, which resulted in
-# up to 40% performance imrovement of CBC benchmark results. 40% was
-# observed on P4 core, where "overall" imrovement coefficient, i.e. if
+# up to 40% performance improvement of CBC benchmark results. 40% was
+# observed on P4 core, where "overall" improvement coefficient, i.e. if
# compared to PIC generated by GCC and in CBC mode, was observed to be
# as large as 4x:-) CBC performance is virtually identical to ECB now
# and on some platforms even better, e.g. 17.6 "small" cycles/byte on
@@ -159,7 +159,7 @@
# combinations then attack becomes infeasible. This is why revised
# AES_cbc_encrypt "dares" to switch to larger S-box when larger chunk
# of data is to be processed in one stroke. The current size limit of
-# 512 bytes is chosen to provide same [diminishigly low] probability
+# 512 bytes is chosen to provide same [diminishingly low] probability
# for cache-line to remain untouched in large chunk operation with
# large S-box as for single block operation with compact S-box and
# surely needs more careful consideration...
@@ -171,12 +171,12 @@
# yield execution to process performing AES just before timer fires
# off the scheduler, immediately regain control of CPU and analyze the
# cache state. For this attack to be efficient attacker would have to
-# effectively slow down the operation by several *orders* of magnitute,
+# effectively slow down the operation by several *orders* of magnitude,
# by ratio of time slice to duration of handful of AES rounds, which
# unlikely to remain unnoticed. Not to mention that this also means
-# that he would spend correspondigly more time to collect enough
+# that he would spend correspondingly more time to collect enough
# statistical data to mount the attack. It's probably appropriate to
-# say that if adeversary reckons that this attack is beneficial and
+# say that if adversary reckons that this attack is beneficial and
# risks to be noticed, you probably have larger problems having him
# mere opportunity. In other words suggested code design expects you
# to preclude/mitigate this attack by overall system security design.
@@ -240,7 +240,7 @@ $small_footprint=1; # $small_footprint=1 code is ~5% slower [on
# contention and in hope to "collect" 5% back
# in real-life applications...
-$vertical_spin=0; # shift "verticaly" defaults to 0, because of
+$vertical_spin=0; # shift "vertically" defaults to 0, because of
# its proof-of-concept status...
# Note that there is no decvert(), as well as last encryption round is
# performed with "horizontal" shifts. This is because this "vertical"
@@ -1606,7 +1606,7 @@ sub decstep()
# no instructions are reordered, as performance appears
# optimal... or rather that all attempts to reorder didn't
# result in better performance [which by the way is not a
- # bit lower than ecryption].
+ # bit lower than encryption].
if($i==3) { &mov ($key,$__key); }
else { &mov ($out,$s[0]); }
&and ($out,0xFF);
diff --git a/crypto/aes/asm/aes-mips.pl b/crypto/aes/asm/aes-mips.pl
index ba3e4545df..d81d76d77a 100644
--- a/crypto/aes/asm/aes-mips.pl
+++ b/crypto/aes/asm/aes-mips.pl
@@ -120,7 +120,7 @@ my ($i0,$i1,$i2,$i3)=($at,$t0,$t1,$t2);
my ($t0,$t1,$t2,$t3,$t4,$t5,$t6,$t7,$t8,$t9,$t10,$t11) = map("\$$_",(12..23));
my ($key0,$cnt)=($gp,$fp);
-# instuction ordering is "stolen" from output from MIPSpro assembler
+# instruction ordering is "stolen" from output from MIPSpro assembler
# invoked with -mips3 -O3 arguments...
$code.=<<___;
.align 5
diff --git a/crypto/aes/asm/aes-parisc.pl b/crypto/aes/asm/aes-parisc.pl
index fb754eb460..b688ab3050 100644
--- a/crypto/aes/asm/aes-parisc.pl
+++ b/crypto/aes/asm/aes-parisc.pl
@@ -1015,7 +1015,7 @@ ___
foreach (split("\n",$code)) {
s/\`([^\`]*)\`/eval $1/ge;
- # translate made up instructons: _ror, _srm
+ # translate made up instructions: _ror, _srm
s/_ror(\s+)(%r[0-9]+),/shd$1$2,$2,/ or
s/_srm(\s+%r[0-9]+),([0-9]+),/
diff --git a/crypto/aes/asm/aes-s390x.pl b/crypto/aes/asm/aes-s390x.pl
index 1495917d26..0ef1f6b50a 100644
--- a/crypto/aes/asm/aes-s390x.pl
+++ b/crypto/aes/asm/aes-s390x.pl
@@ -44,7 +44,7 @@
# minimize/avoid Address Generation Interlock hazard and to favour
# dual-issue z10 pipeline. This gave ~25% improvement on z10 and
# almost 50% on z9. The gain is smaller on z10, because being dual-
-# issue z10 makes it improssible to eliminate the interlock condition:
+# issue z10 makes it impossible to eliminate the interlock condition:
# critial path is not long enough. Yet it spends ~24 cycles per byte
# processed with 128-bit key.
#
diff --git a/crypto/aes/asm/aes-x86_64.pl b/crypto/aes/asm/aes-x86_64.pl
index c24a5510bd..4d1dc9c701 100755
--- a/crypto/aes/asm/aes-x86_64.pl
+++ b/crypto/aes/asm/aes-x86_64.pl
@@ -2018,7 +2018,7 @@ AES_cbc_encrypt:
lea ($key,%rax),%rax
mov %rax,$keyend
- # pick Te4 copy which can't "overlap" with stack frame or key scdedule
+ # pick Te4 copy which can't "overlap" with stack frame or key schedule
lea 2048($sbox),$sbox
lea 768-8(%rsp),%rax
sub $sbox,%rax
diff --git a/crypto/aes/asm/aesfx-sparcv9.pl b/crypto/aes/asm/aesfx-sparcv9.pl
index 04b3cf7116..9ddf0b4b00 100644
--- a/crypto/aes/asm/aesfx-sparcv9.pl
+++ b/crypto/aes/asm/aesfx-sparcv9.pl
@@ -22,7 +22,7 @@
# April 2016
#
# Add "teaser" CBC and CTR mode-specific subroutines. "Teaser" means
-# that parallelizeable nature of CBC decrypt and CTR is not utilized
+# that parallelizable nature of CBC decrypt and CTR is not utilized
# yet. CBC encrypt on the other hand is as good as it can possibly
# get processing one byte in 4.1 cycles with 128-bit key on SPARC64 X.
# This is ~6x faster than pure software implementation...
diff --git a/crypto/aes/asm/aesni-mb-x86_64.pl b/crypto/aes/asm/aesni-mb-x86_64.pl
index c4151feb7e..1f356d2d3f 100644
--- a/crypto/aes/asm/aesni-mb-x86_64.pl
+++ b/crypto/aes/asm/aesni-mb-x86_64.pl
@@ -1325,10 +1325,10 @@ se_handler:
mov -48(%rax),%r15
mov %rbx,144($context) # restore context->Rbx
mov %rbp,160($context) # restore context->Rbp
- mov %r12,216($context) # restore cotnext->R12
- mov %r13,224($context) # restore cotnext->R13
- mov %r14,232($context) # restore cotnext->R14
- mov %r15,240($context) # restore cotnext->R15
+ mov %r12,216($context) # restore context->R12
+ mov %r13,224($context) # restore context->R13
+ mov %r14,232($context) # restore context->R14
+ mov %r15,240($context) # restore context->R15
lea -56-10*16(%rax),%rsi
lea 512($context),%rdi # &context.Xmm6
diff --git a/crypto/aes/asm/aesni-x86.pl b/crypto/aes/asm/aesni-x86.pl
index 81c1cd651b..b351fca28e 100644
--- a/crypto/aes/asm/aesni-x86.pl
+++ b/crypto/aes/asm/aesni-x86.pl
@@ -239,7 +239,7 @@ sub aesni_generate1 # fully unrolled loop
# can schedule aes[enc|dec] every cycle optimal interleave factor
# equals to corresponding instructions latency. 8x is optimal for
# * Bridge, but it's unfeasible to accommodate such implementation
-# in XMM registers addreassable in 32-bit mode and therefore maximum
+# in XMM registers addressable in 32-bit mode and therefore maximum
# of 6x is used instead...
sub aesni_generate2
diff --git a/crypto/aes/asm/aesni-x86_64.pl b/crypto/aes/asm/aesni-x86_64.pl
index c5c3614eee..2a202c53e5 100644
--- a/crypto/aes/asm/aesni-x86_64.pl
+++ b/crypto/aes/asm/aesni-x86_64.pl
@@ -60,7 +60,7 @@
# identical to CBC, because CBC-MAC is essentially CBC encrypt without
# saving output. CCM CTR "stays invisible," because it's neatly
# interleaved wih CBC-MAC. This provides ~30% improvement over
-# "straghtforward" CCM implementation with CTR and CBC-MAC performed
+# "straightforward" CCM implementation with CTR and CBC-MAC performed
# disjointly. Parallelizable modes practically achieve the theoretical
# limit.
#
@@ -143,14 +143,14 @@
# asymptotic, if it can be surpassed, isn't it? What happens there?
# Rewind to CBC paragraph for the answer. Yes, out-of-order execution
# magic is responsible for this. Processor overlaps not only the
-# additional instructions with AES ones, but even AES instuctions
+# additional instructions with AES ones, but even AES instructions
# processing adjacent triplets of independent blocks. In the 6x case
# additional instructions still claim disproportionally small amount
# of additional cycles, but in 8x case number of instructions must be
# a tad too high for out-of-order logic to cope with, and AES unit
# remains underutilized... As you can see 8x interleave is hardly
# justifiable, so there no need to feel bad that 32-bit aesni-x86.pl
-# utilizies 6x interleave because of limited register bank capacity.
+# utilizes 6x interleave because of limited register bank capacity.
#
# Higher interleave factors do have negative impact on Westmere
# performance. While for ECB mode it's negligible ~1.5%, other
@@ -1550,7 +1550,7 @@ $code.=<<___;
sub \$8,$len
jnc .Lctr32_loop8 # loop if $len-=8 didn't borrow
- add \$8,$len # restore real remainig $len
+ add \$8,$len # restore real remaining $len
jz .Lctr32_done # done if ($len==0)
lea -0x80($key),$key
@@ -1667,7 +1667,7 @@ $code.=<<___;
movups $inout2,0x20($out) # $len was 3, stop store
.Lctr32_done:
- xorps %xmm0,%xmm0 # clear regiser bank
+ xorps %xmm0,%xmm0 # clear register bank
xor $key0,$key0
pxor %xmm1,%xmm1
pxor %xmm2,%xmm2
@@ -1856,7 +1856,7 @@ $code.=<<___;
lea `16*6`($inp),$inp
pxor $twmask,$inout5
- pxor $twres,@tweak[0] # calclulate tweaks^round[last]
+ pxor $twres,@tweak[0] # calculate tweaks^round[last]
aesenc $rndkey1,$inout4
pxor $twres,@tweak[1]
movdqa @tweak[0],`16*0`(%rsp) # put aside tweaks^round[last]
@@ -2342,7 +2342,7 @@ $code.=<<___;
lea `16*6`($inp),$inp
pxor $twmask,$inout5
- pxor $twres,@tweak[0] # calclulate tweaks^round[last]
+ pxor $twres,@tweak[0] # calculate tweaks^round[last]
aesdec $rndkey1,$inout4
pxor $twres,@tweak[1]
movdqa @tweak[0],`16*0`(%rsp) # put aside tweaks^last round key
@@ -4515,7 +4515,7 @@ __aesni_set_encrypt_key:
.align 16
.L14rounds:
- movups 16($inp),%xmm2 # remaning half of *userKey
+ movups 16($inp),%xmm2 # remaining half of *userKey
mov \$13,$bits # 14 rounds for 256
lea 16(%rax),%rax
cmp \$`1<<28`,%r10d # AVX, but no XOP
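
The interleave discussion in the aesni-x86_64.pl header above (independent blocks hiding aesenc latency) is easiest to see in C intrinsics. A minimal 2x-interleave sketch, assuming AES-NI via <wmmintrin.h> and an already-expanded AES-128 key schedule; illustrative only, not OpenSSL's code:

    #include <wmmintrin.h>   /* AES-NI intrinsics */

    /* Encrypt two independent blocks with interleaved rounds: the second
     * _mm_aesenc_si128 issues while the first one's result is still in
     * flight, hiding the instruction latency as described above. */
    static void aes128_encrypt_x2(__m128i *b0, __m128i *b1,
                                  const __m128i rk[11])
    {
        __m128i s0 = _mm_xor_si128(*b0, rk[0]);
        __m128i s1 = _mm_xor_si128(*b1, rk[0]);
        for (int r = 1; r < 10; r++) {
            s0 = _mm_aesenc_si128(s0, rk[r]);   /* block 0, round r */
            s1 = _mm_aesenc_si128(s1, rk[r]);   /* block 1, independent */
        }
        *b0 = _mm_aesenclast_si128(s0, rk[10]);
        *b1 = _mm_aesenclast_si128(s1, rk[10]);
    }

The same reasoning scales the interleave factor with aesenc latency, which is why the module above settles on 6x in 32-bit mode (register pressure) and finds 8x hard to justify against out-of-order capacity.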
diff --git a/crypto/aes/asm/aest4-sparcv9.pl b/crypto/aes/asm/aest4-sparcv9.pl
index 8d2b33d73e..54d0c58821 100644
--- a/crypto/aes/asm/aest4-sparcv9.pl
+++ b/crypto/aes/asm/aest4-sparcv9.pl
@@ -44,7 +44,7 @@
# instructions with those on critical path. Amazing!
#
# As with Intel AES-NI, question is if it's possible to improve
-# performance of parallelizeable modes by interleaving round
+# performance of parallelizable modes by interleaving round
# instructions. Provided round instruction latency and throughput
# optimal interleave factor is 2. But can we expect 2x performance
# improvement? Well, as round instructions can be issued one per
diff --git a/crypto/aes/asm/aesv8-armx.pl b/crypto/aes/asm/aesv8-armx.pl
index a7947af486..385b31fd54 100755
--- a/crypto/aes/asm/aesv8-armx.pl
+++ b/crypto/aes/asm/aesv8-armx.pl
@@ -929,7 +929,7 @@ if ($flavour =~ /64/) { ######## 64-bit code
s/^(\s+)v/$1/o or # strip off v prefix
s/\bbx\s+lr\b/ret/o;
- # fix up remainig legacy suffixes
+ # fix up remaining legacy suffixes
s/\.[ui]?8//o;
m/\],#8/o and s/\.16b/\.8b/go;
s/\.[ui]?32//o and s/\.16b/\.4s/go;
@@ -988,7 +988,7 @@ if ($flavour =~ /64/) { ######## 64-bit code
s/\bv([0-9])\.[12468]+[bsd]\b/q$1/go; # new->old registers
s/\/\/\s?/@ /o; # new->old style commentary
- # fix up remainig new-style suffixes
+ # fix up remaining new-style suffixes
s/\{q([0-9]+)\},\s*\[(.+)\],#8/sprintf "{d%d},[$2]!",2*$1/eo or
s/\],#[0-9]+/]!/o;
diff --git a/crypto/aes/asm/vpaes-armv8.pl b/crypto/aes/asm/vpaes-armv8.pl
index 2e704a2124..5131e13a09 100755
--- a/crypto/aes/asm/vpaes-armv8.pl
+++ b/crypto/aes/asm/vpaes-armv8.pl
@@ -31,7 +31,7 @@
# Apple A7(***) 22.7(**) 10.9/14.3 [8.45/10.0 ]
# Mongoose(***) 26.3(**) 21.0/25.0(**) [13.3/16.8 ]
#
-# (*) ECB denotes approximate result for parallelizeable modes
+# (*) ECB denotes approximate result for parallelizable modes
# such as CBC decrypt, CTR, etc.;
# (**) these results are worse than scalar compiler-generated
# code, but it's constant-time and therefore preferred;
@@ -137,7 +137,7 @@ _vpaes_consts:
.quad 0x07E4A34047A4E300, 0x1DFEB95A5DBEF91A
.quad 0x5F36B5DC83EA6900, 0x2841C2ABF49D1E77
-.asciz "Vector Permutaion AES for ARMv8, Mike Hamburg (Stanford University)"
+.asciz "Vector Permutation AES for ARMv8, Mike Hamburg (Stanford University)"
.size _vpaes_consts,.-_vpaes_consts
.align 6
___
diff --git a/crypto/arm_arch.h b/crypto/arm_arch.h
index 3fc9e69b1c..8b41408296 100644
--- a/crypto/arm_arch.h
+++ b/crypto/arm_arch.h
@@ -28,7 +28,7 @@
# endif
/*
* Why doesn't gcc define __ARM_ARCH__? Instead it defines
- * bunch of below macros. See all_architectires[] table in
+ * bunch of below macros. See all_architectures[] table in
* gcc/config/arm/arm.c. On a side note it defines
* __ARMEL__/__ARMEB__ for little-/big-endian.
*/
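
The all_architectures[] remark above refers to gcc's per-version macros; a simplified sketch of how a header can fold them back into a single __ARM_ARCH__ number (illustrative, not the actual arm_arch.h logic):

    /* Map gcc's per-architecture macros to one number; simplified sketch. */
    #if !defined(__ARM_ARCH__)
    # if defined(__ARM_ARCH)            /* newer compilers define this directly */
    #  define __ARM_ARCH__ __ARM_ARCH
    # elif defined(__ARM_ARCH_8A__)
    #  define __ARM_ARCH__ 8
    # elif defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__)
    #  define __ARM_ARCH__ 7
    # endif
    #endif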
diff --git a/crypto/asn1/standard_methods.h b/crypto/asn1/standard_methods.h
index f5fa128346..d366aa090a 100644
--- a/crypto/asn1/standard_methods.h
+++ b/crypto/asn1/standard_methods.h
@@ -8,7 +8,7 @@
*/
/*
- * This table MUST be kept in ascening order of the NID each method
+ * This table MUST be kept in ascending order of the NID each method
* represents (corresponding to the pkey_id field) as OBJ_bsearch
* is used to search it.
*/
diff --git a/crypto/bn/asm/c64xplus-gf2m.pl b/crypto/bn/asm/c64xplus-gf2m.pl
index c0e5400807..9c46da3af8 100644
--- a/crypto/bn/asm/c64xplus-gf2m.pl
+++ b/crypto/bn/asm/c64xplus-gf2m.pl
@@ -43,7 +43,7 @@ $code.=<<___;
SHRU $A,16, $Ahi ; smash $A to two halfwords
|| EXTU $A,16,16,$Alo
- XORMPY $Alo,$B_2,$Alox2 ; 16x8 bits muliplication
+ XORMPY $Alo,$B_2,$Alox2 ; 16x8 bits multiplication
|| XORMPY $Ahi,$B_2,$Ahix2
|| EXTU $B,16,24,$B_1
XORMPY $Alo,$B_0,$Alox0
diff --git a/crypto/bn/asm/ia64.S b/crypto/bn/asm/ia64.S
index 2bd4209d61..58f7628e81 100644
--- a/crypto/bn/asm/ia64.S
+++ b/crypto/bn/asm/ia64.S
@@ -20,7 +20,7 @@
// disclaimed.
// ====================================================================
//
-// Version 2.x is Itanium2 re-tune. Few words about how Itanum2 is
+// Version 2.x is Itanium2 re-tune. Few words about how Itanium2 is
// different from Itanium to this module viewpoint. Most notably, is it
// "wider" than Itanium? Can you experience loop scalability as
// discussed in commentary sections? Not really:-( Itanium2 has 6
@@ -141,7 +141,7 @@
// User Mask I want to excuse the kernel from preserving upper
// (f32-f128) FP register bank over process context switch, thus
// minimizing bus bandwidth consumption during the switch (i.e.
-// after PKI opration completes and the program is off doing
+// after PKI operation completes and the program is off doing
// something else like bulk symmetric encryption). Having said
// this, I also want to point out that it might be good idea
// to compile the whole toolkit (as well as majority of the
@@ -162,7 +162,7 @@
//
// bn_[add|sub]_words routines.
//
-// Loops are spinning in 2*(n+5) ticks on Itanuim (provided that the
+// Loops are spinning in 2*(n+5) ticks on Itanium (provided that the
// data reside in L1 cache, i.e. 2 ticks away). It's possible to
// compress the epilogue and get down to 2*n+6, but at the cost of
// scalability (the neat feature of this implementation is that it
@@ -500,7 +500,7 @@ bn_sqr_words:
// possible to compress the epilogue (I'm getting tired to write this
// comment over and over) and get down to 2*n+16 at the cost of
// scalability. The decision will very likely be reconsidered after the
-// benchmark program is profiled. I.e. if perfomance gain on Itanium
+// benchmark program is profiled. I.e. if performance gain on Itanium
// will appear larger than loss on "wider" IA-64, then the loop should
// be explicitly split and the epilogue compressed.
.L_bn_sqr_words_ctop:
@@ -936,7 +936,7 @@ bn_mul_comba8:
xma.hu f118=f39,f127,f117 }
{ .mfi; xma.lu f117=f39,f127,f117 };;//
//-------------------------------------------------//
-// Leaving muliplier's heaven... Quite a ride, huh?
+// Leaving multiplier's heaven... Quite a ride, huh?
{ .mii; getf.sig r31=f47
add r25=r25,r24
diff --git a/crypto/bn/asm/parisc-mont.pl b/crypto/bn/asm/parisc-mont.pl
index 5b1c5eacca..a705e16392 100644
--- a/crypto/bn/asm/parisc-mont.pl
+++ b/crypto/bn/asm/parisc-mont.pl
@@ -21,7 +21,7 @@
# optimal in respect to instruction set capabilities. Fair comparison
# with vendor compiler is problematic, because OpenSSL doesn't define
# BN_LLONG [presumably] for historical reasons, which drives compiler
-# toward 4 times 16x16=32-bit multiplicatons [plus complementary
+# toward 4 times 16x16=32-bit multiplications [plus complementary
# shifts and additions] instead. This means that you should observe
# several times improvement over code generated by vendor compiler
# for PA-RISC 1.1, but the "baseline" is far from optimal. The actual
diff --git a/crypto/bn/asm/ppc-mont.pl b/crypto/bn/asm/ppc-mont.pl
index 213c7ec777..9aa96c8e87 100644
--- a/crypto/bn/asm/ppc-mont.pl
+++ b/crypto/bn/asm/ppc-mont.pl
@@ -37,7 +37,7 @@
# and squaring procedure operating on lengths divisible by 8. Length
# is expressed in number of limbs. RSA private key operations are
# ~35-50% faster (more for longer keys) on contemporary high-end POWER
-# processors in 64-bit builds, [mysterously enough] more in 32-bit
+# processors in 64-bit builds, [mysteriously enough] more in 32-bit
# builds. On low-end 32-bit processors performance improvement turned
# to be marginal...
diff --git a/crypto/bn/asm/ppc64-mont.pl b/crypto/bn/asm/ppc64-mont.pl
index 1e19c958a1..0b2bb39f26 100644
--- a/crypto/bn/asm/ppc64-mont.pl
+++ b/crypto/bn/asm/ppc64-mont.pl
@@ -35,7 +35,7 @@
# key lengths. As it's obviously inappropriate as "best all-round"
# alternative, it has to be complemented with run-time CPU family
# detection. Oh! It should also be noted that unlike other PowerPC
-# implementation IALU ppc-mont.pl module performs *suboptimaly* on
+# implementation IALU ppc-mont.pl module performs *suboptimally* on
# >=1024-bit key lengths on Power 6. It should also be noted that
# *everything* said so far applies to 64-bit builds! As far as 32-bit
# application executed on 64-bit CPU goes, this module is likely to
@@ -1353,7 +1353,7 @@ $code.=<<___;
std $t3,-16($tp) ; tp[j-1]
std $t5,-8($tp) ; tp[j]
- add $carry,$carry,$ovf ; comsume upmost overflow
+ add $carry,$carry,$ovf ; consume upmost overflow
add $t6,$t6,$carry ; can not overflow
srdi $carry,$t6,16
add $t7,$t7,$carry
diff --git a/crypto/bn/asm/s390x-gf2m.pl b/crypto/bn/asm/s390x-gf2m.pl
index 57b0032d67..06181bf9b9 100644
--- a/crypto/bn/asm/s390x-gf2m.pl
+++ b/crypto/bn/asm/s390x-gf2m.pl
@@ -20,7 +20,7 @@
# in bn_gf2m.c. It's kind of low-hanging mechanical port from C for
# the time being... gcc 4.3 appeared to generate poor code, therefore
# the effort. And indeed, the module delivers 55%-90%(*) improvement
-# on haviest ECDSA verify and ECDH benchmarks for 163- and 571-bit
+# on heaviest ECDSA verify and ECDH benchmarks for 163- and 571-bit
# key lengths on z990, 30%-55%(*) - on z10, and 70%-110%(*) - on z196.
# This is for 64-bit build. In 32-bit "highgprs" case improvement is
# even higher, for example on z990 it was measured 80%-150%. ECDSA
diff --git a/crypto/bn/asm/sparcv8.S b/crypto/bn/asm/sparcv8.S
index 46d247353b..75d72eb92c 100644
--- a/crypto/bn/asm/sparcv8.S
+++ b/crypto/bn/asm/sparcv8.S
@@ -13,7 +13,7 @@
*/
/*
- * This is my modest contributon to OpenSSL project (see
+ * This is my modest contribution to OpenSSL project (see
* http://www.openssl.org/ for more information about it) and is
* a drop-in SuperSPARC ISA replacement for crypto/bn/bn_asm.c
* module. For updates see http://fy.chalmers.se/~appro/hpe/.
@@ -159,12 +159,12 @@ bn_mul_add_words:
*/
bn_mul_words:
cmp %o2,0
- bg,a .L_bn_mul_words_proceeed
+ bg,a .L_bn_mul_words_proceed
ld [%o1],%g2
retl
clr %o0
-.L_bn_mul_words_proceeed:
+.L_bn_mul_words_proceed:
andcc %o2,-4,%g0
bz .L_bn_mul_words_tail
clr %o5
@@ -251,12 +251,12 @@ bn_mul_words:
*/
bn_sqr_words:
cmp %o2,0
- bg,a .L_bn_sqr_words_proceeed
+ bg,a .L_bn_sqr_words_proceed
ld [%o1],%g2
retl
clr %o0
-.L_bn_sqr_words_proceeed:
+.L_bn_sqr_words_proceed:
andcc %o2,-4,%g0
bz .L_bn_sqr_words_tail
clr %o5
diff --git a/crypto/bn/asm/sparcv8plus.S b/crypto/bn/asm/sparcv8plus.S
index fded2fa358..fe4699b2bd 100644
--- a/crypto/bn/asm/sparcv8plus.S
+++ b/crypto/bn/asm/sparcv8plus.S
@@ -13,7 +13,7 @@
*/
/*
- * This is my modest contributon to OpenSSL project (see
+ * This is my modest contribution to OpenSSL project (see
* http://www.openssl.org/ for more information about it) and is
* a drop-in UltraSPARC ISA replacement for crypto/bn/bn_asm.c
* module. For updates see http://fy.chalmers.se/~appro/hpe/.
@@ -278,7 +278,7 @@ bn_mul_add_words:
*/
bn_mul_words:
sra %o2,%g0,%o2 ! signx %o2
- brgz,a %o2,.L_bn_mul_words_proceeed
+ brgz,a %o2,.L_bn_mul_words_proceed
lduw [%o1],%g2
retl
clr %o0
@@ -286,7 +286,7 @@ bn_mul_words:
nop
nop
-.L_bn_mul_words_proceeed:
+.L_bn_mul_words_proceed:
srl %o3,%g0,%o3 ! clruw %o3
andcc %o2,-4,%g0
bz,pn %icc,.L_bn_mul_words_tail
@@ -366,7 +366,7 @@ bn_mul_words:
*/
bn_sqr_words:
sra %o2,%g0,%o2 ! signx %o2
- brgz,a %o2,.L_bn_sqr_words_proceeed
+ brgz,a %o2,.L_bn_sqr_words_proceed
lduw [%o1],%g2
retl
clr %o0
@@ -374,7 +374,7 @@ bn_sqr_words:
nop
nop
-.L_bn_sqr_words_proceeed:
+.L_bn_sqr_words_proceed:
andcc %o2,-4,%g0
nop
bz,pn %icc,.L_bn_sqr_words_tail
diff --git a/crypto/bn/asm/sparcv9-mont.pl b/crypto/bn/asm/sparcv9-mont.pl
index 3268846dfc..44d28ac496 100644
--- a/crypto/bn/asm/sparcv9-mont.pl
+++ b/crypto/bn/asm/sparcv9-mont.pl
@@ -611,7 +611,7 @@ $code.=<<___;
add $tp,8,$tp
.type $fname,#function
.size $fname,(.-$fname)
-.asciz "Montgomery Multipltication for SPARCv9, CRYPTOGAMS by <appro\@openssl.org>"
+.asciz "Montgomery Multiplication for SPARCv9, CRYPTOGAMS by <appro\@openssl.org>"
.align 32
___
$code =~ s/\`([^\`]*)\`/eval($1)/gem;
diff --git a/crypto/bn/asm/sparcv9a-mont.pl b/crypto/bn/asm/sparcv9a-mont.pl
index 69ef9e2d4f..c8f759df9f 100755
--- a/crypto/bn/asm/sparcv9a-mont.pl
+++ b/crypto/bn/asm/sparcv9a-mont.pl
@@ -865,7 +865,7 @@ $fname:
restore
.type $fname,#function
.size $fname,(.-$fname)
-.asciz "Montgomery Multipltication for UltraSPARC, CRYPTOGAMS by <appro\@openssl.org>"
+.asciz "Montgomery Multiplication for UltraSPARC, CRYPTOGAMS by <appro\@openssl.org>"
.align 32
___
diff --git a/crypto/bn/asm/vis3-mont.pl b/crypto/bn/asm/vis3-mont.pl
index 64dba4480f..04694a4c6c 100644
--- a/crypto/bn/asm/vis3-mont.pl
+++ b/crypto/bn/asm/vis3-mont.pl
@@ -16,7 +16,7 @@
# October 2012.
#
-# SPARCv9 VIS3 Montgomery multiplicaion procedure suitable for T3 and
+# SPARCv9 VIS3 Montgomery multiplication procedure suitable for T3 and
# onward. There are three new instructions used here: umulxhi,
# addxc[cc] and initializing store. On T3 RSA private key operations
# are 1.54/1.87/2.11/2.26 times faster for 512/1024/2048/4096-bit key
diff --git a/crypto/bn/asm/x86-gf2m.pl b/crypto/bn/asm/x86-gf2m.pl
index 562f539943..d03efcc750 100644
--- a/crypto/bn/asm/x86-gf2m.pl
+++ b/crypto/bn/asm/x86-gf2m.pl
@@ -152,7 +152,7 @@ $R="mm0";
&xor ($a4,$a2); # a2=a4^a2^a4
&mov (&DWP(5*4,"esp"),$a1); # a1^a4
&xor ($a4,$a1); # a1^a2^a4
- &sar (@i[1],31); # broardcast 30th bit
+ &sar (@i[1],31); # broadcast 30th bit
&and ($lo,$b);
&mov (&DWP(6*4,"esp"),$a2); # a2^a4
&and (@i[1],$b);
diff --git a/crypto/bn/asm/x86-mont.pl b/crypto/bn/asm/x86-mont.pl
index 32daf5c215..66997e279b 100755
--- a/crypto/bn/asm/x86-mont.pl
+++ b/crypto/bn/asm/x86-mont.pl
@@ -78,7 +78,7 @@ $frame=32; # size of above frame rounded up to 16n
&lea ("ebp",&DWP(-$frame,"esp","edi",4)); # future alloca($frame+4*(num+2))
&neg ("edi");
- # minimize cache contention by arraning 2K window between stack
+ # minimize cache contention by arranging 2K window between stack
# pointer and ap argument [np is also position sensitive vector,
# but it's assumed to be near ap, as it's allocated at ~same
# time].
diff --git a/crypto/bn/asm/x86_64-gf2m.pl b/crypto/bn/asm/x86_64-gf2m.pl
index 0181f52ca4..0fd6e985d7 100644
--- a/crypto/bn/asm/x86_64-gf2m.pl
+++ b/crypto/bn/asm/x86_64-gf2m.pl
@@ -68,7 +68,7 @@ _mul_1x1:
sar \$63,$i0 # broadcast 62nd bit
lea (,$a1,4),$a4
and $b,$a
- sar \$63,$i1 # boardcast 61st bit
+ sar \$63,$i1 # broadcast 61st bit
mov $a,$hi # $a is $lo
shl \$63,$lo
and $b,$i0
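
The "broadcast nth bit" comments in this hunk describe the classic arithmetic-shift masking trick: move the chosen bit into the sign position, then shift it across the whole word. A C equivalent, assuming two's-complement arithmetic right shift just as the asm does:

    #include <stdint.h>

    /* All-ones mask if bit k of x is set, zero otherwise. */
    static inline uint64_t bit_mask(uint64_t x, unsigned k)
    {
        return (uint64_t)(((int64_t)(x << (63 - k))) >> 63);
    }

The resulting mask is then ANDed with $b, which is how the branch-free GF(2^m) multiplication above conditionally mixes in multiples of the operand.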
diff --git a/crypto/bn/asm/x86_64-mont.pl b/crypto/bn/asm/x86_64-mont.pl
index a4da3de7fa..7e6e9027de 100755
--- a/crypto/bn/asm/x86_64-mont.pl
+++ b/crypto/bn/asm/x86_64-mont.pl
@@ -319,7 +319,7 @@ $code.=<<___;
mov %rax,($rp,$i,8) # rp[i]=tp[i]-np[i]
mov 8($ap,$i,8),%rax # tp[i+1]
lea 1($i),$i # i++
- dec $j # doesnn't affect CF!
+ dec $j # doesn't affect CF!
jnz .Lsub
sbb \$0,%rax # handle upmost overflow bit
@@ -750,7 +750,7 @@ $code.=<<___;
mov 56($ap,$i,8),@ri[3]
sbb 40($np,$i,8),@ri[1]
lea 4($i),$i # i++
- dec $j # doesnn't affect CF!
+ dec $j # doesn't affect CF!
jnz .Lsub4x
mov @ri[0],0($rp,$i,8) # rp[i]=tp[i]-np[i]
diff --git a/crypto/bn/asm/x86_64-mont5.pl b/crypto/bn/asm/x86_64-mont5.pl
index 1666fbd7a2..263543579f 100755
--- a/crypto/bn/asm/x86_64-mont5.pl
+++ b/crypto/bn/asm/x86_64-mont5.pl
@@ -419,7 +419,7 @@ $code.=<<___;
mov %rax,($rp,$i,8) # rp[i]=tp[i]-np[i]
mov 8($ap,$i,8),%rax # tp[i+1]
lea 1($i),$i # i++
- dec $j # doesnn't affect CF!
+ dec $j # doesn't affect CF!
jnz .Lsub
sbb \$0,%rax # handle upmost overflow bit
@@ -2421,7 +2421,7 @@ my $N=$STRIDE/4; # should match cache line size
$code.=<<___;
movdqa 0(%rax),%xmm0 # 00000001000000010000000000000000
movdqa 16(%rax),%xmm1 # 00000002000000020000000200000002
- lea 88-112(%rsp,%r10),%r10 # place the mask after tp[num+1] (+ICache optimizaton)
+ lea 88-112(%rsp,%r10),%r10 # place the mask after tp[num+1] (+ICache optimization)
lea 128($bp),$bptr # size optimization
pshufd \$0,%xmm5,%xmm5 # broadcast index
diff --git a/crypto/c64xpluscpuid.pl b/crypto/c64xpluscpuid.pl
index 9efe1205ff..b7b11d5031 100644
--- a/crypto/c64xpluscpuid.pl
+++ b/crypto/c64xpluscpuid.pl
@@ -231,7 +231,7 @@ bus_loop1?:
_OPENSSL_instrument_bus2:
.asmfunc
MV A6,B0 ; reassign max
-|| MV B4,A6 ; reassing sizeof(output)
+|| MV B4,A6 ; reassign sizeof(output)
|| MVK 0x00004030,A3
MV A4,B4 ; reassign output
|| MVK 0,A4 ; return value
diff --git a/crypto/camellia/asm/cmllt4-sparcv9.pl b/crypto/camellia/asm/cmllt4-sparcv9.pl
index 50bddfed29..6396679a5a 100644
--- a/crypto/camellia/asm/cmllt4-sparcv9.pl
+++ b/crypto/camellia/asm/cmllt4-sparcv9.pl
@@ -17,7 +17,7 @@
# Camellia for SPARC T4.
#
# As with AES below results [for aligned data] are virtually identical
-# to critical path lenths for 3-cycle instruction latency:
+# to critical path lengths for 3-cycle instruction latency:
#
# 128-bit key 192/256-
# CBC encrypt 4.14/4.21(*) 5.46/5.52
@@ -25,7 +25,7 @@
# misaligned data.
#
# As with Intel AES-NI, question is if it's possible to improve
-# performance of parallelizeable modes by interleaving round
+# performance of parallelizable modes by interleaving round
# instructions. In Camellia every instruction is dependent on
# previous, which means that there is place for 2 additional ones
# in between two dependent. Can we expect 3x performance improvement?
diff --git a/crypto/chacha/asm/chacha-c64xplus.pl b/crypto/chacha/asm/chacha-c64xplus.pl
index bdb380442c..266401eb16 100755
--- a/crypto/chacha/asm/chacha-c64xplus.pl
+++ b/crypto/chacha/asm/chacha-c64xplus.pl
@@ -22,7 +22,7 @@
# faster than code generated by TI compiler. Compiler also disables
# interrupts for some reason, thus making interrupt response time
# dependent on input length. This module on the other hand is free
-# from such limiation.
+# from such limitation.
$output=pop;
open STDOUT,">$output";
diff --git a/crypto/des/asm/des-586.pl b/crypto/des/asm/des-586.pl
index fe82d4a5e0..2bcc54ef2f 100644
--- a/crypto/des/asm/des-586.pl
+++ b/crypto/des/asm/des-586.pl
@@ -15,7 +15,7 @@ require "x86asm.pl";
require "cbc.pl";
require "desboth.pl";
-# base code is in microsft
+# base code is in Microsoft
# op dest, source
# format.
#
diff --git a/crypto/des/asm/des_enc.m4 b/crypto/des/asm/des_enc.m4
index c1f6391f53..122a0fa28c 100644
--- a/crypto/des/asm/des_enc.m4
+++ b/crypto/des/asm/des_enc.m4
@@ -528,7 +528,7 @@ $4:
! parameter 3 1 for optional store to [in0]
! parameter 4 1 for load input/output address to local5/7
!
-! The final permutation logic switches the halfes, meaning that
+! The final permutation logic switches the halves, meaning that
! left and right ends up the the registers originally used.
define(fp_macro, {
@@ -731,7 +731,7 @@ define(fp_ip_macro, {
sll $4, 3, local2
xor local4, temp2, $2
- ! reload since used as temporar:
+ ! reload since used as temporary:
ld [out2+280], out4 ! loop counter
@@ -753,7 +753,7 @@ define(fp_ip_macro, {
! parameter 1 address
! parameter 2 destination left
! parameter 3 destination right
-! parameter 4 temporar
+! parameter 4 temporary
! parameter 5 label
define(load_little_endian, {
@@ -802,7 +802,7 @@ $5a:
! parameter 1 address
! parameter 2 destination left
! parameter 3 destination right
-! parameter 4 temporar
+! parameter 4 temporary
! parameter 4 label
!
! adds 8 to address
@@ -927,7 +927,7 @@ $7.jmp.table:
! parameter 1 address
! parameter 2 source left
! parameter 3 source right
-! parameter 4 temporar
+! parameter 4 temporary
define(store_little_endian, {
@@ -1517,7 +1517,7 @@ DES_ncbc_encrypt:
! parameter 7 1 for mov in1 to in3
! parameter 8 1 for mov in3 to in4
- ip_macro(in5, out5, out5, in5, in4, 2, 0, 1) ! include decryprion ks in4
+ ip_macro(in5, out5, out5, in5, in4, 2, 0, 1) ! include decryption ks in4
fp_macro(out5, in5, 0, 1) ! 1 for input and output address to local5/7
@@ -1563,7 +1563,7 @@ DES_ncbc_encrypt:
.size DES_ncbc_encrypt, .DES_ncbc_encrypt.end-DES_ncbc_encrypt
-! void DES_ede3_cbc_encrypt(input, output, lenght, ks1, ks2, ks3, ivec, enc)
+! void DES_ede3_cbc_encrypt(input, output, length, ks1, ks2, ks3, ivec, enc)
! **************************************************************************
@@ -1811,7 +1811,7 @@ DES_ede3_cbc_encrypt:
.byte 240, 240, 240, 240, 244, 244, 244, 244
.byte 248, 248, 248, 248, 252, 252, 252, 252
- ! 5 numbers for initil/final permutation
+ ! 5 numbers for initial/final permutation
.word 0x0f0f0f0f ! offset 256
.word 0x0000ffff ! 260
diff --git a/crypto/des/cfb_enc.c b/crypto/des/cfb_enc.c
index 6c428ba61f..544392e405 100644
--- a/crypto/des/cfb_enc.c
+++ b/crypto/des/cfb_enc.c
@@ -37,7 +37,7 @@ void DES_cfb_encrypt(const unsigned char *in, unsigned char *out, int numbits,
unsigned int sh[4];
unsigned char *ovec = (unsigned char *)sh;
- /* I kind of count that compiler optimizes away this assertioni, */
+ /* I kind of count that compiler optimizes away this assertion, */
assert(sizeof(sh[0]) == 4); /* as this holds true for all, */
/* but 16-bit platforms... */
diff --git a/crypto/ec/asm/ecp_nistz256-armv4.pl b/crypto/ec/asm/ecp_nistz256-armv4.pl
index 2314b75244..c414334d1e 100755
--- a/crypto/ec/asm/ecp_nistz256-armv4.pl
+++ b/crypto/ec/asm/ecp_nistz256-armv4.pl
@@ -233,7 +233,7 @@ __ecp_nistz256_add:
@ if a+b >= modulus, subtract modulus.
@
@ But since comparison implies subtraction, we subtract
- @ modulus and then add it back if subraction borrowed.
+ @ modulus and then add it back if subtraction borrowed.
subs $a0,$a0,#-1
sbcs $a1,$a1,#-1
@@ -1222,7 +1222,7 @@ __ecp_nistz256_add_self:
@ if a+b >= modulus, subtract modulus.
@
@ But since comparison implies subtraction, we subtract
- @ modulus and then add it back if subraction borrowed.
+ @ modulus and then add it back if subtraction borrowed.
subs $a0,$a0,#-1
sbcs $a1,$a1,#-1
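
The subtract-then-add-back pattern in both hunks is a standard branch-free modular add. A toy single-word C sketch of the same idea (the real code works limb by limb with carry chains):

    #include <stdint.h>

    /* r = (a + b) mod m, assuming a, b < m < 2^63 so the sum cannot wrap. */
    static uint64_t mod_add(uint64_t a, uint64_t b, uint64_t m)
    {
        uint64_t s = a + b;
        uint64_t t = s - m;                 /* trial subtraction */
        uint64_t borrow = (uint64_t)(s < m);
        return t + (m & (0 - borrow));      /* add m back iff it borrowed */
    }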
diff --git a/crypto/ec/asm/ecp_nistz256-avx2.pl b/crypto/ec/asm/ecp_nistz256-avx2.pl
index ed9018af51..9fbc909ba9 100755
--- a/crypto/ec/asm/ecp_nistz256-avx2.pl
+++ b/crypto/ec/asm/ecp_nistz256-avx2.pl
@@ -137,7 +137,7 @@ ___
{
# This function receives a pointer to an array of four affine points
-# (X, Y, <1>) and rearanges the data for AVX2 execution, while
+# (X, Y, <1>) and rearranges the data for AVX2 execution, while
# converting it to 2^29 radix redundant form
my ($X0,$X1,$X2,$X3, $Y0,$Y1,$Y2,$Y3,
@@ -289,7 +289,7 @@ ___
{
################################################################################
# This function receives a pointer to an array of four AVX2 formatted points
-# (X, Y, Z) convert the data to normal representation, and rearanges the data
+# (X, Y, Z) convert the data to normal representation, and rearranges the data
my ($D0,$D1,$D2,$D3, $D4,$D5,$D6,$D7, $D8)=map("%ymm$_",(0..8));
my ($T0,$T1,$T2,$T3, $T4,$T5,$T6)=map("%ymm$_",(9..15));
diff --git a/crypto/ec/asm/ecp_nistz256-ppc64.pl b/crypto/ec/asm/ecp_nistz256-ppc64.pl
index 70af6b6f5e..0c3c186b31 100755
--- a/crypto/ec/asm/ecp_nistz256-ppc64.pl
+++ b/crypto/ec/asm/ecp_nistz256-ppc64.pl
@@ -696,7 +696,7 @@ __ecp_nistz256_add:
# if a+b >= modulus, subtract modulus
#
# But since comparison implies subtraction, we subtract
- # modulus and then add it back if subraction borrowed.
+ # modulus and then add it back if subtraction borrowed.
subic $acc0,$acc0,-1
subfe $acc1,$poly1,$acc1
diff --git a/crypto/ec/asm/ecp_nistz256-sparcv9.pl b/crypto/ec/asm/ecp_nistz256-sparcv9.pl
index ee11069459..9af1fae853 100755
--- a/crypto/ec/asm/ecp_nistz256-sparcv9.pl
+++ b/crypto/ec/asm/ecp_nistz256-sparcv9.pl
@@ -413,7 +413,7 @@ __ecp_nistz256_add:
! if a+b >= modulus, subtract modulus.
!
! But since comparison implies subtraction, we subtract
- ! modulus and then add it back if subraction borrowed.
+ ! modulus and then add it back if subtraction borrowed.
subcc @acc[0],-1,@acc[0]
subccc @acc[1],-1,@acc[1]
@@ -1592,7 +1592,7 @@ ___
########################################################################
# Following subroutines are VIS3 counterparts of those above that
# implement ones found in ecp_nistz256.c. Key difference is that they
-# use 128-bit muliplication and addition with 64-bit carry, and in order
+# use 128-bit multiplication and addition with 64-bit carry, and in order
# to do that they perform conversion from uin32_t[8] to uint64_t[4] upon
# entry and vice versa on return.
#
@@ -1977,7 +1977,7 @@ $code.=<<___;
srlx $acc0,32,$t1
addxccc $acc3,$t2,$acc2 ! +=acc[0]*0xFFFFFFFF00000001
sub $acc0,$t0,$t2 ! acc0*0xFFFFFFFF00000001, low part
- addxc %g0,$t3,$acc3 ! cant't overflow
+ addxc %g0,$t3,$acc3 ! can't overflow
___
}
$code.=<<___;
diff --git a/crypto/ec/ecdsa_ossl.c b/crypto/ec/ecdsa_ossl.c
index ef912822ee..7118e43bfc 100644
--- a/crypto/ec/ecdsa_ossl.c
+++ b/crypto/ec/ecdsa_ossl.c
@@ -157,7 +157,7 @@ static int ecdsa_sign_setup(EC_KEY *eckey, BN_CTX *ctx_in,
if (EC_GROUP_get_mont_data(group) != NULL) {
/*
* We want inverse in constant time, therefore we utilize the fact
- * order must be prime and use Fermats Little Theorem instead.
+ * order must be prime and use Fermat's Little Theorem instead.
*/
if (!BN_set_word(X, 2)) {
ECerr(EC_F_ECDSA_SIGN_SETUP, ERR_R_BN_LIB);
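
Fermat's Little Theorem gives a^(p-2) ≡ a^(-1) (mod p) for prime p and a not divisible by p; the BN_set_word(X, 2) above is the first step of forming that exponent. A minimal sketch of the idea with the BIGNUM API (hypothetical helper; the actual code takes the Montgomery, constant-time exponentiation path):

    #include <openssl/bn.h>

    /* r = a^(p-2) mod p == a^(-1) mod p for prime p. Sketch only. */
    static int fermat_inverse(BIGNUM *r, const BIGNUM *a,
                              const BIGNUM *p, BN_CTX *ctx)
    {
        int ok = 0;
        BIGNUM *e = BN_new();               /* exponent p - 2 */
        if (e == NULL)
            return 0;
        if (BN_copy(e, p) != NULL
            && BN_sub_word(e, 2)
            && BN_mod_exp(r, a, e, p, ctx))
            ok = 1;
        BN_free(e);
        return ok;
    }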
diff --git a/crypto/ec/ecp_nistp224.c b/crypto/ec/ecp_nistp224.c
index 7fda43f579..820d5e0a2c 100644
--- a/crypto/ec/ecp_nistp224.c
+++ b/crypto/ec/ecp_nistp224.c
@@ -79,7 +79,7 @@ typedef limb felem[4];
typedef widelimb widefelem[7];
/*
- * Field element represented as a byte arrary. 28*8 = 224 bits is also the
+ * Field element represented as a byte array. 28*8 = 224 bits is also the
* group order size for the elliptic curve, and we also use this type for
* scalars for point multiplication.
*/
diff --git a/crypto/ec/ecp_oct.c b/crypto/ec/ecp_oct.c
index 7b876cc687..a5010bcf52 100644
--- a/crypto/ec/ecp_oct.c
+++ b/crypto/ec/ecp_oct.c
@@ -125,7 +125,7 @@ int ec_GFp_simple_set_compressed_coordinates(const EC_GROUP *group,
EC_R_INVALID_COMPRESSION_BIT);
else
/*
- * BN_mod_sqrt() should have cought this error (not a square)
+ * BN_mod_sqrt() should have caught this error (not a square)
*/
ECerr(EC_F_EC_GFP_SIMPLE_SET_COMPRESSED_COORDINATES,
EC_R_INVALID_COMPRESSED_POINT);
diff --git a/crypto/engine/README b/crypto/engine/README
index 8daa1f7798..0050b9e509 100644
--- a/crypto/engine/README
+++ b/crypto/engine/README
@@ -161,7 +161,7 @@ actually qualitatively different depending on 'nid' (the "des_cbc" EVP_CIPHER is
not an interoperable implementation of "aes_256_cbc"), RSA_METHODs are
necessarily interoperable and don't have different flavours, only different
implementations. In other words, the ENGINE_TABLE for RSA will either be empty,
-or will have a single ENGING_PILE hashed to by the 'nid' 1 and that pile
+or will have a single ENGINE_PILE hashed to by the 'nid' 1 and that pile
represents ENGINEs that implement the single "type" of RSA there is.
Cleanup - the registration and unregistration may pose questions about how
@@ -188,7 +188,7 @@ state will be unchanged. Thus, no cleanup is required unless registration takes
place. ENGINE_cleanup() will simply iterate across a list of registered cleanup
callbacks calling each in turn, and will then internally delete its own storage
(a STACK). When a cleanup callback is next registered (eg. if the cleanup() is
-part of a gracefull restart and the application wants to cleanup all state then
+part of a graceful restart and the application wants to cleanup all state then
start again), the internal STACK storage will be freshly allocated. This is much
the same as the situation in the ENGINE_TABLE instantiations ... NULL is the
initialised state, so only modification operations (not queries) will cause that
@@ -204,7 +204,7 @@ exists) - the idea of providing an ENGINE_cpy() function probably wasn't a good
one and now certainly doesn't make sense in any generalised way. Some of the
RSA, DSA, DH, and RAND functions that were fiddled during the original ENGINE
changes have now, as a consequence, been reverted back. This is because the
-hooking of ENGINE is now automatic (and passive, it can interally use a NULL
+hooking of ENGINE is now automatic (and passive, it can internally use a NULL
ENGINE pointer to simply ignore ENGINE from then on).
Hell, that should be enough for now ... comments welcome.
diff --git a/crypto/include/internal/aria.h b/crypto/include/internal/aria.h
index 0ba093922d..073827185a 100644
--- a/crypto/include/internal/aria.h
+++ b/crypto/include/internal/aria.h
@@ -8,7 +8,7 @@
* https://www.openssl.org/source/license.html
*/
- /* Copyright (c) 2017 National Security Resarch Institute. All rights reserved. */
+ /* Copyright (c) 2017 National Security Research Institute. All rights reserved. */
#ifndef HEADER_ARIA_H
# define HEADER_ARIA_H
@@ -22,7 +22,7 @@
# define ARIA_ENCRYPT 1
# define ARIA_DECRYPT 0
-# define ARIA_BLOCK_SIZE 16 /* Size of each encryption/decription block */
+# define ARIA_BLOCK_SIZE 16 /* Size of each encryption/decryption block */
# define ARIA_MAX_KEYS 17 /* Number of keys needed in the worst case */
# ifdef __cplusplus
diff --git a/crypto/include/internal/evp_int.h b/crypto/include/internal/evp_int.h
index f409400285..562ac42ab5 100644
--- a/crypto/include/internal/evp_int.h
+++ b/crypto/include/internal/evp_int.h
@@ -396,7 +396,7 @@ void openssl_add_all_digests_int(void);
void evp_cleanup_int(void);
void evp_app_cleanup_int(void);
-/* Pulling defines out of C soure files */
+/* Pulling defines out of C source files */
#define EVP_RC4_KEY_SIZE 16
#ifndef TLS1_1_VERSION
diff --git a/crypto/modes/asm/ghash-ia64.pl b/crypto/modes/asm/ghash-ia64.pl
index 81e75f71a8..eb9ded91e5 100755
--- a/crypto/modes/asm/ghash-ia64.pl
+++ b/crypto/modes/asm/ghash-ia64.pl
@@ -156,7 +156,7 @@ $code.=<<___;
___
######################################################################
-# "528B" (well, "512B" actualy) streamed GHASH
+# "528B" (well, "512B" actually) streamed GHASH
#
$Xip="in0";
$Htbl="in1";
diff --git a/crypto/modes/asm/ghash-parisc.pl b/crypto/modes/asm/ghash-parisc.pl
index 1d6254543b..fef2db1940 100644
--- a/crypto/modes/asm/ghash-parisc.pl
+++ b/crypto/modes/asm/ghash-parisc.pl
@@ -705,7 +705,7 @@ my $depd = sub {
my ($mod,$args) = @_;
my $orig = "depd$mod\t$args";
- # I only have ",z" completer, it's impicitly encoded...
+ # I only have ",z" completer, it's implicitly encoded...
if ($args =~ /%r([0-9]+),([0-9]+),([0-9]+),%r([0-9]+)/) # format 16
{ my $opcode=(0x3c<<26)|($4<<21)|($1<<16);
my $cpos=63-$2;
diff --git a/crypto/modes/asm/ghash-x86.pl b/crypto/modes/asm/ghash-x86.pl
index e9d0d0f7d6..bcbe6e399d 100644
--- a/crypto/modes/asm/ghash-x86.pl
+++ b/crypto/modes/asm/ghash-x86.pl
@@ -103,7 +103,7 @@
#
# Does it make sense to increase Naggr? To start with it's virtually
# impossible in 32-bit mode, because of limited register bank
-# capacity. Otherwise improvement has to be weighed agiainst slower
+# capacity. Otherwise improvement has to be weighed against slower
# setup, as well as code size and complexity increase. As even
# optimistic estimate doesn't promise 30% performance improvement,
# there are currently no plans to increase Naggr.
diff --git a/crypto/modes/asm/ghashp8-ppc.pl b/crypto/modes/asm/ghashp8-ppc.pl
index f0598cb28c..45c6438497 100755
--- a/crypto/modes/asm/ghashp8-ppc.pl
+++ b/crypto/modes/asm/ghashp8-ppc.pl
@@ -23,7 +23,7 @@
# Relative comparison is therefore more informative. This initial
# version is ~2.1x slower than hardware-assisted AES-128-CTR, ~12x
# faster than "4-bit" integer-only compiler-generated 64-bit code.
-# "Initial version" means that there is room for futher improvement.
+# "Initial version" means that there is room for further improvement.
# May 2016
#
diff --git a/crypto/modes/asm/ghashv8-armx.pl b/crypto/modes/asm/ghashv8-armx.pl
index c7ac7f6957..a2b0db29ff 100644
--- a/crypto/modes/asm/ghashv8-armx.pl
+++ b/crypto/modes/asm/ghashv8-armx.pl
@@ -206,13 +206,13 @@ $code.=<<___;
@ loaded value would have
@ to be rotated in order to
@ make it appear as in
- @ alorithm specification
+ @ algorithm specification
subs $len,$len,#32 @ see if $len is 32 or larger
mov $inc,#16 @ $inc is used as post-
@ increment for input pointer;
@ as loop is modulo-scheduled
@ $inc is zeroed just in time
- @ to preclude oversteping
+ @ to preclude overstepping
@ inp[len], which means that
@ last block[s] are actually
@ loaded twice, but last
@@ -370,7 +370,7 @@ if ($flavour =~ /64/) { ######## 64-bit code
s/\bq([0-9]+)\b/"v".($1<8?$1:$1+8).".16b"/geo; # old->new registers
s/@\s/\/\//o; # old->new style commentary
- # fix up remainig legacy suffixes
+ # fix up remaining legacy suffixes
s/\.[ui]?8(\s)/$1/o;
s/\.[uis]?32//o and s/\.16b/\.4s/go;
m/\.p64/o and s/\.16b/\.1q/o; # 1st pmull argument
@@ -410,7 +410,7 @@ if ($flavour =~ /64/) { ######## 64-bit code
s/\bv([0-9])\.[12468]+[bsd]\b/q$1/go; # new->old registers
s/\/\/\s?/@ /o; # new->old style commentary
- # fix up remainig new-style suffixes
+ # fix up remaining new-style suffixes
s/\],#[0-9]+/]!/o;
s/cclr\s+([^,]+),\s*([a-z]+)/mov$2 $1,#0/o or
diff --git a/crypto/objects/README b/crypto/objects/README
index cb1d216ce8..700f9c5e54 100644
--- a/crypto/objects/README
+++ b/crypto/objects/README
@@ -16,7 +16,7 @@ The basic syntax for adding an object is as follows:
create the C macros SN_base, LN_base, NID_base and OBJ_base.
Note that if the base name contains spaces, dashes or periods,
- those will be converte to underscore.
+ those will be converted to underscore.
Then there are some extra commands:
diff --git a/crypto/objects/objects.txt b/crypto/objects/objects.txt
index bbef44e6aa..87c2683291 100644
--- a/crypto/objects/objects.txt
+++ b/crypto/objects/objects.txt
@@ -855,7 +855,7 @@ internet 6 : snmpv2 : SNMPv2
# Documents refer to "internet 7" as "mail". This however leads to ambiguities
# with RFC2798, Section 9.1.3, where "mail" is defined as the short name for
# rfc822Mailbox. The short name is therefore here left out for a reason.
-# Subclasses of "mail", e.g. "MIME MHS" don't consitute a problem, as
+# Subclasses of "mail", e.g. "MIME MHS" don't constitute a problem, as
# references are realized via long name "Mail" (with capital M).
internet 7 : : Mail
@@ -1502,7 +1502,7 @@ ISO-US 10046 2 1 : dhpublicnumber : X9.42 DH
# RFC 5639 curve OIDs (see http://www.ietf.org/rfc/rfc5639.txt)
# versionOne OBJECT IDENTIFIER ::= {
-# iso(1) identifified-organization(3) teletrust(36) algorithm(3)
+# iso(1) identified-organization(3) teletrust(36) algorithm(3)
# signature-algorithm(3) ecSign(2) ecStdCurvesAndGeneration(8)
# ellipticCurve(1) 1 }
1 3 36 3 3 2 8 1 1 1 : brainpoolP160r1
diff --git a/crypto/perlasm/x86_64-xlate.pl b/crypto/perlasm/x86_64-xlate.pl
index 645be9184d..eac21c1c69 100755
--- a/crypto/perlasm/x86_64-xlate.pl
+++ b/crypto/perlasm/x86_64-xlate.pl
@@ -531,7 +531,7 @@ my %globals;
);
# Following constants are defined in x86_64 ABI supplement, for
- # example avaiable at https://www.uclibc.org/docs/psABI-x86_64.pdf,
+ # example available at https://www.uclibc.org/docs/psABI-x86_64.pdf,
# see section 3.7 "Stack Unwind Algorithm".
my %DW_reg_idx = (
"%rax"=>0, "%rdx"=>1, "%rcx"=>2, "%rbx"=>3,
@@ -544,7 +544,7 @@ my %globals;
# [us]leb128 format is variable-length integer representation base
# 2^128, with most significant bit of each byte being 0 denoting
- # *last* most significat digit. See "Variable Length Data" in the
+ # *last* most significant digit. See "Variable Length Data" in the
# DWARF specification, numbered 7.6 at least in versions 3 and 4.
sub sleb128 {
use integer; # get right shift extend sign
@@ -1427,6 +1427,6 @@ close STDOUT;
#
# (*) Note that we're talking about run-time, not debug-time. Lack of
# unwind information makes debugging hard on both Windows and
-# Unix. "Unlike" referes to the fact that on Unix signal handler
+# Unix. "Unlike" refers to the fact that on Unix signal handler
# will always be invoked, core dumped and appropriate exit code
# returned to parent (for user notification).
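
The [us]leb128 note above describes DWARF's variable-length integers: seven payload bits per byte, least-significant group first, with a clear top bit marking the final byte. A minimal unsigned-flavour encoder sketch in C (illustrative, not the perlasm sub):

    #include <stddef.h>
    #include <stdint.h>

    /* Encode v as unsigned LEB128; returns the number of bytes written.
     * out needs at most 10 bytes for a 64-bit value. */
    static size_t uleb128(uint64_t v, uint8_t out[10])
    {
        size_t n = 0;
        do {
            uint8_t b = v & 0x7f;           /* low 7 bits */
            v >>= 7;
            if (v != 0)
                b |= 0x80;                  /* more bytes follow */
            out[n++] = b;
        } while (v != 0);
        return n;
    }

    /* Example: 300 = 0b10_0101100 encodes as 0xac 0x02. */

The signed variant differs only in sign-extending the last group, which is why the Perl sub above relies on "use integer" for sign-preserving right shifts.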
diff --git a/crypto/poly1305/asm/poly1305-x86.pl b/crypto/poly1305/asm/poly1305-x86.pl
index b6f09bf737..ec1efd93bf 100755
--- a/crypto/poly1305/asm/poly1305-x86.pl
+++ b/crypto/poly1305/asm/poly1305-x86.pl
@@ -730,7 +730,7 @@ my $extra = shift;
&movdqa ($T0,$T1); # -> base 2^26 ...
&pand ($T1,$MASK);
- &paddd ($D0,$T1); # ... and accumuate
+ &paddd ($D0,$T1); # ... and accumulate
&movdqa ($T1,$T0);
&psrlq ($T0,26);
diff --git a/crypto/poly1305/asm/poly1305-x86_64.pl b/crypto/poly1305/asm/poly1305-x86_64.pl
index 1faa6ebf46..4482d3971c 100755
--- a/crypto/poly1305/asm/poly1305-x86_64.pl
+++ b/crypto/poly1305/asm/poly1305-x86_64.pl
@@ -298,7 +298,7 @@ poly1305_emit:
mov %r9,%rcx
adc \$0,%r9
adc \$0,%r10
- shr \$2,%r10 # did 130-bit value overfow?
+ shr \$2,%r10 # did 130-bit value overflow?
cmovnz %r8,%rax
cmovnz %r9,%rcx
@@ -1403,7 +1403,7 @@ poly1305_emit_avx:
mov %r9,%rcx
adc \$0,%r9
adc \$0,%r10
- shr \$2,%r10 # did 130-bit value overfow?
+ shr \$2,%r10 # did 130-bit value overflow?
cmovnz %r8,%rax
cmovnz %r9,%rcx
@@ -3734,7 +3734,7 @@ poly1305_emit_base2_44:
mov %r9,%rcx
adc \$0,%r9
adc \$0,%r10
- shr \$2,%r10 # did 130-bit value overfow?
+ shr \$2,%r10 # did 130-bit value overflow?
cmovnz %r8,%rax
cmovnz %r9,%rcx
diff --git a/crypto/rc4/asm/rc4-586.pl b/crypto/rc4/asm/rc4-586.pl
index fdfa309faf..8c5cf87d05 100644
--- a/crypto/rc4/asm/rc4-586.pl
+++ b/crypto/rc4/asm/rc4-586.pl
@@ -134,7 +134,7 @@ if ($alt=0) {
push (@XX,shift(@XX)) if ($i>=0);
}
} else {
- # Using pinsrw here improves performane on Intel CPUs by 2-3%, but
+ # Using pinsrw here improves performance on Intel CPUs by 2-3%, but
# brings down AMD by 7%...
$RC4_loop_mmx = sub {
my $i=shift;
diff --git a/crypto/rc4/asm/rc4-x86_64.pl b/crypto/rc4/asm/rc4-x86_64.pl
index 0e283145b6..1a9cc47d72 100755
--- a/crypto/rc4/asm/rc4-x86_64.pl
+++ b/crypto/rc4/asm/rc4-x86_64.pl
@@ -88,7 +88,7 @@
# The only code path that was not modified is P4-specific one. Non-P4
# Intel code path optimization is heavily based on submission by Maxim
# Perminov, Maxim Locktyukhin and Jim Guilford of Intel. I've used
-# some of the ideas even in attempt to optmize the original RC4_INT
+# some of the ideas even in attempt to optimize the original RC4_INT
# code path... Current performance in cycles per processed byte (less
# is better) and improvement coefficients relative to previous
# version of this module are:
diff --git a/crypto/rsa/rsa_pmeth.c b/crypto/rsa/rsa_pmeth.c
index a23d428777..5e2df3e8ff 100644
--- a/crypto/rsa/rsa_pmeth.c
+++ b/crypto/rsa/rsa_pmeth.c
@@ -790,7 +790,7 @@ static int pkey_pss_init(EVP_PKEY_CTX *ctx)
if (!rsa_pss_get_param(rsa->pss, &md, &mgf1md, &min_saltlen))
return 0;
- /* See if minumum salt length exceeds maximum possible */
+ /* See if minimum salt length exceeds maximum possible */
max_saltlen = RSA_size(rsa) - EVP_MD_size(md);
if ((RSA_bits(rsa) & 0x7) == 1)
max_saltlen--;
diff --git a/crypto/sha/asm/sha1-586.pl b/crypto/sha/asm/sha1-586.pl
index c6c977ab03..ca3763991c 100644
--- a/crypto/sha/asm/sha1-586.pl
+++ b/crypto/sha/asm/sha1-586.pl
@@ -35,7 +35,7 @@
# P4 +85%(!) +45%
#
# As you can see Pentium came out as looser:-( Yet I reckoned that
-# improvement on P4 outweights the loss and incorporate this
+# improvement on P4 outweighs the loss and incorporate this
# re-tuned code to 0.9.7 and later.
# ----------------------------------------------------------------
@@ -549,7 +549,7 @@ for($i=0;$i<20-4;$i+=2) {
# being implemented in SSSE3). Once 8 quadruples or 32 elements are
# collected, it switches to routine proposed by Max Locktyukhin.
#
-# Calculations inevitably require temporary reqisters, and there are
+# Calculations inevitably require temporary registers, and there are
# no %xmm registers left to spare. For this reason part of the ring
# buffer, X[2..4] to be specific, is offloaded to 3 quadriples ring
# buffer on the stack. Keep in mind that X[2] is alias X[-6], X[3] -
diff --git a/crypto/sha/asm/sha1-mb-x86_64.pl b/crypto/sha/asm/sha1-mb-x86_64.pl
index 56e15292a7..443b649830 100644
--- a/crypto/sha/asm/sha1-mb-x86_64.pl
+++ b/crypto/sha/asm/sha1-mb-x86_64.pl
@@ -444,7 +444,7 @@ for(;$i<80;$i++) { &BODY_20_39($i,@V); unshift(@V,pop(@V)); }
$code.=<<___;
movdqa (%rbx),@Xi[0] # pull counters
mov \$1,%ecx
- cmp 4*0(%rbx),%ecx # examinte counters
+ cmp 4*0(%rbx),%ecx # examine counters
pxor $t2,$t2
cmovge $Tbl,@ptr[0] # cancel input
cmp 4*1(%rbx),%ecx
@@ -1508,10 +1508,10 @@ avx2_handler:
mov -48(%rax),%r15
mov %rbx,144($context) # restore context->Rbx
mov %rbp,160($context) # restore context->Rbp
- mov %r12,216($context) # restore cotnext->R12
- mov %r13,224($context) # restore cotnext->R13
- mov %r14,232($context) # restore cotnext->R14
- mov %r15,240($context) # restore cotnext->R15
+ mov %r12,216($context) # restore context->R12
+ mov %r13,224($context) # restore context->R13
+ mov %r14,232($context) # restore context->R14
+ mov %r15,240($context) # restore context->R15
lea -56-10*16(%rax),%rsi
lea 512($context),%rdi # &context.Xmm6
diff --git a/crypto/sha/asm/sha1-x86_64.pl b/crypto/sha/asm/sha1-x86_64.pl
index 8b7bbfc261..60819f6186 100755
--- a/crypto/sha/asm/sha1-x86_64.pl
+++ b/crypto/sha/asm/sha1-x86_64.pl
@@ -1984,9 +1984,9 @@ ssse3_handler:
mov -40(%rax),%r14
mov %rbx,144($context) # restore context->Rbx
mov %rbp,160($context) # restore context->Rbp
- mov %r12,216($context) # restore cotnext->R12
- mov %r13,224($context) # restore cotnext->R13
- mov %r14,232($context) # restore cotnext->R14
+ mov %r12,216($context) # restore context->R12
+ mov %r13,224($context) # restore context->R13
+ mov %r14,232($context) # restore context->R14
.Lcommon_seh_tail:
mov 8(%rax),%rdi
diff --git a/crypto/sha/asm/sha256-586.pl b/crypto/sha/asm/sha256-586.pl
index bc25b2dab5..48830728e7 100644
--- a/crypto/sha/asm/sha256-586.pl
+++ b/crypto/sha/asm/sha256-586.pl
@@ -18,7 +18,7 @@
#
# Performance improvement over compiler generated code varies from
# 10% to 40% [see below]. Not very impressive on some µ-archs, but
-# it's 5 times smaller and optimizies amount of writes.
+# it's 5 times smaller and optimizes amount of writes.
#
# May 2012.
#
diff --git a/crypto/sha/asm/sha256-mb-x86_64.pl b/crypto/sha/asm/sha256-mb-x86_64.pl
index 24276f9e67..73978dbd81 100644
--- a/crypto/sha/asm/sha256-mb-x86_64.pl
+++ b/crypto/sha/asm/sha256-mb-x86_64.pl
@@ -1508,10 +1508,10 @@ avx2_handler:
mov -48(%rax),%r15
mov %rbx,144($context) # restore context->Rbx
mov %rbp,160($context) # restore context->Rbp
- mov %r12,216($context) # restore cotnext->R12
- mov %r13,224($context) # restore cotnext->R13
- mov %r14,232($context) # restore cotnext->R14
- mov %r15,240($context) # restore cotnext->R15
+ mov %r12,216($context) # restore context->R12
+ mov %r13,224($context) # restore context->R13
+ mov %r14,232($context) # restore context->R14
+ mov %r15,240($context) # restore context->R15
lea -56-10*16(%rax),%rsi
lea 512($context),%rdi # &context.Xmm6
diff --git a/crypto/sha/asm/sha512-586.pl b/crypto/sha/asm/sha512-586.pl
index 35006e8b9f..867ce30b97 100644
--- a/crypto/sha/asm/sha512-586.pl
+++ b/crypto/sha/asm/sha512-586.pl
@@ -42,7 +42,7 @@
# (*) whichever best applicable.
# (**) x86_64 assembler performance is presented for reference
# purposes, the results are for integer-only code.
-# (***) paddq is increadibly slow on Atom.
+# (***) paddq is incredibly slow on Atom.
#
# IALU code-path is optimized for elder Pentiums. On vanilla Pentium
# performance improvement over compiler generated code reaches ~60%,
diff --git a/crypto/sha/asm/sha512-armv8.pl b/crypto/sha/asm/sha512-armv8.pl
index c55efb3085..88ad44ee5e 100644
--- a/crypto/sha/asm/sha512-armv8.pl
+++ b/crypto/sha/asm/sha512-armv8.pl
@@ -35,7 +35,7 @@
# on Cortex-A53 (or by 4 cycles per round).
# (***) Super-impressive coefficients over gcc-generated code are
# indication of some compiler "pathology", most notably code
-# generated with -mgeneral-regs-only is significanty faster
+# generated with -mgeneral-regs-only is significantly faster
# and the gap is only 40-90%.
#
# October 2016.
diff --git a/crypto/sha/asm/sha512-parisc.pl b/crypto/sha/asm/sha512-parisc.pl
index 1469bedbd6..5a082ba623 100755
--- a/crypto/sha/asm/sha512-parisc.pl
+++ b/crypto/sha/asm/sha512-parisc.pl
@@ -773,7 +773,7 @@ foreach (split("\n",$code)) {
s/shd\s+(%r[0-9]+),(%r[0-9]+),([0-9]+)/
$3>31 ? sprintf("shd\t%$2,%$1,%d",$3-32) # rotation for >=32
: sprintf("shd\t%$1,%$2,%d",$3)/e or
- # translate made up instructons: _ror, _shr, _align, _shl
+ # translate made up instructions: _ror, _shr, _align, _shl
s/_ror(\s+)(%r[0-9]+),/
($SZ==4 ? "shd" : "shrpd")."$1$2,$2,"/e or
diff --git a/crypto/sha/asm/sha512-ppc.pl b/crypto/sha/asm/sha512-ppc.pl
index fe95b01509..71699f6637 100755
--- a/crypto/sha/asm/sha512-ppc.pl
+++ b/crypto/sha/asm/sha512-ppc.pl
@@ -26,7 +26,7 @@
#
# (*) 64-bit code in 32-bit application context, which actually is
# on TODO list. It should be noted that for safe deployment in
-# 32-bit *mutli-threaded* context asyncronous signals should be
+# 32-bit *multi-threaded* context asynchronous signals should be
# blocked upon entry to SHA512 block routine. This is because
# 32-bit signaling procedure invalidates upper halves of GPRs.
# Context switch procedure preserves them, but not signaling:-(
diff --git a/crypto/sha/asm/sha512p8-ppc.pl b/crypto/sha/asm/sha512p8-ppc.pl
index 5457c4aa16..93dfef20a9 100755
--- a/crypto/sha/asm/sha512p8-ppc.pl
+++ b/crypto/sha/asm/sha512p8-ppc.pl
@@ -25,7 +25,7 @@
# sha1-ppc.pl and 1.6x slower than aes-128-cbc. Another interesting
# result is degree of computational resources' utilization. POWER8 is
# "massively multi-threaded chip" and difference between single- and
-# maximum multi-process benchmark results tells that utlization is
+# maximum multi-process benchmark results tells that utilization is
# whooping 94%. For sha512-ppc.pl we get [not unimpressive] 84% and
# for sha1-ppc.pl - 73%. 100% means that multi-process result equals
# to single-process one, given that all threads end up on the same
diff --git a/crypto/sha/keccak1600.c b/crypto/sha/keccak1600.c
index d925734a17..e7223486af 100644
--- a/crypto/sha/keccak1600.c
+++ b/crypto/sha/keccak1600.c
@@ -1055,7 +1055,7 @@ static uint64_t BitDeinterleave(uint64_t Ai)
* as blocksize. It is commonly (1600 - 256*n)/8, e.g. 168, 136, 104,
* 72, but can also be (1600 - 448)/8 = 144. All this means that message
* padding and intermediate sub-block buffering, byte- or bitwise, is
- * caller's reponsibility.
+ * caller's responsibility.
*/
size_t SHA3_absorb(uint64_t A[5][5], const unsigned char *inp, size_t len,
size_t r)
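
A worked instance of the rate arithmetic above: the capacity is twice the security level, so the (1600 - 256*n)/8 family gives r = 168 (SHAKE128), 136 (SHA3-256), 104 (SHA3-384) and 72 (SHA3-512) bytes, while (1600 - 448)/8 = 144 is SHA3-224's rate, whose 448-bit capacity is not a multiple of 256.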
diff --git a/crypto/sha/sha_locl.h b/crypto/sha/sha_locl.h
index 93b0f1ac01..4e5a090382 100644
--- a/crypto/sha/sha_locl.h
+++ b/crypto/sha/sha_locl.h
@@ -68,7 +68,7 @@ int HASH_INIT(SHA_CTX *c)
/*
* As pointed out by Wei Dai, F() below can be simplified to the code in
- * F_00_19. Wei attributes these optimisations to Peter Gutmann's SHS code,
+ * F_00_19. Wei attributes these optimizations to Peter Gutmann's SHS code,
* and he attributes it to Rich Schroeppel.
* #define F(x,y,z) (((x) & (y)) | ((~(x)) & (z)))
* I've just become aware of another tweak to be made, again from Wei Dai,
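
The simplification the comment refers to is the standard bit-select identity (x & y) | (~x & z) == z ^ (x & (y ^ z)), which saves one boolean operation per round and is the shape F_00_19 takes. A short exhaustive check (illustrative, not OpenSSL code):

    #include <assert.h>
    #include <stdint.h>

    /* F selects y where x has 1-bits and z elsewhere; since the ops are
     * bitwise, checking all single-bit inputs proves the identity. */
    int main(void)
    {
        for (uint32_t x = 0; x < 2; x++)
            for (uint32_t y = 0; y < 2; y++)
                for (uint32_t z = 0; z < 2; z++)
                    assert((((x & y) | (~x & z)) & 1U)
                           == ((z ^ (x & (y ^ z))) & 1U));
        return 0;
    }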
diff --git a/crypto/x86cpuid.pl b/crypto/x86cpuid.pl
index 1266d7634c..08c129a2a0 100644
--- a/crypto/x86cpuid.pl
+++ b/crypto/x86cpuid.pl
@@ -110,7 +110,7 @@ for (@ARGV) { $sse2=1 if (/-DOPENSSL_IA32_SSE2/); }
&cmp ("ebp",0);
&jne (&label("notintel"));
&or ("edx",1<<30); # set reserved bit#30 on Intel CPUs
- &and (&HB("eax"),15); # familiy ID
+ &and (&HB("eax"),15); # family ID
&cmp (&HB("eax"),15); # P4?
&jne (&label("notintel"));
&or ("edx",1<<20); # set reserved bit#20 to engage RC4_CHAR