author    klemens <ka7@github.com>    2016-08-05 19:56:58 +0200
committer Rich Salz <rsalz@openssl.org>    2016-08-05 19:07:30 -0400
commit    6025001707fd65679d758c877200469d4e72ea88 (patch)
tree      557bc457aea10e4f854f1ae975d38b0e9c8c79fb /crypto
parent    1ccbe6b32c98f61526e364c7abc94f55ec600293 (diff)
download  openssl-6025001707fd65679d758c877200469d4e72ea88.tar.gz
spelling fixes, just comments and readme.
Reviewed-by: Matt Caswell <matt@openssl.org>
Reviewed-by: Rich Salz <rsalz@openssl.org>
(Merged from https://github.com/openssl/openssl/pull/1413)
Diffstat (limited to 'crypto')
-rw-r--r--  crypto/aes/asm/aes-ia64.S               | 2
-rw-r--r--  crypto/aes/asm/aes-ppc.pl               | 2
-rw-r--r--  crypto/aes/asm/aesni-sha256-x86_64.pl   | 2
-rw-r--r--  crypto/aes/asm/aesni-x86_64.pl          | 2
-rw-r--r--  crypto/asn1/tasn_dec.c                  | 2
-rw-r--r--  crypto/asn1/tasn_fre.c                  | 2
-rw-r--r--  crypto/bio/b_sock2.c                    | 4
-rw-r--r--  crypto/bio/bss_mem.c                    | 2
-rw-r--r--  crypto/bn/asm/ia64.S                    | 6
-rw-r--r--  crypto/bn/asm/mips.pl                   | 2
-rw-r--r--  crypto/bn/asm/ppc.pl                    | 4
-rw-r--r--  crypto/bn/asm/sparcv8plus.S             | 2
-rw-r--r--  crypto/bn/asm/sparcv9-mont.pl           | 2
-rwxr-xr-x  crypto/bn/asm/sparcv9a-mont.pl          | 2
-rwxr-xr-x  crypto/bn/asm/x86-mont.pl               | 2
-rwxr-xr-x  crypto/chacha/asm/chacha-armv4.pl       | 2
-rwxr-xr-x  crypto/ec/asm/ecp_nistz256-armv4.pl     | 2
-rw-r--r--  crypto/include/internal/cryptlib_int.h  | 2
-rw-r--r--  crypto/modes/asm/ghash-x86.pl           | 6
-rw-r--r--  crypto/ocsp/ocsp_cl.c                   | 2
-rw-r--r--  crypto/pem/pem_lib.c                    | 2
-rwxr-xr-x  crypto/poly1305/asm/poly1305-ppc.pl     | 2
-rw-r--r--  crypto/rand/rand_egd.c                  | 2
-rw-r--r--  crypto/rc4/asm/rc4-586.pl               | 2
-rwxr-xr-x  crypto/rc4/asm/rc4-x86_64.pl            | 2
-rwxr-xr-x  crypto/sha/asm/sha1-ppc.pl              | 2
-rwxr-xr-x  crypto/sha/asm/sha512-x86_64.pl         | 2
-rw-r--r--  crypto/sparccpuid.S                     | 2
-rw-r--r--  crypto/x509/x509_vfy.c                  | 2
-rw-r--r--  crypto/x509/x_crl.c                     | 2
-rw-r--r--  crypto/x509v3/pcy_tree.c                | 2
31 files changed, 37 insertions, 37 deletions
diff --git a/crypto/aes/asm/aes-ia64.S b/crypto/aes/asm/aes-ia64.S
index ef44f7cc76..f7f1f63c9d 100644
--- a/crypto/aes/asm/aes-ia64.S
+++ b/crypto/aes/asm/aes-ia64.S
@@ -17,7 +17,7 @@
// 'and' which in turn can be assigned to M-port [there're double as
// much M-ports as there're I-ports on Itanium 2]. By sacrificing few
// registers for small constants (255, 24 and 16) to be used with
-// 'shr' and 'and' instructions I can achieve better ILP, Intruction
+// 'shr' and 'and' instructions I can achieve better ILP, Instruction
// Level Parallelism, and performance. This code outperforms GCC 3.3
// generated code by over factor of 2 (two), GCC 3.4 - by 70% and
// HP C - by 40%. Measured best-case scenario, i.e. aligned
diff --git a/crypto/aes/asm/aes-ppc.pl b/crypto/aes/asm/aes-ppc.pl
index d02dde5bc5..1558d8e454 100644
--- a/crypto/aes/asm/aes-ppc.pl
+++ b/crypto/aes/asm/aes-ppc.pl
@@ -26,7 +26,7 @@
# February 2010
#
# Rescheduling instructions to favour Power6 pipeline gave 10%
-# performance improvement on the platfrom in question (and marginal
+# performance improvement on the platform in question (and marginal
# improvement even on others). It should be noted that Power6 fails
# to process byte in 18 cycles, only in 23, because it fails to issue
# 4 load instructions in two cycles, only in 3. As result non-compact
diff --git a/crypto/aes/asm/aesni-sha256-x86_64.pl b/crypto/aes/asm/aesni-sha256-x86_64.pl
index e9ad24f7d7..a5fde2e4d1 100644
--- a/crypto/aes/asm/aesni-sha256-x86_64.pl
+++ b/crypto/aes/asm/aesni-sha256-x86_64.pl
@@ -35,7 +35,7 @@
# Skylake 2.62/3.14/3.62+7.70 8.10 +27%/34%/40%
# Bulldozer 5.77/6.89/8.00+13.7 13.7 +42%/50%/58%
#
-# (*) there are XOP, AVX1 and AVX2 code pathes, meaning that
+# (*) there are XOP, AVX1 and AVX2 code paths, meaning that
# Westmere is omitted from loop, this is because gain was not
# estimated high enough to justify the effort;
# (**) these are EVP-free results, results obtained with 'speed
diff --git a/crypto/aes/asm/aesni-x86_64.pl b/crypto/aes/asm/aesni-x86_64.pl
index a03da20a5a..25dd120dd2 100644
--- a/crypto/aes/asm/aesni-x86_64.pl
+++ b/crypto/aes/asm/aesni-x86_64.pl
@@ -4239,7 +4239,7 @@ ___
# Vinodh Gopal <vinodh.gopal@intel.com>
# Kahraman Akdemir
#
-# Agressively optimized in respect to aeskeygenassist's critical path
+# Aggressively optimized in respect to aeskeygenassist's critical path
# and is contained in %xmm0-5 to meet Win64 ABI requirement.
#
# int ${PREFIX}_set_encrypt_key(const unsigned char *inp,
diff --git a/crypto/asn1/tasn_dec.c b/crypto/asn1/tasn_dec.c
index 3af1d1b801..679a50dce5 100644
--- a/crypto/asn1/tasn_dec.c
+++ b/crypto/asn1/tasn_dec.c
@@ -683,7 +683,7 @@ static int asn1_d2i_ex_primitive(ASN1_VALUE **pval,
|| (utype == V_ASN1_SET) || (utype == V_ASN1_OTHER)) {
/*
* Clear context cache for type OTHER because the auto clear when we
- * have a exact match wont work
+ * have a exact match won't work
*/
if (utype == V_ASN1_OTHER) {
asn1_tlc_clear(ctx);
diff --git a/crypto/asn1/tasn_fre.c b/crypto/asn1/tasn_fre.c
index d467b89c80..f730d110bf 100644
--- a/crypto/asn1/tasn_fre.c
+++ b/crypto/asn1/tasn_fre.c
@@ -99,7 +99,7 @@ static void asn1_item_embed_free(ASN1_VALUE **pval, const ASN1_ITEM *it,
asn1_enc_free(pval, it);
/*
* If we free up as normal we will invalidate any ANY DEFINED BY
- * field and we wont be able to determine the type of the field it
+ * field and we won't be able to determine the type of the field it
* defines. So free up in reverse order.
*/
tt = it->templates + it->tcount;
diff --git a/crypto/bio/b_sock2.c b/crypto/bio/b_sock2.c
index 726b0629b3..7f4d89e551 100644
--- a/crypto/bio/b_sock2.c
+++ b/crypto/bio/b_sock2.c
@@ -69,7 +69,7 @@ int BIO_socket(int domain, int socktype, int protocol, int options)
*
* options holds BIO socket options that can be used
* You should call this for every address returned by BIO_lookup
- * until the connection is succesful.
+ * until the connection is successful.
*
* Returns 1 on success or 0 on failure. On failure errno is set
* and an error status is added to the OpenSSL error stack.
@@ -144,7 +144,7 @@ int BIO_connect(int sock, const BIO_ADDR *addr, int options)
* fail. We can't tell the difference between already listening ourself to
* it and someone else listening to it when failing and errno is EADDRINUSE, so
* it's recommended to not give an error in that case if the first call was
- * succesful.
+ * successful.
*
* When restarting the program it could be that the port is still in use. If
* you set to BIO_SOCK_REUSEADDR option it will try to reuse the port anyway.
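
The two comments above describe BIO_connect()'s intended calling pattern: resolve with BIO_lookup(), then try BIO_socket()/BIO_connect() for each returned address until one succeeds. A minimal sketch of that loop against the same BIO_ADDRINFO API; the helper name connect_first and its error handling are illustrative, not part of the patch:

#include <sys/socket.h>          /* AF_UNSPEC, SOCK_STREAM (POSIX) */
#include <openssl/bio.h>

/* Hypothetical helper: returns a connected socket, or -1 on failure. */
static int connect_first(const char *host, const char *service)
{
    BIO_ADDRINFO *res = NULL;
    const BIO_ADDRINFO *ai;
    int sock = -1;

    if (!BIO_lookup(host, service, BIO_LOOKUP_CLIENT,
                    AF_UNSPEC, SOCK_STREAM, &res))
        return -1;
    for (ai = res; ai != NULL; ai = BIO_ADDRINFO_next(ai)) {
        sock = BIO_socket(BIO_ADDRINFO_family(ai), SOCK_STREAM, 0, 0);
        if (sock == -1)                 /* -1, i.e. INVALID_SOCKET */
            continue;                   /* try the next address */
        if (BIO_connect(sock, BIO_ADDRINFO_address(ai), 0))
            break;                      /* connected */
        BIO_closesocket(sock);
        sock = -1;
    }
    BIO_ADDRINFO_free(res);
    return sock;
}
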
diff --git a/crypto/bio/bss_mem.c b/crypto/bio/bss_mem.c
index a61ab7cc64..6dc075dc61 100644
--- a/crypto/bio/bss_mem.c
+++ b/crypto/bio/bss_mem.c
@@ -92,7 +92,7 @@ BIO *BIO_new_mem_buf(const void *buf, int len)
b->max = sz;
*bb->readp = *bb->buf;
ret->flags |= BIO_FLAGS_MEM_RDONLY;
- /* Since this is static data retrying wont help */
+ /* Since this is static data retrying won't help */
ret->num = 0;
return ret;
}
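
For reference, a read-only memory BIO like the one set up above is typically used as follows; a small sketch, where len = -1 tells BIO_new_mem_buf() to take strlen() of a NUL-terminated buffer:

#include <openssl/bio.h>

static const char data[] = "static data, e.g. a PEM blob";

static void read_static(void)
{
    BIO *b = BIO_new_mem_buf(data, -1); /* read-only; writes fail, no retry */
    char buf[64];
    int n = BIO_read(b, buf, sizeof(buf) - 1);

    if (n > 0)
        buf[n] = '\0';                  /* consumed n bytes of the buffer */
    BIO_free(b);
}
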
diff --git a/crypto/bn/asm/ia64.S b/crypto/bn/asm/ia64.S
index 2fdf5bbabe..f2404a3c1e 100644
--- a/crypto/bn/asm/ia64.S
+++ b/crypto/bn/asm/ia64.S
@@ -29,7 +29,7 @@
// ports is the same, i.e. 2, while I need 4. In other words, to this
// module Itanium2 remains effectively as "wide" as Itanium. Yet it's
// essentially different in respect to this module, and a re-tune was
-// required. Well, because some intruction latencies has changed. Most
+// required. Well, because some instruction latencies has changed. Most
// noticeably those intensively used:
//
// Itanium Itanium2
@@ -370,7 +370,7 @@ bn_mul_words:
// The loop therefore spins at the latency of xma minus 1, or in other
// words at 6*(n+4) ticks:-( Compare to the "production" loop above
// that runs in 2*(n+11) where the low latency problem is worked around
-// by moving the dependency to one-tick latent interger ALU. Note that
+// by moving the dependency to one-tick latent integer ALU. Note that
// "distance" between ldf8 and xma is not latency of ldf8, but the
// *difference* between xma and ldf8 latencies.
.L_bn_mul_words_ctop:
@@ -432,7 +432,7 @@ bn_mul_add_words:
// version was performing *all* additions in IALU and was starving
// for those even on Itanium 2. In this version one addition is
// moved to FPU and is folded with multiplication. This is at cost
-// of propogating the result from previous call to this subroutine
+// of propagating the result from previous call to this subroutine
// to L2 cache... In other words negligible even for shorter keys.
// *Overall* performance improvement [over previous version] varies
// from 11 to 22 percent depending on key length.
diff --git a/crypto/bn/asm/mips.pl b/crypto/bn/asm/mips.pl
index e3a38bd140..420f01f3a4 100644
--- a/crypto/bn/asm/mips.pl
+++ b/crypto/bn/asm/mips.pl
@@ -22,7 +22,7 @@
# This is drop-in MIPS III/IV ISA replacement for crypto/bn/bn_asm.c.
#
# The module is designed to work with either of the "new" MIPS ABI(5),
-# namely N32 or N64, offered by IRIX 6.x. It's not ment to work under
+# namely N32 or N64, offered by IRIX 6.x. It's not meant to work under
# IRIX 5.x not only because it doesn't support new ABIs but also
# because 5.x kernels put R4x00 CPU into 32-bit mode and all those
# 64-bit instructions (daddu, dmultu, etc.) found below gonna only
diff --git a/crypto/bn/asm/ppc.pl b/crypto/bn/asm/ppc.pl
index 346e01faf5..4ea534a1c7 100644
--- a/crypto/bn/asm/ppc.pl
+++ b/crypto/bn/asm/ppc.pl
@@ -425,7 +425,7 @@ $data=<<EOF;
# r9,r10, r11 are the equivalents of c1,c2, c3.
#
# Possible optimization of loading all 8 longs of a into registers
-# doesnt provide any speedup
+# doesn't provide any speedup
#
xor r0,r0,r0 #set r0 = 0.Used in addze
@@ -1015,7 +1015,7 @@ $data=<<EOF;
$UMULL r8,r6,r7
$UMULH r9,r6,r7
addc r11,r11,r8
- addze r12,r9 # since we didnt set r12 to zero before.
+ addze r12,r9 # since we didn't set r12 to zero before.
addze r10,r0
#mul_add_c(a[1],b[0],c2,c3,c1);
$LD r6,`1*$BNSZ`(r4)
diff --git a/crypto/bn/asm/sparcv8plus.S b/crypto/bn/asm/sparcv8plus.S
index e77e67aa57..714a136675 100644
--- a/crypto/bn/asm/sparcv8plus.S
+++ b/crypto/bn/asm/sparcv8plus.S
@@ -52,7 +52,7 @@
* # cd ../..
* # make; make test
*
- * Q. V8plus achitecture? What kind of beast is that?
+ * Q. V8plus architecture? What kind of beast is that?
* A. Well, it's rather a programming model than an architecture...
* It's actually v9-compliant, i.e. *any* UltraSPARC, CPU under
* special conditions, namely when kernel doesn't preserve upper
diff --git a/crypto/bn/asm/sparcv9-mont.pl b/crypto/bn/asm/sparcv9-mont.pl
index 771cd96141..c36ce36806 100644
--- a/crypto/bn/asm/sparcv9-mont.pl
+++ b/crypto/bn/asm/sparcv9-mont.pl
@@ -20,7 +20,7 @@
# for undertaken effort are multiple. First of all, UltraSPARC is not
# the whole SPARCv9 universe and other VIS-free implementations deserve
# optimized code as much. Secondly, newly introduced UltraSPARC T1,
-# a.k.a. Niagara, has shared FPU and concurrent FPU-intensive pathes,
+# a.k.a. Niagara, has shared FPU and concurrent FPU-intensive paths,
# such as sparcv9a-mont, will simply sink it. Yes, T1 is equipped with
# several integrated RSA/DSA accelerator circuits accessible through
# kernel driver [only(*)], but having decent user-land software
diff --git a/crypto/bn/asm/sparcv9a-mont.pl b/crypto/bn/asm/sparcv9a-mont.pl
index 902c0d3ad2..50b690653f 100755
--- a/crypto/bn/asm/sparcv9a-mont.pl
+++ b/crypto/bn/asm/sparcv9a-mont.pl
@@ -58,7 +58,7 @@
#
# Modulo-scheduled inner loops allow to interleave floating point and
# integer instructions and minimize Read-After-Write penalties. This
-# results in *further* 20-50% perfromance improvement [depending on
+# results in *further* 20-50% performance improvement [depending on
# key length, more for longer keys] on USI&II cores and 30-80% - on
# USIII&IV.
diff --git a/crypto/bn/asm/x86-mont.pl b/crypto/bn/asm/x86-mont.pl
index 9994b0bf96..09296ec662 100755
--- a/crypto/bn/asm/x86-mont.pl
+++ b/crypto/bn/asm/x86-mont.pl
@@ -294,7 +294,7 @@ if (0) {
&xor ("eax","eax"); # signal "not fast enough [yet]"
&jmp (&label("just_leave"));
# While the below code provides competitive performance for
- # all key lengthes on modern Intel cores, it's still more
+ # all key lengths on modern Intel cores, it's still more
# than 10% slower for 4096-bit key elsewhere:-( "Competitive"
# means compared to the original integer-only assembler.
# 512-bit RSA sign is better by ~40%, but that's about all
diff --git a/crypto/chacha/asm/chacha-armv4.pl b/crypto/chacha/asm/chacha-armv4.pl
index 5b3e7be781..b5e21e4938 100755
--- a/crypto/chacha/asm/chacha-armv4.pl
+++ b/crypto/chacha/asm/chacha-armv4.pl
@@ -1134,7 +1134,7 @@ $code.=<<___;
ldrb @t[1],[r12],#1 @ read input
subs @t[3],@t[3],#1
eor @t[0],@t[0],@t[1]
- strb @t[0],[r14],#1 @ store ouput
+ strb @t[0],[r14],#1 @ store output
bne .Loop_tail_neon
.Ldone_neon:
diff --git a/crypto/ec/asm/ecp_nistz256-armv4.pl b/crypto/ec/asm/ecp_nistz256-armv4.pl
index 62761f8c96..73b7a55806 100755
--- a/crypto/ec/asm/ecp_nistz256-armv4.pl
+++ b/crypto/ec/asm/ecp_nistz256-armv4.pl
@@ -374,7 +374,7 @@ __ecp_nistz256_div_by_2:
mov $ff,$a0,lsl#31 @ place least significant bit to most
@ significant position, now arithmetic
@ right shift by 31 will produce -1 or
- @ 0, while logical rigth shift 1 or 0,
+ @ 0, while logical right shift 1 or 0,
@ this is how modulus is conditionally
@ synthesized in this case...
ldr $a3,[$a_ptr,#12]
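
The conditional-modulus trick in this comment is easier to see in scalar C: move the low bit up to the sign position, then arithmetic-shift it back to broadcast it across the word. A sketch assuming 32-bit limbs and an arithmetic right shift for signed values (true on the compilers and targets this module cares about), not the module's actual code:

#include <stdint.h>

/* Returns 0xffffffff if a0 is odd, 0 if it is even. */
static uint32_t lsb_mask(uint32_t a0)
{
    return (uint32_t)((int32_t)(a0 << 31) >> 31);
}

/* The mask then conditionally synthesizes the modulus before halving,
 * limb by limb along the carry chain: a[i] += mod[i] & lsb_mask(a[0]); */
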
diff --git a/crypto/include/internal/cryptlib_int.h b/crypto/include/internal/cryptlib_int.h
index aa02ddcd17..8e2a7199a1 100644
--- a/crypto/include/internal/cryptlib_int.h
+++ b/crypto/include/internal/cryptlib_int.h
@@ -20,7 +20,7 @@ int ossl_init_thread_start(uint64_t opts);
/*
* OPENSSL_INIT flags. The primary list of these is in crypto.h. Flags below
- * are those ommitted from crypto.h because they are "reserved for internal
+ * are those omitted from crypto.h because they are "reserved for internal
* use".
*/
# define OPENSSL_INIT_ZLIB 0x00010000L
diff --git a/crypto/modes/asm/ghash-x86.pl b/crypto/modes/asm/ghash-x86.pl
index 4eb0b2c6e5..cd8458256e 100644
--- a/crypto/modes/asm/ghash-x86.pl
+++ b/crypto/modes/asm/ghash-x86.pl
@@ -95,7 +95,7 @@
# where Tproc is time required for Karatsuba pre- and post-processing,
# is more realistic estimate. In this case it gives ... 1.91 cycles.
# Or in other words, depending on how well we can interleave reduction
-# and one of the two multiplications the performance should be betwen
+# and one of the two multiplications the performance should be between
# 1.91 and 2.16. As already mentioned, this implementation processes
# one byte out of 8KB buffer in 2.10 cycles, while x86_64 counterpart
# - in 2.02. x86_64 performance is better, because larger register
@@ -722,7 +722,7 @@ sub mmx_loop() {
&pxor ($red[1],$red[1]);
&pxor ($red[2],$red[2]);
- # Just like in "May" verson modulo-schedule for critical path in
+ # Just like in "May" version modulo-schedule for critical path in
# 'Z.hi ^= rem_8bit[Z.lo&0xff^((u8)H[nhi]<<4)]<<48'. Final 'pxor'
# is scheduled so late that rem_8bit[] has to be shifted *right*
# by 16, which is why last argument to pinsrw is 2, which
@@ -1148,7 +1148,7 @@ my ($Xhi,$Xi) = @_;
&movdqu (&QWP(0,$Xip),$Xi);
&function_end("gcm_ghash_clmul");
-} else { # Algorith 5. Kept for reference purposes.
+} else { # Algorithm 5. Kept for reference purposes.
sub reduction_alg5 { # 19/16 times faster than Intel version
my ($Xhi,$Xi)=@_;
diff --git a/crypto/ocsp/ocsp_cl.c b/crypto/ocsp/ocsp_cl.c
index 12d5bef574..0ae474b332 100644
--- a/crypto/ocsp/ocsp_cl.c
+++ b/crypto/ocsp/ocsp_cl.c
@@ -299,7 +299,7 @@ int OCSP_resp_find_status(OCSP_BASICRESP *bs, OCSP_CERTID *id, int *status,
/*
* Check validity of thisUpdate and nextUpdate fields. It is possible that
- * the request will take a few seconds to process and/or the time wont be
+ * the request will take a few seconds to process and/or the time won't be
* totally accurate. Therefore to avoid rejecting otherwise valid time we
* allow the times to be within 'nsec' of the current time. Also to avoid
* accepting very old responses without a nextUpdate field an optional maxage
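
The window described here is what OCSP_check_validity() enforces; a short usage sketch, where the nsec and maxage values (5 minutes, 14 days) are illustrative choices rather than library defaults:

#include <openssl/ocsp.h>

/* Hypothetical wrapper: look up one certificate's status and check
 * its thisUpdate/nextUpdate fields with the given tolerances. */
static int check_single(OCSP_BASICRESP *bs, OCSP_CERTID *id)
{
    int status, reason;
    ASN1_GENERALIZEDTIME *rev, *thisupd, *nextupd;

    if (!OCSP_resp_find_status(bs, id, &status, &reason,
                               &rev, &thisupd, &nextupd))
        return 0;                       /* id not found in this response */
    return OCSP_check_validity(thisupd, nextupd, 5 * 60, 14 * 24 * 60 * 60);
}
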
diff --git a/crypto/pem/pem_lib.c b/crypto/pem/pem_lib.c
index 8965fda8d0..2792593cb2 100644
--- a/crypto/pem/pem_lib.c
+++ b/crypto/pem/pem_lib.c
@@ -151,7 +151,7 @@ static int check_pem(const char *nm, const char *name)
slen = pem_check_suffix(nm, "PRIVATE KEY");
if (slen > 0) {
/*
- * NB: ENGINE implementations wont contain a deprecated old
+ * NB: ENGINE implementations won't contain a deprecated old
* private key decode function so don't look for them.
*/
ameth = EVP_PKEY_asn1_find_str(NULL, nm, slen);
diff --git a/crypto/poly1305/asm/poly1305-ppc.pl b/crypto/poly1305/asm/poly1305-ppc.pl
index 8a06c77c83..ab65910282 100755
--- a/crypto/poly1305/asm/poly1305-ppc.pl
+++ b/crypto/poly1305/asm/poly1305-ppc.pl
@@ -62,7 +62,7 @@ if ($flavour =~ /64/) {
$PUSH ="stw";
} else { die "nonsense $flavour"; }
-# Define endianess based on flavour
+# Define endianness based on flavour
# i.e.: linux64le
$LITTLE_ENDIAN = ($flavour=~/le$/) ? $SIZE_T : 0;
diff --git a/crypto/rand/rand_egd.c b/crypto/rand/rand_egd.c
index f4bfd8696c..fb6bad70dc 100644
--- a/crypto/rand/rand_egd.c
+++ b/crypto/rand/rand_egd.c
@@ -48,7 +48,7 @@ NON_EMPTY_TRANSLATION_UNIT
* the number of bytes requested or smaller, if the EGD pool is
* drained and the daemon signals that the pool is empty.
*
- * RAND_egd(path) will query 255 bytes and use the bytes retreived to seed
+ * RAND_egd(path) will query 255 bytes and use the bytes retrieved to seed
* the PRNG.
* RAND_egd() is a wrapper for RAND_egd_bytes() with numbytes=255.
*/
diff --git a/crypto/rc4/asm/rc4-586.pl b/crypto/rc4/asm/rc4-586.pl
index 936177778d..7d6f97c59e 100644
--- a/crypto/rc4/asm/rc4-586.pl
+++ b/crypto/rc4/asm/rc4-586.pl
@@ -157,7 +157,7 @@ if ($alt=0) {
&movd ($i>0?"mm1":"mm2",&DWP(0,$dat,$ty,4));
# (*) This is the key to Core2 and Westmere performance.
- # Whithout movz out-of-order execution logic confuses
+ # Without movz out-of-order execution logic confuses
# itself and fails to reorder loads and stores. Problem
# appears to be fixed in Sandy Bridge...
}
diff --git a/crypto/rc4/asm/rc4-x86_64.pl b/crypto/rc4/asm/rc4-x86_64.pl
index 5ae0c6dd49..aaed2b1e61 100755
--- a/crypto/rc4/asm/rc4-x86_64.pl
+++ b/crypto/rc4/asm/rc4-x86_64.pl
@@ -57,7 +57,7 @@
# As was shown by Zou Nanhai loop unrolling can improve Intel EM64T
# performance by >30% [unlike P4 32-bit case that is]. But this is
# provided that loads are reordered even more aggressively! Both code
-# pathes, AMD64 and EM64T, reorder loads in essentially same manner
+# paths, AMD64 and EM64T, reorder loads in essentially same manner
# as my IA-64 implementation. On Opteron this resulted in modest 5%
# improvement [I had to test it], while final Intel P4 performance
# achieves respectful 432MBps on 2.8GHz processor now. For reference.
diff --git a/crypto/sha/asm/sha1-ppc.pl b/crypto/sha/asm/sha1-ppc.pl
index 7a66e0353e..add5a9ea5c 100755
--- a/crypto/sha/asm/sha1-ppc.pl
+++ b/crypto/sha/asm/sha1-ppc.pl
@@ -44,7 +44,7 @@ if ($flavour =~ /64/) {
$PUSH ="stw";
} else { die "nonsense $flavour"; }
-# Define endianess based on flavour
+# Define endianness based on flavour
# i.e.: linux64le
$LITTLE_ENDIAN = ($flavour=~/le$/) ? $SIZE_T : 0;
diff --git a/crypto/sha/asm/sha512-x86_64.pl b/crypto/sha/asm/sha512-x86_64.pl
index 3dbb23ae11..63a62656f6 100755
--- a/crypto/sha/asm/sha512-x86_64.pl
+++ b/crypto/sha/asm/sha512-x86_64.pl
@@ -41,7 +41,7 @@
# level parallelism, on a given CPU implementation in this case.
#
# Special note on Intel EM64T. While Opteron CPU exhibits perfect
-# perfromance ratio of 1.5 between 64- and 32-bit flavors [see above],
+# performance ratio of 1.5 between 64- and 32-bit flavors [see above],
# [currently available] EM64T CPUs apparently are far from it. On the
# contrary, 64-bit version, sha512_block, is ~30% *slower* than 32-bit
# sha256_block:-( This is presumably because 64-bit shifts/rotates
diff --git a/crypto/sparccpuid.S b/crypto/sparccpuid.S
index 4c394fa00d..c6ca224738 100644
--- a/crypto/sparccpuid.S
+++ b/crypto/sparccpuid.S
@@ -298,7 +298,7 @@ _sparcv9_vis1_instrument:
sub %o3,%o2,%o2
sub %o4,%o3,%o3
- ! find minumum value
+ ! find minimum value
cmp %o0,%o1
.word 0x38680002 !bgu,a %xcc,.+8
mov %o1,%o0
diff --git a/crypto/x509/x509_vfy.c b/crypto/x509/x509_vfy.c
index 28745741b2..36baeacb9d 100644
--- a/crypto/x509/x509_vfy.c
+++ b/crypto/x509/x509_vfy.c
@@ -889,7 +889,7 @@ static int check_cert(X509_STORE_CTX *ctx)
crl = NULL;
dcrl = NULL;
/*
- * If reasons not updated we wont get anywhere by another iteration,
+ * If reasons not updated we won't get anywhere by another iteration,
* so exit loop.
*/
if (last_reasons == ctx->current_reasons) {
diff --git a/crypto/x509/x_crl.c b/crypto/x509/x_crl.c
index f4bcea56d1..f94d04c37d 100644
--- a/crypto/x509/x_crl.c
+++ b/crypto/x509/x_crl.c
@@ -41,7 +41,7 @@ static const X509_CRL_METHOD *default_crl_method = &int_crl_meth;
/*
* The X509_CRL_INFO structure needs a bit of customisation. Since we cache
- * the original encoding the signature wont be affected by reordering of the
+ * the original encoding the signature won't be affected by reordering of the
* revoked field.
*/
static int crl_inf_cb(int operation, ASN1_VALUE **pval, const ASN1_ITEM *it,
diff --git a/crypto/x509v3/pcy_tree.c b/crypto/x509v3/pcy_tree.c
index df50d803e2..9f9246beae 100644
--- a/crypto/x509v3/pcy_tree.c
+++ b/crypto/x509v3/pcy_tree.c
@@ -129,7 +129,7 @@ static int tree_init(X509_POLICY_TREE **ptree, STACK_OF(X509) *certs,
* decremented for every non-self-issued certificate in the path, but may
* be further reduced by policy constraints in a non-leaf certificate.
*
- * The ultimate policy set is the interesection of all the policies along
+ * The ultimate policy set is the intersection of all the policies along
* the path, if we hit a certificate with an empty policy set, and explicit
* policy is required we're done.
*/
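
As a loose sketch of the bookkeeping this comment describes (the struct and field names are invented for illustration; this is not the library's actual state machine):

struct cert_info {
    int self_issued;        /* non-zero for self-issued CA certificates */
    int require_explicit;   /* requireExplicitPolicy skip count, -1 if absent */
};

/* Track the explicit_policy counter along a path of n certificates. */
static int explicit_policy_after(const struct cert_info *path, int n, int init)
{
    int ep = init;
    int i;

    for (i = 0; i < n; i++) {
        if (!path[i].self_issued && ep > 0)
            ep--;                              /* per-certificate decrement */
        if (path[i].require_explicit >= 0 && path[i].require_explicit < ep)
            ep = path[i].require_explicit;     /* constraint clamps it lower */
    }
    return ep;   /* once 0, a non-empty policy intersection is required */
}
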