aboutsummaryrefslogtreecommitdiffstats
path: root/crypto/sha
diff options
context:
space:
mode:
Diffstat (limited to 'crypto/sha')
-rw-r--r--crypto/sha/Makefile.ssl47
-rw-r--r--crypto/sha/Makefile.uni122
-rw-r--r--crypto/sha/asm/README1
-rw-r--r--crypto/sha/asm/s1-win32.asm1664
-rw-r--r--crypto/sha/asm/sha1-586.pl491
-rw-r--r--crypto/sha/asm/sx86unix.cpp1948
-rw-r--r--crypto/sha/sha.c2
-rw-r--r--crypto/sha/sha.h6
-rw-r--r--crypto/sha/sha1.c135
-rw-r--r--crypto/sha/sha1_one.c4
-rw-r--r--crypto/sha/sha1dgst.c361
-rw-r--r--crypto/sha/sha1s.cpp79
-rw-r--r--crypto/sha/sha1test.c4
-rw-r--r--crypto/sha/sha_dgst.c340
-rw-r--r--crypto/sha/sha_locl.h82
-rw-r--r--crypto/sha/sha_one.c4
-rw-r--r--crypto/sha/sha_sgst.c246
-rw-r--r--crypto/sha/shatest.c2
18 files changed, 5231 insertions, 307 deletions
diff --git a/crypto/sha/Makefile.ssl b/crypto/sha/Makefile.ssl
index 3c3a9abd46..eeb545d140 100644
--- a/crypto/sha/Makefile.ssl
+++ b/crypto/sha/Makefile.ssl
@@ -2,16 +2,18 @@
# SSLeay/crypto/sha/Makefile
#
-DIR= sha
-TOP= ../..
-CC= cc
+DIR= sha
+TOP= ../..
+CC= cc
INCLUDES=
CFLAG=-g
INSTALLTOP=/usr/local/ssl
-MAKE= make -f Makefile.ssl
-MAKEDEPEND= makedepend -f Makefile.ssl
-MAKEFILE= Makefile.ssl
-AR= ar r
+MAKE= make -f Makefile.ssl
+MAKEDEPEND= makedepend -f Makefile.ssl
+MAKEFILE= Makefile.ssl
+AR= ar r
+
+SHA1_ASM_OBJ=
CFLAGS= $(INCLUDES) $(CFLAG)
@@ -21,25 +23,46 @@ APPS=
LIB=$(TOP)/libcrypto.a
LIBSRC=sha_dgst.c sha1dgst.c sha_one.c sha1_one.c
-LIBOBJ=sha_dgst.o sha1dgst.o sha_one.o sha1_one.o
+LIBOBJ=sha_dgst.o sha1dgst.o sha_one.o sha1_one.o $(SHA1_ASM_OBJ)
SRC= $(LIBSRC)
EXHEADER= sha.h
-HEADER= sha_locl.h $(EXHEADER)
+HEADER= sha_locl.h $(EXHEADER)
ALL= $(GENERAL) $(SRC) $(HEADER)
top:
(cd ../..; $(MAKE) DIRS=crypto SDIRS=$(DIR) sub_all)
-all: lib
+all: lib
-lib: $(LIBOBJ)
+lib: $(LIBOBJ)
$(AR) $(LIB) $(LIBOBJ)
sh $(TOP)/util/ranlib.sh $(LIB)
@touch lib
+# elf
+asm/sx86-elf.o: asm/sx86unix.cpp
+ $(CPP) -DELF asm/sx86unix.cpp | as -o asm/sx86-elf.o
+
+# solaris
+asm/sx86-sol.o: asm/sx86unix.cpp
+ $(CC) -E -DSOL asm/sx86unix.cpp | sed 's/^#.*//' > asm/sx86-sol.s
+ as -o asm/sx86-sol.o asm/sx86-sol.s
+ rm -f asm/sx86-sol.s
+
+# a.out
+asm/sx86-out.o: asm/sx86unix.cpp
+ $(CPP) -DOUT asm/sx86unix.cpp | as -o asm/sx86-out.o
+
+# bsdi
+asm/sx86bsdi.o: asm/sx86unix.cpp
+ $(CPP) -DBSDI asm/sx86unix.cpp | as -o asm/sx86bsdi.o
+
+asm/sx86unix.cpp:
+ (cd asm; perl sha1-586.pl cpp >sx86unix.cpp)
+
files:
perl $(TOP)/util/files.pl Makefile.ssl >> $(TOP)/MINFO
@@ -73,7 +96,7 @@ dclean:
mv -f Makefile.new $(MAKEFILE)
clean:
- /bin/rm -f *.o *.obj lib tags core .pure .nfs* *.old *.bak fluff
+ /bin/rm -f *.o *.obj lib tags core .pure .nfs* *.old *.bak fluff asm/*.o
errors:
diff --git a/crypto/sha/Makefile.uni b/crypto/sha/Makefile.uni
new file mode 100644
index 0000000000..f3236755b2
--- /dev/null
+++ b/crypto/sha/Makefile.uni
@@ -0,0 +1,122 @@
+# Targets
+# make - twiddle the options yourself :-)
+# make cc - standard cc options
+# make gcc - standard gcc options
+# make x86-elf - linux-elf etc
+# make x86-out - linux-a.out, FreeBSD etc
+# make x86-solaris
+# make x86-bdsi
+
+DIR= sha
+TOP= .
+CC= gcc
+CFLAG= -O3 -fomit-frame-pointer
+
+CPP= $(CC) -E
+INCLUDES=
+INSTALLTOP=/usr/local/lib
+MAKE= make
+MAKEDEPEND= makedepend
+MAKEFILE= Makefile.uni
+AR= ar r
+
+SHA_ASM_OBJ=
+
+CFLAGS= $(INCLUDES) $(CFLAG)
+
+GENERAL=Makefile
+
+TEST1=shatest
+TEST2=sha1test
+APP1=sha
+APP2=sha1
+
+TEST=$(TEST1) $(TEST2)
+APPS=$(APP1) $(APP2)
+
+LIB=libsha.a
+LIBSRC=sha_dgst.c sha1dgst.c sha_one.c sha1_one.c
+LIBOBJ=sha_dgst.o sha1dgst.o sha_one.o sha1_one.o $(SHA_ASM_OBJ)
+
+SRC= $(LIBSRC)
+
+EXHEADER= sha.h
+HEADER= sha_locl.h $(EXHEADER)
+
+ALL= $(GENERAL) $(SRC) $(HEADER)
+
+all: $(LIB) $(TEST) $(APPS)
+
+$(LIB): $(LIBOBJ)
+ $(AR) $(LIB) $(LIBOBJ)
+ sh $(TOP)/ranlib.sh $(LIB)
+
+# elf
+asm/sx86-elf.o: asm/sx86unix.cpp
+ $(CPP) -DELF asm/sx86unix.cpp | as -o asm/sx86-elf.o
+
+# solaris
+asm/sx86-sol.o: asm/sx86unix.cpp
+ $(CC) -E -DSOL asm/sx86unix.cpp | sed 's/^#.*//' > asm/sx86-sol.s
+ as -o asm/sx86-sol.o asm/sx86-sol.s
+ rm -f asm/sx86-sol.s
+
+# a.out
+asm/sx86-out.o: asm/sx86unix.cpp
+ $(CPP) -DOUT asm/sx86unix.cpp | as -o asm/sx86-out.o
+
+# bsdi
+asm/sx86bsdi.o: asm/sx86unix.cpp
+ $(CPP) -DBSDI asm/sx86unix.cpp | as -o asm/sx86bsdi.o
+
+asm/sx86unix.cpp:
+ (cd asm; perl sha1-586.pl cpp >sx86unix.cpp)
+
+test: $(TEST)
+ ./$(TEST1)
+ ./$(TEST2)
+
+$(TEST1): $(TEST1).c $(LIB)
+ $(CC) -o $(TEST1) $(CFLAGS) $(TEST1).c $(LIB)
+
+$(TEST2): $(TEST2).c $(LIB)
+ $(CC) -o $(TEST2) $(CFLAGS) $(TEST2).c $(LIB)
+
+$(APP1): $(APP1).c $(LIB)
+ $(CC) -o $(APP1) $(CFLAGS) $(APP1).c $(LIB)
+
+$(APP2): $(APP2).c $(LIB)
+ $(CC) -o $(APP2) $(CFLAGS) $(APP2).c $(LIB)
+
+lint:
+ lint -DLINT $(INCLUDES) $(SRC)>fluff
+
+depend:
+ $(MAKEDEPEND) $(INCLUDES) $(PROGS) $(LIBSRC)
+
+dclean:
+ perl -pe 'if (/^# DO NOT DELETE THIS LINE/) {print; exit(0);}' $(MAKEFILE) >Makefile.new
+ mv -f Makefile.new $(MAKEFILE)
+
+clean:
+ /bin/rm -f $(LIB) $(TEST) $(APPS) *.o asm/*.o *.obj lib tags core .pure .nfs* *.old *.bak fluff
+
+cc:
+ $(MAKE) SHA_ASM_OBJ="" CC="cc" CFLAG="-O" all
+
+gcc:
+ $(MAKE) SHA_ASM_OBJ="" CC="gcc" CFLAGS="-O3 -fomit-frame-pointer" all
+
+x86-elf:
+ $(MAKE) SHA_ASM_OBJ="asm/sx86-elf.o" CFLAG="-DELF -DSHA1_ASM -DL_ENDIAN $(CFLAGS)" all
+
+x86-out:
+ $(MAKE) SHA_ASM_OBJ="asm/sx86-out.o" CFLAG="-DOUT -DSHA1_ASM -DL_ENDIAN $(CFLAGS)" all
+
+x86-solaris:
+ $(MAKE) SHA_ASM_OBJ="asm/sx86-sol.o" CFLAG="-DSOL -DSHA1_ASM -DL_ENDIAN $(CFLAGS)" all
+
+x86-bdsi:
+	$(MAKE) SHA_ASM_OBJ="asm/sx86bsdi.o" CFLAG="-DBSDI -DSHA1_ASM -DL_ENDIAN $(CFLAGS)" all
+
+# DO NOT DELETE THIS LINE -- make depend depends on it.
diff --git a/crypto/sha/asm/README b/crypto/sha/asm/README
new file mode 100644
index 0000000000..b7e755765f
--- /dev/null
+++ b/crypto/sha/asm/README
@@ -0,0 +1 @@
+C2.pl works
diff --git a/crypto/sha/asm/s1-win32.asm b/crypto/sha/asm/s1-win32.asm
new file mode 100644
index 0000000000..61335666b9
--- /dev/null
+++ b/crypto/sha/asm/s1-win32.asm
@@ -0,0 +1,1664 @@
+ ; Don't even think of reading this code
+ ; It was automatically generated by sha1-586.pl
+	; Which is a perl program used to generate the x86 assembler for
+ ; any of elf, a.out, BSDI,Win32, or Solaris
+ ; eric <eay@cryptsoft.com>
+ ;
+ TITLE sha1-586.asm
+ .486
+.model FLAT
+_TEXT SEGMENT
+PUBLIC _sha1_block_x86
+
+_sha1_block_x86 PROC NEAR
+ push esi
+ push ebp
+ mov eax, DWORD PTR 20[esp]
+ mov esi, DWORD PTR 16[esp]
+ add eax, esi
+ mov ebp, DWORD PTR 12[esp]
+ push ebx
+ sub eax, 64
+ push edi
+ mov ebx, DWORD PTR 4[ebp]
+ sub esp, 72
+ mov edx, DWORD PTR 12[ebp]
+ mov edi, DWORD PTR 16[ebp]
+ mov ecx, DWORD PTR 8[ebp]
+ mov DWORD PTR 68[esp],eax
+ ; First we need to setup the X array
+ mov eax, DWORD PTR [esi]
+L000start:
+ ; First, load the words onto the stack in network byte order
+ bswap eax
+ mov DWORD PTR [esp],eax
+ mov eax, DWORD PTR 4[esi]
+ bswap eax
+ mov DWORD PTR 4[esp],eax
+ mov eax, DWORD PTR 8[esi]
+ bswap eax
+ mov DWORD PTR 8[esp],eax
+ mov eax, DWORD PTR 12[esi]
+ bswap eax
+ mov DWORD PTR 12[esp],eax
+ mov eax, DWORD PTR 16[esi]
+ bswap eax
+ mov DWORD PTR 16[esp],eax
+ mov eax, DWORD PTR 20[esi]
+ bswap eax
+ mov DWORD PTR 20[esp],eax
+ mov eax, DWORD PTR 24[esi]
+ bswap eax
+ mov DWORD PTR 24[esp],eax
+ mov eax, DWORD PTR 28[esi]
+ bswap eax
+ mov DWORD PTR 28[esp],eax
+ mov eax, DWORD PTR 32[esi]
+ bswap eax
+ mov DWORD PTR 32[esp],eax
+ mov eax, DWORD PTR 36[esi]
+ bswap eax
+ mov DWORD PTR 36[esp],eax
+ mov eax, DWORD PTR 40[esi]
+ bswap eax
+ mov DWORD PTR 40[esp],eax
+ mov eax, DWORD PTR 44[esi]
+ bswap eax
+ mov DWORD PTR 44[esp],eax
+ mov eax, DWORD PTR 48[esi]
+ bswap eax
+ mov DWORD PTR 48[esp],eax
+ mov eax, DWORD PTR 52[esi]
+ bswap eax
+ mov DWORD PTR 52[esp],eax
+ mov eax, DWORD PTR 56[esi]
+ bswap eax
+ mov DWORD PTR 56[esp],eax
+ mov eax, DWORD PTR 60[esi]
+ bswap eax
+ mov DWORD PTR 60[esp],eax
+ ; We now have the X array on the stack
+ ; starting at sp-4
+ mov DWORD PTR 64[esp],esi
+ ;
+ ; Start processing
+ mov eax, DWORD PTR [ebp]
+ ; 00_15 0
+ mov esi, ecx
+ mov ebp, eax
+ xor esi, edx
+ rol ebp, 5
+ and esi, ebx
+ add ebp, edi
+ ror ebx, 1
+ mov edi, DWORD PTR [esp]
+ ror ebx, 1
+ xor esi, edx
+ lea ebp, DWORD PTR 1518500249[edi*1+ebp]
+ mov edi, ebx
+ add esi, ebp
+ xor edi, ecx
+ mov ebp, esi
+ and edi, eax
+ rol ebp, 5
+ add ebp, edx
+ mov edx, DWORD PTR 4[esp]
+ ror eax, 1
+ xor edi, ecx
+ ror eax, 1
+ lea ebp, DWORD PTR 1518500249[edx*1+ebp]
+ add edi, ebp
+ ; 00_15 2
+ mov edx, eax
+ mov ebp, edi
+ xor edx, ebx
+ rol ebp, 5
+ and edx, esi
+ add ebp, ecx
+ ror esi, 1
+ mov ecx, DWORD PTR 8[esp]
+ ror esi, 1
+ xor edx, ebx
+ lea ebp, DWORD PTR 1518500249[ecx*1+ebp]
+ mov ecx, esi
+ add edx, ebp
+ xor ecx, eax
+ mov ebp, edx
+ and ecx, edi
+ rol ebp, 5
+ add ebp, ebx
+ mov ebx, DWORD PTR 12[esp]
+ ror edi, 1
+ xor ecx, eax
+ ror edi, 1
+ lea ebp, DWORD PTR 1518500249[ebx*1+ebp]
+ add ecx, ebp
+ ; 00_15 4
+ mov ebx, edi
+ mov ebp, ecx
+ xor ebx, esi
+ rol ebp, 5
+ and ebx, edx
+ add ebp, eax
+ ror edx, 1
+ mov eax, DWORD PTR 16[esp]
+ ror edx, 1
+ xor ebx, esi
+ lea ebp, DWORD PTR 1518500249[eax*1+ebp]
+ mov eax, edx
+ add ebx, ebp
+ xor eax, edi
+ mov ebp, ebx
+ and eax, ecx
+ rol ebp, 5
+ add ebp, esi
+ mov esi, DWORD PTR 20[esp]
+ ror ecx, 1
+ xor eax, edi
+ ror ecx, 1
+ lea ebp, DWORD PTR 1518500249[esi*1+ebp]
+ add eax, ebp
+ ; 00_15 6
+ mov esi, ecx
+ mov ebp, eax
+ xor esi, edx
+ rol ebp, 5
+ and esi, ebx
+ add ebp, edi
+ ror ebx, 1
+ mov edi, DWORD PTR 24[esp]
+ ror ebx, 1
+ xor esi, edx
+ lea ebp, DWORD PTR 1518500249[edi*1+ebp]
+ mov edi, ebx
+ add esi, ebp
+ xor edi, ecx
+ mov ebp, esi
+ and edi, eax
+ rol ebp, 5
+ add ebp, edx
+ mov edx, DWORD PTR 28[esp]
+ ror eax, 1
+ xor edi, ecx
+ ror eax, 1
+ lea ebp, DWORD PTR 1518500249[edx*1+ebp]
+ add edi, ebp
+ ; 00_15 8
+ mov edx, eax
+ mov ebp, edi
+ xor edx, ebx
+ rol ebp, 5
+ and edx, esi
+ add ebp, ecx
+ ror esi, 1
+ mov ecx, DWORD PTR 32[esp]
+ ror esi, 1
+ xor edx, ebx
+ lea ebp, DWORD PTR 1518500249[ecx*1+ebp]
+ mov ecx, esi
+ add edx, ebp
+ xor ecx, eax
+ mov ebp, edx
+ and ecx, edi
+ rol ebp, 5
+ add ebp, ebx
+ mov ebx, DWORD PTR 36[esp]
+ ror edi, 1
+ xor ecx, eax
+ ror edi, 1
+ lea ebp, DWORD PTR 1518500249[ebx*1+ebp]
+ add ecx, ebp
+ ; 00_15 10
+ mov ebx, edi
+ mov ebp, ecx
+ xor ebx, esi
+ rol ebp, 5
+ and ebx, edx
+ add ebp, eax
+ ror edx, 1
+ mov eax, DWORD PTR 40[esp]
+ ror edx, 1
+ xor ebx, esi
+ lea ebp, DWORD PTR 1518500249[eax*1+ebp]
+ mov eax, edx
+ add ebx, ebp
+ xor eax, edi
+ mov ebp, ebx
+ and eax, ecx
+ rol ebp, 5
+ add ebp, esi
+ mov esi, DWORD PTR 44[esp]
+ ror ecx, 1
+ xor eax, edi
+ ror ecx, 1
+ lea ebp, DWORD PTR 1518500249[esi*1+ebp]
+ add eax, ebp
+ ; 00_15 12
+ mov esi, ecx
+ mov ebp, eax
+ xor esi, edx
+ rol ebp, 5
+ and esi, ebx
+ add ebp, edi
+ ror ebx, 1
+ mov edi, DWORD PTR 48[esp]
+ ror ebx, 1
+ xor esi, edx
+ lea ebp, DWORD PTR 1518500249[edi*1+ebp]
+ mov edi, ebx
+ add esi, ebp
+ xor edi, ecx
+ mov ebp, esi
+ and edi, eax
+ rol ebp, 5
+ add ebp, edx
+ mov edx, DWORD PTR 52[esp]
+ ror eax, 1
+ xor edi, ecx
+ ror eax, 1
+ lea ebp, DWORD PTR 1518500249[edx*1+ebp]
+ add edi, ebp
+ ; 00_15 14
+ mov edx, eax
+ mov ebp, edi
+ xor edx, ebx
+ rol ebp, 5
+ and edx, esi
+ add ebp, ecx
+ ror esi, 1
+ mov ecx, DWORD PTR 56[esp]
+ ror esi, 1
+ xor edx, ebx
+ lea ebp, DWORD PTR 1518500249[ecx*1+ebp]
+ mov ecx, esi
+ add edx, ebp
+ xor ecx, eax
+ mov ebp, edx
+ and ecx, edi
+ rol ebp, 5
+ add ebp, ebx
+ mov ebx, DWORD PTR 60[esp]
+ ror edi, 1
+ xor ecx, eax
+ ror edi, 1
+ lea ebp, DWORD PTR 1518500249[ebx*1+ebp]
+ add ecx, ebp
+ ; 16_19 16
+ nop
+ mov ebp, DWORD PTR [esp]
+ mov ebx, DWORD PTR 8[esp]
+ xor ebx, ebp
+ mov ebp, DWORD PTR 32[esp]
+ xor ebx, ebp
+ mov ebp, DWORD PTR 52[esp]
+ xor ebx, ebp
+ mov ebp, edi
+ rol ebx, 1
+ xor ebp, esi
+ mov DWORD PTR [esp],ebx
+ and ebp, edx
+ lea ebx, DWORD PTR 1518500249[eax*1+ebx]
+ xor ebp, esi
+ mov eax, ecx
+ add ebx, ebp
+ rol eax, 5
+ ror edx, 1
+ add ebx, eax
+ mov eax, DWORD PTR 4[esp]
+ mov ebp, DWORD PTR 12[esp]
+ xor eax, ebp
+ mov ebp, DWORD PTR 36[esp]
+ xor eax, ebp
+ mov ebp, DWORD PTR 56[esp]
+ ror edx, 1
+ xor eax, ebp
+ rol eax, 1
+ mov ebp, edx
+ xor ebp, edi
+ mov DWORD PTR 4[esp],eax
+ and ebp, ecx
+ lea eax, DWORD PTR 1518500249[esi*1+eax]
+ xor ebp, edi
+ mov esi, ebx
+ rol esi, 5
+ ror ecx, 1
+ add eax, esi
+ ror ecx, 1
+ add eax, ebp
+ ; 16_19 18
+ mov ebp, DWORD PTR 8[esp]
+ mov esi, DWORD PTR 16[esp]
+ xor esi, ebp
+ mov ebp, DWORD PTR 40[esp]
+ xor esi, ebp
+ mov ebp, DWORD PTR 60[esp]
+ xor esi, ebp
+ mov ebp, ecx
+ rol esi, 1
+ xor ebp, edx
+ mov DWORD PTR 8[esp],esi
+ and ebp, ebx
+ lea esi, DWORD PTR 1518500249[edi*1+esi]
+ xor ebp, edx
+ mov edi, eax
+ add esi, ebp
+ rol edi, 5
+ ror ebx, 1
+ add esi, edi
+ mov edi, DWORD PTR 12[esp]
+ mov ebp, DWORD PTR 20[esp]
+ xor edi, ebp
+ mov ebp, DWORD PTR 44[esp]
+ xor edi, ebp
+ mov ebp, DWORD PTR [esp]
+ ror ebx, 1
+ xor edi, ebp
+ rol edi, 1
+ mov ebp, ebx
+ xor ebp, ecx
+ mov DWORD PTR 12[esp],edi
+ and ebp, eax
+ lea edi, DWORD PTR 1518500249[edx*1+edi]
+ xor ebp, ecx
+ mov edx, esi
+ rol edx, 5
+ ror eax, 1
+ add edi, edx
+ ror eax, 1
+ add edi, ebp
+ ; 20_39 20
+ mov edx, DWORD PTR 16[esp]
+ mov ebp, DWORD PTR 24[esp]
+ xor edx, ebp
+ mov ebp, DWORD PTR 48[esp]
+ xor edx, ebp
+ mov ebp, DWORD PTR 4[esp]
+ xor edx, ebp
+ mov ebp, esi
+ rol edx, 1
+ xor ebp, eax
+ mov DWORD PTR 16[esp],edx
+ xor ebp, ebx
+ lea edx, DWORD PTR 1859775393[ecx*1+edx]
+ mov ecx, edi
+ rol ecx, 5
+ ror esi, 1
+ add ecx, ebp
+ ror esi, 1
+ add edx, ecx
+ ; 20_39 21
+ mov ecx, DWORD PTR 20[esp]
+ mov ebp, DWORD PTR 28[esp]
+ xor ecx, ebp
+ mov ebp, DWORD PTR 52[esp]
+ xor ecx, ebp
+ mov ebp, DWORD PTR 8[esp]
+ xor ecx, ebp
+ mov ebp, edi
+ rol ecx, 1
+ xor ebp, esi
+ mov DWORD PTR 20[esp],ecx
+ xor ebp, eax
+ lea ecx, DWORD PTR 1859775393[ebx*1+ecx]
+ mov ebx, edx
+ rol ebx, 5
+ ror edi, 1
+ add ebx, ebp
+ ror edi, 1
+ add ecx, ebx
+ ; 20_39 22
+ mov ebx, DWORD PTR 24[esp]
+ mov ebp, DWORD PTR 32[esp]
+ xor ebx, ebp
+ mov ebp, DWORD PTR 56[esp]
+ xor ebx, ebp
+ mov ebp, DWORD PTR 12[esp]
+ xor ebx, ebp
+ mov ebp, edx
+ rol ebx, 1
+ xor ebp, edi
+ mov DWORD PTR 24[esp],ebx
+ xor ebp, esi
+ lea ebx, DWORD PTR 1859775393[eax*1+ebx]
+ mov eax, ecx
+ rol eax, 5
+ ror edx, 1
+ add eax, ebp
+ ror edx, 1
+ add ebx, eax
+ ; 20_39 23
+ mov eax, DWORD PTR 28[esp]
+ mov ebp, DWORD PTR 36[esp]
+ xor eax, ebp
+ mov ebp, DWORD PTR 60[esp]
+ xor eax, ebp
+ mov ebp, DWORD PTR 16[esp]
+ xor eax, ebp
+ mov ebp, ecx
+ rol eax, 1
+ xor ebp, edx
+ mov DWORD PTR 28[esp],eax
+ xor ebp, edi
+ lea eax, DWORD PTR 1859775393[esi*1+eax]
+ mov esi, ebx
+ rol esi, 5
+ ror ecx, 1
+ add esi, ebp
+ ror ecx, 1
+ add eax, esi
+ ; 20_39 24
+ mov esi, DWORD PTR 32[esp]
+ mov ebp, DWORD PTR 40[esp]
+ xor esi, ebp
+ mov ebp, DWORD PTR [esp]
+ xor esi, ebp
+ mov ebp, DWORD PTR 20[esp]
+ xor esi, ebp
+ mov ebp, ebx
+ rol esi, 1
+ xor ebp, ecx
+ mov DWORD PTR 32[esp],esi
+ xor ebp, edx
+ lea esi, DWORD PTR 1859775393[edi*1+esi]
+ mov edi, eax
+ rol edi, 5
+ ror ebx, 1
+ add edi, ebp
+ ror ebx, 1
+ add esi, edi
+ ; 20_39 25
+ mov edi, DWORD PTR 36[esp]
+ mov ebp, DWORD PTR 44[esp]
+ xor edi, ebp
+ mov ebp, DWORD PTR 4[esp]
+ xor edi, ebp
+ mov ebp, DWORD PTR 24[esp]
+ xor edi, ebp
+ mov ebp, eax
+ rol edi, 1
+ xor ebp, ebx
+ mov DWORD PTR 36[esp],edi
+ xor ebp, ecx
+ lea edi, DWORD PTR 1859775393[edx*1+edi]
+ mov edx, esi
+ rol edx, 5
+ ror eax, 1
+ add edx, ebp
+ ror eax, 1
+ add edi, edx
+ ; 20_39 26
+ mov edx, DWORD PTR 40[esp]
+ mov ebp, DWORD PTR 48[esp]
+ xor edx, ebp
+ mov ebp, DWORD PTR 8[esp]
+ xor edx, ebp
+ mov ebp, DWORD PTR 28[esp]
+ xor edx, ebp
+ mov ebp, esi
+ rol edx, 1
+ xor ebp, eax
+ mov DWORD PTR 40[esp],edx
+ xor ebp, ebx
+ lea edx, DWORD PTR 1859775393[ecx*1+edx]
+ mov ecx, edi
+ rol ecx, 5
+ ror esi, 1
+ add ecx, ebp
+ ror esi, 1
+ add edx, ecx
+ ; 20_39 27
+ mov ecx, DWORD PTR 44[esp]
+ mov ebp, DWORD PTR 52[esp]
+ xor ecx, ebp
+ mov ebp, DWORD PTR 12[esp]
+ xor ecx, ebp
+ mov ebp, DWORD PTR 32[esp]
+ xor ecx, ebp
+ mov ebp, edi
+ rol ecx, 1
+ xor ebp, esi
+ mov DWORD PTR 44[esp],ecx
+ xor ebp, eax
+ lea ecx, DWORD PTR 1859775393[ebx*1+ecx]
+ mov ebx, edx
+ rol ebx, 5
+ ror edi, 1
+ add ebx, ebp
+ ror edi, 1
+ add ecx, ebx
+ ; 20_39 28
+ mov ebx, DWORD PTR 48[esp]
+ mov ebp, DWORD PTR 56[esp]
+ xor ebx, ebp
+ mov ebp, DWORD PTR 16[esp]
+ xor ebx, ebp
+ mov ebp, DWORD PTR 36[esp]
+ xor ebx, ebp
+ mov ebp, edx
+ rol ebx, 1
+ xor ebp, edi
+ mov DWORD PTR 48[esp],ebx
+ xor ebp, esi
+ lea ebx, DWORD PTR 1859775393[eax*1+ebx]
+ mov eax, ecx
+ rol eax, 5
+ ror edx, 1
+ add eax, ebp
+ ror edx, 1
+ add ebx, eax
+ ; 20_39 29
+ mov eax, DWORD PTR 52[esp]
+ mov ebp, DWORD PTR 60[esp]
+ xor eax, ebp
+ mov ebp, DWORD PTR 20[esp]
+ xor eax, ebp
+ mov ebp, DWORD PTR 40[esp]
+ xor eax, ebp
+ mov ebp, ecx
+ rol eax, 1
+ xor ebp, edx
+ mov DWORD PTR 52[esp],eax
+ xor ebp, edi
+ lea eax, DWORD PTR 1859775393[esi*1+eax]
+ mov esi, ebx
+ rol esi, 5
+ ror ecx, 1
+ add esi, ebp
+ ror ecx, 1
+ add eax, esi
+ ; 20_39 30
+ mov esi, DWORD PTR 56[esp]
+ mov ebp, DWORD PTR [esp]
+ xor esi, ebp
+ mov ebp, DWORD PTR 24[esp]
+ xor esi, ebp
+ mov ebp, DWORD PTR 44[esp]
+ xor esi, ebp
+ mov ebp, ebx
+ rol esi, 1
+ xor ebp, ecx
+ mov DWORD PTR 56[esp],esi
+ xor ebp, edx
+ lea esi, DWORD PTR 1859775393[edi*1+esi]
+ mov edi, eax
+ rol edi, 5
+ ror ebx, 1
+ add edi, ebp
+ ror ebx, 1
+ add esi, edi
+ ; 20_39 31
+ mov edi, DWORD PTR 60[esp]
+ mov ebp, DWORD PTR 4[esp]
+ xor edi, ebp
+ mov ebp, DWORD PTR 28[esp]
+ xor edi, ebp
+ mov ebp, DWORD PTR 48[esp]
+ xor edi, ebp
+ mov ebp, eax
+ rol edi, 1
+ xor ebp, ebx
+ mov DWORD PTR 60[esp],edi
+ xor ebp, ecx
+ lea edi, DWORD PTR 1859775393[edx*1+edi]
+ mov edx, esi
+ rol edx, 5
+ ror eax, 1
+ add edx, ebp
+ ror eax, 1
+ add edi, edx
+ ; 20_39 32
+ mov edx, DWORD PTR [esp]
+ mov ebp, DWORD PTR 8[esp]
+ xor edx, ebp
+ mov ebp, DWORD PTR 32[esp]
+ xor edx, ebp
+ mov ebp, DWORD PTR 52[esp]
+ xor edx, ebp
+ mov ebp, esi
+ rol edx, 1
+ xor ebp, eax
+ mov DWORD PTR [esp],edx
+ xor ebp, ebx
+ lea edx, DWORD PTR 1859775393[ecx*1+edx]
+ mov ecx, edi
+ rol ecx, 5
+ ror esi, 1
+ add ecx, ebp
+ ror esi, 1
+ add edx, ecx
+ ; 20_39 33
+ mov ecx, DWORD PTR 4[esp]
+ mov ebp, DWORD PTR 12[esp]
+ xor ecx, ebp
+ mov ebp, DWORD PTR 36[esp]
+ xor ecx, ebp
+ mov ebp, DWORD PTR 56[esp]
+ xor ecx, ebp
+ mov ebp, edi
+ rol ecx, 1
+ xor ebp, esi
+ mov DWORD PTR 4[esp],ecx
+ xor ebp, eax
+ lea ecx, DWORD PTR 1859775393[ebx*1+ecx]
+ mov ebx, edx
+ rol ebx, 5
+ ror edi, 1
+ add ebx, ebp
+ ror edi, 1
+ add ecx, ebx
+ ; 20_39 34
+ mov ebx, DWORD PTR 8[esp]
+ mov ebp, DWORD PTR 16[esp]
+ xor ebx, ebp
+ mov ebp, DWORD PTR 40[esp]
+ xor ebx, ebp
+ mov ebp, DWORD PTR 60[esp]
+ xor ebx, ebp
+ mov ebp, edx
+ rol ebx, 1
+ xor ebp, edi
+ mov DWORD PTR 8[esp],ebx
+ xor ebp, esi
+ lea ebx, DWORD PTR 1859775393[eax*1+ebx]
+ mov eax, ecx
+ rol eax, 5
+ ror edx, 1
+ add eax, ebp
+ ror edx, 1
+ add ebx, eax
+ ; 20_39 35
+ mov eax, DWORD PTR 12[esp]
+ mov ebp, DWORD PTR 20[esp]
+ xor eax, ebp
+ mov ebp, DWORD PTR 44[esp]
+ xor eax, ebp
+ mov ebp, DWORD PTR [esp]
+ xor eax, ebp
+ mov ebp, ecx
+ rol eax, 1
+ xor ebp, edx
+ mov DWORD PTR 12[esp],eax
+ xor ebp, edi
+ lea eax, DWORD PTR 1859775393[esi*1+eax]
+ mov esi, ebx
+ rol esi, 5
+ ror ecx, 1
+ add esi, ebp
+ ror ecx, 1
+ add eax, esi
+ ; 20_39 36
+ mov esi, DWORD PTR 16[esp]
+ mov ebp, DWORD PTR 24[esp]
+ xor esi, ebp
+ mov ebp, DWORD PTR 48[esp]
+ xor esi, ebp
+ mov ebp, DWORD PTR 4[esp]
+ xor esi, ebp
+ mov ebp, ebx
+ rol esi, 1
+ xor ebp, ecx
+ mov DWORD PTR 16[esp],esi
+ xor ebp, edx
+ lea esi, DWORD PTR 1859775393[edi*1+esi]
+ mov edi, eax
+ rol edi, 5
+ ror ebx, 1
+ add edi, ebp
+ ror ebx, 1
+ add esi, edi
+ ; 20_39 37
+ mov edi, DWORD PTR 20[esp]
+ mov ebp, DWORD PTR 28[esp]
+ xor edi, ebp
+ mov ebp, DWORD PTR 52[esp]
+ xor edi, ebp
+ mov ebp, DWORD PTR 8[esp]
+ xor edi, ebp
+ mov ebp, eax
+ rol edi, 1
+ xor ebp, ebx
+ mov DWORD PTR 20[esp],edi
+ xor ebp, ecx
+ lea edi, DWORD PTR 1859775393[edx*1+edi]
+ mov edx, esi
+ rol edx, 5
+ ror eax, 1
+ add edx, ebp
+ ror eax, 1
+ add edi, edx
+ ; 20_39 38
+ mov edx, DWORD PTR 24[esp]
+ mov ebp, DWORD PTR 32[esp]
+ xor edx, ebp
+ mov ebp, DWORD PTR 56[esp]
+ xor edx, ebp
+ mov ebp, DWORD PTR 12[esp]
+ xor edx, ebp
+ mov ebp, esi
+ rol edx, 1
+ xor ebp, eax
+ mov DWORD PTR 24[esp],edx
+ xor ebp, ebx
+ lea edx, DWORD PTR 1859775393[ecx*1+edx]
+ mov ecx, edi
+ rol ecx, 5
+ ror esi, 1
+ add ecx, ebp
+ ror esi, 1
+ add edx, ecx
+ ; 20_39 39
+ mov ecx, DWORD PTR 28[esp]
+ mov ebp, DWORD PTR 36[esp]
+ xor ecx, ebp
+ mov ebp, DWORD PTR 60[esp]
+ xor ecx, ebp
+ mov ebp, DWORD PTR 16[esp]
+ xor ecx, ebp
+ mov ebp, edi
+ rol ecx, 1
+ xor ebp, esi
+ mov DWORD PTR 28[esp],ecx
+ xor ebp, eax
+ lea ecx, DWORD PTR 1859775393[ebx*1+ecx]
+ mov ebx, edx
+ rol ebx, 5
+ ror edi, 1
+ add ebx, ebp
+ ror edi, 1
+ add ecx, ebx
+ ; 40_59 40
+ mov ebx, DWORD PTR 32[esp]
+ mov ebp, DWORD PTR 40[esp]
+ xor ebx, ebp
+ mov ebp, DWORD PTR [esp]
+ xor ebx, ebp
+ mov ebp, DWORD PTR 20[esp]
+ xor ebx, ebp
+ mov ebp, edx
+ rol ebx, 1
+ or ebp, edi
+ mov DWORD PTR 32[esp],ebx
+ and ebp, esi
+ lea ebx, DWORD PTR 2400959708[eax*1+ebx]
+ mov eax, edx
+ ror edx, 1
+ and eax, edi
+ or ebp, eax
+ mov eax, ecx
+ rol eax, 5
+ add ebp, eax
+ mov eax, DWORD PTR 36[esp]
+ add ebx, ebp
+ mov ebp, DWORD PTR 44[esp]
+ xor eax, ebp
+ mov ebp, DWORD PTR 4[esp]
+ xor eax, ebp
+ mov ebp, DWORD PTR 24[esp]
+ ror edx, 1
+ xor eax, ebp
+ rol eax, 1
+ mov ebp, ecx
+ mov DWORD PTR 36[esp],eax
+ or ebp, edx
+ lea eax, DWORD PTR 2400959708[esi*1+eax]
+ mov esi, ecx
+ and ebp, edi
+ and esi, edx
+ or ebp, esi
+ mov esi, ebx
+ rol esi, 5
+ ror ecx, 1
+ add ebp, esi
+ ror ecx, 1
+ add eax, ebp
+ ; 40_59 41
+ ; 40_59 42
+ mov esi, DWORD PTR 40[esp]
+ mov ebp, DWORD PTR 48[esp]
+ xor esi, ebp
+ mov ebp, DWORD PTR 8[esp]
+ xor esi, ebp
+ mov ebp, DWORD PTR 28[esp]
+ xor esi, ebp
+ mov ebp, ebx
+ rol esi, 1
+ or ebp, ecx
+ mov DWORD PTR 40[esp],esi
+ and ebp, edx
+ lea esi, DWORD PTR 2400959708[edi*1+esi]
+ mov edi, ebx
+ ror ebx, 1
+ and edi, ecx
+ or ebp, edi
+ mov edi, eax
+ rol edi, 5
+ add ebp, edi
+ mov edi, DWORD PTR 44[esp]
+ add esi, ebp
+ mov ebp, DWORD PTR 52[esp]
+ xor edi, ebp
+ mov ebp, DWORD PTR 12[esp]
+ xor edi, ebp
+ mov ebp, DWORD PTR 32[esp]
+ ror ebx, 1
+ xor edi, ebp
+ rol edi, 1
+ mov ebp, eax
+ mov DWORD PTR 44[esp],edi
+ or ebp, ebx
+ lea edi, DWORD PTR 2400959708[edx*1+edi]
+ mov edx, eax
+ and ebp, ecx
+ and edx, ebx
+ or ebp, edx
+ mov edx, esi
+ rol edx, 5
+ ror eax, 1
+ add ebp, edx
+ ror eax, 1
+ add edi, ebp
+ ; 40_59 43
+ ; 40_59 44
+ mov edx, DWORD PTR 48[esp]
+ mov ebp, DWORD PTR 56[esp]
+ xor edx, ebp
+ mov ebp, DWORD PTR 16[esp]
+ xor edx, ebp
+ mov ebp, DWORD PTR 36[esp]
+ xor edx, ebp
+ mov ebp, esi
+ rol edx, 1
+ or ebp, eax
+ mov DWORD PTR 48[esp],edx
+ and ebp, ebx
+ lea edx, DWORD PTR 2400959708[ecx*1+edx]
+ mov ecx, esi
+ ror esi, 1
+ and ecx, eax
+ or ebp, ecx
+ mov ecx, edi
+ rol ecx, 5
+ add ebp, ecx
+ mov ecx, DWORD PTR 52[esp]
+ add edx, ebp
+ mov ebp, DWORD PTR 60[esp]
+ xor ecx, ebp
+ mov ebp, DWORD PTR 20[esp]
+ xor ecx, ebp
+ mov ebp, DWORD PTR 40[esp]
+ ror esi, 1
+ xor ecx, ebp
+ rol ecx, 1
+ mov ebp, edi
+ mov DWORD PTR 52[esp],ecx
+ or ebp, esi
+ lea ecx, DWORD PTR 2400959708[ebx*1+ecx]
+ mov ebx, edi
+ and ebp, eax
+ and ebx, esi
+ or ebp, ebx
+ mov ebx, edx
+ rol ebx, 5
+ ror edi, 1
+ add ebp, ebx
+ ror edi, 1
+ add ecx, ebp
+ ; 40_59 45
+ ; 40_59 46
+ mov ebx, DWORD PTR 56[esp]
+ mov ebp, DWORD PTR [esp]
+ xor ebx, ebp
+ mov ebp, DWORD PTR 24[esp]
+ xor ebx, ebp
+ mov ebp, DWORD PTR 44[esp]
+ xor ebx, ebp
+ mov ebp, edx
+ rol ebx, 1
+ or ebp, edi
+ mov DWORD PTR 56[esp],ebx
+ and ebp, esi
+ lea ebx, DWORD PTR 2400959708[eax*1+ebx]
+ mov eax, edx
+ ror edx, 1
+ and eax, edi
+ or ebp, eax
+ mov eax, ecx
+ rol eax, 5
+ add ebp, eax
+ mov eax, DWORD PTR 60[esp]
+ add ebx, ebp
+ mov ebp, DWORD PTR 4[esp]
+ xor eax, ebp
+ mov ebp, DWORD PTR 28[esp]
+ xor eax, ebp
+ mov ebp, DWORD PTR 48[esp]
+ ror edx, 1
+ xor eax, ebp
+ rol eax, 1
+ mov ebp, ecx
+ mov DWORD PTR 60[esp],eax
+ or ebp, edx
+ lea eax, DWORD PTR 2400959708[esi*1+eax]
+ mov esi, ecx
+ and ebp, edi
+ and esi, edx
+ or ebp, esi
+ mov esi, ebx
+ rol esi, 5
+ ror ecx, 1
+ add ebp, esi
+ ror ecx, 1
+ add eax, ebp
+ ; 40_59 47
+ ; 40_59 48
+ mov esi, DWORD PTR [esp]
+ mov ebp, DWORD PTR 8[esp]
+ xor esi, ebp
+ mov ebp, DWORD PTR 32[esp]
+ xor esi, ebp
+ mov ebp, DWORD PTR 52[esp]
+ xor esi, ebp
+ mov ebp, ebx
+ rol esi, 1
+ or ebp, ecx
+ mov DWORD PTR [esp],esi
+ and ebp, edx
+ lea esi, DWORD PTR 2400959708[edi*1+esi]
+ mov edi, ebx
+ ror ebx, 1
+ and edi, ecx
+ or ebp, edi
+ mov edi, eax
+ rol edi, 5
+ add ebp, edi
+ mov edi, DWORD PTR 4[esp]
+ add esi, ebp
+ mov ebp, DWORD PTR 12[esp]
+ xor edi, ebp
+ mov ebp, DWORD PTR 36[esp]
+ xor edi, ebp
+ mov ebp, DWORD PTR 56[esp]
+ ror ebx, 1
+ xor edi, ebp
+ rol edi, 1
+ mov ebp, eax
+ mov DWORD PTR 4[esp],edi
+ or ebp, ebx
+ lea edi, DWORD PTR 2400959708[edx*1+edi]
+ mov edx, eax
+ and ebp, ecx
+ and edx, ebx
+ or ebp, edx
+ mov edx, esi
+ rol edx, 5
+ ror eax, 1
+ add ebp, edx
+ ror eax, 1
+ add edi, ebp
+ ; 40_59 49
+ ; 40_59 50
+ mov edx, DWORD PTR 8[esp]
+ mov ebp, DWORD PTR 16[esp]
+ xor edx, ebp
+ mov ebp, DWORD PTR 40[esp]
+ xor edx, ebp
+ mov ebp, DWORD PTR 60[esp]
+ xor edx, ebp
+ mov ebp, esi
+ rol edx, 1
+ or ebp, eax
+ mov DWORD PTR 8[esp],edx
+ and ebp, ebx
+ lea edx, DWORD PTR 2400959708[ecx*1+edx]
+ mov ecx, esi
+ ror esi, 1
+ and ecx, eax
+ or ebp, ecx
+ mov ecx, edi
+ rol ecx, 5
+ add ebp, ecx
+ mov ecx, DWORD PTR 12[esp]
+ add edx, ebp
+ mov ebp, DWORD PTR 20[esp]
+ xor ecx, ebp
+ mov ebp, DWORD PTR 44[esp]
+ xor ecx, ebp
+ mov ebp, DWORD PTR [esp]
+ ror esi, 1
+ xor ecx, ebp
+ rol ecx, 1
+ mov ebp, edi
+ mov DWORD PTR 12[esp],ecx
+ or ebp, esi
+ lea ecx, DWORD PTR 2400959708[ebx*1+ecx]
+ mov ebx, edi
+ and ebp, eax
+ and ebx, esi
+ or ebp, ebx
+ mov ebx, edx
+ rol ebx, 5
+ ror edi, 1
+ add ebp, ebx
+ ror edi, 1
+ add ecx, ebp
+ ; 40_59 51
+ ; 40_59 52
+ mov ebx, DWORD PTR 16[esp]
+ mov ebp, DWORD PTR 24[esp]
+ xor ebx, ebp
+ mov ebp, DWORD PTR 48[esp]
+ xor ebx, ebp
+ mov ebp, DWORD PTR 4[esp]
+ xor ebx, ebp
+ mov ebp, edx
+ rol ebx, 1
+ or ebp, edi
+ mov DWORD PTR 16[esp],ebx
+ and ebp, esi
+ lea ebx, DWORD PTR 2400959708[eax*1+ebx]
+ mov eax, edx
+ ror edx, 1
+ and eax, edi
+ or ebp, eax
+ mov eax, ecx
+ rol eax, 5
+ add ebp, eax
+ mov eax, DWORD PTR 20[esp]
+ add ebx, ebp
+ mov ebp, DWORD PTR 28[esp]
+ xor eax, ebp
+ mov ebp, DWORD PTR 52[esp]
+ xor eax, ebp
+ mov ebp, DWORD PTR 8[esp]
+ ror edx, 1
+ xor eax, ebp
+ rol eax, 1
+ mov ebp, ecx
+ mov DWORD PTR 20[esp],eax
+ or ebp, edx
+ lea eax, DWORD PTR 2400959708[esi*1+eax]
+ mov esi, ecx
+ and ebp, edi
+ and esi, edx
+ or ebp, esi
+ mov esi, ebx
+ rol esi, 5
+ ror ecx, 1
+ add ebp, esi
+ ror ecx, 1
+ add eax, ebp
+ ; 40_59 53
+ ; 40_59 54
+ mov esi, DWORD PTR 24[esp]
+ mov ebp, DWORD PTR 32[esp]
+ xor esi, ebp
+ mov ebp, DWORD PTR 56[esp]
+ xor esi, ebp
+ mov ebp, DWORD PTR 12[esp]
+ xor esi, ebp
+ mov ebp, ebx
+ rol esi, 1
+ or ebp, ecx
+ mov DWORD PTR 24[esp],esi
+ and ebp, edx
+ lea esi, DWORD PTR 2400959708[edi*1+esi]
+ mov edi, ebx
+ ror ebx, 1
+ and edi, ecx
+ or ebp, edi
+ mov edi, eax
+ rol edi, 5
+ add ebp, edi
+ mov edi, DWORD PTR 28[esp]
+ add esi, ebp
+ mov ebp, DWORD PTR 36[esp]
+ xor edi, ebp
+ mov ebp, DWORD PTR 60[esp]
+ xor edi, ebp
+ mov ebp, DWORD PTR 16[esp]
+ ror ebx, 1
+ xor edi, ebp
+ rol edi, 1
+ mov ebp, eax
+ mov DWORD PTR 28[esp],edi
+ or ebp, ebx
+ lea edi, DWORD PTR 2400959708[edx*1+edi]
+ mov edx, eax
+ and ebp, ecx
+ and edx, ebx
+ or ebp, edx
+ mov edx, esi
+ rol edx, 5
+ ror eax, 1
+ add ebp, edx
+ ror eax, 1
+ add edi, ebp
+ ; 40_59 55
+ ; 40_59 56
+ mov edx, DWORD PTR 32[esp]
+ mov ebp, DWORD PTR 40[esp]
+ xor edx, ebp
+ mov ebp, DWORD PTR [esp]
+ xor edx, ebp
+ mov ebp, DWORD PTR 20[esp]
+ xor edx, ebp
+ mov ebp, esi
+ rol edx, 1
+ or ebp, eax
+ mov DWORD PTR 32[esp],edx
+ and ebp, ebx
+ lea edx, DWORD PTR 2400959708[ecx*1+edx]
+ mov ecx, esi
+ ror esi, 1
+ and ecx, eax
+ or ebp, ecx
+ mov ecx, edi
+ rol ecx, 5
+ add ebp, ecx
+ mov ecx, DWORD PTR 36[esp]
+ add edx, ebp
+ mov ebp, DWORD PTR 44[esp]
+ xor ecx, ebp
+ mov ebp, DWORD PTR 4[esp]
+ xor ecx, ebp
+ mov ebp, DWORD PTR 24[esp]
+ ror esi, 1
+ xor ecx, ebp
+ rol ecx, 1
+ mov ebp, edi
+ mov DWORD PTR 36[esp],ecx
+ or ebp, esi
+ lea ecx, DWORD PTR 2400959708[ebx*1+ecx]
+ mov ebx, edi
+ and ebp, eax
+ and ebx, esi
+ or ebp, ebx
+ mov ebx, edx
+ rol ebx, 5
+ ror edi, 1
+ add ebp, ebx
+ ror edi, 1
+ add ecx, ebp
+ ; 40_59 57
+ ; 40_59 58
+ mov ebx, DWORD PTR 40[esp]
+ mov ebp, DWORD PTR 48[esp]
+ xor ebx, ebp
+ mov ebp, DWORD PTR 8[esp]
+ xor ebx, ebp
+ mov ebp, DWORD PTR 28[esp]
+ xor ebx, ebp
+ mov ebp, edx
+ rol ebx, 1
+ or ebp, edi
+ mov DWORD PTR 40[esp],ebx
+ and ebp, esi
+ lea ebx, DWORD PTR 2400959708[eax*1+ebx]
+ mov eax, edx
+ ror edx, 1
+ and eax, edi
+ or ebp, eax
+ mov eax, ecx
+ rol eax, 5
+ add ebp, eax
+ mov eax, DWORD PTR 44[esp]
+ add ebx, ebp
+ mov ebp, DWORD PTR 52[esp]
+ xor eax, ebp
+ mov ebp, DWORD PTR 12[esp]
+ xor eax, ebp
+ mov ebp, DWORD PTR 32[esp]
+ ror edx, 1
+ xor eax, ebp
+ rol eax, 1
+ mov ebp, ecx
+ mov DWORD PTR 44[esp],eax
+ or ebp, edx
+ lea eax, DWORD PTR 2400959708[esi*1+eax]
+ mov esi, ecx
+ and ebp, edi
+ and esi, edx
+ or ebp, esi
+ mov esi, ebx
+ rol esi, 5
+ ror ecx, 1
+ add ebp, esi
+ ror ecx, 1
+ add eax, ebp
+ ; 40_59 59
+ ; 20_39 60
+ mov esi, DWORD PTR 48[esp]
+ mov ebp, DWORD PTR 56[esp]
+ xor esi, ebp
+ mov ebp, DWORD PTR 16[esp]
+ xor esi, ebp
+ mov ebp, DWORD PTR 36[esp]
+ xor esi, ebp
+ mov ebp, ebx
+ rol esi, 1
+ xor ebp, ecx
+ mov DWORD PTR 48[esp],esi
+ xor ebp, edx
+ lea esi, DWORD PTR 3395469782[edi*1+esi]
+ mov edi, eax
+ rol edi, 5
+ ror ebx, 1
+ add edi, ebp
+ ror ebx, 1
+ add esi, edi
+ ; 20_39 61
+ mov edi, DWORD PTR 52[esp]
+ mov ebp, DWORD PTR 60[esp]
+ xor edi, ebp
+ mov ebp, DWORD PTR 20[esp]
+ xor edi, ebp
+ mov ebp, DWORD PTR 40[esp]
+ xor edi, ebp
+ mov ebp, eax
+ rol edi, 1
+ xor ebp, ebx
+ mov DWORD PTR 52[esp],edi
+ xor ebp, ecx
+ lea edi, DWORD PTR 3395469782[edx*1+edi]
+ mov edx, esi
+ rol edx, 5
+ ror eax, 1
+ add edx, ebp
+ ror eax, 1
+ add edi, edx
+ ; 20_39 62
+ mov edx, DWORD PTR 56[esp]
+ mov ebp, DWORD PTR [esp]
+ xor edx, ebp
+ mov ebp, DWORD PTR 24[esp]
+ xor edx, ebp
+ mov ebp, DWORD PTR 44[esp]
+ xor edx, ebp
+ mov ebp, esi
+ rol edx, 1
+ xor ebp, eax
+ mov DWORD PTR 56[esp],edx
+ xor ebp, ebx
+ lea edx, DWORD PTR 3395469782[ecx*1+edx]
+ mov ecx, edi
+ rol ecx, 5
+ ror esi, 1
+ add ecx, ebp
+ ror esi, 1
+ add edx, ecx
+ ; 20_39 63
+ mov ecx, DWORD PTR 60[esp]
+ mov ebp, DWORD PTR 4[esp]
+ xor ecx, ebp
+ mov ebp, DWORD PTR 28[esp]
+ xor ecx, ebp
+ mov ebp, DWORD PTR 48[esp]
+ xor ecx, ebp
+ mov ebp, edi
+ rol ecx, 1
+ xor ebp, esi
+ mov DWORD PTR 60[esp],ecx
+ xor ebp, eax
+ lea ecx, DWORD PTR 3395469782[ebx*1+ecx]
+ mov ebx, edx
+ rol ebx, 5
+ ror edi, 1
+ add ebx, ebp
+ ror edi, 1
+ add ecx, ebx
+ ; 20_39 64
+ mov ebx, DWORD PTR [esp]
+ mov ebp, DWORD PTR 8[esp]
+ xor ebx, ebp
+ mov ebp, DWORD PTR 32[esp]
+ xor ebx, ebp
+ mov ebp, DWORD PTR 52[esp]
+ xor ebx, ebp
+ mov ebp, edx
+ rol ebx, 1
+ xor ebp, edi
+ mov DWORD PTR [esp],ebx
+ xor ebp, esi
+ lea ebx, DWORD PTR 3395469782[eax*1+ebx]
+ mov eax, ecx
+ rol eax, 5
+ ror edx, 1
+ add eax, ebp
+ ror edx, 1
+ add ebx, eax
+ ; 20_39 65
+ mov eax, DWORD PTR 4[esp]
+ mov ebp, DWORD PTR 12[esp]
+ xor eax, ebp
+ mov ebp, DWORD PTR 36[esp]
+ xor eax, ebp
+ mov ebp, DWORD PTR 56[esp]
+ xor eax, ebp
+ mov ebp, ecx
+ rol eax, 1
+ xor ebp, edx
+ mov DWORD PTR 4[esp],eax
+ xor ebp, edi
+ lea eax, DWORD PTR 3395469782[esi*1+eax]
+ mov esi, ebx
+ rol esi, 5
+ ror ecx, 1
+ add esi, ebp
+ ror ecx, 1
+ add eax, esi
+ ; 20_39 66
+ mov esi, DWORD PTR 8[esp]
+ mov ebp, DWORD PTR 16[esp]
+ xor esi, ebp
+ mov ebp, DWORD PTR 40[esp]
+ xor esi, ebp
+ mov ebp, DWORD PTR 60[esp]
+ xor esi, ebp
+ mov ebp, ebx
+ rol esi, 1
+ xor ebp, ecx
+ mov DWORD PTR 8[esp],esi
+ xor ebp, edx
+ lea esi, DWORD PTR 3395469782[edi*1+esi]
+ mov edi, eax
+ rol edi, 5
+ ror ebx, 1
+ add edi, ebp
+ ror ebx, 1
+ add esi, edi
+ ; 20_39 67
+ mov edi, DWORD PTR 12[esp]
+ mov ebp, DWORD PTR 20[esp]
+ xor edi, ebp
+ mov ebp, DWORD PTR 44[esp]
+ xor edi, ebp
+ mov ebp, DWORD PTR [esp]
+ xor edi, ebp
+ mov ebp, eax
+ rol edi, 1
+ xor ebp, ebx
+ mov DWORD PTR 12[esp],edi
+ xor ebp, ecx
+ lea edi, DWORD PTR 3395469782[edx*1+edi]
+ mov edx, esi
+ rol edx, 5
+ ror eax, 1
+ add edx, ebp
+ ror eax, 1
+ add edi, edx
+ ; 20_39 68
+ mov edx, DWORD PTR 16[esp]
+ mov ebp, DWORD PTR 24[esp]
+ xor edx, ebp
+ mov ebp, DWORD PTR 48[esp]
+ xor edx, ebp
+ mov ebp, DWORD PTR 4[esp]
+ xor edx, ebp
+ mov ebp, esi
+ rol edx, 1
+ xor ebp, eax
+ mov DWORD PTR 16[esp],edx
+ xor ebp, ebx
+ lea edx, DWORD PTR 3395469782[ecx*1+edx]
+ mov ecx, edi
+ rol ecx, 5
+ ror esi, 1
+ add ecx, ebp
+ ror esi, 1
+ add edx, ecx
+ ; 20_39 69
+ mov ecx, DWORD PTR 20[esp]
+ mov ebp, DWORD PTR 28[esp]
+ xor ecx, ebp
+ mov ebp, DWORD PTR 52[esp]
+ xor ecx, ebp
+ mov ebp, DWORD PTR 8[esp]
+ xor ecx, ebp
+ mov ebp, edi
+ rol ecx, 1
+ xor ebp, esi
+ mov DWORD PTR 20[esp],ecx
+ xor ebp, eax
+ lea ecx, DWORD PTR 3395469782[ebx*1+ecx]
+ mov ebx, edx
+ rol ebx, 5
+ ror edi, 1
+ add ebx, ebp
+ ror edi, 1
+ add ecx, ebx
+ ; 20_39 70
+ mov ebx, DWORD PTR 24[esp]
+ mov ebp, DWORD PTR 32[esp]
+ xor ebx, ebp
+ mov ebp, DWORD PTR 56[esp]
+ xor ebx, ebp
+ mov ebp, DWORD PTR 12[esp]
+ xor ebx, ebp
+ mov ebp, edx
+ rol ebx, 1
+ xor ebp, edi
+ mov DWORD PTR 24[esp],ebx
+ xor ebp, esi
+ lea ebx, DWORD PTR 3395469782[eax*1+ebx]
+ mov eax, ecx
+ rol eax, 5
+ ror edx, 1
+ add eax, ebp
+ ror edx, 1
+ add ebx, eax
+ ; 20_39 71
+ mov eax, DWORD PTR 28[esp]
+ mov ebp, DWORD PTR 36[esp]
+ xor eax, ebp
+ mov ebp, DWORD PTR 60[esp]
+ xor eax, ebp
+ mov ebp, DWORD PTR 16[esp]
+ xor eax, ebp
+ mov ebp, ecx
+ rol eax, 1
+ xor ebp, edx
+ mov DWORD PTR 28[esp],eax
+ xor ebp, edi
+ lea eax, DWORD PTR 3395469782[esi*1+eax]
+ mov esi, ebx
+ rol esi, 5
+ ror ecx, 1
+ add esi, ebp
+ ror ecx, 1
+ add eax, esi
+ ; 20_39 72
+ mov esi, DWORD PTR 32[esp]
+ mov ebp, DWORD PTR 40[esp]
+ xor esi, ebp
+ mov ebp, DWORD PTR [esp]
+ xor esi, ebp
+ mov ebp, DWORD PTR 20[esp]
+ xor esi, ebp
+ mov ebp, ebx
+ rol esi, 1
+ xor ebp, ecx
+ mov DWORD PTR 32[esp],esi
+ xor ebp, edx
+ lea esi, DWORD PTR 3395469782[edi*1+esi]
+ mov edi, eax
+ rol edi, 5
+ ror ebx, 1
+ add edi, ebp
+ ror ebx, 1
+ add esi, edi
+ ; 20_39 73
+ mov edi, DWORD PTR 36[esp]
+ mov ebp, DWORD PTR 44[esp]
+ xor edi, ebp
+ mov ebp, DWORD PTR 4[esp]
+ xor edi, ebp
+ mov ebp, DWORD PTR 24[esp]
+ xor edi, ebp
+ mov ebp, eax
+ rol edi, 1
+ xor ebp, ebx
+ mov DWORD PTR 36[esp],edi
+ xor ebp, ecx
+ lea edi, DWORD PTR 3395469782[edx*1+edi]
+ mov edx, esi
+ rol edx, 5
+ ror eax, 1
+ add edx, ebp
+ ror eax, 1
+ add edi, edx
+ ; 20_39 74
+ mov edx, DWORD PTR 40[esp]
+ mov ebp, DWORD PTR 48[esp]
+ xor edx, ebp
+ mov ebp, DWORD PTR 8[esp]
+ xor edx, ebp
+ mov ebp, DWORD PTR 28[esp]
+ xor edx, ebp
+ mov ebp, esi
+ rol edx, 1
+ xor ebp, eax
+ mov DWORD PTR 40[esp],edx
+ xor ebp, ebx
+ lea edx, DWORD PTR 3395469782[ecx*1+edx]
+ mov ecx, edi
+ rol ecx, 5
+ ror esi, 1
+ add ecx, ebp
+ ror esi, 1
+ add edx, ecx
+ ; 20_39 75
+ mov ecx, DWORD PTR 44[esp]
+ mov ebp, DWORD PTR 52[esp]
+ xor ecx, ebp
+ mov ebp, DWORD PTR 12[esp]
+ xor ecx, ebp
+ mov ebp, DWORD PTR 32[esp]
+ xor ecx, ebp
+ mov ebp, edi
+ rol ecx, 1
+ xor ebp, esi
+ mov DWORD PTR 44[esp],ecx
+ xor ebp, eax
+ lea ecx, DWORD PTR 3395469782[ebx*1+ecx]
+ mov ebx, edx
+ rol ebx, 5
+ ror edi, 1
+ add ebx, ebp
+ ror edi, 1
+ add ecx, ebx
+ ; 20_39 76
+ mov ebx, DWORD PTR 48[esp]
+ mov ebp, DWORD PTR 56[esp]
+ xor ebx, ebp
+ mov ebp, DWORD PTR 16[esp]
+ xor ebx, ebp
+ mov ebp, DWORD PTR 36[esp]
+ xor ebx, ebp
+ mov ebp, edx
+ rol ebx, 1
+ xor ebp, edi
+ mov DWORD PTR 48[esp],ebx
+ xor ebp, esi
+ lea ebx, DWORD PTR 3395469782[eax*1+ebx]
+ mov eax, ecx
+ rol eax, 5
+ ror edx, 1
+ add eax, ebp
+ ror edx, 1
+ add ebx, eax
+ ; 20_39 77
+ mov eax, DWORD PTR 52[esp]
+ mov ebp, DWORD PTR 60[esp]
+ xor eax, ebp
+ mov ebp, DWORD PTR 20[esp]
+ xor eax, ebp
+ mov ebp, DWORD PTR 40[esp]
+ xor eax, ebp
+ mov ebp, ecx
+ rol eax, 1
+ xor ebp, edx
+ mov DWORD PTR 52[esp],eax
+ xor ebp, edi
+ lea eax, DWORD PTR 3395469782[esi*1+eax]
+ mov esi, ebx
+ rol esi, 5
+ ror ecx, 1
+ add esi, ebp
+ ror ecx, 1
+ add eax, esi
+ ; 20_39 78
+ mov esi, DWORD PTR 56[esp]
+ mov ebp, DWORD PTR [esp]
+ xor esi, ebp
+ mov ebp, DWORD PTR 24[esp]
+ xor esi, ebp
+ mov ebp, DWORD PTR 44[esp]
+ xor esi, ebp
+ mov ebp, ebx
+ rol esi, 1
+ xor ebp, ecx
+ mov DWORD PTR 56[esp],esi
+ xor ebp, edx
+ lea esi, DWORD PTR 3395469782[edi*1+esi]
+ mov edi, eax
+ rol edi, 5
+ ror ebx, 1
+ add edi, ebp
+ ror ebx, 1
+ add esi, edi
+ ; 20_39 79
+ mov edi, DWORD PTR 60[esp]
+ mov ebp, DWORD PTR 4[esp]
+ xor edi, ebp
+ mov ebp, DWORD PTR 28[esp]
+ xor edi, ebp
+ mov ebp, DWORD PTR 48[esp]
+ xor edi, ebp
+ mov ebp, eax
+ rol edi, 1
+ xor ebp, ebx
+ mov DWORD PTR 60[esp],edi
+ xor ebp, ecx
+ lea edi, DWORD PTR 3395469782[edx*1+edi]
+ mov edx, esi
+ rol edx, 5
+ add edx, ebp
+ mov ebp, DWORD PTR 92[esp]
+ ror eax, 1
+ add edi, edx
+ ror eax, 1
+ ; End processing
+ ;
+ mov edx, DWORD PTR 12[ebp]
+ add edx, ebx
+ mov ebx, DWORD PTR 4[ebp]
+ add ebx, esi
+ mov esi, eax
+ mov eax, DWORD PTR [ebp]
+ mov DWORD PTR 12[ebp],edx
+ add eax, edi
+ mov edi, DWORD PTR 16[ebp]
+ add edi, ecx
+ mov ecx, DWORD PTR 8[ebp]
+ add ecx, esi
+ mov DWORD PTR [ebp],eax
+ mov esi, DWORD PTR 64[esp]
+ mov DWORD PTR 8[ebp],ecx
+ add esi, 64
+ mov eax, DWORD PTR 68[esp]
+ mov DWORD PTR 16[ebp],edi
+ cmp eax, esi
+ mov DWORD PTR 4[ebp],ebx
+ jl $L001end
+ mov eax, DWORD PTR [esi]
+ jmp L000start
+$L001end:
+ add esp, 72
+ pop edi
+ pop ebx
+ pop ebp
+ pop esi
+ ret
+_sha1_block_x86 ENDP
+_TEXT ENDS
+END
diff --git a/crypto/sha/asm/sha1-586.pl b/crypto/sha/asm/sha1-586.pl
new file mode 100644
index 0000000000..38bb27532d
--- /dev/null
+++ b/crypto/sha/asm/sha1-586.pl
@@ -0,0 +1,491 @@
+#!/usr/local/bin/perl
+
+$normal=0;
+
+push(@INC,"perlasm","../../perlasm");
+require "x86asm.pl";
+
+&asm_init($ARGV[0],"sha1-586.pl");
+
+$A="eax";
+$B="ebx";
+$C="ecx";
+$D="edx";
+$E="edi";
+$T="esi";
+$tmp1="ebp";
+
+$off=9*4;
+
+@K=(0x5a827999,0x6ed9eba1,0x8f1bbcdc,0xca62c1d6);
+
+&sha1_block("sha1_block_x86");
+
+&asm_finish();
+
+sub Nn
+ {
+ local($p)=@_;
+ local(%n)=($A,$T,$B,$A,$C,$B,$D,$C,$E,$D,$T,$E);
+ return($n{$p});
+ }
+
+sub Np
+ {
+ local($p)=@_;
+ local(%n)=($A,$T,$B,$A,$C,$B,$D,$C,$E,$D,$T,$E);
+ local(%n)=($A,$B,$B,$C,$C,$D,$D,$E,$E,$T,$T,$A);
+ return($n{$p});
+ }
+
+sub Na
+ {
+ local($n)=@_;
+ return( (($n )&0x0f),
+ (($n+ 2)&0x0f),
+ (($n+ 8)&0x0f),
+ (($n+13)&0x0f),
+ (($n+ 1)&0x0f));
+ }
+
+sub X_expand
+ {
+ local($in)=@_;
+
+ &comment("First, load the words onto the stack in network byte order");
+ for ($i=0; $i<16; $i++)
+ {
+ &mov("eax",&DWP(($i+0)*4,$in,"",0)) unless $i == 0;
+ &bswap("eax");
+ &mov(&swtmp($i+0),"eax");
+ }
+
+ &comment("We now have the X array on the stack");
+ &comment("starting at sp-4");
+ }
+
+# Rules of engagement
+# F is always trashable at the start, the running total.
+# E becomes the next F so it can be trashed after it has been 'accumulated'
+# F becomes A in the next round. We don't need to access it much.
+# During the X update part, the result ends up in $X[$n0].
+
+sub BODY_00_15
+ {
+ local($pos,$K,$X,$n,$a,$b,$c,$d,$e,$f)=@_;
+
+return if $n & 1;
+ &comment("00_15 $n");
+
+ &mov($f,$c);
+
+ &mov($tmp1,$a);
+ &xor($f,$d); # F2
+
+ &rotl($tmp1,5); # A2
+
+ &and($f,$b); # F3
+ &add($tmp1,$e);
+
+ &rotr($b,1); # B1 <- F
+ &mov($e,&swtmp($n)); # G1
+
+ &rotr($b,1); # B1 <- F
+ &xor($f,$d); # F4
+
+ &lea($tmp1,&DWP($K,$tmp1,$e,1));
+
+############################
+# &BODY_40_59( 0,$K[2],$X,42,$A,$B,$C,$D,$E,$T);
+# &BODY_40_59( 0,$K[2],$X,43,$T,$A,$B,$C,$D,$E);
+$n++;
+ local($n0,$n1,$n2,$n3,$np)=&Na($n);
+ ($b,$c,$d,$e,$f,$a)=($a,$b,$c,$d,$e,$f);
+
+ &mov($f,$c);
+
+ &add($a,$tmp1); # MOVED DOWN
+ &xor($f,$d); # F2
+
+ &mov($tmp1,$a);
+ &and($f,$b); # F3
+
+ &rotl($tmp1,5); # A2
+
+ &add($tmp1,$e);
+ &mov($e,&swtmp($n)); # G1
+
+ &rotr($b,1); # B1 <- F
+ &xor($f,$d); # F4
+
+ &rotr($b,1); # B1 <- F
+ &lea($tmp1,&DWP($K,$tmp1,$e,1));
+
+ &add($f,$tmp1);
+ }
+
+sub BODY_16_19
+ {
+ local($pos,$K,$X,$n,$a,$b,$c,$d,$e,$f)=@_;
+ local($n0,$n1,$n2,$n3,$np)=&Na($n);
+
+return if $n & 1;
+ &comment("16_19 $n");
+
+ &nop() if ($pos < 0);
+&mov($tmp1,&swtmp($n0)); # X1
+ &mov($f,&swtmp($n1)); # X2
+&xor($f,$tmp1); # X3
+ &mov($tmp1,&swtmp($n2)); # X4
+&xor($f,$tmp1); # X5
+ &mov($tmp1,&swtmp($n3)); # X6
+&xor($f,$tmp1); # X7 - slot
+ &mov($tmp1,$c); # F1
+&rotl($f,1); # X8 - slot
+ &xor($tmp1,$d); # F2
+&mov(&swtmp($n0),$f); # X9 - anytime
+ &and($tmp1,$b); # F3
+&lea($f,&DWP($K,$f,$e,1)); # tot=X+K+e
+ &xor($tmp1,$d); # F4
+&mov($e,$a); # A1
+ &add($f,$tmp1); # tot+=F();
+
+&rotl($e,5); # A2
+
+&rotr($b,1); # B1 <- F
+ &add($f,$e); # tot+=a
+
+############################
+# &BODY_40_59( 0,$K[2],$X,42,$A,$B,$C,$D,$E,$T);
+# &BODY_40_59( 0,$K[2],$X,43,$T,$A,$B,$C,$D,$E);
+$n++;
+ local($n0,$n1,$n2,$n3,$np)=&Na($n);
+ ($b,$c,$d,$e,$f,$a)=($a,$b,$c,$d,$e,$f);
+
+
+&mov($f,&swtmp($n0)); # X1
+ &mov($tmp1,&swtmp($n1)); # X2
+&xor($f,$tmp1); # X3
+ &mov($tmp1,&swtmp($n2)); # X4
+&xor($f,$tmp1); # X5
+ &mov($tmp1,&swtmp($n3)); # X6
+&rotr($c,1); #&rotr($b,1); # B1 <- F # MOVED DOWN
+ &xor($f,$tmp1); # X7 - slot
+&rotl($f,1); # X8 - slot
+ &mov($tmp1,$c); # F1
+&xor($tmp1,$d); # F2
+ &mov(&swtmp($n0),$f); # X9 - anytime
+&and($tmp1,$b); # F3
+ &lea($f,&DWP($K,$f,$e,1)); # tot=X+K+e
+
+&xor($tmp1,$d); # F4
+ &mov($e,$a); # A1
+
+&rotl($e,5); # A2
+
+&rotr($b,1); # B1 <- F
+ &add($f,$e); # tot+=a
+
+&rotr($b,1); # B1 <- F
+ &add($f,$tmp1); # tot+=F();
+
+ }
+
+sub BODY_20_39
+ {
+ local($pos,$K,$X,$n,$a,$b,$c,$d,$e,$f)=@_;
+
+ &comment("20_39 $n");
+ local($n0,$n1,$n2,$n3,$np)=&Na($n);
+
+&mov($f,&swtmp($n0)); # X1
+ &mov($tmp1,&swtmp($n1)); # X2
+&xor($f,$tmp1); # X3
+ &mov($tmp1,&swtmp($n2)); # X4
+&xor($f,$tmp1); # X5
+ &mov($tmp1,&swtmp($n3)); # X6
+&xor($f,$tmp1); # X7 - slot
+ &mov($tmp1,$b); # F1
+&rotl($f,1); # X8 - slot
+ &xor($tmp1,$c); # F2
+&mov(&swtmp($n0),$f); # X9 - anytime
+ &xor($tmp1,$d); # F3
+
+&lea($f,&DWP($K,$f,$e,1)); # tot=X+K+e
+ &mov($e,$a); # A1
+
+&rotl($e,5); # A2
+
+if ($n != 79) # last loop
+ {
+ &rotr($b,1); # B1 <- F
+ &add($e,$tmp1); # tmp1=F()+a
+
+ &rotr($b,1); # B2 <- F
+ &add($f,$e); # tot+=tmp1;
+ }
+else
+ {
+ &add($e,$tmp1); # tmp1=F()+a
+ &mov($tmp1,&wparam(0));
+
+ &rotr($b,1); # B1 <- F
+ &add($f,$e); # tot+=tmp1;
+
+ &rotr($b,1); # B2 <- F
+ }
+ }
+
+sub BODY_40_59
+ {
+ local($pos,$K,$X,$n,$a,$b,$c,$d,$e,$f)=@_;
+
+ &comment("40_59 $n");
+ return if $n & 1;
+ local($n0,$n1,$n2,$n3,$np)=&Na($n);
+
+&mov($f,&swtmp($n0)); # X1
+ &mov($tmp1,&swtmp($n1)); # X2
+&xor($f,$tmp1); # X3
+ &mov($tmp1,&swtmp($n2)); # X4
+&xor($f,$tmp1); # X5
+ &mov($tmp1,&swtmp($n3)); # X6
+&xor($f,$tmp1); # X7 - slot
+ &mov($tmp1,$b); # F1
+&rotl($f,1); # X8 - slot
+ &or($tmp1,$c); # F2
+&mov(&swtmp($n0),$f); # X9 - anytime
+ &and($tmp1,$d); # F3
+
+&lea($f,&DWP($K,$f,$e,1)); # tot=X+K+e
+ &mov($e,$b); # F4
+
+&rotr($b,1); # B1 <- F
+ &and($e,$c); # F5
+
+&or($tmp1,$e); # F6
+ &mov($e,$a); # A1
+
+&rotl($e,5); # A2
+
+&add($tmp1,$e); # tmp1=F()+a
+
+############################
+# &BODY_40_59( 0,$K[2],$X,42,$A,$B,$C,$D,$E,$T);
+# &BODY_40_59( 0,$K[2],$X,43,$T,$A,$B,$C,$D,$E);
+$n++;
+ local($n0,$n1,$n2,$n3,$np)=&Na($n);
+ ($b,$c,$d,$e,$f,$a)=($a,$b,$c,$d,$e,$f);
+
+ &mov($f,&swtmp($n0)); # X1
+&add($a,$tmp1); # tot+=tmp1; # moved was add f,tmp1
+ &mov($tmp1,&swtmp($n1)); # X2
+&xor($f,$tmp1); # X3
+ &mov($tmp1,&swtmp($n2)); # X4
+&xor($f,$tmp1); # X5
+ &mov($tmp1,&swtmp($n3)); # X6
+&rotr($c,1); # B2 <- F # moved was rotr b,1
+ &xor($f,$tmp1); # X7 - slot
+&rotl($f,1); # X8 - slot
+ &mov($tmp1,$b); # F1
+&mov(&swtmp($n0),$f); # X9 - anytime
+ &or($tmp1,$c); # F2
+&lea($f,&DWP($K,$f,$e,1)); # tot=X+K+e
+ &mov($e,$b); # F4
+&and($tmp1,$d); # F3
+ &and($e,$c); # F5
+
+&or($tmp1,$e); # F6
+ &mov($e,$a); # A1
+
+&rotl($e,5); # A2
+
+&rotr($b,1); # B1 <- F
+ &add($tmp1,$e); # tmp1=F()+a
+
+&rotr($b,1); # B2 <- F
+ &add($f,$tmp1); # tot+=tmp1;
+ }
+
+sub BODY_60_79
+ {
+ &BODY_20_39(@_);
+ }
+
+sub sha1_block
+ {
+ local($name)=@_;
+
+ &function_begin_B($name,"");
+
+ # parameter 1 is the MD5_CTX structure.
+ # A 0
+ # B 4
+ # C 8
+ # D 12
+ # E 16
+
+ &push("esi");
+ &push("ebp");
+ &mov("eax", &wparam(2));
+ &mov("esi", &wparam(1));
+ &add("eax", "esi"); # offset to leave on
+ &mov("ebp", &wparam(0));
+ &push("ebx");
+ &sub("eax", 64);
+ &push("edi");
+ &mov($B, &DWP( 4,"ebp","",0));
+ &stack_push(18);
+ &mov($D, &DWP(12,"ebp","",0));
+ &mov($E, &DWP(16,"ebp","",0));
+ &mov($C, &DWP( 8,"ebp","",0));
+ &mov(&swtmp(17),"eax");
+
+ &comment("First we need to setup the X array");
+ &mov("eax",&DWP(0,"esi","",0)); # pulled out of X_expand
+
+ &set_label("start") unless $normal;
+
+ &X_expand("esi");
+ &mov(&swtmp(16),"esi");
+
+ &comment("");
+ &comment("Start processing");
+
+ # odd start
+ &mov($A, &DWP( 0,"ebp","",0));
+ $X="esp";
+ &BODY_00_15(-2,$K[0],$X, 0,$A,$B,$C,$D,$E,$T);
+ &BODY_00_15( 0,$K[0],$X, 1,$T,$A,$B,$C,$D,$E);
+ &BODY_00_15( 0,$K[0],$X, 2,$E,$T,$A,$B,$C,$D);
+ &BODY_00_15( 0,$K[0],$X, 3,$D,$E,$T,$A,$B,$C);
+ &BODY_00_15( 0,$K[0],$X, 4,$C,$D,$E,$T,$A,$B);
+ &BODY_00_15( 0,$K[0],$X, 5,$B,$C,$D,$E,$T,$A);
+ &BODY_00_15( 0,$K[0],$X, 6,$A,$B,$C,$D,$E,$T);
+ &BODY_00_15( 0,$K[0],$X, 7,$T,$A,$B,$C,$D,$E);
+ &BODY_00_15( 0,$K[0],$X, 8,$E,$T,$A,$B,$C,$D);
+ &BODY_00_15( 0,$K[0],$X, 9,$D,$E,$T,$A,$B,$C);
+ &BODY_00_15( 0,$K[0],$X,10,$C,$D,$E,$T,$A,$B);
+ &BODY_00_15( 0,$K[0],$X,11,$B,$C,$D,$E,$T,$A);
+ &BODY_00_15( 0,$K[0],$X,12,$A,$B,$C,$D,$E,$T);
+ &BODY_00_15( 0,$K[0],$X,13,$T,$A,$B,$C,$D,$E);
+ &BODY_00_15( 0,$K[0],$X,14,$E,$T,$A,$B,$C,$D);
+ &BODY_00_15( 1,$K[0],$X,15,$D,$E,$T,$A,$B,$C);
+ &BODY_16_19(-1,$K[0],$X,16,$C,$D,$E,$T,$A,$B);
+ &BODY_16_19( 0,$K[0],$X,17,$B,$C,$D,$E,$T,$A);
+ &BODY_16_19( 0,$K[0],$X,18,$A,$B,$C,$D,$E,$T);
+ &BODY_16_19( 1,$K[0],$X,19,$T,$A,$B,$C,$D,$E);
+
+ &BODY_20_39(-1,$K[1],$X,20,$E,$T,$A,$B,$C,$D);
+ &BODY_20_39( 0,$K[1],$X,21,$D,$E,$T,$A,$B,$C);
+ &BODY_20_39( 0,$K[1],$X,22,$C,$D,$E,$T,$A,$B);
+ &BODY_20_39( 0,$K[1],$X,23,$B,$C,$D,$E,$T,$A);
+ &BODY_20_39( 0,$K[1],$X,24,$A,$B,$C,$D,$E,$T);
+ &BODY_20_39( 0,$K[1],$X,25,$T,$A,$B,$C,$D,$E);
+ &BODY_20_39( 0,$K[1],$X,26,$E,$T,$A,$B,$C,$D);
+ &BODY_20_39( 0,$K[1],$X,27,$D,$E,$T,$A,$B,$C);
+ &BODY_20_39( 0,$K[1],$X,28,$C,$D,$E,$T,$A,$B);
+ &BODY_20_39( 0,$K[1],$X,29,$B,$C,$D,$E,$T,$A);
+ &BODY_20_39( 0,$K[1],$X,30,$A,$B,$C,$D,$E,$T);
+ &BODY_20_39( 0,$K[1],$X,31,$T,$A,$B,$C,$D,$E);
+ &BODY_20_39( 0,$K[1],$X,32,$E,$T,$A,$B,$C,$D);
+ &BODY_20_39( 0,$K[1],$X,33,$D,$E,$T,$A,$B,$C);
+ &BODY_20_39( 0,$K[1],$X,34,$C,$D,$E,$T,$A,$B);
+ &BODY_20_39( 0,$K[1],$X,35,$B,$C,$D,$E,$T,$A);
+ &BODY_20_39( 0,$K[1],$X,36,$A,$B,$C,$D,$E,$T);
+ &BODY_20_39( 0,$K[1],$X,37,$T,$A,$B,$C,$D,$E);
+ &BODY_20_39( 0,$K[1],$X,38,$E,$T,$A,$B,$C,$D);
+ &BODY_20_39( 1,$K[1],$X,39,$D,$E,$T,$A,$B,$C);
+
+ &BODY_40_59(-1,$K[2],$X,40,$C,$D,$E,$T,$A,$B);
+ &BODY_40_59( 0,$K[2],$X,41,$B,$C,$D,$E,$T,$A);
+ &BODY_40_59( 0,$K[2],$X,42,$A,$B,$C,$D,$E,$T);
+ &BODY_40_59( 0,$K[2],$X,43,$T,$A,$B,$C,$D,$E);
+ &BODY_40_59( 0,$K[2],$X,44,$E,$T,$A,$B,$C,$D);
+ &BODY_40_59( 0,$K[2],$X,45,$D,$E,$T,$A,$B,$C);
+ &BODY_40_59( 0,$K[2],$X,46,$C,$D,$E,$T,$A,$B);
+ &BODY_40_59( 0,$K[2],$X,47,$B,$C,$D,$E,$T,$A);
+ &BODY_40_59( 0,$K[2],$X,48,$A,$B,$C,$D,$E,$T);
+ &BODY_40_59( 0,$K[2],$X,49,$T,$A,$B,$C,$D,$E);
+ &BODY_40_59( 0,$K[2],$X,50,$E,$T,$A,$B,$C,$D);
+ &BODY_40_59( 0,$K[2],$X,51,$D,$E,$T,$A,$B,$C);
+ &BODY_40_59( 0,$K[2],$X,52,$C,$D,$E,$T,$A,$B);
+ &BODY_40_59( 0,$K[2],$X,53,$B,$C,$D,$E,$T,$A);
+ &BODY_40_59( 0,$K[2],$X,54,$A,$B,$C,$D,$E,$T);
+ &BODY_40_59( 0,$K[2],$X,55,$T,$A,$B,$C,$D,$E);
+ &BODY_40_59( 0,$K[2],$X,56,$E,$T,$A,$B,$C,$D);
+ &BODY_40_59( 0,$K[2],$X,57,$D,$E,$T,$A,$B,$C);
+ &BODY_40_59( 0,$K[2],$X,58,$C,$D,$E,$T,$A,$B);
+ &BODY_40_59( 1,$K[2],$X,59,$B,$C,$D,$E,$T,$A);
+
+ &BODY_60_79(-1,$K[3],$X,60,$A,$B,$C,$D,$E,$T);
+ &BODY_60_79( 0,$K[3],$X,61,$T,$A,$B,$C,$D,$E);
+ &BODY_60_79( 0,$K[3],$X,62,$E,$T,$A,$B,$C,$D);
+ &BODY_60_79( 0,$K[3],$X,63,$D,$E,$T,$A,$B,$C);
+ &BODY_60_79( 0,$K[3],$X,64,$C,$D,$E,$T,$A,$B);
+ &BODY_60_79( 0,$K[3],$X,65,$B,$C,$D,$E,$T,$A);
+ &BODY_60_79( 0,$K[3],$X,66,$A,$B,$C,$D,$E,$T);
+ &BODY_60_79( 0,$K[3],$X,67,$T,$A,$B,$C,$D,$E);
+ &BODY_60_79( 0,$K[3],$X,68,$E,$T,$A,$B,$C,$D);
+ &BODY_60_79( 0,$K[3],$X,69,$D,$E,$T,$A,$B,$C);
+ &BODY_60_79( 0,$K[3],$X,70,$C,$D,$E,$T,$A,$B);
+ &BODY_60_79( 0,$K[3],$X,71,$B,$C,$D,$E,$T,$A);
+ &BODY_60_79( 0,$K[3],$X,72,$A,$B,$C,$D,$E,$T);
+ &BODY_60_79( 0,$K[3],$X,73,$T,$A,$B,$C,$D,$E);
+ &BODY_60_79( 0,$K[3],$X,74,$E,$T,$A,$B,$C,$D);
+ &BODY_60_79( 0,$K[3],$X,75,$D,$E,$T,$A,$B,$C);
+ &BODY_60_79( 0,$K[3],$X,76,$C,$D,$E,$T,$A,$B);
+ &BODY_60_79( 0,$K[3],$X,77,$B,$C,$D,$E,$T,$A);
+ &BODY_60_79( 0,$K[3],$X,78,$A,$B,$C,$D,$E,$T);
+ &BODY_60_79( 2,$K[3],$X,79,$T,$A,$B,$C,$D,$E);
+
+ &comment("End processing");
+ &comment("");
+ # D is the tmp value
+
+ # E -> A
+ # T -> B
+ # A -> C
+ # B -> D
+ # C -> E
+ # D -> T
+
+ # The last 2 have been moved into the last loop
+ # &mov($tmp1,&wparam(0));
+
+ &mov($D, &DWP(12,$tmp1,"",0));
+ &add($D,$B);
+ &mov($B, &DWP( 4,$tmp1,"",0));
+ &add($B,$T);
+ &mov($T, $A);
+ &mov($A, &DWP( 0,$tmp1,"",0));
+ &mov(&DWP(12,$tmp1,"",0),$D);
+
+ &add($A,$E);
+ &mov($E, &DWP(16,$tmp1,"",0));
+ &add($E,$C);
+ &mov($C, &DWP( 8,$tmp1,"",0));
+ &add($C,$T);
+
+ &mov(&DWP( 0,$tmp1,"",0),$A);
+ &mov("esi",&swtmp(16));
+ &mov(&DWP( 8,$tmp1,"",0),$C); # This is for looping
+ &add("esi",64);
+ &mov("eax",&swtmp(17));
+ &mov(&DWP(16,$tmp1,"",0),$E);
+ &cmp("eax","esi");
+ &mov(&DWP( 4,$tmp1,"",0),$B); # This is for looping
+ &jl(&label("end"));
+ &mov("eax",&DWP(0,"esi","",0)); # Pulled down from
+ &jmp(&label("start"));
+
+ &set_label("end");
+ &stack_pop(18);
+ &pop("edi");
+ &pop("ebx");
+ &pop("ebp");
+ &pop("esi");
+ &ret();
+ &function_end_B($name);
+ }
+
diff --git a/crypto/sha/asm/sx86unix.cpp b/crypto/sha/asm/sx86unix.cpp
new file mode 100644
index 0000000000..8366664a39
--- /dev/null
+++ b/crypto/sha/asm/sx86unix.cpp
@@ -0,0 +1,1948 @@
+/* Run the C pre-processor over this file with one of the following defined
+ * ELF - elf object files,
+ * OUT - a.out object files,
+ * BSDI - BSDI style a.out object files
+ * SOL - Solaris style elf
+ */
+
+#define TYPE(a,b) .type a,b
+#define SIZE(a,b) .size a,b
+
+#if defined(OUT) || defined(BSDI)
+#define sha1_block_x86 _sha1_block_x86
+
+#endif
+
+#ifdef OUT
+#define OK 1
+#define ALIGN 4
+#endif
+
+#ifdef BSDI
+#define OK 1
+#define ALIGN 4
+#undef SIZE
+#undef TYPE
+#define SIZE(a,b)
+#define TYPE(a,b)
+#endif
+
+#if defined(ELF) || defined(SOL)
+#define OK 1
+#define ALIGN 16
+#endif
+
+#ifndef OK
+You need to define one of
+ELF - elf systems - linux-elf, NetBSD and DG-UX
+OUT - a.out systems - linux-a.out and FreeBSD
+SOL - solaris systems, which are elf with strange comment lines
+BSDI - a.out with a very primative version of as.
+#endif
+
+/* Let the Assembler begin :-) */
+ /* Don't even think of reading this code */
+ /* It was automatically generated by sha1-586.pl */
+ /* Which is a perl program used to generate the x86 assember for */
+ /* any of elf, a.out, BSDI,Win32, or Solaris */
+ /* eric <eay@cryptsoft.com> */
+
+ .file "sha1-586.s"
+ .version "01.01"
+gcc2_compiled.:
+.text
+ .align ALIGN
+.globl sha1_block_x86
+ TYPE(sha1_block_x86,@function)
+sha1_block_x86:
+ pushl %esi
+ pushl %ebp
+ movl 20(%esp), %eax
+ movl 16(%esp), %esi
+ addl %esi, %eax
+ movl 12(%esp), %ebp
+ pushl %ebx
+ subl $64, %eax
+ pushl %edi
+ movl 4(%ebp), %ebx
+ subl $72, %esp
+ movl 12(%ebp), %edx
+ movl 16(%ebp), %edi
+ movl 8(%ebp), %ecx
+ movl %eax, 68(%esp)
+ /* First we need to setup the X array */
+ movl (%esi), %eax
+.L000start:
+ /* First, load the words onto the stack in network byte order */
+.byte 15
+.byte 200 /* bswapl %eax */
+ movl %eax, (%esp)
+ movl 4(%esi), %eax
+.byte 15
+.byte 200 /* bswapl %eax */
+ movl %eax, 4(%esp)
+ movl 8(%esi), %eax
+.byte 15
+.byte 200 /* bswapl %eax */
+ movl %eax, 8(%esp)
+ movl 12(%esi), %eax
+.byte 15
+.byte 200 /* bswapl %eax */
+ movl %eax, 12(%esp)
+ movl 16(%esi), %eax
+.byte 15
+.byte 200 /* bswapl %eax */
+ movl %eax, 16(%esp)
+ movl 20(%esi), %eax
+.byte 15
+.byte 200 /* bswapl %eax */
+ movl %eax, 20(%esp)
+ movl 24(%esi), %eax
+.byte 15
+.byte 200 /* bswapl %eax */
+ movl %eax, 24(%esp)
+ movl 28(%esi), %eax
+.byte 15
+.byte 200 /* bswapl %eax */
+ movl %eax, 28(%esp)
+ movl 32(%esi), %eax
+.byte 15
+.byte 200 /* bswapl %eax */
+ movl %eax, 32(%esp)
+ movl 36(%esi), %eax
+.byte 15
+.byte 200 /* bswapl %eax */
+ movl %eax, 36(%esp)
+ movl 40(%esi), %eax
+.byte 15
+.byte 200 /* bswapl %eax */
+ movl %eax, 40(%esp)
+ movl 44(%esi), %eax
+.byte 15
+.byte 200 /* bswapl %eax */
+ movl %eax, 44(%esp)
+ movl 48(%esi), %eax
+.byte 15
+.byte 200 /* bswapl %eax */
+ movl %eax, 48(%esp)
+ movl 52(%esi), %eax
+.byte 15
+.byte 200 /* bswapl %eax */
+ movl %eax, 52(%esp)
+ movl 56(%esi), %eax
+.byte 15
+.byte 200 /* bswapl %eax */
+ movl %eax, 56(%esp)
+ movl 60(%esi), %eax
+.byte 15
+.byte 200 /* bswapl %eax */
+ movl %eax, 60(%esp)
+ /* We now have the X array on the stack */
+ /* starting at sp-4 */
+ movl %esi, 64(%esp)
+
+ /* Start processing */
+ movl (%ebp), %eax
+ /* 00_15 0 */
+ movl %ecx, %esi
+ movl %eax, %ebp
+ xorl %edx, %esi
+ roll $5, %ebp
+ andl %ebx, %esi
+ addl %edi, %ebp
+.byte 209
+.byte 203 /* rorl $1 %ebx */
+ movl (%esp), %edi
+.byte 209
+.byte 203 /* rorl $1 %ebx */
+ xorl %edx, %esi
+ leal 1518500249(%ebp,%edi,1),%ebp
+ movl %ebx, %edi
+ addl %ebp, %esi
+ xorl %ecx, %edi
+ movl %esi, %ebp
+ andl %eax, %edi
+ roll $5, %ebp
+ addl %edx, %ebp
+ movl 4(%esp), %edx
+.byte 209
+.byte 200 /* rorl $1 %eax */
+ xorl %ecx, %edi
+.byte 209
+.byte 200 /* rorl $1 %eax */
+ leal 1518500249(%ebp,%edx,1),%ebp
+ addl %ebp, %edi
+ /* 00_15 2 */
+ movl %eax, %edx
+ movl %edi, %ebp
+ xorl %ebx, %edx
+ roll $5, %ebp
+ andl %esi, %edx
+ addl %ecx, %ebp
+.byte 209
+.byte 206 /* rorl $1 %esi */
+ movl 8(%esp), %ecx
+.byte 209
+.byte 206 /* rorl $1 %esi */
+ xorl %ebx, %edx
+ leal 1518500249(%ebp,%ecx,1),%ebp
+ movl %esi, %ecx
+ addl %ebp, %edx
+ xorl %eax, %ecx
+ movl %edx, %ebp
+ andl %edi, %ecx
+ roll $5, %ebp
+ addl %ebx, %ebp
+ movl 12(%esp), %ebx
+.byte 209
+.byte 207 /* rorl $1 %edi */
+ xorl %eax, %ecx
+.byte 209
+.byte 207 /* rorl $1 %edi */
+ leal 1518500249(%ebp,%ebx,1),%ebp
+ addl %ebp, %ecx
+ /* 00_15 4 */
+ movl %edi, %ebx
+ movl %ecx, %ebp
+ xorl %esi, %ebx
+ roll $5, %ebp
+ andl %edx, %ebx
+ addl %eax, %ebp
+.byte 209
+.byte 202 /* rorl $1 %edx */
+ movl 16(%esp), %eax
+.byte 209
+.byte 202 /* rorl $1 %edx */
+ xorl %esi, %ebx
+ leal 1518500249(%ebp,%eax,1),%ebp
+ movl %edx, %eax
+ addl %ebp, %ebx
+ xorl %edi, %eax
+ movl %ebx, %ebp
+ andl %ecx, %eax
+ roll $5, %ebp
+ addl %esi, %ebp
+ movl 20(%esp), %esi
+.byte 209
+.byte 201 /* rorl $1 %ecx */
+ xorl %edi, %eax
+.byte 209
+.byte 201 /* rorl $1 %ecx */
+ leal 1518500249(%ebp,%esi,1),%ebp
+ addl %ebp, %eax
+ /* 00_15 6 */
+ movl %ecx, %esi
+ movl %eax, %ebp
+ xorl %edx, %esi
+ roll $5, %ebp
+ andl %ebx, %esi
+ addl %edi, %ebp
+.byte 209
+.byte 203 /* rorl $1 %ebx */
+ movl 24(%esp), %edi
+.byte 209
+.byte 203 /* rorl $1 %ebx */
+ xorl %edx, %esi
+ leal 1518500249(%ebp,%edi,1),%ebp
+ movl %ebx, %edi
+ addl %ebp, %esi
+ xorl %ecx, %edi
+ movl %esi, %ebp
+ andl %eax, %edi
+ roll $5, %ebp
+ addl %edx, %ebp
+ movl 28(%esp), %edx
+.byte 209
+.byte 200 /* rorl $1 %eax */
+ xorl %ecx, %edi
+.byte 209
+.byte 200 /* rorl $1 %eax */
+ leal 1518500249(%ebp,%edx,1),%ebp
+ addl %ebp, %edi
+ /* 00_15 8 */
+ movl %eax, %edx
+ movl %edi, %ebp
+ xorl %ebx, %edx
+ roll $5, %ebp
+ andl %esi, %edx
+ addl %ecx, %ebp
+.byte 209
+.byte 206 /* rorl $1 %esi */
+ movl 32(%esp), %ecx
+.byte 209
+.byte 206 /* rorl $1 %esi */
+ xorl %ebx, %edx
+ leal 1518500249(%ebp,%ecx,1),%ebp
+ movl %esi, %ecx
+ addl %ebp, %edx
+ xorl %eax, %ecx
+ movl %edx, %ebp
+ andl %edi, %ecx
+ roll $5, %ebp
+ addl %ebx, %ebp
+ movl 36(%esp), %ebx
+.byte 209
+.byte 207 /* rorl $1 %edi */
+ xorl %eax, %ecx
+.byte 209
+.byte 207 /* rorl $1 %edi */
+ leal 1518500249(%ebp,%ebx,1),%ebp
+ addl %ebp, %ecx
+ /* 00_15 10 */
+ movl %edi, %ebx
+ movl %ecx, %ebp
+ xorl %esi, %ebx
+ roll $5, %ebp
+ andl %edx, %ebx
+ addl %eax, %ebp
+.byte 209
+.byte 202 /* rorl $1 %edx */
+ movl 40(%esp), %eax
+.byte 209
+.byte 202 /* rorl $1 %edx */
+ xorl %esi, %ebx
+ leal 1518500249(%ebp,%eax,1),%ebp
+ movl %edx, %eax
+ addl %ebp, %ebx
+ xorl %edi, %eax
+ movl %ebx, %ebp
+ andl %ecx, %eax
+ roll $5, %ebp
+ addl %esi, %ebp
+ movl 44(%esp), %esi
+.byte 209
+.byte 201 /* rorl $1 %ecx */
+ xorl %edi, %eax
+.byte 209
+.byte 201 /* rorl $1 %ecx */
+ leal 1518500249(%ebp,%esi,1),%ebp
+ addl %ebp, %eax
+ /* 00_15 12 */
+ movl %ecx, %esi
+ movl %eax, %ebp
+ xorl %edx, %esi
+ roll $5, %ebp
+ andl %ebx, %esi
+ addl %edi, %ebp
+.byte 209
+.byte 203 /* rorl $1 %ebx */
+ movl 48(%esp), %edi
+.byte 209
+.byte 203 /* rorl $1 %ebx */
+ xorl %edx, %esi
+ leal 1518500249(%ebp,%edi,1),%ebp
+ movl %ebx, %edi
+ addl %ebp, %esi
+ xorl %ecx, %edi
+ movl %esi, %ebp
+ andl %eax, %edi
+ roll $5, %ebp
+ addl %edx, %ebp
+ movl 52(%esp), %edx
+.byte 209
+.byte 200 /* rorl $1 %eax */
+ xorl %ecx, %edi
+.byte 209
+.byte 200 /* rorl $1 %eax */
+ leal 1518500249(%ebp,%edx,1),%ebp
+ addl %ebp, %edi
+ /* 00_15 14 */
+ movl %eax, %edx
+ movl %edi, %ebp
+ xorl %ebx, %edx
+ roll $5, %ebp
+ andl %esi, %edx
+ addl %ecx, %ebp
+.byte 209
+.byte 206 /* rorl $1 %esi */
+ movl 56(%esp), %ecx
+.byte 209
+.byte 206 /* rorl $1 %esi */
+ xorl %ebx, %edx
+ leal 1518500249(%ebp,%ecx,1),%ebp
+ movl %esi, %ecx
+ addl %ebp, %edx
+ xorl %eax, %ecx
+ movl %edx, %ebp
+ andl %edi, %ecx
+ roll $5, %ebp
+ addl %ebx, %ebp
+ movl 60(%esp), %ebx
+.byte 209
+.byte 207 /* rorl $1 %edi */
+ xorl %eax, %ecx
+.byte 209
+.byte 207 /* rorl $1 %edi */
+ leal 1518500249(%ebp,%ebx,1),%ebp
+ addl %ebp, %ecx
+ /* 16_19 16 */
+ nop
+ movl (%esp), %ebp
+ movl 8(%esp), %ebx
+ xorl %ebp, %ebx
+ movl 32(%esp), %ebp
+ xorl %ebp, %ebx
+ movl 52(%esp), %ebp
+ xorl %ebp, %ebx
+ movl %edi, %ebp
+.byte 209
+.byte 195 /* roll $1 %ebx */
+ xorl %esi, %ebp
+ movl %ebx, (%esp)
+ andl %edx, %ebp
+ leal 1518500249(%ebx,%eax,1),%ebx
+ xorl %esi, %ebp
+ movl %ecx, %eax
+ addl %ebp, %ebx
+ roll $5, %eax
+.byte 209
+.byte 202 /* rorl $1 %edx */
+ addl %eax, %ebx
+ movl 4(%esp), %eax
+ movl 12(%esp), %ebp
+ xorl %ebp, %eax
+ movl 36(%esp), %ebp
+ xorl %ebp, %eax
+ movl 56(%esp), %ebp
+.byte 209
+.byte 202 /* rorl $1 %edx */
+ xorl %ebp, %eax
+.byte 209
+.byte 192 /* roll $1 %eax */
+ movl %edx, %ebp
+ xorl %edi, %ebp
+ movl %eax, 4(%esp)
+ andl %ecx, %ebp
+ leal 1518500249(%eax,%esi,1),%eax
+ xorl %edi, %ebp
+ movl %ebx, %esi
+ roll $5, %esi
+.byte 209
+.byte 201 /* rorl $1 %ecx */
+ addl %esi, %eax
+.byte 209
+.byte 201 /* rorl $1 %ecx */
+ addl %ebp, %eax
+ /* 16_19 18 */
+ movl 8(%esp), %ebp
+ movl 16(%esp), %esi
+ xorl %ebp, %esi
+ movl 40(%esp), %ebp
+ xorl %ebp, %esi
+ movl 60(%esp), %ebp
+ xorl %ebp, %esi
+ movl %ecx, %ebp
+.byte 209
+.byte 198 /* roll $1 %esi */
+ xorl %edx, %ebp
+ movl %esi, 8(%esp)
+ andl %ebx, %ebp
+ leal 1518500249(%esi,%edi,1),%esi
+ xorl %edx, %ebp
+ movl %eax, %edi
+ addl %ebp, %esi
+ roll $5, %edi
+.byte 209
+.byte 203 /* rorl $1 %ebx */
+ addl %edi, %esi
+ movl 12(%esp), %edi
+ movl 20(%esp), %ebp
+ xorl %ebp, %edi
+ movl 44(%esp), %ebp
+ xorl %ebp, %edi
+ movl (%esp), %ebp
+.byte 209
+.byte 203 /* rorl $1 %ebx */
+ xorl %ebp, %edi
+.byte 209
+.byte 199 /* roll $1 %edi */
+ movl %ebx, %ebp
+ xorl %ecx, %ebp
+ movl %edi, 12(%esp)
+ andl %eax, %ebp
+ leal 1518500249(%edi,%edx,1),%edi
+ xorl %ecx, %ebp
+ movl %esi, %edx
+ roll $5, %edx
+.byte 209
+.byte 200 /* rorl $1 %eax */
+ addl %edx, %edi
+.byte 209
+.byte 200 /* rorl $1 %eax */
+ addl %ebp, %edi
+ /* 20_39 20 */
+ movl 16(%esp), %edx
+ movl 24(%esp), %ebp
+ xorl %ebp, %edx
+ movl 48(%esp), %ebp
+ xorl %ebp, %edx
+ movl 4(%esp), %ebp
+ xorl %ebp, %edx
+ movl %esi, %ebp
+.byte 209
+.byte 194 /* roll $1 %edx */
+ xorl %eax, %ebp
+ movl %edx, 16(%esp)
+ xorl %ebx, %ebp
+ leal 1859775393(%edx,%ecx,1),%edx
+ movl %edi, %ecx
+ roll $5, %ecx
+.byte 209
+.byte 206 /* rorl $1 %esi */
+ addl %ebp, %ecx
+.byte 209
+.byte 206 /* rorl $1 %esi */
+ addl %ecx, %edx
+ /* 20_39 21 */
+ movl 20(%esp), %ecx
+ movl 28(%esp), %ebp
+ xorl %ebp, %ecx
+ movl 52(%esp), %ebp
+ xorl %ebp, %ecx
+ movl 8(%esp), %ebp
+ xorl %ebp, %ecx
+ movl %edi, %ebp
+.byte 209
+.byte 193 /* roll $1 %ecx */
+ xorl %esi, %ebp
+ movl %ecx, 20(%esp)
+ xorl %eax, %ebp
+ leal 1859775393(%ecx,%ebx,1),%ecx
+ movl %edx, %ebx
+ roll $5, %ebx
+.byte 209
+.byte 207 /* rorl $1 %edi */
+ addl %ebp, %ebx
+.byte 209
+.byte 207 /* rorl $1 %edi */
+ addl %ebx, %ecx
+ /* 20_39 22 */
+ movl 24(%esp), %ebx
+ movl 32(%esp), %ebp
+ xorl %ebp, %ebx
+ movl 56(%esp), %ebp
+ xorl %ebp, %ebx
+ movl 12(%esp), %ebp
+ xorl %ebp, %ebx
+ movl %edx, %ebp
+.byte 209
+.byte 195 /* roll $1 %ebx */
+ xorl %edi, %ebp
+ movl %ebx, 24(%esp)
+ xorl %esi, %ebp
+ leal 1859775393(%ebx,%eax,1),%ebx
+ movl %ecx, %eax
+ roll $5, %eax
+.byte 209
+.byte 202 /* rorl $1 %edx */
+ addl %ebp, %eax
+.byte 209
+.byte 202 /* rorl $1 %edx */
+ addl %eax, %ebx
+ /* 20_39 23 */
+ movl 28(%esp), %eax
+ movl 36(%esp), %ebp
+ xorl %ebp, %eax
+ movl 60(%esp), %ebp
+ xorl %ebp, %eax
+ movl 16(%esp), %ebp
+ xorl %ebp, %eax
+ movl %ecx, %ebp
+.byte 209
+.byte 192 /* roll $1 %eax */
+ xorl %edx, %ebp
+ movl %eax, 28(%esp)
+ xorl %edi, %ebp
+ leal 1859775393(%eax,%esi,1),%eax
+ movl %ebx, %esi
+ roll $5, %esi
+.byte 209
+.byte 201 /* rorl $1 %ecx */
+ addl %ebp, %esi
+.byte 209
+.byte 201 /* rorl $1 %ecx */
+ addl %esi, %eax
+ /* 20_39 24 */
+ movl 32(%esp), %esi
+ movl 40(%esp), %ebp
+ xorl %ebp, %esi
+ movl (%esp), %ebp
+ xorl %ebp, %esi
+ movl 20(%esp), %ebp
+ xorl %ebp, %esi
+ movl %ebx, %ebp
+.byte 209
+.byte 198 /* roll $1 %esi */
+ xorl %ecx, %ebp
+ movl %esi, 32(%esp)
+ xorl %edx, %ebp
+ leal 1859775393(%esi,%edi,1),%esi
+ movl %eax, %edi
+ roll $5, %edi
+.byte 209
+.byte 203 /* rorl $1 %ebx */
+ addl %ebp, %edi
+.byte 209
+.byte 203 /* rorl $1 %ebx */
+ addl %edi, %esi
+ /* 20_39 25 */
+ movl 36(%esp), %edi
+ movl 44(%esp), %ebp
+ xorl %ebp, %edi
+ movl 4(%esp), %ebp
+ xorl %ebp, %edi
+ movl 24(%esp), %ebp
+ xorl %ebp, %edi
+ movl %eax, %ebp
+.byte 209
+.byte 199 /* roll $1 %edi */
+ xorl %ebx, %ebp
+ movl %edi, 36(%esp)
+ xorl %ecx, %ebp
+ leal 1859775393(%edi,%edx,1),%edi
+ movl %esi, %edx
+ roll $5, %edx
+.byte 209
+.byte 200 /* rorl $1 %eax */
+ addl %ebp, %edx
+.byte 209
+.byte 200 /* rorl $1 %eax */
+ addl %edx, %edi
+ /* 20_39 26 */
+ movl 40(%esp), %edx
+ movl 48(%esp), %ebp
+ xorl %ebp, %edx
+ movl 8(%esp), %ebp
+ xorl %ebp, %edx
+ movl 28(%esp), %ebp
+ xorl %ebp, %edx
+ movl %esi, %ebp
+.byte 209
+.byte 194 /* roll $1 %edx */
+ xorl %eax, %ebp
+ movl %edx, 40(%esp)
+ xorl %ebx, %ebp
+ leal 1859775393(%edx,%ecx,1),%edx
+ movl %edi, %ecx
+ roll $5, %ecx
+.byte 209
+.byte 206 /* rorl $1 %esi */
+ addl %ebp, %ecx
+.byte 209
+.byte 206 /* rorl $1 %esi */
+ addl %ecx, %edx
+ /* 20_39 27 */
+ movl 44(%esp), %ecx
+ movl 52(%esp), %ebp
+ xorl %ebp, %ecx
+ movl 12(%esp), %ebp
+ xorl %ebp, %ecx
+ movl 32(%esp), %ebp
+ xorl %ebp, %ecx
+ movl %edi, %ebp
+.byte 209
+.byte 193 /* roll $1 %ecx */
+ xorl %esi, %ebp
+ movl %ecx, 44(%esp)
+ xorl %eax, %ebp
+ leal 1859775393(%ecx,%ebx,1),%ecx
+ movl %edx, %ebx
+ roll $5, %ebx
+.byte 209
+.byte 207 /* rorl $1 %edi */
+ addl %ebp, %ebx
+.byte 209
+.byte 207 /* rorl $1 %edi */
+ addl %ebx, %ecx
+ /* 20_39 28 */
+ movl 48(%esp), %ebx
+ movl 56(%esp), %ebp
+ xorl %ebp, %ebx
+ movl 16(%esp), %ebp
+ xorl %ebp, %ebx
+ movl 36(%esp), %ebp
+ xorl %ebp, %ebx
+ movl %edx, %ebp
+.byte 209
+.byte 195 /* roll $1 %ebx */
+ xorl %edi, %ebp
+ movl %ebx, 48(%esp)
+ xorl %esi, %ebp
+ leal 1859775393(%ebx,%eax,1),%ebx
+ movl %ecx, %eax
+ roll $5, %eax
+.byte 209
+.byte 202 /* rorl $1 %edx */
+ addl %ebp, %eax
+.byte 209
+.byte 202 /* rorl $1 %edx */
+ addl %eax, %ebx
+ /* 20_39 29 */
+ movl 52(%esp), %eax
+ movl 60(%esp), %ebp
+ xorl %ebp, %eax
+ movl 20(%esp), %ebp
+ xorl %ebp, %eax
+ movl 40(%esp), %ebp
+ xorl %ebp, %eax
+ movl %ecx, %ebp
+.byte 209
+.byte 192 /* roll $1 %eax */
+ xorl %edx, %ebp
+ movl %eax, 52(%esp)
+ xorl %edi, %ebp
+ leal 1859775393(%eax,%esi,1),%eax
+ movl %ebx, %esi
+ roll $5, %esi
+.byte 209
+.byte 201 /* rorl $1 %ecx */
+ addl %ebp, %esi
+.byte 209
+.byte 201 /* rorl $1 %ecx */
+ addl %esi, %eax
+ /* 20_39 30 */
+ movl 56(%esp), %esi
+ movl (%esp), %ebp
+ xorl %ebp, %esi
+ movl 24(%esp), %ebp
+ xorl %ebp, %esi
+ movl 44(%esp), %ebp
+ xorl %ebp, %esi
+ movl %ebx, %ebp
+.byte 209
+.byte 198 /* roll $1 %esi */
+ xorl %ecx, %ebp
+ movl %esi, 56(%esp)
+ xorl %edx, %ebp
+ leal 1859775393(%esi,%edi,1),%esi
+ movl %eax, %edi
+ roll $5, %edi
+.byte 209
+.byte 203 /* rorl $1 %ebx */
+ addl %ebp, %edi
+.byte 209
+.byte 203 /* rorl $1 %ebx */
+ addl %edi, %esi
+ /* 20_39 31 */
+ movl 60(%esp), %edi
+ movl 4(%esp), %ebp
+ xorl %ebp, %edi
+ movl 28(%esp), %ebp
+ xorl %ebp, %edi
+ movl 48(%esp), %ebp
+ xorl %ebp, %edi
+ movl %eax, %ebp
+.byte 209
+.byte 199 /* roll $1 %edi */
+ xorl %ebx, %ebp
+ movl %edi, 60(%esp)
+ xorl %ecx, %ebp
+ leal 1859775393(%edi,%edx,1),%edi
+ movl %esi, %edx
+ roll $5, %edx
+.byte 209
+.byte 200 /* rorl $1 %eax */
+ addl %ebp, %edx
+.byte 209
+.byte 200 /* rorl $1 %eax */
+ addl %edx, %edi
+ /* 20_39 32 */
+ movl (%esp), %edx
+ movl 8(%esp), %ebp
+ xorl %ebp, %edx
+ movl 32(%esp), %ebp
+ xorl %ebp, %edx
+ movl 52(%esp), %ebp
+ xorl %ebp, %edx
+ movl %esi, %ebp
+.byte 209
+.byte 194 /* roll $1 %edx */
+ xorl %eax, %ebp
+ movl %edx, (%esp)
+ xorl %ebx, %ebp
+ leal 1859775393(%edx,%ecx,1),%edx
+ movl %edi, %ecx
+ roll $5, %ecx
+.byte 209
+.byte 206 /* rorl $1 %esi */
+ addl %ebp, %ecx
+.byte 209
+.byte 206 /* rorl $1 %esi */
+ addl %ecx, %edx
+ /* 20_39 33 */
+ movl 4(%esp), %ecx
+ movl 12(%esp), %ebp
+ xorl %ebp, %ecx
+ movl 36(%esp), %ebp
+ xorl %ebp, %ecx
+ movl 56(%esp), %ebp
+ xorl %ebp, %ecx
+ movl %edi, %ebp
+.byte 209
+.byte 193 /* roll $1 %ecx */
+ xorl %esi, %ebp
+ movl %ecx, 4(%esp)
+ xorl %eax, %ebp
+ leal 1859775393(%ecx,%ebx,1),%ecx
+ movl %edx, %ebx
+ roll $5, %ebx
+.byte 209
+.byte 207 /* rorl $1 %edi */
+ addl %ebp, %ebx
+.byte 209
+.byte 207 /* rorl $1 %edi */
+ addl %ebx, %ecx
+ /* 20_39 34 */
+ movl 8(%esp), %ebx
+ movl 16(%esp), %ebp
+ xorl %ebp, %ebx
+ movl 40(%esp), %ebp
+ xorl %ebp, %ebx
+ movl 60(%esp), %ebp
+ xorl %ebp, %ebx
+ movl %edx, %ebp
+.byte 209
+.byte 195 /* roll $1 %ebx */
+ xorl %edi, %ebp
+ movl %ebx, 8(%esp)
+ xorl %esi, %ebp
+ leal 1859775393(%ebx,%eax,1),%ebx
+ movl %ecx, %eax
+ roll $5, %eax
+.byte 209
+.byte 202 /* rorl $1 %edx */
+ addl %ebp, %eax
+.byte 209
+.byte 202 /* rorl $1 %edx */
+ addl %eax, %ebx
+ /* 20_39 35 */
+ movl 12(%esp), %eax
+ movl 20(%esp), %ebp
+ xorl %ebp, %eax
+ movl 44(%esp), %ebp
+ xorl %ebp, %eax
+ movl (%esp), %ebp
+ xorl %ebp, %eax
+ movl %ecx, %ebp
+.byte 209
+.byte 192 /* roll $1 %eax */
+ xorl %edx, %ebp
+ movl %eax, 12(%esp)
+ xorl %edi, %ebp
+ leal 1859775393(%eax,%esi,1),%eax
+ movl %ebx, %esi
+ roll $5, %esi
+.byte 209
+.byte 201 /* rorl $1 %ecx */
+ addl %ebp, %esi
+.byte 209
+.byte 201 /* rorl $1 %ecx */
+ addl %esi, %eax
+ /* 20_39 36 */
+ movl 16(%esp), %esi
+ movl 24(%esp), %ebp
+ xorl %ebp, %esi
+ movl 48(%esp), %ebp
+ xorl %ebp, %esi
+ movl 4(%esp), %ebp
+ xorl %ebp, %esi
+ movl %ebx, %ebp
+.byte 209
+.byte 198 /* roll $1 %esi */
+ xorl %ecx, %ebp
+ movl %esi, 16(%esp)
+ xorl %edx, %ebp
+ leal 1859775393(%esi,%edi,1),%esi
+ movl %eax, %edi
+ roll $5, %edi
+.byte 209
+.byte 203 /* rorl $1 %ebx */
+ addl %ebp, %edi
+.byte 209
+.byte 203 /* rorl $1 %ebx */
+ addl %edi, %esi
+ /* 20_39 37 */
+ movl 20(%esp), %edi
+ movl 28(%esp), %ebp
+ xorl %ebp, %edi
+ movl 52(%esp), %ebp
+ xorl %ebp, %edi
+ movl 8(%esp), %ebp
+ xorl %ebp, %edi
+ movl %eax, %ebp
+.byte 209
+.byte 199 /* roll $1 %edi */
+ xorl %ebx, %ebp
+ movl %edi, 20(%esp)
+ xorl %ecx, %ebp
+ leal 1859775393(%edi,%edx,1),%edi
+ movl %esi, %edx
+ roll $5, %edx
+.byte 209
+.byte 200 /* rorl $1 %eax */
+ addl %ebp, %edx
+.byte 209
+.byte 200 /* rorl $1 %eax */
+ addl %edx, %edi
+ /* 20_39 38 */
+ movl 24(%esp), %edx
+ movl 32(%esp), %ebp
+ xorl %ebp, %edx
+ movl 56(%esp), %ebp
+ xorl %ebp, %edx
+ movl 12(%esp), %ebp
+ xorl %ebp, %edx
+ movl %esi, %ebp
+.byte 209
+.byte 194 /* roll $1 %edx */
+ xorl %eax, %ebp
+ movl %edx, 24(%esp)
+ xorl %ebx, %ebp
+ leal 1859775393(%edx,%ecx,1),%edx
+ movl %edi, %ecx
+ roll $5, %ecx
+.byte 209
+.byte 206 /* rorl $1 %esi */
+ addl %ebp, %ecx
+.byte 209
+.byte 206 /* rorl $1 %esi */
+ addl %ecx, %edx
+ /* 20_39 39 */
+ movl 28(%esp), %ecx
+ movl 36(%esp), %ebp
+ xorl %ebp, %ecx
+ movl 60(%esp), %ebp
+ xorl %ebp, %ecx
+ movl 16(%esp), %ebp
+ xorl %ebp, %ecx
+ movl %edi, %ebp
+.byte 209
+.byte 193 /* roll $1 %ecx */
+ xorl %esi, %ebp
+ movl %ecx, 28(%esp)
+ xorl %eax, %ebp
+ leal 1859775393(%ecx,%ebx,1),%ecx
+ movl %edx, %ebx
+ roll $5, %ebx
+.byte 209
+.byte 207 /* rorl $1 %edi */
+ addl %ebp, %ebx
+.byte 209
+.byte 207 /* rorl $1 %edi */
+ addl %ebx, %ecx
+ /* 40_59 40 */
+ movl 32(%esp), %ebx
+ movl 40(%esp), %ebp
+ xorl %ebp, %ebx
+ movl (%esp), %ebp
+ xorl %ebp, %ebx
+ movl 20(%esp), %ebp
+ xorl %ebp, %ebx
+ movl %edx, %ebp
+.byte 209
+.byte 195 /* roll $1 %ebx */
+ orl %edi, %ebp
+ movl %ebx, 32(%esp)
+ andl %esi, %ebp
+ leal 2400959708(%ebx,%eax,1),%ebx
+ movl %edx, %eax
+.byte 209
+.byte 202 /* rorl $1 %edx */
+ andl %edi, %eax
+ orl %eax, %ebp
+ movl %ecx, %eax
+ roll $5, %eax
+ addl %eax, %ebp
+ movl 36(%esp), %eax
+ addl %ebp, %ebx
+ movl 44(%esp), %ebp
+ xorl %ebp, %eax
+ movl 4(%esp), %ebp
+ xorl %ebp, %eax
+ movl 24(%esp), %ebp
+.byte 209
+.byte 202 /* rorl $1 %edx */
+ xorl %ebp, %eax
+.byte 209
+.byte 192 /* roll $1 %eax */
+ movl %ecx, %ebp
+ movl %eax, 36(%esp)
+ orl %edx, %ebp
+ leal 2400959708(%eax,%esi,1),%eax
+ movl %ecx, %esi
+ andl %edi, %ebp
+ andl %edx, %esi
+ orl %esi, %ebp
+ movl %ebx, %esi
+ roll $5, %esi
+.byte 209
+.byte 201 /* rorl $1 %ecx */
+ addl %esi, %ebp
+.byte 209
+.byte 201 /* rorl $1 %ecx */
+ addl %ebp, %eax
+ /* 40_59 41 */
+ /* 40_59 42 */
+ movl 40(%esp), %esi
+ movl 48(%esp), %ebp
+ xorl %ebp, %esi
+ movl 8(%esp), %ebp
+ xorl %ebp, %esi
+ movl 28(%esp), %ebp
+ xorl %ebp, %esi
+ movl %ebx, %ebp
+.byte 209
+.byte 198 /* roll $1 %esi */
+ orl %ecx, %ebp
+ movl %esi, 40(%esp)
+ andl %edx, %ebp
+ leal 2400959708(%esi,%edi,1),%esi
+ movl %ebx, %edi
+.byte 209
+.byte 203 /* rorl $1 %ebx */
+ andl %ecx, %edi
+ orl %edi, %ebp
+ movl %eax, %edi
+ roll $5, %edi
+ addl %edi, %ebp
+ movl 44(%esp), %edi
+ addl %ebp, %esi
+ movl 52(%esp), %ebp
+ xorl %ebp, %edi
+ movl 12(%esp), %ebp
+ xorl %ebp, %edi
+ movl 32(%esp), %ebp
+.byte 209
+.byte 203 /* rorl $1 %ebx */
+ xorl %ebp, %edi
+.byte 209
+.byte 199 /* roll $1 %edi */
+ movl %eax, %ebp
+ movl %edi, 44(%esp)
+ orl %ebx, %ebp
+ leal 2400959708(%edi,%edx,1),%edi
+ movl %eax, %edx
+ andl %ecx, %ebp
+ andl %ebx, %edx
+ orl %edx, %ebp
+ movl %esi, %edx
+ roll $5, %edx
+.byte 209
+.byte 200 /* rorl $1 %eax */
+ addl %edx, %ebp
+.byte 209
+.byte 200 /* rorl $1 %eax */
+ addl %ebp, %edi
+ /* 40_59 43 */
+ /* 40_59 44 */
+ movl 48(%esp), %edx
+ movl 56(%esp), %ebp
+ xorl %ebp, %edx
+ movl 16(%esp), %ebp
+ xorl %ebp, %edx
+ movl 36(%esp), %ebp
+ xorl %ebp, %edx
+ movl %esi, %ebp
+.byte 209
+.byte 194 /* roll $1 %edx */
+ orl %eax, %ebp
+ movl %edx, 48(%esp)
+ andl %ebx, %ebp
+ leal 2400959708(%edx,%ecx,1),%edx
+ movl %esi, %ecx
+.byte 209
+.byte 206 /* rorl $1 %esi */
+ andl %eax, %ecx
+ orl %ecx, %ebp
+ movl %edi, %ecx
+ roll $5, %ecx
+ addl %ecx, %ebp
+ movl 52(%esp), %ecx
+ addl %ebp, %edx
+ movl 60(%esp), %ebp
+ xorl %ebp, %ecx
+ movl 20(%esp), %ebp
+ xorl %ebp, %ecx
+ movl 40(%esp), %ebp
+.byte 209
+.byte 206 /* rorl $1 %esi */
+ xorl %ebp, %ecx
+.byte 209
+.byte 193 /* roll $1 %ecx */
+ movl %edi, %ebp
+ movl %ecx, 52(%esp)
+ orl %esi, %ebp
+ leal 2400959708(%ecx,%ebx,1),%ecx
+ movl %edi, %ebx
+ andl %eax, %ebp
+ andl %esi, %ebx
+ orl %ebx, %ebp
+ movl %edx, %ebx
+ roll $5, %ebx
+.byte 209
+.byte 207 /* rorl $1 %edi */
+ addl %ebx, %ebp
+.byte 209
+.byte 207 /* rorl $1 %edi */
+ addl %ebp, %ecx
+ /* 40_59 45 */
+ /* 40_59 46 */
+ movl 56(%esp), %ebx
+ movl (%esp), %ebp
+ xorl %ebp, %ebx
+ movl 24(%esp), %ebp
+ xorl %ebp, %ebx
+ movl 44(%esp), %ebp
+ xorl %ebp, %ebx
+ movl %edx, %ebp
+.byte 209
+.byte 195 /* roll $1 %ebx */
+ orl %edi, %ebp
+ movl %ebx, 56(%esp)
+ andl %esi, %ebp
+ leal 2400959708(%ebx,%eax,1),%ebx
+ movl %edx, %eax
+.byte 209
+.byte 202 /* rorl $1 %edx */
+ andl %edi, %eax
+ orl %eax, %ebp
+ movl %ecx, %eax
+ roll $5, %eax
+ addl %eax, %ebp
+ movl 60(%esp), %eax
+ addl %ebp, %ebx
+ movl 4(%esp), %ebp
+ xorl %ebp, %eax
+ movl 28(%esp), %ebp
+ xorl %ebp, %eax
+ movl 48(%esp), %ebp
+.byte 209
+.byte 202 /* rorl $1 %edx */
+ xorl %ebp, %eax
+.byte 209
+.byte 192 /* roll $1 %eax */
+ movl %ecx, %ebp
+ movl %eax, 60(%esp)
+ orl %edx, %ebp
+ leal 2400959708(%eax,%esi,1),%eax
+ movl %ecx, %esi
+ andl %edi, %ebp
+ andl %edx, %esi
+ orl %esi, %ebp
+ movl %ebx, %esi
+ roll $5, %esi
+.byte 209
+.byte 201 /* rorl $1 %ecx */
+ addl %esi, %ebp
+.byte 209
+.byte 201 /* rorl $1 %ecx */
+ addl %ebp, %eax
+ /* 40_59 47 */
+ /* 40_59 48 */
+ movl (%esp), %esi
+ movl 8(%esp), %ebp
+ xorl %ebp, %esi
+ movl 32(%esp), %ebp
+ xorl %ebp, %esi
+ movl 52(%esp), %ebp
+ xorl %ebp, %esi
+ movl %ebx, %ebp
+.byte 209
+.byte 198 /* roll $1 %esi */
+ orl %ecx, %ebp
+ movl %esi, (%esp)
+ andl %edx, %ebp
+ leal 2400959708(%esi,%edi,1),%esi
+ movl %ebx, %edi
+.byte 209
+.byte 203 /* rorl $1 %ebx */
+ andl %ecx, %edi
+ orl %edi, %ebp
+ movl %eax, %edi
+ roll $5, %edi
+ addl %edi, %ebp
+ movl 4(%esp), %edi
+ addl %ebp, %esi
+ movl 12(%esp), %ebp
+ xorl %ebp, %edi
+ movl 36(%esp), %ebp
+ xorl %ebp, %edi
+ movl 56(%esp), %ebp
+.byte 209
+.byte 203 /* rorl $1 %ebx */
+ xorl %ebp, %edi
+.byte 209
+.byte 199 /* roll $1 %edi */
+ movl %eax, %ebp
+ movl %edi, 4(%esp)
+ orl %ebx, %ebp
+ leal 2400959708(%edi,%edx,1),%edi
+ movl %eax, %edx
+ andl %ecx, %ebp
+ andl %ebx, %edx
+ orl %edx, %ebp
+ movl %esi, %edx
+ roll $5, %edx
+.byte 209
+.byte 200 /* rorl $1 %eax */
+ addl %edx, %ebp
+.byte 209
+.byte 200 /* rorl $1 %eax */
+ addl %ebp, %edi
+ /* 40_59 49 */
+ /* 40_59 50 */
+ movl 8(%esp), %edx
+ movl 16(%esp), %ebp
+ xorl %ebp, %edx
+ movl 40(%esp), %ebp
+ xorl %ebp, %edx
+ movl 60(%esp), %ebp
+ xorl %ebp, %edx
+ movl %esi, %ebp
+.byte 209
+.byte 194 /* roll $1 %edx */
+ orl %eax, %ebp
+ movl %edx, 8(%esp)
+ andl %ebx, %ebp
+ leal 2400959708(%edx,%ecx,1),%edx
+ movl %esi, %ecx
+.byte 209
+.byte 206 /* rorl $1 %esi */
+ andl %eax, %ecx
+ orl %ecx, %ebp
+ movl %edi, %ecx
+ roll $5, %ecx
+ addl %ecx, %ebp
+ movl 12(%esp), %ecx
+ addl %ebp, %edx
+ movl 20(%esp), %ebp
+ xorl %ebp, %ecx
+ movl 44(%esp), %ebp
+ xorl %ebp, %ecx
+ movl (%esp), %ebp
+.byte 209
+.byte 206 /* rorl $1 %esi */
+ xorl %ebp, %ecx
+.byte 209
+.byte 193 /* roll $1 %ecx */
+ movl %edi, %ebp
+ movl %ecx, 12(%esp)
+ orl %esi, %ebp
+ leal 2400959708(%ecx,%ebx,1),%ecx
+ movl %edi, %ebx
+ andl %eax, %ebp
+ andl %esi, %ebx
+ orl %ebx, %ebp
+ movl %edx, %ebx
+ roll $5, %ebx
+.byte 209
+.byte 207 /* rorl $1 %edi */
+ addl %ebx, %ebp
+.byte 209
+.byte 207 /* rorl $1 %edi */
+ addl %ebp, %ecx
+ /* 40_59 51 */
+ /* 40_59 52 */
+ movl 16(%esp), %ebx
+ movl 24(%esp), %ebp
+ xorl %ebp, %ebx
+ movl 48(%esp), %ebp
+ xorl %ebp, %ebx
+ movl 4(%esp), %ebp
+ xorl %ebp, %ebx
+ movl %edx, %ebp
+.byte 209
+.byte 195 /* roll $1 %ebx */
+ orl %edi, %ebp
+ movl %ebx, 16(%esp)
+ andl %esi, %ebp
+ leal 2400959708(%ebx,%eax,1),%ebx
+ movl %edx, %eax
+.byte 209
+.byte 202 /* rorl $1 %edx */
+ andl %edi, %eax
+ orl %eax, %ebp
+ movl %ecx, %eax
+ roll $5, %eax
+ addl %eax, %ebp
+ movl 20(%esp), %eax
+ addl %ebp, %ebx
+ movl 28(%esp), %ebp
+ xorl %ebp, %eax
+ movl 52(%esp), %ebp
+ xorl %ebp, %eax
+ movl 8(%esp), %ebp
+.byte 209
+.byte 202 /* rorl $1 %edx */
+ xorl %ebp, %eax
+.byte 209
+.byte 192 /* roll $1 %eax */
+ movl %ecx, %ebp
+ movl %eax, 20(%esp)
+ orl %edx, %ebp
+ leal 2400959708(%eax,%esi,1),%eax
+ movl %ecx, %esi
+ andl %edi, %ebp
+ andl %edx, %esi
+ orl %esi, %ebp
+ movl %ebx, %esi
+ roll $5, %esi
+.byte 209
+.byte 201 /* rorl $1 %ecx */
+ addl %esi, %ebp
+.byte 209
+.byte 201 /* rorl $1 %ecx */
+ addl %ebp, %eax
+ /* 40_59 53 */
+ /* 40_59 54 */
+ movl 24(%esp), %esi
+ movl 32(%esp), %ebp
+ xorl %ebp, %esi
+ movl 56(%esp), %ebp
+ xorl %ebp, %esi
+ movl 12(%esp), %ebp
+ xorl %ebp, %esi
+ movl %ebx, %ebp
+.byte 209
+.byte 198 /* roll $1 %esi */
+ orl %ecx, %ebp
+ movl %esi, 24(%esp)
+ andl %edx, %ebp
+ leal 2400959708(%esi,%edi,1),%esi
+ movl %ebx, %edi
+.byte 209
+.byte 203 /* rorl $1 %ebx */
+ andl %ecx, %edi
+ orl %edi, %ebp
+ movl %eax, %edi
+ roll $5, %edi
+ addl %edi, %ebp
+ movl 28(%esp), %edi
+ addl %ebp, %esi
+ movl 36(%esp), %ebp
+ xorl %ebp, %edi
+ movl 60(%esp), %ebp
+ xorl %ebp, %edi
+ movl 16(%esp), %ebp
+.byte 209
+.byte 203 /* rorl $1 %ebx */
+ xorl %ebp, %edi
+.byte 209
+.byte 199 /* roll $1 %edi */
+ movl %eax, %ebp
+ movl %edi, 28(%esp)
+ orl %ebx, %ebp
+ leal 2400959708(%edi,%edx,1),%edi
+ movl %eax, %edx
+ andl %ecx, %ebp
+ andl %ebx, %edx
+ orl %edx, %ebp
+ movl %esi, %edx
+ roll $5, %edx
+.byte 209
+.byte 200 /* rorl $1 %eax */
+ addl %edx, %ebp
+.byte 209
+.byte 200 /* rorl $1 %eax */
+ addl %ebp, %edi
+ /* 40_59 55 */
+ /* 40_59 56 */
+ movl 32(%esp), %edx
+ movl 40(%esp), %ebp
+ xorl %ebp, %edx
+ movl (%esp), %ebp
+ xorl %ebp, %edx
+ movl 20(%esp), %ebp
+ xorl %ebp, %edx
+ movl %esi, %ebp
+.byte 209
+.byte 194 /* roll $1 %edx */
+ orl %eax, %ebp
+ movl %edx, 32(%esp)
+ andl %ebx, %ebp
+ leal 2400959708(%edx,%ecx,1),%edx
+ movl %esi, %ecx
+.byte 209
+.byte 206 /* rorl $1 %esi */
+ andl %eax, %ecx
+ orl %ecx, %ebp
+ movl %edi, %ecx
+ roll $5, %ecx
+ addl %ecx, %ebp
+ movl 36(%esp), %ecx
+ addl %ebp, %edx
+ movl 44(%esp), %ebp
+ xorl %ebp, %ecx
+ movl 4(%esp), %ebp
+ xorl %ebp, %ecx
+ movl 24(%esp), %ebp
+.byte 209
+.byte 206 /* rorl $1 %esi */
+ xorl %ebp, %ecx
+.byte 209
+.byte 193 /* roll $1 %ecx */
+ movl %edi, %ebp
+ movl %ecx, 36(%esp)
+ orl %esi, %ebp
+ leal 2400959708(%ecx,%ebx,1),%ecx
+ movl %edi, %ebx
+ andl %eax, %ebp
+ andl %esi, %ebx
+ orl %ebx, %ebp
+ movl %edx, %ebx
+ roll $5, %ebx
+.byte 209
+.byte 207 /* rorl $1 %edi */
+ addl %ebx, %ebp
+.byte 209
+.byte 207 /* rorl $1 %edi */
+ addl %ebp, %ecx
+ /* 40_59 57 */
+ /* 40_59 58 */
+ movl 40(%esp), %ebx
+ movl 48(%esp), %ebp
+ xorl %ebp, %ebx
+ movl 8(%esp), %ebp
+ xorl %ebp, %ebx
+ movl 28(%esp), %ebp
+ xorl %ebp, %ebx
+ movl %edx, %ebp
+.byte 209
+.byte 195 /* roll $1 %ebx */
+ orl %edi, %ebp
+ movl %ebx, 40(%esp)
+ andl %esi, %ebp
+ leal 2400959708(%ebx,%eax,1),%ebx
+ movl %edx, %eax
+.byte 209
+.byte 202 /* rorl $1 %edx */
+ andl %edi, %eax
+ orl %eax, %ebp
+ movl %ecx, %eax
+ roll $5, %eax
+ addl %eax, %ebp
+ movl 44(%esp), %eax
+ addl %ebp, %ebx
+ movl 52(%esp), %ebp
+ xorl %ebp, %eax
+ movl 12(%esp), %ebp
+ xorl %ebp, %eax
+ movl 32(%esp), %ebp
+.byte 209
+.byte 202 /* rorl $1 %edx */
+ xorl %ebp, %eax
+.byte 209
+.byte 192 /* roll $1 %eax */
+ movl %ecx, %ebp
+ movl %eax, 44(%esp)
+ orl %edx, %ebp
+ leal 2400959708(%eax,%esi,1),%eax
+ movl %ecx, %esi
+ andl %edi, %ebp
+ andl %edx, %esi
+ orl %esi, %ebp
+ movl %ebx, %esi
+ roll $5, %esi
+.byte 209
+.byte 201 /* rorl $1 %ecx */
+ addl %esi, %ebp
+.byte 209
+.byte 201 /* rorl $1 %ecx */
+ addl %ebp, %eax
+ /* 40_59 59 */
+ /* 20_39 60 */
+ movl 48(%esp), %esi
+ movl 56(%esp), %ebp
+ xorl %ebp, %esi
+ movl 16(%esp), %ebp
+ xorl %ebp, %esi
+ movl 36(%esp), %ebp
+ xorl %ebp, %esi
+ movl %ebx, %ebp
+.byte 209
+.byte 198 /* roll $1 %esi */
+ xorl %ecx, %ebp
+ movl %esi, 48(%esp)
+ xorl %edx, %ebp
+ leal 3395469782(%esi,%edi,1),%esi
+ movl %eax, %edi
+ roll $5, %edi
+.byte 209
+.byte 203 /* rorl $1 %ebx */
+ addl %ebp, %edi
+.byte 209
+.byte 203 /* rorl $1 %ebx */
+ addl %edi, %esi
+ /* 20_39 61 */
+ movl 52(%esp), %edi
+ movl 60(%esp), %ebp
+ xorl %ebp, %edi
+ movl 20(%esp), %ebp
+ xorl %ebp, %edi
+ movl 40(%esp), %ebp
+ xorl %ebp, %edi
+ movl %eax, %ebp
+.byte 209
+.byte 199 /* roll $1 %edi */
+ xorl %ebx, %ebp
+ movl %edi, 52(%esp)
+ xorl %ecx, %ebp
+ leal 3395469782(%edi,%edx,1),%edi
+ movl %esi, %edx
+ roll $5, %edx
+.byte 209
+.byte 200 /* rorl $1 %eax */
+ addl %ebp, %edx
+.byte 209
+.byte 200 /* rorl $1 %eax */
+ addl %edx, %edi
+ /* 20_39 62 */
+ movl 56(%esp), %edx
+ movl (%esp), %ebp
+ xorl %ebp, %edx
+ movl 24(%esp), %ebp
+ xorl %ebp, %edx
+ movl 44(%esp), %ebp
+ xorl %ebp, %edx
+ movl %esi, %ebp
+.byte 209
+.byte 194 /* roll $1 %edx */
+ xorl %eax, %ebp
+ movl %edx, 56(%esp)
+ xorl %ebx, %ebp
+ leal 3395469782(%edx,%ecx,1),%edx
+ movl %edi, %ecx
+ roll $5, %ecx
+.byte 209
+.byte 206 /* rorl $1 %esi */
+ addl %ebp, %ecx
+.byte 209
+.byte 206 /* rorl $1 %esi */
+ addl %ecx, %edx
+ /* 20_39 63 */
+ movl 60(%esp), %ecx
+ movl 4(%esp), %ebp
+ xorl %ebp, %ecx
+ movl 28(%esp), %ebp
+ xorl %ebp, %ecx
+ movl 48(%esp), %ebp
+ xorl %ebp, %ecx
+ movl %edi, %ebp
+.byte 209
+.byte 193 /* roll $1 %ecx */
+ xorl %esi, %ebp
+ movl %ecx, 60(%esp)
+ xorl %eax, %ebp
+ leal 3395469782(%ecx,%ebx,1),%ecx
+ movl %edx, %ebx
+ roll $5, %ebx
+.byte 209
+.byte 207 /* rorl $1 %edi */
+ addl %ebp, %ebx
+.byte 209
+.byte 207 /* rorl $1 %edi */
+ addl %ebx, %ecx
+ /* 20_39 64 */
+ movl (%esp), %ebx
+ movl 8(%esp), %ebp
+ xorl %ebp, %ebx
+ movl 32(%esp), %ebp
+ xorl %ebp, %ebx
+ movl 52(%esp), %ebp
+ xorl %ebp, %ebx
+ movl %edx, %ebp
+.byte 209
+.byte 195 /* roll $1 %ebx */
+ xorl %edi, %ebp
+ movl %ebx, (%esp)
+ xorl %esi, %ebp
+ leal 3395469782(%ebx,%eax,1),%ebx
+ movl %ecx, %eax
+ roll $5, %eax
+.byte 209
+.byte 202 /* rorl $1 %edx */
+ addl %ebp, %eax
+.byte 209
+.byte 202 /* rorl $1 %edx */
+ addl %eax, %ebx
+ /* 20_39 65 */
+ movl 4(%esp), %eax
+ movl 12(%esp), %ebp
+ xorl %ebp, %eax
+ movl 36(%esp), %ebp
+ xorl %ebp, %eax
+ movl 56(%esp), %ebp
+ xorl %ebp, %eax
+ movl %ecx, %ebp
+.byte 209
+.byte 192 /* roll $1 %eax */
+ xorl %edx, %ebp
+ movl %eax, 4(%esp)
+ xorl %edi, %ebp
+ leal 3395469782(%eax,%esi,1),%eax
+ movl %ebx, %esi
+ roll $5, %esi
+.byte 209
+.byte 201 /* rorl $1 %ecx */
+ addl %ebp, %esi
+.byte 209
+.byte 201 /* rorl $1 %ecx */
+ addl %esi, %eax
+ /* 20_39 66 */
+ movl 8(%esp), %esi
+ movl 16(%esp), %ebp
+ xorl %ebp, %esi
+ movl 40(%esp), %ebp
+ xorl %ebp, %esi
+ movl 60(%esp), %ebp
+ xorl %ebp, %esi
+ movl %ebx, %ebp
+.byte 209
+.byte 198 /* roll $1 %esi */
+ xorl %ecx, %ebp
+ movl %esi, 8(%esp)
+ xorl %edx, %ebp
+ leal 3395469782(%esi,%edi,1),%esi
+ movl %eax, %edi
+ roll $5, %edi
+.byte 209
+.byte 203 /* rorl $1 %ebx */
+ addl %ebp, %edi
+.byte 209
+.byte 203 /* rorl $1 %ebx */
+ addl %edi, %esi
+ /* 20_39 67 */
+ movl 12(%esp), %edi
+ movl 20(%esp), %ebp
+ xorl %ebp, %edi
+ movl 44(%esp), %ebp
+ xorl %ebp, %edi
+ movl (%esp), %ebp
+ xorl %ebp, %edi
+ movl %eax, %ebp
+.byte 209
+.byte 199 /* roll $1 %edi */
+ xorl %ebx, %ebp
+ movl %edi, 12(%esp)
+ xorl %ecx, %ebp
+ leal 3395469782(%edi,%edx,1),%edi
+ movl %esi, %edx
+ roll $5, %edx
+.byte 209
+.byte 200 /* rorl $1 %eax */
+ addl %ebp, %edx
+.byte 209
+.byte 200 /* rorl $1 %eax */
+ addl %edx, %edi
+ /* 20_39 68 */
+ movl 16(%esp), %edx
+ movl 24(%esp), %ebp
+ xorl %ebp, %edx
+ movl 48(%esp), %ebp
+ xorl %ebp, %edx
+ movl 4(%esp), %ebp
+ xorl %ebp, %edx
+ movl %esi, %ebp
+.byte 209
+.byte 194 /* roll $1 %edx */
+ xorl %eax, %ebp
+ movl %edx, 16(%esp)
+ xorl %ebx, %ebp
+ leal 3395469782(%edx,%ecx,1),%edx
+ movl %edi, %ecx
+ roll $5, %ecx
+.byte 209
+.byte 206 /* rorl $1 %esi */
+ addl %ebp, %ecx
+.byte 209
+.byte 206 /* rorl $1 %esi */
+ addl %ecx, %edx
+ /* 20_39 69 */
+ movl 20(%esp), %ecx
+ movl 28(%esp), %ebp
+ xorl %ebp, %ecx
+ movl 52(%esp), %ebp
+ xorl %ebp, %ecx
+ movl 8(%esp), %ebp
+ xorl %ebp, %ecx
+ movl %edi, %ebp
+.byte 209
+.byte 193 /* roll $1 %ecx */
+ xorl %esi, %ebp
+ movl %ecx, 20(%esp)
+ xorl %eax, %ebp
+ leal 3395469782(%ecx,%ebx,1),%ecx
+ movl %edx, %ebx
+ roll $5, %ebx
+.byte 209
+.byte 207 /* rorl $1 %edi */
+ addl %ebp, %ebx
+.byte 209
+.byte 207 /* rorl $1 %edi */
+ addl %ebx, %ecx
+ /* 20_39 70 */
+ movl 24(%esp), %ebx
+ movl 32(%esp), %ebp
+ xorl %ebp, %ebx
+ movl 56(%esp), %ebp
+ xorl %ebp, %ebx
+ movl 12(%esp), %ebp
+ xorl %ebp, %ebx
+ movl %edx, %ebp
+.byte 209
+.byte 195 /* roll $1 %ebx */
+ xorl %edi, %ebp
+ movl %ebx, 24(%esp)
+ xorl %esi, %ebp
+ leal 3395469782(%ebx,%eax,1),%ebx
+ movl %ecx, %eax
+ roll $5, %eax
+.byte 209
+.byte 202 /* rorl $1 %edx */
+ addl %ebp, %eax
+.byte 209
+.byte 202 /* rorl $1 %edx */
+ addl %eax, %ebx
+ /* 20_39 71 */
+ movl 28(%esp), %eax
+ movl 36(%esp), %ebp
+ xorl %ebp, %eax
+ movl 60(%esp), %ebp
+ xorl %ebp, %eax
+ movl 16(%esp), %ebp
+ xorl %ebp, %eax
+ movl %ecx, %ebp
+.byte 209
+.byte 192 /* roll $1 %eax */
+ xorl %edx, %ebp
+ movl %eax, 28(%esp)
+ xorl %edi, %ebp
+ leal 3395469782(%eax,%esi,1),%eax
+ movl %ebx, %esi
+ roll $5, %esi
+.byte 209
+.byte 201 /* rorl $1 %ecx */
+ addl %ebp, %esi
+.byte 209
+.byte 201 /* rorl $1 %ecx */
+ addl %esi, %eax
+ /* 20_39 72 */
+ movl 32(%esp), %esi
+ movl 40(%esp), %ebp
+ xorl %ebp, %esi
+ movl (%esp), %ebp
+ xorl %ebp, %esi
+ movl 20(%esp), %ebp
+ xorl %ebp, %esi
+ movl %ebx, %ebp
+.byte 209
+.byte 198 /* roll $1 %esi */
+ xorl %ecx, %ebp
+ movl %esi, 32(%esp)
+ xorl %edx, %ebp
+ leal 3395469782(%esi,%edi,1),%esi
+ movl %eax, %edi
+ roll $5, %edi
+.byte 209
+.byte 203 /* rorl $1 %ebx */
+ addl %ebp, %edi
+.byte 209
+.byte 203 /* rorl $1 %ebx */
+ addl %edi, %esi
+ /* 20_39 73 */
+ movl 36(%esp), %edi
+ movl 44(%esp), %ebp
+ xorl %ebp, %edi
+ movl 4(%esp), %ebp
+ xorl %ebp, %edi
+ movl 24(%esp), %ebp
+ xorl %ebp, %edi
+ movl %eax, %ebp
+.byte 209
+.byte 199 /* roll $1 %edi */
+ xorl %ebx, %ebp
+ movl %edi, 36(%esp)
+ xorl %ecx, %ebp
+ leal 3395469782(%edi,%edx,1),%edi
+ movl %esi, %edx
+ roll $5, %edx
+.byte 209
+.byte 200 /* rorl $1 %eax */
+ addl %ebp, %edx
+.byte 209
+.byte 200 /* rorl $1 %eax */
+ addl %edx, %edi
+ /* 20_39 74 */
+ movl 40(%esp), %edx
+ movl 48(%esp), %ebp
+ xorl %ebp, %edx
+ movl 8(%esp), %ebp
+ xorl %ebp, %edx
+ movl 28(%esp), %ebp
+ xorl %ebp, %edx
+ movl %esi, %ebp
+.byte 209
+.byte 194 /* roll $1 %edx */
+ xorl %eax, %ebp
+ movl %edx, 40(%esp)
+ xorl %ebx, %ebp
+ leal 3395469782(%edx,%ecx,1),%edx
+ movl %edi, %ecx
+ roll $5, %ecx
+.byte 209
+.byte 206 /* rorl $1 %esi */
+ addl %ebp, %ecx
+.byte 209
+.byte 206 /* rorl $1 %esi */
+ addl %ecx, %edx
+ /* 20_39 75 */
+ movl 44(%esp), %ecx
+ movl 52(%esp), %ebp
+ xorl %ebp, %ecx
+ movl 12(%esp), %ebp
+ xorl %ebp, %ecx
+ movl 32(%esp), %ebp
+ xorl %ebp, %ecx
+ movl %edi, %ebp
+.byte 209
+.byte 193 /* roll $1 %ecx */
+ xorl %esi, %ebp
+ movl %ecx, 44(%esp)
+ xorl %eax, %ebp
+ leal 3395469782(%ecx,%ebx,1),%ecx
+ movl %edx, %ebx
+ roll $5, %ebx
+.byte 209
+.byte 207 /* rorl $1 %edi */
+ addl %ebp, %ebx
+.byte 209
+.byte 207 /* rorl $1 %edi */
+ addl %ebx, %ecx
+ /* 20_39 76 */
+ movl 48(%esp), %ebx
+ movl 56(%esp), %ebp
+ xorl %ebp, %ebx
+ movl 16(%esp), %ebp
+ xorl %ebp, %ebx
+ movl 36(%esp), %ebp
+ xorl %ebp, %ebx
+ movl %edx, %ebp
+.byte 209
+.byte 195 /* roll $1 %ebx */
+ xorl %edi, %ebp
+ movl %ebx, 48(%esp)
+ xorl %esi, %ebp
+ leal 3395469782(%ebx,%eax,1),%ebx
+ movl %ecx, %eax
+ roll $5, %eax
+.byte 209
+.byte 202 /* rorl $1 %edx */
+ addl %ebp, %eax
+.byte 209
+.byte 202 /* rorl $1 %edx */
+ addl %eax, %ebx
+ /* 20_39 77 */
+ movl 52(%esp), %eax
+ movl 60(%esp), %ebp
+ xorl %ebp, %eax
+ movl 20(%esp), %ebp
+ xorl %ebp, %eax
+ movl 40(%esp), %ebp
+ xorl %ebp, %eax
+ movl %ecx, %ebp
+.byte 209
+.byte 192 /* roll $1 %eax */
+ xorl %edx, %ebp
+ movl %eax, 52(%esp)
+ xorl %edi, %ebp
+ leal 3395469782(%eax,%esi,1),%eax
+ movl %ebx, %esi
+ roll $5, %esi
+.byte 209
+.byte 201 /* rorl $1 %ecx */
+ addl %ebp, %esi
+.byte 209
+.byte 201 /* rorl $1 %ecx */
+ addl %esi, %eax
+ /* 20_39 78 */
+ movl 56(%esp), %esi
+ movl (%esp), %ebp
+ xorl %ebp, %esi
+ movl 24(%esp), %ebp
+ xorl %ebp, %esi
+ movl 44(%esp), %ebp
+ xorl %ebp, %esi
+ movl %ebx, %ebp
+.byte 209
+.byte 198 /* roll $1 %esi */
+ xorl %ecx, %ebp
+ movl %esi, 56(%esp)
+ xorl %edx, %ebp
+ leal 3395469782(%esi,%edi,1),%esi
+ movl %eax, %edi
+ roll $5, %edi
+.byte 209
+.byte 203 /* rorl $1 %ebx */
+ addl %ebp, %edi
+.byte 209
+.byte 203 /* rorl $1 %ebx */
+ addl %edi, %esi
+ /* 20_39 79 */
+ movl 60(%esp), %edi
+ movl 4(%esp), %ebp
+ xorl %ebp, %edi
+ movl 28(%esp), %ebp
+ xorl %ebp, %edi
+ movl 48(%esp), %ebp
+ xorl %ebp, %edi
+ movl %eax, %ebp
+.byte 209
+.byte 199 /* roll $1 %edi */
+ xorl %ebx, %ebp
+ movl %edi, 60(%esp)
+ xorl %ecx, %ebp
+ leal 3395469782(%edi,%edx,1),%edi
+ movl %esi, %edx
+ roll $5, %edx
+ addl %ebp, %edx
+ movl 92(%esp), %ebp
+.byte 209
+.byte 200 /* rorl $1 %eax */
+ addl %edx, %edi
+.byte 209
+.byte 200 /* rorl $1 %eax */
+ /* End processing */
+
+ movl 12(%ebp), %edx
+ addl %ebx, %edx
+ movl 4(%ebp), %ebx
+ addl %esi, %ebx
+ movl %eax, %esi
+ movl (%ebp), %eax
+ movl %edx, 12(%ebp)
+ addl %edi, %eax
+ movl 16(%ebp), %edi
+ addl %ecx, %edi
+ movl 8(%ebp), %ecx
+ addl %esi, %ecx
+ movl %eax, (%ebp)
+ movl 64(%esp), %esi
+ movl %ecx, 8(%ebp)
+ addl $64, %esi
+ movl 68(%esp), %eax
+ movl %edi, 16(%ebp)
+ cmpl %esi, %eax
+ movl %ebx, 4(%ebp)
+ jl .L001end
+ movl (%esi), %eax
+ jmp .L000start
+.L001end:
+ addl $72, %esp
+ popl %edi
+ popl %ebx
+ popl %ebp
+ popl %esi
+ ret
+.sha1_block_x86_end:
+ SIZE(sha1_block_x86,.sha1_block_x86_end-sha1_block_x86)
+.ident "desasm.pl"
diff --git a/crypto/sha/sha.c b/crypto/sha/sha.c
index 2fcd9636ee..713fec3610 100644
--- a/crypto/sha/sha.c
+++ b/crypto/sha/sha.c
@@ -1,5 +1,5 @@
/* crypto/sha/sha.c */
-/* Copyright (C) 1995-1997 Eric Young (eay@cryptsoft.com)
+/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
* All rights reserved.
*
* This package is an SSL implementation written
diff --git a/crypto/sha/sha.h b/crypto/sha/sha.h
index 9e22fa87c0..4cf0ea0225 100644
--- a/crypto/sha/sha.h
+++ b/crypto/sha/sha.h
@@ -1,5 +1,5 @@
/* crypto/sha/sha.h */
-/* Copyright (C) 1995-1997 Eric Young (eay@cryptsoft.com)
+/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
* All rights reserved.
*
* This package is an SSL implementation written
@@ -83,19 +83,23 @@ void SHA_Init(SHA_CTX *c);
void SHA_Update(SHA_CTX *c, unsigned char *data, unsigned long len);
void SHA_Final(unsigned char *md, SHA_CTX *c);
unsigned char *SHA(unsigned char *d, unsigned long n,unsigned char *md);
+void SHA_Transform(SHA_CTX *c, unsigned char *data);
void SHA1_Init(SHA_CTX *c);
void SHA1_Update(SHA_CTX *c, unsigned char *data, unsigned long len);
void SHA1_Final(unsigned char *md, SHA_CTX *c);
unsigned char *SHA1(unsigned char *d, unsigned long n,unsigned char *md);
+void SHA1_Transform(SHA_CTX *c, unsigned char *data);
#else
void SHA_Init();
void SHA_Update();
void SHA_Final();
unsigned char *SHA();
+void SHA_Transform();
void SHA1_Init();
void SHA1_Update();
void SHA1_Final();
unsigned char *SHA1();
+void SHA1_Transform();
#endif
#ifdef __cplusplus
diff --git a/crypto/sha/sha1.c b/crypto/sha/sha1.c
new file mode 100644
index 0000000000..a4739ac9fd
--- /dev/null
+++ b/crypto/sha/sha1.c
@@ -0,0 +1,135 @@
+/* crypto/sha/sha1.c */
+/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
+ * All rights reserved.
+ *
+ * This package is an SSL implementation written
+ * by Eric Young (eay@cryptsoft.com).
+ * The implementation was written so as to conform with Netscapes SSL.
+ *
+ * This library is free for commercial and non-commercial use as long as
+ * the following conditions are aheared to. The following conditions
+ * apply to all code found in this distribution, be it the RC4, RSA,
+ * lhash, DES, etc., code; not just the SSL code. The SSL documentation
+ * included with this distribution is covered by the same copyright terms
+ * except that the holder is Tim Hudson (tjh@cryptsoft.com).
+ *
+ * Copyright remains Eric Young's, and as such any Copyright notices in
+ * the code are not to be removed.
+ * If this package is used in a product, Eric Young should be given attribution
+ * as the author of the parts of the library used.
+ * This can be in the form of a textual message at program startup or
+ * in documentation (online or textual) provided with the package.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * "This product includes cryptographic software written by
+ * Eric Young (eay@cryptsoft.com)"
+ * The word 'cryptographic' can be left out if the rouines from the library
+ * being used are not cryptographic related :-).
+ * 4. If you include any Windows specific code (or a derivative thereof) from
+ * the apps directory (application code) you must include an acknowledgement:
+ * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * The licence and distribution terms for any publically available version or
+ * derivative of this code cannot be changed. i.e. this code cannot simply be
+ * copied and put under another distribution licence
+ * [including the GNU Public Licence.]
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include "sha.h"
+
+#define BUFSIZE 1024*16
+
+#ifndef NOPROTO
+void do_fp(FILE *f);
+void pt(unsigned char *md);
+int read(int, void *, unsigned int);
+#else
+void do_fp();
+void pt();
+int read();
+#endif
+
+int main(argc, argv)
+int argc;
+char **argv;
+ {
+ int i,err=0;
+ FILE *IN;
+
+ if (argc == 1)
+ {
+ do_fp(stdin);
+ }
+ else
+ {
+ for (i=1; i<argc; i++)
+ {
+ IN=fopen(argv[i],"r");
+ if (IN == NULL)
+ {
+ perror(argv[i]);
+ err++;
+ continue;
+ }
+ printf("SHA1(%s)= ",argv[i]);
+ do_fp(IN);
+ fclose(IN);
+ }
+ }
+ exit(err);
+ }
+
+void do_fp(f)
+FILE *f;
+ {
+ SHA_CTX c;
+ unsigned char md[SHA_DIGEST_LENGTH];
+ int fd;
+ int i;
+ unsigned char buf[BUFSIZE];
+
+ fd=fileno(f);
+ SHA1_Init(&c);
+ for (;;)
+ {
+ i=read(fd,buf,BUFSIZE);
+ if (i <= 0) break;
+ SHA1_Update(&c,buf,(unsigned long)i);
+ }
+ SHA1_Final(&(md[0]),&c);
+ pt(md);
+ }
+
+void pt(md)
+unsigned char *md;
+ {
+ int i;
+
+ for (i=0; i<SHA_DIGEST_LENGTH; i++)
+ printf("%02x",md[i]);
+ printf("\n");
+ }
+
diff --git a/crypto/sha/sha1_one.c b/crypto/sha/sha1_one.c
index cf381fa393..fe5770d601 100644
--- a/crypto/sha/sha1_one.c
+++ b/crypto/sha/sha1_one.c
@@ -1,5 +1,5 @@
/* crypto/sha/sha1_one.c */
-/* Copyright (C) 1995-1997 Eric Young (eay@cryptsoft.com)
+/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
* All rights reserved.
*
* This package is an SSL implementation written
@@ -57,7 +57,7 @@
*/
#include <stdio.h>
-#include "cryptlib.h"
+#include <string.h>
#include "sha.h"
unsigned char *SHA1(d, n, md)
diff --git a/crypto/sha/sha1dgst.c b/crypto/sha/sha1dgst.c
index 1a181263ad..2b0ae1f0d4 100644
--- a/crypto/sha/sha1dgst.c
+++ b/crypto/sha/sha1dgst.c
@@ -1,5 +1,5 @@
/* crypto/sha/sha1dgst.c */
-/* Copyright (C) 1995-1997 Eric Young (eay@cryptsoft.com)
+/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
* All rights reserved.
*
* This package is an SSL implementation written
@@ -57,12 +57,13 @@
*/
#include <stdio.h>
+#include <string.h>
#undef SHA_0
#define SHA_1
#include "sha.h"
#include "sha_locl.h"
-char *SHA1_version="SHA1 part of SSLeay 0.8.1b 29-Jun-1998";
+char *SHA1_version="SHA1 part of SSLeay 0.9.0b 29-Jun-1998";
/* Implemented from SHA-1 document - The Secure Hash Algorithm
*/
@@ -79,9 +80,34 @@ char *SHA1_version="SHA1 part of SSLeay 0.8.1b 29-Jun-1998";
#define K_60_79 0xca62c1d6L
#ifndef NOPROTO
-static void sha1_block(SHA_CTX *c, register unsigned long *p);
+# ifdef SHA1_ASM
+ void sha1_block_x86(SHA_CTX *c, register unsigned long *p, int num);
+# define sha1_block sha1_block_x86
+# else
+ void sha1_block(SHA_CTX *c, register unsigned long *p, int num);
+# endif
#else
-static void sha1_block();
+# ifdef SHA1_ASM
+ void sha1_block_x86();
+# define sha1_block sha1_block_x86
+# else
+ void sha1_block();
+# endif
+#endif
+
+
+#if defined(L_ENDIAN) && defined(SHA1_ASM)
+# define M_c2nl c2l
+# define M_p_c2nl p_c2l
+# define M_c2nl_p c2l_p
+# define M_p_c2nl_p p_c2l_p
+# define M_nl2c l2c
+#else
+# define M_c2nl c2nl
+# define M_p_c2nl p_c2nl
+# define M_c2nl_p c2nl_p
+# define M_p_c2nl_p p_c2nl_p
+# define M_nl2c nl2c
#endif
void SHA1_Init(c)
@@ -108,7 +134,7 @@ unsigned long len;
if (len == 0) return;
- l=(c->Nl+(len<<3))&0xffffffff;
+ l=(c->Nl+(len<<3))&0xffffffffL;
if (l < c->Nl) /* overflow */
c->Nh++;
c->Nh+=(len>>29);
@@ -123,16 +149,16 @@ unsigned long len;
if ((c->num+len) >= SHA_CBLOCK)
{
l= p[sw];
- p_c2nl(data,l,sc);
+ M_p_c2nl(data,l,sc);
p[sw++]=l;
for (; sw<SHA_LBLOCK; sw++)
{
- c2nl(data,l);
+ M_c2nl(data,l);
p[sw]=l;
}
len-=(SHA_CBLOCK-c->num);
- sha1_block(c,p);
+ sha1_block(c,p,64);
c->num=0;
/* drop through and do the rest */
}
@@ -142,7 +168,7 @@ unsigned long len;
if ((sc+len) < 4) /* ugly, add char's to a word */
{
l= p[sw];
- p_c2nl_p(data,l,sc,len);
+ M_p_c2nl_p(data,l,sc,len);
p[sw]=l;
}
else
@@ -150,28 +176,51 @@ unsigned long len;
ew=(c->num>>2);
ec=(c->num&0x03);
l= p[sw];
- p_c2nl(data,l,sc);
+ M_p_c2nl(data,l,sc);
p[sw++]=l;
for (; sw < ew; sw++)
- { c2nl(data,l); p[sw]=l; }
+ { M_c2nl(data,l); p[sw]=l; }
if (ec)
{
- c2nl_p(data,l,ec);
+ M_c2nl_p(data,l,ec);
p[sw]=l;
}
}
return;
}
}
+ /* We can only do the following code for assember, the reason
+ * being that the sha1_block 'C' version changes the values
+ * in the 'data' array. The assember code avoids this and
+ * copies it to a local array. I should be able to do this for
+ * the C version as well....
+ */
+#if 1
+#if defined(B_ENDIAN) || defined(SHA1_ASM)
+ if ((((unsigned int)data)%sizeof(ULONG)) == 0)
+ {
+ sw=len/SHA_CBLOCK;
+ if (sw)
+ {
+ sw*=SHA_CBLOCK;
+ sha1_block(c,(ULONG *)data,sw);
+ data+=sw;
+ len-=sw;
+ }
+ }
+#endif
+#endif
/* we now can process the input data in blocks of SHA_CBLOCK
* chars and save the leftovers to c->data. */
p=c->data;
while (len >= SHA_CBLOCK)
{
#if defined(B_ENDIAN) || defined(L_ENDIAN)
- memcpy(p,data,SHA_CBLOCK);
+ if (p != (unsigned long *)data)
+ memcpy(p,data,SHA_CBLOCK);
data+=SHA_CBLOCK;
-#ifdef L_ENDIAN
+# ifdef L_ENDIAN
+# ifndef SHA1_ASM /* Will not happen */
for (sw=(SHA_LBLOCK/4); sw; sw--)
{
Endian_Reverse32(p[0]);
@@ -180,18 +229,20 @@ unsigned long len;
Endian_Reverse32(p[3]);
p+=4;
}
-#endif
+ p=c->data;
+# endif
+# endif
#else
for (sw=(SHA_BLOCK/4); sw; sw--)
{
- c2nl(data,l); *(p++)=l;
- c2nl(data,l); *(p++)=l;
- c2nl(data,l); *(p++)=l;
- c2nl(data,l); *(p++)=l;
+ M_c2nl(data,l); *(p++)=l;
+ M_c2nl(data,l); *(p++)=l;
+ M_c2nl(data,l); *(p++)=l;
+ M_c2nl(data,l); *(p++)=l;
}
-#endif
p=c->data;
- sha1_block(c,p);
+#endif
+ sha1_block(c,p,64);
len-=SHA_CBLOCK;
}
ec=(int)len;
@@ -200,16 +251,158 @@ unsigned long len;
ec&=0x03;
for (sw=0; sw < ew; sw++)
- { c2nl(data,l); p[sw]=l; }
- c2nl_p(data,l,ec);
+ { M_c2nl(data,l); p[sw]=l; }
+ M_c2nl_p(data,l,ec);
p[sw]=l;
}
-static void sha1_block(c, X)
+void SHA1_Transform(c,b)
SHA_CTX *c;
-register unsigned long *X;
+unsigned char *b;
+ {
+ ULONG p[16];
+#ifndef B_ENDIAN
+ ULONG *q;
+ int i;
+#endif
+
+#if defined(B_ENDIAN) || defined(L_ENDIAN)
+ memcpy(p,b,64);
+#ifdef L_ENDIAN
+ q=p;
+ for (i=(SHA_LBLOCK/4); i; i--)
+ {
+ Endian_Reverse32(q[0]);
+ Endian_Reverse32(q[1]);
+ Endian_Reverse32(q[2]);
+ Endian_Reverse32(q[3]);
+ q+=4;
+ }
+#endif
+#else
+ q=p;
+ for (i=(SHA_LBLOCK/4); i; i--)
+ {
+ ULONG l;
+ c2nl(b,l); *(q++)=l;
+ c2nl(b,l); *(q++)=l;
+ c2nl(b,l); *(q++)=l;
+ c2nl(b,l); *(q++)=l;
+ }
+#endif
+ sha1_block(c,p,64);
+ }
+
+#ifndef SHA1_ASM
+
+void sha1_block(c, W, num)
+SHA_CTX *c;
+register unsigned long *W;
+int num;
{
register ULONG A,B,C,D,E,T;
+ ULONG X[16];
+
+ A=c->h0;
+ B=c->h1;
+ C=c->h2;
+ D=c->h3;
+ E=c->h4;
+
+ for (;;)
+ {
+ BODY_00_15( 0,A,B,C,D,E,T,W);
+ BODY_00_15( 1,T,A,B,C,D,E,W);
+ BODY_00_15( 2,E,T,A,B,C,D,W);
+ BODY_00_15( 3,D,E,T,A,B,C,W);
+ BODY_00_15( 4,C,D,E,T,A,B,W);
+ BODY_00_15( 5,B,C,D,E,T,A,W);
+ BODY_00_15( 6,A,B,C,D,E,T,W);
+ BODY_00_15( 7,T,A,B,C,D,E,W);
+ BODY_00_15( 8,E,T,A,B,C,D,W);
+ BODY_00_15( 9,D,E,T,A,B,C,W);
+ BODY_00_15(10,C,D,E,T,A,B,W);
+ BODY_00_15(11,B,C,D,E,T,A,W);
+ BODY_00_15(12,A,B,C,D,E,T,W);
+ BODY_00_15(13,T,A,B,C,D,E,W);
+ BODY_00_15(14,E,T,A,B,C,D,W);
+ BODY_00_15(15,D,E,T,A,B,C,W);
+ BODY_16_19(16,C,D,E,T,A,B,W,W,W,W);
+ BODY_16_19(17,B,C,D,E,T,A,W,W,W,W);
+ BODY_16_19(18,A,B,C,D,E,T,W,W,W,W);
+ BODY_16_19(19,T,A,B,C,D,E,W,W,W,X);
+
+ BODY_20_31(20,E,T,A,B,C,D,W,W,W,X);
+ BODY_20_31(21,D,E,T,A,B,C,W,W,W,X);
+ BODY_20_31(22,C,D,E,T,A,B,W,W,W,X);
+ BODY_20_31(23,B,C,D,E,T,A,W,W,W,X);
+ BODY_20_31(24,A,B,C,D,E,T,W,W,X,X);
+ BODY_20_31(25,T,A,B,C,D,E,W,W,X,X);
+ BODY_20_31(26,E,T,A,B,C,D,W,W,X,X);
+ BODY_20_31(27,D,E,T,A,B,C,W,W,X,X);
+ BODY_20_31(28,C,D,E,T,A,B,W,W,X,X);
+ BODY_20_31(29,B,C,D,E,T,A,W,W,X,X);
+ BODY_20_31(30,A,B,C,D,E,T,W,X,X,X);
+ BODY_20_31(31,T,A,B,C,D,E,W,X,X,X);
+ BODY_32_39(32,E,T,A,B,C,D,X);
+ BODY_32_39(33,D,E,T,A,B,C,X);
+ BODY_32_39(34,C,D,E,T,A,B,X);
+ BODY_32_39(35,B,C,D,E,T,A,X);
+ BODY_32_39(36,A,B,C,D,E,T,X);
+ BODY_32_39(37,T,A,B,C,D,E,X);
+ BODY_32_39(38,E,T,A,B,C,D,X);
+ BODY_32_39(39,D,E,T,A,B,C,X);
+
+ BODY_40_59(40,C,D,E,T,A,B,X);
+ BODY_40_59(41,B,C,D,E,T,A,X);
+ BODY_40_59(42,A,B,C,D,E,T,X);
+ BODY_40_59(43,T,A,B,C,D,E,X);
+ BODY_40_59(44,E,T,A,B,C,D,X);
+ BODY_40_59(45,D,E,T,A,B,C,X);
+ BODY_40_59(46,C,D,E,T,A,B,X);
+ BODY_40_59(47,B,C,D,E,T,A,X);
+ BODY_40_59(48,A,B,C,D,E,T,X);
+ BODY_40_59(49,T,A,B,C,D,E,X);
+ BODY_40_59(50,E,T,A,B,C,D,X);
+ BODY_40_59(51,D,E,T,A,B,C,X);
+ BODY_40_59(52,C,D,E,T,A,B,X);
+ BODY_40_59(53,B,C,D,E,T,A,X);
+ BODY_40_59(54,A,B,C,D,E,T,X);
+ BODY_40_59(55,T,A,B,C,D,E,X);
+ BODY_40_59(56,E,T,A,B,C,D,X);
+ BODY_40_59(57,D,E,T,A,B,C,X);
+ BODY_40_59(58,C,D,E,T,A,B,X);
+ BODY_40_59(59,B,C,D,E,T,A,X);
+
+ BODY_60_79(60,A,B,C,D,E,T,X);
+ BODY_60_79(61,T,A,B,C,D,E,X);
+ BODY_60_79(62,E,T,A,B,C,D,X);
+ BODY_60_79(63,D,E,T,A,B,C,X);
+ BODY_60_79(64,C,D,E,T,A,B,X);
+ BODY_60_79(65,B,C,D,E,T,A,X);
+ BODY_60_79(66,A,B,C,D,E,T,X);
+ BODY_60_79(67,T,A,B,C,D,E,X);
+ BODY_60_79(68,E,T,A,B,C,D,X);
+ BODY_60_79(69,D,E,T,A,B,C,X);
+ BODY_60_79(70,C,D,E,T,A,B,X);
+ BODY_60_79(71,B,C,D,E,T,A,X);
+ BODY_60_79(72,A,B,C,D,E,T,X);
+ BODY_60_79(73,T,A,B,C,D,E,X);
+ BODY_60_79(74,E,T,A,B,C,D,X);
+ BODY_60_79(75,D,E,T,A,B,C,X);
+ BODY_60_79(76,C,D,E,T,A,B,X);
+ BODY_60_79(77,B,C,D,E,T,A,X);
+ BODY_60_79(78,A,B,C,D,E,T,X);
+ BODY_60_79(79,T,A,B,C,D,E,X);
+
+ c->h0=(c->h0+E)&0xffffffffL;
+ c->h1=(c->h1+T)&0xffffffffL;
+ c->h2=(c->h2+A)&0xffffffffL;
+ c->h3=(c->h3+B)&0xffffffffL;
+ c->h4=(c->h4+C)&0xffffffffL;
+
+ num-=64;
+ if (num <= 0) break;
A=c->h0;
B=c->h1;
@@ -217,96 +410,10 @@ register unsigned long *X;
D=c->h3;
E=c->h4;
- BODY_00_15( 0,A,B,C,D,E,T);
- BODY_00_15( 1,T,A,B,C,D,E);
- BODY_00_15( 2,E,T,A,B,C,D);
- BODY_00_15( 3,D,E,T,A,B,C);
- BODY_00_15( 4,C,D,E,T,A,B);
- BODY_00_15( 5,B,C,D,E,T,A);
- BODY_00_15( 6,A,B,C,D,E,T);
- BODY_00_15( 7,T,A,B,C,D,E);
- BODY_00_15( 8,E,T,A,B,C,D);
- BODY_00_15( 9,D,E,T,A,B,C);
- BODY_00_15(10,C,D,E,T,A,B);
- BODY_00_15(11,B,C,D,E,T,A);
- BODY_00_15(12,A,B,C,D,E,T);
- BODY_00_15(13,T,A,B,C,D,E);
- BODY_00_15(14,E,T,A,B,C,D);
- BODY_00_15(15,D,E,T,A,B,C);
- BODY_16_19(16,C,D,E,T,A,B);
- BODY_16_19(17,B,C,D,E,T,A);
- BODY_16_19(18,A,B,C,D,E,T);
- BODY_16_19(19,T,A,B,C,D,E);
-
- BODY_20_39(20,E,T,A,B,C,D);
- BODY_20_39(21,D,E,T,A,B,C);
- BODY_20_39(22,C,D,E,T,A,B);
- BODY_20_39(23,B,C,D,E,T,A);
- BODY_20_39(24,A,B,C,D,E,T);
- BODY_20_39(25,T,A,B,C,D,E);
- BODY_20_39(26,E,T,A,B,C,D);
- BODY_20_39(27,D,E,T,A,B,C);
- BODY_20_39(28,C,D,E,T,A,B);
- BODY_20_39(29,B,C,D,E,T,A);
- BODY_20_39(30,A,B,C,D,E,T);
- BODY_20_39(31,T,A,B,C,D,E);
- BODY_20_39(32,E,T,A,B,C,D);
- BODY_20_39(33,D,E,T,A,B,C);
- BODY_20_39(34,C,D,E,T,A,B);
- BODY_20_39(35,B,C,D,E,T,A);
- BODY_20_39(36,A,B,C,D,E,T);
- BODY_20_39(37,T,A,B,C,D,E);
- BODY_20_39(38,E,T,A,B,C,D);
- BODY_20_39(39,D,E,T,A,B,C);
-
- BODY_40_59(40,C,D,E,T,A,B);
- BODY_40_59(41,B,C,D,E,T,A);
- BODY_40_59(42,A,B,C,D,E,T);
- BODY_40_59(43,T,A,B,C,D,E);
- BODY_40_59(44,E,T,A,B,C,D);
- BODY_40_59(45,D,E,T,A,B,C);
- BODY_40_59(46,C,D,E,T,A,B);
- BODY_40_59(47,B,C,D,E,T,A);
- BODY_40_59(48,A,B,C,D,E,T);
- BODY_40_59(49,T,A,B,C,D,E);
- BODY_40_59(50,E,T,A,B,C,D);
- BODY_40_59(51,D,E,T,A,B,C);
- BODY_40_59(52,C,D,E,T,A,B);
- BODY_40_59(53,B,C,D,E,T,A);
- BODY_40_59(54,A,B,C,D,E,T);
- BODY_40_59(55,T,A,B,C,D,E);
- BODY_40_59(56,E,T,A,B,C,D);
- BODY_40_59(57,D,E,T,A,B,C);
- BODY_40_59(58,C,D,E,T,A,B);
- BODY_40_59(59,B,C,D,E,T,A);
-
- BODY_60_79(60,A,B,C,D,E,T);
- BODY_60_79(61,T,A,B,C,D,E);
- BODY_60_79(62,E,T,A,B,C,D);
- BODY_60_79(63,D,E,T,A,B,C);
- BODY_60_79(64,C,D,E,T,A,B);
- BODY_60_79(65,B,C,D,E,T,A);
- BODY_60_79(66,A,B,C,D,E,T);
- BODY_60_79(67,T,A,B,C,D,E);
- BODY_60_79(68,E,T,A,B,C,D);
- BODY_60_79(69,D,E,T,A,B,C);
- BODY_60_79(70,C,D,E,T,A,B);
- BODY_60_79(71,B,C,D,E,T,A);
- BODY_60_79(72,A,B,C,D,E,T);
- BODY_60_79(73,T,A,B,C,D,E);
- BODY_60_79(74,E,T,A,B,C,D);
- BODY_60_79(75,D,E,T,A,B,C);
- BODY_60_79(76,C,D,E,T,A,B);
- BODY_60_79(77,B,C,D,E,T,A);
- BODY_60_79(78,A,B,C,D,E,T);
- BODY_60_79(79,T,A,B,C,D,E);
-
- c->h0=(c->h0+E)&0xffffffff;
- c->h1=(c->h1+T)&0xffffffff;
- c->h2=(c->h2+A)&0xffffffff;
- c->h3=(c->h3+B)&0xffffffff;
- c->h4=(c->h4+C)&0xffffffff;
+ W+=16;
+ }
}
+#endif
void SHA1_Final(md, c)
unsigned char *md;
@@ -326,7 +433,7 @@ SHA_CTX *c;
if ((j&0x03) == 0) p[i]=0;
#endif
l=p[i];
- p_c2nl(cp,l,j&0x03);
+ M_p_c2nl(cp,l,j&0x03);
p[i]=l;
i++;
/* i is the next 'undefined word' */
@@ -334,14 +441,18 @@ SHA_CTX *c;
{
for (; i<SHA_LBLOCK; i++)
p[i]=0;
- sha1_block(c,p);
+ sha1_block(c,p,64);
i=0;
}
for (; i<(SHA_LBLOCK-2); i++)
p[i]=0;
p[SHA_LBLOCK-2]=c->Nh;
p[SHA_LBLOCK-1]=c->Nl;
- sha1_block(c,p);
+#if defined(L_ENDIAN) && defined(SHA1_ASM)
+ Endian_Reverse32(p[SHA_LBLOCK-2]);
+ Endian_Reverse32(p[SHA_LBLOCK-1]);
+#endif
+ sha1_block(c,p,64);
cp=md;
l=c->h0; nl2c(l,cp);
l=c->h1; nl2c(l,cp);
@@ -355,19 +466,3 @@ SHA_CTX *c;
/* memset((char *)&c,0,sizeof(c));*/
}
-#ifdef undef
-int printit(l)
-unsigned long *l;
- {
- int i,ii;
-
- for (i=0; i<2; i++)
- {
- for (ii=0; ii<8; ii++)
- {
- fprintf(stderr,"%08lx ",l[i*8+ii]);
- }
- fprintf(stderr,"\n");
- }
- }
-#endif
diff --git a/crypto/sha/sha1s.cpp b/crypto/sha/sha1s.cpp
new file mode 100644
index 0000000000..0163377de6
--- /dev/null
+++ b/crypto/sha/sha1s.cpp
@@ -0,0 +1,79 @@
+//
+// gettsc.inl
+//
+// gives access to the Pentium's (secret) cycle counter
+//
+// This software was written by Leonard Janke (janke@unixg.ubc.ca)
+// in 1996-7 and is entered, by him, into the public domain.
+
+#if defined(__WATCOMC__)
+void GetTSC(unsigned long&);
+#pragma aux GetTSC = 0x0f 0x31 "mov [edi], eax" parm [edi] modify [edx eax];
+#elif defined(__GNUC__)
+inline
+void GetTSC(unsigned long& tsc)
+{
+ asm volatile(".byte 15, 49\n\t"
+ : "=eax" (tsc)
+ :
+ : "%edx", "%eax");
+}
+#elif defined(_MSC_VER)
+inline
+void GetTSC(unsigned long& tsc)
+{
+ unsigned long a;
+ __asm _emit 0fh
+ __asm _emit 31h
+ __asm mov a, eax;
+ tsc=a;
+}
+#endif
+
+#include <stdio.h>
+#include <stdlib.h>
+#include "sha.h"
+
+extern "C" {
+void sha1_block_x86(SHA_CTX *ctx, unsigned char *buffer,int num);
+}
+
+void main(int argc,char *argv[])
+ {
+ unsigned char buffer[64*256];
+ SHA_CTX ctx;
+ unsigned long s1,s2,e1,e2;
+ unsigned char k[16];
+ unsigned long data[2];
+ unsigned char iv[8];
+ int i,num=0,numm;
+ int j=0;
+
+ if (argc >= 2)
+ num=atoi(argv[1]);
+
+ if (num == 0) num=16;
+ if (num > 250) num=16;
+ numm=num+2;
+ num*=64;
+ numm*=64;
+
+ for (j=0; j<6; j++)
+ {
+ for (i=0; i<10; i++) /**/
+ {
+ sha1_block_x86(&ctx,buffer,numm);
+ GetTSC(s1);
+ sha1_block_x86(&ctx,buffer,numm);
+ GetTSC(e1);
+ GetTSC(s2);
+ sha1_block_x86(&ctx,buffer,num);
+ GetTSC(e2);
+ sha1_block_x86(&ctx,buffer,num);
+ }
+
+ printf("sha1 (%d bytes) %d %d (%.2f)\n",num,
+ e1-s1,e2-s2,(double)((e1-s1)-(e2-s2))/2);
+ }
+ }
+
diff --git a/crypto/sha/sha1test.c b/crypto/sha/sha1test.c
index 9c172c0241..3c62a218b4 100644
--- a/crypto/sha/sha1test.c
+++ b/crypto/sha/sha1test.c
@@ -1,5 +1,5 @@
/* crypto/sha/sha1test.c */
-/* Copyright (C) 1995-1997 Eric Young (eay@cryptsoft.com)
+/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
* All rights reserved.
*
* This package is an SSL implementation written
@@ -133,7 +133,7 @@ char *argv[];
r=bigret;
if (strcmp(p,r) != 0)
{
- printf("error calculating SHA1 on '%s'\n",p);
+ printf("error calculating SHA1 on 'a' * 1000\n");
printf("got %s instead of %s\n",p,r);
err++;
}
diff --git a/crypto/sha/sha_dgst.c b/crypto/sha/sha_dgst.c
index 311aa6fcc5..8ed533ea26 100644
--- a/crypto/sha/sha_dgst.c
+++ b/crypto/sha/sha_dgst.c
@@ -1,5 +1,5 @@
/* crypto/sha/sha_dgst.c */
-/* Copyright (C) 1995-1997 Eric Young (eay@cryptsoft.com)
+/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
* All rights reserved.
*
* This package is an SSL implementation written
@@ -57,14 +57,15 @@
*/
#include <stdio.h>
-#define SHA_0
-#undef SHA_1
+#include <string.h>
+#define SHA_0
+#undef SHA_1
#include "sha.h"
#include "sha_locl.h"
-char *SHA_version="SHA part of SSLeay 0.8.1b 29-Jun-1998";
+char *SHA_version="SHA part of SSLeay 0.9.0b 29-Jun-1998";
-/* Implemented from SHA document - The Secure Hash Algorithm
+/* Implemented from SHA-0 document - The Secure Hash Algorithm
*/
#define INIT_DATA_h0 (unsigned long)0x67452301L
@@ -79,11 +80,17 @@ char *SHA_version="SHA part of SSLeay 0.8.1b 29-Jun-1998";
#define K_60_79 0xca62c1d6L
#ifndef NOPROTO
-static void sha_block(SHA_CTX *c, register unsigned long *p);
+ void sha_block(SHA_CTX *c, register unsigned long *p, int num);
#else
-static void sha_block();
+ void sha_block();
#endif
+#define M_c2nl c2nl
+#define M_p_c2nl p_c2nl
+#define M_c2nl_p c2nl_p
+#define M_p_c2nl_p p_c2nl_p
+#define M_nl2c nl2c
+
void SHA_Init(c)
SHA_CTX *c;
{
@@ -103,12 +110,12 @@ register unsigned char *data;
unsigned long len;
{
register ULONG *p;
- int sw,sc,ew,ec;
+ int ew,ec,sw,sc;
ULONG l;
if (len == 0) return;
- l=(c->Nl+(len<<3))&0xffffffff;
+ l=(c->Nl+(len<<3))&0xffffffffL;
if (l < c->Nl) /* overflow */
c->Nh++;
c->Nh+=(len>>29);
@@ -123,16 +130,16 @@ unsigned long len;
if ((c->num+len) >= SHA_CBLOCK)
{
l= p[sw];
- p_c2nl(data,l,sc);
+ M_p_c2nl(data,l,sc);
p[sw++]=l;
for (; sw<SHA_LBLOCK; sw++)
{
- c2nl(data,l);
+ M_c2nl(data,l);
p[sw]=l;
}
len-=(SHA_CBLOCK-c->num);
- sha_block(c,p);
+ sha_block(c,p,64);
c->num=0;
/* drop through and do the rest */
}
@@ -142,7 +149,7 @@ unsigned long len;
if ((sc+len) < 4) /* ugly, add char's to a word */
{
l= p[sw];
- p_c2nl_p(data,l,sc,len);
+ M_p_c2nl_p(data,l,sc,len);
p[sw]=l;
}
else
@@ -150,28 +157,51 @@ unsigned long len;
ew=(c->num>>2);
ec=(c->num&0x03);
l= p[sw];
- p_c2nl(data,l,sc);
+ M_p_c2nl(data,l,sc);
p[sw++]=l;
for (; sw < ew; sw++)
- { c2nl(data,l); p[sw]=l; }
+ { M_c2nl(data,l); p[sw]=l; }
if (ec)
{
- c2nl_p(data,l,ec);
+ M_c2nl_p(data,l,ec);
p[sw]=l;
}
}
return;
}
}
+ /* We can only do the following code for assember, the reason
+ * being that the sha_block 'C' version changes the values
+ * in the 'data' array. The assember code avoids this and
+ * copies it to a local array. I should be able to do this for
+ * the C version as well....
+ */
+#if 1
+#if defined(B_ENDIAN) || defined(SHA_ASM)
+ if ((((unsigned int)data)%sizeof(ULONG)) == 0)
+ {
+ sw=len/SHA_CBLOCK;
+ if (sw)
+ {
+ sw*=SHA_CBLOCK;
+ sha_block(c,(ULONG *)data,sw);
+ data+=sw;
+ len-=sw;
+ }
+ }
+#endif
+#endif
/* we now can process the input data in blocks of SHA_CBLOCK
* chars and save the leftovers to c->data. */
p=c->data;
while (len >= SHA_CBLOCK)
{
#if defined(B_ENDIAN) || defined(L_ENDIAN)
- memcpy(p,data,SHA_CBLOCK);
+ if (p != (unsigned long *)data)
+ memcpy(p,data,SHA_CBLOCK);
data+=SHA_CBLOCK;
-#ifdef L_ENDIAN
+# ifdef L_ENDIAN
+# ifndef SHA_ASM /* Will not happen */
for (sw=(SHA_LBLOCK/4); sw; sw--)
{
Endian_Reverse32(p[0]);
@@ -180,18 +210,20 @@ unsigned long len;
Endian_Reverse32(p[3]);
p+=4;
}
-#endif
+ p=c->data;
+# endif
+# endif
#else
- for (sw=(SHA_LBLOCK/4); sw; sw--)
+ for (sw=(SHA_BLOCK/4); sw; sw--)
{
- c2nl(data,l); *(p++)=l;
- c2nl(data,l); *(p++)=l;
- c2nl(data,l); *(p++)=l;
- c2nl(data,l); *(p++)=l;
+ M_c2nl(data,l); *(p++)=l;
+ M_c2nl(data,l); *(p++)=l;
+ M_c2nl(data,l); *(p++)=l;
+ M_c2nl(data,l); *(p++)=l;
}
-#endif
p=c->data;
- sha_block(c,p);
+#endif
+ sha_block(c,p,64);
len-=SHA_CBLOCK;
}
ec=(int)len;
@@ -200,16 +232,55 @@ unsigned long len;
ec&=0x03;
for (sw=0; sw < ew; sw++)
- { c2nl(data,l); p[sw]=l; }
- c2nl_p(data,l,ec);
+ { M_c2nl(data,l); p[sw]=l; }
+ M_c2nl_p(data,l,ec);
p[sw]=l;
}
-static void sha_block(c, X)
+void SHA_Transform(c,b)
SHA_CTX *c;
-register unsigned long *X;
+unsigned char *b;
+ {
+ ULONG p[16];
+#if !defined(B_ENDIAN)
+ ULONG *q;
+ int i;
+#endif
+
+#if defined(B_ENDIAN) || defined(L_ENDIAN)
+ memcpy(p,b,64);
+#ifdef L_ENDIAN
+ q=p;
+ for (i=(SHA_LBLOCK/4); i; i--)
+ {
+ Endian_Reverse32(q[0]);
+ Endian_Reverse32(q[1]);
+ Endian_Reverse32(q[2]);
+ Endian_Reverse32(q[3]);
+ q+=4;
+ }
+#endif
+#else
+ q=p;
+ for (i=(SHA_LBLOCK/4); i; i--)
+ {
+ ULONG l;
+ c2nl(b,l); *(q++)=l;
+ c2nl(b,l); *(q++)=l;
+ c2nl(b,l); *(q++)=l;
+ c2nl(b,l); *(q++)=l;
+ }
+#endif
+ sha_block(c,p,64);
+ }
+
+void sha_block(c, W, num)
+SHA_CTX *c;
+register unsigned long *W;
+int num;
{
register ULONG A,B,C,D,E,T;
+ ULONG X[16];
A=c->h0;
B=c->h1;
@@ -217,95 +288,109 @@ register unsigned long *X;
D=c->h3;
E=c->h4;
- BODY_00_15( 0,A,B,C,D,E,T);
- BODY_00_15( 1,T,A,B,C,D,E);
- BODY_00_15( 2,E,T,A,B,C,D);
- BODY_00_15( 3,D,E,T,A,B,C);
- BODY_00_15( 4,C,D,E,T,A,B);
- BODY_00_15( 5,B,C,D,E,T,A);
- BODY_00_15( 6,A,B,C,D,E,T);
- BODY_00_15( 7,T,A,B,C,D,E);
- BODY_00_15( 8,E,T,A,B,C,D);
- BODY_00_15( 9,D,E,T,A,B,C);
- BODY_00_15(10,C,D,E,T,A,B);
- BODY_00_15(11,B,C,D,E,T,A);
- BODY_00_15(12,A,B,C,D,E,T);
- BODY_00_15(13,T,A,B,C,D,E);
- BODY_00_15(14,E,T,A,B,C,D);
- BODY_00_15(15,D,E,T,A,B,C);
- BODY_16_19(16,C,D,E,T,A,B);
- BODY_16_19(17,B,C,D,E,T,A);
- BODY_16_19(18,A,B,C,D,E,T);
- BODY_16_19(19,T,A,B,C,D,E);
+ for (;;)
+ {
+ BODY_00_15( 0,A,B,C,D,E,T,W);
+ BODY_00_15( 1,T,A,B,C,D,E,W);
+ BODY_00_15( 2,E,T,A,B,C,D,W);
+ BODY_00_15( 3,D,E,T,A,B,C,W);
+ BODY_00_15( 4,C,D,E,T,A,B,W);
+ BODY_00_15( 5,B,C,D,E,T,A,W);
+ BODY_00_15( 6,A,B,C,D,E,T,W);
+ BODY_00_15( 7,T,A,B,C,D,E,W);
+ BODY_00_15( 8,E,T,A,B,C,D,W);
+ BODY_00_15( 9,D,E,T,A,B,C,W);
+ BODY_00_15(10,C,D,E,T,A,B,W);
+ BODY_00_15(11,B,C,D,E,T,A,W);
+ BODY_00_15(12,A,B,C,D,E,T,W);
+ BODY_00_15(13,T,A,B,C,D,E,W);
+ BODY_00_15(14,E,T,A,B,C,D,W);
+ BODY_00_15(15,D,E,T,A,B,C,W);
+ BODY_16_19(16,C,D,E,T,A,B,W,W,W,W);
+ BODY_16_19(17,B,C,D,E,T,A,W,W,W,W);
+ BODY_16_19(18,A,B,C,D,E,T,W,W,W,W);
+ BODY_16_19(19,T,A,B,C,D,E,W,W,W,X);
+
+ BODY_20_31(20,E,T,A,B,C,D,W,W,W,X);
+ BODY_20_31(21,D,E,T,A,B,C,W,W,W,X);
+ BODY_20_31(22,C,D,E,T,A,B,W,W,W,X);
+ BODY_20_31(23,B,C,D,E,T,A,W,W,W,X);
+ BODY_20_31(24,A,B,C,D,E,T,W,W,X,X);
+ BODY_20_31(25,T,A,B,C,D,E,W,W,X,X);
+ BODY_20_31(26,E,T,A,B,C,D,W,W,X,X);
+ BODY_20_31(27,D,E,T,A,B,C,W,W,X,X);
+ BODY_20_31(28,C,D,E,T,A,B,W,W,X,X);
+ BODY_20_31(29,B,C,D,E,T,A,W,W,X,X);
+ BODY_20_31(30,A,B,C,D,E,T,W,X,X,X);
+ BODY_20_31(31,T,A,B,C,D,E,W,X,X,X);
+ BODY_32_39(32,E,T,A,B,C,D,X);
+ BODY_32_39(33,D,E,T,A,B,C,X);
+ BODY_32_39(34,C,D,E,T,A,B,X);
+ BODY_32_39(35,B,C,D,E,T,A,X);
+ BODY_32_39(36,A,B,C,D,E,T,X);
+ BODY_32_39(37,T,A,B,C,D,E,X);
+ BODY_32_39(38,E,T,A,B,C,D,X);
+ BODY_32_39(39,D,E,T,A,B,C,X);
+
+ BODY_40_59(40,C,D,E,T,A,B,X);
+ BODY_40_59(41,B,C,D,E,T,A,X);
+ BODY_40_59(42,A,B,C,D,E,T,X);
+ BODY_40_59(43,T,A,B,C,D,E,X);
+ BODY_40_59(44,E,T,A,B,C,D,X);
+ BODY_40_59(45,D,E,T,A,B,C,X);
+ BODY_40_59(46,C,D,E,T,A,B,X);
+ BODY_40_59(47,B,C,D,E,T,A,X);
+ BODY_40_59(48,A,B,C,D,E,T,X);
+ BODY_40_59(49,T,A,B,C,D,E,X);
+ BODY_40_59(50,E,T,A,B,C,D,X);
+ BODY_40_59(51,D,E,T,A,B,C,X);
+ BODY_40_59(52,C,D,E,T,A,B,X);
+ BODY_40_59(53,B,C,D,E,T,A,X);
+ BODY_40_59(54,A,B,C,D,E,T,X);
+ BODY_40_59(55,T,A,B,C,D,E,X);
+ BODY_40_59(56,E,T,A,B,C,D,X);
+ BODY_40_59(57,D,E,T,A,B,C,X);
+ BODY_40_59(58,C,D,E,T,A,B,X);
+ BODY_40_59(59,B,C,D,E,T,A,X);
- BODY_20_39(20,E,T,A,B,C,D);
- BODY_20_39(21,D,E,T,A,B,C);
- BODY_20_39(22,C,D,E,T,A,B);
- BODY_20_39(23,B,C,D,E,T,A);
- BODY_20_39(24,A,B,C,D,E,T);
- BODY_20_39(25,T,A,B,C,D,E);
- BODY_20_39(26,E,T,A,B,C,D);
- BODY_20_39(27,D,E,T,A,B,C);
- BODY_20_39(28,C,D,E,T,A,B);
- BODY_20_39(29,B,C,D,E,T,A);
- BODY_20_39(30,A,B,C,D,E,T);
- BODY_20_39(31,T,A,B,C,D,E);
- BODY_20_39(32,E,T,A,B,C,D);
- BODY_20_39(33,D,E,T,A,B,C);
- BODY_20_39(34,C,D,E,T,A,B);
- BODY_20_39(35,B,C,D,E,T,A);
- BODY_20_39(36,A,B,C,D,E,T);
- BODY_20_39(37,T,A,B,C,D,E);
- BODY_20_39(38,E,T,A,B,C,D);
- BODY_20_39(39,D,E,T,A,B,C);
+ BODY_60_79(60,A,B,C,D,E,T,X);
+ BODY_60_79(61,T,A,B,C,D,E,X);
+ BODY_60_79(62,E,T,A,B,C,D,X);
+ BODY_60_79(63,D,E,T,A,B,C,X);
+ BODY_60_79(64,C,D,E,T,A,B,X);
+ BODY_60_79(65,B,C,D,E,T,A,X);
+ BODY_60_79(66,A,B,C,D,E,T,X);
+ BODY_60_79(67,T,A,B,C,D,E,X);
+ BODY_60_79(68,E,T,A,B,C,D,X);
+ BODY_60_79(69,D,E,T,A,B,C,X);
+ BODY_60_79(70,C,D,E,T,A,B,X);
+ BODY_60_79(71,B,C,D,E,T,A,X);
+ BODY_60_79(72,A,B,C,D,E,T,X);
+ BODY_60_79(73,T,A,B,C,D,E,X);
+ BODY_60_79(74,E,T,A,B,C,D,X);
+ BODY_60_79(75,D,E,T,A,B,C,X);
+ BODY_60_79(76,C,D,E,T,A,B,X);
+ BODY_60_79(77,B,C,D,E,T,A,X);
+ BODY_60_79(78,A,B,C,D,E,T,X);
+ BODY_60_79(79,T,A,B,C,D,E,X);
+
+ c->h0=(c->h0+E)&0xffffffffL;
+ c->h1=(c->h1+T)&0xffffffffL;
+ c->h2=(c->h2+A)&0xffffffffL;
+ c->h3=(c->h3+B)&0xffffffffL;
+ c->h4=(c->h4+C)&0xffffffffL;
- BODY_40_59(40,C,D,E,T,A,B);
- BODY_40_59(41,B,C,D,E,T,A);
- BODY_40_59(42,A,B,C,D,E,T);
- BODY_40_59(43,T,A,B,C,D,E);
- BODY_40_59(44,E,T,A,B,C,D);
- BODY_40_59(45,D,E,T,A,B,C);
- BODY_40_59(46,C,D,E,T,A,B);
- BODY_40_59(47,B,C,D,E,T,A);
- BODY_40_59(48,A,B,C,D,E,T);
- BODY_40_59(49,T,A,B,C,D,E);
- BODY_40_59(50,E,T,A,B,C,D);
- BODY_40_59(51,D,E,T,A,B,C);
- BODY_40_59(52,C,D,E,T,A,B);
- BODY_40_59(53,B,C,D,E,T,A);
- BODY_40_59(54,A,B,C,D,E,T);
- BODY_40_59(55,T,A,B,C,D,E);
- BODY_40_59(56,E,T,A,B,C,D);
- BODY_40_59(57,D,E,T,A,B,C);
- BODY_40_59(58,C,D,E,T,A,B);
- BODY_40_59(59,B,C,D,E,T,A);
+ num-=64;
+ if (num <= 0) break;
- BODY_60_79(60,A,B,C,D,E,T);
- BODY_60_79(61,T,A,B,C,D,E);
- BODY_60_79(62,E,T,A,B,C,D);
- BODY_60_79(63,D,E,T,A,B,C);
- BODY_60_79(64,C,D,E,T,A,B);
- BODY_60_79(65,B,C,D,E,T,A);
- BODY_60_79(66,A,B,C,D,E,T);
- BODY_60_79(67,T,A,B,C,D,E);
- BODY_60_79(68,E,T,A,B,C,D);
- BODY_60_79(69,D,E,T,A,B,C);
- BODY_60_79(70,C,D,E,T,A,B);
- BODY_60_79(71,B,C,D,E,T,A);
- BODY_60_79(72,A,B,C,D,E,T);
- BODY_60_79(73,T,A,B,C,D,E);
- BODY_60_79(74,E,T,A,B,C,D);
- BODY_60_79(75,D,E,T,A,B,C);
- BODY_60_79(76,C,D,E,T,A,B);
- BODY_60_79(77,B,C,D,E,T,A);
- BODY_60_79(78,A,B,C,D,E,T);
- BODY_60_79(79,T,A,B,C,D,E);
+ A=c->h0;
+ B=c->h1;
+ C=c->h2;
+ D=c->h3;
+ E=c->h4;
- c->h0=(c->h0+E)&0xffffffff;
- c->h1=(c->h1+T)&0xffffffff;
- c->h2=(c->h2+A)&0xffffffff;
- c->h3=(c->h3+B)&0xffffffff;
- c->h4=(c->h4+C)&0xffffffff;
+ W+=16;
+ }
}
void SHA_Final(md, c)
@@ -326,7 +411,7 @@ SHA_CTX *c;
if ((j&0x03) == 0) p[i]=0;
#endif
l=p[i];
- p_c2nl(cp,l,j&0x03);
+ M_p_c2nl(cp,l,j&0x03);
p[i]=l;
i++;
/* i is the next 'undefined word' */
@@ -334,41 +419,24 @@ SHA_CTX *c;
{
for (; i<SHA_LBLOCK; i++)
p[i]=0;
- sha_block(c,p);
+ sha_block(c,p,64);
i=0;
}
for (; i<(SHA_LBLOCK-2); i++)
p[i]=0;
p[SHA_LBLOCK-2]=c->Nh;
p[SHA_LBLOCK-1]=c->Nl;
- sha_block(c,p);
+ sha_block(c,p,64);
cp=md;
l=c->h0; nl2c(l,cp);
l=c->h1; nl2c(l,cp);
l=c->h2; nl2c(l,cp);
l=c->h3; nl2c(l,cp);
l=c->h4; nl2c(l,cp);
+
/* clear stuff, sha_block may be leaving some stuff on the stack
* but I'm not worried :-) */
c->num=0;
/* memset((char *)&c,0,sizeof(c));*/
}
-
-#ifdef undef
-int printit(l)
-unsigned long *l;
- {
- int i,ii;
-
- for (i=0; i<2; i++)
- {
- for (ii=0; ii<8; ii++)
- {
- fprintf(stderr,"%08lx ",l[i*8+ii]);
- }
- fprintf(stderr,"\n");
- }
- }
-#endif
-
diff --git a/crypto/sha/sha_locl.h b/crypto/sha/sha_locl.h
index 0a5cf46990..2814ad15fa 100644
--- a/crypto/sha/sha_locl.h
+++ b/crypto/sha/sha_locl.h
@@ -1,5 +1,5 @@
/* crypto/sha/sha_locl.h */
-/* Copyright (C) 1995-1997 Eric Young (eay@cryptsoft.com)
+/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
* All rights reserved.
*
* This package is an SSL implementation written
@@ -120,7 +120,51 @@
*((c)++)=(unsigned char)(((l)>> 8)&0xff), \
*((c)++)=(unsigned char)(((l) )&0xff))
-/* I have taken some of this code from my MD5 implementation */
+#undef c2l
+#define c2l(c,l) (l =(((unsigned long)(*((c)++))) ), \
+ l|=(((unsigned long)(*((c)++)))<< 8), \
+ l|=(((unsigned long)(*((c)++)))<<16), \
+ l|=(((unsigned long)(*((c)++)))<<24))
+
+#undef p_c2l
+#define p_c2l(c,l,n) { \
+ switch (n) { \
+ case 0: l =((unsigned long)(*((c)++))); \
+ case 1: l|=((unsigned long)(*((c)++)))<< 8; \
+ case 2: l|=((unsigned long)(*((c)++)))<<16; \
+ case 3: l|=((unsigned long)(*((c)++)))<<24; \
+ } \
+ }
+
+#undef c2l_p
+/* NOTE the pointer is not incremented at the end of this */
+#define c2l_p(c,l,n) { \
+ l=0; \
+ (c)+=n; \
+ switch (n) { \
+ case 3: l =((unsigned long)(*(--(c))))<<16; \
+ case 2: l|=((unsigned long)(*(--(c))))<< 8; \
+ case 1: l|=((unsigned long)(*(--(c)))); \
+ } \
+ }
+
+#undef p_c2l_p
+#define p_c2l_p(c,l,sc,len) { \
+ switch (sc) \
+ { \
+ case 0: l =((unsigned long)(*((c)++))); \
+ if (--len == 0) break; \
+ case 1: l|=((unsigned long)(*((c)++)))<< 8; \
+ if (--len == 0) break; \
+ case 2: l|=((unsigned long)(*((c)++)))<<16; \
+ } \
+ }
+
+#undef l2c
+#define l2c(l,c) (*((c)++)=(unsigned char)(((l) )&0xff), \
+ *((c)++)=(unsigned char)(((l)>> 8)&0xff), \
+ *((c)++)=(unsigned char)(((l)>>16)&0xff), \
+ *((c)++)=(unsigned char)(((l)>>24)&0xff))
#undef ROTATE
#if defined(WIN32)
@@ -161,38 +205,42 @@
#ifdef SHA_0
#undef Xupdate
-#define Xupdate(a,i) \
- X[(i)&0x0f]=(a)=\
- (X[(i)&0x0f]^X[((i)+2)&0x0f]^X[((i)+8)&0x0f]^X[((i)+13)&0x0f]);
+#define Xupdate(a,i,ia,ib,ic,id) X[(i)&0x0f]=(a)=\
+ (ia[(i)&0x0f]^ib[((i)+2)&0x0f]^ic[((i)+8)&0x0f]^id[((i)+13)&0x0f]);
#endif
#ifdef SHA_1
#undef Xupdate
-#define Xupdate(a,i) \
- (a)=(X[(i)&0x0f]^X[((i)+2)&0x0f]^X[((i)+8)&0x0f]^X[((i)+13)&0x0f]); \
+#define Xupdate(a,i,ia,ib,ic,id) (a)=\
+ (ia[(i)&0x0f]^ib[((i)+2)&0x0f]^ic[((i)+8)&0x0f]^id[((i)+13)&0x0f]);\
X[(i)&0x0f]=(a)=ROTATE((a),1);
#endif
-#define BODY_00_15(i,a,b,c,d,e,f) \
- (f)=X[i]+(e)+K_00_19+ROTATE((a),5)+F_00_19((b),(c),(d)); \
+#define BODY_00_15(i,a,b,c,d,e,f,xa) \
+ (f)=xa[i]+(e)+K_00_19+ROTATE((a),5)+F_00_19((b),(c),(d)); \
(b)=ROTATE((b),30);
-#define BODY_16_19(i,a,b,c,d,e,f) \
- Xupdate(f,i); \
+#define BODY_16_19(i,a,b,c,d,e,f,xa,xb,xc,xd) \
+ Xupdate(f,i,xa,xb,xc,xd); \
(f)+=(e)+K_00_19+ROTATE((a),5)+F_00_19((b),(c),(d)); \
(b)=ROTATE((b),30);
-#define BODY_20_39(i,a,b,c,d,e,f) \
- Xupdate(f,i); \
+#define BODY_20_31(i,a,b,c,d,e,f,xa,xb,xc,xd) \
+ Xupdate(f,i,xa,xb,xc,xd); \
+ (f)+=(e)+K_20_39+ROTATE((a),5)+F_20_39((b),(c),(d)); \
+ (b)=ROTATE((b),30);
+
+#define BODY_32_39(i,a,b,c,d,e,f,xa) \
+ Xupdate(f,i,xa,xa,xa,xa); \
(f)+=(e)+K_20_39+ROTATE((a),5)+F_20_39((b),(c),(d)); \
(b)=ROTATE((b),30);
-#define BODY_40_59(i,a,b,c,d,e,f) \
- Xupdate(f,i); \
+#define BODY_40_59(i,a,b,c,d,e,f,xa) \
+ Xupdate(f,i,xa,xa,xa,xa); \
(f)+=(e)+K_40_59+ROTATE((a),5)+F_40_59((b),(c),(d)); \
(b)=ROTATE((b),30);
-#define BODY_60_79(i,a,b,c,d,e,f) \
- Xupdate(f,i); \
+#define BODY_60_79(i,a,b,c,d,e,f,xa) \
+ Xupdate(f,i,xa,xa,xa,xa); \
(f)=X[(i)&0x0f]+(e)+K_60_79+ROTATE((a),5)+F_60_79((b),(c),(d)); \
(b)=ROTATE((b),30);
diff --git a/crypto/sha/sha_one.c b/crypto/sha/sha_one.c
index 87da617bec..18ab7f61bc 100644
--- a/crypto/sha/sha_one.c
+++ b/crypto/sha/sha_one.c
@@ -1,5 +1,5 @@
/* crypto/sha/sha_one.c */
-/* Copyright (C) 1995-1997 Eric Young (eay@cryptsoft.com)
+/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
* All rights reserved.
*
* This package is an SSL implementation written
@@ -57,7 +57,7 @@
*/
#include <stdio.h>
-#include "cryptlib.h"
+#include <string.h>
#include "sha.h"
unsigned char *SHA(d, n, md)
diff --git a/crypto/sha/sha_sgst.c b/crypto/sha/sha_sgst.c
new file mode 100644
index 0000000000..8a16801328
--- /dev/null
+++ b/crypto/sha/sha_sgst.c
@@ -0,0 +1,246 @@
+/* crypto/sha/sha_sgst.c */
+/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
+ * All rights reserved.
+ *
+ * This package is an SSL implementation written
+ * by Eric Young (eay@cryptsoft.com).
+ * The implementation was written so as to conform with Netscapes SSL.
+ *
+ * This library is free for commercial and non-commercial use as long as
+ * the following conditions are aheared to. The following conditions
+ * apply to all code found in this distribution, be it the RC4, RSA,
+ * lhash, DES, etc., code; not just the SSL code. The SSL documentation
+ * included with this distribution is covered by the same copyright terms
+ * except that the holder is Tim Hudson (tjh@cryptsoft.com).
+ *
+ * Copyright remains Eric Young's, and as such any Copyright notices in
+ * the code are not to be removed.
+ * If this package is used in a product, Eric Young should be given attribution
+ * as the author of the parts of the library used.
+ * This can be in the form of a textual message at program startup or
+ * in documentation (online or textual) provided with the package.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * "This product includes cryptographic software written by
+ * Eric Young (eay@cryptsoft.com)"
+ * The word 'cryptographic' can be left out if the rouines from the library
+ * being used are not cryptographic related :-).
+ * 4. If you include any Windows specific code (or a derivative thereof) from
+ * the apps directory (application code) you must include an acknowledgement:
+ * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * The licence and distribution terms for any publically available version or
+ * derivative of this code cannot be changed. i.e. this code cannot simply be
+ * copied and put under another distribution licence
+ * [including the GNU Public Licence.]
+ */
+
+#include <stdlib.h>
+#include <string.h>
+
+#ifdef undef
+/* one or the other needs to be defined */
+#ifndef SHA_1 /* FIPS 180-1 */
+#define SHA_0 /* FIPS 180 */
+#endif
+#endif
+
+#define ULONG unsigned long
+#define UCHAR unsigned char
+#define UINT unsigned int
+
+#ifdef NOCONST
+#define const
+#endif
+
+#undef c2nl
+#define c2nl(c,l) (l =(((unsigned long)(*((c)++)))<<24), \
+ l|=(((unsigned long)(*((c)++)))<<16), \
+ l|=(((unsigned long)(*((c)++)))<< 8), \
+ l|=(((unsigned long)(*((c)++))) ))
+
+#undef p_c2nl
+#define p_c2nl(c,l,n) { \
+ switch (n) { \
+ case 0: l =((unsigned long)(*((c)++)))<<24; \
+ case 1: l|=((unsigned long)(*((c)++)))<<16; \
+ case 2: l|=((unsigned long)(*((c)++)))<< 8; \
+ case 3: l|=((unsigned long)(*((c)++))); \
+ } \
+ }
+
+#undef c2nl_p
+/* NOTE the pointer is not incremented at the end of this */
+#define c2nl_p(c,l,n) { \
+ l=0; \
+ (c)+=n; \
+ switch (n) { \
+ case 3: l =((unsigned long)(*(--(c))))<< 8; \
+ case 2: l|=((unsigned long)(*(--(c))))<<16; \
+ case 1: l|=((unsigned long)(*(--(c))))<<24; \
+ } \
+ }
+
+#undef p_c2nl_p
+#define p_c2nl_p(c,l,sc,len) { \
+ switch (sc) \
+ { \
+ case 0: l =((unsigned long)(*((c)++)))<<24; \
+ if (--len == 0) break; \
+ case 1: l|=((unsigned long)(*((c)++)))<<16; \
+ if (--len == 0) break; \
+ case 2: l|=((unsigned long)(*((c)++)))<< 8; \
+ } \
+ }
+
+#undef nl2c
+#define nl2c(l,c) (*((c)++)=(unsigned char)(((l)>>24)&0xff), \
+ *((c)++)=(unsigned char)(((l)>>16)&0xff), \
+ *((c)++)=(unsigned char)(((l)>> 8)&0xff), \
+ *((c)++)=(unsigned char)(((l) )&0xff))
+
+#undef c2l
+#define c2l(c,l) (l =(((unsigned long)(*((c)++))) ), \
+ l|=(((unsigned long)(*((c)++)))<< 8), \
+ l|=(((unsigned long)(*((c)++)))<<16), \
+ l|=(((unsigned long)(*((c)++)))<<24))
+
+#undef p_c2l
+#define p_c2l(c,l,n) { \
+ switch (n) { \
+ case 0: l =((unsigned long)(*((c)++))); \
+ case 1: l|=((unsigned long)(*((c)++)))<< 8; \
+ case 2: l|=((unsigned long)(*((c)++)))<<16; \
+ case 3: l|=((unsigned long)(*((c)++)))<<24; \
+ } \
+ }
+
+#undef c2l_p
+/* NOTE the pointer is not incremented at the end of this */
+#define c2l_p(c,l,n) { \
+ l=0; \
+ (c)+=n; \
+ switch (n) { \
+ case 3: l =((unsigned long)(*(--(c))))<<16; \
+ case 2: l|=((unsigned long)(*(--(c))))<< 8; \
+ case 1: l|=((unsigned long)(*(--(c)))); \
+ } \
+ }
+
+#undef p_c2l_p
+#define p_c2l_p(c,l,sc,len) { \
+ switch (sc) \
+ { \
+ case 0: l =((unsigned long)(*((c)++))); \
+ if (--len == 0) break; \
+ case 1: l|=((unsigned long)(*((c)++)))<< 8; \
+ if (--len == 0) break; \
+ case 2: l|=((unsigned long)(*((c)++)))<<16; \
+ } \
+ }
+
+#undef l2c
+#define l2c(l,c) (*((c)++)=(unsigned char)(((l) )&0xff), \
+ *((c)++)=(unsigned char)(((l)>> 8)&0xff), \
+ *((c)++)=(unsigned char)(((l)>>16)&0xff), \
+ *((c)++)=(unsigned char)(((l)>>24)&0xff))
+
+#undef ROTATE
+#if defined(WIN32)
+#define ROTATE(a,n) _lrotl(a,n)
+#else
+#define ROTATE(a,n) (((a)<<(n))|(((a)&0xffffffff)>>(32-(n))))
+#endif
+
+/* A nice byte order reversal from Wei Dai <weidai@eskimo.com> */
+#if defined(WIN32)
+/* 5 instructions with rotate instruction, else 9 */
+#define Endian_Reverse32(a) \
+ { \
+ unsigned long l=(a); \
+ (a)=((ROTATE(l,8)&0x00FF00FF)|(ROTATE(l,24)&0xFF00FF00)); \
+ }
+#else
+/* 6 instructions with rotate instruction, else 8 */
+#define Endian_Reverse32(a) \
+ { \
+ unsigned long l=(a); \
+ l=(((l&0xFF00FF00)>>8L)|((l&0x00FF00FF)<<8L)); \
+ (a)=ROTATE(l,16L); \
+ }
+#endif
+
+/* As pointed out by Wei Dai <weidai@eskimo.com>, F() below can be
+ * simplified to the code in F_00_19. Wei attributes these optimisations
+ * to Peter Gutmann's SHS code, and he attributes it to Rich Schroeppel.
+ * #define F(x,y,z) (((x) & (y)) | ((~(x)) & (z)))
+ * I've just become aware of another tweak to be made, again from Wei Dai,
+ * in F_40_59, (x&a)|(y&a) -> (x|y)&a
+ */
+#define F_00_19(b,c,d) ((((c) ^ (d)) & (b)) ^ (d))
+#define F_20_39(b,c,d) ((b) ^ (c) ^ (d))
+#define F_40_59(b,c,d) (((b) & (c)) | (((b)|(c)) & (d)))
+#define F_60_79(b,c,d) F_20_39(b,c,d)
+
+#ifdef SHA_0
+#undef Xupdate
+#define Xupdate(a,i,ia,ib,ic,id) X[(i)&0x0f]=(a)=\
+ (ia[(i)&0x0f]^ib[((i)+2)&0x0f]^ic[((i)+8)&0x0f]^id[((i)+13)&0x0f]);
+#endif
+#ifdef SHA_1
+#undef Xupdate
+#define Xupdate(a,i,ia,ib,ic,id) (a)=\
+ (ia[(i)&0x0f]^ib[((i)+2)&0x0f]^ic[((i)+8)&0x0f]^id[((i)+13)&0x0f]);\
+ X[(i)&0x0f]=(a)=ROTATE((a),1);
+#endif
+
+#define BODY_00_15(i,a,b,c,d,e,f,xa) \
+ (f)=xa[i]+(e)+K_00_19+ROTATE((a),5)+F_00_19((b),(c),(d)); \
+ (b)=ROTATE((b),30);
+
+#define BODY_16_19(i,a,b,c,d,e,f,xa,xb,xc,xd) \
+ Xupdate(f,i,xa,xb,xc,xd); \
+ (f)+=(e)+K_00_19+ROTATE((a),5)+F_00_19((b),(c),(d)); \
+ (b)=ROTATE((b),30);
+
+#define BODY_20_31(i,a,b,c,d,e,f,xa,xb,xc,xd) \
+ Xupdate(f,i,xa,xb,xc,xd); \
+ (f)+=(e)+K_20_39+ROTATE((a),5)+F_20_39((b),(c),(d)); \
+ (b)=ROTATE((b),30);
+
+#define BODY_32_39(i,a,b,c,d,e,f,xa) \
+ Xupdate(f,i,xa,xa,xa,xa); \
+ (f)+=(e)+K_20_39+ROTATE((a),5)+F_20_39((b),(c),(d)); \
+ (b)=ROTATE((b),30);
+
+#define BODY_40_59(i,a,b,c,d,e,f,xa) \
+ Xupdate(f,i,xa,xa,xa,xa); \
+ (f)+=(e)+K_40_59+ROTATE((a),5)+F_40_59((b),(c),(d)); \
+ (b)=ROTATE((b),30);
+
+#define BODY_60_79(i,a,b,c,d,e,f,xa) \
+ Xupdate(f,i,xa,xa,xa,xa); \
+ (f)=X[(i)&0x0f]+(e)+K_60_79+ROTATE((a),5)+F_60_79((b),(c),(d)); \
+ (b)=ROTATE((b),30);
+
diff --git a/crypto/sha/shatest.c b/crypto/sha/shatest.c
index 28c2e19236..03816e9b39 100644
--- a/crypto/sha/shatest.c
+++ b/crypto/sha/shatest.c
@@ -1,5 +1,5 @@
/* crypto/sha/shatest.c */
-/* Copyright (C) 1995-1997 Eric Young (eay@cryptsoft.com)
+/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
* All rights reserved.
*
* This package is an SSL implementation written