aboutsummaryrefslogtreecommitdiffstats
path: root/crypto/bf/asm
diff options
context:
space:
mode:
Diffstat (limited to 'crypto/bf/asm')
-rw-r--r--crypto/bf/asm/b-win32.asm1568
-rw-r--r--crypto/bf/asm/bf-586.pl136
-rw-r--r--crypto/bf/asm/bf-686.pl128
-rw-r--r--crypto/bf/asm/bx86unix.cpp965
-rw-r--r--crypto/bf/asm/readme13
5 files changed, 2134 insertions, 676 deletions
diff --git a/crypto/bf/asm/b-win32.asm b/crypto/bf/asm/b-win32.asm
index bef272eebb..138c99d0aa 100644
--- a/crypto/bf/asm/b-win32.asm
+++ b/crypto/bf/asm/b-win32.asm
@@ -1,662 +1,906 @@
- ; Don't even think of reading this code
- ; It was automatically generated by bf586.pl
- ; Which is a perl program used to generate the x86 assember for
- ; any of elf, a.out, Win32, or Solaris
- ; It can be found in SSLeay 0.7.0+
- ; eric <eay@cryptsoft.com>
- ;
- TITLE bfx86xxxx.asm
- .386
-.model FLAT
-_TEXT SEGMENT
-PUBLIC _BF_encrypt
-_BF_encrypt PROC NEAR
- push ebp
- push ebx
- push esi
- push edi
- ;
- ; Load the 2 words
- mov eax, DWORD PTR 20[esp]
- mov ecx, DWORD PTR [eax]
- mov edx, DWORD PTR 4[eax]
- ;
- ; P pointer, s and enc flag
- mov edi, DWORD PTR 24[esp]
- xor eax, eax
- xor ebx, ebx
- mov ebp, DWORD PTR 28[esp]
- cmp ebp, 0
- je $L000start_decrypt
- xor ecx, DWORD PTR [edi]
- ;
- ; Round 0
- ror ecx, 16
- mov esi, DWORD PTR 4[edi]
- mov al, ch
- mov bl, cl
- ror ecx, 16
- xor edx, esi
- mov esi, DWORD PTR 72[eax*4+edi]
- mov ebp, DWORD PTR 1096[ebx*4+edi]
- mov al, ch
- mov bl, cl
- add esi, ebp
- mov eax, DWORD PTR 2120[eax*4+edi]
- xor esi, eax
- mov ebp, DWORD PTR 3144[ebx*4+edi]
- add esi, ebp
- xor eax, eax
- xor edx, esi
- ;
- ; Round 1
- ror edx, 16
- mov esi, DWORD PTR 8[edi]
- mov al, dh
- mov bl, dl
- ror edx, 16
- xor ecx, esi
- mov esi, DWORD PTR 72[eax*4+edi]
- mov ebp, DWORD PTR 1096[ebx*4+edi]
- mov al, dh
- mov bl, dl
- add esi, ebp
- mov eax, DWORD PTR 2120[eax*4+edi]
- xor esi, eax
- mov ebp, DWORD PTR 3144[ebx*4+edi]
- add esi, ebp
- xor eax, eax
- xor ecx, esi
- ;
- ; Round 2
- ror ecx, 16
- mov esi, DWORD PTR 12[edi]
- mov al, ch
- mov bl, cl
- ror ecx, 16
- xor edx, esi
- mov esi, DWORD PTR 72[eax*4+edi]
- mov ebp, DWORD PTR 1096[ebx*4+edi]
- mov al, ch
- mov bl, cl
- add esi, ebp
- mov eax, DWORD PTR 2120[eax*4+edi]
- xor esi, eax
- mov ebp, DWORD PTR 3144[ebx*4+edi]
- add esi, ebp
- xor eax, eax
- xor edx, esi
- ;
- ; Round 3
- ror edx, 16
- mov esi, DWORD PTR 16[edi]
- mov al, dh
- mov bl, dl
- ror edx, 16
- xor ecx, esi
- mov esi, DWORD PTR 72[eax*4+edi]
- mov ebp, DWORD PTR 1096[ebx*4+edi]
- mov al, dh
- mov bl, dl
- add esi, ebp
- mov eax, DWORD PTR 2120[eax*4+edi]
- xor esi, eax
- mov ebp, DWORD PTR 3144[ebx*4+edi]
- add esi, ebp
- xor eax, eax
- xor ecx, esi
- ;
- ; Round 4
- ror ecx, 16
- mov esi, DWORD PTR 20[edi]
- mov al, ch
- mov bl, cl
- ror ecx, 16
- xor edx, esi
- mov esi, DWORD PTR 72[eax*4+edi]
- mov ebp, DWORD PTR 1096[ebx*4+edi]
- mov al, ch
- mov bl, cl
- add esi, ebp
- mov eax, DWORD PTR 2120[eax*4+edi]
- xor esi, eax
- mov ebp, DWORD PTR 3144[ebx*4+edi]
- add esi, ebp
- xor eax, eax
- xor edx, esi
- ;
- ; Round 5
- ror edx, 16
- mov esi, DWORD PTR 24[edi]
- mov al, dh
- mov bl, dl
- ror edx, 16
- xor ecx, esi
- mov esi, DWORD PTR 72[eax*4+edi]
- mov ebp, DWORD PTR 1096[ebx*4+edi]
- mov al, dh
- mov bl, dl
- add esi, ebp
- mov eax, DWORD PTR 2120[eax*4+edi]
- xor esi, eax
- mov ebp, DWORD PTR 3144[ebx*4+edi]
- add esi, ebp
- xor eax, eax
- xor ecx, esi
- ;
- ; Round 6
- ror ecx, 16
- mov esi, DWORD PTR 28[edi]
- mov al, ch
- mov bl, cl
- ror ecx, 16
- xor edx, esi
- mov esi, DWORD PTR 72[eax*4+edi]
- mov ebp, DWORD PTR 1096[ebx*4+edi]
- mov al, ch
- mov bl, cl
- add esi, ebp
- mov eax, DWORD PTR 2120[eax*4+edi]
- xor esi, eax
- mov ebp, DWORD PTR 3144[ebx*4+edi]
- add esi, ebp
- xor eax, eax
- xor edx, esi
- ;
- ; Round 7
- ror edx, 16
- mov esi, DWORD PTR 32[edi]
- mov al, dh
- mov bl, dl
- ror edx, 16
- xor ecx, esi
- mov esi, DWORD PTR 72[eax*4+edi]
- mov ebp, DWORD PTR 1096[ebx*4+edi]
- mov al, dh
- mov bl, dl
- add esi, ebp
- mov eax, DWORD PTR 2120[eax*4+edi]
- xor esi, eax
- mov ebp, DWORD PTR 3144[ebx*4+edi]
- add esi, ebp
- xor eax, eax
- xor ecx, esi
- ;
- ; Round 8
- ror ecx, 16
- mov esi, DWORD PTR 36[edi]
- mov al, ch
- mov bl, cl
- ror ecx, 16
- xor edx, esi
- mov esi, DWORD PTR 72[eax*4+edi]
- mov ebp, DWORD PTR 1096[ebx*4+edi]
- mov al, ch
- mov bl, cl
- add esi, ebp
- mov eax, DWORD PTR 2120[eax*4+edi]
- xor esi, eax
- mov ebp, DWORD PTR 3144[ebx*4+edi]
- add esi, ebp
- xor eax, eax
- xor edx, esi
- ;
- ; Round 9
- ror edx, 16
- mov esi, DWORD PTR 40[edi]
- mov al, dh
- mov bl, dl
- ror edx, 16
- xor ecx, esi
- mov esi, DWORD PTR 72[eax*4+edi]
- mov ebp, DWORD PTR 1096[ebx*4+edi]
- mov al, dh
- mov bl, dl
- add esi, ebp
- mov eax, DWORD PTR 2120[eax*4+edi]
- xor esi, eax
- mov ebp, DWORD PTR 3144[ebx*4+edi]
- add esi, ebp
- xor eax, eax
- xor ecx, esi
- ;
- ; Round 10
- ror ecx, 16
- mov esi, DWORD PTR 44[edi]
- mov al, ch
- mov bl, cl
- ror ecx, 16
- xor edx, esi
- mov esi, DWORD PTR 72[eax*4+edi]
- mov ebp, DWORD PTR 1096[ebx*4+edi]
- mov al, ch
- mov bl, cl
- add esi, ebp
- mov eax, DWORD PTR 2120[eax*4+edi]
- xor esi, eax
- mov ebp, DWORD PTR 3144[ebx*4+edi]
- add esi, ebp
- xor eax, eax
- xor edx, esi
- ;
- ; Round 11
- ror edx, 16
- mov esi, DWORD PTR 48[edi]
- mov al, dh
- mov bl, dl
- ror edx, 16
- xor ecx, esi
- mov esi, DWORD PTR 72[eax*4+edi]
- mov ebp, DWORD PTR 1096[ebx*4+edi]
- mov al, dh
- mov bl, dl
- add esi, ebp
- mov eax, DWORD PTR 2120[eax*4+edi]
- xor esi, eax
- mov ebp, DWORD PTR 3144[ebx*4+edi]
- add esi, ebp
- xor eax, eax
- xor ecx, esi
- ;
- ; Round 12
- ror ecx, 16
- mov esi, DWORD PTR 52[edi]
- mov al, ch
- mov bl, cl
- ror ecx, 16
- xor edx, esi
- mov esi, DWORD PTR 72[eax*4+edi]
- mov ebp, DWORD PTR 1096[ebx*4+edi]
- mov al, ch
- mov bl, cl
- add esi, ebp
- mov eax, DWORD PTR 2120[eax*4+edi]
- xor esi, eax
- mov ebp, DWORD PTR 3144[ebx*4+edi]
- add esi, ebp
- xor eax, eax
- xor edx, esi
- ;
- ; Round 13
- ror edx, 16
- mov esi, DWORD PTR 56[edi]
- mov al, dh
- mov bl, dl
- ror edx, 16
- xor ecx, esi
- mov esi, DWORD PTR 72[eax*4+edi]
- mov ebp, DWORD PTR 1096[ebx*4+edi]
- mov al, dh
- mov bl, dl
- add esi, ebp
- mov eax, DWORD PTR 2120[eax*4+edi]
- xor esi, eax
- mov ebp, DWORD PTR 3144[ebx*4+edi]
- add esi, ebp
- xor eax, eax
- xor ecx, esi
- ;
- ; Round 14
- ror ecx, 16
- mov esi, DWORD PTR 60[edi]
- mov al, ch
- mov bl, cl
- ror ecx, 16
- xor edx, esi
- mov esi, DWORD PTR 72[eax*4+edi]
- mov ebp, DWORD PTR 1096[ebx*4+edi]
- mov al, ch
- mov bl, cl
- add esi, ebp
- mov eax, DWORD PTR 2120[eax*4+edi]
- xor esi, eax
- mov ebp, DWORD PTR 3144[ebx*4+edi]
- add esi, ebp
- xor eax, eax
- xor edx, esi
- ;
- ; Round 15
- ror edx, 16
- mov esi, DWORD PTR 64[edi]
- mov al, dh
- mov bl, dl
- ror edx, 16
- xor ecx, esi
- mov esi, DWORD PTR 72[eax*4+edi]
- mov ebp, DWORD PTR 1096[ebx*4+edi]
- mov al, dh
- mov bl, dl
- add esi, ebp
- mov eax, DWORD PTR 2120[eax*4+edi]
- xor esi, eax
- mov ebp, DWORD PTR 3144[ebx*4+edi]
- add esi, ebp
- xor eax, eax
- xor ecx, esi
- xor edx, DWORD PTR 68[edi]
- mov eax, DWORD PTR 20[esp]
- mov DWORD PTR [eax],edx
- mov DWORD PTR 4[eax],ecx
- pop edi
- pop esi
- pop ebx
- pop ebp
- ret
-$L000start_decrypt:
- xor ecx, DWORD PTR 68[edi]
- ;
- ; Round 16
- ror ecx, 16
- mov esi, DWORD PTR 64[edi]
- mov al, ch
- mov bl, cl
- ror ecx, 16
- xor edx, esi
- mov esi, DWORD PTR 72[eax*4+edi]
- mov ebp, DWORD PTR 1096[ebx*4+edi]
- mov al, ch
- mov bl, cl
- add esi, ebp
- mov eax, DWORD PTR 2120[eax*4+edi]
- xor esi, eax
- mov ebp, DWORD PTR 3144[ebx*4+edi]
- add esi, ebp
- xor eax, eax
- xor edx, esi
- ;
- ; Round 15
- ror edx, 16
- mov esi, DWORD PTR 60[edi]
- mov al, dh
- mov bl, dl
- ror edx, 16
- xor ecx, esi
- mov esi, DWORD PTR 72[eax*4+edi]
- mov ebp, DWORD PTR 1096[ebx*4+edi]
- mov al, dh
- mov bl, dl
- add esi, ebp
- mov eax, DWORD PTR 2120[eax*4+edi]
- xor esi, eax
- mov ebp, DWORD PTR 3144[ebx*4+edi]
- add esi, ebp
- xor eax, eax
- xor ecx, esi
- ;
- ; Round 14
- ror ecx, 16
- mov esi, DWORD PTR 56[edi]
- mov al, ch
- mov bl, cl
- ror ecx, 16
- xor edx, esi
- mov esi, DWORD PTR 72[eax*4+edi]
- mov ebp, DWORD PTR 1096[ebx*4+edi]
- mov al, ch
- mov bl, cl
- add esi, ebp
- mov eax, DWORD PTR 2120[eax*4+edi]
- xor esi, eax
- mov ebp, DWORD PTR 3144[ebx*4+edi]
- add esi, ebp
- xor eax, eax
- xor edx, esi
- ;
- ; Round 13
- ror edx, 16
- mov esi, DWORD PTR 52[edi]
- mov al, dh
- mov bl, dl
- ror edx, 16
- xor ecx, esi
- mov esi, DWORD PTR 72[eax*4+edi]
- mov ebp, DWORD PTR 1096[ebx*4+edi]
- mov al, dh
- mov bl, dl
- add esi, ebp
- mov eax, DWORD PTR 2120[eax*4+edi]
- xor esi, eax
- mov ebp, DWORD PTR 3144[ebx*4+edi]
- add esi, ebp
- xor eax, eax
- xor ecx, esi
- ;
- ; Round 12
- ror ecx, 16
- mov esi, DWORD PTR 48[edi]
- mov al, ch
- mov bl, cl
- ror ecx, 16
- xor edx, esi
- mov esi, DWORD PTR 72[eax*4+edi]
- mov ebp, DWORD PTR 1096[ebx*4+edi]
- mov al, ch
- mov bl, cl
- add esi, ebp
- mov eax, DWORD PTR 2120[eax*4+edi]
- xor esi, eax
- mov ebp, DWORD PTR 3144[ebx*4+edi]
- add esi, ebp
- xor eax, eax
- xor edx, esi
- ;
- ; Round 11
- ror edx, 16
- mov esi, DWORD PTR 44[edi]
- mov al, dh
- mov bl, dl
- ror edx, 16
- xor ecx, esi
- mov esi, DWORD PTR 72[eax*4+edi]
- mov ebp, DWORD PTR 1096[ebx*4+edi]
- mov al, dh
- mov bl, dl
- add esi, ebp
- mov eax, DWORD PTR 2120[eax*4+edi]
- xor esi, eax
- mov ebp, DWORD PTR 3144[ebx*4+edi]
- add esi, ebp
- xor eax, eax
- xor ecx, esi
- ;
- ; Round 10
- ror ecx, 16
- mov esi, DWORD PTR 40[edi]
- mov al, ch
- mov bl, cl
- ror ecx, 16
- xor edx, esi
- mov esi, DWORD PTR 72[eax*4+edi]
- mov ebp, DWORD PTR 1096[ebx*4+edi]
- mov al, ch
- mov bl, cl
- add esi, ebp
- mov eax, DWORD PTR 2120[eax*4+edi]
- xor esi, eax
- mov ebp, DWORD PTR 3144[ebx*4+edi]
- add esi, ebp
- xor eax, eax
- xor edx, esi
- ;
- ; Round 9
- ror edx, 16
- mov esi, DWORD PTR 36[edi]
- mov al, dh
- mov bl, dl
- ror edx, 16
- xor ecx, esi
- mov esi, DWORD PTR 72[eax*4+edi]
- mov ebp, DWORD PTR 1096[ebx*4+edi]
- mov al, dh
- mov bl, dl
- add esi, ebp
- mov eax, DWORD PTR 2120[eax*4+edi]
- xor esi, eax
- mov ebp, DWORD PTR 3144[ebx*4+edi]
- add esi, ebp
- xor eax, eax
- xor ecx, esi
- ;
- ; Round 8
- ror ecx, 16
- mov esi, DWORD PTR 32[edi]
- mov al, ch
- mov bl, cl
- ror ecx, 16
- xor edx, esi
- mov esi, DWORD PTR 72[eax*4+edi]
- mov ebp, DWORD PTR 1096[ebx*4+edi]
- mov al, ch
- mov bl, cl
- add esi, ebp
- mov eax, DWORD PTR 2120[eax*4+edi]
- xor esi, eax
- mov ebp, DWORD PTR 3144[ebx*4+edi]
- add esi, ebp
- xor eax, eax
- xor edx, esi
- ;
- ; Round 7
- ror edx, 16
- mov esi, DWORD PTR 28[edi]
- mov al, dh
- mov bl, dl
- ror edx, 16
- xor ecx, esi
- mov esi, DWORD PTR 72[eax*4+edi]
- mov ebp, DWORD PTR 1096[ebx*4+edi]
- mov al, dh
- mov bl, dl
- add esi, ebp
- mov eax, DWORD PTR 2120[eax*4+edi]
- xor esi, eax
- mov ebp, DWORD PTR 3144[ebx*4+edi]
- add esi, ebp
- xor eax, eax
- xor ecx, esi
- ;
- ; Round 6
- ror ecx, 16
- mov esi, DWORD PTR 24[edi]
- mov al, ch
- mov bl, cl
- ror ecx, 16
- xor edx, esi
- mov esi, DWORD PTR 72[eax*4+edi]
- mov ebp, DWORD PTR 1096[ebx*4+edi]
- mov al, ch
- mov bl, cl
- add esi, ebp
- mov eax, DWORD PTR 2120[eax*4+edi]
- xor esi, eax
- mov ebp, DWORD PTR 3144[ebx*4+edi]
- add esi, ebp
- xor eax, eax
- xor edx, esi
- ;
- ; Round 5
- ror edx, 16
- mov esi, DWORD PTR 20[edi]
- mov al, dh
- mov bl, dl
- ror edx, 16
- xor ecx, esi
- mov esi, DWORD PTR 72[eax*4+edi]
- mov ebp, DWORD PTR 1096[ebx*4+edi]
- mov al, dh
- mov bl, dl
- add esi, ebp
- mov eax, DWORD PTR 2120[eax*4+edi]
- xor esi, eax
- mov ebp, DWORD PTR 3144[ebx*4+edi]
- add esi, ebp
- xor eax, eax
- xor ecx, esi
- ;
- ; Round 4
- ror ecx, 16
- mov esi, DWORD PTR 16[edi]
- mov al, ch
- mov bl, cl
- ror ecx, 16
- xor edx, esi
- mov esi, DWORD PTR 72[eax*4+edi]
- mov ebp, DWORD PTR 1096[ebx*4+edi]
- mov al, ch
- mov bl, cl
- add esi, ebp
- mov eax, DWORD PTR 2120[eax*4+edi]
- xor esi, eax
- mov ebp, DWORD PTR 3144[ebx*4+edi]
- add esi, ebp
- xor eax, eax
- xor edx, esi
- ;
- ; Round 3
- ror edx, 16
- mov esi, DWORD PTR 12[edi]
- mov al, dh
- mov bl, dl
- ror edx, 16
- xor ecx, esi
- mov esi, DWORD PTR 72[eax*4+edi]
- mov ebp, DWORD PTR 1096[ebx*4+edi]
- mov al, dh
- mov bl, dl
- add esi, ebp
- mov eax, DWORD PTR 2120[eax*4+edi]
- xor esi, eax
- mov ebp, DWORD PTR 3144[ebx*4+edi]
- add esi, ebp
- xor eax, eax
- xor ecx, esi
- ;
- ; Round 2
- ror ecx, 16
- mov esi, DWORD PTR 8[edi]
- mov al, ch
- mov bl, cl
- ror ecx, 16
- xor edx, esi
- mov esi, DWORD PTR 72[eax*4+edi]
- mov ebp, DWORD PTR 1096[ebx*4+edi]
- mov al, ch
- mov bl, cl
- add esi, ebp
- mov eax, DWORD PTR 2120[eax*4+edi]
- xor esi, eax
- mov ebp, DWORD PTR 3144[ebx*4+edi]
- add esi, ebp
- xor eax, eax
- xor edx, esi
- ;
- ; Round 1
- ror edx, 16
- mov esi, DWORD PTR 4[edi]
- mov al, dh
- mov bl, dl
- ror edx, 16
- xor ecx, esi
- mov esi, DWORD PTR 72[eax*4+edi]
- mov ebp, DWORD PTR 1096[ebx*4+edi]
- mov al, dh
- mov bl, dl
- add esi, ebp
- mov eax, DWORD PTR 2120[eax*4+edi]
- xor esi, eax
- mov ebp, DWORD PTR 3144[ebx*4+edi]
- add esi, ebp
- xor eax, eax
- xor ecx, esi
- xor edx, DWORD PTR [edi]
- mov eax, DWORD PTR 20[esp]
- mov DWORD PTR [eax],edx
- mov DWORD PTR 4[eax],ecx
- pop edi
- pop esi
- pop ebx
- pop ebp
- ret
-_BF_encrypt ENDP
-_TEXT ENDS
-END
+ ; Don't even think of reading this code
+ ; It was automatically generated by bf-586.pl
+ ; Which is a perl program used to generate the x86 assember for
+ ; any of elf, a.out, BSDI,Win32, or Solaris
+ ; eric <eay@cryptsoft.com>
+ ;
+ TITLE bf-586.asm
+ .486
+.model FLAT
+_TEXT SEGMENT
+PUBLIC _BF_encrypt
+
+_BF_encrypt PROC NEAR
+ ;
+ push ebp
+ push ebx
+ mov ebx, DWORD PTR 12[esp]
+ mov ebp, DWORD PTR 16[esp]
+ push esi
+ push edi
+ ; Load the 2 words
+ mov edi, DWORD PTR [ebx]
+ mov esi, DWORD PTR 4[ebx]
+ xor eax, eax
+ mov ebx, DWORD PTR [ebp]
+ xor ecx, ecx
+ xor edi, ebx
+ ;
+ ; Round 0
+ mov edx, DWORD PTR 4[ebp]
+ mov ebx, edi
+ xor esi, edx
+ shr ebx, 16
+ mov edx, edi
+ mov al, bh
+ and ebx, 255
+ mov cl, dh
+ and edx, 255
+ mov eax, DWORD PTR 72[eax*4+ebp]
+ mov ebx, DWORD PTR 1096[ebx*4+ebp]
+ add ebx, eax
+ mov eax, DWORD PTR 2120[ecx*4+ebp]
+ xor ebx, eax
+ mov edx, DWORD PTR 3144[edx*4+ebp]
+ add ebx, edx
+ xor eax, eax
+ xor esi, ebx
+ ;
+ ; Round 1
+ mov edx, DWORD PTR 8[ebp]
+ mov ebx, esi
+ xor edi, edx
+ shr ebx, 16
+ mov edx, esi
+ mov al, bh
+ and ebx, 255
+ mov cl, dh
+ and edx, 255
+ mov eax, DWORD PTR 72[eax*4+ebp]
+ mov ebx, DWORD PTR 1096[ebx*4+ebp]
+ add ebx, eax
+ mov eax, DWORD PTR 2120[ecx*4+ebp]
+ xor ebx, eax
+ mov edx, DWORD PTR 3144[edx*4+ebp]
+ add ebx, edx
+ xor eax, eax
+ xor edi, ebx
+ ;
+ ; Round 2
+ mov edx, DWORD PTR 12[ebp]
+ mov ebx, edi
+ xor esi, edx
+ shr ebx, 16
+ mov edx, edi
+ mov al, bh
+ and ebx, 255
+ mov cl, dh
+ and edx, 255
+ mov eax, DWORD PTR 72[eax*4+ebp]
+ mov ebx, DWORD PTR 1096[ebx*4+ebp]
+ add ebx, eax
+ mov eax, DWORD PTR 2120[ecx*4+ebp]
+ xor ebx, eax
+ mov edx, DWORD PTR 3144[edx*4+ebp]
+ add ebx, edx
+ xor eax, eax
+ xor esi, ebx
+ ;
+ ; Round 3
+ mov edx, DWORD PTR 16[ebp]
+ mov ebx, esi
+ xor edi, edx
+ shr ebx, 16
+ mov edx, esi
+ mov al, bh
+ and ebx, 255
+ mov cl, dh
+ and edx, 255
+ mov eax, DWORD PTR 72[eax*4+ebp]
+ mov ebx, DWORD PTR 1096[ebx*4+ebp]
+ add ebx, eax
+ mov eax, DWORD PTR 2120[ecx*4+ebp]
+ xor ebx, eax
+ mov edx, DWORD PTR 3144[edx*4+ebp]
+ add ebx, edx
+ xor eax, eax
+ xor edi, ebx
+ ;
+ ; Round 4
+ mov edx, DWORD PTR 20[ebp]
+ mov ebx, edi
+ xor esi, edx
+ shr ebx, 16
+ mov edx, edi
+ mov al, bh
+ and ebx, 255
+ mov cl, dh
+ and edx, 255
+ mov eax, DWORD PTR 72[eax*4+ebp]
+ mov ebx, DWORD PTR 1096[ebx*4+ebp]
+ add ebx, eax
+ mov eax, DWORD PTR 2120[ecx*4+ebp]
+ xor ebx, eax
+ mov edx, DWORD PTR 3144[edx*4+ebp]
+ add ebx, edx
+ xor eax, eax
+ xor esi, ebx
+ ;
+ ; Round 5
+ mov edx, DWORD PTR 24[ebp]
+ mov ebx, esi
+ xor edi, edx
+ shr ebx, 16
+ mov edx, esi
+ mov al, bh
+ and ebx, 255
+ mov cl, dh
+ and edx, 255
+ mov eax, DWORD PTR 72[eax*4+ebp]
+ mov ebx, DWORD PTR 1096[ebx*4+ebp]
+ add ebx, eax
+ mov eax, DWORD PTR 2120[ecx*4+ebp]
+ xor ebx, eax
+ mov edx, DWORD PTR 3144[edx*4+ebp]
+ add ebx, edx
+ xor eax, eax
+ xor edi, ebx
+ ;
+ ; Round 6
+ mov edx, DWORD PTR 28[ebp]
+ mov ebx, edi
+ xor esi, edx
+ shr ebx, 16
+ mov edx, edi
+ mov al, bh
+ and ebx, 255
+ mov cl, dh
+ and edx, 255
+ mov eax, DWORD PTR 72[eax*4+ebp]
+ mov ebx, DWORD PTR 1096[ebx*4+ebp]
+ add ebx, eax
+ mov eax, DWORD PTR 2120[ecx*4+ebp]
+ xor ebx, eax
+ mov edx, DWORD PTR 3144[edx*4+ebp]
+ add ebx, edx
+ xor eax, eax
+ xor esi, ebx
+ ;
+ ; Round 7
+ mov edx, DWORD PTR 32[ebp]
+ mov ebx, esi
+ xor edi, edx
+ shr ebx, 16
+ mov edx, esi
+ mov al, bh
+ and ebx, 255
+ mov cl, dh
+ and edx, 255
+ mov eax, DWORD PTR 72[eax*4+ebp]
+ mov ebx, DWORD PTR 1096[ebx*4+ebp]
+ add ebx, eax
+ mov eax, DWORD PTR 2120[ecx*4+ebp]
+ xor ebx, eax
+ mov edx, DWORD PTR 3144[edx*4+ebp]
+ add ebx, edx
+ xor eax, eax
+ xor edi, ebx
+ ;
+ ; Round 8
+ mov edx, DWORD PTR 36[ebp]
+ mov ebx, edi
+ xor esi, edx
+ shr ebx, 16
+ mov edx, edi
+ mov al, bh
+ and ebx, 255
+ mov cl, dh
+ and edx, 255
+ mov eax, DWORD PTR 72[eax*4+ebp]
+ mov ebx, DWORD PTR 1096[ebx*4+ebp]
+ add ebx, eax
+ mov eax, DWORD PTR 2120[ecx*4+ebp]
+ xor ebx, eax
+ mov edx, DWORD PTR 3144[edx*4+ebp]
+ add ebx, edx
+ xor eax, eax
+ xor esi, ebx
+ ;
+ ; Round 9
+ mov edx, DWORD PTR 40[ebp]
+ mov ebx, esi
+ xor edi, edx
+ shr ebx, 16
+ mov edx, esi
+ mov al, bh
+ and ebx, 255
+ mov cl, dh
+ and edx, 255
+ mov eax, DWORD PTR 72[eax*4+ebp]
+ mov ebx, DWORD PTR 1096[ebx*4+ebp]
+ add ebx, eax
+ mov eax, DWORD PTR 2120[ecx*4+ebp]
+ xor ebx, eax
+ mov edx, DWORD PTR 3144[edx*4+ebp]
+ add ebx, edx
+ xor eax, eax
+ xor edi, ebx
+ ;
+ ; Round 10
+ mov edx, DWORD PTR 44[ebp]
+ mov ebx, edi
+ xor esi, edx
+ shr ebx, 16
+ mov edx, edi
+ mov al, bh
+ and ebx, 255
+ mov cl, dh
+ and edx, 255
+ mov eax, DWORD PTR 72[eax*4+ebp]
+ mov ebx, DWORD PTR 1096[ebx*4+ebp]
+ add ebx, eax
+ mov eax, DWORD PTR 2120[ecx*4+ebp]
+ xor ebx, eax
+ mov edx, DWORD PTR 3144[edx*4+ebp]
+ add ebx, edx
+ xor eax, eax
+ xor esi, ebx
+ ;
+ ; Round 11
+ mov edx, DWORD PTR 48[ebp]
+ mov ebx, esi
+ xor edi, edx
+ shr ebx, 16
+ mov edx, esi
+ mov al, bh
+ and ebx, 255
+ mov cl, dh
+ and edx, 255
+ mov eax, DWORD PTR 72[eax*4+ebp]
+ mov ebx, DWORD PTR 1096[ebx*4+ebp]
+ add ebx, eax
+ mov eax, DWORD PTR 2120[ecx*4+ebp]
+ xor ebx, eax
+ mov edx, DWORD PTR 3144[edx*4+ebp]
+ add ebx, edx
+ xor eax, eax
+ xor edi, ebx
+ ;
+ ; Round 12
+ mov edx, DWORD PTR 52[ebp]
+ mov ebx, edi
+ xor esi, edx
+ shr ebx, 16
+ mov edx, edi
+ mov al, bh
+ and ebx, 255
+ mov cl, dh
+ and edx, 255
+ mov eax, DWORD PTR 72[eax*4+ebp]
+ mov ebx, DWORD PTR 1096[ebx*4+ebp]
+ add ebx, eax
+ mov eax, DWORD PTR 2120[ecx*4+ebp]
+ xor ebx, eax
+ mov edx, DWORD PTR 3144[edx*4+ebp]
+ add ebx, edx
+ xor eax, eax
+ xor esi, ebx
+ ;
+ ; Round 13
+ mov edx, DWORD PTR 56[ebp]
+ mov ebx, esi
+ xor edi, edx
+ shr ebx, 16
+ mov edx, esi
+ mov al, bh
+ and ebx, 255
+ mov cl, dh
+ and edx, 255
+ mov eax, DWORD PTR 72[eax*4+ebp]
+ mov ebx, DWORD PTR 1096[ebx*4+ebp]
+ add ebx, eax
+ mov eax, DWORD PTR 2120[ecx*4+ebp]
+ xor ebx, eax
+ mov edx, DWORD PTR 3144[edx*4+ebp]
+ add ebx, edx
+ xor eax, eax
+ xor edi, ebx
+ ;
+ ; Round 14
+ mov edx, DWORD PTR 60[ebp]
+ mov ebx, edi
+ xor esi, edx
+ shr ebx, 16
+ mov edx, edi
+ mov al, bh
+ and ebx, 255
+ mov cl, dh
+ and edx, 255
+ mov eax, DWORD PTR 72[eax*4+ebp]
+ mov ebx, DWORD PTR 1096[ebx*4+ebp]
+ add ebx, eax
+ mov eax, DWORD PTR 2120[ecx*4+ebp]
+ xor ebx, eax
+ mov edx, DWORD PTR 3144[edx*4+ebp]
+ add ebx, edx
+ xor eax, eax
+ xor esi, ebx
+ ;
+ ; Round 15
+ mov edx, DWORD PTR 64[ebp]
+ mov ebx, esi
+ xor edi, edx
+ shr ebx, 16
+ mov edx, esi
+ mov al, bh
+ and ebx, 255
+ mov cl, dh
+ and edx, 255
+ mov eax, DWORD PTR 72[eax*4+ebp]
+ mov ebx, DWORD PTR 1096[ebx*4+ebp]
+ add ebx, eax
+ mov eax, DWORD PTR 2120[ecx*4+ebp]
+ xor ebx, eax
+ mov edx, DWORD PTR 3144[edx*4+ebp]
+ add ebx, edx
+ ; Load parameter 0 (16) enc=1
+ mov eax, DWORD PTR 20[esp]
+ xor edi, ebx
+ mov edx, DWORD PTR 68[ebp]
+ xor esi, edx
+ mov DWORD PTR 4[eax],edi
+ mov DWORD PTR [eax],esi
+ pop edi
+ pop esi
+ pop ebx
+ pop ebp
+ ret
+_BF_encrypt ENDP
+_TEXT ENDS
+_TEXT SEGMENT
+PUBLIC _BF_decrypt
+
+_BF_decrypt PROC NEAR
+ ;
+ push ebp
+ push ebx
+ mov ebx, DWORD PTR 12[esp]
+ mov ebp, DWORD PTR 16[esp]
+ push esi
+ push edi
+ ; Load the 2 words
+ mov edi, DWORD PTR [ebx]
+ mov esi, DWORD PTR 4[ebx]
+ xor eax, eax
+ mov ebx, DWORD PTR 68[ebp]
+ xor ecx, ecx
+ xor edi, ebx
+ ;
+ ; Round 16
+ mov edx, DWORD PTR 64[ebp]
+ mov ebx, edi
+ xor esi, edx
+ shr ebx, 16
+ mov edx, edi
+ mov al, bh
+ and ebx, 255
+ mov cl, dh
+ and edx, 255
+ mov eax, DWORD PTR 72[eax*4+ebp]
+ mov ebx, DWORD PTR 1096[ebx*4+ebp]
+ add ebx, eax
+ mov eax, DWORD PTR 2120[ecx*4+ebp]
+ xor ebx, eax
+ mov edx, DWORD PTR 3144[edx*4+ebp]
+ add ebx, edx
+ xor eax, eax
+ xor esi, ebx
+ ;
+ ; Round 15
+ mov edx, DWORD PTR 60[ebp]
+ mov ebx, esi
+ xor edi, edx
+ shr ebx, 16
+ mov edx, esi
+ mov al, bh
+ and ebx, 255
+ mov cl, dh
+ and edx, 255
+ mov eax, DWORD PTR 72[eax*4+ebp]
+ mov ebx, DWORD PTR 1096[ebx*4+ebp]
+ add ebx, eax
+ mov eax, DWORD PTR 2120[ecx*4+ebp]
+ xor ebx, eax
+ mov edx, DWORD PTR 3144[edx*4+ebp]
+ add ebx, edx
+ xor eax, eax
+ xor edi, ebx
+ ;
+ ; Round 14
+ mov edx, DWORD PTR 56[ebp]
+ mov ebx, edi
+ xor esi, edx
+ shr ebx, 16
+ mov edx, edi
+ mov al, bh
+ and ebx, 255
+ mov cl, dh
+ and edx, 255
+ mov eax, DWORD PTR 72[eax*4+ebp]
+ mov ebx, DWORD PTR 1096[ebx*4+ebp]
+ add ebx, eax
+ mov eax, DWORD PTR 2120[ecx*4+ebp]
+ xor ebx, eax
+ mov edx, DWORD PTR 3144[edx*4+ebp]
+ add ebx, edx
+ xor eax, eax
+ xor esi, ebx
+ ;
+ ; Round 13
+ mov edx, DWORD PTR 52[ebp]
+ mov ebx, esi
+ xor edi, edx
+ shr ebx, 16
+ mov edx, esi
+ mov al, bh
+ and ebx, 255
+ mov cl, dh
+ and edx, 255
+ mov eax, DWORD PTR 72[eax*4+ebp]
+ mov ebx, DWORD PTR 1096[ebx*4+ebp]
+ add ebx, eax
+ mov eax, DWORD PTR 2120[ecx*4+ebp]
+ xor ebx, eax
+ mov edx, DWORD PTR 3144[edx*4+ebp]
+ add ebx, edx
+ xor eax, eax
+ xor edi, ebx
+ ;
+ ; Round 12
+ mov edx, DWORD PTR 48[ebp]
+ mov ebx, edi
+ xor esi, edx
+ shr ebx, 16
+ mov edx, edi
+ mov al, bh
+ and ebx, 255
+ mov cl, dh
+ and edx, 255
+ mov eax, DWORD PTR 72[eax*4+ebp]
+ mov ebx, DWORD PTR 1096[ebx*4+ebp]
+ add ebx, eax
+ mov eax, DWORD PTR 2120[ecx*4+ebp]
+ xor ebx, eax
+ mov edx, DWORD PTR 3144[edx*4+ebp]
+ add ebx, edx
+ xor eax, eax
+ xor esi, ebx
+ ;
+ ; Round 11
+ mov edx, DWORD PTR 44[ebp]
+ mov ebx, esi
+ xor edi, edx
+ shr ebx, 16
+ mov edx, esi
+ mov al, bh
+ and ebx, 255
+ mov cl, dh
+ and edx, 255
+ mov eax, DWORD PTR 72[eax*4+ebp]
+ mov ebx, DWORD PTR 1096[ebx*4+ebp]
+ add ebx, eax
+ mov eax, DWORD PTR 2120[ecx*4+ebp]
+ xor ebx, eax
+ mov edx, DWORD PTR 3144[edx*4+ebp]
+ add ebx, edx
+ xor eax, eax
+ xor edi, ebx
+ ;
+ ; Round 10
+ mov edx, DWORD PTR 40[ebp]
+ mov ebx, edi
+ xor esi, edx
+ shr ebx, 16
+ mov edx, edi
+ mov al, bh
+ and ebx, 255
+ mov cl, dh
+ and edx, 255
+ mov eax, DWORD PTR 72[eax*4+ebp]
+ mov ebx, DWORD PTR 1096[ebx*4+ebp]
+ add ebx, eax
+ mov eax, DWORD PTR 2120[ecx*4+ebp]
+ xor ebx, eax
+ mov edx, DWORD PTR 3144[edx*4+ebp]
+ add ebx, edx
+ xor eax, eax
+ xor esi, ebx
+ ;
+ ; Round 9
+ mov edx, DWORD PTR 36[ebp]
+ mov ebx, esi
+ xor edi, edx
+ shr ebx, 16
+ mov edx, esi
+ mov al, bh
+ and ebx, 255
+ mov cl, dh
+ and edx, 255
+ mov eax, DWORD PTR 72[eax*4+ebp]
+ mov ebx, DWORD PTR 1096[ebx*4+ebp]
+ add ebx, eax
+ mov eax, DWORD PTR 2120[ecx*4+ebp]
+ xor ebx, eax
+ mov edx, DWORD PTR 3144[edx*4+ebp]
+ add ebx, edx
+ xor eax, eax
+ xor edi, ebx
+ ;
+ ; Round 8
+ mov edx, DWORD PTR 32[ebp]
+ mov ebx, edi
+ xor esi, edx
+ shr ebx, 16
+ mov edx, edi
+ mov al, bh
+ and ebx, 255
+ mov cl, dh
+ and edx, 255
+ mov eax, DWORD PTR 72[eax*4+ebp]
+ mov ebx, DWORD PTR 1096[ebx*4+ebp]
+ add ebx, eax
+ mov eax, DWORD PTR 2120[ecx*4+ebp]
+ xor ebx, eax
+ mov edx, DWORD PTR 3144[edx*4+ebp]
+ add ebx, edx
+ xor eax, eax
+ xor esi, ebx
+ ;
+ ; Round 7
+ mov edx, DWORD PTR 28[ebp]
+ mov ebx, esi
+ xor edi, edx
+ shr ebx, 16
+ mov edx, esi
+ mov al, bh
+ and ebx, 255
+ mov cl, dh
+ and edx, 255
+ mov eax, DWORD PTR 72[eax*4+ebp]
+ mov ebx, DWORD PTR 1096[ebx*4+ebp]
+ add ebx, eax
+ mov eax, DWORD PTR 2120[ecx*4+ebp]
+ xor ebx, eax
+ mov edx, DWORD PTR 3144[edx*4+ebp]
+ add ebx, edx
+ xor eax, eax
+ xor edi, ebx
+ ;
+ ; Round 6
+ mov edx, DWORD PTR 24[ebp]
+ mov ebx, edi
+ xor esi, edx
+ shr ebx, 16
+ mov edx, edi
+ mov al, bh
+ and ebx, 255
+ mov cl, dh
+ and edx, 255
+ mov eax, DWORD PTR 72[eax*4+ebp]
+ mov ebx, DWORD PTR 1096[ebx*4+ebp]
+ add ebx, eax
+ mov eax, DWORD PTR 2120[ecx*4+ebp]
+ xor ebx, eax
+ mov edx, DWORD PTR 3144[edx*4+ebp]
+ add ebx, edx
+ xor eax, eax
+ xor esi, ebx
+ ;
+ ; Round 5
+ mov edx, DWORD PTR 20[ebp]
+ mov ebx, esi
+ xor edi, edx
+ shr ebx, 16
+ mov edx, esi
+ mov al, bh
+ and ebx, 255
+ mov cl, dh
+ and edx, 255
+ mov eax, DWORD PTR 72[eax*4+ebp]
+ mov ebx, DWORD PTR 1096[ebx*4+ebp]
+ add ebx, eax
+ mov eax, DWORD PTR 2120[ecx*4+ebp]
+ xor ebx, eax
+ mov edx, DWORD PTR 3144[edx*4+ebp]
+ add ebx, edx
+ xor eax, eax
+ xor edi, ebx
+ ;
+ ; Round 4
+ mov edx, DWORD PTR 16[ebp]
+ mov ebx, edi
+ xor esi, edx
+ shr ebx, 16
+ mov edx, edi
+ mov al, bh
+ and ebx, 255
+ mov cl, dh
+ and edx, 255
+ mov eax, DWORD PTR 72[eax*4+ebp]
+ mov ebx, DWORD PTR 1096[ebx*4+ebp]
+ add ebx, eax
+ mov eax, DWORD PTR 2120[ecx*4+ebp]
+ xor ebx, eax
+ mov edx, DWORD PTR 3144[edx*4+ebp]
+ add ebx, edx
+ xor eax, eax
+ xor esi, ebx
+ ;
+ ; Round 3
+ mov edx, DWORD PTR 12[ebp]
+ mov ebx, esi
+ xor edi, edx
+ shr ebx, 16
+ mov edx, esi
+ mov al, bh
+ and ebx, 255
+ mov cl, dh
+ and edx, 255
+ mov eax, DWORD PTR 72[eax*4+ebp]
+ mov ebx, DWORD PTR 1096[ebx*4+ebp]
+ add ebx, eax
+ mov eax, DWORD PTR 2120[ecx*4+ebp]
+ xor ebx, eax
+ mov edx, DWORD PTR 3144[edx*4+ebp]
+ add ebx, edx
+ xor eax, eax
+ xor edi, ebx
+ ;
+ ; Round 2
+ mov edx, DWORD PTR 8[ebp]
+ mov ebx, edi
+ xor esi, edx
+ shr ebx, 16
+ mov edx, edi
+ mov al, bh
+ and ebx, 255
+ mov cl, dh
+ and edx, 255
+ mov eax, DWORD PTR 72[eax*4+ebp]
+ mov ebx, DWORD PTR 1096[ebx*4+ebp]
+ add ebx, eax
+ mov eax, DWORD PTR 2120[ecx*4+ebp]
+ xor ebx, eax
+ mov edx, DWORD PTR 3144[edx*4+ebp]
+ add ebx, edx
+ xor eax, eax
+ xor esi, ebx
+ ;
+ ; Round 1
+ mov edx, DWORD PTR 4[ebp]
+ mov ebx, esi
+ xor edi, edx
+ shr ebx, 16
+ mov edx, esi
+ mov al, bh
+ and ebx, 255
+ mov cl, dh
+ and edx, 255
+ mov eax, DWORD PTR 72[eax*4+ebp]
+ mov ebx, DWORD PTR 1096[ebx*4+ebp]
+ add ebx, eax
+ mov eax, DWORD PTR 2120[ecx*4+ebp]
+ xor ebx, eax
+ mov edx, DWORD PTR 3144[edx*4+ebp]
+ add ebx, edx
+ ; Load parameter 0 (1) enc=0
+ mov eax, DWORD PTR 20[esp]
+ xor edi, ebx
+ mov edx, DWORD PTR [ebp]
+ xor esi, edx
+ mov DWORD PTR 4[eax],edi
+ mov DWORD PTR [eax],esi
+ pop edi
+ pop esi
+ pop ebx
+ pop ebp
+ ret
+_BF_decrypt ENDP
+_TEXT ENDS
+_TEXT SEGMENT
+PUBLIC _BF_cbc_encrypt
+
+_BF_cbc_encrypt PROC NEAR
+ ;
+ push ebp
+ push ebx
+ push esi
+ push edi
+ mov ebp, DWORD PTR 28[esp]
+ ; getting iv ptr from parameter 4
+ mov ebx, DWORD PTR 36[esp]
+ mov esi, DWORD PTR [ebx]
+ mov edi, DWORD PTR 4[ebx]
+ push edi
+ push esi
+ push edi
+ push esi
+ mov ebx, esp
+ mov esi, DWORD PTR 36[esp]
+ mov edi, DWORD PTR 40[esp]
+ ; getting encrypt flag from parameter 5
+ mov ecx, DWORD PTR 56[esp]
+ ; get and push parameter 3
+ mov eax, DWORD PTR 48[esp]
+ push eax
+ push ebx
+ cmp ecx, 0
+ jz $L000decrypt
+ and ebp, 4294967288
+ mov eax, DWORD PTR 8[esp]
+ mov ebx, DWORD PTR 12[esp]
+ jz $L001encrypt_finish
+L002encrypt_loop:
+ mov ecx, DWORD PTR [esi]
+ mov edx, DWORD PTR 4[esi]
+ xor eax, ecx
+ xor ebx, edx
+ bswap eax
+ bswap ebx
+ mov DWORD PTR 8[esp],eax
+ mov DWORD PTR 12[esp],ebx
+ call _BF_encrypt
+ mov eax, DWORD PTR 8[esp]
+ mov ebx, DWORD PTR 12[esp]
+ bswap eax
+ bswap ebx
+ mov DWORD PTR [edi],eax
+ mov DWORD PTR 4[edi],ebx
+ add esi, 8
+ add edi, 8
+ sub ebp, 8
+ jnz L002encrypt_loop
+$L001encrypt_finish:
+ mov ebp, DWORD PTR 52[esp]
+ and ebp, 7
+ jz $L003finish
+ xor ecx, ecx
+ xor edx, edx
+ mov ebp, DWORD PTR $L004cbc_enc_jmp_table[ebp*4]
+ jmp ebp
+L005ej7:
+ mov dh, BYTE PTR 6[esi]
+ shl edx, 8
+L006ej6:
+ mov dh, BYTE PTR 5[esi]
+L007ej5:
+ mov dl, BYTE PTR 4[esi]
+L008ej4:
+ mov ecx, DWORD PTR [esi]
+ jmp $L009ejend
+L010ej3:
+ mov ch, BYTE PTR 2[esi]
+ shl ecx, 8
+L011ej2:
+ mov ch, BYTE PTR 1[esi]
+L012ej1:
+ mov cl, BYTE PTR [esi]
+$L009ejend:
+ xor eax, ecx
+ xor ebx, edx
+ bswap eax
+ bswap ebx
+ mov DWORD PTR 8[esp],eax
+ mov DWORD PTR 12[esp],ebx
+ call _BF_encrypt
+ mov eax, DWORD PTR 8[esp]
+ mov ebx, DWORD PTR 12[esp]
+ bswap eax
+ bswap ebx
+ mov DWORD PTR [edi],eax
+ mov DWORD PTR 4[edi],ebx
+ jmp $L003finish
+$L000decrypt:
+ and ebp, 4294967288
+ mov eax, DWORD PTR 16[esp]
+ mov ebx, DWORD PTR 20[esp]
+ jz $L013decrypt_finish
+L014decrypt_loop:
+ mov eax, DWORD PTR [esi]
+ mov ebx, DWORD PTR 4[esi]
+ bswap eax
+ bswap ebx
+ mov DWORD PTR 8[esp],eax
+ mov DWORD PTR 12[esp],ebx
+ call _BF_decrypt
+ mov eax, DWORD PTR 8[esp]
+ mov ebx, DWORD PTR 12[esp]
+ bswap eax
+ bswap ebx
+ mov ecx, DWORD PTR 16[esp]
+ mov edx, DWORD PTR 20[esp]
+ xor ecx, eax
+ xor edx, ebx
+ mov eax, DWORD PTR [esi]
+ mov ebx, DWORD PTR 4[esi]
+ mov DWORD PTR [edi],ecx
+ mov DWORD PTR 4[edi],edx
+ mov DWORD PTR 16[esp],eax
+ mov DWORD PTR 20[esp],ebx
+ add esi, 8
+ add edi, 8
+ sub ebp, 8
+ jnz L014decrypt_loop
+$L013decrypt_finish:
+ mov ebp, DWORD PTR 52[esp]
+ and ebp, 7
+ jz $L003finish
+ mov eax, DWORD PTR [esi]
+ mov ebx, DWORD PTR 4[esi]
+ bswap eax
+ bswap ebx
+ mov DWORD PTR 8[esp],eax
+ mov DWORD PTR 12[esp],ebx
+ call _BF_decrypt
+ mov eax, DWORD PTR 8[esp]
+ mov ebx, DWORD PTR 12[esp]
+ bswap eax
+ bswap ebx
+ mov ecx, DWORD PTR 16[esp]
+ mov edx, DWORD PTR 20[esp]
+ xor ecx, eax
+ xor edx, ebx
+ mov eax, DWORD PTR [esi]
+ mov ebx, DWORD PTR 4[esi]
+L015dj7:
+ ror edx, 16
+ mov BYTE PTR 6[edi],dl
+ shr edx, 16
+L016dj6:
+ mov BYTE PTR 5[edi],dh
+L017dj5:
+ mov BYTE PTR 4[edi],dl
+L018dj4:
+ mov DWORD PTR [edi],ecx
+ jmp $L019djend
+L020dj3:
+ ror ecx, 16
+ mov BYTE PTR 2[edi],cl
+ shl ecx, 16
+L021dj2:
+ mov BYTE PTR 1[esi],ch
+L022dj1:
+ mov BYTE PTR [esi], cl
+$L019djend:
+ jmp $L003finish
+$L003finish:
+ mov ecx, DWORD PTR 60[esp]
+ add esp, 24
+ mov DWORD PTR [ecx],eax
+ mov DWORD PTR 4[ecx],ebx
+ pop edi
+ pop esi
+ pop ebx
+ pop ebp
+ ret
+$L004cbc_enc_jmp_table:
+ DD 0
+ DD L012ej1
+ DD L011ej2
+ DD L010ej3
+ DD L008ej4
+ DD L007ej5
+ DD L006ej6
+ DD L005ej7
+L023cbc_dec_jmp_table:
+ DD 0
+ DD L022dj1
+ DD L021dj2
+ DD L020dj3
+ DD L018dj4
+ DD L017dj5
+ DD L016dj6
+ DD L015dj7
+_BF_cbc_encrypt ENDP
+_TEXT ENDS
+END
diff --git a/crypto/bf/asm/bf-586.pl b/crypto/bf/asm/bf-586.pl
new file mode 100644
index 0000000000..252abb710d
--- /dev/null
+++ b/crypto/bf/asm/bf-586.pl
@@ -0,0 +1,136 @@
+#!/usr/local/bin/perl
+
+push(@INC,"perlasm","../../perlasm");
+require "x86asm.pl";
+require "cbc.pl";
+
+&asm_init($ARGV[0],"bf-586.pl");
+
+$BF_ROUNDS=16;
+$BF_OFF=($BF_ROUNDS+2)*4;
+$L="edi";
+$R="esi";
+$P="ebp";
+$tmp1="eax";
+$tmp2="ebx";
+$tmp3="ecx";
+$tmp4="edx";
+
+&BF_encrypt("BF_encrypt",1);
+&BF_encrypt("BF_decrypt",0);
+&cbc("BF_cbc_encrypt","BF_encrypt","BF_decrypt",1,4,5,3,-1,-1);
+&asm_finish();
+
+sub BF_encrypt
+ {
+ local($name,$enc)=@_;
+
+ &function_begin_B($name,"");
+
+ &comment("");
+
+ &push("ebp");
+ &push("ebx");
+ &mov($tmp2,&wparam(0));
+ &mov($P,&wparam(1));
+ &push("esi");
+ &push("edi");
+
+ &comment("Load the 2 words");
+ &mov($L,&DWP(0,$tmp2,"",0));
+ &mov($R,&DWP(4,$tmp2,"",0));
+
+ &xor( $tmp1, $tmp1);
+
+ # encrypting part
+
+ if ($enc)
+ {
+ &mov($tmp2,&DWP(0,$P,"",0));
+ &xor( $tmp3, $tmp3);
+
+ &xor($L,$tmp2);
+ for ($i=0; $i<$BF_ROUNDS; $i+=2)
+ {
+ &comment("");
+ &comment("Round $i");
+ &BF_ENCRYPT($i+1,$R,$L,$P,$tmp1,$tmp2,$tmp3,$tmp4,1);
+
+ &comment("");
+ &comment("Round ".sprintf("%d",$i+1));
+ &BF_ENCRYPT($i+2,$L,$R,$P,$tmp1,$tmp2,$tmp3,$tmp4,1);
+ }
+ # &mov($tmp1,&wparam(0)); In last loop
+ &mov($tmp4,&DWP(($BF_ROUNDS+1)*4,$P,"",0));
+ }
+ else
+ {
+ &mov($tmp2,&DWP(($BF_ROUNDS+1)*4,$P,"",0));
+ &xor( $tmp3, $tmp3);
+
+ &xor($L,$tmp2);
+ for ($i=$BF_ROUNDS; $i>0; $i-=2)
+ {
+ &comment("");
+ &comment("Round $i");
+ &BF_ENCRYPT($i,$R,$L,$P,$tmp1,$tmp2,$tmp3,$tmp4,0);
+ &comment("");
+ &comment("Round ".sprintf("%d",$i-1));
+ &BF_ENCRYPT($i-1,$L,$R,$P,$tmp1,$tmp2,$tmp3,$tmp4,0);
+ }
+ # &mov($tmp1,&wparam(0)); In last loop
+ &mov($tmp4,&DWP(0,$P,"",0));
+ }
+
+ &xor($R,$tmp4);
+ &mov(&DWP(4,$tmp1,"",0),$L);
+
+ &mov(&DWP(0,$tmp1,"",0),$R);
+ &function_end($name);
+ }
+
+sub BF_ENCRYPT
+ {
+ local($i,$L,$R,$P,$tmp1,$tmp2,$tmp3,$tmp4,$enc)=@_;
+
+ &mov( $tmp4, &DWP(&n2a($i*4),$P,"",0)); # for next round
+
+ &mov( $tmp2, $R);
+ &xor( $L, $tmp4);
+
+ &shr( $tmp2, 16);
+ &mov( $tmp4, $R);
+
+ &movb( &LB($tmp1), &HB($tmp2)); # A
+ &and( $tmp2, 0xff); # B
+
+ &movb( &LB($tmp3), &HB($tmp4)); # C
+ &and( $tmp4, 0xff); # D
+
+ &mov( $tmp1, &DWP(&n2a($BF_OFF+0x0000),$P,$tmp1,4));
+ &mov( $tmp2, &DWP(&n2a($BF_OFF+0x0400),$P,$tmp2,4));
+
+ &add( $tmp2, $tmp1);
+ &mov( $tmp1, &DWP(&n2a($BF_OFF+0x0800),$P,$tmp3,4));
+
+ &xor( $tmp2, $tmp1);
+ &mov( $tmp4, &DWP(&n2a($BF_OFF+0x0C00),$P,$tmp4,4));
+
+ &add( $tmp2, $tmp4);
+ if (($enc && ($i != 16)) || ((!$enc) && ($i != 1)))
+ { &xor( $tmp1, $tmp1); }
+ else
+ {
+ &comment("Load parameter 0 ($i) enc=$enc");
+ &mov($tmp1,&wparam(0));
+ } # In last loop
+
+ &xor( $L, $tmp2);
+ # delay
+ }
+
+sub n2a
+ {
+ sprintf("%d",$_[0]);
+ }
+
diff --git a/crypto/bf/asm/bf-686.pl b/crypto/bf/asm/bf-686.pl
new file mode 100644
index 0000000000..7a62f67161
--- /dev/null
+++ b/crypto/bf/asm/bf-686.pl
@@ -0,0 +1,128 @@
+#!/usr/local/bin/perl
+# (generator script for pentium-pro-optimized Blowfish assembler)
+
+push(@INC,"perlasm","../../perlasm");
+require "x86asm.pl";
+require "cbc.pl";
+
+&asm_init($ARGV[0],"bf-686.pl");
+
+$BF_ROUNDS=16;
+$BF_OFF=($BF_ROUNDS+2)*4;
+$L="ecx";
+$R="edx";
+$P="edi";
+$tot="esi";
+$tmp1="eax";
+$tmp2="ebx";
+$tmp3="ebp";
+
+&des_encrypt("BF_encrypt",1);
+&des_encrypt("BF_decrypt",0);
+&cbc("BF_cbc_encrypt","BF_encrypt","BF_decrypt",1,4,5,3,-1,-1);
+
+&asm_finish();
+
+&file_end();
+
+sub des_encrypt
+ {
+ local($name,$enc)=@_;
+
+ &function_begin($name,"");
+
+ &comment("");
+ &comment("Load the 2 words");
+ &mov("eax",&wparam(0));
+ &mov($L,&DWP(0,"eax","",0));
+ &mov($R,&DWP(4,"eax","",0));
+
+ &comment("");
+ &comment("P pointer, s and enc flag");
+ &mov($P,&wparam(1));
+
+ &xor( $tmp1, $tmp1);
+ &xor( $tmp2, $tmp2);
+
+ # encrypting part
+
+ if ($enc)
+ {
+ &xor($L,&DWP(0,$P,"",0));
+ for ($i=0; $i<$BF_ROUNDS; $i+=2)
+ {
+ &comment("");
+ &comment("Round $i");
+ &BF_ENCRYPT($i+1,$R,$L,$P,$tot,$tmp1,$tmp2,$tmp3);
+
+ &comment("");
+ &comment("Round ".sprintf("%d",$i+1));
+ &BF_ENCRYPT($i+2,$L,$R,$P,$tot,$tmp1,$tmp2,$tmp3);
+ }
+ &xor($R,&DWP(($BF_ROUNDS+1)*4,$P,"",0));
+
+ &mov("eax",&wparam(0));
+ &mov(&DWP(0,"eax","",0),$R);
+ &mov(&DWP(4,"eax","",0),$L);
+ &function_end_A($name);
+ }
+ else
+ {
+ &xor($L,&DWP(($BF_ROUNDS+1)*4,$P,"",0));
+ for ($i=$BF_ROUNDS; $i>0; $i-=2)
+ {
+ &comment("");
+ &comment("Round $i");
+ &BF_ENCRYPT($i,$R,$L,$P,$tot,$tmp1,$tmp2,$tmp3);
+ &comment("");
+ &comment("Round ".sprintf("%d",$i-1));
+ &BF_ENCRYPT($i-1,$L,$R,$P,$tot,$tmp1,$tmp2,$tmp3);
+ }
+ &xor($R,&DWP(0,$P,"",0));
+
+ &mov("eax",&wparam(0));
+ &mov(&DWP(0,"eax","",0),$R);
+ &mov(&DWP(4,"eax","",0),$L);
+ &function_end_A($name);
+ }
+
+ &function_end_B($name);
+ }
+
+sub BF_ENCRYPT
+ {
+ local($i,$L,$R,$P,$tot,$tmp1,$tmp2,$tmp3)=@_;
+
+ &rotr( $R, 16);
+ &mov( $tot, &DWP(&n2a($i*4),$P,"",0));
+
+ &movb( &LB($tmp1), &HB($R));
+ &movb( &LB($tmp2), &LB($R));
+
+ &rotr( $R, 16);
+ &xor( $L, $tot);
+
+ &mov( $tot, &DWP(&n2a($BF_OFF+0x0000),$P,$tmp1,4));
+ &mov( $tmp3, &DWP(&n2a($BF_OFF+0x0400),$P,$tmp2,4));
+
+ &movb( &LB($tmp1), &HB($R));
+ &movb( &LB($tmp2), &LB($R));
+
+ &add( $tot, $tmp3);
+ &mov( $tmp1, &DWP(&n2a($BF_OFF+0x0800),$P,$tmp1,4)); # delay
+
+ &xor( $tot, $tmp1);
+ &mov( $tmp3, &DWP(&n2a($BF_OFF+0x0C00),$P,$tmp2,4));
+
+ &add( $tot, $tmp3);
+ &xor( $tmp1, $tmp1);
+
+ &xor( $L, $tot);
+ # delay
+ }
+
+sub n2a
+ {
+ sprintf("%d",$_[0]);
+ }
+
diff --git a/crypto/bf/asm/bx86unix.cpp b/crypto/bf/asm/bx86unix.cpp
index dcb10d23dd..cdaa269378 100644
--- a/crypto/bf/asm/bx86unix.cpp
+++ b/crypto/bf/asm/bx86unix.cpp
@@ -1,24 +1,37 @@
+/* Run the C pre-processor over this file with one of the following defined
+ * ELF - elf object files,
+ * OUT - a.out object files,
+ * BSDI - BSDI style a.out object files
+ * SOL - Solaris style elf
+ */
-#define TYPE(a,b) .type a,b
-#define SIZE(a,b) .size a,b
+#define TYPE(a,b) .type a,b
+#define SIZE(a,b) .size a,b
+
+#if defined(OUT) || defined(BSDI)
+#define BF_encrypt _BF_encrypt
+#define BF_decrypt _BF_decrypt
+#define BF_cbc_encrypt _BF_cbc_encrypt
+
+#endif
#ifdef OUT
-#define OK 1
-#define BF_encrypt _BF_encrypt
-#define ALIGN 4
+#define OK 1
+#define ALIGN 4
#endif
#ifdef BSDI
-#define OK 1
-#define BF_encrypt _BF_encrypt
-#define ALIGN 4
+#define OK 1
+#define ALIGN 4
#undef SIZE
#undef TYPE
+#define SIZE(a,b)
+#define TYPE(a,b)
#endif
#if defined(ELF) || defined(SOL)
-#define OK 1
-#define ALIGN 16
+#define OK 1
+#define ALIGN 16
#endif
#ifndef OK
@@ -29,5 +42,935 @@ SOL - solaris systems, which are elf with strange comment lines
BSDI - a.out with a very primitive version of as.
#endif
-#include "bx86-cpp.s"
+/* Let the Assembler begin :-) */
+ /* Don't even think of reading this code */
+ /* It was automatically generated by bf-586.pl */
+	/* Which is a perl program used to generate the x86 assembler for */
+ /* any of elf, a.out, BSDI,Win32, or Solaris */
+ /* eric <eay@cryptsoft.com> */
+
+ .file "bf-586.s"
+ .version "01.01"
+gcc2_compiled.:
+.text
+ .align ALIGN
+.globl BF_encrypt
+ TYPE(BF_encrypt,@function)
+BF_encrypt:
+
+ pushl %ebp
+ pushl %ebx
+ movl 12(%esp), %ebx
+ movl 16(%esp), %ebp
+ pushl %esi
+ pushl %edi
+ /* Load the 2 words */
+ movl (%ebx), %edi
+ movl 4(%ebx), %esi
+ xorl %eax, %eax
+ movl (%ebp), %ebx
+ xorl %ecx, %ecx
+ xorl %ebx, %edi
+
+ /* Round 0 */
+ movl 4(%ebp), %edx
+ movl %edi, %ebx
+ xorl %edx, %esi
+ shrl $16, %ebx
+ movl %edi, %edx
+ movb %bh, %al
+ andl $255, %ebx
+ movb %dh, %cl
+ andl $255, %edx
+ movl 72(%ebp,%eax,4),%eax
+ movl 1096(%ebp,%ebx,4),%ebx
+ addl %eax, %ebx
+ movl 2120(%ebp,%ecx,4),%eax
+ xorl %eax, %ebx
+ movl 3144(%ebp,%edx,4),%edx
+ addl %edx, %ebx
+ xorl %eax, %eax
+ xorl %ebx, %esi
+
+ /* Round 1 */
+ movl 8(%ebp), %edx
+ movl %esi, %ebx
+ xorl %edx, %edi
+ shrl $16, %ebx
+ movl %esi, %edx
+ movb %bh, %al
+ andl $255, %ebx
+ movb %dh, %cl
+ andl $255, %edx
+ movl 72(%ebp,%eax,4),%eax
+ movl 1096(%ebp,%ebx,4),%ebx
+ addl %eax, %ebx
+ movl 2120(%ebp,%ecx,4),%eax
+ xorl %eax, %ebx
+ movl 3144(%ebp,%edx,4),%edx
+ addl %edx, %ebx
+ xorl %eax, %eax
+ xorl %ebx, %edi
+
+ /* Round 2 */
+ movl 12(%ebp), %edx
+ movl %edi, %ebx
+ xorl %edx, %esi
+ shrl $16, %ebx
+ movl %edi, %edx
+ movb %bh, %al
+ andl $255, %ebx
+ movb %dh, %cl
+ andl $255, %edx
+ movl 72(%ebp,%eax,4),%eax
+ movl 1096(%ebp,%ebx,4),%ebx
+ addl %eax, %ebx
+ movl 2120(%ebp,%ecx,4),%eax
+ xorl %eax, %ebx
+ movl 3144(%ebp,%edx,4),%edx
+ addl %edx, %ebx
+ xorl %eax, %eax
+ xorl %ebx, %esi
+
+ /* Round 3 */
+ movl 16(%ebp), %edx
+ movl %esi, %ebx
+ xorl %edx, %edi
+ shrl $16, %ebx
+ movl %esi, %edx
+ movb %bh, %al
+ andl $255, %ebx
+ movb %dh, %cl
+ andl $255, %edx
+ movl 72(%ebp,%eax,4),%eax
+ movl 1096(%ebp,%ebx,4),%ebx
+ addl %eax, %ebx
+ movl 2120(%ebp,%ecx,4),%eax
+ xorl %eax, %ebx
+ movl 3144(%ebp,%edx,4),%edx
+ addl %edx, %ebx
+ xorl %eax, %eax
+ xorl %ebx, %edi
+
+ /* Round 4 */
+ movl 20(%ebp), %edx
+ movl %edi, %ebx
+ xorl %edx, %esi
+ shrl $16, %ebx
+ movl %edi, %edx
+ movb %bh, %al
+ andl $255, %ebx
+ movb %dh, %cl
+ andl $255, %edx
+ movl 72(%ebp,%eax,4),%eax
+ movl 1096(%ebp,%ebx,4),%ebx
+ addl %eax, %ebx
+ movl 2120(%ebp,%ecx,4),%eax
+ xorl %eax, %ebx
+ movl 3144(%ebp,%edx,4),%edx
+ addl %edx, %ebx
+ xorl %eax, %eax
+ xorl %ebx, %esi
+
+ /* Round 5 */
+ movl 24(%ebp), %edx
+ movl %esi, %ebx
+ xorl %edx, %edi
+ shrl $16, %ebx
+ movl %esi, %edx
+ movb %bh, %al
+ andl $255, %ebx
+ movb %dh, %cl
+ andl $255, %edx
+ movl 72(%ebp,%eax,4),%eax
+ movl 1096(%ebp,%ebx,4),%ebx
+ addl %eax, %ebx
+ movl 2120(%ebp,%ecx,4),%eax
+ xorl %eax, %ebx
+ movl 3144(%ebp,%edx,4),%edx
+ addl %edx, %ebx
+ xorl %eax, %eax
+ xorl %ebx, %edi
+
+ /* Round 6 */
+ movl 28(%ebp), %edx
+ movl %edi, %ebx
+ xorl %edx, %esi
+ shrl $16, %ebx
+ movl %edi, %edx
+ movb %bh, %al
+ andl $255, %ebx
+ movb %dh, %cl
+ andl $255, %edx
+ movl 72(%ebp,%eax,4),%eax
+ movl 1096(%ebp,%ebx,4),%ebx
+ addl %eax, %ebx
+ movl 2120(%ebp,%ecx,4),%eax
+ xorl %eax, %ebx
+ movl 3144(%ebp,%edx,4),%edx
+ addl %edx, %ebx
+ xorl %eax, %eax
+ xorl %ebx, %esi
+
+ /* Round 7 */
+ movl 32(%ebp), %edx
+ movl %esi, %ebx
+ xorl %edx, %edi
+ shrl $16, %ebx
+ movl %esi, %edx
+ movb %bh, %al
+ andl $255, %ebx
+ movb %dh, %cl
+ andl $255, %edx
+ movl 72(%ebp,%eax,4),%eax
+ movl 1096(%ebp,%ebx,4),%ebx
+ addl %eax, %ebx
+ movl 2120(%ebp,%ecx,4),%eax
+ xorl %eax, %ebx
+ movl 3144(%ebp,%edx,4),%edx
+ addl %edx, %ebx
+ xorl %eax, %eax
+ xorl %ebx, %edi
+
+ /* Round 8 */
+ movl 36(%ebp), %edx
+ movl %edi, %ebx
+ xorl %edx, %esi
+ shrl $16, %ebx
+ movl %edi, %edx
+ movb %bh, %al
+ andl $255, %ebx
+ movb %dh, %cl
+ andl $255, %edx
+ movl 72(%ebp,%eax,4),%eax
+ movl 1096(%ebp,%ebx,4),%ebx
+ addl %eax, %ebx
+ movl 2120(%ebp,%ecx,4),%eax
+ xorl %eax, %ebx
+ movl 3144(%ebp,%edx,4),%edx
+ addl %edx, %ebx
+ xorl %eax, %eax
+ xorl %ebx, %esi
+
+ /* Round 9 */
+ movl 40(%ebp), %edx
+ movl %esi, %ebx
+ xorl %edx, %edi
+ shrl $16, %ebx
+ movl %esi, %edx
+ movb %bh, %al
+ andl $255, %ebx
+ movb %dh, %cl
+ andl $255, %edx
+ movl 72(%ebp,%eax,4),%eax
+ movl 1096(%ebp,%ebx,4),%ebx
+ addl %eax, %ebx
+ movl 2120(%ebp,%ecx,4),%eax
+ xorl %eax, %ebx
+ movl 3144(%ebp,%edx,4),%edx
+ addl %edx, %ebx
+ xorl %eax, %eax
+ xorl %ebx, %edi
+
+ /* Round 10 */
+ movl 44(%ebp), %edx
+ movl %edi, %ebx
+ xorl %edx, %esi
+ shrl $16, %ebx
+ movl %edi, %edx
+ movb %bh, %al
+ andl $255, %ebx
+ movb %dh, %cl
+ andl $255, %edx
+ movl 72(%ebp,%eax,4),%eax
+ movl 1096(%ebp,%ebx,4),%ebx
+ addl %eax, %ebx
+ movl 2120(%ebp,%ecx,4),%eax
+ xorl %eax, %ebx
+ movl 3144(%ebp,%edx,4),%edx
+ addl %edx, %ebx
+ xorl %eax, %eax
+ xorl %ebx, %esi
+
+ /* Round 11 */
+ movl 48(%ebp), %edx
+ movl %esi, %ebx
+ xorl %edx, %edi
+ shrl $16, %ebx
+ movl %esi, %edx
+ movb %bh, %al
+ andl $255, %ebx
+ movb %dh, %cl
+ andl $255, %edx
+ movl 72(%ebp,%eax,4),%eax
+ movl 1096(%ebp,%ebx,4),%ebx
+ addl %eax, %ebx
+ movl 2120(%ebp,%ecx,4),%eax
+ xorl %eax, %ebx
+ movl 3144(%ebp,%edx,4),%edx
+ addl %edx, %ebx
+ xorl %eax, %eax
+ xorl %ebx, %edi
+
+ /* Round 12 */
+ movl 52(%ebp), %edx
+ movl %edi, %ebx
+ xorl %edx, %esi
+ shrl $16, %ebx
+ movl %edi, %edx
+ movb %bh, %al
+ andl $255, %ebx
+ movb %dh, %cl
+ andl $255, %edx
+ movl 72(%ebp,%eax,4),%eax
+ movl 1096(%ebp,%ebx,4),%ebx
+ addl %eax, %ebx
+ movl 2120(%ebp,%ecx,4),%eax
+ xorl %eax, %ebx
+ movl 3144(%ebp,%edx,4),%edx
+ addl %edx, %ebx
+ xorl %eax, %eax
+ xorl %ebx, %esi
+
+ /* Round 13 */
+ movl 56(%ebp), %edx
+ movl %esi, %ebx
+ xorl %edx, %edi
+ shrl $16, %ebx
+ movl %esi, %edx
+ movb %bh, %al
+ andl $255, %ebx
+ movb %dh, %cl
+ andl $255, %edx
+ movl 72(%ebp,%eax,4),%eax
+ movl 1096(%ebp,%ebx,4),%ebx
+ addl %eax, %ebx
+ movl 2120(%ebp,%ecx,4),%eax
+ xorl %eax, %ebx
+ movl 3144(%ebp,%edx,4),%edx
+ addl %edx, %ebx
+ xorl %eax, %eax
+ xorl %ebx, %edi
+
+ /* Round 14 */
+ movl 60(%ebp), %edx
+ movl %edi, %ebx
+ xorl %edx, %esi
+ shrl $16, %ebx
+ movl %edi, %edx
+ movb %bh, %al
+ andl $255, %ebx
+ movb %dh, %cl
+ andl $255, %edx
+ movl 72(%ebp,%eax,4),%eax
+ movl 1096(%ebp,%ebx,4),%ebx
+ addl %eax, %ebx
+ movl 2120(%ebp,%ecx,4),%eax
+ xorl %eax, %ebx
+ movl 3144(%ebp,%edx,4),%edx
+ addl %edx, %ebx
+ xorl %eax, %eax
+ xorl %ebx, %esi
+
+ /* Round 15 */
+ movl 64(%ebp), %edx
+ movl %esi, %ebx
+ xorl %edx, %edi
+ shrl $16, %ebx
+ movl %esi, %edx
+ movb %bh, %al
+ andl $255, %ebx
+ movb %dh, %cl
+ andl $255, %edx
+ movl 72(%ebp,%eax,4),%eax
+ movl 1096(%ebp,%ebx,4),%ebx
+ addl %eax, %ebx
+ movl 2120(%ebp,%ecx,4),%eax
+ xorl %eax, %ebx
+ movl 3144(%ebp,%edx,4),%edx
+ addl %edx, %ebx
+ /* Load parameter 0 (16) enc=1 */
+ movl 20(%esp), %eax
+ xorl %ebx, %edi
+ movl 68(%ebp), %edx
+ xorl %edx, %esi
+ movl %edi, 4(%eax)
+ movl %esi, (%eax)
+ popl %edi
+ popl %esi
+ popl %ebx
+ popl %ebp
+ ret
+.BF_encrypt_end:
+ SIZE(BF_encrypt,.BF_encrypt_end-BF_encrypt)
+.ident "BF_encrypt"
+.text
+ .align ALIGN
+.globl BF_decrypt
+ TYPE(BF_decrypt,@function)
+BF_decrypt:
+
+ pushl %ebp
+ pushl %ebx
+ movl 12(%esp), %ebx
+ movl 16(%esp), %ebp
+ pushl %esi
+ pushl %edi
+ /* Load the 2 words */
+ movl (%ebx), %edi
+ movl 4(%ebx), %esi
+ xorl %eax, %eax
+ movl 68(%ebp), %ebx
+ xorl %ecx, %ecx
+ xorl %ebx, %edi
+
+ /* Round 16 */
+ movl 64(%ebp), %edx
+ movl %edi, %ebx
+ xorl %edx, %esi
+ shrl $16, %ebx
+ movl %edi, %edx
+ movb %bh, %al
+ andl $255, %ebx
+ movb %dh, %cl
+ andl $255, %edx
+ movl 72(%ebp,%eax,4),%eax
+ movl 1096(%ebp,%ebx,4),%ebx
+ addl %eax, %ebx
+ movl 2120(%ebp,%ecx,4),%eax
+ xorl %eax, %ebx
+ movl 3144(%ebp,%edx,4),%edx
+ addl %edx, %ebx
+ xorl %eax, %eax
+ xorl %ebx, %esi
+
+ /* Round 15 */
+ movl 60(%ebp), %edx
+ movl %esi, %ebx
+ xorl %edx, %edi
+ shrl $16, %ebx
+ movl %esi, %edx
+ movb %bh, %al
+ andl $255, %ebx
+ movb %dh, %cl
+ andl $255, %edx
+ movl 72(%ebp,%eax,4),%eax
+ movl 1096(%ebp,%ebx,4),%ebx
+ addl %eax, %ebx
+ movl 2120(%ebp,%ecx,4),%eax
+ xorl %eax, %ebx
+ movl 3144(%ebp,%edx,4),%edx
+ addl %edx, %ebx
+ xorl %eax, %eax
+ xorl %ebx, %edi
+
+ /* Round 14 */
+ movl 56(%ebp), %edx
+ movl %edi, %ebx
+ xorl %edx, %esi
+ shrl $16, %ebx
+ movl %edi, %edx
+ movb %bh, %al
+ andl $255, %ebx
+ movb %dh, %cl
+ andl $255, %edx
+ movl 72(%ebp,%eax,4),%eax
+ movl 1096(%ebp,%ebx,4),%ebx
+ addl %eax, %ebx
+ movl 2120(%ebp,%ecx,4),%eax
+ xorl %eax, %ebx
+ movl 3144(%ebp,%edx,4),%edx
+ addl %edx, %ebx
+ xorl %eax, %eax
+ xorl %ebx, %esi
+
+ /* Round 13 */
+ movl 52(%ebp), %edx
+ movl %esi, %ebx
+ xorl %edx, %edi
+ shrl $16, %ebx
+ movl %esi, %edx
+ movb %bh, %al
+ andl $255, %ebx
+ movb %dh, %cl
+ andl $255, %edx
+ movl 72(%ebp,%eax,4),%eax
+ movl 1096(%ebp,%ebx,4),%ebx
+ addl %eax, %ebx
+ movl 2120(%ebp,%ecx,4),%eax
+ xorl %eax, %ebx
+ movl 3144(%ebp,%edx,4),%edx
+ addl %edx, %ebx
+ xorl %eax, %eax
+ xorl %ebx, %edi
+
+ /* Round 12 */
+ movl 48(%ebp), %edx
+ movl %edi, %ebx
+ xorl %edx, %esi
+ shrl $16, %ebx
+ movl %edi, %edx
+ movb %bh, %al
+ andl $255, %ebx
+ movb %dh, %cl
+ andl $255, %edx
+ movl 72(%ebp,%eax,4),%eax
+ movl 1096(%ebp,%ebx,4),%ebx
+ addl %eax, %ebx
+ movl 2120(%ebp,%ecx,4),%eax
+ xorl %eax, %ebx
+ movl 3144(%ebp,%edx,4),%edx
+ addl %edx, %ebx
+ xorl %eax, %eax
+ xorl %ebx, %esi
+
+ /* Round 11 */
+ movl 44(%ebp), %edx
+ movl %esi, %ebx
+ xorl %edx, %edi
+ shrl $16, %ebx
+ movl %esi, %edx
+ movb %bh, %al
+ andl $255, %ebx
+ movb %dh, %cl
+ andl $255, %edx
+ movl 72(%ebp,%eax,4),%eax
+ movl 1096(%ebp,%ebx,4),%ebx
+ addl %eax, %ebx
+ movl 2120(%ebp,%ecx,4),%eax
+ xorl %eax, %ebx
+ movl 3144(%ebp,%edx,4),%edx
+ addl %edx, %ebx
+ xorl %eax, %eax
+ xorl %ebx, %edi
+
+ /* Round 10 */
+ movl 40(%ebp), %edx
+ movl %edi, %ebx
+ xorl %edx, %esi
+ shrl $16, %ebx
+ movl %edi, %edx
+ movb %bh, %al
+ andl $255, %ebx
+ movb %dh, %cl
+ andl $255, %edx
+ movl 72(%ebp,%eax,4),%eax
+ movl 1096(%ebp,%ebx,4),%ebx
+ addl %eax, %ebx
+ movl 2120(%ebp,%ecx,4),%eax
+ xorl %eax, %ebx
+ movl 3144(%ebp,%edx,4),%edx
+ addl %edx, %ebx
+ xorl %eax, %eax
+ xorl %ebx, %esi
+
+ /* Round 9 */
+ movl 36(%ebp), %edx
+ movl %esi, %ebx
+ xorl %edx, %edi
+ shrl $16, %ebx
+ movl %esi, %edx
+ movb %bh, %al
+ andl $255, %ebx
+ movb %dh, %cl
+ andl $255, %edx
+ movl 72(%ebp,%eax,4),%eax
+ movl 1096(%ebp,%ebx,4),%ebx
+ addl %eax, %ebx
+ movl 2120(%ebp,%ecx,4),%eax
+ xorl %eax, %ebx
+ movl 3144(%ebp,%edx,4),%edx
+ addl %edx, %ebx
+ xorl %eax, %eax
+ xorl %ebx, %edi
+
+ /* Round 8 */
+ movl 32(%ebp), %edx
+ movl %edi, %ebx
+ xorl %edx, %esi
+ shrl $16, %ebx
+ movl %edi, %edx
+ movb %bh, %al
+ andl $255, %ebx
+ movb %dh, %cl
+ andl $255, %edx
+ movl 72(%ebp,%eax,4),%eax
+ movl 1096(%ebp,%ebx,4),%ebx
+ addl %eax, %ebx
+ movl 2120(%ebp,%ecx,4),%eax
+ xorl %eax, %ebx
+ movl 3144(%ebp,%edx,4),%edx
+ addl %edx, %ebx
+ xorl %eax, %eax
+ xorl %ebx, %esi
+
+ /* Round 7 */
+ movl 28(%ebp), %edx
+ movl %esi, %ebx
+ xorl %edx, %edi
+ shrl $16, %ebx
+ movl %esi, %edx
+ movb %bh, %al
+ andl $255, %ebx
+ movb %dh, %cl
+ andl $255, %edx
+ movl 72(%ebp,%eax,4),%eax
+ movl 1096(%ebp,%ebx,4),%ebx
+ addl %eax, %ebx
+ movl 2120(%ebp,%ecx,4),%eax
+ xorl %eax, %ebx
+ movl 3144(%ebp,%edx,4),%edx
+ addl %edx, %ebx
+ xorl %eax, %eax
+ xorl %ebx, %edi
+
+ /* Round 6 */
+ movl 24(%ebp), %edx
+ movl %edi, %ebx
+ xorl %edx, %esi
+ shrl $16, %ebx
+ movl %edi, %edx
+ movb %bh, %al
+ andl $255, %ebx
+ movb %dh, %cl
+ andl $255, %edx
+ movl 72(%ebp,%eax,4),%eax
+ movl 1096(%ebp,%ebx,4),%ebx
+ addl %eax, %ebx
+ movl 2120(%ebp,%ecx,4),%eax
+ xorl %eax, %ebx
+ movl 3144(%ebp,%edx,4),%edx
+ addl %edx, %ebx
+ xorl %eax, %eax
+ xorl %ebx, %esi
+
+ /* Round 5 */
+ movl 20(%ebp), %edx
+ movl %esi, %ebx
+ xorl %edx, %edi
+ shrl $16, %ebx
+ movl %esi, %edx
+ movb %bh, %al
+ andl $255, %ebx
+ movb %dh, %cl
+ andl $255, %edx
+ movl 72(%ebp,%eax,4),%eax
+ movl 1096(%ebp,%ebx,4),%ebx
+ addl %eax, %ebx
+ movl 2120(%ebp,%ecx,4),%eax
+ xorl %eax, %ebx
+ movl 3144(%ebp,%edx,4),%edx
+ addl %edx, %ebx
+ xorl %eax, %eax
+ xorl %ebx, %edi
+
+ /* Round 4 */
+ movl 16(%ebp), %edx
+ movl %edi, %ebx
+ xorl %edx, %esi
+ shrl $16, %ebx
+ movl %edi, %edx
+ movb %bh, %al
+ andl $255, %ebx
+ movb %dh, %cl
+ andl $255, %edx
+ movl 72(%ebp,%eax,4),%eax
+ movl 1096(%ebp,%ebx,4),%ebx
+ addl %eax, %ebx
+ movl 2120(%ebp,%ecx,4),%eax
+ xorl %eax, %ebx
+ movl 3144(%ebp,%edx,4),%edx
+ addl %edx, %ebx
+ xorl %eax, %eax
+ xorl %ebx, %esi
+
+ /* Round 3 */
+ movl 12(%ebp), %edx
+ movl %esi, %ebx
+ xorl %edx, %edi
+ shrl $16, %ebx
+ movl %esi, %edx
+ movb %bh, %al
+ andl $255, %ebx
+ movb %dh, %cl
+ andl $255, %edx
+ movl 72(%ebp,%eax,4),%eax
+ movl 1096(%ebp,%ebx,4),%ebx
+ addl %eax, %ebx
+ movl 2120(%ebp,%ecx,4),%eax
+ xorl %eax, %ebx
+ movl 3144(%ebp,%edx,4),%edx
+ addl %edx, %ebx
+ xorl %eax, %eax
+ xorl %ebx, %edi
+
+ /* Round 2 */
+ movl 8(%ebp), %edx
+ movl %edi, %ebx
+ xorl %edx, %esi
+ shrl $16, %ebx
+ movl %edi, %edx
+ movb %bh, %al
+ andl $255, %ebx
+ movb %dh, %cl
+ andl $255, %edx
+ movl 72(%ebp,%eax,4),%eax
+ movl 1096(%ebp,%ebx,4),%ebx
+ addl %eax, %ebx
+ movl 2120(%ebp,%ecx,4),%eax
+ xorl %eax, %ebx
+ movl 3144(%ebp,%edx,4),%edx
+ addl %edx, %ebx
+ xorl %eax, %eax
+ xorl %ebx, %esi
+
+ /* Round 1 */
+ movl 4(%ebp), %edx
+ movl %esi, %ebx
+ xorl %edx, %edi
+ shrl $16, %ebx
+ movl %esi, %edx
+ movb %bh, %al
+ andl $255, %ebx
+ movb %dh, %cl
+ andl $255, %edx
+ movl 72(%ebp,%eax,4),%eax
+ movl 1096(%ebp,%ebx,4),%ebx
+ addl %eax, %ebx
+ movl 2120(%ebp,%ecx,4),%eax
+ xorl %eax, %ebx
+ movl 3144(%ebp,%edx,4),%edx
+ addl %edx, %ebx
+ /* Load parameter 0 (1) enc=0 */
+ movl 20(%esp), %eax
+ xorl %ebx, %edi
+ movl (%ebp), %edx
+ xorl %edx, %esi
+ movl %edi, 4(%eax)
+ movl %esi, (%eax)
+ popl %edi
+ popl %esi
+ popl %ebx
+ popl %ebp
+ ret
+.BF_decrypt_end:
+ SIZE(BF_decrypt,.BF_decrypt_end-BF_decrypt)
+.ident "BF_decrypt"
+.text
+ .align ALIGN
+.globl BF_cbc_encrypt
+ TYPE(BF_cbc_encrypt,@function)
+BF_cbc_encrypt:
+ pushl %ebp
+ pushl %ebx
+ pushl %esi
+ pushl %edi
+ movl 28(%esp), %ebp
+ /* getting iv ptr from parameter 4 */
+ movl 36(%esp), %ebx
+ movl (%ebx), %esi
+ movl 4(%ebx), %edi
+ pushl %edi
+ pushl %esi
+ pushl %edi
+ pushl %esi
+ movl %esp, %ebx
+ movl 36(%esp), %esi
+ movl 40(%esp), %edi
+ /* getting encrypt flag from parameter 5 */
+ movl 56(%esp), %ecx
+ /* get and push parameter 3 */
+ movl 48(%esp), %eax
+ pushl %eax
+ pushl %ebx
+ cmpl $0, %ecx
+ jz .L000decrypt
+ andl $4294967288, %ebp
+ movl 8(%esp), %eax
+ movl 12(%esp), %ebx
+ jz .L001encrypt_finish
+.L002encrypt_loop:
+ movl (%esi), %ecx
+ movl 4(%esi), %edx
+ xorl %ecx, %eax
+ xorl %edx, %ebx
+.byte 15
+.byte 200 /* bswapl %eax */
+.byte 15
+.byte 203 /* bswapl %ebx */
+ movl %eax, 8(%esp)
+ movl %ebx, 12(%esp)
+ call BF_encrypt
+ movl 8(%esp), %eax
+ movl 12(%esp), %ebx
+.byte 15
+.byte 200 /* bswapl %eax */
+.byte 15
+.byte 203 /* bswapl %ebx */
+ movl %eax, (%edi)
+ movl %ebx, 4(%edi)
+ addl $8, %esi
+ addl $8, %edi
+ subl $8, %ebp
+ jnz .L002encrypt_loop
+.L001encrypt_finish:
+ movl 52(%esp), %ebp
+ andl $7, %ebp
+ jz .L003finish
+ xorl %ecx, %ecx
+ xorl %edx, %edx
+ movl .L004cbc_enc_jmp_table(,%ebp,4),%ebp
+ jmp *%ebp
+.L005ej7:
+ movb 6(%esi), %dh
+ sall $8, %edx
+.L006ej6:
+ movb 5(%esi), %dh
+.L007ej5:
+ movb 4(%esi), %dl
+.L008ej4:
+ movl (%esi), %ecx
+ jmp .L009ejend
+.L010ej3:
+ movb 2(%esi), %ch
+ sall $8, %ecx
+.L011ej2:
+ movb 1(%esi), %ch
+.L012ej1:
+ movb (%esi), %cl
+.L009ejend:
+ xorl %ecx, %eax
+ xorl %edx, %ebx
+.byte 15
+.byte 200 /* bswapl %eax */
+.byte 15
+.byte 203 /* bswapl %ebx */
+ movl %eax, 8(%esp)
+ movl %ebx, 12(%esp)
+ call BF_encrypt
+ movl 8(%esp), %eax
+ movl 12(%esp), %ebx
+.byte 15
+.byte 200 /* bswapl %eax */
+.byte 15
+.byte 203 /* bswapl %ebx */
+ movl %eax, (%edi)
+ movl %ebx, 4(%edi)
+ jmp .L003finish
+.align ALIGN
+.L000decrypt:
+ andl $4294967288, %ebp
+ movl 16(%esp), %eax
+ movl 20(%esp), %ebx
+ jz .L013decrypt_finish
+.L014decrypt_loop:
+ movl (%esi), %eax
+ movl 4(%esi), %ebx
+.byte 15
+.byte 200 /* bswapl %eax */
+.byte 15
+.byte 203 /* bswapl %ebx */
+ movl %eax, 8(%esp)
+ movl %ebx, 12(%esp)
+ call BF_decrypt
+ movl 8(%esp), %eax
+ movl 12(%esp), %ebx
+.byte 15
+.byte 200 /* bswapl %eax */
+.byte 15
+.byte 203 /* bswapl %ebx */
+ movl 16(%esp), %ecx
+ movl 20(%esp), %edx
+ xorl %eax, %ecx
+ xorl %ebx, %edx
+ movl (%esi), %eax
+ movl 4(%esi), %ebx
+ movl %ecx, (%edi)
+ movl %edx, 4(%edi)
+ movl %eax, 16(%esp)
+ movl %ebx, 20(%esp)
+ addl $8, %esi
+ addl $8, %edi
+ subl $8, %ebp
+ jnz .L014decrypt_loop
+.L013decrypt_finish:
+ movl 52(%esp), %ebp
+ andl $7, %ebp
+ jz .L003finish
+ movl (%esi), %eax
+ movl 4(%esi), %ebx
+.byte 15
+.byte 200 /* bswapl %eax */
+.byte 15
+.byte 203 /* bswapl %ebx */
+ movl %eax, 8(%esp)
+ movl %ebx, 12(%esp)
+ call BF_decrypt
+ movl 8(%esp), %eax
+ movl 12(%esp), %ebx
+.byte 15
+.byte 200 /* bswapl %eax */
+.byte 15
+.byte 203 /* bswapl %ebx */
+ movl 16(%esp), %ecx
+ movl 20(%esp), %edx
+ xorl %eax, %ecx
+ xorl %ebx, %edx
+ movl (%esi), %eax
+ movl 4(%esi), %ebx
+.L015dj7:
+ rorl $16, %edx
+ movb %dl, 6(%edi)
+ shrl $16, %edx
+.L016dj6:
+ movb %dh, 5(%edi)
+.L017dj5:
+ movb %dl, 4(%edi)
+.L018dj4:
+ movl %ecx, (%edi)
+ jmp .L019djend
+.L020dj3:
+ rorl $16, %ecx
+ movb %cl, 2(%edi)
+ sall $16, %ecx
+.L021dj2:
+ movb %ch, 1(%esi)
+.L022dj1:
+ movb %cl, (%esi)
+.L019djend:
+ jmp .L003finish
+.align ALIGN
+.L003finish:
+ movl 60(%esp), %ecx
+ addl $24, %esp
+ movl %eax, (%ecx)
+ movl %ebx, 4(%ecx)
+ popl %edi
+ popl %esi
+ popl %ebx
+ popl %ebp
+ ret
+.align ALIGN
+.L004cbc_enc_jmp_table:
+ .long 0
+ .long .L012ej1
+ .long .L011ej2
+ .long .L010ej3
+ .long .L008ej4
+ .long .L007ej5
+ .long .L006ej6
+ .long .L005ej7
+.align ALIGN
+.L023cbc_dec_jmp_table:
+ .long 0
+ .long .L022dj1
+ .long .L021dj2
+ .long .L020dj3
+ .long .L018dj4
+ .long .L017dj5
+ .long .L016dj6
+ .long .L015dj7
+.BF_cbc_encrypt_end:
+ SIZE(BF_cbc_encrypt,.BF_cbc_encrypt_end-BF_cbc_encrypt)
+.ident "desasm.pl"
diff --git a/crypto/bf/asm/readme b/crypto/bf/asm/readme
index 71e4bb2d5d..2385fa3812 100644
--- a/crypto/bf/asm/readme
+++ b/crypto/bf/asm/readme
@@ -1,3 +1,10 @@
-If you want more of an idea of how this all works,
-have a read of the readme file in SSLeay/crypto/des/asm.
-SSLeay can be found at ftp://ftp.psy.uq.oz.au/pub/Crypto/SSL.
+These are the Blowfish assembler generation scripts.
+bf-586.pl version is for the Pentium and
+bf-686.pl is my original version, which is faster on the Pentium Pro.
+
+When using bf-586.pl, the Pentium Pro/II is 8% slower than when using
+bf-686.pl.  When using bf-686.pl, the Pentium is 16% slower than with
+bf-586.pl.
+
+So the default is bf-586.pl.
+