author     Andy Polyakov <appro@openssl.org>   2011-09-06 20:45:36 +0000
committer  Andy Polyakov <appro@openssl.org>   2011-09-06 20:45:36 +0000
commit     ed28aef8b455be436f252dfceac49a958a92e53b (patch)
tree       cd34319f2128612e24d78d06571cdc4c24eab614 /engines/e_padlock.c
parent     0486cce653b62d26a8ca37ac12f69f1a6b998844 (diff)
download   openssl-ed28aef8b455be436f252dfceac49a958a92e53b.tar.gz
Padlock engine: make it independent of inline assembler.
Diffstat (limited to 'engines/e_padlock.c')
-rw-r--r--  engines/e_padlock.c | 969
1 file changed, 181 insertions(+), 788 deletions(-)
diff --git a/engines/e_padlock.c b/engines/e_padlock.c
index 271253dada..7f78135165 100644
--- a/engines/e_padlock.c
+++ b/engines/e_padlock.c
@@ -95,17 +95,14 @@
/* VIA PadLock AES is available *ONLY* on some x86 CPUs.
Not only that it doesn't exist elsewhere, but it
-   even can't be compiled on other platforms!
-   In addition, because of the heavy use of inline assembler,
-   compiler choice is limited to GCC and Microsoft C. */
+   even can't be compiled on other platforms! */
#undef COMPILE_HW_PADLOCK
-#if !defined(I386_ONLY) && !defined(OPENSSL_NO_INLINE_ASM)
-# if (defined(__GNUC__) && __GNUC__>=2 && \
- (defined(__i386__) || defined(__i386) || \
- defined(__x86_64__) || defined(__x86_64)) \
- ) || \
- (defined(_MSC_VER) && defined(_M_IX86))
+#if !defined(I386_ONLY) && !defined(OPENSSL_NO_ASM)
+# if defined(__i386__) || defined(__i386) || \
+ defined(__x86_64__) || defined(__x86_64) || \
+ defined(_M_IX86) || defined(_M_AMD64) || defined(_M_X64) || \
+ defined(__INTEL__)
# define COMPILE_HW_PADLOCK
# ifdef OPENSSL_NO_DYNAMIC_ENGINE
static ENGINE *ENGINE_padlock (void);
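
With the assembler moved out to a standalone module, the compile guard above no longer keys on the compiler at all: any toolchain targeting x86 or x86-64 qualifies, and the opt-out macro becomes OPENSSL_NO_ASM instead of OPENSSL_NO_INLINE_ASM. A standalone probe of the same macro test (hypothetical; not part of the patch) shows which builds now get COMPILE_HW_PADLOCK:

    /* Hypothetical probe of the new target test; the macros are the ones
     * the patch checks, the program itself is illustration only. */
    #include <stdio.h>

    int main(void)
    {
    #if defined(__i386__) || defined(__i386) || \
        defined(__x86_64__) || defined(__x86_64) || \
        defined(_M_IX86) || defined(_M_AMD64) || defined(_M_X64) || \
        defined(__INTEL__)
        puts("x86/x86-64 target: COMPILE_HW_PADLOCK would be defined");
    #else
        puts("other target: PadLock engine compiles to a stub");
    #endif
        return 0;
    }
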
@@ -130,19 +127,6 @@ void ENGINE_load_padlock (void)
#endif
#ifdef COMPILE_HW_PADLOCK
-/* We do these includes here to avoid header problems on platforms that
- do not have the VIA padlock anyway... */
-#include <stdlib.h>
-#ifdef _WIN32
-# include <malloc.h>
-# ifndef alloca
-# define alloca _alloca
-# endif
-#elif defined(__GNUC__)
-# ifndef alloca
-# define alloca(s) __builtin_alloca((s))
-# endif
-#endif
/* Function for ENGINE detection and control */
static int padlock_available(void);
@@ -163,9 +147,6 @@ static char padlock_name[100];
/* Available features */
static int padlock_use_ace = 0; /* Advanced Cryptography Engine */
static int padlock_use_rng = 0; /* Random Number Generator */
-#ifndef OPENSSL_NO_AES
-static int padlock_aes_align_required = 1;
-#endif
/* ===== Engine "management" functions ===== */
@@ -284,202 +265,37 @@ struct padlock_cipher_data
} cword; /* Control word */
AES_KEY ks; /* Encryption key */
};
-
-/*
- * Essentially this variable belongs in thread local storage.
- * Having this variable global on the other hand can only cause
- * a few bogus key reloads [if any at all on a single-CPU system],
- * so we accept the penalty...
- */
-static volatile struct padlock_cipher_data *padlock_saved_context;
-#endif
-
-/*
- * =======================================================
- * Inline assembler section(s).
- * =======================================================
- * Order of arguments is chosen to facilitate Windows port
- * using __fastcall calling convention. If you wish to add
- * more routines, keep in mind that first __fastcall
- * argument is passed in %ecx and second - in %edx.
- * =======================================================
- */
-#if defined(__GNUC__) && __GNUC__>=2
-#if defined(__i386__) || defined(__i386)
-/*
- * As for excessive "push %ebx"/"pop %ebx" found all over.
- * When generating position-independent code GCC won't let
- * us use "b" in assembler templates nor even respect "ebx"
- * in "clobber description." Therefore the trouble...
- */
-
-/* Helper function - check if a CPUID instruction
- is available on this CPU */
-static int
-padlock_insn_cpuid_available(void)
-{
- int result = -1;
-
- /* We're checking if the bit #21 of EFLAGS
- can be toggled. If yes = CPUID is available. */
- asm volatile (
- "pushf\n"
- "popl %%eax\n"
- "xorl $0x200000, %%eax\n"
- "movl %%eax, %%ecx\n"
- "andl $0x200000, %%ecx\n"
- "pushl %%eax\n"
- "popf\n"
- "pushf\n"
- "popl %%eax\n"
- "andl $0x200000, %%eax\n"
- "xorl %%eax, %%ecx\n"
- "movl %%ecx, %0\n"
- : "=r" (result) : : "eax", "ecx");
-
- return (result == 0);
-}
-
-/* Load supported features of the CPU to see if
- the PadLock is available. */
-static int
-padlock_available(void)
-{
- char vendor_string[16];
- unsigned int eax, edx;
-
- /* First check if the CPUID instruction is available at all... */
- if (! padlock_insn_cpuid_available())
- return 0;
-
- /* Are we running on the Centaur (VIA) CPU? */
- eax = 0x00000000;
- vendor_string[12] = 0;
- asm volatile (
- "pushl %%ebx\n"
- "cpuid\n"
- "movl %%ebx,(%%edi)\n"
- "movl %%edx,4(%%edi)\n"
- "movl %%ecx,8(%%edi)\n"
- "popl %%ebx"
- : "+a"(eax) : "D"(vendor_string) : "ecx", "edx");
- if (strcmp(vendor_string, "CentaurHauls") != 0)
- return 0;
-
- /* Check for Centaur Extended Feature Flags presence */
- eax = 0xC0000000;
- asm volatile ("pushl %%ebx; cpuid; popl %%ebx"
- : "+a"(eax) : : "ecx", "edx");
- if (eax < 0xC0000001)
- return 0;
-
- /* Read the Centaur Extended Feature Flags */
- eax = 0xC0000001;
- asm volatile ("pushl %%ebx; cpuid; popl %%ebx"
- : "+a"(eax), "=d"(edx) : : "ecx");
-
- /* Fill up some flags */
- padlock_use_ace = ((edx & (0x3<<6)) == (0x3<<6));
- padlock_use_rng = ((edx & (0x3<<2)) == (0x3<<2));
-
- return padlock_use_ace + padlock_use_rng;
-}
-
-/* Force key reload from memory to the CPU microcode.
- Loading EFLAGS from the stack clears EFLAGS[30]
- which does the trick. */
-static inline void
-padlock_reload_key(void)
-{
- asm volatile ("pushfl; popfl");
-}
-
-#ifndef OPENSSL_NO_AES
-/*
- * This is heuristic key context tracing. At first one
- * believes that one should use atomic swap instructions,
- * but it's not actually necessary. Point is that if
- * padlock_saved_context was changed by another thread
- * after we've read it and before we compare it with cdata,
- * our key *shall* be reloaded upon thread context switch
- * and we are therefore set in either case...
- */
-static inline void
-padlock_verify_context(struct padlock_cipher_data *cdata)
-{
- asm volatile (
- "pushfl\n"
-" btl $30,(%%esp)\n"
-" jnc 1f\n"
-" cmpl %2,%1\n"
-" je 1f\n"
-" popfl\n"
-" subl $4,%%esp\n"
-"1: addl $4,%%esp\n"
-" movl %2,%0"
- :"+m"(padlock_saved_context)
- : "r"(padlock_saved_context), "r"(cdata) : "cc");
-}
-
-/* Template for padlock_xcrypt_* modes */
-/* BIG FAT WARNING:
- * The offsets used with 'leal' instructions
- * describe items of the 'padlock_cipher_data'
- * structure.
- */
-#define PADLOCK_XCRYPT_ASM(name,rep_xcrypt) \
-static inline void *name(size_t cnt, \
- struct padlock_cipher_data *cdata, \
- void *out, const void *inp) \
-{ void *iv; \
- asm volatile ( "pushl %%ebx\n" \
- " leal 16(%0),%%edx\n" \
- " leal 32(%0),%%ebx\n" \
- rep_xcrypt "\n" \
- " popl %%ebx" \
- : "=a"(iv), "=c"(cnt), "=D"(out), "=S"(inp) \
- : "0"(cdata), "1"(cnt), "2"(out), "3"(inp) \
- : "edx", "cc", "memory"); \
- return iv; \
-}
#endif
-#elif defined(__x86_64__) || defined(__x86_64)
+/* Interface to assembler module */
+unsigned int padlock_capability();
+void padlock_key_bswap(AES_KEY *key);
+void padlock_verify_context(struct padlock_cipher_data *ctx);
+void padlock_reload_key();
+void padlock_aes_block(void *out, const void *inp,
+ struct padlock_cipher_data *ctx);
+int padlock_ecb_encrypt(void *out, const void *inp,
+ struct padlock_cipher_data *ctx, size_t len);
+int padlock_cbc_encrypt(void *out, const void *inp,
+ struct padlock_cipher_data *ctx, size_t len);
+int padlock_cfb_encrypt(void *out, const void *inp,
+ struct padlock_cipher_data *ctx, size_t len);
+int padlock_ofb_encrypt(void *out, const void *inp,
+ struct padlock_cipher_data *ctx, size_t len);
+int padlock_ctr32_encrypt(void *out, const void *inp,
+ struct padlock_cipher_data *ctx, size_t len);
+int padlock_xstore(void *out,int edx);
+void padlock_sha1_oneshot(void *ctx,const void *inp,size_t len);
+void padlock_sha1(void *ctx,const void *inp,size_t len);
+void padlock_sha256_oneshot(void *ctx,const void *inp,size_t len);
+void padlock_sha256(void *ctx,const void *inp,size_t len);
/* Load supported features of the CPU to see if
the PadLock is available. */
static int
padlock_available(void)
{
- char vendor_string[16];
- unsigned int eax, edx;
- size_t scratch;
-
- /* Are we running on the Centaur (VIA) CPU? */
- eax = 0x00000000;
- vendor_string[12] = 0;
- asm volatile (
- "movq %%rbx,%1\n"
- "cpuid\n"
- "movl %%ebx,(%2)\n"
- "movl %%edx,4(%2)\n"
- "movl %%ecx,8(%2)\n"
- "movq %1,%%rbx"
- : "+a"(eax), "=&r"(scratch) : "r"(vendor_string) : "rcx", "rdx");
- if (strcmp(vendor_string, "CentaurHauls") != 0)
- return 0;
-
- /* Check for Centaur Extended Feature Flags presence */
- eax = 0xC0000000;
- asm volatile ("movq %%rbx,%1; cpuid; movq %1,%%rbx"
- : "+a"(eax), "=&r"(scratch) : : "rcx", "rdx");
- if (eax < 0xC0000001)
- return 0;
-
- /* Read the Centaur Extended Feature Flags */
- eax = 0xC0000001;
- asm volatile ("movq %%rbx,%2; cpuid; movq %2,%%rbx"
- : "+a"(eax), "=d"(edx), "=&r"(scratch) : : "rcx");
+ unsigned int edx = padlock_capability();
/* Fill up some flags */
padlock_use_ace = ((edx & (0x3<<6)) == (0x3<<6));
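
The two-bit masks check that each unit is both present and enabled. The bit assignments are those implied by the removed assembler further down (bt edx,6/edx,7 for ACE, bt edx,2/edx,3 for the RNG), i.e. the Centaur extended CPUID leaf 0xC0000001 EDX. A sketch of the decode with named masks, assuming that layout:

    /* Sketch of the EDX decode, assuming the bit layout implied by the
     * removed asm (bits 6/7 for ACE, bits 2/3 for the RNG). */
    #define PADLOCK_RNG_PRESENT  (1u << 2)
    #define PADLOCK_RNG_ENABLED  (1u << 3)
    #define PADLOCK_ACE_PRESENT  (1u << 6)
    #define PADLOCK_ACE_ENABLED  (1u << 7)

    static int decode_capability(unsigned int edx, int *use_ace, int *use_rng)
    {
        /* a unit counts only if it is both present and enabled */
        *use_ace = (edx & (PADLOCK_ACE_PRESENT | PADLOCK_ACE_ENABLED))
                == (PADLOCK_ACE_PRESENT | PADLOCK_ACE_ENABLED);
        *use_rng = (edx & (PADLOCK_RNG_PRESENT | PADLOCK_RNG_ENABLED))
                == (PADLOCK_RNG_PRESENT | PADLOCK_RNG_ENABLED);
        return *use_ace + *use_rng;
    }
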
@@ -488,257 +304,6 @@ padlock_available(void)
return padlock_use_ace + padlock_use_rng;
}
-/* Force key reload from memory to the CPU microcode.
- Loading EFLAGS from the stack clears EFLAGS[30]
- which does the trick. */
-static inline void
-padlock_reload_key(void)
-{
- asm volatile ("pushfq; popfq");
-}
-
-#ifndef OPENSSL_NO_AES
-/*
- * This is heuristic key context tracing. At first one
- * believes that one should use atomic swap instructions,
- * but it's not actually necessary. Point is that if
- * padlock_saved_context was changed by another thread
- * after we've read it and before we compare it with cdata,
- * our key *shall* be reloaded upon thread context switch
- * and we are therefore set in either case...
- */
-static inline void
-padlock_verify_context(struct padlock_cipher_data *cdata)
-{
- asm volatile (
- "pushfq\n"
-" btl $30,(%%rsp)\n"
-" jnc 1f\n"
-" cmpq %2,%1\n"
-" je 1f\n"
-" popfq\n"
-" subq $8,%%rsp\n"
-"1: addq $8,%%rsp\n"
-" movq %2,%0"
- :"+m"(padlock_saved_context)
- : "r"(padlock_saved_context), "r"(cdata) : "cc");
-}
-
-/* Template for padlock_xcrypt_* modes */
-/* BIG FAT WARNING:
- * The offsets used with 'leal' instructions
- * describe items of the 'padlock_cipher_data'
- * structure.
- */
-#define PADLOCK_XCRYPT_ASM(name,rep_xcrypt) \
-static inline void *name(size_t cnt, \
- struct padlock_cipher_data *cdata, \
- void *out, const void *inp) \
-{ void *iv; \
- size_t scratch; \
- asm volatile ( "movq %%rbx,%4\n" \
- " leaq 16(%0),%%rdx\n" \
- " leaq 32(%0),%%rbx\n" \
- rep_xcrypt "\n" \
- " movq %4,%%rbx" \
- : "=a"(iv), "=c"(cnt), "=D"(out), "=S"(inp), "=&r"(scratch) \
- : "0"(cdata), "1"(cnt), "2"(out), "3"(inp) \
- : "rdx", "cc", "memory"); \
- return iv; \
-}
-#endif
-
-#endif /* cpu */
-
-#ifndef OPENSSL_NO_AES
-/* Generate all functions with appropriate opcodes */
-PADLOCK_XCRYPT_ASM(padlock_xcrypt_ecb, ".byte 0xf3,0x0f,0xa7,0xc8") /* rep xcryptecb */
-PADLOCK_XCRYPT_ASM(padlock_xcrypt_cbc, ".byte 0xf3,0x0f,0xa7,0xd0") /* rep xcryptcbc */
-PADLOCK_XCRYPT_ASM(padlock_xcrypt_cfb, ".byte 0xf3,0x0f,0xa7,0xe0") /* rep xcryptcfb */
-PADLOCK_XCRYPT_ASM(padlock_xcrypt_ofb, ".byte 0xf3,0x0f,0xa7,0xe8") /* rep xcryptofb */
-
-/* Our own htonl()/ntohl() */
-static inline void
-padlock_bswapl(AES_KEY *ks)
-{
- size_t i = sizeof(ks->rd_key)/sizeof(ks->rd_key[0]);
- unsigned int *key = ks->rd_key;
-
- while (i--) {
- asm volatile ("bswapl %0" : "+r"(*key));
- key++;
- }
-}
-#endif
-
-/* The RNG call itself */
-static inline unsigned int
-padlock_xstore(void *addr, unsigned int edx_in)
-{
- unsigned int eax_out;
-
- asm volatile (".byte 0x0f,0xa7,0xc0" /* xstore */
- : "=a"(eax_out),"=m"(*(unsigned *)addr)
- : "D"(addr), "d" (edx_in)
- );
-
- return eax_out;
-}
-
-/* Why not inline 'rep movsd'? I failed to find information on what
- * value in Direction Flag one can expect and consequently have to
- * apply "better-safe-than-sorry" approach and assume "undefined."
- * I could explicitly clear it and restore the original value upon
- * return from padlock_aes_cipher, but it's presumably too much
- * trouble for too little gain...
- *
- * In case you wonder 'rep xcrypt*' instructions above are *not*
- * affected by the Direction Flag and pointers advance toward
- * larger addresses unconditionally.
- */
-static inline unsigned char *
-padlock_memcpy(void *dst,const void *src,size_t n)
-{
- size_t *d=dst;
- const size_t *s=src;
-
- n /= sizeof(*d);
- do { *d++ = *s++; } while (--n);
-
- return dst;
-}
-
-#elif defined(_MSC_VER)
-/*
- * Unlike GCC these are real functions. In order to minimize impact
- * on performance we adhere to __fastcall calling convention in
- * order to get two first arguments passed through %ecx and %edx.
- * Which kind of suits very well, as instructions in question use
- * both %ecx and %edx as input:-)
- */
-#define REP_XCRYPT(code) \
- _asm _emit 0xf3 \
- _asm _emit 0x0f _asm _emit 0xa7 \
- _asm _emit code
-
-/* BIG FAT WARNING:
- * The offsets used with 'lea' instructions
- * describe items of the 'padlock_cipher_data'
- * structure.
- */
-#define PADLOCK_XCRYPT_ASM(name,code) \
-static void * __fastcall \
- name (size_t cnt, void *cdata, \
- void *outp, const void *inp) \
-{ _asm mov eax,edx \
- _asm lea edx,[eax+16] \
- _asm lea ebx,[eax+32] \
- _asm mov edi,outp \
- _asm mov esi,inp \
- REP_XCRYPT(code) \
-}
-
-PADLOCK_XCRYPT_ASM(padlock_xcrypt_ecb,0xc8)
-PADLOCK_XCRYPT_ASM(padlock_xcrypt_cbc,0xd0)
-PADLOCK_XCRYPT_ASM(padlock_xcrypt_cfb,0xe0)
-PADLOCK_XCRYPT_ASM(padlock_xcrypt_ofb,0xe8)
-
-static int __fastcall
-padlock_xstore(void *outp,unsigned int code)
-{ _asm mov edi,ecx
- _asm _emit 0x0f _asm _emit 0xa7 _asm _emit 0xc0
-}
-
-static void __fastcall
-padlock_reload_key(void)
-{ _asm pushfd _asm popfd }
-
-static void __fastcall
-padlock_verify_context(void *cdata)
-{ _asm {
- pushfd
- bt DWORD PTR[esp],30
- jnc skip
- cmp ecx,padlock_saved_context
- je skip
- popfd
- sub esp,4
- skip: add esp,4
- mov padlock_saved_context,ecx
- }
-}
-
-static int
-padlock_available(void)
-{ _asm {
- pushfd
- pop eax
- mov ecx,eax
- xor eax,1<<21
- push eax
- popfd
- pushfd
- pop eax
- xor eax,ecx
- bt eax,21
- jnc noluck
- mov eax,0
- cpuid
- xor eax,eax
- cmp ebx,'tneC'
- jne noluck
- cmp edx,'Hrua'
- jne noluck
- cmp ecx,'slua'
- jne noluck
- mov eax,0xC0000000
- cpuid
- mov edx,eax
- xor eax,eax
- cmp edx,0xC0000001
- jb noluck
- mov eax,0xC0000001
- cpuid
- xor eax,eax
- bt edx,6
- jnc skip_a
- bt edx,7
- jnc skip_a
- mov padlock_use_ace,1
- inc eax
- skip_a: bt edx,2
- jnc skip_r
- bt edx,3
- jnc skip_r
- mov padlock_use_rng,1
- inc eax
- skip_r:
- noluck:
- }
-}
-
-static void __fastcall
-padlock_bswapl(void *key)
-{ _asm {
- pushfd
- cld
- mov esi,ecx
- mov edi,ecx
- mov ecx,60
- up: lodsd
- bswap eax
- stosd
- loop up
- popfd
- }
-}
-
-/* MS actually specifies status of Direction Flag and compiler even
- * manages to compile following as 'rep movsd' all by itself...
- */
-#define padlock_memcpy(o,i,n) ((unsigned char *)memcpy((o),(i),(n)&~3U))
-#endif
-
/* ===== AES encryption/decryption ===== */
#ifndef OPENSSL_NO_AES
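
Worth noting when reading the new handlers below: the removed PADLOCK_XCRYPT_ASM templates took a block count as their first argument, while the new padlock_*_encrypt entry points take (out, in, ctx, length) with the length in bytes, as the call sites show. A purely illustrative shim mapping the old shape onto the new one:

    #include <stddef.h>

    struct padlock_cipher_data;     /* opaque for this sketch */
    int padlock_ecb_encrypt(void *out, const void *inp,
                            struct padlock_cipher_data *ctx, size_t len);

    /* Hypothetical shim: old inline-asm signature (count of blocks first)
     * on top of the new byte-length interface. */
    static void padlock_xcrypt_ecb_compat(size_t blocks,
                                          struct padlock_cipher_data *cdata,
                                          void *out, const void *inp)
    {
        padlock_ecb_encrypt(out, inp, cdata, blocks * 16 /* AES block */);
    }
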
@@ -789,14 +354,157 @@ static int padlock_cipher_nids_num = (sizeof(padlock_cipher_nids)/
/* Function prototypes ... */
static int padlock_aes_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
const unsigned char *iv, int enc);
-static int padlock_aes_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
- const unsigned char *in, size_t nbytes);
#define NEAREST_ALIGNED(ptr) ( (unsigned char *)(ptr) + \
( (0x10 - ((size_t)(ptr) & 0x0F)) & 0x0F ) )
#define ALIGNED_CIPHER_DATA(ctx) ((struct padlock_cipher_data *)\
NEAREST_ALIGNED(ctx->cipher_data))
+static int
+padlock_ecb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out_arg,
+ const unsigned char *in_arg, size_t nbytes)
+{
+ return padlock_ecb_encrypt(out_arg,in_arg,
+ ALIGNED_CIPHER_DATA(ctx),nbytes);
+}
+static int
+padlock_cbc_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out_arg,
+ const unsigned char *in_arg, size_t nbytes)
+{
+ struct padlock_cipher_data *cdata = ALIGNED_CIPHER_DATA(ctx);
+ int ret;
+
+ memcpy(cdata->iv, ctx->iv, AES_BLOCK_SIZE);
+ if ((ret = padlock_cbc_encrypt(out_arg,in_arg,cdata,nbytes)))
+ memcpy(ctx->iv, cdata->iv, AES_BLOCK_SIZE);
+ return ret;
+}
+
+static int
+padlock_cfb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out_arg,
+ const unsigned char *in_arg, size_t nbytes)
+{
+ struct padlock_cipher_data *cdata = ALIGNED_CIPHER_DATA(ctx);
+ size_t chunk;
+
+ if ((chunk = ctx->num)) { /* borrow chunk variable */
+ unsigned char *ivp=ctx->iv;
+
+ if (chunk >= AES_BLOCK_SIZE)
+ return 0; /* bogus value */
+
+ if (ctx->encrypt)
+ while (chunk<AES_BLOCK_SIZE && nbytes!=0) {
+ ivp[chunk] = *(out_arg++) = *(in_arg++) ^ ivp[chunk];
+ chunk++, nbytes--;
+ }
+ else while (chunk<AES_BLOCK_SIZE && nbytes!=0) {
+ unsigned char c = *(in_arg++);
+ *(out_arg++) = c ^ ivp[chunk];
+ ivp[chunk++] = c, nbytes--;
+ }
+
+ ctx->num = chunk%AES_BLOCK_SIZE;
+ }
+
+ if (nbytes == 0)
+ return 1;
+
+ memcpy (cdata->iv, ctx->iv, AES_BLOCK_SIZE);
+
+ if ((chunk = nbytes & ~(AES_BLOCK_SIZE-1))) {
+ if (!padlock_cfb_encrypt(out_arg,in_arg,cdata,chunk))
+ return 0;
+ nbytes -= chunk;
+ }
+
+ if (nbytes) {
+ unsigned char *ivp = cdata->iv;
+
+ out_arg += chunk;
+ in_arg += chunk;
+ ctx->num = nbytes;
+ if (cdata->cword.b.encdec) {
+ cdata->cword.b.encdec=0;
+ padlock_reload_key();
+ padlock_aes_block(ivp,ivp,cdata);
+ cdata->cword.b.encdec=1;
+ padlock_reload_key();
+ while(nbytes) {
+ unsigned char c = *(in_arg++);
+ *(out_arg++) = c ^ *ivp;
+ *(ivp++) = c, nbytes--;
+ }
+ }
+ else { padlock_reload_key();
+ padlock_aes_block(ivp,ivp,cdata);
+ padlock_reload_key();
+ while (nbytes) {
+ *ivp = *(out_arg++) = *(in_arg++) ^ *ivp;
+ ivp++, nbytes--;
+ }
+ }
+ }
+
+ memcpy(ctx->iv, cdata->iv, AES_BLOCK_SIZE);
+
+ return 1;
+}
+
+static int
+padlock_ofb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out_arg,
+ const unsigned char *in_arg, size_t nbytes)
+{
+ struct padlock_cipher_data *cdata = ALIGNED_CIPHER_DATA(ctx);
+ size_t chunk;
+
+ /* ctx->num is maintained in byte-oriented modes,
+ such as CFB and OFB... */
+ if ((chunk = ctx->num)) { /* borrow chunk variable */
+ unsigned char *ivp=ctx->iv;
+
+ if (chunk >= AES_BLOCK_SIZE)
+ return 0; /* bogus value */
+
+ while (chunk<AES_BLOCK_SIZE && nbytes!=0) {
+ *(out_arg++) = *(in_arg++) ^ ivp[chunk];
+ chunk++, nbytes--;
+ }
+
+ ctx->num = chunk%AES_BLOCK_SIZE;
+ }
+
+ if (nbytes == 0)
+ return 1;
+
+ memcpy(cdata->iv, ctx->iv, AES_BLOCK_SIZE);
+
+ if ((chunk = nbytes & ~(AES_BLOCK_SIZE-1))) {
+ if (!padlock_ofb_encrypt(out_arg,in_arg,cdata,chunk))
+ return 0;
+ nbytes -= chunk;
+ }
+
+ if (nbytes) {
+ unsigned char *ivp = cdata->iv;
+
+ out_arg += chunk;
+ in_arg += chunk;
+ ctx->num = nbytes;
+ padlock_reload_key(); /* empirically found */
+ padlock_aes_block(ivp,ivp,cdata);
+ padlock_reload_key(); /* empirically found */
+ while (nbytes) {
+ *(out_arg++) = *(in_arg++) ^ *ivp;
+ ivp++, nbytes--;
+ }
+ }
+
+ memcpy(ctx->iv, cdata->iv, AES_BLOCK_SIZE);
+
+ return 1;
+}
+
#define EVP_CIPHER_block_size_ECB AES_BLOCK_SIZE
#define EVP_CIPHER_block_size_CBC AES_BLOCK_SIZE
#define EVP_CIPHER_block_size_OFB 1
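
The handlers above reach their control word and key through ALIGNED_CIPHER_DATA() because the xcrypt instructions want the padlock_cipher_data block at a 16-byte boundary, while EVP's cipher_data allocation only promises malloc alignment; the EVP_CIPHER template below reserves sizeof(struct padlock_cipher_data) + 16 of slack so rounding up always stays in bounds. A self-contained check of the rounding arithmetic (illustration only):

    #include <assert.h>
    #include <stddef.h>
    #include <stdio.h>

    #define NEAREST_ALIGNED(ptr) ( (unsigned char *)(ptr) + \
            ( (0x10 - ((size_t)(ptr) & 0x0F)) & 0x0F ) )

    int main(void)
    {
        unsigned char buf[48];
        size_t off;

        for (off = 0; off < 16; off++) {
            unsigned char *p = NEAREST_ALIGNED(buf + off);
            /* rounds up to the next 16-byte boundary, moving at most 15 bytes */
            assert(((size_t)p & 0x0F) == 0);
            assert(p >= buf + off && p < buf + off + 16);
        }
        puts("NEAREST_ALIGNED rounds up correctly for all 16 phases");
        return 0;
    }
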
@@ -812,7 +520,7 @@ static const EVP_CIPHER padlock_aes_##ksize##_##lmode = { \
AES_BLOCK_SIZE, \
0 | EVP_CIPH_##umode##_MODE, \
padlock_aes_init_key, \
- padlock_aes_cipher, \
+ padlock_##lmode##_cipher, \
NULL, \
sizeof(struct padlock_cipher_data) + 16, \
EVP_CIPHER_set_asn1_iv, \
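
Since do_cipher now points at a per-mode handler, ordinary EVP code exercises the partial-block bookkeeping in padlock_cfb_cipher whenever update lengths are not block multiples. An illustrative driver; the EVP/ENGINE calls and the "padlock" engine id are standard OpenSSL, the rest is made up for the example and error checks are omitted:

    #include <openssl/engine.h>
    #include <openssl/evp.h>

    /* Feeding CFB updates that are not multiples of AES_BLOCK_SIZE makes
     * padlock_cfb_cipher carry the partial-block position in ctx->num
     * between calls.  Falls back to software if the engine is absent. */
    int cfb_demo(const unsigned char key[16], const unsigned char iv[16],
                 const unsigned char in[29], unsigned char out[29])
    {
        EVP_CIPHER_CTX *ctx;
        ENGINE *e;
        int n1 = 0, n2 = 0;

        ENGINE_load_builtin_engines();
        e = ENGINE_by_id("padlock");        /* NULL on non-VIA hardware */

        ctx = EVP_CIPHER_CTX_new();
        EVP_EncryptInit_ex(ctx, EVP_aes_128_cfb128(), e, key, iv);
        EVP_EncryptUpdate(ctx, out, &n1, in, 13);           /* ctx->num ends at 13 */
        EVP_EncryptUpdate(ctx, out + n1, &n2, in + 13, 16); /* drains, then xcrypts */
        EVP_CIPHER_CTX_free(ctx);
        if (e != NULL)
            ENGINE_free(e);
        return n1 + n2;                     /* 29 bytes produced */
    }
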
@@ -932,15 +640,15 @@ padlock_aes_init_key (EVP_CIPHER_CTX *ctx, const unsigned char *key,
and is listed as hardware errata. They most
likely will fix it at some point and then
a check for stepping would be due here. */
- if (EVP_CIPHER_CTX_mode(ctx) == EVP_CIPH_CFB_MODE ||
- EVP_CIPHER_CTX_mode(ctx) == EVP_CIPH_OFB_MODE ||
- enc)
- AES_set_encrypt_key(key, key_len, &cdata->ks);
- else
+ if ((EVP_CIPHER_CTX_mode(ctx) == EVP_CIPH_ECB_MODE ||
+ EVP_CIPHER_CTX_mode(ctx) == EVP_CIPH_CBC_MODE)
+ && !enc)
AES_set_decrypt_key(key, key_len, &cdata->ks);
+ else
+ AES_set_encrypt_key(key, key_len, &cdata->ks);
#ifndef AES_ASM
/* OpenSSL C functions use byte-swapped extended key. */
- padlock_bswapl(&cdata->ks);
+ padlock_key_bswap(&cdata->ks);
#endif
cdata->cword.b.keygen = 1;
break;
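
The rewritten test makes the decrypt key schedule the exception rather than the rule: CFB and OFB always drive the AES core in the forward direction, even when decrypting, so only ECB and CBC decryption want AES_set_decrypt_key. A sketch mirroring the new condition (helper name and enum invented for illustration):

    #include <stdio.h>

    enum mode { ECB, CBC, CFB, OFB };

    /* Mirrors the rewritten condition in padlock_aes_init_key. */
    static const char *key_schedule(enum mode m, int enc)
    {
        if ((m == ECB || m == CBC) && !enc)
            return "AES_set_decrypt_key";
        return "AES_set_encrypt_key";   /* incl. CFB/OFB decryption */
    }

    int main(void)
    {
        printf("CFB decrypt uses %s\n", key_schedule(CFB, 0));
        return 0;
    }
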
@@ -960,321 +668,6 @@ padlock_aes_init_key (EVP_CIPHER_CTX *ctx, const unsigned char *key,
return 1;
}
-/*
- * Simplified version of padlock_aes_cipher() used when
- * 1) both input and output buffers are at aligned addresses.
- * or when
- * 2) running on a newer CPU that doesn't require aligned buffers.
- */
-static int
-padlock_aes_cipher_omnivorous(EVP_CIPHER_CTX *ctx, unsigned char *out_arg,
- const unsigned char *in_arg, size_t nbytes)
-{
- struct padlock_cipher_data *cdata;
- void *iv;
-
- cdata = ALIGNED_CIPHER_DATA(ctx);
- padlock_verify_context(cdata);
-
- switch (EVP_CIPHER_CTX_mode(ctx)) {
- case EVP_CIPH_ECB_MODE:
- padlock_xcrypt_ecb(nbytes/AES_BLOCK_SIZE, cdata, out_arg, in_arg);
- break;
-
- case EVP_CIPH_CBC_MODE:
- memcpy(cdata->iv, ctx->iv, AES_BLOCK_SIZE);
- iv = padlock_xcrypt_cbc(nbytes/AES_BLOCK_SIZE, cdata, out_arg, in_arg);
- memcpy(ctx->iv, iv, AES_BLOCK_SIZE);
- break;
-
- case EVP_CIPH_CFB_MODE:
- memcpy(cdata->iv, ctx->iv, AES_BLOCK_SIZE);
- iv = padlock_xcrypt_cfb(nbytes/AES_BLOCK_SIZE, cdata, out_arg, in_arg);
- memcpy(ctx->iv, iv, AES_BLOCK_SIZE);
- break;
-
- case EVP_CIPH_OFB_MODE:
- memcpy(cdata->iv, ctx->iv, AES_BLOCK_SIZE);
- padlock_xcrypt_ofb(nbytes/AES_BLOCK_SIZE, cdata, out_arg, in_arg);
- memcpy(ctx->iv, cdata->iv, AES_BLOCK_SIZE);
- break;
-
- default:
- return 0;
- }
-
- memset(cdata->iv, 0, AES_BLOCK_SIZE);
-
- return 1;
-}
-
-#ifndef PADLOCK_CHUNK
-# define PADLOCK_CHUNK 512 /* Must be a power of 2 larger than 16 */
-#endif
-#if PADLOCK_CHUNK<16 || PADLOCK_CHUNK&(PADLOCK_CHUNK-1)
-# error "insane PADLOCK_CHUNK..."
-#endif
-
-/* Re-align the arguments to 16-Bytes boundaries and run the
- encryption function itself. This function is not AES-specific. */
-static int
-padlock_aes_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out_arg,
- const unsigned char *in_arg, size_t nbytes)
-{
- struct padlock_cipher_data *cdata;
- const void *inp;
- unsigned char *out;
- void *iv;
- int inp_misaligned, out_misaligned, realign_in_loop;
- size_t chunk, allocated=0;
-
- /* ctx->num is maintained in byte-oriented modes,
- such as CFB and OFB... */
- if ((chunk = ctx->num)) { /* borrow chunk variable */
- unsigned char *ivp=ctx->iv;
-
- switch (EVP_CIPHER_CTX_mode(ctx)) {
- case EVP_CIPH_CFB_MODE:
- if (chunk >= AES_BLOCK_SIZE)
- return 0; /* bogus value */
-
- if (ctx->encrypt)
- while (chunk<AES_BLOCK_SIZE && nbytes!=0) {
- ivp[chunk] = *(out_arg++) = *(in_arg++) ^ ivp[chunk];
- chunk++, nbytes--;
- }
- else while (chunk<AES_BLOCK_SIZE && nbytes!=0) {
- unsigned char c = *(in_arg++);
- *(out_arg++) = c ^ ivp[chunk];
- ivp[chunk++] = c, nbytes--;
- }
-
- ctx->num = chunk%AES_BLOCK_SIZE;
- break;
- case EVP_CIPH_OFB_MODE:
- if (chunk >= AES_BLOCK_SIZE)
- return 0; /* bogus value */
-
- while (chunk<AES_BLOCK_SIZE && nbytes!=0) {
- *(out_arg++) = *(in_arg++) ^ ivp[chunk];
- chunk++, nbytes--;
- }
-
- ctx->num = chunk%AES_BLOCK_SIZE;
- break;
- }
- }
-
- if (nbytes == 0)
- return 1;
-#if 0
- if (nbytes % AES_BLOCK_SIZE)
- return 0; /* are we expected to do tail processing? */
-#else
- /* nbytes is always multiple of AES_BLOCK_SIZE in ECB and CBC
- modes and arbitrary value in byte-oriented modes, such as
- CFB and OFB... */
-#endif
-
- /* VIA promises CPUs that won't require alignment in the future.
- For now padlock_aes_align_required is initialized to 1 and
- the condition is never met... */
-	/* C7 core is capable of managing unaligned input in non-ECB[!]
-	   mode, but performance penalties appear to be approximately
-	   the same as for software alignment below, or ~3x. They promise
-	   to improve it in the future, but for now we can just as well
-	   pretend that it can only handle aligned input... */
- if (!padlock_aes_align_required && (nbytes%AES_BLOCK_SIZE)==0)
- return padlock_aes_cipher_omnivorous(ctx, out_arg, in_arg, nbytes);
-
- inp_misaligned = (((size_t)in_arg) & 0x0F);
- out_misaligned = (((size_t)out_arg) & 0x0F);
-
- /* Note that even if output is aligned and input not,
- * I still prefer to loop instead of copy the whole
- * input and then encrypt in one stroke. This is done
- * in order to improve L1 cache utilization... */
- realign_in_loop = out_misaligned|inp_misaligned;
-
- if (!realign_in_loop && (nbytes%AES_BLOCK_SIZE)==0)
- return padlock_aes_cipher_omnivorous(ctx, out_arg, in_arg, nbytes);
-
- /* this takes one "if" out of the loops */
- chunk = nbytes;
- chunk %= PADLOCK_CHUNK;
- if (chunk==0) chunk = PADLOCK_CHUNK;
-
- if (out_misaligned) {
-		/* optimize for small input */
- allocated = (chunk<nbytes?PADLOCK_CHUNK:nbytes);
- out = alloca(0x10 + allocated);
- out = NEAREST_ALIGNED(out);
- }
- else
- out = out_arg;
-
- cdata = ALIGNED_CIPHER_DATA(ctx);
- padlock_verify_context(cdata);
-
- switch (EVP_CIPHER_CTX_mode(ctx)) {
- case EVP_CIPH_ECB_MODE:
- do {
- if (inp_misaligned)
- inp = padlock_memcpy(out, in_arg, chunk);
- else
- inp = in_arg;
- in_arg += chunk;
-
- padlock_xcrypt_ecb(chunk/AES_BLOCK_SIZE, cdata, out, inp);
-
- if (out_misaligned)
- out_arg = padlock_memcpy(out_arg, out, chunk) + chunk;
- else
- out = out_arg+=chunk;
-
- nbytes -= chunk;
- chunk = PADLOCK_CHUNK;
- } while (nbytes);
- break;
-
- case EVP_CIPH_CBC_MODE:
- memcpy(cdata->iv, ctx->iv, AES_BLOCK_SIZE);
- goto cbc_shortcut;
- do {
- if (iv != cdata->iv)
- memcpy(cdata->iv, iv, AES_BLOCK_SIZE);
- chunk = PADLOCK_CHUNK;
- cbc_shortcut: /* optimize for small input */
- if (inp_misaligned)
- inp = padlock_memcpy(out, in_arg, chunk);
- else
- inp = in_arg;
- in_arg += chunk;
-
- iv = padlock_xcrypt_cbc(chunk/AES_BLOCK_SIZE, cdata, out, inp);
-
- if (out_misaligned)
- out_arg = padlock_memcpy(out_arg, out, chunk) + chunk;
- else
- out = out_arg+=chunk;
-
- } while (nbytes -= chunk);
- memcpy(ctx->iv, iv, AES_BLOCK_SIZE);
- break;
-
- case EVP_CIPH_CFB_MODE:
- memcpy (iv = cdata->iv, ctx->iv, AES_BLOCK_SIZE);
- chunk &= ~(AES_BLOCK_SIZE-1);
- if (chunk) goto cfb_shortcut;
- else goto cfb_skiploop;
- do {
- if (iv != cdata->iv)
- memcpy(cdata->iv, iv, AES_BLOCK_SIZE);
- chunk = PADLOCK_CHUNK;
- cfb_shortcut: /* optimize for small input */
- if (inp_misaligned)
- inp = padlock_memcpy(out, in_arg, chunk);
- else
- inp = in_arg;
- in_arg += chunk;
-
- iv = padlock_xcrypt_cfb(chunk/AES_BLOCK_SIZE, cdata, out, inp);
-
- if (out_misaligned)
- out_arg = padlock_memcpy(out_arg, out, chunk) + chunk;
- else
- out = out_arg+=chunk;
-
- nbytes -= chunk;
- } while (nbytes >= AES_BLOCK_SIZE);
-
- cfb_skiploop:
- if (nbytes) {
- unsigned char *ivp = cdata->iv;
-
- if (iv != ivp) {
- memcpy(ivp, iv, AES_BLOCK_SIZE);
- iv = ivp;
- }
- ctx->num = nbytes;
- if (cdata->cword.b.encdec) {
- cdata->cword.b.encdec=0;
- padlock_reload_key();
- padlock_xcrypt_ecb(1,cdata,ivp,ivp);
- cdata->cword.b.encdec=1;
- padlock_reload_key();
- while(nbytes) {
- unsigned char c = *(in_arg++);
- *(out_arg++) = c ^ *ivp;
- *(ivp++) = c, nbytes--;
- }
- }
- else { padlock_reload_key();
- padlock_xcrypt_ecb(1,cdata,ivp,ivp);
- padlock_reload_key();
- while (nbytes) {
- *ivp = *(out_arg++) = *(in_arg++) ^ *ivp;
- ivp++, nbytes--;
- }
- }
- }
-
- memcpy(ctx->iv, iv, AES_BLOCK_SIZE);
- break;
-
- case EVP_CIPH_OFB_MODE:
- memcpy(cdata->iv, ctx->iv, AES_BLOCK_SIZE);
- chunk &= ~(AES_BLOCK_SIZE-1);
- if (chunk) do {
- if (inp_misaligned)
- inp = padlock_memcpy(out, in_arg, chunk);
- else
- inp = in_arg;
- in_arg += chunk;
-
- padlock_xcrypt_ofb(chunk/AES_BLOCK_SIZE, cdata, out, inp);
-
- if (out_misaligned)
- out_arg = padlock_memcpy(out_arg, out, chunk) + chunk;
- else
- out = out_arg+=chunk;
-
- nbytes -= chunk;
- chunk = PADLOCK_CHUNK;
- } while (nbytes >= AES_BLOCK_SIZE);
-
- if (nbytes) {
- unsigned char *ivp = cdata->iv;
-
- ctx->num = nbytes;
- padlock_reload_key(); /* empirically found */
- padlock_xcrypt_ecb(1,cdata,ivp,ivp);
- padlock_reload_key(); /* empirically found */
- while (nbytes) {
- *(out_arg++) = *(in_arg++) ^ *ivp;
- ivp++, nbytes--;
- }
- }
-
- memcpy(ctx->iv, cdata->iv, AES_BLOCK_SIZE);
- break;
-
- default:
- return 0;
- }
-
- /* Clean the realign buffer if it was used */
- if (out_misaligned) {
- volatile unsigned long *p=(void *)out;
- size_t n = allocated/sizeof(*p);
- while (n--) *p++=0;
- }
-
- memset(cdata->iv, 0, AES_BLOCK_SIZE);
-
- return 1;
-}
-
#endif /* OPENSSL_NO_AES */
/* ===== Random Number Generator ===== */