diff options
author | Andy Polyakov <appro@openssl.org> | 2010-04-14 19:04:51 +0000 |
---|---|---|
committer | Andy Polyakov <appro@openssl.org> | 2010-04-14 19:04:51 +0000 |
commit | 4f39edbff1213c3c97f5a8367aa6fb650f1d57b3 (patch) | |
tree | 3cc53e4661c49a672a3df216053c7412a5c448ad /crypto/modes/gcm128.c | |
parent | 8decc967dc3089344e1bd0082f87a6b349d30ce7 (diff) | |
download | openssl-4f39edbff1213c3c97f5a8367aa6fb650f1d57b3.tar.gz |
gcm128.c and assembler modules: change argument order for gcm_ghash_4bit.
ghash-x86*.pl: fix performance numbers for Core2, as it turned out that the
previous ones were "tainted" by variable clock frequency.
Diffstat (limited to 'crypto/modes/gcm128.c')
-rw-r--r-- | crypto/modes/gcm128.c | 11 |
1 file changed, 6 insertions, 5 deletions
diff --git a/crypto/modes/gcm128.c b/crypto/modes/gcm128.c index ce2d178215..7501833007 100644 --- a/crypto/modes/gcm128.c +++ b/crypto/modes/gcm128.c @@ -339,7 +339,7 @@ static const size_t rem_4bit[16] = { PACK(0xE100), PACK(0xFD20), PACK(0xD940), PACK(0xC560), PACK(0x9180), PACK(0x8DA0), PACK(0xA9C0), PACK(0xB5E0) }; -static void gcm_gmult_4bit(u64 Xi[2], u128 Htable[16]) +static void gcm_gmult_4bit(u64 Xi[2], const u128 Htable[16]) { u128 Z; int cnt = 15; @@ -410,7 +410,8 @@ static void gcm_gmult_4bit(u64 Xi[2], u128 Htable[16]) * mostly as reference and a placeholder for possible future * non-trivial optimization[s]... */ -static void gcm_ghash_4bit(const u8 *inp,size_t len,u64 Xi[2], u128 Htable[16]) +static void gcm_ghash_4bit(u64 Xi[2],const u128 Htable[16], + const u8 *inp,size_t len) { u128 Z; int cnt; @@ -479,13 +480,13 @@ static void gcm_ghash_4bit(const u8 *inp,size_t len,u64 Xi[2], u128 Htable[16]) } #endif #else -void gcm_gmult_4bit(u64 Xi[2],u128 Htable[16]); -void gcm_ghash_4bit(const u8 *inp,size_t len,u64 Xi[2],u128 Htable[16]); +void gcm_gmult_4bit(u64 Xi[2],const u128 Htable[16]); +void gcm_ghash_4bit(u64 Xi[2],const u128 Htable[16],const u8 *inp,size_t len); #endif #define GCM_MUL(ctx,Xi) gcm_gmult_4bit(ctx->Xi.u,ctx->Htable) #if defined(GHASH_ASM) || !defined(OPENSSL_SMALL_FOOTPRINT) -#define GHASH(in,len,ctx) gcm_ghash_4bit(in,len,(ctx)->Xi.u,(ctx)->Htable) +#define GHASH(in,len,ctx) gcm_ghash_4bit((ctx)->Xi.u,(ctx)->Htable,in,len) /* GHASH_CHUNK is "stride parameter" missioned to mitigate cache * trashing effect. In other words idea is to hash data while it's * still in L1 cache after encryption pass... */ |