From 8235ae0fa66402f9947bda1623440d197d167574 Mon Sep 17 00:00:00 2001
From: XMRig
Date: Fri, 21 Apr 2017 15:47:11 +0300
Subject: [PATCH] Add 32 bit support for software AES too.

---
 CMakeLists.txt                                |   1 +
 algo/cryptonight/cryptonight_common.c         |  13 +-
 .../i686/cryptonight_av4_softaes.c            | 277 ++++++++++++++++++
 3 files changed, 282 insertions(+), 9 deletions(-)
 create mode 100644 algo/cryptonight/i686/cryptonight_av4_softaes.c

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 44b77493e..3d75c3edc 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -111,6 +111,7 @@ if (CMAKE_SIZEOF_VOID_P EQUAL 8)
 else()
     set(CRYPTONIGHT32
         algo/cryptonight/i686/cryptonight_av1_aesni.c
+        algo/cryptonight/i686/cryptonight_av4_softaes.c
     )
 
     add_executable(xmrig32 ${HEADERS} ${HEADERS_CRYPTO} ${SOURCES} ${SOURCES_CRYPTO} ${HEADERS_UTILS} ${SOURCES_UTILS} ${HEADERS_COMPAT} ${SOURCES_COMPAT} ${SOURCES_OS} ${CRYPTONIGHT32})
diff --git a/algo/cryptonight/cryptonight_common.c b/algo/cryptonight/cryptonight_common.c
index 9b448093f..67876e8c9 100644
--- a/algo/cryptonight/cryptonight_common.c
+++ b/algo/cryptonight/cryptonight_common.c
@@ -37,19 +37,14 @@
 void cryptonight_av1_aesni(void* output, const void* input, struct cryptonight_ctx* ctx);
+void cryptonight_av4_softaes(void* output, const void* input, struct cryptonight_ctx* ctx);
 
 #if defined(__x86_64__)
-void cryptonight_av2_aesni_stak(void* output, const void* input, struct cryptonight_ctx* ctx);
 void cryptonight_av3_aesni_bmi2(void* output, const void* input, struct cryptonight_ctx* ctx);
-void cryptonight_av4_softaes(void* output, const void* input, struct cryptonight_ctx* ctx);
 void cryptonight_av5_aesni_experimental(void* output, const void* input, struct cryptonight_ctx* ctx);
-#elif defined(__i386__)
-// void cryptonight_av1_aesni32(void* output, const void* input, const char *memory, struct cryptonight_ctx* ctx);
 #endif
 
-//void cryptonight_av4_softaes(void* output, const void* input, struct cryptonight_ctx* ctx);
-
 void (*cryptonight_hash_ctx)(void* output, const void* input, struct cryptonight_ctx* ctx) = NULL;
 
 
@@ -74,9 +69,9 @@
         break;
 # endif
 
-//    case XMR_AV4_SOFT_AES:
-//        cryptonight_hash_ctx = cryptonight_av4_softaes;
-//        break;
+    case XMR_AV4_SOFT_AES:
+        cryptonight_hash_ctx = cryptonight_av4_softaes;
+        break;
 
     default:
         break;
diff --git a/algo/cryptonight/i686/cryptonight_av4_softaes.c b/algo/cryptonight/i686/cryptonight_av4_softaes.c
new file mode 100644
index 000000000..5053b1299
--- /dev/null
+++ b/algo/cryptonight/i686/cryptonight_av4_softaes.c
@@ -0,0 +1,277 @@
+/* XMRig
+ * Copyright 2010      Jeff Garzik
+ * Copyright 2012-2014 pooler
+ * Copyright 2014      Lucas Jones
+ * Copyright 2014-2016 Wolf9466
+ * Copyright 2016      Jay D Dee
+ * Copyright 2017      fireice-uk
+ * Copyright 2016-2017 XMRig
+ *
+ *
+ *   This program is free software: you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation, either version 3 of the License, or
+ *   (at your option) any later version.
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ *   GNU General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <x86intrin.h>
+#include <stdint.h>
+
+#include "algo/cryptonight/cryptonight.h"
+#include "crypto/c_keccak.h"
+
+__m128i soft_aesenc(__m128i in, __m128i key);
+__m128i soft_aeskeygenassist(__m128i key, uint8_t rcon);
+
+#ifdef __GNUC__
+static inline uint64_t _umul128(uint64_t multiplier, uint64_t multiplicand, uint64_t *product_hi) {
+    // multiplier   = ab = a * 2^32 + b
+    // multiplicand = cd = c * 2^32 + d
+    // ab * cd = a * c * 2^64 + (a * d + b * c) * 2^32 + b * d
+    uint64_t a = multiplier >> 32;
+    uint64_t b = multiplier & 0xFFFFFFFF;
+    uint64_t c = multiplicand >> 32;
+    uint64_t d = multiplicand & 0xFFFFFFFF;
+
+    //uint64_t ac = a * c;
+    uint64_t ad = a * d;
+    //uint64_t bc = b * c;
+    uint64_t bd = b * d;
+
+    uint64_t adbc = ad + (b * c);
+    uint64_t adbc_carry = adbc < ad ? 1 : 0;
+
+    // multiplier * multiplicand = product_hi * 2^64 + product_lo
+    uint64_t product_lo = bd + (adbc << 32);
+    uint64_t product_lo_carry = product_lo < bd ? 1 : 0;
+    *product_hi = (a * c) + (adbc >> 32) + (adbc_carry << 32) + product_lo_carry;
+
+    return product_lo;
+}
+#endif
+
+
+#define HI32(X) \
+    _mm_srli_si128((X), 4)
+
+
+#define EXTRACT64(X) \
+    ((uint64_t)(uint32_t)_mm_cvtsi128_si32(X) | \
+    ((uint64_t)(uint32_t)_mm_cvtsi128_si32(HI32(X)) << 32))
+
+
+// This will shift and xor tmp1 into itself as 4 32-bit vals such as
+// sl_xor(a1 a2 a3 a4) = a1 (a2^a1) (a3^a2^a1) (a4^a3^a2^a1)
+static inline __m128i sl_xor(__m128i tmp1)
+{
+    __m128i tmp4;
+    tmp4 = _mm_slli_si128(tmp1, 0x04);
+    tmp1 = _mm_xor_si128(tmp1, tmp4);
+    tmp4 = _mm_slli_si128(tmp4, 0x04);
+    tmp1 = _mm_xor_si128(tmp1, tmp4);
+    tmp4 = _mm_slli_si128(tmp4, 0x04);
+    tmp1 = _mm_xor_si128(tmp1, tmp4);
+    return tmp1;
+}
+
+
+static inline void aes_genkey_sub(__m128i* xout0, __m128i* xout2, uint8_t rcon)
+{
+    __m128i xout1 = soft_aeskeygenassist(*xout2, rcon);
+    xout1  = _mm_shuffle_epi32(xout1, 0xFF); // see PSHUFD, set all elems to 4th elem
+    *xout0 = sl_xor(*xout0);
+    *xout0 = _mm_xor_si128(*xout0, xout1);
+    xout1  = soft_aeskeygenassist(*xout0, 0x00);
+    xout1  = _mm_shuffle_epi32(xout1, 0xAA); // see PSHUFD, set all elems to 3rd elem
+    *xout2 = sl_xor(*xout2);
+    *xout2 = _mm_xor_si128(*xout2, xout1);
+}
+
+
+static inline void aes_genkey(const __m128i* memory, __m128i* k0, __m128i* k1, __m128i* k2, __m128i* k3, __m128i* k4, __m128i* k5, __m128i* k6, __m128i* k7, __m128i* k8, __m128i* k9)
+{
+    __m128i xout0 = _mm_load_si128(memory);
+    __m128i xout2 = _mm_load_si128(memory + 1);
+    *k0 = xout0;
+    *k1 = xout2;
+
+    aes_genkey_sub(&xout0, &xout2, 0x1);
+    *k2 = xout0;
+    *k3 = xout2;
+
+    aes_genkey_sub(&xout0, &xout2, 0x2);
+    *k4 = xout0;
+    *k5 = xout2;
+
+    aes_genkey_sub(&xout0, &xout2, 0x4);
+    *k6 = xout0;
+    *k7 = xout2;
+
+    aes_genkey_sub(&xout0, &xout2, 0x8);
+    *k8 = xout0;
+    *k9 = xout2;
+}
+
+
+static inline void aes_round(__m128i key, __m128i* x0, __m128i* x1, __m128i* x2, __m128i* x3, __m128i* x4, __m128i* x5, __m128i* x6, __m128i* x7)
+{
+    *x0 = soft_aesenc(*x0, key);
+    *x1 = soft_aesenc(*x1, key);
+    *x2 = soft_aesenc(*x2, key);
+    *x3 = soft_aesenc(*x3, key);
+    *x4 = soft_aesenc(*x4, key);
+    *x5 = soft_aesenc(*x5, key);
+    *x6 = soft_aesenc(*x6, key);
+    *x7 = soft_aesenc(*x7, key);
+}
+
+
+static inline void cn_explode_scratchpad(const __m128i* input, __m128i* output)
+{
+    // This is more than we have registers, compiler will assign 2 keys on the stack
+    __m128i xin0, xin1, xin2, xin3, xin4, xin5, xin6, xin7;
+    __m128i k0, k1, k2, k3, k4, k5, k6, k7, k8, k9;
+
+    aes_genkey(input, &k0, &k1, &k2, &k3, &k4, &k5, &k6, &k7, &k8, &k9);
+
+    xin0 = _mm_load_si128(input + 4);
+    xin1 = _mm_load_si128(input + 5);
+    xin2 = _mm_load_si128(input + 6);
+    xin3 = _mm_load_si128(input + 7);
+    xin4 = _mm_load_si128(input + 8);
+    xin5 = _mm_load_si128(input + 9);
+    xin6 = _mm_load_si128(input + 10);
+    xin7 = _mm_load_si128(input + 11);
+
+    for (size_t i = 0; i < MEMORY / sizeof(__m128i); i += 8) {
+        aes_round(k0, &xin0, &xin1, &xin2, &xin3, &xin4, &xin5, &xin6, &xin7);
+        aes_round(k1, &xin0, &xin1, &xin2, &xin3, &xin4, &xin5, &xin6, &xin7);
+        aes_round(k2, &xin0, &xin1, &xin2, &xin3, &xin4, &xin5, &xin6, &xin7);
+        aes_round(k3, &xin0, &xin1, &xin2, &xin3, &xin4, &xin5, &xin6, &xin7);
+        aes_round(k4, &xin0, &xin1, &xin2, &xin3, &xin4, &xin5, &xin6, &xin7);
+        aes_round(k5, &xin0, &xin1, &xin2, &xin3, &xin4, &xin5, &xin6, &xin7);
+        aes_round(k6, &xin0, &xin1, &xin2, &xin3, &xin4, &xin5, &xin6, &xin7);
+        aes_round(k7, &xin0, &xin1, &xin2, &xin3, &xin4, &xin5, &xin6, &xin7);
+        aes_round(k8, &xin0, &xin1, &xin2, &xin3, &xin4, &xin5, &xin6, &xin7);
+        aes_round(k9, &xin0, &xin1, &xin2, &xin3, &xin4, &xin5, &xin6, &xin7);
+
+        _mm_store_si128(output + i + 0, xin0);
+        _mm_store_si128(output + i + 1, xin1);
+        _mm_store_si128(output + i + 2, xin2);
+        _mm_store_si128(output + i + 3, xin3);
+        _mm_prefetch((const char*)output + i + 0, _MM_HINT_T2);
+        _mm_store_si128(output + i + 4, xin4);
+        _mm_store_si128(output + i + 5, xin5);
+        _mm_store_si128(output + i + 6, xin6);
+        _mm_store_si128(output + i + 7, xin7);
+        _mm_prefetch((const char*)output + i + 4, _MM_HINT_T2);
+    }
+}
+
+
+static inline void cn_implode_scratchpad(const __m128i* input, __m128i* output)
+{
+    // This is more than we have registers, compiler will assign 2 keys on the stack
+    __m128i xout0, xout1, xout2, xout3, xout4, xout5, xout6, xout7;
+    __m128i k0, k1, k2, k3, k4, k5, k6, k7, k8, k9;
+
+    aes_genkey(output + 2, &k0, &k1, &k2, &k3, &k4, &k5, &k6, &k7, &k8, &k9);
+
+    xout0 = _mm_load_si128(output + 4);
+    xout1 = _mm_load_si128(output + 5);
+    xout2 = _mm_load_si128(output + 6);
+    xout3 = _mm_load_si128(output + 7);
+    xout4 = _mm_load_si128(output + 8);
+    xout5 = _mm_load_si128(output + 9);
+    xout6 = _mm_load_si128(output + 10);
+    xout7 = _mm_load_si128(output + 11);
+
+    for (size_t i = 0; i < MEMORY / sizeof(__m128i); i += 8)
+    {
+        _mm_prefetch((const char*)input + i + 0, _MM_HINT_NTA);
+        xout0 = _mm_xor_si128(_mm_load_si128(input + i + 0), xout0);
+        xout1 = _mm_xor_si128(_mm_load_si128(input + i + 1), xout1);
+        xout2 = _mm_xor_si128(_mm_load_si128(input + i + 2), xout2);
+        xout3 = _mm_xor_si128(_mm_load_si128(input + i + 3), xout3);
+        _mm_prefetch((const char*)input + i + 4, _MM_HINT_NTA);
+        xout4 = _mm_xor_si128(_mm_load_si128(input + i + 4), xout4);
+        xout5 = _mm_xor_si128(_mm_load_si128(input + i + 5), xout5);
+        xout6 = _mm_xor_si128(_mm_load_si128(input + i + 6), xout6);
+        xout7 = _mm_xor_si128(_mm_load_si128(input + i + 7), xout7);
+
+        aes_round(k0, &xout0, &xout1, &xout2, &xout3, &xout4, &xout5, &xout6, &xout7);
+        aes_round(k1, &xout0, &xout1, &xout2, &xout3, &xout4, &xout5, &xout6, &xout7);
+        aes_round(k2, &xout0, &xout1, &xout2, &xout3, &xout4, &xout5, &xout6, &xout7);
+        aes_round(k3, &xout0, &xout1, &xout2, &xout3, &xout4, &xout5, &xout6, &xout7);
+        aes_round(k4, &xout0, &xout1, &xout2, &xout3, &xout4, &xout5, &xout6, &xout7);
+        aes_round(k5, &xout0, &xout1, &xout2, &xout3, &xout4, &xout5, &xout6, &xout7);
+        aes_round(k6, &xout0, &xout1, &xout2, &xout3, &xout4, &xout5, &xout6, &xout7);
+        aes_round(k7, &xout0, &xout1, &xout2, &xout3, &xout4, &xout5, &xout6, &xout7);
+        aes_round(k8, &xout0, &xout1, &xout2, &xout3, &xout4, &xout5, &xout6, &xout7);
+        aes_round(k9, &xout0, &xout1, &xout2, &xout3, &xout4, &xout5, &xout6, &xout7);
+    }
+
+    _mm_store_si128(output + 4, xout0);
+    _mm_store_si128(output + 5, xout1);
+    _mm_store_si128(output + 6, xout2);
+    _mm_store_si128(output + 7, xout3);
+    _mm_store_si128(output + 8, xout4);
+    _mm_store_si128(output + 9, xout5);
+    _mm_store_si128(output + 10, xout6);
+    _mm_store_si128(output + 11, xout7);
+}
+
+
+void cryptonight_av4_softaes(void *restrict output, const void *restrict input, struct cryptonight_ctx *restrict ctx)
+{
+    keccak((const uint8_t *) input, 76, ctx->state, 200);
+
+    cn_explode_scratchpad((__m128i*) ctx->state, (__m128i*) ctx->memory);
+
+    const uint8_t* l0 = ctx->memory;
+    uint64_t* h0 = (uint64_t*) ctx->state;
+
+    uint64_t al0 = h0[0] ^ h0[4];
+    uint64_t ah0 = h0[1] ^ h0[5];
+    __m128i bx0 = _mm_set_epi64x(h0[3] ^ h0[7], h0[2] ^ h0[6]);
+
+    uint64_t idx0 = h0[0] ^ h0[4];
+
+    for (size_t i = 0; __builtin_expect(i < 0x80000, 1); i++) {
+        __m128i cx;
+        cx = _mm_load_si128((__m128i *)&l0[idx0 & 0x1FFFF0]);
+        cx = soft_aesenc(cx, _mm_set_epi64x(ah0, al0));
+
+        _mm_store_si128((__m128i *)&l0[idx0 & 0x1FFFF0], _mm_xor_si128(bx0, cx));
+        idx0 = EXTRACT64(cx);
+        bx0 = cx;
+
+        uint64_t hi, lo, cl, ch;
+        cl = ((uint64_t*)&l0[idx0 & 0x1FFFF0])[0];
+        ch = ((uint64_t*)&l0[idx0 & 0x1FFFF0])[1];
+        lo = _umul128(idx0, cl, &hi);
+
+        al0 += hi;
+        ah0 += lo;
+
+        ((uint64_t*)&l0[idx0 & 0x1FFFF0])[0] = al0;
+        ((uint64_t*)&l0[idx0 & 0x1FFFF0])[1] = ah0;
+
+        ah0 ^= ch;
+        al0 ^= cl;
+        idx0 = al0;
+    }
+
+    cn_implode_scratchpad((__m128i*) ctx->memory, (__m128i*) ctx->state);
+
+    keccakf(h0, 24);
+    extra_hashes[ctx->state[0] & 3](ctx->state, 200, output);
+}
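
The new file leans on two 32-bit portability helpers: the schoolbook `_umul128` fallback (the a*2^32 + b decomposition commented in the code) and the HI32/EXTRACT64 pair that reads the low quadword of an __m128i with two 32-bit extractions, since _mm_cvtsi128_si64 is unavailable when targeting i686. Below is a minimal standalone sketch, not part of the patch, that exercises both helpers against results that can be checked by hand; the file name, the name mul128_portable and the test values are illustrative only. It should build with something like gcc -m32 -msse2 -O2 check_helpers.c.

/* check_helpers.c: standalone sanity check, not part of XMRig. */
#include <emmintrin.h>
#include <stdint.h>
#include <stdio.h>

/* Same trick as the patch: low 64 bits of an __m128i via two 32-bit extracts. */
#define HI32(X)      _mm_srli_si128((X), 4)
#define EXTRACT64(X) ((uint64_t)(uint32_t)_mm_cvtsi128_si32(X) | \
                     ((uint64_t)(uint32_t)_mm_cvtsi128_si32(HI32(X)) << 32))

/* Mirror of the patch's _umul128 fallback: 64x64 -> 128 bit multiply built
 * from 32-bit halves (x = a*2^32 + b, y = c*2^32 + d). */
static uint64_t mul128_portable(uint64_t x, uint64_t y, uint64_t *hi)
{
    uint64_t a = x >> 32, b = x & 0xFFFFFFFF;
    uint64_t c = y >> 32, d = y & 0xFFFFFFFF;

    uint64_t ad = a * d;
    uint64_t bd = b * d;
    uint64_t adbc = ad + b * c;              /* middle term, may wrap */
    uint64_t adbc_carry = adbc < ad ? 1 : 0;

    uint64_t lo = bd + (adbc << 32);
    uint64_t lo_carry = lo < bd ? 1 : 0;
    *hi = a * c + (adbc >> 32) + (adbc_carry << 32) + lo_carry;
    return lo;
}

int main(void)
{
    uint64_t hi, lo;

    /* (2^64 - 1)^2 = 0xFFFFFFFFFFFFFFFE_0000000000000001 */
    lo = mul128_portable(UINT64_MAX, UINT64_MAX, &hi);
    printf("umul128 #1: %s\n", hi == 0xFFFFFFFFFFFFFFFEULL && lo == 1 ? "ok" : "FAIL");

    /* 2^32 * 2^32 = 2^64 -> hi = 1, lo = 0 */
    lo = mul128_portable(1ULL << 32, 1ULL << 32, &hi);
    printf("umul128 #2: %s\n", hi == 1 && lo == 0 ? "ok" : "FAIL");

    /* EXTRACT64 must return the low quadword: 0x1122334455667788 */
    __m128i v = _mm_set_epi32(0, 0, 0x11223344, 0x55667788);
    printf("extract64:  %s\n", EXTRACT64(v) == 0x1122334455667788ULL ? "ok" : "FAIL");

    return 0;
}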