/* XMRig
 * Copyright 2010      Jeff Garzik
 * Copyright 2012-2014 pooler
 * Copyright 2014      Lucas Jones
 * Copyright 2014-2016 Wolf9466
 * Copyright 2016      Jay D Dee
 * Copyright 2017      fireice-uk
 * Copyright 2016-2017 XMRig
 *
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __CRYPTONIGHT_P_H__
#define __CRYPTONIGHT_P_H__

#include <x86intrin.h>   // SSE2 and AES-NI intrinsics used throughout this header

// MEMORY (the scratchpad size in bytes) normally comes from the main
// cryptonight header; the fallback below assumes the classic 2 MiB scratchpad.
#ifndef MEMORY
#   define MEMORY 2097152
#endif


// One step of the AES key schedule; `imm8` is the round constant. Expects
// `__m128i* xout0` and `__m128i* xout2` to be in scope at the expansion site.
#define aes_genkey_sub(imm8) \
    __m128i xout1 = _mm_aeskeygenassist_si128(*xout2, (imm8)); \
    xout1  = _mm_shuffle_epi32(xout1, 0xFF); \
    *xout0 = sl_xor(*xout0); \
    *xout0 = _mm_xor_si128(*xout0, xout1); \
    xout1  = _mm_aeskeygenassist_si128(*xout0, 0x00); \
    xout1  = _mm_shuffle_epi32(xout1, 0xAA); \
    *xout2 = sl_xor(*xout2); \
    *xout2 = _mm_xor_si128(*xout2, xout1);


// This will shift and xor tmp1 into itself as 4 32-bit vals such as
// sl_xor(a1 a2 a3 a4) = a1 (a2^a1) (a3^a2^a1) (a4^a3^a2^a1)
static inline __m128i sl_xor(__m128i tmp1)
{
    __m128i tmp4;
    tmp4 = _mm_slli_si128(tmp1, 0x04);
    tmp1 = _mm_xor_si128(tmp1, tmp4);
    tmp4 = _mm_slli_si128(tmp4, 0x04);
    tmp1 = _mm_xor_si128(tmp1, tmp4);
    tmp4 = _mm_slli_si128(tmp4, 0x04);
    tmp1 = _mm_xor_si128(tmp1, tmp4);
    return tmp1;
}


static inline void aes_genkey_sub1(__m128i* xout0, __m128i* xout2) { aes_genkey_sub(0x1) }
static inline void aes_genkey_sub2(__m128i* xout0, __m128i* xout2) { aes_genkey_sub(0x2) }
static inline void aes_genkey_sub4(__m128i* xout0, __m128i* xout2) { aes_genkey_sub(0x4) }
static inline void aes_genkey_sub8(__m128i* xout0, __m128i* xout2) { aes_genkey_sub(0x8) }


// One AES encryption round applied to all eight 16-byte lanes with the same round key.
static inline void aes_round(__m128i key, __m128i* x0, __m128i* x1, __m128i* x2, __m128i* x3, __m128i* x4, __m128i* x5, __m128i* x6, __m128i* x7)
{
    *x0 = _mm_aesenc_si128(*x0, key);
    *x1 = _mm_aesenc_si128(*x1, key);
    *x2 = _mm_aesenc_si128(*x2, key);
    *x3 = _mm_aesenc_si128(*x3, key);
    *x4 = _mm_aesenc_si128(*x4, key);
    *x5 = _mm_aesenc_si128(*x5, key);
    *x6 = _mm_aesenc_si128(*x6, key);
    *x7 = _mm_aesenc_si128(*x7, key);
}


// Expands the 32 bytes at `memory` into ten round keys, using the AES-256
// key-schedule step with round constants 0x01, 0x02, 0x04 and 0x08.
static inline void aes_genkey(const __m128i* memory, __m128i* k0, __m128i* k1, __m128i* k2, __m128i* k3, __m128i* k4, __m128i* k5, __m128i* k6, __m128i* k7, __m128i* k8, __m128i* k9)
{
    __m128i xout0 = _mm_load_si128(memory);
    __m128i xout2 = _mm_load_si128(memory + 1);
    *k0 = xout0;
    *k1 = xout2;

    aes_genkey_sub1(&xout0, &xout2);
    *k2 = xout0;
    *k3 = xout2;

    aes_genkey_sub2(&xout0, &xout2);
    *k4 = xout0;
    *k5 = xout2;

    aes_genkey_sub4(&xout0, &xout2);
    *k6 = xout0;
    *k7 = xout2;

    aes_genkey_sub8(&xout0, &xout2);
    *k8 = xout0;
    *k9 = xout2;
}


// Fills the scratchpad from the hash state: the round keys are derived from
// state bytes 0..31 and the eight lanes start from state bytes 64..191.
static inline void cn_explode_scratchpad(const __m128i* input, __m128i* output)
{
    // This is more than we have registers, compiler will assign 2 keys on the stack
    __m128i xin0, xin1, xin2, xin3, xin4, xin5, xin6, xin7;
    __m128i k0, k1, k2, k3, k4, k5, k6, k7, k8, k9;

    aes_genkey(input, &k0, &k1, &k2, &k3, &k4, &k5, &k6, &k7, &k8, &k9);

    xin0 = _mm_load_si128(input + 4);
    xin1 = _mm_load_si128(input + 5);
    xin2 = _mm_load_si128(input + 6);
    xin3 = _mm_load_si128(input + 7);
    xin4 = _mm_load_si128(input + 8);
    xin5 = _mm_load_si128(input + 9);
    xin6 = _mm_load_si128(input + 10);
    xin7 = _mm_load_si128(input + 11);
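    // Each pass below encrypts the eight lanes with the ten round keys and
    // streams the resulting 128 bytes into the scratchpad, repeating until
    // all MEMORY bytes have been written.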
    for (size_t i = 0; __builtin_expect(i < MEMORY / sizeof(__m128i), 1); i += 8) {
        aes_round(k0, &xin0, &xin1, &xin2, &xin3, &xin4, &xin5, &xin6, &xin7);
        aes_round(k1, &xin0, &xin1, &xin2, &xin3, &xin4, &xin5, &xin6, &xin7);
        aes_round(k2, &xin0, &xin1, &xin2, &xin3, &xin4, &xin5, &xin6, &xin7);
        aes_round(k3, &xin0, &xin1, &xin2, &xin3, &xin4, &xin5, &xin6, &xin7);
        aes_round(k4, &xin0, &xin1, &xin2, &xin3, &xin4, &xin5, &xin6, &xin7);
        aes_round(k5, &xin0, &xin1, &xin2, &xin3, &xin4, &xin5, &xin6, &xin7);
        aes_round(k6, &xin0, &xin1, &xin2, &xin3, &xin4, &xin5, &xin6, &xin7);
        aes_round(k7, &xin0, &xin1, &xin2, &xin3, &xin4, &xin5, &xin6, &xin7);
        aes_round(k8, &xin0, &xin1, &xin2, &xin3, &xin4, &xin5, &xin6, &xin7);
        aes_round(k9, &xin0, &xin1, &xin2, &xin3, &xin4, &xin5, &xin6, &xin7);

        _mm_store_si128(output + i + 0, xin0);
        _mm_store_si128(output + i + 1, xin1);
        _mm_store_si128(output + i + 2, xin2);
        _mm_store_si128(output + i + 3, xin3);
        // Note: the cast must wrap the whole expression; `(const char*)output + i`
        // would advance by i bytes rather than i 16-byte elements.
        _mm_prefetch((const char*)(output + i + 0), _MM_HINT_T2);

        _mm_store_si128(output + i + 4, xin4);
        _mm_store_si128(output + i + 5, xin5);
        _mm_store_si128(output + i + 6, xin6);
        _mm_store_si128(output + i + 7, xin7);
        _mm_prefetch((const char*)(output + i + 4), _MM_HINT_T2);
    }
}


// Folds the whole scratchpad back into the hash state: the round keys are
// derived from state bytes 32..63, and state bytes 64..191 absorb every
// scratchpad block via XOR followed by ten AES rounds.
static inline void cn_implode_scratchpad(const __m128i* input, __m128i* output)
{
    // This is more than we have registers, compiler will assign 2 keys on the stack
    __m128i xout0, xout1, xout2, xout3, xout4, xout5, xout6, xout7;
    __m128i k0, k1, k2, k3, k4, k5, k6, k7, k8, k9;

    aes_genkey(output + 2, &k0, &k1, &k2, &k3, &k4, &k5, &k6, &k7, &k8, &k9);

    xout0 = _mm_load_si128(output + 4);
    xout1 = _mm_load_si128(output + 5);
    xout2 = _mm_load_si128(output + 6);
    xout3 = _mm_load_si128(output + 7);
    xout4 = _mm_load_si128(output + 8);
    xout5 = _mm_load_si128(output + 9);
    xout6 = _mm_load_si128(output + 10);
    xout7 = _mm_load_si128(output + 11);

    for (size_t i = 0; __builtin_expect(i < MEMORY / sizeof(__m128i), 1); i += 8) {
        _mm_prefetch((const char*)(input + i + 0), _MM_HINT_NTA);

        xout0 = _mm_xor_si128(_mm_load_si128(input + i + 0), xout0);
        xout1 = _mm_xor_si128(_mm_load_si128(input + i + 1), xout1);
        xout2 = _mm_xor_si128(_mm_load_si128(input + i + 2), xout2);
        xout3 = _mm_xor_si128(_mm_load_si128(input + i + 3), xout3);

        _mm_prefetch((const char*)(input + i + 4), _MM_HINT_NTA);

        xout4 = _mm_xor_si128(_mm_load_si128(input + i + 4), xout4);
        xout5 = _mm_xor_si128(_mm_load_si128(input + i + 5), xout5);
        xout6 = _mm_xor_si128(_mm_load_si128(input + i + 6), xout6);
        xout7 = _mm_xor_si128(_mm_load_si128(input + i + 7), xout7);

        aes_round(k0, &xout0, &xout1, &xout2, &xout3, &xout4, &xout5, &xout6, &xout7);
        aes_round(k1, &xout0, &xout1, &xout2, &xout3, &xout4, &xout5, &xout6, &xout7);
        aes_round(k2, &xout0, &xout1, &xout2, &xout3, &xout4, &xout5, &xout6, &xout7);
        aes_round(k3, &xout0, &xout1, &xout2, &xout3, &xout4, &xout5, &xout6, &xout7);
        aes_round(k4, &xout0, &xout1, &xout2, &xout3, &xout4, &xout5, &xout6, &xout7);
        aes_round(k5, &xout0, &xout1, &xout2, &xout3, &xout4, &xout5, &xout6, &xout7);
        aes_round(k6, &xout0, &xout1, &xout2, &xout3, &xout4, &xout5, &xout6, &xout7);
        aes_round(k7, &xout0, &xout1, &xout2, &xout3, &xout4, &xout5, &xout6, &xout7);
        aes_round(k8, &xout0, &xout1, &xout2, &xout3, &xout4, &xout5, &xout6, &xout7);
        aes_round(k9, &xout0, &xout1, &xout2, &xout3, &xout4, &xout5, &xout6, &xout7);
    }

    _mm_store_si128(output + 4,  xout0);
    _mm_store_si128(output + 5,  xout1);
    _mm_store_si128(output + 6,  xout2);
    _mm_store_si128(output + 7,  xout3);
    _mm_store_si128(output + 8,  xout4);
    _mm_store_si128(output + 9,  xout5);
    _mm_store_si128(output + 10, xout6);
    _mm_store_si128(output + 11, xout7);
}
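
/*
 * Illustrative sketch only (not part of the original header): how a caller
 * typically drives these helpers. `state` would be the 200-byte Keccak-1600
 * hash state and `scratchpad` a 16-byte-aligned, MEMORY-sized buffer; both
 * names are hypothetical, and the memory-hard main loop between the two
 * calls is omitted. Guarded with #if 0 so it is never compiled.
 */
#if 0
static void cn_hash_outline(uint8_t* state, uint8_t* scratchpad)
{
    // Expand the hash state into the full scratchpad.
    cn_explode_scratchpad((const __m128i*) state, (__m128i*) scratchpad);

    /* ... CryptoNight memory-hard main loop over the scratchpad ... */

    // Fold the scratchpad back into the hash state before finalization.
    cn_implode_scratchpad((const __m128i*) scratchpad, (__m128i*) state);
}
#endif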

#endif /* __CRYPTONIGHT_P_H__ */