From b35ecef06fd50251785cc89d7d551a7c3a65c2fe Mon Sep 17 00:00:00 2001 From: XMRig Date: Mon, 24 Apr 2017 13:23:49 +0300 Subject: [PATCH] Move common code to cryptonight_p.h --- CMakeLists.txt | 3 +- .../{cryptonight_common.c => cryptonight.c} | 0 algo/cryptonight/cryptonight_av1_aesni.c | 194 +--------------- algo/cryptonight/cryptonight_av2_aesni_stak.c | 194 +--------------- algo/cryptonight/cryptonight_av3_aesni_bmi2.c | 185 +-------------- .../cryptonight_av5_aesni_experimental.c | 185 +-------------- algo/cryptonight/cryptonight_p.h | 215 ++++++++++++++++++ 7 files changed, 231 insertions(+), 745 deletions(-) rename algo/cryptonight/{cryptonight_common.c => cryptonight.c} (100%) create mode 100644 algo/cryptonight/cryptonight_p.h diff --git a/CMakeLists.txt b/CMakeLists.txt index 3d75c3edc..b23c8bdee 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -4,6 +4,7 @@ project(xmrig C) set(HEADERS compat.h algo/cryptonight/cryptonight.h + algo/cryptonight/cryptonight_p.h elist.h xmrig.h version.h @@ -35,7 +36,7 @@ set(HEADERS_UTILS set(SOURCES xmrig.c - algo/cryptonight/cryptonight_common.c + algo/cryptonight/cryptonight.c util.c options.c cpu.c diff --git a/algo/cryptonight/cryptonight_common.c b/algo/cryptonight/cryptonight.c similarity index 100% rename from algo/cryptonight/cryptonight_common.c rename to algo/cryptonight/cryptonight.c diff --git a/algo/cryptonight/cryptonight_av1_aesni.c b/algo/cryptonight/cryptonight_av1_aesni.c index 9bb374a8b..d0252996e 100644 --- a/algo/cryptonight/cryptonight_av1_aesni.c +++ b/algo/cryptonight/cryptonight_av1_aesni.c @@ -26,201 +26,19 @@ #include #include "cryptonight.h" +#include "cryptonight_p.h" #include "crypto/c_keccak.h" -#ifdef __GNUC__ -static inline uint64_t _umul128(uint64_t a, uint64_t b, uint64_t* hi) +#ifndef __BMI2__ +static inline uint64_t _mulx_u64(uint64_t a, uint64_t b, uint64_t* hi) { - unsigned __int128 r = (unsigned __int128)a * (unsigned __int128)b; + unsigned __int128 r = (unsigned __int128) a * (unsigned __int128) b; *hi = r >> 64; - return (uint64_t)r; + return (uint64_t) r; } #endif -#define aes_genkey_sub(imm8) \ - __m128i xout1 = _mm_aeskeygenassist_si128(*xout2, (imm8)); \ - xout1 = _mm_shuffle_epi32(xout1, 0xFF); \ - *xout0 = sl_xor(*xout0); \ - *xout0 = _mm_xor_si128(*xout0, xout1); \ - xout1 = _mm_aeskeygenassist_si128(*xout0, 0x00);\ - xout1 = _mm_shuffle_epi32(xout1, 0xAA); \ - *xout2 = sl_xor(*xout2); \ - *xout2 = _mm_xor_si128(*xout2, xout1); \ - - -// This will shift and xor tmp1 into itself as 4 32-bit vals such as -// sl_xor(a1 a2 a3 a4) = a1 (a2^a1) (a3^a2^a1) (a4^a3^a2^a1) -static inline __m128i sl_xor(__m128i tmp1) -{ - __m128i tmp4; - tmp4 = _mm_slli_si128(tmp1, 0x04); - tmp1 = _mm_xor_si128(tmp1, tmp4); - tmp4 = _mm_slli_si128(tmp4, 0x04); - tmp1 = _mm_xor_si128(tmp1, tmp4); - tmp4 = _mm_slli_si128(tmp4, 0x04); - tmp1 = _mm_xor_si128(tmp1, tmp4); - return tmp1; -} - - -static inline void aes_genkey_sub1(__m128i* xout0, __m128i* xout2) -{ - aes_genkey_sub(0x1) -} - - -static inline void aes_genkey_sub2(__m128i* xout0, __m128i* xout2) -{ - aes_genkey_sub(0x2) -} - - -static inline void aes_genkey_sub4(__m128i* xout0, __m128i* xout2) -{ - aes_genkey_sub(0x4) -} - - -static inline void aes_genkey_sub8(__m128i* xout0, __m128i* xout2) -{ - aes_genkey_sub(0x8) -} - - -static inline void aes_genkey(const __m128i* memory, __m128i* k0, __m128i* k1, __m128i* k2, __m128i* k3, __m128i* k4, __m128i* k5, __m128i* k6, __m128i* k7, __m128i* k8, __m128i* k9) -{ - __m128i xout0 = _mm_load_si128(memory); - __m128i xout2 = 
_mm_load_si128(memory + 1); - *k0 = xout0; - *k1 = xout2; - - aes_genkey_sub1(&xout0, &xout2); - *k2 = xout0; - *k3 = xout2; - - aes_genkey_sub2(&xout0, &xout2); - *k4 = xout0; - *k5 = xout2; - - aes_genkey_sub4(&xout0, &xout2); - *k6 = xout0; - *k7 = xout2; - - aes_genkey_sub8(&xout0, &xout2); - *k8 = xout0; - *k9 = xout2; -} - - -static inline void aes_round(__m128i key, __m128i* x0, __m128i* x1, __m128i* x2, __m128i* x3, __m128i* x4, __m128i* x5, __m128i* x6, __m128i* x7) -{ - *x0 = _mm_aesenc_si128(*x0, key); - *x1 = _mm_aesenc_si128(*x1, key); - *x2 = _mm_aesenc_si128(*x2, key); - *x3 = _mm_aesenc_si128(*x3, key); - *x4 = _mm_aesenc_si128(*x4, key); - *x5 = _mm_aesenc_si128(*x5, key); - *x6 = _mm_aesenc_si128(*x6, key); - *x7 = _mm_aesenc_si128(*x7, key); -} - - -static inline void cn_explode_scratchpad(const __m128i* input, __m128i* output) -{ - // This is more than we have registers, compiler will assign 2 keys on the stack - __m128i xin0, xin1, xin2, xin3, xin4, xin5, xin6, xin7; - __m128i k0, k1, k2, k3, k4, k5, k6, k7, k8, k9; - - aes_genkey(input, &k0, &k1, &k2, &k3, &k4, &k5, &k6, &k7, &k8, &k9); - - xin0 = _mm_load_si128(input + 4); - xin1 = _mm_load_si128(input + 5); - xin2 = _mm_load_si128(input + 6); - xin3 = _mm_load_si128(input + 7); - xin4 = _mm_load_si128(input + 8); - xin5 = _mm_load_si128(input + 9); - xin6 = _mm_load_si128(input + 10); - xin7 = _mm_load_si128(input + 11); - - for (size_t i = 0; i < MEMORY / sizeof(__m128i); i += 8) { - aes_round(k0, &xin0, &xin1, &xin2, &xin3, &xin4, &xin5, &xin6, &xin7); - aes_round(k1, &xin0, &xin1, &xin2, &xin3, &xin4, &xin5, &xin6, &xin7); - aes_round(k2, &xin0, &xin1, &xin2, &xin3, &xin4, &xin5, &xin6, &xin7); - aes_round(k3, &xin0, &xin1, &xin2, &xin3, &xin4, &xin5, &xin6, &xin7); - aes_round(k4, &xin0, &xin1, &xin2, &xin3, &xin4, &xin5, &xin6, &xin7); - aes_round(k5, &xin0, &xin1, &xin2, &xin3, &xin4, &xin5, &xin6, &xin7); - aes_round(k6, &xin0, &xin1, &xin2, &xin3, &xin4, &xin5, &xin6, &xin7); - aes_round(k7, &xin0, &xin1, &xin2, &xin3, &xin4, &xin5, &xin6, &xin7); - aes_round(k8, &xin0, &xin1, &xin2, &xin3, &xin4, &xin5, &xin6, &xin7); - aes_round(k9, &xin0, &xin1, &xin2, &xin3, &xin4, &xin5, &xin6, &xin7); - - _mm_store_si128(output + i + 0, xin0); - _mm_store_si128(output + i + 1, xin1); - _mm_store_si128(output + i + 2, xin2); - _mm_store_si128(output + i + 3, xin3); - _mm_prefetch((const char*)output + i + 0, _MM_HINT_T2); - _mm_store_si128(output + i + 4, xin4); - _mm_store_si128(output + i + 5, xin5); - _mm_store_si128(output + i + 6, xin6); - _mm_store_si128(output + i + 7, xin7); - _mm_prefetch((const char*)output + i + 4, _MM_HINT_T2); - } -} - - -static inline void cn_implode_scratchpad(const __m128i* input, __m128i* output) -{ - // This is more than we have registers, compiler will assign 2 keys on the stack - __m128i xout0, xout1, xout2, xout3, xout4, xout5, xout6, xout7; - __m128i k0, k1, k2, k3, k4, k5, k6, k7, k8, k9; - - aes_genkey(output + 2, &k0, &k1, &k2, &k3, &k4, &k5, &k6, &k7, &k8, &k9); - - xout0 = _mm_load_si128(output + 4); - xout1 = _mm_load_si128(output + 5); - xout2 = _mm_load_si128(output + 6); - xout3 = _mm_load_si128(output + 7); - xout4 = _mm_load_si128(output + 8); - xout5 = _mm_load_si128(output + 9); - xout6 = _mm_load_si128(output + 10); - xout7 = _mm_load_si128(output + 11); - - for (size_t i = 0; i < MEMORY / sizeof(__m128i); i += 8) - { - _mm_prefetch((const char*)input + i + 0, _MM_HINT_NTA); - xout0 = _mm_xor_si128(_mm_load_si128(input + i + 0), xout0); - xout1 = 
_mm_xor_si128(_mm_load_si128(input + i + 1), xout1); - xout2 = _mm_xor_si128(_mm_load_si128(input + i + 2), xout2); - xout3 = _mm_xor_si128(_mm_load_si128(input + i + 3), xout3); - _mm_prefetch((const char*)input + i + 4, _MM_HINT_NTA); - xout4 = _mm_xor_si128(_mm_load_si128(input + i + 4), xout4); - xout5 = _mm_xor_si128(_mm_load_si128(input + i + 5), xout5); - xout6 = _mm_xor_si128(_mm_load_si128(input + i + 6), xout6); - xout7 = _mm_xor_si128(_mm_load_si128(input + i + 7), xout7); - - aes_round(k0, &xout0, &xout1, &xout2, &xout3, &xout4, &xout5, &xout6, &xout7); - aes_round(k1, &xout0, &xout1, &xout2, &xout3, &xout4, &xout5, &xout6, &xout7); - aes_round(k2, &xout0, &xout1, &xout2, &xout3, &xout4, &xout5, &xout6, &xout7); - aes_round(k3, &xout0, &xout1, &xout2, &xout3, &xout4, &xout5, &xout6, &xout7); - aes_round(k4, &xout0, &xout1, &xout2, &xout3, &xout4, &xout5, &xout6, &xout7); - aes_round(k5, &xout0, &xout1, &xout2, &xout3, &xout4, &xout5, &xout6, &xout7); - aes_round(k6, &xout0, &xout1, &xout2, &xout3, &xout4, &xout5, &xout6, &xout7); - aes_round(k7, &xout0, &xout1, &xout2, &xout3, &xout4, &xout5, &xout6, &xout7); - aes_round(k8, &xout0, &xout1, &xout2, &xout3, &xout4, &xout5, &xout6, &xout7); - aes_round(k9, &xout0, &xout1, &xout2, &xout3, &xout4, &xout5, &xout6, &xout7); - } - - _mm_store_si128(output + 4, xout0); - _mm_store_si128(output + 5, xout1); - _mm_store_si128(output + 6, xout2); - _mm_store_si128(output + 7, xout3); - _mm_store_si128(output + 8, xout4); - _mm_store_si128(output + 9, xout5); - _mm_store_si128(output + 10, xout6); - _mm_store_si128(output + 11, xout7); -} - void cryptonight_av1_aesni(const void *restrict input, size_t size, void *restrict output, struct cryptonight_ctx *restrict ctx) { @@ -249,7 +67,7 @@ void cryptonight_av1_aesni(const void *restrict input, size_t size, void *restri uint64_t hi, lo, cl, ch; cl = ((uint64_t*) &l0[idx0 & 0x1FFFF0])[0]; ch = ((uint64_t*) &l0[idx0 & 0x1FFFF0])[1]; - lo = _umul128(idx0, cl, &hi); + lo = _mulx_u64(idx0, cl, &hi); al0 += hi; ah0 += lo; diff --git a/algo/cryptonight/cryptonight_av2_aesni_stak.c b/algo/cryptonight/cryptonight_av2_aesni_stak.c index a3cefe0b0..152a92c47 100644 --- a/algo/cryptonight/cryptonight_av2_aesni_stak.c +++ b/algo/cryptonight/cryptonight_av2_aesni_stak.c @@ -26,201 +26,19 @@ #include #include "cryptonight.h" +#include "cryptonight_p.h" #include "crypto/c_keccak.h" -#ifdef __GNUC__ -static inline uint64_t _umul128(uint64_t a, uint64_t b, uint64_t* hi) +#ifndef __BMI2__ +static inline uint64_t _mulx_u64(uint64_t a, uint64_t b, uint64_t* hi) { - unsigned __int128 r = (unsigned __int128)a * (unsigned __int128)b; + unsigned __int128 r = (unsigned __int128) a * (unsigned __int128) b; *hi = r >> 64; - return (uint64_t)r; + return (uint64_t) r; } #endif -#define aes_genkey_sub(imm8) \ - __m128i xout1 = _mm_aeskeygenassist_si128(*xout2, (imm8)); \ - xout1 = _mm_shuffle_epi32(xout1, 0xFF); \ - *xout0 = sl_xor(*xout0); \ - *xout0 = _mm_xor_si128(*xout0, xout1); \ - xout1 = _mm_aeskeygenassist_si128(*xout0, 0x00);\ - xout1 = _mm_shuffle_epi32(xout1, 0xAA); \ - *xout2 = sl_xor(*xout2); \ - *xout2 = _mm_xor_si128(*xout2, xout1); \ - - -// This will shift and xor tmp1 into itself as 4 32-bit vals such as -// sl_xor(a1 a2 a3 a4) = a1 (a2^a1) (a3^a2^a1) (a4^a3^a2^a1) -static inline __m128i sl_xor(__m128i tmp1) -{ - __m128i tmp4; - tmp4 = _mm_slli_si128(tmp1, 0x04); - tmp1 = _mm_xor_si128(tmp1, tmp4); - tmp4 = _mm_slli_si128(tmp4, 0x04); - tmp1 = _mm_xor_si128(tmp1, tmp4); - tmp4 = _mm_slli_si128(tmp4, 
0x04); - tmp1 = _mm_xor_si128(tmp1, tmp4); - return tmp1; -} - - -static inline void aes_genkey_sub1(__m128i* xout0, __m128i* xout2) -{ - aes_genkey_sub(0x1) -} - - -static inline void aes_genkey_sub2(__m128i* xout0, __m128i* xout2) -{ - aes_genkey_sub(0x2) -} - - -static inline void aes_genkey_sub4(__m128i* xout0, __m128i* xout2) -{ - aes_genkey_sub(0x4) -} - - -static inline void aes_genkey_sub8(__m128i* xout0, __m128i* xout2) -{ - aes_genkey_sub(0x8) -} - - -static inline void aes_genkey(const __m128i* memory, __m128i* k0, __m128i* k1, __m128i* k2, __m128i* k3, __m128i* k4, __m128i* k5, __m128i* k6, __m128i* k7, __m128i* k8, __m128i* k9) -{ - __m128i xout0 = _mm_load_si128(memory); - __m128i xout2 = _mm_load_si128(memory + 1); - *k0 = xout0; - *k1 = xout2; - - aes_genkey_sub1(&xout0, &xout2); - *k2 = xout0; - *k3 = xout2; - - aes_genkey_sub2(&xout0, &xout2); - *k4 = xout0; - *k5 = xout2; - - aes_genkey_sub4(&xout0, &xout2); - *k6 = xout0; - *k7 = xout2; - - aes_genkey_sub8(&xout0, &xout2); - *k8 = xout0; - *k9 = xout2; -} - - -static inline void aes_round(__m128i key, __m128i* x0, __m128i* x1, __m128i* x2, __m128i* x3, __m128i* x4, __m128i* x5, __m128i* x6, __m128i* x7) -{ - *x0 = _mm_aesenc_si128(*x0, key); - *x1 = _mm_aesenc_si128(*x1, key); - *x2 = _mm_aesenc_si128(*x2, key); - *x3 = _mm_aesenc_si128(*x3, key); - *x4 = _mm_aesenc_si128(*x4, key); - *x5 = _mm_aesenc_si128(*x5, key); - *x6 = _mm_aesenc_si128(*x6, key); - *x7 = _mm_aesenc_si128(*x7, key); -} - - -static inline void cn_explode_scratchpad(const __m128i* input, __m128i* output) -{ - // This is more than we have registers, compiler will assign 2 keys on the stack - __m128i xin0, xin1, xin2, xin3, xin4, xin5, xin6, xin7; - __m128i k0, k1, k2, k3, k4, k5, k6, k7, k8, k9; - - aes_genkey(input, &k0, &k1, &k2, &k3, &k4, &k5, &k6, &k7, &k8, &k9); - - xin0 = _mm_load_si128(input + 4); - xin1 = _mm_load_si128(input + 5); - xin2 = _mm_load_si128(input + 6); - xin3 = _mm_load_si128(input + 7); - xin4 = _mm_load_si128(input + 8); - xin5 = _mm_load_si128(input + 9); - xin6 = _mm_load_si128(input + 10); - xin7 = _mm_load_si128(input + 11); - - for (size_t i = 0; i < MEMORY / sizeof(__m128i); i += 8) { - aes_round(k0, &xin0, &xin1, &xin2, &xin3, &xin4, &xin5, &xin6, &xin7); - aes_round(k1, &xin0, &xin1, &xin2, &xin3, &xin4, &xin5, &xin6, &xin7); - aes_round(k2, &xin0, &xin1, &xin2, &xin3, &xin4, &xin5, &xin6, &xin7); - aes_round(k3, &xin0, &xin1, &xin2, &xin3, &xin4, &xin5, &xin6, &xin7); - aes_round(k4, &xin0, &xin1, &xin2, &xin3, &xin4, &xin5, &xin6, &xin7); - aes_round(k5, &xin0, &xin1, &xin2, &xin3, &xin4, &xin5, &xin6, &xin7); - aes_round(k6, &xin0, &xin1, &xin2, &xin3, &xin4, &xin5, &xin6, &xin7); - aes_round(k7, &xin0, &xin1, &xin2, &xin3, &xin4, &xin5, &xin6, &xin7); - aes_round(k8, &xin0, &xin1, &xin2, &xin3, &xin4, &xin5, &xin6, &xin7); - aes_round(k9, &xin0, &xin1, &xin2, &xin3, &xin4, &xin5, &xin6, &xin7); - - _mm_store_si128(output + i + 0, xin0); - _mm_store_si128(output + i + 1, xin1); - _mm_store_si128(output + i + 2, xin2); - _mm_store_si128(output + i + 3, xin3); - _mm_prefetch((const char*)output + i + 0, _MM_HINT_T2); - _mm_store_si128(output + i + 4, xin4); - _mm_store_si128(output + i + 5, xin5); - _mm_store_si128(output + i + 6, xin6); - _mm_store_si128(output + i + 7, xin7); - _mm_prefetch((const char*)output + i + 4, _MM_HINT_T2); - } -} - - -static inline void cn_implode_scratchpad(const __m128i* input, __m128i* output) -{ - // This is more than we have registers, compiler will assign 2 keys on the stack - 
__m128i xout0, xout1, xout2, xout3, xout4, xout5, xout6, xout7; - __m128i k0, k1, k2, k3, k4, k5, k6, k7, k8, k9; - - aes_genkey(output + 2, &k0, &k1, &k2, &k3, &k4, &k5, &k6, &k7, &k8, &k9); - - xout0 = _mm_load_si128(output + 4); - xout1 = _mm_load_si128(output + 5); - xout2 = _mm_load_si128(output + 6); - xout3 = _mm_load_si128(output + 7); - xout4 = _mm_load_si128(output + 8); - xout5 = _mm_load_si128(output + 9); - xout6 = _mm_load_si128(output + 10); - xout7 = _mm_load_si128(output + 11); - - for (size_t i = 0; i < MEMORY / sizeof(__m128i); i += 8) - { - _mm_prefetch((const char*)input + i + 0, _MM_HINT_NTA); - xout0 = _mm_xor_si128(_mm_load_si128(input + i + 0), xout0); - xout1 = _mm_xor_si128(_mm_load_si128(input + i + 1), xout1); - xout2 = _mm_xor_si128(_mm_load_si128(input + i + 2), xout2); - xout3 = _mm_xor_si128(_mm_load_si128(input + i + 3), xout3); - _mm_prefetch((const char*)input + i + 4, _MM_HINT_NTA); - xout4 = _mm_xor_si128(_mm_load_si128(input + i + 4), xout4); - xout5 = _mm_xor_si128(_mm_load_si128(input + i + 5), xout5); - xout6 = _mm_xor_si128(_mm_load_si128(input + i + 6), xout6); - xout7 = _mm_xor_si128(_mm_load_si128(input + i + 7), xout7); - - aes_round(k0, &xout0, &xout1, &xout2, &xout3, &xout4, &xout5, &xout6, &xout7); - aes_round(k1, &xout0, &xout1, &xout2, &xout3, &xout4, &xout5, &xout6, &xout7); - aes_round(k2, &xout0, &xout1, &xout2, &xout3, &xout4, &xout5, &xout6, &xout7); - aes_round(k3, &xout0, &xout1, &xout2, &xout3, &xout4, &xout5, &xout6, &xout7); - aes_round(k4, &xout0, &xout1, &xout2, &xout3, &xout4, &xout5, &xout6, &xout7); - aes_round(k5, &xout0, &xout1, &xout2, &xout3, &xout4, &xout5, &xout6, &xout7); - aes_round(k6, &xout0, &xout1, &xout2, &xout3, &xout4, &xout5, &xout6, &xout7); - aes_round(k7, &xout0, &xout1, &xout2, &xout3, &xout4, &xout5, &xout6, &xout7); - aes_round(k8, &xout0, &xout1, &xout2, &xout3, &xout4, &xout5, &xout6, &xout7); - aes_round(k9, &xout0, &xout1, &xout2, &xout3, &xout4, &xout5, &xout6, &xout7); - } - - _mm_store_si128(output + 4, xout0); - _mm_store_si128(output + 5, xout1); - _mm_store_si128(output + 6, xout2); - _mm_store_si128(output + 7, xout3); - _mm_store_si128(output + 8, xout4); - _mm_store_si128(output + 9, xout5); - _mm_store_si128(output + 10, xout6); - _mm_store_si128(output + 11, xout7); -} - void cryptonight_av2_aesni_stak(const void *restrict input, size_t size, void *restrict output, struct cryptonight_ctx *restrict ctx) { @@ -251,7 +69,7 @@ void cryptonight_av2_aesni_stak(const void *restrict input, size_t size, void *r uint64_t hi, lo, cl, ch; cl = ((uint64_t*)&l0[idx0 & 0x1FFFF0])[0]; ch = ((uint64_t*)&l0[idx0 & 0x1FFFF0])[1]; - lo = _umul128(idx0, cl, &hi); + lo = _mulx_u64(idx0, cl, &hi); al0 += hi; ah0 += lo; diff --git a/algo/cryptonight/cryptonight_av3_aesni_bmi2.c b/algo/cryptonight/cryptonight_av3_aesni_bmi2.c index d532bce8c..6ebfb7ef3 100644 --- a/algo/cryptonight/cryptonight_av3_aesni_bmi2.c +++ b/algo/cryptonight/cryptonight_av3_aesni_bmi2.c @@ -26,193 +26,10 @@ #include #include "cryptonight.h" +#include "cryptonight_p.h" #include "crypto/c_keccak.h" -#define aes_genkey_sub(imm8) \ - __m128i xout1 = _mm_aeskeygenassist_si128(*xout2, (imm8)); \ - xout1 = _mm_shuffle_epi32(xout1, 0xFF); \ - *xout0 = sl_xor(*xout0); \ - *xout0 = _mm_xor_si128(*xout0, xout1); \ - xout1 = _mm_aeskeygenassist_si128(*xout0, 0x00);\ - xout1 = _mm_shuffle_epi32(xout1, 0xAA); \ - *xout2 = sl_xor(*xout2); \ - *xout2 = _mm_xor_si128(*xout2, xout1); \ - - -// This will shift and xor tmp1 into itself as 4 32-bit vals such 
as -// sl_xor(a1 a2 a3 a4) = a1 (a2^a1) (a3^a2^a1) (a4^a3^a2^a1) -static inline __m128i sl_xor(__m128i tmp1) -{ - __m128i tmp4; - tmp4 = _mm_slli_si128(tmp1, 0x04); - tmp1 = _mm_xor_si128(tmp1, tmp4); - tmp4 = _mm_slli_si128(tmp4, 0x04); - tmp1 = _mm_xor_si128(tmp1, tmp4); - tmp4 = _mm_slli_si128(tmp4, 0x04); - tmp1 = _mm_xor_si128(tmp1, tmp4); - return tmp1; -} - - -static inline void aes_genkey_sub1(__m128i* xout0, __m128i* xout2) -{ - aes_genkey_sub(0x1) -} - - -static inline void aes_genkey_sub2(__m128i* xout0, __m128i* xout2) -{ - aes_genkey_sub(0x2) -} - - -static inline void aes_genkey_sub4(__m128i* xout0, __m128i* xout2) -{ - aes_genkey_sub(0x4) -} - - -static inline void aes_genkey_sub8(__m128i* xout0, __m128i* xout2) -{ - aes_genkey_sub(0x8) -} - - -static inline void aes_genkey(const __m128i* memory, __m128i* k0, __m128i* k1, __m128i* k2, __m128i* k3, __m128i* k4, __m128i* k5, __m128i* k6, __m128i* k7, __m128i* k8, __m128i* k9) -{ - __m128i xout0 = _mm_load_si128(memory); - __m128i xout2 = _mm_load_si128(memory + 1); - *k0 = xout0; - *k1 = xout2; - - aes_genkey_sub1(&xout0, &xout2); - *k2 = xout0; - *k3 = xout2; - - aes_genkey_sub2(&xout0, &xout2); - *k4 = xout0; - *k5 = xout2; - - aes_genkey_sub4(&xout0, &xout2); - *k6 = xout0; - *k7 = xout2; - - aes_genkey_sub8(&xout0, &xout2); - *k8 = xout0; - *k9 = xout2; -} - - -static inline void aes_round(__m128i key, __m128i* x0, __m128i* x1, __m128i* x2, __m128i* x3, __m128i* x4, __m128i* x5, __m128i* x6, __m128i* x7) -{ - *x0 = _mm_aesenc_si128(*x0, key); - *x1 = _mm_aesenc_si128(*x1, key); - *x2 = _mm_aesenc_si128(*x2, key); - *x3 = _mm_aesenc_si128(*x3, key); - *x4 = _mm_aesenc_si128(*x4, key); - *x5 = _mm_aesenc_si128(*x5, key); - *x6 = _mm_aesenc_si128(*x6, key); - *x7 = _mm_aesenc_si128(*x7, key); -} - - -static inline void cn_explode_scratchpad(const __m128i* input, __m128i* output) -{ - // This is more than we have registers, compiler will assign 2 keys on the stack - __m128i xin0, xin1, xin2, xin3, xin4, xin5, xin6, xin7; - __m128i k0, k1, k2, k3, k4, k5, k6, k7, k8, k9; - - aes_genkey(input, &k0, &k1, &k2, &k3, &k4, &k5, &k6, &k7, &k8, &k9); - - xin0 = _mm_load_si128(input + 4); - xin1 = _mm_load_si128(input + 5); - xin2 = _mm_load_si128(input + 6); - xin3 = _mm_load_si128(input + 7); - xin4 = _mm_load_si128(input + 8); - xin5 = _mm_load_si128(input + 9); - xin6 = _mm_load_si128(input + 10); - xin7 = _mm_load_si128(input + 11); - - for (size_t i = 0; i < MEMORY / sizeof(__m128i); i += 8) { - aes_round(k0, &xin0, &xin1, &xin2, &xin3, &xin4, &xin5, &xin6, &xin7); - aes_round(k1, &xin0, &xin1, &xin2, &xin3, &xin4, &xin5, &xin6, &xin7); - aes_round(k2, &xin0, &xin1, &xin2, &xin3, &xin4, &xin5, &xin6, &xin7); - aes_round(k3, &xin0, &xin1, &xin2, &xin3, &xin4, &xin5, &xin6, &xin7); - aes_round(k4, &xin0, &xin1, &xin2, &xin3, &xin4, &xin5, &xin6, &xin7); - aes_round(k5, &xin0, &xin1, &xin2, &xin3, &xin4, &xin5, &xin6, &xin7); - aes_round(k6, &xin0, &xin1, &xin2, &xin3, &xin4, &xin5, &xin6, &xin7); - aes_round(k7, &xin0, &xin1, &xin2, &xin3, &xin4, &xin5, &xin6, &xin7); - aes_round(k8, &xin0, &xin1, &xin2, &xin3, &xin4, &xin5, &xin6, &xin7); - aes_round(k9, &xin0, &xin1, &xin2, &xin3, &xin4, &xin5, &xin6, &xin7); - - _mm_store_si128(output + i + 0, xin0); - _mm_store_si128(output + i + 1, xin1); - _mm_store_si128(output + i + 2, xin2); - _mm_store_si128(output + i + 3, xin3); - _mm_prefetch((const char*)output + i + 0, _MM_HINT_T2); - _mm_store_si128(output + i + 4, xin4); - _mm_store_si128(output + i + 5, xin5); - 
_mm_store_si128(output + i + 6, xin6); - _mm_store_si128(output + i + 7, xin7); - _mm_prefetch((const char*)output + i + 4, _MM_HINT_T2); - } -} - - -static inline void cn_implode_scratchpad(const __m128i* input, __m128i* output) -{ - // This is more than we have registers, compiler will assign 2 keys on the stack - __m128i xout0, xout1, xout2, xout3, xout4, xout5, xout6, xout7; - __m128i k0, k1, k2, k3, k4, k5, k6, k7, k8, k9; - - aes_genkey(output + 2, &k0, &k1, &k2, &k3, &k4, &k5, &k6, &k7, &k8, &k9); - - xout0 = _mm_load_si128(output + 4); - xout1 = _mm_load_si128(output + 5); - xout2 = _mm_load_si128(output + 6); - xout3 = _mm_load_si128(output + 7); - xout4 = _mm_load_si128(output + 8); - xout5 = _mm_load_si128(output + 9); - xout6 = _mm_load_si128(output + 10); - xout7 = _mm_load_si128(output + 11); - - for (size_t i = 0; i < MEMORY / sizeof(__m128i); i += 8) - { - _mm_prefetch((const char*)input + i + 0, _MM_HINT_NTA); - xout0 = _mm_xor_si128(_mm_load_si128(input + i + 0), xout0); - xout1 = _mm_xor_si128(_mm_load_si128(input + i + 1), xout1); - xout2 = _mm_xor_si128(_mm_load_si128(input + i + 2), xout2); - xout3 = _mm_xor_si128(_mm_load_si128(input + i + 3), xout3); - _mm_prefetch((const char*)input + i + 4, _MM_HINT_NTA); - xout4 = _mm_xor_si128(_mm_load_si128(input + i + 4), xout4); - xout5 = _mm_xor_si128(_mm_load_si128(input + i + 5), xout5); - xout6 = _mm_xor_si128(_mm_load_si128(input + i + 6), xout6); - xout7 = _mm_xor_si128(_mm_load_si128(input + i + 7), xout7); - - aes_round(k0, &xout0, &xout1, &xout2, &xout3, &xout4, &xout5, &xout6, &xout7); - aes_round(k1, &xout0, &xout1, &xout2, &xout3, &xout4, &xout5, &xout6, &xout7); - aes_round(k2, &xout0, &xout1, &xout2, &xout3, &xout4, &xout5, &xout6, &xout7); - aes_round(k3, &xout0, &xout1, &xout2, &xout3, &xout4, &xout5, &xout6, &xout7); - aes_round(k4, &xout0, &xout1, &xout2, &xout3, &xout4, &xout5, &xout6, &xout7); - aes_round(k5, &xout0, &xout1, &xout2, &xout3, &xout4, &xout5, &xout6, &xout7); - aes_round(k6, &xout0, &xout1, &xout2, &xout3, &xout4, &xout5, &xout6, &xout7); - aes_round(k7, &xout0, &xout1, &xout2, &xout3, &xout4, &xout5, &xout6, &xout7); - aes_round(k8, &xout0, &xout1, &xout2, &xout3, &xout4, &xout5, &xout6, &xout7); - aes_round(k9, &xout0, &xout1, &xout2, &xout3, &xout4, &xout5, &xout6, &xout7); - } - - _mm_store_si128(output + 4, xout0); - _mm_store_si128(output + 5, xout1); - _mm_store_si128(output + 6, xout2); - _mm_store_si128(output + 7, xout3); - _mm_store_si128(output + 8, xout4); - _mm_store_si128(output + 9, xout5); - _mm_store_si128(output + 10, xout6); - _mm_store_si128(output + 11, xout7); -} - - void cryptonight_av3_aesni_bmi2(const void *restrict input, size_t size, void *restrict output, struct cryptonight_ctx *restrict ctx) { keccak((const uint8_t *) input, size, ctx->state, 200); diff --git a/algo/cryptonight/cryptonight_av5_aesni_experimental.c b/algo/cryptonight/cryptonight_av5_aesni_experimental.c index cad6290f7..21e823386 100644 --- a/algo/cryptonight/cryptonight_av5_aesni_experimental.c +++ b/algo/cryptonight/cryptonight_av5_aesni_experimental.c @@ -26,193 +26,10 @@ #include #include "cryptonight.h" +#include "cryptonight_p.h" #include "crypto/c_keccak.h" -#define aes_genkey_sub(imm8) \ - __m128i xout1 = _mm_aeskeygenassist_si128(*xout2, (imm8)); \ - xout1 = _mm_shuffle_epi32(xout1, 0xFF); \ - *xout0 = sl_xor(*xout0); \ - *xout0 = _mm_xor_si128(*xout0, xout1); \ - xout1 = _mm_aeskeygenassist_si128(*xout0, 0x00);\ - xout1 = _mm_shuffle_epi32(xout1, 0xAA); \ - *xout2 = sl_xor(*xout2); \ - 
*xout2 = _mm_xor_si128(*xout2, xout1); \ - - -// This will shift and xor tmp1 into itself as 4 32-bit vals such as -// sl_xor(a1 a2 a3 a4) = a1 (a2^a1) (a3^a2^a1) (a4^a3^a2^a1) -static inline __m128i sl_xor(__m128i tmp1) -{ - __m128i tmp4; - tmp4 = _mm_slli_si128(tmp1, 0x04); - tmp1 = _mm_xor_si128(tmp1, tmp4); - tmp4 = _mm_slli_si128(tmp4, 0x04); - tmp1 = _mm_xor_si128(tmp1, tmp4); - tmp4 = _mm_slli_si128(tmp4, 0x04); - tmp1 = _mm_xor_si128(tmp1, tmp4); - return tmp1; -} - - -static inline void aes_genkey_sub1(__m128i* xout0, __m128i* xout2) -{ - aes_genkey_sub(0x1) -} - - -static inline void aes_genkey_sub2(__m128i* xout0, __m128i* xout2) -{ - aes_genkey_sub(0x2) -} - - -static inline void aes_genkey_sub4(__m128i* xout0, __m128i* xout2) -{ - aes_genkey_sub(0x4) -} - - -static inline void aes_genkey_sub8(__m128i* xout0, __m128i* xout2) -{ - aes_genkey_sub(0x8) -} - - -static inline void aes_genkey(const __m128i* memory, __m128i* k0, __m128i* k1, __m128i* k2, __m128i* k3, __m128i* k4, __m128i* k5, __m128i* k6, __m128i* k7, __m128i* k8, __m128i* k9) -{ - __m128i xout0 = _mm_load_si128(memory); - __m128i xout2 = _mm_load_si128(memory + 1); - *k0 = xout0; - *k1 = xout2; - - aes_genkey_sub1(&xout0, &xout2); - *k2 = xout0; - *k3 = xout2; - - aes_genkey_sub2(&xout0, &xout2); - *k4 = xout0; - *k5 = xout2; - - aes_genkey_sub4(&xout0, &xout2); - *k6 = xout0; - *k7 = xout2; - - aes_genkey_sub8(&xout0, &xout2); - *k8 = xout0; - *k9 = xout2; -} - - -static inline void aes_round(__m128i key, __m128i* x0, __m128i* x1, __m128i* x2, __m128i* x3, __m128i* x4, __m128i* x5, __m128i* x6, __m128i* x7) -{ - *x0 = _mm_aesenc_si128(*x0, key); - *x1 = _mm_aesenc_si128(*x1, key); - *x2 = _mm_aesenc_si128(*x2, key); - *x3 = _mm_aesenc_si128(*x3, key); - *x4 = _mm_aesenc_si128(*x4, key); - *x5 = _mm_aesenc_si128(*x5, key); - *x6 = _mm_aesenc_si128(*x6, key); - *x7 = _mm_aesenc_si128(*x7, key); -} - - -static inline void cn_explode_scratchpad(const __m128i* input, __m128i* output) -{ - // This is more than we have registers, compiler will assign 2 keys on the stack - __m128i xin0, xin1, xin2, xin3, xin4, xin5, xin6, xin7; - __m128i k0, k1, k2, k3, k4, k5, k6, k7, k8, k9; - - aes_genkey(input, &k0, &k1, &k2, &k3, &k4, &k5, &k6, &k7, &k8, &k9); - - xin0 = _mm_load_si128(input + 4); - xin1 = _mm_load_si128(input + 5); - xin2 = _mm_load_si128(input + 6); - xin3 = _mm_load_si128(input + 7); - xin4 = _mm_load_si128(input + 8); - xin5 = _mm_load_si128(input + 9); - xin6 = _mm_load_si128(input + 10); - xin7 = _mm_load_si128(input + 11); - - for (size_t i = 0; i < MEMORY / sizeof(__m128i); i += 8) { - aes_round(k0, &xin0, &xin1, &xin2, &xin3, &xin4, &xin5, &xin6, &xin7); - aes_round(k1, &xin0, &xin1, &xin2, &xin3, &xin4, &xin5, &xin6, &xin7); - aes_round(k2, &xin0, &xin1, &xin2, &xin3, &xin4, &xin5, &xin6, &xin7); - aes_round(k3, &xin0, &xin1, &xin2, &xin3, &xin4, &xin5, &xin6, &xin7); - aes_round(k4, &xin0, &xin1, &xin2, &xin3, &xin4, &xin5, &xin6, &xin7); - aes_round(k5, &xin0, &xin1, &xin2, &xin3, &xin4, &xin5, &xin6, &xin7); - aes_round(k6, &xin0, &xin1, &xin2, &xin3, &xin4, &xin5, &xin6, &xin7); - aes_round(k7, &xin0, &xin1, &xin2, &xin3, &xin4, &xin5, &xin6, &xin7); - aes_round(k8, &xin0, &xin1, &xin2, &xin3, &xin4, &xin5, &xin6, &xin7); - aes_round(k9, &xin0, &xin1, &xin2, &xin3, &xin4, &xin5, &xin6, &xin7); - - _mm_store_si128(output + i + 0, xin0); - _mm_store_si128(output + i + 1, xin1); - _mm_store_si128(output + i + 2, xin2); - _mm_store_si128(output + i + 3, xin3); - _mm_prefetch((const char*)output + i + 0, 
_MM_HINT_T2); - _mm_store_si128(output + i + 4, xin4); - _mm_store_si128(output + i + 5, xin5); - _mm_store_si128(output + i + 6, xin6); - _mm_store_si128(output + i + 7, xin7); - _mm_prefetch((const char*)output + i + 4, _MM_HINT_T2); - } -} - - -static inline void cn_implode_scratchpad(const __m128i* input, __m128i* output) -{ - // This is more than we have registers, compiler will assign 2 keys on the stack - __m128i xout0, xout1, xout2, xout3, xout4, xout5, xout6, xout7; - __m128i k0, k1, k2, k3, k4, k5, k6, k7, k8, k9; - - aes_genkey(output + 2, &k0, &k1, &k2, &k3, &k4, &k5, &k6, &k7, &k8, &k9); - - xout0 = _mm_load_si128(output + 4); - xout1 = _mm_load_si128(output + 5); - xout2 = _mm_load_si128(output + 6); - xout3 = _mm_load_si128(output + 7); - xout4 = _mm_load_si128(output + 8); - xout5 = _mm_load_si128(output + 9); - xout6 = _mm_load_si128(output + 10); - xout7 = _mm_load_si128(output + 11); - - for (size_t i = 0; i < MEMORY / sizeof(__m128i); i += 8) - { - _mm_prefetch((const char*)input + i + 0, _MM_HINT_NTA); - xout0 = _mm_xor_si128(_mm_load_si128(input + i + 0), xout0); - xout1 = _mm_xor_si128(_mm_load_si128(input + i + 1), xout1); - xout2 = _mm_xor_si128(_mm_load_si128(input + i + 2), xout2); - xout3 = _mm_xor_si128(_mm_load_si128(input + i + 3), xout3); - _mm_prefetch((const char*)input + i + 4, _MM_HINT_NTA); - xout4 = _mm_xor_si128(_mm_load_si128(input + i + 4), xout4); - xout5 = _mm_xor_si128(_mm_load_si128(input + i + 5), xout5); - xout6 = _mm_xor_si128(_mm_load_si128(input + i + 6), xout6); - xout7 = _mm_xor_si128(_mm_load_si128(input + i + 7), xout7); - - aes_round(k0, &xout0, &xout1, &xout2, &xout3, &xout4, &xout5, &xout6, &xout7); - aes_round(k1, &xout0, &xout1, &xout2, &xout3, &xout4, &xout5, &xout6, &xout7); - aes_round(k2, &xout0, &xout1, &xout2, &xout3, &xout4, &xout5, &xout6, &xout7); - aes_round(k3, &xout0, &xout1, &xout2, &xout3, &xout4, &xout5, &xout6, &xout7); - aes_round(k4, &xout0, &xout1, &xout2, &xout3, &xout4, &xout5, &xout6, &xout7); - aes_round(k5, &xout0, &xout1, &xout2, &xout3, &xout4, &xout5, &xout6, &xout7); - aes_round(k6, &xout0, &xout1, &xout2, &xout3, &xout4, &xout5, &xout6, &xout7); - aes_round(k7, &xout0, &xout1, &xout2, &xout3, &xout4, &xout5, &xout6, &xout7); - aes_round(k8, &xout0, &xout1, &xout2, &xout3, &xout4, &xout5, &xout6, &xout7); - aes_round(k9, &xout0, &xout1, &xout2, &xout3, &xout4, &xout5, &xout6, &xout7); - } - - _mm_store_si128(output + 4, xout0); - _mm_store_si128(output + 5, xout1); - _mm_store_si128(output + 6, xout2); - _mm_store_si128(output + 7, xout3); - _mm_store_si128(output + 8, xout4); - _mm_store_si128(output + 9, xout5); - _mm_store_si128(output + 10, xout6); - _mm_store_si128(output + 11, xout7); -} - - void cryptonight_av5_aesni_experimental(const void *restrict input, size_t size, void *restrict output, struct cryptonight_ctx *restrict ctx) { const uint8_t* memory = ctx->memory; diff --git a/algo/cryptonight/cryptonight_p.h b/algo/cryptonight/cryptonight_p.h new file mode 100644 index 000000000..2f533deaa --- /dev/null +++ b/algo/cryptonight/cryptonight_p.h @@ -0,0 +1,215 @@ +/* XMRig + * Copyright 2010 Jeff Garzik + * Copyright 2012-2014 pooler + * Copyright 2014 Lucas Jones + * Copyright 2014-2016 Wolf9466 + * Copyright 2016 Jay D Dee + * Copyright 2017 fireice-uk + * Copyright 2016-2017 XMRig + * + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the 
License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#ifndef __CRYPTONIGHT_P_H__ +#define __CRYPTONIGHT_P_H__ + +#include + + +#define aes_genkey_sub(imm8) \ + __m128i xout1 = _mm_aeskeygenassist_si128(*xout2, (imm8)); \ + xout1 = _mm_shuffle_epi32(xout1, 0xFF); \ + *xout0 = sl_xor(*xout0); \ + *xout0 = _mm_xor_si128(*xout0, xout1); \ + xout1 = _mm_aeskeygenassist_si128(*xout0, 0x00);\ + xout1 = _mm_shuffle_epi32(xout1, 0xAA); \ + *xout2 = sl_xor(*xout2); \ + *xout2 = _mm_xor_si128(*xout2, xout1); \ + + +// This will shift and xor tmp1 into itself as 4 32-bit vals such as +// sl_xor(a1 a2 a3 a4) = a1 (a2^a1) (a3^a2^a1) (a4^a3^a2^a1) +inline __m128i sl_xor(__m128i tmp1) +{ + __m128i tmp4; + tmp4 = _mm_slli_si128(tmp1, 0x04); + tmp1 = _mm_xor_si128(tmp1, tmp4); + tmp4 = _mm_slli_si128(tmp4, 0x04); + tmp1 = _mm_xor_si128(tmp1, tmp4); + tmp4 = _mm_slli_si128(tmp4, 0x04); + tmp1 = _mm_xor_si128(tmp1, tmp4); + return tmp1; +} + + +inline void aes_genkey_sub1(__m128i* xout0, __m128i* xout2) +{ + aes_genkey_sub(0x1) +} + + +inline void aes_genkey_sub2(__m128i* xout0, __m128i* xout2) +{ + aes_genkey_sub(0x2) +} + + +inline void aes_genkey_sub4(__m128i* xout0, __m128i* xout2) +{ + aes_genkey_sub(0x4) +} + + +inline void aes_genkey_sub8(__m128i* xout0, __m128i* xout2) +{ + aes_genkey_sub(0x8) +} + + +inline void aes_round(__m128i key, __m128i* x0, __m128i* x1, __m128i* x2, __m128i* x3, __m128i* x4, __m128i* x5, __m128i* x6, __m128i* x7) +{ + *x0 = _mm_aesenc_si128(*x0, key); + *x1 = _mm_aesenc_si128(*x1, key); + *x2 = _mm_aesenc_si128(*x2, key); + *x3 = _mm_aesenc_si128(*x3, key); + *x4 = _mm_aesenc_si128(*x4, key); + *x5 = _mm_aesenc_si128(*x5, key); + *x6 = _mm_aesenc_si128(*x6, key); + *x7 = _mm_aesenc_si128(*x7, key); +} + + +inline void aes_genkey(const __m128i* memory, __m128i* k0, __m128i* k1, __m128i* k2, __m128i* k3, __m128i* k4, __m128i* k5, __m128i* k6, __m128i* k7, __m128i* k8, __m128i* k9) +{ + __m128i xout0 = _mm_load_si128(memory); + __m128i xout2 = _mm_load_si128(memory + 1); + *k0 = xout0; + *k1 = xout2; + + aes_genkey_sub1(&xout0, &xout2); + *k2 = xout0; + *k3 = xout2; + + aes_genkey_sub2(&xout0, &xout2); + *k4 = xout0; + *k5 = xout2; + + aes_genkey_sub4(&xout0, &xout2); + *k6 = xout0; + *k7 = xout2; + + aes_genkey_sub8(&xout0, &xout2); + *k8 = xout0; + *k9 = xout2; +} + + +inline void cn_explode_scratchpad(const __m128i* input, __m128i* output) +{ + // This is more than we have registers, compiler will assign 2 keys on the stack + __m128i xin0, xin1, xin2, xin3, xin4, xin5, xin6, xin7; + __m128i k0, k1, k2, k3, k4, k5, k6, k7, k8, k9; + + aes_genkey(input, &k0, &k1, &k2, &k3, &k4, &k5, &k6, &k7, &k8, &k9); + + xin0 = _mm_load_si128(input + 4); + xin1 = _mm_load_si128(input + 5); + xin2 = _mm_load_si128(input + 6); + xin3 = _mm_load_si128(input + 7); + xin4 = _mm_load_si128(input + 8); + xin5 = _mm_load_si128(input + 9); + xin6 = _mm_load_si128(input + 10); + xin7 = _mm_load_si128(input + 11); + + for (size_t i = 0; __builtin_expect(i < MEMORY / sizeof(__m128i), 1); i += 8) { + aes_round(k0, &xin0, &xin1, &xin2, &xin3, &xin4, &xin5, &xin6, &xin7); + aes_round(k1, &xin0, &xin1, &xin2, &xin3, &xin4, &xin5, &xin6, 
&xin7); + aes_round(k2, &xin0, &xin1, &xin2, &xin3, &xin4, &xin5, &xin6, &xin7); + aes_round(k3, &xin0, &xin1, &xin2, &xin3, &xin4, &xin5, &xin6, &xin7); + aes_round(k4, &xin0, &xin1, &xin2, &xin3, &xin4, &xin5, &xin6, &xin7); + aes_round(k5, &xin0, &xin1, &xin2, &xin3, &xin4, &xin5, &xin6, &xin7); + aes_round(k6, &xin0, &xin1, &xin2, &xin3, &xin4, &xin5, &xin6, &xin7); + aes_round(k7, &xin0, &xin1, &xin2, &xin3, &xin4, &xin5, &xin6, &xin7); + aes_round(k8, &xin0, &xin1, &xin2, &xin3, &xin4, &xin5, &xin6, &xin7); + aes_round(k9, &xin0, &xin1, &xin2, &xin3, &xin4, &xin5, &xin6, &xin7); + + _mm_store_si128(output + i + 0, xin0); + _mm_store_si128(output + i + 1, xin1); + _mm_store_si128(output + i + 2, xin2); + _mm_store_si128(output + i + 3, xin3); + _mm_prefetch((const char*)output + i + 0, _MM_HINT_T2); + _mm_store_si128(output + i + 4, xin4); + _mm_store_si128(output + i + 5, xin5); + _mm_store_si128(output + i + 6, xin6); + _mm_store_si128(output + i + 7, xin7); + _mm_prefetch((const char*)output + i + 4, _MM_HINT_T2); + } +} + + +inline void cn_implode_scratchpad(const __m128i* input, __m128i* output) +{ + // This is more than we have registers, compiler will assign 2 keys on the stack + __m128i xout0, xout1, xout2, xout3, xout4, xout5, xout6, xout7; + __m128i k0, k1, k2, k3, k4, k5, k6, k7, k8, k9; + + aes_genkey(output + 2, &k0, &k1, &k2, &k3, &k4, &k5, &k6, &k7, &k8, &k9); + + xout0 = _mm_load_si128(output + 4); + xout1 = _mm_load_si128(output + 5); + xout2 = _mm_load_si128(output + 6); + xout3 = _mm_load_si128(output + 7); + xout4 = _mm_load_si128(output + 8); + xout5 = _mm_load_si128(output + 9); + xout6 = _mm_load_si128(output + 10); + xout7 = _mm_load_si128(output + 11); + + for (size_t i = 0; __builtin_expect(i < MEMORY / sizeof(__m128i), 1); i += 8) + { + _mm_prefetch((const char*)input + i + 0, _MM_HINT_NTA); + xout0 = _mm_xor_si128(_mm_load_si128(input + i + 0), xout0); + xout1 = _mm_xor_si128(_mm_load_si128(input + i + 1), xout1); + xout2 = _mm_xor_si128(_mm_load_si128(input + i + 2), xout2); + xout3 = _mm_xor_si128(_mm_load_si128(input + i + 3), xout3); + _mm_prefetch((const char*)input + i + 4, _MM_HINT_NTA); + xout4 = _mm_xor_si128(_mm_load_si128(input + i + 4), xout4); + xout5 = _mm_xor_si128(_mm_load_si128(input + i + 5), xout5); + xout6 = _mm_xor_si128(_mm_load_si128(input + i + 6), xout6); + xout7 = _mm_xor_si128(_mm_load_si128(input + i + 7), xout7); + + aes_round(k0, &xout0, &xout1, &xout2, &xout3, &xout4, &xout5, &xout6, &xout7); + aes_round(k1, &xout0, &xout1, &xout2, &xout3, &xout4, &xout5, &xout6, &xout7); + aes_round(k2, &xout0, &xout1, &xout2, &xout3, &xout4, &xout5, &xout6, &xout7); + aes_round(k3, &xout0, &xout1, &xout2, &xout3, &xout4, &xout5, &xout6, &xout7); + aes_round(k4, &xout0, &xout1, &xout2, &xout3, &xout4, &xout5, &xout6, &xout7); + aes_round(k5, &xout0, &xout1, &xout2, &xout3, &xout4, &xout5, &xout6, &xout7); + aes_round(k6, &xout0, &xout1, &xout2, &xout3, &xout4, &xout5, &xout6, &xout7); + aes_round(k7, &xout0, &xout1, &xout2, &xout3, &xout4, &xout5, &xout6, &xout7); + aes_round(k8, &xout0, &xout1, &xout2, &xout3, &xout4, &xout5, &xout6, &xout7); + aes_round(k9, &xout0, &xout1, &xout2, &xout3, &xout4, &xout5, &xout6, &xout7); + } + + _mm_store_si128(output + 4, xout0); + _mm_store_si128(output + 5, xout1); + _mm_store_si128(output + 6, xout2); + _mm_store_si128(output + 7, xout3); + _mm_store_si128(output + 8, xout4); + _mm_store_si128(output + 9, xout5); + _mm_store_si128(output + 10, xout6); + _mm_store_si128(output + 11, xout7); +} + + 
+#endif /* __CRYPTONIGHT_P_H__ */
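
Note on the non-BMI2 fallback touched by these hunks: the old `_umul128` helper (guarded by `#ifdef __GNUC__`) is renamed to `_mulx_u64` and guarded by `#ifndef __BMI2__`, presumably so the hardware BMI2 `_mulx_u64` intrinsic is used when the target defines `__BMI2__` and the portable `unsigned __int128` fallback is used otherwise. Below is a minimal standalone sketch of that fallback, not part of the patch; the name `mulx_u64_fallback` and the `main` harness are illustrative only, and `unsigned __int128` is a GCC/Clang extension.

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

/* Same arithmetic as the #ifndef __BMI2__ fallback in the hunks above:
 * a full 64x64 -> 128-bit product, returned as low and high halves. */
static inline uint64_t mulx_u64_fallback(uint64_t a, uint64_t b, uint64_t *hi)
{
    unsigned __int128 r = (unsigned __int128) a * (unsigned __int128) b;
    *hi = (uint64_t) (r >> 64);
    return (uint64_t) r;
}

int main(void)
{
    uint64_t hi;
    /* 2^32 * 2^32 = 2^64: the low half is 0 and the high half is 1. */
    uint64_t lo = mulx_u64_fallback(1ULL << 32, 1ULL << 32, &hi);
    printf("hi=%" PRIu64 " lo=%" PRIu64 "\n", hi, lo); /* prints hi=1 lo=0 */
    return 0;
}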