/* XMRig
 * Copyright 2010      Jeff Garzik <jgarzik@pobox.com>
 * Copyright 2012-2014 pooler      <pooler@litecoinpool.org>
 * Copyright 2014      Lucas Jones <https://github.com/lucasjones>
 * Copyright 2014-2016 Wolf9466    <https://github.com/OhGodAPet>
 * Copyright 2016      Jay D Dee   <jayddee246@gmail.com>
 * Copyright 2016      Imran Yusuff <https://github.com/imranyusuff>
 * Copyright 2017-2018 XMR-Stak    <https://github.com/fireice-uk>, <https://github.com/psychocrypt>
 * Copyright 2018      Lee Clagett <https://github.com/vtnerd>
 * Copyright 2016-2018 XMRig       <https://github.com/xmrig>, <support@xmrig.com>
 *
 * This program is free software: you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation, either version 3 of the License, or
 *   (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *   GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 *   along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __CRYPTONIGHT_ARM_H__
#define __CRYPTONIGHT_ARM_H__


#include "common/crypto/keccak.h"
#include "common/utils/mm_malloc.h"
#include "crypto/CryptoNight.h"
#include "crypto/CryptoNight_constants.h"
#include "crypto/CryptoNight_monero.h"
#include "crypto/soft_aes.h"


extern "C"
{
#include "crypto/c_groestl.h"
#include "crypto/c_blake256.h"
#include "crypto/c_jh.h"
#include "crypto/c_skein.h"
}
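

// CryptoNight finalization: the low two bits of the first byte of the final
// Keccak state select one of the four hashes below (BLAKE-256, Groestl-256,
// JH-256, Skein-256) to compress the 200-byte state into the 32-byte result.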
static inline void do_blake_hash(const uint8_t *input, size_t len, uint8_t *output) {
    blake256_hash(output, input, len);
}


static inline void do_groestl_hash(const uint8_t *input, size_t len, uint8_t *output) {
    groestl(input, len * 8, output);
}


static inline void do_jh_hash(const uint8_t *input, size_t len, uint8_t *output) {
    jh_hash(32 * 8, input, 8 * len, output);
}


static inline void do_skein_hash(const uint8_t *input, size_t len, uint8_t *output) {
    xmr_skein(input, output);
}


void (* const extra_hashes[4])(const uint8_t *, size_t, uint8_t *) = {do_blake_hash, do_groestl_hash, do_jh_hash, do_skein_hash};
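

// Minimal SSE2-compatibility layer: the few intrinsics the algorithm needs are
// re-implemented with NEON so this file can keep the x86 intrinsic names.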
static inline __attribute__((always_inline)) __m128i _mm_set_epi64x(const uint64_t a, const uint64_t b)
{
    return vcombine_u64(vcreate_u64(b), vcreate_u64(a));
}
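

// With the ARMv8 Crypto Extensions one AES round is vaeseq_u8 (AddRoundKey
// with a zero key + SubBytes + ShiftRows) followed by vaesmcq_u8 (MixColumns),
// then an XOR with the real round key. Without the extensions the stub below
// only satisfies the compiler; callers take the SOFT_AES path instead.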
#if __ARM_FEATURE_CRYPTO
static inline __attribute__((always_inline)) __m128i _mm_aesenc_si128(__m128i v, __m128i rkey)
{
    alignas(16) const __m128i zero = { 0 };
    return veorq_u8(vaesmcq_u8(vaeseq_u8(v, zero)), rkey);
}
#else
static inline __attribute__((always_inline)) __m128i _mm_aesenc_si128(__m128i v, __m128i rkey)
{
    alignas(16) const __m128i zero = { 0 };
    return zero;
}
#endif


/* This SSE2 intrinsic has no NEON header equivalent, so provide it here. */
static inline __attribute__((always_inline)) uint64_t _mm_cvtsi128_si64(__m128i a)
{
    return vgetq_lane_u64(a, 0);
}


#define EXTRACT64(X) _mm_cvtsi128_si64(X)
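

// 64 x 64 -> 128-bit unsigned multiply. AArch64 uses the compiler's native
// unsigned __int128; 32-bit ARM assembles it from four 32 x 32 partial
// products with explicit carry handling. E.g. 0xFFFFFFFFFFFFFFFF * 2 yields
// hi = 1, lo = 0xFFFFFFFFFFFFFFFE.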
#if defined (__arm64__) || defined (__aarch64__)
static inline uint64_t __umul128(uint64_t a, uint64_t b, uint64_t* hi)
{
    unsigned __int128 r = (unsigned __int128) a * (unsigned __int128) b;
    *hi = r >> 64;
    return (uint64_t) r;
}
#else
static inline uint64_t __umul128(uint64_t multiplier, uint64_t multiplicand, uint64_t *product_hi) {
    // multiplier   = ab = a * 2^32 + b
    // multiplicand = cd = c * 2^32 + d
    // ab * cd = a * c * 2^64 + (a * d + b * c) * 2^32 + b * d
    uint64_t a = multiplier >> 32;
    uint64_t b = multiplier & 0xFFFFFFFF;
    uint64_t c = multiplicand >> 32;
    uint64_t d = multiplicand & 0xFFFFFFFF;

    //uint64_t ac = a * c;
    uint64_t ad = a * d;
    //uint64_t bc = b * c;
    uint64_t bd = b * d;

    uint64_t adbc = ad + (b * c);
    uint64_t adbc_carry = adbc < ad ? 1 : 0;

    // multiplier * multiplicand = product_hi * 2^64 + product_lo
    uint64_t product_lo = bd + (adbc << 32);
    uint64_t product_lo_carry = product_lo < bd ? 1 : 0;
    *product_hi = (a * c) + (adbc >> 32) + (adbc_carry << 32) + product_lo_carry;

    return product_lo;
}
#endif


// This will shift and xor tmp1 into itself as 4 32-bit vals such as
// sl_xor(a1 a2 a3 a4) = a1 (a2^a1) (a3^a2^a1) (a4^a3^a2^a1)
static inline __m128i sl_xor(__m128i tmp1)
{
    __m128i tmp4;
    tmp4 = _mm_slli_si128(tmp1, 0x04);
    tmp1 = _mm_xor_si128(tmp1, tmp4);
    tmp4 = _mm_slli_si128(tmp4, 0x04);
    tmp1 = _mm_xor_si128(tmp1, tmp4);
    tmp4 = _mm_slli_si128(tmp4, 0x04);
    tmp1 = _mm_xor_si128(tmp1, tmp4);
    return tmp1;
}


template<uint8_t rcon>
static inline void soft_aes_genkey_sub(__m128i* xout0, __m128i* xout2)
{
    __m128i xout1 = soft_aeskeygenassist<rcon>(*xout2);
    xout1  = _mm_shuffle_epi32(xout1, 0xFF); // see PSHUFD, set all elems to 4th elem
    *xout0 = sl_xor(*xout0);
    *xout0 = _mm_xor_si128(*xout0, xout1);
    xout1  = soft_aeskeygenassist<0x00>(*xout0);
    xout1  = _mm_shuffle_epi32(xout1, 0xAA); // see PSHUFD, set all elems to 3rd elem
    *xout2 = sl_xor(*xout2);
    *xout2 = _mm_xor_si128(*xout2, xout1);
}
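

// CryptoNight key schedule: expand the first 32 bytes of the Keccak state
// (treated as an AES-256-style key) into ten 128-bit round keys.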
template<bool SOFT_AES>
static inline void aes_genkey(const __m128i* memory, __m128i* k0, __m128i* k1, __m128i* k2, __m128i* k3, __m128i* k4, __m128i* k5, __m128i* k6, __m128i* k7, __m128i* k8, __m128i* k9)
{
    __m128i xout0 = _mm_load_si128(memory);
    __m128i xout2 = _mm_load_si128(memory + 1);
    *k0 = xout0;
    *k1 = xout2;

    soft_aes_genkey_sub<0x01>(&xout0, &xout2);
    *k2 = xout0;
    *k3 = xout2;

    soft_aes_genkey_sub<0x02>(&xout0, &xout2);
    *k4 = xout0;
    *k5 = xout2;

    soft_aes_genkey_sub<0x04>(&xout0, &xout2);
    *k6 = xout0;
    *k7 = xout2;

    soft_aes_genkey_sub<0x08>(&xout0, &xout2);
    *k8 = xout0;
    *k9 = xout2;
}
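

// One AES encryption round applied to all eight pipeline lanes, either via
// the table-based soft_aesenc or the hardware wrapper above.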
template<bool SOFT_AES>
static inline void aes_round(__m128i key, __m128i* x0, __m128i* x1, __m128i* x2, __m128i* x3, __m128i* x4, __m128i* x5, __m128i* x6, __m128i* x7)
{
    if (SOFT_AES) {
        *x0 = soft_aesenc((uint32_t*)x0, key);
        *x1 = soft_aesenc((uint32_t*)x1, key);
        *x2 = soft_aesenc((uint32_t*)x2, key);
        *x3 = soft_aesenc((uint32_t*)x3, key);
        *x4 = soft_aesenc((uint32_t*)x4, key);
        *x5 = soft_aesenc((uint32_t*)x5, key);
        *x6 = soft_aesenc((uint32_t*)x6, key);
        *x7 = soft_aesenc((uint32_t*)x7, key);
    }
    else {
        *x0 = _mm_aesenc_si128(*x0, key);
        *x1 = _mm_aesenc_si128(*x1, key);
        *x2 = _mm_aesenc_si128(*x2, key);
        *x3 = _mm_aesenc_si128(*x3, key);
        *x4 = _mm_aesenc_si128(*x4, key);
        *x5 = _mm_aesenc_si128(*x5, key);
        *x6 = _mm_aesenc_si128(*x6, key);
        *x7 = _mm_aesenc_si128(*x7, key);
    }
}
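

// cryptonight-heavy lane mixing: every lane is XORed with the original value
// of the next lane, with x7 wrapping around to the saved x0.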
inline void mix_and_propagate(__m128i& x0, __m128i& x1, __m128i& x2, __m128i& x3, __m128i& x4, __m128i& x5, __m128i& x6, __m128i& x7)
{
    __m128i tmp0 = x0;
    x0 = _mm_xor_si128(x0, x1);
    x1 = _mm_xor_si128(x1, x2);
    x2 = _mm_xor_si128(x2, x3);
    x3 = _mm_xor_si128(x3, x4);
    x4 = _mm_xor_si128(x4, x5);
    x5 = _mm_xor_si128(x5, x6);
    x6 = _mm_xor_si128(x6, x7);
    x7 = _mm_xor_si128(x7, tmp0);
}
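

// Fill the MEM-byte scratchpad from the 200-byte Keccak state: round keys come
// from state bytes 0-31, the eight lanes are seeded from bytes 64-191, and each
// 128-byte block gets ten AES rounds before being stored. cryptonight-heavy
// first pre-mixes the lanes with sixteen extra rounds of AES + mixing.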
template<xmrig::Algo ALGO, size_t MEM, bool SOFT_AES>
static inline void cn_explode_scratchpad(const __m128i *input, __m128i *output)
{
    __m128i xin0, xin1, xin2, xin3, xin4, xin5, xin6, xin7;
    __m128i k0, k1, k2, k3, k4, k5, k6, k7, k8, k9;

    aes_genkey<SOFT_AES>(input, &k0, &k1, &k2, &k3, &k4, &k5, &k6, &k7, &k8, &k9);

    xin0 = _mm_load_si128(input + 4);
    xin1 = _mm_load_si128(input + 5);
    xin2 = _mm_load_si128(input + 6);
    xin3 = _mm_load_si128(input + 7);
    xin4 = _mm_load_si128(input + 8);
    xin5 = _mm_load_si128(input + 9);
    xin6 = _mm_load_si128(input + 10);
    xin7 = _mm_load_si128(input + 11);

    if (ALGO == xmrig::CRYPTONIGHT_HEAVY) {
        for (size_t i = 0; i < 16; i++) {
            aes_round<SOFT_AES>(k0, &xin0, &xin1, &xin2, &xin3, &xin4, &xin5, &xin6, &xin7);
            aes_round<SOFT_AES>(k1, &xin0, &xin1, &xin2, &xin3, &xin4, &xin5, &xin6, &xin7);
            aes_round<SOFT_AES>(k2, &xin0, &xin1, &xin2, &xin3, &xin4, &xin5, &xin6, &xin7);
            aes_round<SOFT_AES>(k3, &xin0, &xin1, &xin2, &xin3, &xin4, &xin5, &xin6, &xin7);
            aes_round<SOFT_AES>(k4, &xin0, &xin1, &xin2, &xin3, &xin4, &xin5, &xin6, &xin7);
            aes_round<SOFT_AES>(k5, &xin0, &xin1, &xin2, &xin3, &xin4, &xin5, &xin6, &xin7);
            aes_round<SOFT_AES>(k6, &xin0, &xin1, &xin2, &xin3, &xin4, &xin5, &xin6, &xin7);
            aes_round<SOFT_AES>(k7, &xin0, &xin1, &xin2, &xin3, &xin4, &xin5, &xin6, &xin7);
            aes_round<SOFT_AES>(k8, &xin0, &xin1, &xin2, &xin3, &xin4, &xin5, &xin6, &xin7);
            aes_round<SOFT_AES>(k9, &xin0, &xin1, &xin2, &xin3, &xin4, &xin5, &xin6, &xin7);

            mix_and_propagate(xin0, xin1, xin2, xin3, xin4, xin5, xin6, xin7);
        }
    }

    for (size_t i = 0; i < MEM / sizeof(__m128i); i += 8) {
        aes_round<SOFT_AES>(k0, &xin0, &xin1, &xin2, &xin3, &xin4, &xin5, &xin6, &xin7);
        aes_round<SOFT_AES>(k1, &xin0, &xin1, &xin2, &xin3, &xin4, &xin5, &xin6, &xin7);
        aes_round<SOFT_AES>(k2, &xin0, &xin1, &xin2, &xin3, &xin4, &xin5, &xin6, &xin7);
        aes_round<SOFT_AES>(k3, &xin0, &xin1, &xin2, &xin3, &xin4, &xin5, &xin6, &xin7);
        aes_round<SOFT_AES>(k4, &xin0, &xin1, &xin2, &xin3, &xin4, &xin5, &xin6, &xin7);
        aes_round<SOFT_AES>(k5, &xin0, &xin1, &xin2, &xin3, &xin4, &xin5, &xin6, &xin7);
        aes_round<SOFT_AES>(k6, &xin0, &xin1, &xin2, &xin3, &xin4, &xin5, &xin6, &xin7);
        aes_round<SOFT_AES>(k7, &xin0, &xin1, &xin2, &xin3, &xin4, &xin5, &xin6, &xin7);
        aes_round<SOFT_AES>(k8, &xin0, &xin1, &xin2, &xin3, &xin4, &xin5, &xin6, &xin7);
        aes_round<SOFT_AES>(k9, &xin0, &xin1, &xin2, &xin3, &xin4, &xin5, &xin6, &xin7);

        _mm_store_si128(output + i + 0, xin0);
        _mm_store_si128(output + i + 1, xin1);
        _mm_store_si128(output + i + 2, xin2);
        _mm_store_si128(output + i + 3, xin3);
        _mm_store_si128(output + i + 4, xin4);
        _mm_store_si128(output + i + 5, xin5);
        _mm_store_si128(output + i + 6, xin6);
        _mm_store_si128(output + i + 7, xin7);
    }
}
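

// Inverse of the explode step: XOR each 128-byte scratchpad block into the
// eight lanes and run ten AES rounds, folding the whole pad back into state
// bytes 64-191. cryptonight-heavy mixes the lanes after every block, re-reads
// the pad a second time, and finishes with sixteen extra mixing rounds.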
template<xmrig::Algo ALGO, size_t MEM, bool SOFT_AES>
static inline void cn_implode_scratchpad(const __m128i *input, __m128i *output)
{
    __m128i xout0, xout1, xout2, xout3, xout4, xout5, xout6, xout7;
    __m128i k0, k1, k2, k3, k4, k5, k6, k7, k8, k9;

    aes_genkey<SOFT_AES>(output + 2, &k0, &k1, &k2, &k3, &k4, &k5, &k6, &k7, &k8, &k9);

    xout0 = _mm_load_si128(output + 4);
    xout1 = _mm_load_si128(output + 5);
    xout2 = _mm_load_si128(output + 6);
    xout3 = _mm_load_si128(output + 7);
    xout4 = _mm_load_si128(output + 8);
    xout5 = _mm_load_si128(output + 9);
    xout6 = _mm_load_si128(output + 10);
    xout7 = _mm_load_si128(output + 11);

    for (size_t i = 0; i < MEM / sizeof(__m128i); i += 8)
    {
        xout0 = _mm_xor_si128(_mm_load_si128(input + i + 0), xout0);
        xout1 = _mm_xor_si128(_mm_load_si128(input + i + 1), xout1);
        xout2 = _mm_xor_si128(_mm_load_si128(input + i + 2), xout2);
        xout3 = _mm_xor_si128(_mm_load_si128(input + i + 3), xout3);
        xout4 = _mm_xor_si128(_mm_load_si128(input + i + 4), xout4);
        xout5 = _mm_xor_si128(_mm_load_si128(input + i + 5), xout5);
        xout6 = _mm_xor_si128(_mm_load_si128(input + i + 6), xout6);
        xout7 = _mm_xor_si128(_mm_load_si128(input + i + 7), xout7);

        aes_round<SOFT_AES>(k0, &xout0, &xout1, &xout2, &xout3, &xout4, &xout5, &xout6, &xout7);
        aes_round<SOFT_AES>(k1, &xout0, &xout1, &xout2, &xout3, &xout4, &xout5, &xout6, &xout7);
        aes_round<SOFT_AES>(k2, &xout0, &xout1, &xout2, &xout3, &xout4, &xout5, &xout6, &xout7);
        aes_round<SOFT_AES>(k3, &xout0, &xout1, &xout2, &xout3, &xout4, &xout5, &xout6, &xout7);
        aes_round<SOFT_AES>(k4, &xout0, &xout1, &xout2, &xout3, &xout4, &xout5, &xout6, &xout7);
        aes_round<SOFT_AES>(k5, &xout0, &xout1, &xout2, &xout3, &xout4, &xout5, &xout6, &xout7);
        aes_round<SOFT_AES>(k6, &xout0, &xout1, &xout2, &xout3, &xout4, &xout5, &xout6, &xout7);
        aes_round<SOFT_AES>(k7, &xout0, &xout1, &xout2, &xout3, &xout4, &xout5, &xout6, &xout7);
        aes_round<SOFT_AES>(k8, &xout0, &xout1, &xout2, &xout3, &xout4, &xout5, &xout6, &xout7);
        aes_round<SOFT_AES>(k9, &xout0, &xout1, &xout2, &xout3, &xout4, &xout5, &xout6, &xout7);

        if (ALGO == xmrig::CRYPTONIGHT_HEAVY) {
            mix_and_propagate(xout0, xout1, xout2, xout3, xout4, xout5, xout6, xout7);
        }
    }

    if (ALGO == xmrig::CRYPTONIGHT_HEAVY) {
        for (size_t i = 0; i < MEM / sizeof(__m128i); i += 8) {
            xout0 = _mm_xor_si128(_mm_load_si128(input + i + 0), xout0);
            xout1 = _mm_xor_si128(_mm_load_si128(input + i + 1), xout1);
            xout2 = _mm_xor_si128(_mm_load_si128(input + i + 2), xout2);
            xout3 = _mm_xor_si128(_mm_load_si128(input + i + 3), xout3);
            xout4 = _mm_xor_si128(_mm_load_si128(input + i + 4), xout4);
            xout5 = _mm_xor_si128(_mm_load_si128(input + i + 5), xout5);
            xout6 = _mm_xor_si128(_mm_load_si128(input + i + 6), xout6);
            xout7 = _mm_xor_si128(_mm_load_si128(input + i + 7), xout7);

            aes_round<SOFT_AES>(k0, &xout0, &xout1, &xout2, &xout3, &xout4, &xout5, &xout6, &xout7);
            aes_round<SOFT_AES>(k1, &xout0, &xout1, &xout2, &xout3, &xout4, &xout5, &xout6, &xout7);
            aes_round<SOFT_AES>(k2, &xout0, &xout1, &xout2, &xout3, &xout4, &xout5, &xout6, &xout7);
            aes_round<SOFT_AES>(k3, &xout0, &xout1, &xout2, &xout3, &xout4, &xout5, &xout6, &xout7);
            aes_round<SOFT_AES>(k4, &xout0, &xout1, &xout2, &xout3, &xout4, &xout5, &xout6, &xout7);
            aes_round<SOFT_AES>(k5, &xout0, &xout1, &xout2, &xout3, &xout4, &xout5, &xout6, &xout7);
            aes_round<SOFT_AES>(k6, &xout0, &xout1, &xout2, &xout3, &xout4, &xout5, &xout6, &xout7);
            aes_round<SOFT_AES>(k7, &xout0, &xout1, &xout2, &xout3, &xout4, &xout5, &xout6, &xout7);
            aes_round<SOFT_AES>(k8, &xout0, &xout1, &xout2, &xout3, &xout4, &xout5, &xout6, &xout7);
            aes_round<SOFT_AES>(k9, &xout0, &xout1, &xout2, &xout3, &xout4, &xout5, &xout6, &xout7);

            mix_and_propagate(xout0, xout1, xout2, xout3, xout4, xout5, xout6, xout7);
        }

        for (size_t i = 0; i < 16; i++) {
            aes_round<SOFT_AES>(k0, &xout0, &xout1, &xout2, &xout3, &xout4, &xout5, &xout6, &xout7);
            aes_round<SOFT_AES>(k1, &xout0, &xout1, &xout2, &xout3, &xout4, &xout5, &xout6, &xout7);
            aes_round<SOFT_AES>(k2, &xout0, &xout1, &xout2, &xout3, &xout4, &xout5, &xout6, &xout7);
            aes_round<SOFT_AES>(k3, &xout0, &xout1, &xout2, &xout3, &xout4, &xout5, &xout6, &xout7);
            aes_round<SOFT_AES>(k4, &xout0, &xout1, &xout2, &xout3, &xout4, &xout5, &xout6, &xout7);
            aes_round<SOFT_AES>(k5, &xout0, &xout1, &xout2, &xout3, &xout4, &xout5, &xout6, &xout7);
            aes_round<SOFT_AES>(k6, &xout0, &xout1, &xout2, &xout3, &xout4, &xout5, &xout6, &xout7);
            aes_round<SOFT_AES>(k7, &xout0, &xout1, &xout2, &xout3, &xout4, &xout5, &xout6, &xout7);
            aes_round<SOFT_AES>(k8, &xout0, &xout1, &xout2, &xout3, &xout4, &xout5, &xout6, &xout7);
            aes_round<SOFT_AES>(k9, &xout0, &xout1, &xout2, &xout3, &xout4, &xout5, &xout6, &xout7);

            mix_and_propagate(xout0, xout1, xout2, xout3, xout4, xout5, xout6, xout7);
        }
    }

    _mm_store_si128(output + 4, xout0);
    _mm_store_si128(output + 5, xout1);
    _mm_store_si128(output + 6, xout2);
    _mm_store_si128(output + 7, xout3);
    _mm_store_si128(output + 8, xout4);
    _mm_store_si128(output + 9, xout5);
    _mm_store_si128(output + 10, xout6);
    _mm_store_si128(output + 11, xout7);
}
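

// Monero variant-1 tweak applied when writing b ^ c back to the scratchpad:
// two bits of byte 11 of the 16-byte word are flipped, with the flip pattern
// picked from the 2-bit lookup table 0x7531. SHIFT is 3 for standard variant 1
// and 4 for Stellite (XTL). E.g. x == 0 gives index 0, table entry 0b01, so
// only bit 28 of the high quadword is flipped.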
template<int SHIFT>
static inline void cryptonight_monero_tweak(uint64_t* mem_out, __m128i tmp)
{
    mem_out[0] = EXTRACT64(tmp);

    uint64_t vh = vgetq_lane_u64(tmp, 1);

    uint8_t x = vh >> 24;
    static const uint16_t table = 0x7531;
    const uint8_t index = (((x >> SHIFT) & 6) | (x & 1)) << 1;
    vh ^= ((table >> index) & 0x3) << 28;

    mem_out[1] = vh;
}
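

// Single-nonce CryptoNight: Keccak-1600 absorbs the input blob, the scratchpad
// is exploded from the state, the memory-hard loop runs ITERATIONS times, and
// implode + Keccak-f + one extra hash produce the 32-byte result.
//
// Minimal usage sketch (hedged: context allocation really lives elsewhere in
// the miner, and the exact Algo/Variant enumerator names are assumptions):
//
//   cryptonight_ctx *c = static_cast<cryptonight_ctx *>(_mm_malloc(sizeof(*c), 16));
//   c->memory = static_cast<uint8_t *>(_mm_malloc(xmrig::CRYPTONIGHT_MEMORY, 16));
//   cryptonight_ctx *ctx[1] = { c };
//   uint8_t hash[32];
//   cryptonight_single_hash<xmrig::CRYPTONIGHT, false, xmrig::VARIANT_V1>(blob, 76, hash, ctx);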
template<xmrig::Algo ALGO, bool SOFT_AES, int VARIANT>
inline void cryptonight_single_hash(const uint8_t *__restrict__ input, size_t size, uint8_t *__restrict__ output, cryptonight_ctx **__restrict__ ctx)
{
    constexpr size_t MASK       = xmrig::cn_select_mask<ALGO>();
    constexpr size_t ITERATIONS = xmrig::cn_select_iter<ALGO, VARIANT>();
    constexpr size_t MEM        = xmrig::cn_select_memory<ALGO>();
    constexpr bool IS_MONERO    = xmrig::cn_is_monero<VARIANT>();

    if (IS_MONERO && size < 43) {
        memset(output, 0, 32);
        return;
    }

    xmrig::keccak(input, size, ctx[0]->state);

    VARIANT1_INIT(0);

    cn_explode_scratchpad<ALGO, MEM, SOFT_AES>((__m128i*) ctx[0]->state, (__m128i*) ctx[0]->memory);

    const uint8_t* l0 = ctx[0]->memory;
    uint64_t* h0 = reinterpret_cast<uint64_t*>(ctx[0]->state);

    uint64_t al0 = h0[0] ^ h0[4];
    uint64_t ah0 = h0[1] ^ h0[5];
    __m128i bx0 = _mm_set_epi64x(h0[3] ^ h0[7], h0[2] ^ h0[6]);

    uint64_t idx0 = h0[0] ^ h0[4];
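
    // Each iteration: an AES round keyed by (ah0, al0) on the word at idx0, a
    // (possibly tweaked) write-back of b ^ c, then a 64x64 multiply-and-add on
    // a second word; cryptonight-heavy adds a signed division step that
    // perturbs the next index.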
    for (size_t i = 0; i < ITERATIONS; i++) {
        __m128i cx;

        if (SOFT_AES) {
            cx = soft_aesenc((uint32_t*)&l0[idx0 & MASK], _mm_set_epi64x(ah0, al0));
        }
        else {
            cx = _mm_load_si128((__m128i *) &l0[idx0 & MASK]);
            cx = _mm_aesenc_si128(cx, _mm_set_epi64x(ah0, al0));
        }

        if (IS_MONERO) {
            cryptonight_monero_tweak<VARIANT == xmrig::VARIANT_XTL ? 4 : 3>((uint64_t*)&l0[idx0 & MASK], _mm_xor_si128(bx0, cx));
        } else {
            _mm_store_si128((__m128i *)&l0[idx0 & MASK], _mm_xor_si128(bx0, cx));
        }

        idx0 = EXTRACT64(cx);
        bx0 = cx;

        uint64_t hi, lo, cl, ch;
        cl = ((uint64_t*) &l0[idx0 & MASK])[0];
        ch = ((uint64_t*) &l0[idx0 & MASK])[1];
        lo = __umul128(idx0, cl, &hi);

        al0 += hi;
        ah0 += lo;

        ((uint64_t*)&l0[idx0 & MASK])[0] = al0;

        if (IS_MONERO) {
            if (VARIANT == xmrig::VARIANT_IPBC) {
                ((uint64_t*)&l0[idx0 & MASK])[1] = ah0 ^ tweak1_2_0 ^ al0;
            }
            else {
                ((uint64_t*)&l0[idx0 & MASK])[1] = ah0 ^ tweak1_2_0;
            }
        }
        else {
            ((uint64_t*)&l0[idx0 & MASK])[1] = ah0;
        }

        al0 ^= cl;
        ah0 ^= ch;
        idx0 = al0;

        if (ALGO == xmrig::CRYPTONIGHT_HEAVY) {
            const int64x2_t x = vld1q_s64(reinterpret_cast<const int64_t *>(&l0[idx0 & MASK]));
            const int64_t n = vgetq_lane_s64(x, 0);
            const int32_t d = vgetq_lane_s32(x, 2);
            const int64_t q = n / (d | 0x5);

            ((int64_t*)&l0[idx0 & MASK])[0] = n ^ q;

            if (VARIANT == xmrig::VARIANT_XHV) {
                idx0 = (~d) ^ q;
            }
            else {
                idx0 = d ^ q;
            }
        }
    }

    cn_implode_scratchpad<ALGO, MEM, SOFT_AES>((__m128i*) ctx[0]->memory, (__m128i*) ctx[0]->state);

    xmrig::keccakf(h0, 24);
    extra_hashes[ctx[0]->state[0] & 3](ctx[0]->state, 200, output);
}
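

// Two-nonce variant: the same round body is run over two independent states
// and scratchpads in lockstep, interleaving their memory accesses to hide
// latency.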
template<xmrig::Algo ALGO, bool SOFT_AES, int VARIANT>
inline void cryptonight_double_hash(const uint8_t *__restrict__ input, size_t size, uint8_t *__restrict__ output, struct cryptonight_ctx **__restrict__ ctx)
{
    constexpr size_t MASK       = xmrig::cn_select_mask<ALGO>();
    constexpr size_t ITERATIONS = xmrig::cn_select_iter<ALGO, VARIANT>();
    constexpr size_t MEM        = xmrig::cn_select_memory<ALGO>();
    constexpr bool IS_MONERO    = xmrig::cn_is_monero<VARIANT>();

    if (IS_MONERO && size < 43) {
        memset(output, 0, 64);
        return;
    }

    xmrig::keccak(input, size, ctx[0]->state);
    xmrig::keccak(input + size, size, ctx[1]->state);

    VARIANT1_INIT(0);
    VARIANT1_INIT(1);

    const uint8_t* l0 = ctx[0]->memory;
    const uint8_t* l1 = ctx[1]->memory;
    uint64_t* h0 = reinterpret_cast<uint64_t*>(ctx[0]->state);
    uint64_t* h1 = reinterpret_cast<uint64_t*>(ctx[1]->state);

    cn_explode_scratchpad<ALGO, MEM, SOFT_AES>((__m128i*) h0, (__m128i*) l0);
    cn_explode_scratchpad<ALGO, MEM, SOFT_AES>((__m128i*) h1, (__m128i*) l1);

    uint64_t al0 = h0[0] ^ h0[4];
    uint64_t al1 = h1[0] ^ h1[4];
    uint64_t ah0 = h0[1] ^ h0[5];
    uint64_t ah1 = h1[1] ^ h1[5];

    __m128i bx0 = _mm_set_epi64x(h0[3] ^ h0[7], h0[2] ^ h0[6]);
    __m128i bx1 = _mm_set_epi64x(h1[3] ^ h1[7], h1[2] ^ h1[6]);

    uint64_t idx0 = h0[0] ^ h0[4];
    uint64_t idx1 = h1[0] ^ h1[4];

    for (size_t i = 0; i < ITERATIONS; i++) {
        __m128i cx0, cx1;

        if (SOFT_AES) {
            cx0 = soft_aesenc((uint32_t*)&l0[idx0 & MASK], _mm_set_epi64x(ah0, al0));
            cx1 = soft_aesenc((uint32_t*)&l1[idx1 & MASK], _mm_set_epi64x(ah1, al1));
        }
        else {
            cx0 = _mm_load_si128((__m128i *) &l0[idx0 & MASK]);
            cx1 = _mm_load_si128((__m128i *) &l1[idx1 & MASK]);
            cx0 = _mm_aesenc_si128(cx0, _mm_set_epi64x(ah0, al0));
            cx1 = _mm_aesenc_si128(cx1, _mm_set_epi64x(ah1, al1));
        }

        if (IS_MONERO) {
            cryptonight_monero_tweak<VARIANT == xmrig::VARIANT_XTL ? 4 : 3>((uint64_t*)&l0[idx0 & MASK], _mm_xor_si128(bx0, cx0));
            cryptonight_monero_tweak<VARIANT == xmrig::VARIANT_XTL ? 4 : 3>((uint64_t*)&l1[idx1 & MASK], _mm_xor_si128(bx1, cx1));
        } else {
            _mm_store_si128((__m128i *) &l0[idx0 & MASK], _mm_xor_si128(bx0, cx0));
            _mm_store_si128((__m128i *) &l1[idx1 & MASK], _mm_xor_si128(bx1, cx1));
        }

        idx0 = EXTRACT64(cx0);
        idx1 = EXTRACT64(cx1);

        bx0 = cx0;
        bx1 = cx1;

        uint64_t hi, lo, cl, ch;
        cl = ((uint64_t*) &l0[idx0 & MASK])[0];
        ch = ((uint64_t*) &l0[idx0 & MASK])[1];
        lo = __umul128(idx0, cl, &hi);

        al0 += hi;
        ah0 += lo;

        ((uint64_t*)&l0[idx0 & MASK])[0] = al0;

        if (IS_MONERO) {
            if (VARIANT == xmrig::VARIANT_IPBC) {
                ((uint64_t*)&l0[idx0 & MASK])[1] = ah0 ^ tweak1_2_0 ^ al0;
            }
            else {
                ((uint64_t*)&l0[idx0 & MASK])[1] = ah0 ^ tweak1_2_0;
            }
        }
        else {
            ((uint64_t*)&l0[idx0 & MASK])[1] = ah0;
        }

        al0 ^= cl;
        ah0 ^= ch;
        idx0 = al0;

        if (ALGO == xmrig::CRYPTONIGHT_HEAVY) {
            const int64x2_t x = vld1q_s64(reinterpret_cast<const int64_t *>(&l0[idx0 & MASK]));
            const int64_t n = vgetq_lane_s64(x, 0);
            const int32_t d = vgetq_lane_s32(x, 2);
            const int64_t q = n / (d | 0x5);

            ((int64_t*)&l0[idx0 & MASK])[0] = n ^ q;

            if (VARIANT == xmrig::VARIANT_XHV) {
                idx0 = (~d) ^ q;
            }
            else {
                idx0 = d ^ q;
            }
        }

        cl = ((uint64_t*) &l1[idx1 & MASK])[0];
        ch = ((uint64_t*) &l1[idx1 & MASK])[1];
        lo = __umul128(idx1, cl, &hi);

        al1 += hi;
        ah1 += lo;

        ((uint64_t*)&l1[idx1 & MASK])[0] = al1;

        if (IS_MONERO) {
            if (VARIANT == xmrig::VARIANT_IPBC) {
                ((uint64_t*)&l1[idx1 & MASK])[1] = ah1 ^ tweak1_2_1 ^ al1;
            }
            else {
                ((uint64_t*)&l1[idx1 & MASK])[1] = ah1 ^ tweak1_2_1;
            }
        }
        else {
            ((uint64_t*)&l1[idx1 & MASK])[1] = ah1;
        }

        al1 ^= cl;
        ah1 ^= ch;
        idx1 = al1;

        if (ALGO == xmrig::CRYPTONIGHT_HEAVY) {
            const int64x2_t x = vld1q_s64(reinterpret_cast<const int64_t *>(&l1[idx1 & MASK]));
            const int64_t n = vgetq_lane_s64(x, 0);
            const int32_t d = vgetq_lane_s32(x, 2);
            const int64_t q = n / (d | 0x5);

            ((int64_t*)&l1[idx1 & MASK])[0] = n ^ q;

            if (VARIANT == xmrig::VARIANT_XHV) {
                idx1 = (~d) ^ q;
            }
            else {
                idx1 = d ^ q;
            }
        }
    }

    cn_implode_scratchpad<ALGO, MEM, SOFT_AES>((__m128i*) l0, (__m128i*) h0);
    cn_implode_scratchpad<ALGO, MEM, SOFT_AES>((__m128i*) l1, (__m128i*) h1);

    xmrig::keccakf(h0, 24);
    xmrig::keccakf(h1, 24);

    extra_hashes[ctx[0]->state[0] & 3](ctx[0]->state, 200, output);
    extra_hashes[ctx[1]->state[0] & 3](ctx[1]->state, 200, output + 32);
}
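

// The 3-, 4- and 5-way kernels are not implemented on ARM; these empty stubs
// only keep the template interface in sync with the x86 version.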
template<xmrig::Algo ALGO, bool SOFT_AES, int VARIANT>
inline void cryptonight_triple_hash(const uint8_t *__restrict__ input, size_t size, uint8_t *__restrict__ output, struct cryptonight_ctx **__restrict__ ctx)
{
}


template<xmrig::Algo ALGO, bool SOFT_AES, int VARIANT>
inline void cryptonight_quad_hash(const uint8_t *__restrict__ input, size_t size, uint8_t *__restrict__ output, struct cryptonight_ctx **__restrict__ ctx)
{
}


template<xmrig::Algo ALGO, bool SOFT_AES, int VARIANT>
inline void cryptonight_penta_hash(const uint8_t *__restrict__ input, size_t size, uint8_t *__restrict__ output, struct cryptonight_ctx **__restrict__ ctx)
{
}

#endif /* __CRYPTONIGHT_ARM_H__ */