Merge branch 'feature-cn-gpu' into dev

XMRig, 2019-02-04 00:14:14 +07:00, commit 885a2cab21
21 changed files with 926 additions and 33 deletions


@ -5,6 +5,7 @@ option(WITH_LIBCPUID "Use Libcpuid" ON)
option(WITH_AEON "CryptoNight-Lite support" ON)
option(WITH_SUMO "CryptoNight-Heavy support" ON)
option(WITH_CN_PICO "CryptoNight-Pico support" ON)
option(WITH_CN_GPU "CryptoNight-GPU support" ON)
option(WITH_HTTPD "HTTP REST API" ON)
option(WITH_DEBUG_LOG "Enable debug log output" OFF)
option(WITH_TLS "Enable OpenSSL support" ON)
@ -200,6 +201,7 @@ endif()
include(cmake/OpenSSL.cmake)
include(cmake/asm.cmake)
include(cmake/cn-gpu.cmake)
CHECK_INCLUDE_FILE (syslog.h HAVE_SYSLOG_H)
if (HAVE_SYSLOG_H)
@ -262,5 +264,5 @@ if (WITH_DEBUG_LOG)
add_definitions(/DAPP_DEBUG)
endif()
add_executable(${CMAKE_PROJECT_NAME} ${HEADERS} ${SOURCES} ${SOURCES_OS} ${SOURCES_CPUID} ${HEADERS_CRYPTO} ${SOURCES_CRYPTO} ${SOURCES_SYSLOG} ${HTTPD_SOURCES} ${TLS_SOURCES} ${XMRIG_ASM_SOURCES})
add_executable(${CMAKE_PROJECT_NAME} ${HEADERS} ${SOURCES} ${SOURCES_OS} ${SOURCES_CPUID} ${HEADERS_CRYPTO} ${SOURCES_CRYPTO} ${SOURCES_SYSLOG} ${HTTPD_SOURCES} ${TLS_SOURCES} ${XMRIG_ASM_SOURCES} ${CN_GPU_SOURCES})
target_link_libraries(${CMAKE_PROJECT_NAME} ${XMRIG_ASM_LIBRARY} ${OPENSSL_LIBRARIES} ${UV_LIBRARIES} ${MHD_LIBRARY} ${EXTRA_LIBS} ${CPUID_LIB})

cmake/cn-gpu.cmake (new file, 25 lines)

@ -0,0 +1,25 @@
if (WITH_CN_GPU AND CMAKE_SIZEOF_VOID_P EQUAL 8)
if (XMRIG_ARM)
set(CN_GPU_SOURCES src/crypto/cn_gpu_arm.cpp)
if (CMAKE_CXX_COMPILER_ID MATCHES GNU OR CMAKE_CXX_COMPILER_ID MATCHES Clang)
set_source_files_properties(src/crypto/cn_gpu_arm.cpp PROPERTIES COMPILE_FLAGS "-O3")
endif()
else()
set(CN_GPU_SOURCES src/crypto/cn_gpu_avx.cpp src/crypto/cn_gpu_ssse3.cpp)
if (CMAKE_CXX_COMPILER_ID MATCHES GNU)
set_source_files_properties(src/crypto/cn_gpu_avx.cpp PROPERTIES COMPILE_FLAGS "-O3 -mavx2")
set_source_files_properties(src/crypto/cn_gpu_ssse3.cpp PROPERTIES COMPILE_FLAGS "-O3")
elseif (CMAKE_CXX_COMPILER_ID MATCHES Clang)
set_source_files_properties(src/crypto/cn_gpu_avx.cpp PROPERTIES COMPILE_FLAGS "-mavx2")
elseif (CMAKE_CXX_COMPILER_ID MATCHES MSVC)
set_source_files_properties(src/crypto/cn_gpu_avx.cpp PROPERTIES COMPILE_FLAGS "/arch:AVX")
endif()
endif()
else()
set(CN_GPU_SOURCES "")
add_definitions(/DXMRIG_NO_CN_GPU)
endif()


@ -4,9 +4,9 @@
* Copyright 2014 Lucas Jones <https://github.com/lucasjones>
* Copyright 2014-2016 Wolf9466 <https://github.com/OhGodAPet>
* Copyright 2016 Jay D Dee <jayddee246@gmail.com>
* Copyright 2017-2018 XMR-Stak <https://github.com/fireice-uk>, <https://github.com/psychocrypt>
* Copyright 2018 SChernykh <https://github.com/SChernykh>
* Copyright 2016-2018 XMRig <https://github.com/xmrig>, <support@xmrig.com>
* Copyright 2017-2019 XMR-Stak <https://github.com/fireice-uk>, <https://github.com/psychocrypt>
* Copyright 2018-2019 SChernykh <https://github.com/SChernykh>
* Copyright 2016-2019 XMRig <support@xmrig.com>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@ -74,18 +74,25 @@ static void print_cpu(xmrig::Config *config)
using namespace xmrig;
if (config->isColors()) {
Log::i()->text(GREEN_BOLD(" * ") WHITE_BOLD("%-13s%s (%d)") " %sx64 %sAES",
Log::i()->text(GREEN_BOLD(" * ") WHITE_BOLD("%-13s%s (%d)") " %sx64 %sAES %sAVX2",
"CPU",
Cpu::info()->brand(),
Cpu::info()->sockets(),
Cpu::info()->isX64() ? "\x1B[1;32m" : "\x1B[1;31m-",
Cpu::info()->hasAES() ? "\x1B[1;32m" : "\x1B[1;31m-");
Cpu::info()->hasAES() ? "\x1B[1;32m" : "\x1B[1;31m-",
Cpu::info()->hasAVX2() ? "\x1B[1;32m" : "\x1B[1;31m-");
# ifndef XMRIG_NO_LIBCPUID
Log::i()->text(GREEN_BOLD(" * ") WHITE_BOLD("%-13s%.1f MB/%.1f MB"), "CPU L2/L3", Cpu::info()->L2() / 1024.0, Cpu::info()->L3() / 1024.0);
# endif
}
else {
Log::i()->text(" * %-13s%s (%d) %sx64 %sAES", "CPU", Cpu::info()->brand(), Cpu::info()->sockets(), Cpu::info()->isX64() ? "" : "-", Cpu::info()->hasAES() ? "" : "-");
Log::i()->text(" * %-13s%s (%d) %sx64 %sAES %sAVX2",
"CPU",
Cpu::info()->brand(),
Cpu::info()->sockets(),
Cpu::info()->isX64() ? "" : "-",
Cpu::info()->hasAES() ? "" : "-",
Cpu::info()->hasAVX2() ? "" : "-");
# ifndef XMRIG_NO_LIBCPUID
Log::i()->text(" * %-13s%.1f MB/%.1f MB", "CPU L2/L3", Cpu::info()->L2() / 1024.0, Cpu::info()->L3() / 1024.0);
# endif


@ -4,8 +4,9 @@
* Copyright 2014 Lucas Jones <https://github.com/lucasjones>
* Copyright 2014-2016 Wolf9466 <https://github.com/OhGodAPet>
* Copyright 2016 Jay D Dee <jayddee246@gmail.com>
* Copyright 2017-2018 XMR-Stak <https://github.com/fireice-uk>, <https://github.com/psychocrypt>
* Copyright 2016-2018 XMRig <https://github.com/xmrig>, <support@xmrig.com>
* Copyright 2017-2019 XMR-Stak <https://github.com/fireice-uk>, <https://github.com/psychocrypt>
* Copyright 2018-2019 SChernykh <https://github.com/SChernykh>
* Copyright 2016-2019 XMRig <support@xmrig.com>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@ -35,6 +36,10 @@
# define bit_AES (1 << 25)
#endif
#ifndef bit_AVX2
# define bit_AVX2 (1 << 5)
#endif
#include "common/cpu/BasicCpuInfo.h"
@ -93,9 +98,19 @@ static inline bool has_aes_ni()
}
static inline bool has_avx2()
{
int32_t cpu_info[4] = { 0 };
cpuid(EXTENDED_FEATURES, cpu_info);
return (cpu_info[EBX_Reg] & bit_AVX2) != 0;
}
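
For reference, bit 5 of EBX from CPUID leaf 7 (the EXTENDED_FEATURES leaf queried above) is the architectural AVX2 feature flag. A freestanding equivalent of has_avx2() might look like this (illustrative sketch only; GCC/Clang helper assumed, MSVC would use __cpuidex):

#include <cpuid.h> // GCC/Clang: provides __get_cpuid_count

static bool has_avx2_standalone()
{
    unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;
    // Leaf 7, sub-leaf 0: structured extended feature flags.
    if (!__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx)) {
        return false;
    }
    return (ebx & (1u << 5)) != 0; // EBX bit 5 = AVX2
}
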
xmrig::BasicCpuInfo::BasicCpuInfo() :
m_assembly(ASM_NONE),
m_aes(has_aes_ni()),
m_avx2(has_avx2()),
m_brand(),
m_threads(std::thread::hardware_concurrency())
{


@ -4,8 +4,9 @@
* Copyright 2014 Lucas Jones <https://github.com/lucasjones>
* Copyright 2014-2016 Wolf9466 <https://github.com/OhGodAPet>
* Copyright 2016 Jay D Dee <jayddee246@gmail.com>
* Copyright 2017-2018 XMR-Stak <https://github.com/fireice-uk>, <https://github.com/psychocrypt>
* Copyright 2016-2018 XMRig <https://github.com/xmrig>, <support@xmrig.com>
* Copyright 2017-2019 XMR-Stak <https://github.com/fireice-uk>, <https://github.com/psychocrypt>
* Copyright 2018-2019 SChernykh <https://github.com/SChernykh>
* Copyright 2016-2019 XMRig <support@xmrig.com>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@ -41,6 +42,7 @@ protected:
inline Assembly assembly() const override { return m_assembly; }
inline bool hasAES() const override { return m_aes; }
inline bool hasAVX2() const override { return m_avx2; }
inline bool isSupported() const override { return true; }
inline const char *brand() const override { return m_brand; }
inline int32_t cores() const override { return -1; }
@ -59,6 +61,7 @@ protected:
private:
Assembly m_assembly;
bool m_aes;
bool m_avx2;
char m_brand[64];
int32_t m_threads;
};


@ -4,8 +4,9 @@
* Copyright 2014 Lucas Jones <https://github.com/lucasjones>
* Copyright 2014-2016 Wolf9466 <https://github.com/OhGodAPet>
* Copyright 2016 Jay D Dee <jayddee246@gmail.com>
* Copyright 2017-2018 XMR-Stak <https://github.com/fireice-uk>, <https://github.com/psychocrypt>
* Copyright 2016-2018 XMRig <https://github.com/xmrig>, <support@xmrig.com>
* Copyright 2017-2019 XMR-Stak <https://github.com/fireice-uk>, <https://github.com/psychocrypt>
* Copyright 2018-2019 SChernykh <https://github.com/SChernykh>
* Copyright 2016-2019 XMRig <support@xmrig.com>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@ -30,6 +31,7 @@
xmrig::BasicCpuInfo::BasicCpuInfo() :
m_aes(false),
m_avx2(false),
m_brand(),
m_threads(std::thread::hardware_concurrency())
{


@ -86,6 +86,10 @@ static AlgoData const algorithms[] = {
{ "cryptonight-ultralite", "cn-ultralite", xmrig::CRYPTONIGHT_PICO, xmrig::VARIANT_TRTL },
{ "cryptonight_turtle", "cn_turtle", xmrig::CRYPTONIGHT_PICO, xmrig::VARIANT_TRTL },
# endif
# ifndef XMRIG_NO_CN_GPU
{ "cryptonight/gpu", "cn/gpu", xmrig::CRYPTONIGHT, xmrig::VARIANT_GPU },
# endif
};
@ -120,7 +124,8 @@ static const char *variants[] = {
"rto",
"2",
"half",
"trtl"
"trtl",
"gpu"
};


@ -4,7 +4,9 @@
* Copyright 2014 Lucas Jones <https://github.com/lucasjones>
* Copyright 2014-2016 Wolf9466 <https://github.com/OhGodAPet>
* Copyright 2016 Jay D Dee <jayddee246@gmail.com>
* Copyright 2016-2018 XMRig <support@xmrig.com>
* Copyright 2017-2019 XMR-Stak <https://github.com/fireice-uk>, <https://github.com/psychocrypt>
* Copyright 2018-2019 SChernykh <https://github.com/SChernykh>
* Copyright 2016-2019 XMRig <support@xmrig.com>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@ -40,6 +42,7 @@ public:
virtual ~ICpuInfo() {}
virtual bool hasAES() const = 0;
virtual bool hasAVX2() const = 0;
virtual bool isSupported() const = 0;
virtual bool isX64() const = 0;
virtual const char *brand() const = 0;


@ -4,7 +4,7 @@
* Copyright 2014 Lucas Jones <https://github.com/lucasjones>
* Copyright 2014-2016 Wolf9466 <https://github.com/OhGodAPet>
* Copyright 2016 Jay D Dee <jayddee246@gmail.com>
* Copyright 2017-2018 XMR-Stak <https://github.com/fireice-uk>, <https://github.com/psychocrypt>
* Copyright 2017-2019 XMR-Stak <https://github.com/fireice-uk>, <https://github.com/psychocrypt>
* Copyright 2018-2019 SChernykh <https://github.com/SChernykh>
* Copyright 2016-2019 XMRig <https://github.com/xmrig>, <support@xmrig.com>
*
@ -419,6 +419,7 @@ void Pool::rebuild()
addVariant(xmrig::VARIANT_XHV);
addVariant(xmrig::VARIANT_XAO);
addVariant(xmrig::VARIANT_RTO);
addVariant(xmrig::VARIANT_GPU);
addVariant(xmrig::VARIANT_AUTO);
# endif
}


@ -73,6 +73,7 @@ enum Variant {
VARIANT_2 = 8, // CryptoNight variant 2
VARIANT_HALF = 9, // CryptoNight variant 2 with half iterations (Masari/Stellite)
VARIANT_TRTL = 10, // CryptoNight Turtle (TRTL)
VARIANT_GPU = 11, // CryptoNight-GPU (Ryo)
VARIANT_MAX
};


@ -4,9 +4,9 @@
* Copyright 2014 Lucas Jones <https://github.com/lucasjones>
* Copyright 2014-2016 Wolf9466 <https://github.com/OhGodAPet>
* Copyright 2016 Jay D Dee <jayddee246@gmail.com>
* Copyright 2017-2018 XMR-Stak <https://github.com/fireice-uk>, <https://github.com/psychocrypt>
* Copyright 2018 SChernykh <https://github.com/SChernykh>
* Copyright 2016-2018 XMRig <https://github.com/xmrig>, <support@xmrig.com>
* Copyright 2017-2019 XMR-Stak <https://github.com/fireice-uk>, <https://github.com/psychocrypt>
* Copyright 2018-2019 SChernykh <https://github.com/SChernykh>
* Copyright 2016-2019 XMRig <support@xmrig.com>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@ -33,6 +33,7 @@
xmrig::AdvancedCpuInfo::AdvancedCpuInfo() :
m_assembly(ASM_NONE),
m_aes(false),
m_avx2(false),
m_L2_exclusive(false),
m_brand(),
m_cores(0),
@ -83,6 +84,8 @@ xmrig::AdvancedCpuInfo::AdvancedCpuInfo() :
m_assembly = ASM_INTEL;
}
}
m_avx2 = data.flags[CPU_FEATURE_AVX2];
}


@ -4,8 +4,9 @@
* Copyright 2014 Lucas Jones <https://github.com/lucasjones>
* Copyright 2014-2016 Wolf9466 <https://github.com/OhGodAPet>
* Copyright 2016 Jay D Dee <jayddee246@gmail.com>
* Copyright 2017-2018 XMR-Stak <https://github.com/fireice-uk>, <https://github.com/psychocrypt>
* Copyright 2016-2018 XMRig <https://github.com/xmrig>, <support@xmrig.com>
* Copyright 2017-2019 XMR-Stak <https://github.com/fireice-uk>, <https://github.com/psychocrypt>
* Copyright 2018-2019 SChernykh <https://github.com/SChernykh>
* Copyright 2016-2019 XMRig <support@xmrig.com>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@ -41,6 +42,7 @@ protected:
inline Assembly assembly() const override { return m_assembly; }
inline bool hasAES() const override { return m_aes; }
inline bool hasAVX2() const override { return m_avx2; }
inline bool isSupported() const override { return true; }
inline const char *brand() const override { return m_brand; }
inline int32_t cores() const override { return m_cores; }
@ -59,6 +61,7 @@ protected:
private:
Assembly m_assembly;
bool m_aes;
bool m_avx2;
bool m_L2_exclusive;
char m_brand[64];
int32_t m_cores;


@ -5,9 +5,9 @@
* Copyright 2014-2016 Wolf9466 <https://github.com/OhGodAPet>
* Copyright 2016 Jay D Dee <jayddee246@gmail.com>
* Copyright 2016 Imran Yusuff <https://github.com/imranyusuff>
* Copyright 2017-2018 XMR-Stak <https://github.com/fireice-uk>, <https://github.com/psychocrypt>
* Copyright 2017-2019 XMR-Stak <https://github.com/fireice-uk>, <https://github.com/psychocrypt>
* Copyright 2018 Lee Clagett <https://github.com/vtnerd>
* Copyright 2018 SChernykh <https://github.com/SChernykh>
* Copyright 2018-2019 SChernykh <https://github.com/SChernykh>
* Copyright 2016-2019 XMRig <https://github.com/xmrig>, <support@xmrig.com>
*
* This program is free software: you can redistribute it and/or modify
@ -284,6 +284,34 @@ static inline void cn_explode_scratchpad(const __m128i *input, __m128i *output)
}
#ifndef XMRIG_NO_CN_GPU
template<xmrig::Algo ALGO, size_t MEM>
void cn_explode_scratchpad_gpu(const uint8_t *input, uint8_t *output)
{
constexpr size_t hash_size = 200; // 25x8 bytes
alignas(16) uint64_t hash[25];
for (uint64_t i = 0; i < MEM / 512; i++)
{
memcpy(hash, input, hash_size);
hash[0] ^= i;
xmrig::keccakf(hash, 24);
memcpy(output, hash, 160);
output += 160;
xmrig::keccakf(hash, 24);
memcpy(output, hash, 176);
output += 176;
xmrig::keccakf(hash, 24);
memcpy(output, hash, 176);
output += 176;
}
}
#endif
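
Each loop iteration above performs three Keccak-f permutations and writes 160 + 176 + 176 = 512 bytes, so MEM / 512 iterations fill the scratchpad exactly. A one-line sanity check (illustrative only, not part of the commit):

static_assert(160 + 176 + 176 == 512, "one explode iteration emits 512 bytes");
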
template<xmrig::Algo ALGO, size_t MEM, bool SOFT_AES>
static inline void cn_implode_scratchpad(const __m128i *input, __m128i *output)
{
@ -541,6 +569,33 @@ inline void cryptonight_single_hash(const uint8_t *__restrict__ input, size_t si
}
#ifndef XMRIG_NO_CN_GPU
template<size_t ITER, uint32_t MASK>
void cn_gpu_inner_arm(const uint8_t *spad, uint8_t *lpad);
template<xmrig::Algo ALGO, bool SOFT_AES, xmrig::Variant VARIANT>
inline void cryptonight_single_hash_gpu(const uint8_t *__restrict__ input, size_t size, uint8_t *__restrict__ output, cryptonight_ctx **__restrict__ ctx)
{
constexpr size_t MASK = xmrig::CRYPTONIGHT_GPU_MASK;
constexpr size_t ITERATIONS = xmrig::cn_select_iter<ALGO, VARIANT>();
constexpr size_t MEM = xmrig::cn_select_memory<ALGO>();
static_assert(MASK > 0 && ITERATIONS > 0 && MEM > 0, "unsupported algorithm/variant");
xmrig::keccak(input, size, ctx[0]->state);
cn_explode_scratchpad_gpu<ALGO, MEM>(ctx[0]->state, ctx[0]->memory);
cn_gpu_inner_arm<ITERATIONS, MASK>(ctx[0]->state, ctx[0]->memory);
cn_implode_scratchpad<xmrig::CRYPTONIGHT_HEAVY, MEM, SOFT_AES>((__m128i*) ctx[0]->memory, (__m128i*) ctx[0]->state);
xmrig::keccakf((uint64_t*) ctx[0]->state, 24);
memcpy(output, ctx[0]->state, 32);
}
#endif
template<xmrig::Algo ALGO, bool SOFT_AES, xmrig::Variant VARIANT>
inline void cryptonight_double_hash(const uint8_t *__restrict__ input, size_t size, uint8_t *__restrict__ output, struct cryptonight_ctx **__restrict__ ctx)
{


@ -4,7 +4,7 @@
* Copyright 2014 Lucas Jones <https://github.com/lucasjones>
* Copyright 2014-2016 Wolf9466 <https://github.com/OhGodAPet>
* Copyright 2016 Jay D Dee <jayddee246@gmail.com>
* Copyright 2017-2018 XMR-Stak <https://github.com/fireice-uk>, <https://github.com/psychocrypt>
* Copyright 2017-2019 XMR-Stak <https://github.com/fireice-uk>, <https://github.com/psychocrypt>
* Copyright 2018 Lee Clagett <https://github.com/vtnerd>
* Copyright 2018-2019 SChernykh <https://github.com/SChernykh>
* Copyright 2016-2019 XMRig <https://github.com/xmrig>, <support@xmrig.com>
@ -27,6 +27,7 @@
#define XMRIG_CRYPTONIGHT_CONSTANTS_H
#include <stddef.h>
#include <stdint.h>
@ -42,6 +43,9 @@ constexpr const uint32_t CRYPTONIGHT_ITER = 0x80000;
constexpr const uint32_t CRYPTONIGHT_HALF_ITER = 0x40000;
constexpr const uint32_t CRYPTONIGHT_XAO_ITER = 0x100000;
constexpr const uint32_t CRYPTONIGHT_GPU_ITER = 0xC000;
constexpr const uint32_t CRYPTONIGHT_GPU_MASK = 0x1FFFC0;
constexpr const size_t CRYPTONIGHT_LITE_MEMORY = 1 * 1024 * 1024;
constexpr const uint32_t CRYPTONIGHT_LITE_MASK = 0xFFFF0;
constexpr const uint32_t CRYPTONIGHT_LITE_ITER = 0x40000;
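
The two new constants size the cn/gpu inner loop: 0xC000 is 49152 iterations, and the mask keeps scratchpad offsets 64-byte aligned within the 2 MiB pad, leaving room for the four 16-byte lanes addressed by the inner loops. Two quick checks (illustrative only):

static_assert(xmrig::CRYPTONIGHT_GPU_ITER == 49152, "0xC000 iterations");
static_assert((xmrig::CRYPTONIGHT_GPU_MASK & 0x3F) == 0, "offsets stay 64-byte aligned for the four 16-byte lanes");
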
@ -127,6 +131,7 @@ template<> inline constexpr uint32_t cn_select_iter<CRYPTONIGHT, VARIANT_HALF>()
template<> inline constexpr uint32_t cn_select_iter<CRYPTONIGHT, VARIANT_MSR>() { return CRYPTONIGHT_HALF_ITER; }
template<> inline constexpr uint32_t cn_select_iter<CRYPTONIGHT, VARIANT_XAO>() { return CRYPTONIGHT_XAO_ITER; }
template<> inline constexpr uint32_t cn_select_iter<CRYPTONIGHT, VARIANT_RTO>() { return CRYPTONIGHT_ITER; }
template<> inline constexpr uint32_t cn_select_iter<CRYPTONIGHT, VARIANT_GPU>() { return CRYPTONIGHT_GPU_ITER; }
template<> inline constexpr uint32_t cn_select_iter<CRYPTONIGHT_LITE, VARIANT_0>() { return CRYPTONIGHT_LITE_ITER; }
template<> inline constexpr uint32_t cn_select_iter<CRYPTONIGHT_LITE, VARIANT_1>() { return CRYPTONIGHT_LITE_ITER; }
template<> inline constexpr uint32_t cn_select_iter<CRYPTONIGHT_HEAVY, VARIANT_0>() { return CRYPTONIGHT_HEAVY_ITER; }
@ -142,6 +147,9 @@ inline uint32_t cn_select_iter(Algo algorithm, Variant variant)
case VARIANT_HALF:
return CRYPTONIGHT_HALF_ITER;
case VARIANT_GPU:
return CRYPTONIGHT_GPU_ITER;
case VARIANT_RTO:
return CRYPTONIGHT_XAO_ITER;
@ -183,6 +191,7 @@ template<> inline constexpr Variant cn_base_variant<VARIANT_RTO>() { return VA
template<> inline constexpr Variant cn_base_variant<VARIANT_2>() { return VARIANT_2; }
template<> inline constexpr Variant cn_base_variant<VARIANT_HALF>() { return VARIANT_2; }
template<> inline constexpr Variant cn_base_variant<VARIANT_TRTL>() { return VARIANT_2; }
template<> inline constexpr Variant cn_base_variant<VARIANT_GPU>() { return VARIANT_GPU; }
} /* namespace xmrig */


@ -272,4 +272,21 @@ const static uint8_t test_output_pico_trtl[160] = {
#endif
#ifndef XMRIG_NO_CN_GPU
// "cn/gpu"
const static uint8_t test_output_gpu[160] = {
0xE5, 0x5C, 0xB2, 0x3E, 0x51, 0x64, 0x9A, 0x59, 0xB1, 0x27, 0xB9, 0x6B, 0x51, 0x5F, 0x2B, 0xF7,
0xBF, 0xEA, 0x19, 0x97, 0x41, 0xA0, 0x21, 0x6C, 0xF8, 0x38, 0xDE, 0xD0, 0x6E, 0xFF, 0x82, 0xDF,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
#endif
#endif /* XMRIG_CRYPTONIGHT_TEST_H */


@ -4,7 +4,7 @@
* Copyright 2014 Lucas Jones <https://github.com/lucasjones>
* Copyright 2014-2016 Wolf9466 <https://github.com/OhGodAPet>
* Copyright 2016 Jay D Dee <jayddee246@gmail.com>
* Copyright 2017-2018 XMR-Stak <https://github.com/fireice-uk>, <https://github.com/psychocrypt>
* Copyright 2017-2019 XMR-Stak <https://github.com/fireice-uk>, <https://github.com/psychocrypt>
* Copyright 2018 Lee Clagett <https://github.com/vtnerd>
* Copyright 2018-2019 SChernykh <https://github.com/SChernykh>
* Copyright 2016-2019 XMRig <https://github.com/xmrig>, <support@xmrig.com>
@ -35,6 +35,7 @@
#endif
#include "common/cpu/Cpu.h"
#include "common/crypto/keccak.h"
#include "crypto/CryptoNight.h"
#include "crypto/CryptoNight_constants.h"
@ -289,6 +290,34 @@ static inline void cn_explode_scratchpad(const __m128i *input, __m128i *output)
}
#ifndef XMRIG_NO_CN_GPU
template<xmrig::Algo ALGO, size_t MEM>
void cn_explode_scratchpad_gpu(const uint8_t *input, uint8_t *output)
{
constexpr size_t hash_size = 200; // 25x8 bytes
alignas(16) uint64_t hash[25];
for (uint64_t i = 0; i < MEM / 512; i++)
{
memcpy(hash, input, hash_size);
hash[0] ^= i;
xmrig::keccakf(hash, 24);
memcpy(output, hash, 160);
output += 160;
xmrig::keccakf(hash, 24);
memcpy(output, hash, 176);
output += 176;
xmrig::keccakf(hash, 24);
memcpy(output, hash, 176);
output += 176;
}
}
#endif
template<xmrig::Algo ALGO, size_t MEM, bool SOFT_AES>
static inline void cn_implode_scratchpad(const __m128i *input, __m128i *output)
{
@ -566,6 +595,41 @@ inline void cryptonight_single_hash(const uint8_t *__restrict__ input, size_t si
}
#ifndef XMRIG_NO_CN_GPU
template<size_t ITER, uint32_t MASK>
void cn_gpu_inner_avx(const uint8_t *spad, uint8_t *lpad);
template<size_t ITER, uint32_t MASK>
void cn_gpu_inner_ssse3(const uint8_t *spad, uint8_t *lpad);
template<xmrig::Algo ALGO, bool SOFT_AES, xmrig::Variant VARIANT>
inline void cryptonight_single_hash_gpu(const uint8_t *__restrict__ input, size_t size, uint8_t *__restrict__ output, cryptonight_ctx **__restrict__ ctx)
{
constexpr size_t MASK = xmrig::CRYPTONIGHT_GPU_MASK;
constexpr size_t ITERATIONS = xmrig::cn_select_iter<ALGO, VARIANT>();
constexpr size_t MEM = xmrig::cn_select_memory<ALGO>();
static_assert(MASK > 0 && ITERATIONS > 0 && MEM > 0, "unsupported algorithm/variant");
xmrig::keccak(input, size, ctx[0]->state);
cn_explode_scratchpad_gpu<ALGO, MEM>(ctx[0]->state, ctx[0]->memory);
if (xmrig::Cpu::info()->hasAVX2()) {
cn_gpu_inner_avx<ITERATIONS, MASK>(ctx[0]->state, ctx[0]->memory);
} else {
cn_gpu_inner_ssse3<ITERATIONS, MASK>(ctx[0]->state, ctx[0]->memory);
}
cn_implode_scratchpad<xmrig::CRYPTONIGHT_HEAVY, MEM, SOFT_AES>((__m128i*) ctx[0]->memory, (__m128i*) ctx[0]->state);
xmrig::keccakf((uint64_t*) ctx[0]->state, 24);
memcpy(output, ctx[0]->state, 32);
}
#endif
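
A hedged usage sketch for the function above: allocation is simplified here (real callers go through xmrig's Mem helpers), and cryptonight_ctx, CRYPTONIGHT_MEMORY and the hash template are assumed from the surrounding headers.

#include <xmmintrin.h> // _mm_malloc / _mm_free
#include <cstddef>
#include <cstdint>

void example_cn_gpu_hash(const uint8_t *input, size_t size, uint8_t *hash32)
{
    // Page-aligned context and 2 MiB scratchpad (illustrative only).
    cryptonight_ctx *ctx = static_cast<cryptonight_ctx *>(_mm_malloc(sizeof(cryptonight_ctx), 4096));
    ctx->memory = static_cast<uint8_t *>(_mm_malloc(xmrig::CRYPTONIGHT_MEMORY, 4096));
    cryptonight_ctx *ctxs[1] = { ctx };
    cryptonight_single_hash_gpu<xmrig::CRYPTONIGHT, false, xmrig::VARIANT_GPU>(input, size, hash32, ctxs);
    _mm_free(ctx->memory);
    _mm_free(ctx);
}
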
#ifndef XMRIG_NO_ASM
extern "C" void cnv2_mainloop_ivybridge_asm(cryptonight_ctx *ctx);
extern "C" void cnv2_mainloop_ryzen_asm(cryptonight_ctx *ctx);

src/crypto/cn_gpu_arm.cpp (new file, 240 lines)

@ -0,0 +1,240 @@
/* XMRig
* Copyright 2010 Jeff Garzik <jgarzik@pobox.com>
* Copyright 2012-2014 pooler <pooler@litecoinpool.org>
* Copyright 2014 Lucas Jones <https://github.com/lucasjones>
* Copyright 2014-2016 Wolf9466 <https://github.com/OhGodAPet>
* Copyright 2016 Jay D Dee <jayddee246@gmail.com>
* Copyright 2017-2019 XMR-Stak <https://github.com/fireice-uk>, <https://github.com/psychocrypt>
* Copyright 2018-2019 SChernykh <https://github.com/SChernykh>
* Copyright 2016-2019 XMRig <support@xmrig.com>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <arm_neon.h>
#include "crypto/CryptoNight_constants.h"
inline void vandq_f32(float32x4_t &v, uint32_t v2)
{
uint32x4_t vc = vdupq_n_u32(v2);
v = (float32x4_t)vandq_u32((uint32x4_t)v, vc);
}
inline void vorq_f32(float32x4_t &v, uint32_t v2)
{
uint32x4_t vc = vdupq_n_u32(v2);
v = (float32x4_t)vorrq_u32((uint32x4_t)v, vc);
}
template <size_t v>
inline void vrot_si32(int32x4_t &r)
{
r = (int32x4_t)vextq_s8((int8x16_t)r, (int8x16_t)r, v);
}
template <>
inline void vrot_si32<0>(int32x4_t &r)
{
}
inline uint32_t vheor_s32(const int32x4_t &v)
{
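    // Horizontal XOR: fold all four 32-bit lanes into a single value.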
int32x4_t v0 = veorq_s32(v, vrev64q_s32(v));
int32x2_t vf = veor_s32(vget_high_s32(v0), vget_low_s32(v0));
return (uint32_t)vget_lane_s32(vf, 0);
}
inline void prep_dv(int32_t *idx, int32x4_t &v, float32x4_t &n)
{
v = vld1q_s32(idx);
n = vcvtq_f32_s32(v);
}
inline void sub_round(const float32x4_t &n0, const float32x4_t &n1, const float32x4_t &n2, const float32x4_t &n3, const float32x4_t &rnd_c, float32x4_t &n, float32x4_t &d, float32x4_t &c)
{
float32x4_t ln1 = vaddq_f32(n1, c);
float32x4_t nn = vmulq_f32(n0, c);
nn = vmulq_f32(ln1, vmulq_f32(nn, nn));
vandq_f32(nn, 0xFEFFFFFF);
vorq_f32(nn, 0x00800000);
n = vaddq_f32(n, nn);
float32x4_t ln3 = vsubq_f32(n3, c);
float32x4_t dd = vmulq_f32(n2, c);
dd = vmulq_f32(ln3, vmulq_f32(dd, dd));
vandq_f32(dd, 0xFEFFFFFF);
vorq_f32(dd, 0x00800000);
d = vaddq_f32(d, dd);
// Constant feedback
c = vaddq_f32(c, rnd_c);
c = vaddq_f32(c, vdupq_n_f32(0.734375f));
float32x4_t r = vaddq_f32(nn, dd);
vandq_f32(r, 0x807FFFFF);
vorq_f32(r, 0x40000000);
c = vaddq_f32(c, r);
}
inline void round_compute(const float32x4_t &n0, const float32x4_t &n1, const float32x4_t &n2, const float32x4_t &n3, const float32x4_t &rnd_c, float32x4_t &c, float32x4_t &r)
{
float32x4_t n = vdupq_n_f32(0.0f), d = vdupq_n_f32(0.0f);
sub_round(n0, n1, n2, n3, rnd_c, n, d, c);
sub_round(n1, n2, n3, n0, rnd_c, n, d, c);
sub_round(n2, n3, n0, n1, rnd_c, n, d, c);
sub_round(n3, n0, n1, n2, rnd_c, n, d, c);
sub_round(n3, n2, n1, n0, rnd_c, n, d, c);
sub_round(n2, n1, n0, n3, rnd_c, n, d, c);
sub_round(n1, n0, n3, n2, rnd_c, n, d, c);
sub_round(n0, n3, n2, n1, rnd_c, n, d, c);
// Make sure abs(d) > 2.0; this prevents division by zero and accidental overflow from dividing by values < 1.0
vandq_f32(d, 0xFF7FFFFF);
vorq_f32(d, 0x40000000);
r = vaddq_f32(r, vdivq_f32(n, d));
}
// 112×4 = 448
template <bool add>
inline int32x4_t single_compute(const float32x4_t &n0, const float32x4_t &n1, const float32x4_t &n2, const float32x4_t &n3, float cnt, const float32x4_t &rnd_c, float32x4_t &sum)
{
float32x4_t c = vdupq_n_f32(cnt);
float32x4_t r = vdupq_n_f32(0.0f);
round_compute(n0, n1, n2, n3, rnd_c, c, r);
round_compute(n0, n1, n2, n3, rnd_c, c, r);
round_compute(n0, n1, n2, n3, rnd_c, c, r);
round_compute(n0, n1, n2, n3, rnd_c, c, r);
// do a quick fmod by setting exp to 2
vandq_f32(r, 0x807FFFFF);
vorq_f32(r, 0x40000000);
if (add) {
sum = vaddq_f32(sum, r);
} else {
sum = r;
}
const float32x4_t cc2 = vdupq_n_f32(536870880.0f);
r = vmulq_f32(r, cc2); // 35
return vcvtq_s32_f32(r);
}
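
The recurring and/or mask pairs above (0x807FFFFF, then 0x40000000) are a cheap fmod-style clamp: they keep sign and mantissa but force the exponent so the magnitude always lands in [2, 4). A scalar illustration of the same bit trick (sketch only):

#include <cstdint>
#include <cstring>

// Keep sign + mantissa, force the exponent field to 128 (2^1),
// so |result| always falls in [2.0, 4.0).
inline float clamp_exp_2_4(float x)
{
    uint32_t bits;
    std::memcpy(&bits, &x, sizeof bits);
    bits = (bits & 0x807FFFFFu) | 0x40000000u;
    std::memcpy(&x, &bits, sizeof bits);
    return x;
}
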
template<size_t rot>
inline void single_compute_wrap(const float32x4_t &n0, const float32x4_t &n1, const float32x4_t &n2, const float32x4_t &n3, float cnt, const float32x4_t &rnd_c, float32x4_t &sum, int32x4_t &out)
{
int32x4_t r = single_compute<rot % 2 != 0>(n0, n1, n2, n3, cnt, rnd_c, sum);
vrot_si32<rot>(r);
out = veorq_s32(out, r);
}
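// (idx & MASK) selects a 64-byte-aligned block of the scratchpad; n picks one of its four 16-byte lanes.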
template<uint32_t MASK>
inline int32_t *scratchpad_ptr(uint8_t* lpad, uint32_t idx, size_t n) { return reinterpret_cast<int32_t *>(lpad + (idx & MASK) + n * 16); }
template<size_t ITER, uint32_t MASK>
void cn_gpu_inner_arm(const uint8_t *spad, uint8_t *lpad)
{
uint32_t s = reinterpret_cast<const uint32_t*>(spad)[0] >> 8;
int32_t *idx0 = scratchpad_ptr<MASK>(lpad, s, 0);
int32_t *idx1 = scratchpad_ptr<MASK>(lpad, s, 1);
int32_t *idx2 = scratchpad_ptr<MASK>(lpad, s, 2);
int32_t *idx3 = scratchpad_ptr<MASK>(lpad, s, 3);
float32x4_t sum0 = vdupq_n_f32(0.0f);
for (size_t i = 0; i < ITER; i++) {
float32x4_t n0, n1, n2, n3;
int32x4_t v0, v1, v2, v3;
float32x4_t suma, sumb, sum1, sum2, sum3;
prep_dv(idx0, v0, n0);
prep_dv(idx1, v1, n1);
prep_dv(idx2, v2, n2);
prep_dv(idx3, v3, n3);
float32x4_t rc = sum0;
int32x4_t out, out2;
out = vdupq_n_s32(0);
single_compute_wrap<0>(n0, n1, n2, n3, 1.3437500f, rc, suma, out);
single_compute_wrap<1>(n0, n2, n3, n1, 1.2812500f, rc, suma, out);
single_compute_wrap<2>(n0, n3, n1, n2, 1.3593750f, rc, sumb, out);
single_compute_wrap<3>(n0, n3, n2, n1, 1.3671875f, rc, sumb, out);
sum0 = vaddq_f32(suma, sumb);
vst1q_s32(idx0, veorq_s32(v0, out));
out2 = out;
out = vdupq_n_s32(0);
single_compute_wrap<0>(n1, n0, n2, n3, 1.4296875f, rc, suma, out);
single_compute_wrap<1>(n1, n2, n3, n0, 1.3984375f, rc, suma, out);
single_compute_wrap<2>(n1, n3, n0, n2, 1.3828125f, rc, sumb, out);
single_compute_wrap<3>(n1, n3, n2, n0, 1.3046875f, rc, sumb, out);
sum1 = vaddq_f32(suma, sumb);
vst1q_s32(idx1, veorq_s32(v1, out));
out2 = veorq_s32(out2, out);
out = vdupq_n_s32(0);
single_compute_wrap<0>(n2, n1, n0, n3, 1.4140625f, rc, suma, out);
single_compute_wrap<1>(n2, n0, n3, n1, 1.2734375f, rc, suma, out);
single_compute_wrap<2>(n2, n3, n1, n0, 1.2578125f, rc, sumb, out);
single_compute_wrap<3>(n2, n3, n0, n1, 1.2890625f, rc, sumb, out);
sum2 = vaddq_f32(suma, sumb);
vst1q_s32(idx2, veorq_s32(v2, out));
out2 = veorq_s32(out2, out);
out = vdupq_n_s32(0);
single_compute_wrap<0>(n3, n1, n2, n0, 1.3203125f, rc, suma, out);
single_compute_wrap<1>(n3, n2, n0, n1, 1.3515625f, rc, suma, out);
single_compute_wrap<2>(n3, n0, n1, n2, 1.3359375f, rc, sumb, out);
single_compute_wrap<3>(n3, n0, n2, n1, 1.4609375f, rc, sumb, out);
sum3 = vaddq_f32(suma, sumb);
vst1q_s32(idx3, veorq_s32(v3, out));
out2 = veorq_s32(out2, out);
sum0 = vaddq_f32(sum0, sum1);
sum2 = vaddq_f32(sum2, sum3);
sum0 = vaddq_f32(sum0, sum2);
const float32x4_t cc1 = vdupq_n_f32(16777216.0f);
const float32x4_t cc2 = vdupq_n_f32(64.0f);
vandq_f32(sum0, 0x7fffffff); // take abs(va) by masking the float sign bit
// vs range 0 - 64
n0 = vmulq_f32(sum0, cc1);
v0 = vcvtq_s32_f32(n0);
v0 = veorq_s32(v0, out2);
uint32_t n = vheor_s32(v0);
// vs is now between 0 and 1
sum0 = vdivq_f32(sum0, cc2);
idx0 = scratchpad_ptr<MASK>(lpad, n, 0);
idx1 = scratchpad_ptr<MASK>(lpad, n, 1);
idx2 = scratchpad_ptr<MASK>(lpad, n, 2);
idx3 = scratchpad_ptr<MASK>(lpad, n, 3);
}
}
template void cn_gpu_inner_arm<xmrig::CRYPTONIGHT_GPU_ITER, xmrig::CRYPTONIGHT_GPU_MASK>(const uint8_t* spad, uint8_t* lpad);

src/crypto/cn_gpu_avx.cpp (new file, 203 lines)

@ -0,0 +1,203 @@
/* XMRig
* Copyright 2010 Jeff Garzik <jgarzik@pobox.com>
* Copyright 2012-2014 pooler <pooler@litecoinpool.org>
* Copyright 2014 Lucas Jones <https://github.com/lucasjones>
* Copyright 2014-2016 Wolf9466 <https://github.com/OhGodAPet>
* Copyright 2016 Jay D Dee <jayddee246@gmail.com>
* Copyright 2017-2019 XMR-Stak <https://github.com/fireice-uk>, <https://github.com/psychocrypt>
* Copyright 2018-2019 SChernykh <https://github.com/SChernykh>
* Copyright 2016-2019 XMRig <support@xmrig.com>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "crypto/CryptoNight_constants.h"
#ifdef __GNUC__
# include <x86intrin.h>
#else
# include <intrin.h>
# define __restrict__ __restrict
#endif
inline void prep_dv_avx(__m256i* idx, __m256i& v, __m256& n01)
{
v = _mm256_load_si256(idx);
n01 = _mm256_cvtepi32_ps(v);
}
inline __m256 fma_break(const __m256& x)
{
// Break the dependency chain by setting the exp to ?????01
__m256 xx = _mm256_and_ps(_mm256_castsi256_ps(_mm256_set1_epi32(0xFEFFFFFF)), x);
return _mm256_or_ps(_mm256_castsi256_ps(_mm256_set1_epi32(0x00800000)), xx);
}
// 14
inline void sub_round(const __m256& n0, const __m256& n1, const __m256& n2, const __m256& n3, const __m256& rnd_c, __m256& n, __m256& d, __m256& c)
{
__m256 nn = _mm256_mul_ps(n0, c);
nn = _mm256_mul_ps(_mm256_add_ps(n1, c), _mm256_mul_ps(nn, nn));
nn = fma_break(nn);
n = _mm256_add_ps(n, nn);
__m256 dd = _mm256_mul_ps(n2, c);
dd = _mm256_mul_ps(_mm256_sub_ps(n3, c), _mm256_mul_ps(dd, dd));
dd = fma_break(dd);
d = _mm256_add_ps(d, dd);
// Constant feedback
c = _mm256_add_ps(c, rnd_c);
c = _mm256_add_ps(c, _mm256_set1_ps(0.734375f));
__m256 r = _mm256_add_ps(nn, dd);
r = _mm256_and_ps(_mm256_castsi256_ps(_mm256_set1_epi32(0x807FFFFF)), r);
r = _mm256_or_ps(_mm256_castsi256_ps(_mm256_set1_epi32(0x40000000)), r);
c = _mm256_add_ps(c, r);
}
// 14*8 + 2 = 112
inline void round_compute(const __m256& n0, const __m256& n1, const __m256& n2, const __m256& n3, const __m256& rnd_c, __m256& c, __m256& r)
{
__m256 n = _mm256_setzero_ps(), d = _mm256_setzero_ps();
sub_round(n0, n1, n2, n3, rnd_c, n, d, c);
sub_round(n1, n2, n3, n0, rnd_c, n, d, c);
sub_round(n2, n3, n0, n1, rnd_c, n, d, c);
sub_round(n3, n0, n1, n2, rnd_c, n, d, c);
sub_round(n3, n2, n1, n0, rnd_c, n, d, c);
sub_round(n2, n1, n0, n3, rnd_c, n, d, c);
sub_round(n1, n0, n3, n2, rnd_c, n, d, c);
sub_round(n0, n3, n2, n1, rnd_c, n, d, c);
// Make sure abs(d) > 2.0; this prevents division by zero and accidental overflow from dividing by values < 1.0
d = _mm256_and_ps(_mm256_castsi256_ps(_mm256_set1_epi32(0xFF7FFFFF)), d);
d = _mm256_or_ps(_mm256_castsi256_ps(_mm256_set1_epi32(0x40000000)), d);
r = _mm256_add_ps(r, _mm256_div_ps(n, d));
}
// 112×4 = 448
template <bool add>
inline __m256i double_compute(const __m256& n0, const __m256& n1, const __m256& n2, const __m256& n3,
float lcnt, float hcnt, const __m256& rnd_c, __m256& sum)
{
__m256 c = _mm256_insertf128_ps(_mm256_castps128_ps256(_mm_set1_ps(lcnt)), _mm_set1_ps(hcnt), 1);
__m256 r = _mm256_setzero_ps();
round_compute(n0, n1, n2, n3, rnd_c, c, r);
round_compute(n0, n1, n2, n3, rnd_c, c, r);
round_compute(n0, n1, n2, n3, rnd_c, c, r);
round_compute(n0, n1, n2, n3, rnd_c, c, r);
// do a quick fmod by setting exp to 2
r = _mm256_and_ps(_mm256_castsi256_ps(_mm256_set1_epi32(0x807FFFFF)), r);
r = _mm256_or_ps(_mm256_castsi256_ps(_mm256_set1_epi32(0x40000000)), r);
if(add)
sum = _mm256_add_ps(sum, r);
else
sum = r;
r = _mm256_mul_ps(r, _mm256_set1_ps(536870880.0f)); // 35
return _mm256_cvttps_epi32(r);
}
template <size_t rot>
inline void double_compute_wrap(const __m256& n0, const __m256& n1, const __m256& n2, const __m256& n3,
float lcnt, float hcnt, const __m256& rnd_c, __m256& sum, __m256i& out)
{
__m256i r = double_compute<rot % 2 != 0>(n0, n1, n2, n3, lcnt, hcnt, rnd_c, sum);
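    // Note: _mm256_bslli_epi128/_mm256_bsrli_epi128 shift each 128-bit lane
    // independently, so this rotates both halves just like the SSSE3 path.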
if(rot != 0)
r = _mm256_or_si256(_mm256_bslli_epi128(r, 16 - rot), _mm256_bsrli_epi128(r, rot));
out = _mm256_xor_si256(out, r);
}
template<uint32_t MASK>
inline __m256i* scratchpad_ptr(uint8_t* lpad, uint32_t idx, size_t n) { return reinterpret_cast<__m256i*>(lpad + (idx & MASK) + n*16); }
template<size_t ITER, uint32_t MASK>
void cn_gpu_inner_avx(const uint8_t* spad, uint8_t* lpad)
{
uint32_t s = reinterpret_cast<const uint32_t*>(spad)[0] >> 8;
__m256i* idx0 = scratchpad_ptr<MASK>(lpad, s, 0);
__m256i* idx2 = scratchpad_ptr<MASK>(lpad, s, 2);
__m256 sum0 = _mm256_setzero_ps();
for(size_t i = 0; i < ITER; i++)
{
__m256i v01, v23;
__m256 suma, sumb, sum1;
__m256 rc = sum0;
__m256 n01, n23;
prep_dv_avx(idx0, v01, n01);
prep_dv_avx(idx2, v23, n23);
__m256i out, out2;
__m256 n10, n22, n33;
n10 = _mm256_permute2f128_ps(n01, n01, 0x01);
n22 = _mm256_permute2f128_ps(n23, n23, 0x00);
n33 = _mm256_permute2f128_ps(n23, n23, 0x11);
out = _mm256_setzero_si256();
double_compute_wrap<0>(n01, n10, n22, n33, 1.3437500f, 1.4296875f, rc, suma, out);
double_compute_wrap<1>(n01, n22, n33, n10, 1.2812500f, 1.3984375f, rc, suma, out);
double_compute_wrap<2>(n01, n33, n10, n22, 1.3593750f, 1.3828125f, rc, sumb, out);
double_compute_wrap<3>(n01, n33, n22, n10, 1.3671875f, 1.3046875f, rc, sumb, out);
_mm256_store_si256(idx0, _mm256_xor_si256(v01, out));
sum0 = _mm256_add_ps(suma, sumb);
out2 = out;
__m256 n11, n02, n30;
n11 = _mm256_permute2f128_ps(n01, n01, 0x11);
n02 = _mm256_permute2f128_ps(n01, n23, 0x20);
n30 = _mm256_permute2f128_ps(n01, n23, 0x03);
out = _mm256_setzero_si256();
double_compute_wrap<0>(n23, n11, n02, n30, 1.4140625f, 1.3203125f, rc, suma, out);
double_compute_wrap<1>(n23, n02, n30, n11, 1.2734375f, 1.3515625f, rc, suma, out);
double_compute_wrap<2>(n23, n30, n11, n02, 1.2578125f, 1.3359375f, rc, sumb, out);
double_compute_wrap<3>(n23, n30, n02, n11, 1.2890625f, 1.4609375f, rc, sumb, out);
_mm256_store_si256(idx2, _mm256_xor_si256(v23, out));
sum1 = _mm256_add_ps(suma, sumb);
out2 = _mm256_xor_si256(out2, out);
out2 = _mm256_xor_si256(_mm256_permute2x128_si256(out2,out2,0x41), out2);
suma = _mm256_permute2f128_ps(sum0, sum1, 0x30);
sumb = _mm256_permute2f128_ps(sum0, sum1, 0x21);
sum0 = _mm256_add_ps(suma, sumb);
sum0 = _mm256_add_ps(sum0, _mm256_permute2f128_ps(sum0, sum0, 0x41));
// Clear the high 128 bits
__m128 sum = _mm256_castps256_ps128(sum0);
sum = _mm_and_ps(_mm_castsi128_ps(_mm_set1_epi32(0x7fffffff)), sum); // take abs(va) by masking the float sign bit
// vs range 0 - 64
__m128i v0 = _mm_cvttps_epi32(_mm_mul_ps(sum, _mm_set1_ps(16777216.0f)));
v0 = _mm_xor_si128(v0, _mm256_castsi256_si128(out2));
__m128i v1 = _mm_shuffle_epi32(v0, _MM_SHUFFLE(0, 1, 2, 3));
v0 = _mm_xor_si128(v0, v1);
v1 = _mm_shuffle_epi32(v0, _MM_SHUFFLE(0, 1, 0, 1));
v0 = _mm_xor_si128(v0, v1);
// vs is now between 0 and 1
sum = _mm_div_ps(sum, _mm_set1_ps(64.0f));
sum0 = _mm256_insertf128_ps(_mm256_castps128_ps256(sum), sum, 1);
uint32_t n = _mm_cvtsi128_si32(v0);
idx0 = scratchpad_ptr<MASK>(lpad, n, 0);
idx2 = scratchpad_ptr<MASK>(lpad, n, 2);
}
}
template void cn_gpu_inner_avx<xmrig::CRYPTONIGHT_GPU_ITER, xmrig::CRYPTONIGHT_GPU_MASK>(const uint8_t* spad, uint8_t* lpad);

src/crypto/cn_gpu_ssse3.cpp (new file, 210 lines)

@ -0,0 +1,210 @@
/* XMRig
* Copyright 2010 Jeff Garzik <jgarzik@pobox.com>
* Copyright 2012-2014 pooler <pooler@litecoinpool.org>
* Copyright 2014 Lucas Jones <https://github.com/lucasjones>
* Copyright 2014-2016 Wolf9466 <https://github.com/OhGodAPet>
* Copyright 2016 Jay D Dee <jayddee246@gmail.com>
* Copyright 2017-2019 XMR-Stak <https://github.com/fireice-uk>, <https://github.com/psychocrypt>
* Copyright 2018-2019 SChernykh <https://github.com/SChernykh>
* Copyright 2016-2019 XMRig <support@xmrig.com>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "crypto/CryptoNight_constants.h"
#ifdef __GNUC__
# include <x86intrin.h>
#else
# include <intrin.h>
# define __restrict__ __restrict
#endif
inline void prep_dv(__m128i* idx, __m128i& v, __m128& n)
{
v = _mm_load_si128(idx);
n = _mm_cvtepi32_ps(v);
}
inline __m128 fma_break(__m128 x)
{
// Break the dependency chain by setting the exp to ?????01
x = _mm_and_ps(_mm_castsi128_ps(_mm_set1_epi32(0xFEFFFFFF)), x);
return _mm_or_ps(_mm_castsi128_ps(_mm_set1_epi32(0x00800000)), x);
}
// 14
inline void sub_round(__m128 n0, __m128 n1, __m128 n2, __m128 n3, __m128 rnd_c, __m128& n, __m128& d, __m128& c)
{
n1 = _mm_add_ps(n1, c);
__m128 nn = _mm_mul_ps(n0, c);
nn = _mm_mul_ps(n1, _mm_mul_ps(nn,nn));
nn = fma_break(nn);
n = _mm_add_ps(n, nn);
n3 = _mm_sub_ps(n3, c);
__m128 dd = _mm_mul_ps(n2, c);
dd = _mm_mul_ps(n3, _mm_mul_ps(dd,dd));
dd = fma_break(dd);
d = _mm_add_ps(d, dd);
// Constant feedback
c = _mm_add_ps(c, rnd_c);
c = _mm_add_ps(c, _mm_set1_ps(0.734375f));
__m128 r = _mm_add_ps(nn, dd);
r = _mm_and_ps(_mm_castsi128_ps(_mm_set1_epi32(0x807FFFFF)), r);
r = _mm_or_ps(_mm_castsi128_ps(_mm_set1_epi32(0x40000000)), r);
c = _mm_add_ps(c, r);
}
// 14*8 + 2 = 112
inline void round_compute(__m128 n0, __m128 n1, __m128 n2, __m128 n3, __m128 rnd_c, __m128& c, __m128& r)
{
__m128 n = _mm_setzero_ps(), d = _mm_setzero_ps();
sub_round(n0, n1, n2, n3, rnd_c, n, d, c);
sub_round(n1, n2, n3, n0, rnd_c, n, d, c);
sub_round(n2, n3, n0, n1, rnd_c, n, d, c);
sub_round(n3, n0, n1, n2, rnd_c, n, d, c);
sub_round(n3, n2, n1, n0, rnd_c, n, d, c);
sub_round(n2, n1, n0, n3, rnd_c, n, d, c);
sub_round(n1, n0, n3, n2, rnd_c, n, d, c);
sub_round(n0, n3, n2, n1, rnd_c, n, d, c);
// Make sure abs(d) > 2.0; this prevents division by zero and accidental overflow from dividing by values < 1.0
d = _mm_and_ps(_mm_castsi128_ps(_mm_set1_epi32(0xFF7FFFFF)), d);
d = _mm_or_ps(_mm_castsi128_ps(_mm_set1_epi32(0x40000000)), d);
r = _mm_add_ps(r, _mm_div_ps(n, d));
}
// 112×4 = 448
template<bool add>
inline __m128i single_compute(__m128 n0, __m128 n1, __m128 n2, __m128 n3, float cnt, __m128 rnd_c, __m128& sum)
{
__m128 c = _mm_set1_ps(cnt);
__m128 r = _mm_setzero_ps();
round_compute(n0, n1, n2, n3, rnd_c, c, r);
round_compute(n0, n1, n2, n3, rnd_c, c, r);
round_compute(n0, n1, n2, n3, rnd_c, c, r);
round_compute(n0, n1, n2, n3, rnd_c, c, r);
// do a quick fmod by setting exp to 2
r = _mm_and_ps(_mm_castsi128_ps(_mm_set1_epi32(0x807FFFFF)), r);
r = _mm_or_ps(_mm_castsi128_ps(_mm_set1_epi32(0x40000000)), r);
if(add)
sum = _mm_add_ps(sum, r);
else
sum = r;
r = _mm_mul_ps(r, _mm_set1_ps(536870880.0f)); // 35
return _mm_cvttps_epi32(r);
}
template<size_t rot>
inline void single_compute_wrap(__m128 n0, __m128 n1, __m128 n2, __m128 n3, float cnt, __m128 rnd_c, __m128& sum, __m128i& out)
{
__m128i r = single_compute<rot % 2 != 0>(n0, n1, n2, n3, cnt, rnd_c, sum);
if(rot != 0)
r = _mm_or_si128(_mm_slli_si128(r, 16 - rot), _mm_srli_si128(r, rot));
out = _mm_xor_si128(out, r);
}
template<uint32_t MASK>
inline __m128i* scratchpad_ptr(uint8_t* lpad, uint32_t idx, size_t n) { return reinterpret_cast<__m128i*>(lpad + (idx & MASK) + n*16); }
template<size_t ITER, uint32_t MASK>
void cn_gpu_inner_ssse3(const uint8_t* spad, uint8_t* lpad)
{
uint32_t s = reinterpret_cast<const uint32_t*>(spad)[0] >> 8;
__m128i* idx0 = scratchpad_ptr<MASK>(lpad, s, 0);
__m128i* idx1 = scratchpad_ptr<MASK>(lpad, s, 1);
__m128i* idx2 = scratchpad_ptr<MASK>(lpad, s, 2);
__m128i* idx3 = scratchpad_ptr<MASK>(lpad, s, 3);
__m128 sum0 = _mm_setzero_ps();
for(size_t i = 0; i < ITER; i++)
{
__m128 n0, n1, n2, n3;
__m128i v0, v1, v2, v3;
__m128 suma, sumb, sum1, sum2, sum3;
prep_dv(idx0, v0, n0);
prep_dv(idx1, v1, n1);
prep_dv(idx2, v2, n2);
prep_dv(idx3, v3, n3);
__m128 rc = sum0;
__m128i out, out2;
out = _mm_setzero_si128();
single_compute_wrap<0>(n0, n1, n2, n3, 1.3437500f, rc, suma, out);
single_compute_wrap<1>(n0, n2, n3, n1, 1.2812500f, rc, suma, out);
single_compute_wrap<2>(n0, n3, n1, n2, 1.3593750f, rc, sumb, out);
single_compute_wrap<3>(n0, n3, n2, n1, 1.3671875f, rc, sumb, out);
sum0 = _mm_add_ps(suma, sumb);
_mm_store_si128(idx0, _mm_xor_si128(v0, out));
out2 = out;
out = _mm_setzero_si128();
single_compute_wrap<0>(n1, n0, n2, n3, 1.4296875f, rc, suma, out);
single_compute_wrap<1>(n1, n2, n3, n0, 1.3984375f, rc, suma, out);
single_compute_wrap<2>(n1, n3, n0, n2, 1.3828125f, rc, sumb, out);
single_compute_wrap<3>(n1, n3, n2, n0, 1.3046875f, rc, sumb, out);
sum1 = _mm_add_ps(suma, sumb);
_mm_store_si128(idx1, _mm_xor_si128(v1, out));
out2 = _mm_xor_si128(out2, out);
out = _mm_setzero_si128();
single_compute_wrap<0>(n2, n1, n0, n3, 1.4140625f, rc, suma, out);
single_compute_wrap<1>(n2, n0, n3, n1, 1.2734375f, rc, suma, out);
single_compute_wrap<2>(n2, n3, n1, n0, 1.2578125f, rc, sumb, out);
single_compute_wrap<3>(n2, n3, n0, n1, 1.2890625f, rc, sumb, out);
sum2 = _mm_add_ps(suma, sumb);
_mm_store_si128(idx2, _mm_xor_si128(v2, out));
out2 = _mm_xor_si128(out2, out);
out = _mm_setzero_si128();
single_compute_wrap<0>(n3, n1, n2, n0, 1.3203125f, rc, suma, out);
single_compute_wrap<1>(n3, n2, n0, n1, 1.3515625f, rc, suma, out);
single_compute_wrap<2>(n3, n0, n1, n2, 1.3359375f, rc, sumb, out);
single_compute_wrap<3>(n3, n0, n2, n1, 1.4609375f, rc, sumb, out);
sum3 = _mm_add_ps(suma, sumb);
_mm_store_si128(idx3, _mm_xor_si128(v3, out));
out2 = _mm_xor_si128(out2, out);
sum0 = _mm_add_ps(sum0, sum1);
sum2 = _mm_add_ps(sum2, sum3);
sum0 = _mm_add_ps(sum0, sum2);
sum0 = _mm_and_ps(_mm_castsi128_ps(_mm_set1_epi32(0x7fffffff)), sum0); // take abs(va) by masking the float sign bit
// vs range 0 - 64
n0 = _mm_mul_ps(sum0, _mm_set1_ps(16777216.0f));
v0 = _mm_cvttps_epi32(n0);
v0 = _mm_xor_si128(v0, out2);
v1 = _mm_shuffle_epi32(v0, _MM_SHUFFLE(0, 1, 2, 3));
v0 = _mm_xor_si128(v0, v1);
v1 = _mm_shuffle_epi32(v0, _MM_SHUFFLE(0, 1, 0, 1));
v0 = _mm_xor_si128(v0, v1);
// vs is now between 0 and 1
sum0 = _mm_div_ps(sum0, _mm_set1_ps(64.0f));
uint32_t n = _mm_cvtsi128_si32(v0);
idx0 = scratchpad_ptr<MASK>(lpad, n, 0);
idx1 = scratchpad_ptr<MASK>(lpad, n, 1);
idx2 = scratchpad_ptr<MASK>(lpad, n, 2);
idx3 = scratchpad_ptr<MASK>(lpad, n, 3);
}
}
template void cn_gpu_inner_ssse3<xmrig::CRYPTONIGHT_GPU_ITER, xmrig::CRYPTONIGHT_GPU_MASK>(const uint8_t* spad, uint8_t* lpad);


@ -250,6 +250,21 @@ xmrig::CpuThread::cn_hash_fun xmrig::CpuThread::fn(Algo algorithm, AlgoVariant a
nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_TRTL
# ifndef XMRIG_NO_CN_GPU
cryptonight_single_hash_gpu<CRYPTONIGHT, false, VARIANT_GPU>,
nullptr,
cryptonight_single_hash_gpu<CRYPTONIGHT, true, VARIANT_GPU>,
nullptr,
nullptr,
nullptr,
nullptr,
nullptr,
nullptr,
nullptr,
# else
nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_GPU
# endif
# ifndef XMRIG_NO_AEON
cryptonight_single_hash<CRYPTONIGHT_LITE, false, VARIANT_0>,
cryptonight_double_hash<CRYPTONIGHT_LITE, false, VARIANT_0>,
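
For orientation, each variant row in this table holds ten entries (the ten AlgoVariant combinations of 1- to 5-way hashing with hard or soft AES). A hypothetical mirror of the flat-index computation (the real formula lives in CpuThread::fn; layout assumed from context):

#include <cstddef>

// Hypothetical: flat index into the fn table, ten entries per variant.
inline size_t fn_index(size_t algorithm, size_t variant, size_t av)
{
    const size_t perVariant = 10; // av = 1..10
    const size_t variantMax = 12; // VARIANT_MAX after this merge
    return variantMax * perVariant * algorithm + perVariant * variant + (av - 1);
}
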
@ -282,6 +297,7 @@ xmrig::CpuThread::cn_hash_fun xmrig::CpuThread::fn(Algo algorithm, AlgoVariant a
nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_2
nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_HALF
nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_TRTL
nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_GPU
# else
nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_0
nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_1
@ -294,6 +310,7 @@ xmrig::CpuThread::cn_hash_fun xmrig::CpuThread::fn(Algo algorithm, AlgoVariant a
nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_2
nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_HALF
nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_TRTL
nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_GPU
# endif
# ifndef XMRIG_NO_SUMO
@ -340,6 +357,7 @@ xmrig::CpuThread::cn_hash_fun xmrig::CpuThread::fn(Algo algorithm, AlgoVariant a
nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_2
nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_HALF
nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_TRTL
nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_GPU
# else
nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_0
nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_1
@ -352,6 +370,7 @@ xmrig::CpuThread::cn_hash_fun xmrig::CpuThread::fn(Algo algorithm, AlgoVariant a
nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_2
nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_HALF
nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_TRTL
nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_GPU
# endif
# ifndef XMRIG_NO_CN_PICO
nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_0
@ -375,6 +394,8 @@ xmrig::CpuThread::cn_hash_fun xmrig::CpuThread::fn(Algo algorithm, AlgoVariant a
cryptonight_triple_hash<CRYPTONIGHT_PICO, true, VARIANT_TRTL>,
cryptonight_quad_hash<CRYPTONIGHT_PICO, true, VARIANT_TRTL>,
cryptonight_penta_hash<CRYPTONIGHT_PICO, true, VARIANT_TRTL>,
nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_GPU
# else
nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_0
nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_1
@ -387,6 +408,7 @@ xmrig::CpuThread::cn_hash_fun xmrig::CpuThread::fn(Algo algorithm, AlgoVariant a
nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_2
nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_HALF
nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_TRTL
nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_GPU
# endif
# ifndef XMRIG_NO_ASM
cryptonight_single_hash_asm<CRYPTONIGHT, VARIANT_2, ASM_INTEL>,


@ -61,6 +61,9 @@ bool MultiWorker<N>::selfTest()
verify(VARIANT_MSR, test_output_msr) &&
verify(VARIANT_XAO, test_output_xao) &&
verify(VARIANT_RTO, test_output_rto) &&
# ifndef XMRIG_NO_CN_GPU
verify(VARIANT_GPU, test_output_gpu) &&
# endif
verify(VARIANT_HALF, test_output_half);
}