From 915729bec69e39a6c3c49ba9def5ddaa834b1fee Mon Sep 17 00:00:00 2001
From: SChernykh
Date: Mon, 1 Jul 2019 20:48:51 +0200
Subject: [PATCH] Fixed MinGW compilation

---
 CMakeLists.txt                               |  5 ++++-
 cmake/flags.cmake                            |  2 +-
 src/crypto/randomx/bytecode_machine.hpp      |  6 +++---
 src/crypto/randomx/common.hpp                | 14 +++++++-------
 src/crypto/randomx/instructions_portable.cpp | 20 ++++++++++----------
 src/crypto/randomx/intrin_portable.h         |  4 ++--
 src/crypto/randomx/superscalar.cpp           |  2 +-
 src/workers/Workers.cpp                      |  3 +++
 8 files changed, 31 insertions(+), 25 deletions(-)

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 57af7068c..7e3cf2a58 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -185,13 +185,16 @@ if (WITH_RANDOMX)
         src/crypto/randomx/vm_compiled_light.cpp
         src/crypto/randomx/blake2/blake2b.c
     )
+    if (NOT ARCH_ID)
+        set(ARCH_ID ${CMAKE_HOST_SYSTEM_PROCESSOR})
+    endif()
     if (CMAKE_C_COMPILER_ID MATCHES MSVC)
         enable_language(ASM_MASM)
         list(APPEND SOURCES_CRYPTO
             src/crypto/randomx/jit_compiler_x86_static.asm
             src/crypto/randomx/jit_compiler_x86.cpp
         )
-    elseif (ARCH_ID STREQUAL "x86_64" OR ARCH_ID STREQUAL "x86-64" OR ARCH_ID STREQUAL "amd64")
+    elseif (ARCH_ID STREQUAL "x86_64" OR ARCH_ID STREQUAL "x86-64" OR ARCH_ID STREQUAL "amd64" OR ARCH_ID STREQUAL "AMD64")
         list(APPEND SOURCES_CRYPTO
             src/crypto/randomx/jit_compiler_x86_static.S
             src/crypto/randomx/jit_compiler_x86.cpp
diff --git a/cmake/flags.cmake b/cmake/flags.cmake
index d00366282..3a0add7a8 100644
--- a/cmake/flags.cmake
+++ b/cmake/flags.cmake
@@ -17,7 +17,7 @@ if (CMAKE_CXX_COMPILER_ID MATCHES GNU)
     set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wall -Wno-strict-aliasing")
     set(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} -Ofast")
 
-    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -fno-exceptions -fno-rtti -Wno-class-memaccess")
+    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -fexceptions -fno-rtti -Wno-class-memaccess")
     set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} -Ofast -s")
 
     if (XMRIG_ARMv8)
diff --git a/src/crypto/randomx/bytecode_machine.hpp b/src/crypto/randomx/bytecode_machine.hpp
index c4175b702..3b05f378d 100644
--- a/src/crypto/randomx/bytecode_machine.hpp
+++ b/src/crypto/randomx/bytecode_machine.hpp
@@ -161,11 +161,11 @@ namespace randomx {
 		}
 
 		static void exe_IROR_R(RANDOMX_EXE_ARGS) {
-			*ibc.idst = rotr(*ibc.idst, *ibc.isrc & 63);
+			*ibc.idst = rotr64(*ibc.idst, *ibc.isrc & 63);
 		}
 
 		static void exe_IROL_R(RANDOMX_EXE_ARGS) {
-			*ibc.idst = rotl(*ibc.idst, *ibc.isrc & 63);
+			*ibc.idst = rotl64(*ibc.idst, *ibc.isrc & 63);
 		}
 
 		static void exe_ISWAP_R(RANDOMX_EXE_ARGS) {
@@ -225,7 +225,7 @@ namespace randomx {
 		}
 
 		static void exe_CFROUND(RANDOMX_EXE_ARGS) {
-			rx_set_rounding_mode(rotr(*ibc.isrc, ibc.imm) % 4);
+			rx_set_rounding_mode(rotr64(*ibc.isrc, ibc.imm) % 4);
 		}
 
 		static void exe_ISTORE(RANDOMX_EXE_ARGS) {
diff --git a/src/crypto/randomx/common.hpp b/src/crypto/randomx/common.hpp
index 6f60f606f..7f7ea0edb 100644
--- a/src/crypto/randomx/common.hpp
+++ b/src/crypto/randomx/common.hpp
@@ -59,13 +59,13 @@ namespace randomx {
 	//static_assert(RANDOMX_JUMP_OFFSET >= 0, "RANDOMX_JUMP_OFFSET must be greater than or equal to 0.");
 	//static_assert(RANDOMX_JUMP_BITS + RANDOMX_JUMP_OFFSET <= 16, "RANDOMX_JUMP_BITS + RANDOMX_JUMP_OFFSET must not exceed 16.");
 
-	//constexpr int wtSum = RANDOMX_FREQ_IADD_RS + RANDOMX_FREQ_IADD_M + RANDOMX_FREQ_ISUB_R + \
-	//	RANDOMX_FREQ_ISUB_M + RANDOMX_FREQ_IMUL_R + RANDOMX_FREQ_IMUL_M + RANDOMX_FREQ_IMULH_R + \
-	//	RANDOMX_FREQ_IMULH_M + RANDOMX_FREQ_ISMULH_R + RANDOMX_FREQ_ISMULH_M + RANDOMX_FREQ_IMUL_RCP + \
-	//	RANDOMX_FREQ_INEG_R + RANDOMX_FREQ_IXOR_R + RANDOMX_FREQ_IXOR_M + RANDOMX_FREQ_IROR_R + RANDOMX_FREQ_IROL_R + RANDOMX_FREQ_ISWAP_R + \
-	//	RANDOMX_FREQ_FSWAP_R + RANDOMX_FREQ_FADD_R + RANDOMX_FREQ_FADD_M + RANDOMX_FREQ_FSUB_R + RANDOMX_FREQ_FSUB_M + \
-	//	RANDOMX_FREQ_FSCAL_R + RANDOMX_FREQ_FMUL_R + RANDOMX_FREQ_FDIV_M + RANDOMX_FREQ_FSQRT_R + RANDOMX_FREQ_CBRANCH + \
-	//	RANDOMX_FREQ_CFROUND + RANDOMX_FREQ_ISTORE + RANDOMX_FREQ_NOP;
+	/*constexpr int wtSum = RANDOMX_FREQ_IADD_RS + RANDOMX_FREQ_IADD_M + RANDOMX_FREQ_ISUB_R + \
+		RANDOMX_FREQ_ISUB_M + RANDOMX_FREQ_IMUL_R + RANDOMX_FREQ_IMUL_M + RANDOMX_FREQ_IMULH_R + \
+		RANDOMX_FREQ_IMULH_M + RANDOMX_FREQ_ISMULH_R + RANDOMX_FREQ_ISMULH_M + RANDOMX_FREQ_IMUL_RCP + \
+		RANDOMX_FREQ_INEG_R + RANDOMX_FREQ_IXOR_R + RANDOMX_FREQ_IXOR_M + RANDOMX_FREQ_IROR_R + RANDOMX_FREQ_IROL_R + RANDOMX_FREQ_ISWAP_R + \
+		RANDOMX_FREQ_FSWAP_R + RANDOMX_FREQ_FADD_R + RANDOMX_FREQ_FADD_M + RANDOMX_FREQ_FSUB_R + RANDOMX_FREQ_FSUB_M + \
+		RANDOMX_FREQ_FSCAL_R + RANDOMX_FREQ_FMUL_R + RANDOMX_FREQ_FDIV_M + RANDOMX_FREQ_FSQRT_R + RANDOMX_FREQ_CBRANCH + \
+		RANDOMX_FREQ_CFROUND + RANDOMX_FREQ_ISTORE + RANDOMX_FREQ_NOP;*/
 
 	//static_assert(wtSum == 256, "Sum of instruction frequencies must be 256.");
 
diff --git a/src/crypto/randomx/instructions_portable.cpp b/src/crypto/randomx/instructions_portable.cpp
index 797c84c4a..8c466ebe5 100644
--- a/src/crypto/randomx/instructions_portable.cpp
+++ b/src/crypto/randomx/instructions_portable.cpp
@@ -51,14 +51,14 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	#include <intrin.h>
 	#include <stdlib.h>
 
-	uint64_t rotl(uint64_t x, unsigned int c) {
+	uint64_t rotl64(uint64_t x, unsigned int c) {
 		return _rotl64(x, c);
 	}
-	uint64_t rotr(uint64_t x, unsigned int c) {
+	uint64_t rotr64(uint64_t x, unsigned int c) {
 		return _rotr64(x, c);
 	}
-	#define HAVE_ROTL
-	#define HAVE_ROTR
+	#define HAVE_ROTL64
+	#define HAVE_ROTR64
 
 	#if EVAL_DEFINE(__MACHINEARM64_X64(1))
 	uint64_t mulh(uint64_t a, uint64_t b) {
@@ -88,18 +88,18 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 	}
 #endif
 
-#ifndef HAVE_ROTR
-	uint64_t rotr(uint64_t a, unsigned int b) {
+#ifndef HAVE_ROTR64
+	uint64_t rotr64(uint64_t a, unsigned int b) {
 		return (a >> b) | (a << (-b & 63));
 	}
-	#define HAVE_ROTR
+	#define HAVE_ROTR64
 #endif
 
-#ifndef HAVE_ROTL
-	uint64_t rotl(uint64_t a, unsigned int b) {
+#ifndef HAVE_ROTL64
+	uint64_t rotl64(uint64_t a, unsigned int b) {
 		return (a << b) | (a >> (-b & 63));
 	}
-	#define HAVE_ROTL
+	#define HAVE_ROTL64
 #endif
 
 #ifndef HAVE_MULH
diff --git a/src/crypto/randomx/intrin_portable.h b/src/crypto/randomx/intrin_portable.h
index b4f1b503e..83acbe65d 100644
--- a/src/crypto/randomx/intrin_portable.h
+++ b/src/crypto/randomx/intrin_portable.h
@@ -601,5 +601,5 @@ FORCE_INLINE rx_vec_i128 rx_aesdec_vec_i128(rx_vec_i128 v, rx_vec_i128 rkey) {
 double loadDoublePortable(const void* addr);
 uint64_t mulh(uint64_t, uint64_t);
 int64_t smulh(int64_t, int64_t);
-uint64_t rotl(uint64_t, unsigned int);
-uint64_t rotr(uint64_t, unsigned int);
+uint64_t rotl64(uint64_t, unsigned int);
+uint64_t rotr64(uint64_t, unsigned int);
diff --git a/src/crypto/randomx/superscalar.cpp b/src/crypto/randomx/superscalar.cpp
index c0d496b96..da6056228 100644
--- a/src/crypto/randomx/superscalar.cpp
+++ b/src/crypto/randomx/superscalar.cpp
@@ -859,7 +859,7 @@ namespace randomx {
 			r[instr.dst] *= r[instr.src];
 			break;
 		case SuperscalarInstructionType::IROR_C:
-			r[instr.dst] = rotr(r[instr.dst], instr.getImm32());
+			r[instr.dst] = rotr64(r[instr.dst], instr.getImm32());
 			break;
 		case SuperscalarInstructionType::IADD_C7:
 		case SuperscalarInstructionType::IADD_C8:
diff --git a/src/workers/Workers.cpp b/src/workers/Workers.cpp
index f3d2a6632..a913ee518 100644
--- a/src/workers/Workers.cpp
+++ b/src/workers/Workers.cpp
@@ -410,6 +410,9 @@ void Workers::updateDataset(const uint8_t* seed_hash, xmrig::Variant variant, co
     case xmrig::VARIANT_RX_LOKI:
         randomx_apply_config(RandomX_LokiConfig);
         break;
+    default:
+        randomx_apply_config(RandomX_MoneroConfig);
+        break;
     }
     m_rx_variant = variant;
 }
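
Note (not part of the patch): the recurring change in the RandomX hunks is the rename of the
portable rotation helpers rotl/rotr to rotl64/rotr64, presumably to avoid a clash with
identically named helpers pulled in through MinGW's headers. The remaining hunks switch GCC
builds from -fno-exceptions to -fexceptions, accept the "AMD64" spelling that
CMAKE_HOST_SYSTEM_PROCESSOR reports on Windows hosts, and fall back to RandomX_MoneroConfig
for unhandled variants in Workers::updateDataset. The snippet below is a minimal standalone
sketch of the portable fallbacks shown in the diff; the main() harness and the test values
are illustrative additions, not xmrig code.

// Standalone sketch (illustrative only): the portable 64-bit rotations renamed by the patch,
// plus a few sanity checks. The "-b & 63" form keeps the complementary shift in [0, 63], so a
// rotate count of 0 never becomes an undefined 64-bit shift.
#include <cassert>
#include <cstdint>

static uint64_t rotr64(uint64_t a, unsigned int b) {
	return (a >> b) | (a << (-b & 63));
}

static uint64_t rotl64(uint64_t a, unsigned int b) {
	return (a << b) | (a >> (-b & 63));
}

int main() {
	assert(rotl64(1, 1) == 2);
	assert(rotr64(1, 1) == 0x8000000000000000ULL);
	assert(rotl64(0x0123456789ABCDEFULL, 0) == 0x0123456789ABCDEFULL); // rotate by 0 is a no-op
	assert(rotr64(rotl64(0xDEADBEEFULL, 13), 13) == 0xDEADBEEFULL);    // rotl64 and rotr64 are inverses
	return 0;
}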