Mirror of https://github.com/SChernykh/p2pool.git (synced 2024-11-16 15:57:39 +00:00)

Merge branch 'hardfork'

Commit 14f7a1cb61: 42 changed files with 1244 additions and 627 deletions
.github/workflows/c-cpp.yml (vendored, 84 changes)

@@ -70,7 +70,7 @@ jobs:
run: |
cd external/src/curl
autoreconf -fi
./configure --without-ssl --without-hyper --without-zlib --without-brotli --without-zstd --without-default-ssl-backend --without-ca-bundle --without-ca-path --without-ca-fallback --without-libpsl --without-libgsasl --without-librtmp --without-winidn --without-libidn2 --without-nghttp2 --without-ngtcp2 --without-nghttp3 --without-quiche --without-msh3 --without-zsh-functions-dir --without-fish-functions-dir --disable-ftp --disable-file --disable-ldap --disable-ldaps --disable-rtsp --disable-dict --disable-telnet --disable-tftp --disable-pop3 --disable-imap --disable-smb --disable-smtp --disable-gopher --disable-mqtt --disable-manual --disable-ntlm --disable-ntlm-wb --disable-tls-srp --disable-unix-sockets --disable-cookies --disable-socketpair --disable-doh --disable-dateparse --disable-netrc --disable-progress-meter --disable-dnsshuffle --disable-hsts --disable-alt-svc --disable-ares
./configure CFLAGS='-Os' --without-ssl --without-hyper --without-zlib --without-brotli --without-zstd --without-default-ssl-backend --without-ca-bundle --without-ca-path --without-ca-fallback --without-libpsl --without-libgsasl --without-librtmp --without-winidn --without-libidn2 --without-nghttp2 --without-ngtcp2 --without-nghttp3 --without-quiche --without-msh3 --without-zsh-functions-dir --without-fish-functions-dir --disable-ftp --disable-file --disable-ldap --disable-ldaps --disable-rtsp --disable-dict --disable-telnet --disable-tftp --disable-pop3 --disable-imap --disable-smb --disable-smtp --disable-gopher --disable-mqtt --disable-manual --disable-ntlm --disable-ntlm-wb --disable-tls-srp --disable-unix-sockets --disable-cookies --disable-socketpair --disable-doh --disable-dateparse --disable-netrc --disable-progress-meter --disable-dnsshuffle --disable-hsts --disable-alt-svc --disable-ares
make -j$(nproc)

- name: Build libuv

@@ -78,7 +78,7 @@ jobs:
cd external/src/libuv
mkdir build
cd build
cmake .. -DBUILD_TESTING=OFF
cmake .. -DCMAKE_C_FLAGS='-Os' -DBUILD_TESTING=OFF
make -j$(nproc)

- name: Build libzmq

@@ -86,7 +86,7 @@ jobs:
cd external/src/libzmq
mkdir build
cd build
cmake .. -DWITH_TLS=OFF -DWITH_LIBSODIUM=OFF -DWITH_LIBBSD=OFF -DBUILD_TESTS=OFF
cmake .. -DCMAKE_C_FLAGS='-Os' -DCMAKE_CXX_FLAGS='-Os' -DWITH_TLS=OFF -DWITH_LIBSODIUM=OFF -DWITH_LIBBSD=OFF -DBUILD_TESTS=OFF
make -j$(nproc)

- name: Build p2pool

@@ -138,7 +138,7 @@ jobs:
run: |
cd external/src/curl
autoreconf -fi
./configure --host=aarch64-linux-gnu --without-ssl --without-hyper --without-zlib --without-brotli --without-zstd --without-default-ssl-backend --without-ca-bundle --without-ca-path --without-ca-fallback --without-libpsl --without-libgsasl --without-librtmp --without-winidn --without-libidn2 --without-nghttp2 --without-ngtcp2 --without-nghttp3 --without-quiche --without-msh3 --without-zsh-functions-dir --without-fish-functions-dir --disable-ftp --disable-file --disable-ldap --disable-ldaps --disable-rtsp --disable-dict --disable-telnet --disable-tftp --disable-pop3 --disable-imap --disable-smb --disable-smtp --disable-gopher --disable-mqtt --disable-manual --disable-ntlm --disable-ntlm-wb --disable-tls-srp --disable-unix-sockets --disable-cookies --disable-socketpair --disable-doh --disable-dateparse --disable-netrc --disable-progress-meter --disable-dnsshuffle --disable-hsts --disable-alt-svc --disable-ares
./configure --host=aarch64-linux-gnu CFLAGS='-Os' --without-ssl --without-hyper --without-zlib --without-brotli --without-zstd --without-default-ssl-backend --without-ca-bundle --without-ca-path --without-ca-fallback --without-libpsl --without-libgsasl --without-librtmp --without-winidn --without-libidn2 --without-nghttp2 --without-ngtcp2 --without-nghttp3 --without-quiche --without-msh3 --without-zsh-functions-dir --without-fish-functions-dir --disable-ftp --disable-file --disable-ldap --disable-ldaps --disable-rtsp --disable-dict --disable-telnet --disable-tftp --disable-pop3 --disable-imap --disable-smb --disable-smtp --disable-gopher --disable-mqtt --disable-manual --disable-ntlm --disable-ntlm-wb --disable-tls-srp --disable-unix-sockets --disable-cookies --disable-socketpair --disable-doh --disable-dateparse --disable-netrc --disable-progress-meter --disable-dnsshuffle --disable-hsts --disable-alt-svc --disable-ares
make -j$(nproc)

- name: Build libuv

@@ -146,7 +146,7 @@ jobs:
cd external/src/libuv
mkdir build
cd build
cmake .. -DCMAKE_C_COMPILER=aarch64-linux-gnu-gcc -DCMAKE_CXX_COMPILER=aarch64-linux-gnu-g++ -DBUILD_TESTING=OFF
cmake .. -DCMAKE_C_COMPILER=aarch64-linux-gnu-gcc -DCMAKE_CXX_COMPILER=aarch64-linux-gnu-g++ -DCMAKE_C_FLAGS='-Os' -DBUILD_TESTING=OFF
make -j$(nproc)

- name: Build libzmq

@@ -154,7 +154,7 @@ jobs:
cd external/src/libzmq
mkdir build
cd build
cmake .. -DCMAKE_C_COMPILER=aarch64-linux-gnu-gcc -DCMAKE_CXX_COMPILER=aarch64-linux-gnu-g++ -DWITH_TLS=OFF -DWITH_LIBSODIUM=OFF -DWITH_LIBBSD=OFF -DBUILD_TESTS=OFF
cmake .. -DCMAKE_C_COMPILER=aarch64-linux-gnu-gcc -DCMAKE_CXX_COMPILER=aarch64-linux-gnu-g++ -DCMAKE_C_FLAGS='-Os' -DCMAKE_CXX_FLAGS='-Os' -DWITH_TLS=OFF -DWITH_LIBSODIUM=OFF -DWITH_LIBBSD=OFF -DBUILD_TESTS=OFF
make -j$(nproc)

- name: Build p2pool

@@ -194,7 +194,7 @@ jobs:
run: |
cd external/src/curl
autoreconf -fi
./configure --without-ssl --without-hyper --without-zlib --without-brotli --without-zstd --without-default-ssl-backend --without-ca-bundle --without-ca-path --without-ca-fallback --without-libpsl --without-libgsasl --without-librtmp --without-winidn --without-libidn2 --without-nghttp2 --without-ngtcp2 --without-nghttp3 --without-quiche --without-msh3 --without-zsh-functions-dir --without-fish-functions-dir --disable-ftp --disable-file --disable-ldap --disable-ldaps --disable-rtsp --disable-dict --disable-telnet --disable-tftp --disable-pop3 --disable-imap --disable-smb --disable-smtp --disable-gopher --disable-mqtt --disable-manual --disable-ntlm --disable-ntlm-wb --disable-tls-srp --disable-unix-sockets --disable-cookies --disable-socketpair --disable-doh --disable-dateparse --disable-netrc --disable-progress-meter --disable-dnsshuffle --disable-hsts --disable-alt-svc --disable-ares
./configure CFLAGS='-Os' --without-ssl --without-hyper --without-zlib --without-brotli --without-zstd --without-default-ssl-backend --without-ca-bundle --without-ca-path --without-ca-fallback --without-libpsl --without-libgsasl --without-librtmp --without-winidn --without-libidn2 --without-nghttp2 --without-ngtcp2 --without-nghttp3 --without-quiche --without-msh3 --without-zsh-functions-dir --without-fish-functions-dir --disable-ftp --disable-file --disable-ldap --disable-ldaps --disable-rtsp --disable-dict --disable-telnet --disable-tftp --disable-pop3 --disable-imap --disable-smb --disable-smtp --disable-gopher --disable-mqtt --disable-manual --disable-ntlm --disable-ntlm-wb --disable-tls-srp --disable-unix-sockets --disable-cookies --disable-socketpair --disable-doh --disable-dateparse --disable-netrc --disable-progress-meter --disable-dnsshuffle --disable-hsts --disable-alt-svc --disable-ares
make -j$(nproc)

- name: Build libuv

@@ -202,7 +202,7 @@ jobs:
cd external/src/libuv
mkdir build
cd build
cmake .. -G "Unix Makefiles" -DBUILD_TESTING=OFF
cmake .. -G "Unix Makefiles" -DCMAKE_C_FLAGS='-Os' -DBUILD_TESTING=OFF
make -j$(nproc)

- name: Build libzmq

@@ -210,7 +210,7 @@ jobs:
cd external/src/libzmq
mkdir build
cd build
cmake .. -G "Unix Makefiles" -DWITH_TLS=OFF -DWITH_LIBSODIUM=OFF -DWITH_LIBBSD=OFF -DBUILD_TESTS=OFF -DZMQ_HAVE_IPC=OFF
cmake .. -G "Unix Makefiles" -DCMAKE_C_FLAGS='-Os' -DCMAKE_CXX_FLAGS='-Os' -DWITH_TLS=OFF -DWITH_LIBSODIUM=OFF -DWITH_LIBBSD=OFF -DBUILD_TESTS=OFF -DZMQ_HAVE_IPC=OFF
make -j$(nproc)

- name: Build p2pool

@@ -306,7 +306,7 @@ jobs:
run: |
cd external/src/curl
autoreconf -fi
./configure --without-ssl --without-hyper --without-zlib --without-brotli --without-zstd --without-default-ssl-backend --without-ca-bundle --without-ca-path --without-ca-fallback --without-libpsl --without-libgsasl --without-librtmp --without-winidn --without-libidn2 --without-nghttp2 --without-ngtcp2 --without-nghttp3 --without-quiche --without-msh3 --without-zsh-functions-dir --without-fish-functions-dir --disable-ftp --disable-file --disable-ldap --disable-ldaps --disable-rtsp --disable-dict --disable-telnet --disable-tftp --disable-pop3 --disable-imap --disable-smb --disable-smtp --disable-gopher --disable-mqtt --disable-manual --disable-ntlm --disable-ntlm-wb --disable-tls-srp --disable-unix-sockets --disable-cookies --disable-socketpair --disable-doh --disable-dateparse --disable-netrc --disable-progress-meter --disable-dnsshuffle --disable-hsts --disable-alt-svc --disable-ares
./configure CFLAGS='-Os' --without-ssl --without-hyper --without-zlib --without-brotli --without-zstd --without-default-ssl-backend --without-ca-bundle --without-ca-path --without-ca-fallback --without-libpsl --without-libgsasl --without-librtmp --without-winidn --without-libidn2 --without-nghttp2 --without-ngtcp2 --without-nghttp3 --without-quiche --without-msh3 --without-zsh-functions-dir --without-fish-functions-dir --disable-ftp --disable-file --disable-ldap --disable-ldaps --disable-rtsp --disable-dict --disable-telnet --disable-tftp --disable-pop3 --disable-imap --disable-smb --disable-smtp --disable-gopher --disable-mqtt --disable-manual --disable-ntlm --disable-ntlm-wb --disable-tls-srp --disable-unix-sockets --disable-cookies --disable-socketpair --disable-doh --disable-dateparse --disable-netrc --disable-progress-meter --disable-dnsshuffle --disable-hsts --disable-alt-svc --disable-ares
make -j3

- name: Build libuv

@@ -314,7 +314,7 @@ jobs:
cd external/src/libuv
mkdir build
cd build
cmake .. -DBUILD_TESTING=OFF
cmake .. -DCMAKE_C_FLAGS='-Os' -DBUILD_TESTING=OFF
make -j3

- name: Build libzmq

@@ -322,7 +322,7 @@ jobs:
cd external/src/libzmq
mkdir build
cd build
cmake .. -DWITH_TLS=OFF -DWITH_LIBSODIUM=OFF -DWITH_LIBBSD=OFF -DBUILD_TESTS=OFF
cmake .. -DCMAKE_C_FLAGS='-Os' -DCMAKE_CXX_FLAGS='-Os' -DWITH_TLS=OFF -DWITH_LIBSODIUM=OFF -DWITH_LIBBSD=OFF -DBUILD_TESTS=OFF
make -j3

- name: Build p2pool

@@ -350,3 +350,63 @@ jobs:
with:
name: p2pool-${{ matrix.os }}
path: build/p2pool

build-freebsd:

timeout-minutes: 60
runs-on: ${{ matrix.os.host }}

strategy:
matrix:
os:
- name: freebsd
architecture: x86-64
version: '12.4'
host: ubuntu-22.04

steps:
- name: Checkout repository
uses: actions/checkout@v3
with:
submodules: recursive

- name: Build p2pool
uses: cross-platform-actions/action@v0.10.0
with:
operating_system: ${{ matrix.os.name }}
architecture: ${{ matrix.os.architecture }}
version: ${{ matrix.os.version }}
shell: bash
run: |
sudo pkg install -y cmake autoconf automake libtool
cd external/src/curl
autoreconf -fi
./configure CFLAGS='-Os' --without-ssl --without-hyper --without-zlib --without-brotli --without-zstd --without-default-ssl-backend --without-ca-bundle --without-ca-path --without-ca-fallback --without-libpsl --without-libgsasl --without-librtmp --without-winidn --without-libidn2 --without-nghttp2 --without-ngtcp2 --without-nghttp3 --without-quiche --without-msh3 --without-zsh-functions-dir --without-fish-functions-dir --disable-ftp --disable-file --disable-ldap --disable-ldaps --disable-rtsp --disable-dict --disable-telnet --disable-tftp --disable-pop3 --disable-imap --disable-smb --disable-smtp --disable-gopher --disable-mqtt --disable-manual --disable-ntlm --disable-ntlm-wb --disable-tls-srp --disable-unix-sockets --disable-cookies --disable-socketpair --disable-doh --disable-dateparse --disable-netrc --disable-progress-meter --disable-dnsshuffle --disable-hsts --disable-alt-svc --disable-ares
make -j2
cd ../libuv
mkdir build
cd build
cmake .. -DCMAKE_C_FLAGS='-Os' -DBUILD_TESTING=OFF
make -j2
cd ../../libzmq
mkdir build
cd build
cmake .. -DCMAKE_C_FLAGS='-Os' -DCMAKE_CXX_FLAGS='-Os' -DWITH_TLS=OFF -DWITH_LIBSODIUM=OFF -DWITH_LIBBSD=OFF -DBUILD_TESTS=OFF
make -j2
cd ../../../..
mkdir build
cd build
cmake .. -DSTATIC_BINARY=ON
make -j2
cd ../tests
mkdir build
cd build
cmake .. -DSTATIC_LIBS=ON
make -j2
./p2pool_tests

- name: Archive binary
uses: actions/upload-artifact@v3
with:
name: p2pool-${{ matrix.os.name }}-${{ matrix.os.version }}
path: build/p2pool

.github/workflows/test-sync.yml (vendored, 27 changes)

@@ -30,13 +30,16 @@ jobs:
timeout-minutes: 15
run: |
cd build
./p2pool --host p2pmd.xmrvsbeast.com --zmq-port 18084 --wallet 44MnN1f3Eto8DZYUWuE5XZNUtE3vcRzt2j6PzqWpPau34e6Cf4fAxt6X2MBmrm6F9YMEiMNjN6W4Shn4pLcfNAja621jwyg --no-cache --loglevel 6
mkdir data
./p2pool --host p2pmd.xmrvsbeast.com --zmq-port 18084 --wallet 44MnN1f3Eto8DZYUWuE5XZNUtE3vcRzt2j6PzqWpPau34e6Cf4fAxt6X2MBmrm6F9YMEiMNjN6W4Shn4pLcfNAja621jwyg --no-cache --data-api data --local-api --loglevel 6

- name: Archive p2pool.log
uses: actions/upload-artifact@v3
with:
name: p2pool_ubuntu.log
path: build/p2pool.log
name: p2pool_ubuntu_data
path: |
build/p2pool.log
build/data/

sync-test-macos:

@@ -62,13 +65,16 @@ jobs:
timeout-minutes: 15
run: |
cd build
./p2pool --host p2pmd.xmrvsbeast.com --zmq-port 18084 --wallet 44MnN1f3Eto8DZYUWuE5XZNUtE3vcRzt2j6PzqWpPau34e6Cf4fAxt6X2MBmrm6F9YMEiMNjN6W4Shn4pLcfNAja621jwyg --no-cache --loglevel 6
mkdir data
./p2pool --host p2pmd.xmrvsbeast.com --zmq-port 18084 --wallet 44MnN1f3Eto8DZYUWuE5XZNUtE3vcRzt2j6PzqWpPau34e6Cf4fAxt6X2MBmrm6F9YMEiMNjN6W4Shn4pLcfNAja621jwyg --no-cache --data-api data --local-api --loglevel 6

- name: Archive p2pool.log
uses: actions/upload-artifact@v3
with:
name: p2pool_macos.log
path: build/p2pool.log
name: p2pool_macos_data
path: |
build/p2pool.log
build/data/

sync-test-windows:

@@ -94,10 +100,13 @@ jobs:
timeout-minutes: 15
run: |
cd build/Debug
./p2pool.exe --host p2pmd.xmrvsbeast.com --zmq-port 18084 --wallet 44MnN1f3Eto8DZYUWuE5XZNUtE3vcRzt2j6PzqWpPau34e6Cf4fAxt6X2MBmrm6F9YMEiMNjN6W4Shn4pLcfNAja621jwyg --no-cache --loglevel 6
mkdir data
./p2pool.exe --host p2pmd.xmrvsbeast.com --zmq-port 18084 --wallet 44MnN1f3Eto8DZYUWuE5XZNUtE3vcRzt2j6PzqWpPau34e6Cf4fAxt6X2MBmrm6F9YMEiMNjN6W4Shn4pLcfNAja621jwyg --no-cache --data-api data --local-api --loglevel 6

- name: Archive p2pool.log
uses: actions/upload-artifact@v3
with:
name: p2pool_windows.log
path: build/Debug/p2pool.log
name: p2pool_windows_data
path: |
build/Debug/p2pool.log
build/Debug/data/

@@ -1,4 +1,4 @@
cmake_minimum_required(VERSION 3.6)
cmake_minimum_required(VERSION 2.8.12)
project(p2pool)

option(STATIC_BINARY "Build static binary" OFF)

@@ -9,7 +9,9 @@ option(DEV_TEST_SYNC "[Developer only] Sync test, stop p2pool after sync is comp
set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_SOURCE_DIR}/cmake")

set_property(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} PROPERTY VS_STARTUP_PROJECT p2pool)
if (${CMAKE_VERSION} VERSION_GREATER "3.5.2")
set_property(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} PROPERTY VS_STARTUP_PROJECT p2pool)
endif()

if (WITH_RANDOMX)
add_definitions(-DWITH_RANDOMX)

@@ -124,7 +126,7 @@ if (WIN32)
elseif (CMAKE_SYSTEM_NAME STREQUAL FreeBSD)
set(LIBS ${LIBS} pthread)
elseif (NOT APPLE)
set(LIBS ${LIBS} pthread dl)
set(LIBS ${LIBS} pthread)
endif()

if (CMAKE_CXX_COMPILER_ID MATCHES MSVC)

@@ -226,7 +228,7 @@ if (STATIC_BINARY OR STATIC_LIBS)
find_library(SYSTEM_CONFIGURATION_LIB SystemConfiguration)
set(STATIC_LIBS ${STATIC_LIBS} ${FOUNDATION_LIB} ${CORE_FOUNDATION_LIB} ${SYSTEM_CONFIGURATION_LIB})
else()
set(STATIC_LIBS ${STATIC_LIBS} pthread dl)
set(STATIC_LIBS ${STATIC_LIBS} pthread)
endif()

target_link_libraries(${CMAKE_PROJECT_NAME}

@@ -2,7 +2,13 @@
Decentralized pool for Monero mining.

Pool status and monitoring pages can be found at https://p2pool.io/, https://p2pool.io/mini/ and https://p2pool.observer/
Pool status and monitoring pages can be found at https://p2pool.io/, https://p2pool.io/mini/ and https://p2pool.observer/, https://mini.p2pool.observer/

# P2Pool hardfork on March 18, 2023

P2Pool (not Monero!) will hardfork to new consensus rules on March 18th at 21:00 UTC (use [this link](https://dateful.com/convert/utc?t=9pm&d=2023-03-18) to convert to your local time). The hardfork is necessary to improve P2Pool scalability and reduce the impact on Monero blockchain. You'll need to update to **P2Pool v3.0** or newer version before this time.

Reddit discussion: https://www.reddit.com/r/MoneroMining/comments/1095730/psa_p2pool_network_upgrade_aka_hardfork_on_march/

### Build Status

@@ -8,7 +8,7 @@ set(CMAKE_C_STANDARD_REQUIRED ON)
if (CMAKE_CXX_COMPILER_ID MATCHES GNU)
set(GENERAL_FLAGS "-pthread")
set(WARNING_FLAGS "-Wall -Wextra -Wcast-align -Wcast-qual -Wlogical-op -Wstrict-overflow=2 -Wundef -Wformat=2 -Wpointer-arith -Werror")
set(OPTIMIZATION_FLAGS "-Ofast -s")
set(OPTIMIZATION_FLAGS "-Ofast -s -flto -fuse-linker-plugin")

set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${GENERAL_FLAGS} ${WARNING_FLAGS} ${OPTIMIZATION_FLAGS}")
set(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} ${GENERAL_FLAGS} ${WARNING_FLAGS} ${OPTIMIZATION_FLAGS}")

@@ -42,7 +42,7 @@ elseif (CMAKE_CXX_COMPILER_ID MATCHES MSVC)
elseif (CMAKE_CXX_COMPILER_ID MATCHES Clang)
set(GENERAL_FLAGS "-pthread")
set(WARNING_FLAGS "-Wall -Wextra -Wno-undefined-internal -Wunreachable-code-aggressive -Wmissing-prototypes -Wmissing-variable-declarations -Werror")
set(OPTIMIZATION_FLAGS "-Ofast -funroll-loops -fmerge-all-constants")
set(OPTIMIZATION_FLAGS "-Ofast -funroll-loops -fmerge-all-constants -flto")

set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${GENERAL_FLAGS} ${WARNING_FLAGS} ${OPTIMIZATION_FLAGS}")
set(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} ${GENERAL_FLAGS} ${WARNING_FLAGS} ${OPTIMIZATION_FLAGS}")

external/src/RandomX (vendored, 2 changes)

@@ -1 +1 @@
Subproject commit 89031917e9780b3ea0439bf362446efe5b59244c
Subproject commit 58c3943ff5699ebd36e178b8d4c6183bcb82419c

external/src/libuv (vendored, 2 changes)

@@ -1 +1 @@
Subproject commit 0c1fa696aa502eb749c2c4735005f41ba00a27b8
Subproject commit 55077af4b50e13c5b7c02131ff16695685478fc8

@@ -55,10 +55,12 @@ BlockTemplate::BlockTemplate(SideChain* sidechain, RandomX_Hasher_Base* hasher)
, m_difficulty{}
, m_seedHash{}
, m_timestamp(0)
, m_txkeyPub{}
, m_txkeySec{}
, m_poolBlockTemplate(new PoolBlock())
, m_finalReward(0)
, m_minerTxKeccakState{}
, m_minerTxKeccakStateInputLength(0)
, m_sidechainHashKeccakState{}
, m_sidechainHashInputLength(0)
, m_rng(RandomDeviceSeed::instance)
{
// Diffuse the initial state in case it has low quality

@@ -72,6 +74,8 @@ BlockTemplate::BlockTemplate(SideChain* sidechain, RandomX_Hasher_Base* hasher)
m_transactionHashes.reserve(8192);
m_rewards.reserve(100);
m_blockTemplateBlob.reserve(65536);
m_fullDataBlob.reserve(65536);
m_sidechainHashBlob.reserve(65536);
m_merkleTreeMainBranch.reserve(HASH_SIZE * 10);
m_mempoolTxs.reserve(1024);
m_mempoolTxsOrder.reserve(1024);

@@ -119,6 +123,8 @@ BlockTemplate& BlockTemplate::operator=(const BlockTemplate& b)
m_templateId = b.m_templateId;
m_lastUpdated = b.m_lastUpdated.load();
m_blockTemplateBlob = b.m_blockTemplateBlob;
m_fullDataBlob = b.m_fullDataBlob;
m_sidechainHashBlob = b.m_sidechainHashBlob;
m_merkleTreeMainBranch = b.m_merkleTreeMainBranch;
m_blockHeaderSize = b.m_blockHeaderSize;
m_minerTxOffsetInTemplate = b.m_minerTxOffsetInTemplate;

@@ -131,10 +137,14 @@ BlockTemplate& BlockTemplate::operator=(const BlockTemplate& b)
m_difficulty = b.m_difficulty;
m_seedHash = b.m_seedHash;
m_timestamp = b.m_timestamp;
m_txkeyPub = b.m_txkeyPub;
m_txkeySec = b.m_txkeySec;
*m_poolBlockTemplate = *b.m_poolBlockTemplate;
m_finalReward = b.m_finalReward;
m_finalReward = b.m_finalReward.load();

memcpy(m_minerTxKeccakState, b.m_minerTxKeccakState, sizeof(m_minerTxKeccakState));
m_minerTxKeccakStateInputLength = b.m_minerTxKeccakStateInputLength;

memcpy(m_sidechainHashKeccakState, b.m_sidechainHashKeccakState, sizeof(m_sidechainHashKeccakState));
m_sidechainHashInputLength = b.m_sidechainHashInputLength;

m_minerTx.clear();
m_blockHeader.clear();

@@ -188,9 +198,12 @@ static FORCEINLINE uint64_t get_block_reward(uint64_t base_reward, uint64_t medi
void BlockTemplate::shuffle_tx_order()
{
const int64_t n = static_cast<int64_t>(m_mempoolTxsOrder.size());
for (int64_t i = n - 1; i > 0; --i) {
std::swap(m_mempoolTxsOrder[i], m_mempoolTxsOrder[m_rng() % (i + 1)]);
const uint64_t n = m_mempoolTxsOrder.size();
if (n > 1) {
for (uint64_t i = 0, k; i < n - 1; ++i) {
umul128(m_rng(), n - i, &k);
std::swap(m_mempoolTxsOrder[i], m_mempoolTxsOrder[i + k]);
}
}
}
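
The rewritten shuffle_tx_order() above replaces the modulo draw m_rng() % (i + 1) with umul128(): a 64-bit random value is mapped into [0, n - i) by keeping the high half of a 128-bit product, which avoids the 64-bit division in the inner loop. A minimal stand-alone sketch of the same idea, using the __int128 compiler extension in place of the project's umul128 helper:

    #include <cstdint>
    #include <random>
    #include <utility>
    #include <vector>

    // Map r (uniform over 64 bits) into [0, range) via the high half of r * range, no division needed.
    static inline uint64_t reduce64(uint64_t r, uint64_t range)
    {
        return static_cast<uint64_t>((static_cast<unsigned __int128>(r) * range) >> 64);
    }

    // Forward Fisher-Yates shuffle, mirroring the structure of the new loop above.
    static void shuffle_order(std::vector<uint32_t>& order, std::mt19937_64& rng)
    {
        const uint64_t n = order.size();
        if (n > 1) {
            for (uint64_t i = 0; i < n - 1; ++i) {
                const uint64_t k = reduce64(rng(), n - i); // k is in [0, n - i)
                std::swap(order[i], order[i + k]);
            }
        }
    }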

@@ -221,8 +234,6 @@ void BlockTemplate::update(const MinerData& data, const Mempool& mempool, Wallet
*this = *m_oldTemplates[id % array_size(&BlockTemplate::m_oldTemplates)];
};

get_tx_keys(m_txkeyPub, m_txkeySec, miner_wallet->spend_public_key(), data.prev_id);

m_height = data.height;
m_difficulty = data.difficulty;
m_seedHash = data.seed_hash;

@@ -256,11 +267,74 @@ void BlockTemplate::update(const MinerData& data, const Mempool& mempool, Wallet
m_blockHeader.insert(m_blockHeader.end(), NONCE_SIZE, 0);
m_poolBlockTemplate->m_nonce = 0;

// Fill in m_txinGenHeight here so get_shares() can use it to calculate the correct PPLNS window
m_poolBlockTemplate->m_txinGenHeight = data.height;

m_blockHeaderSize = m_blockHeader.size();

m_sidechain->fill_sidechain_data(*m_poolBlockTemplate, miner_wallet, m_txkeySec, m_shares);
const int sidechain_version = m_poolBlockTemplate->get_sidechain_version();

// Only choose transactions that were received 10 or more seconds ago, or high fee (>= 0.006 XMR) transactions
if (sidechain_version <= 1) {
get_tx_keys(m_poolBlockTemplate->m_txkeyPub, m_poolBlockTemplate->m_txkeySec, miner_wallet->spend_public_key(), data.prev_id);
// Both values are the same before v2
m_poolBlockTemplate->m_txkeySecSeed = m_poolBlockTemplate->m_txkeySec;
}

m_poolBlockTemplate->m_minerWallet = *miner_wallet;

m_sidechain->fill_sidechain_data(*m_poolBlockTemplate, m_shares);

// Pre-calculate outputs to speed up miner tx generation
if (!m_shares.empty()) {
struct Precalc
{
FORCEINLINE Precalc(const std::vector<MinerShare>& s, const hash& k) : txKeySec(k)
{
const size_t N = s.size();
counter = static_cast<int>(N) - 1;
shares = reinterpret_cast<std::pair<hash, hash>*>(malloc_hook(sizeof(std::pair<hash, hash>) * N));
if (shares) {
const MinerShare* src = &s[0];
std::pair<hash, hash>* dst = shares;
std::pair<hash, hash>* e = shares + N;

for (; dst < e; ++src, ++dst) {
const Wallet* w = src->m_wallet;
dst->first = w->view_public_key();
dst->second = w->spend_public_key();
}
}
}

FORCEINLINE Precalc(Precalc&& rhs) noexcept : txKeySec(rhs.txKeySec), counter(rhs.counter.load()), shares(rhs.shares) { rhs.shares = nullptr; }
FORCEINLINE ~Precalc() { free_hook(shares); }

// Disable any other way of copying/moving Precalc
Precalc(const Precalc&) = delete;
Precalc& operator=(const Precalc&) = delete;
Precalc& operator=(Precalc&&) = delete;

FORCEINLINE void operator()()
{
if (shares) {
hash derivation, eph_public_key;
int i;
while ((i = counter.fetch_sub(1)) >= 0) {
uint8_t view_tag;
generate_key_derivation(shares[i].first, txKeySec, i, derivation, view_tag);
derive_public_key(derivation, i, shares[i].second, eph_public_key);
}
}
}

hash txKeySec;
std::atomic<int> counter;
std::pair<hash, hash>* shares;
};
parallel_run(uv_default_loop_checked(), Precalc(m_shares, m_poolBlockTemplate->m_txkeySec));
}

// Only choose transactions that were received 5 or more seconds ago, or high fee (>= 0.006 XMR) transactions
size_t total_mempool_transactions;
{
m_mempoolTxs.clear();

@@ -272,7 +346,7 @@ void BlockTemplate::update(const MinerData& data, const Mempool& mempool, Wallet
const uint64_t cur_time = seconds_since_epoch();

for (auto& it : mempool.m_transactions) {
if ((cur_time >= it.second.time_received + 10) || (it.second.fee >= HIGH_FEE_VALUE)) {
if ((cur_time > it.second.time_received + 5) || (it.second.fee >= HIGH_FEE_VALUE)) {
m_mempoolTxs.emplace_back(it.second);
}
}

@@ -570,43 +644,91 @@ void BlockTemplate::update(const MinerData& data, const Mempool& mempool, Wallet
m_poolBlockTemplate->m_minerWallet = *miner_wallet;

m_poolBlockTemplate->serialize_sidechain_data();
m_poolBlockTemplate->m_sidechainId = calc_sidechain_hash();
const int sidechain_hash_offset = static_cast<int>(m_extraNonceOffsetInTemplate + m_poolBlockTemplate->m_extraNonceSize) + 2;
// Layout: [software id, version, random number, sidechain extra_nonce]
uint32_t* sidechain_extra = m_poolBlockTemplate->m_sidechainExtraBuf;
sidechain_extra[0] = 0;
sidechain_extra[1] = (P2POOL_VERSION_MAJOR << 16) | P2POOL_VERSION_MINOR;
sidechain_extra[2] = static_cast<uint32_t>(m_rng() >> 32);
sidechain_extra[3] = 0;

m_poolBlockTemplate->m_nonce = 0;
m_poolBlockTemplate->m_extraNonce = 0;
m_poolBlockTemplate->m_sidechainId = {};

const std::vector<uint8_t> sidechain_data = m_poolBlockTemplate->serialize_sidechain_data();
const std::vector<uint8_t>& consensus_id = m_sidechain->consensus_id();

m_sidechainHashBlob = m_poolBlockTemplate->serialize_mainchain_data();
m_sidechainHashBlob.insert(m_sidechainHashBlob.end(), sidechain_data.begin(), sidechain_data.end());
m_sidechainHashBlob.insert(m_sidechainHashBlob.end(), consensus_id.begin(), consensus_id.end());

{
memset(m_sidechainHashKeccakState, 0, sizeof(m_sidechainHashKeccakState));

const size_t extra_nonce_offset = m_sidechainHashBlob.size() - HASH_SIZE - ((sidechain_version > 1) ? EXTRA_NONCE_SIZE : 0);
if (extra_nonce_offset >= KeccakParams::HASH_DATA_AREA) {
// Sidechain data is big enough to cache keccak state up to extra_nonce
m_sidechainHashInputLength = (extra_nonce_offset / KeccakParams::HASH_DATA_AREA) * KeccakParams::HASH_DATA_AREA;

const uint8_t* in = m_sidechainHashBlob.data();
int inlen = static_cast<int>(m_sidechainHashInputLength);

keccak_step(in, inlen, m_sidechainHashKeccakState);
}
else {
m_sidechainHashInputLength = 0;
}
}

m_fullDataBlob = m_blockTemplateBlob;
m_fullDataBlob.insert(m_fullDataBlob.end(), sidechain_data.begin(), sidechain_data.end());

m_poolBlockTemplate->m_sidechainId = calc_sidechain_hash(0);

if (pool_block_debug()) {
const size_t sidechain_hash_offset = m_extraNonceOffsetInTemplate + m_poolBlockTemplate->m_extraNonceSize + 2;

memcpy(m_blockTemplateBlob.data() + sidechain_hash_offset, m_poolBlockTemplate->m_sidechainId.h, HASH_SIZE);
memcpy(m_fullDataBlob.data() + sidechain_hash_offset, m_poolBlockTemplate->m_sidechainId.h, HASH_SIZE);
memcpy(m_minerTx.data() + sidechain_hash_offset - m_minerTxOffsetInTemplate, m_poolBlockTemplate->m_sidechainId.h, HASH_SIZE);

#if POOL_BLOCK_DEBUG
const std::vector<uint8_t> mainchain_data = m_poolBlockTemplate->serialize_mainchain_data();
const std::vector<uint8_t> sidechain_data = m_poolBlockTemplate->serialize_sidechain_data();

if (mainchain_data != m_blockTemplateBlob) {
LOGERR(1, "serialize_mainchain_data() has a bug, fix it! ");
LOGERR(1, "m_poolBlockTemplate->m_mainChainData.size() = " << mainchain_data.size());
LOGERR(1, "mainchain_data.size() = " << mainchain_data.size());
LOGERR(1, "m_blockTemplateBlob.size() = " << m_blockTemplateBlob.size());
for (size_t i = 0, n = std::min(mainchain_data.size(), m_blockTemplateBlob.size()); i < n; ++i) {
if (mainchain_data[i] != m_blockTemplateBlob[i]) {
LOGERR(1, "m_poolBlockTemplate->m_mainChainData is different at offset " << i);
LOGERR(1, "mainchain_data is different at offset " << i);
break;
}
}
}

{
std::vector<uint8_t> buf = m_blockTemplateBlob;
buf.insert(buf.end(), sidechain_data.begin(), sidechain_data.end());

PoolBlock check;
const int result = check.deserialize(buf.data(), buf.size(), *m_sidechain, nullptr, false);
const int result = check.deserialize(m_fullDataBlob.data(), m_fullDataBlob.size(), *m_sidechain, nullptr, false);
if (result != 0) {
LOGERR(1, "pool block blob generation and/or parsing is broken, error " << result);
}
else {
LOGINFO(6, "blob size = " << buf.size());
LOGINFO(6, "blob size = " << m_fullDataBlob.size());
}
}
#endif

memset(m_minerTxKeccakState, 0, sizeof(m_minerTxKeccakState));

const size_t extra_nonce_offset = m_extraNonceOffsetInTemplate - m_minerTxOffsetInTemplate;
if (extra_nonce_offset >= KeccakParams::HASH_DATA_AREA) {
// Miner transaction is big enough to cache keccak state up to extra_nonce
m_minerTxKeccakStateInputLength = (extra_nonce_offset / KeccakParams::HASH_DATA_AREA) * KeccakParams::HASH_DATA_AREA;

const uint8_t* in = m_blockTemplateBlob.data() + m_minerTxOffsetInTemplate;
int inlen = static_cast<int>(m_minerTxKeccakStateInputLength);

keccak_step(in, inlen, m_minerTxKeccakState);
}
else {
m_minerTxKeccakStateInputLength = 0;
}

const hash minerTx_hash = calc_miner_tx_hash(0);

@@ -760,7 +882,7 @@ int BlockTemplate::create_miner_tx(const MinerData& data, const std::vector<Mine
}
else {
hash eph_public_key;
if (!shares[i].m_wallet->get_eph_public_key(m_txkeySec, i, eph_public_key, view_tag)) {
if (!shares[i].m_wallet->get_eph_public_key(m_poolBlockTemplate->m_txkeySec, i, eph_public_key, view_tag)) {
LOGERR(1, "get_eph_public_key failed at index " << i);
}
m_minerTx.insert(m_minerTx.end(), eph_public_key.h, eph_public_key.h + HASH_SIZE);

@@ -783,14 +905,11 @@ int BlockTemplate::create_miner_tx(const MinerData& data, const std::vector<Mine
return -2;
}

m_poolBlockTemplate->m_txkeyPub = m_txkeyPub;
m_poolBlockTemplate->m_txkeySec = m_txkeySec;

// TX_EXTRA begin
m_minerTxExtra.clear();

m_minerTxExtra.push_back(TX_EXTRA_TAG_PUBKEY);
m_minerTxExtra.insert(m_minerTxExtra.end(), m_txkeyPub.h, m_txkeyPub.h + HASH_SIZE);
m_minerTxExtra.insert(m_minerTxExtra.end(), m_poolBlockTemplate->m_txkeyPub.h, m_poolBlockTemplate->m_txkeyPub.h + HASH_SIZE);

m_minerTxExtra.push_back(TX_EXTRA_NONCE);

@@ -828,48 +947,60 @@ int BlockTemplate::create_miner_tx(const MinerData& data, const std::vector<Mine
return 1;
}

hash BlockTemplate::calc_sidechain_hash() const
hash BlockTemplate::calc_sidechain_hash(uint32_t sidechain_extra_nonce) const
{
// Calculate side-chain hash (all block template bytes + all side-chain bytes + consensus ID, replacing NONCE, EXTRA_NONCE and HASH itself with 0's)
hash sidechain_hash;
const int sidechain_hash_offset = static_cast<int>(m_extraNonceOffsetInTemplate + m_poolBlockTemplate->m_extraNonceSize) + 2;
const int blob_size = static_cast<int>(m_blockTemplateBlob.size());
const int v = m_poolBlockTemplate->get_sidechain_version();
const size_t size = m_sidechainHashBlob.size();
const size_t N = m_sidechainHashInputLength;

const std::vector<uint8_t>& consensus_id = m_sidechain->consensus_id();
const std::vector<uint8_t> sidechain_data = m_poolBlockTemplate->serialize_sidechain_data();
const size_t sidechain_extra_nonce_offset = size - HASH_SIZE - ((v > 1) ? EXTRA_NONCE_SIZE : 0);
const uint8_t sidechain_extra_nonce_buf[EXTRA_NONCE_SIZE] = {
static_cast<uint8_t>(sidechain_extra_nonce >> 0),
static_cast<uint8_t>(sidechain_extra_nonce >> 8),
static_cast<uint8_t>(sidechain_extra_nonce >> 16),
static_cast<uint8_t>(sidechain_extra_nonce >> 24)
};

keccak_custom([this, sidechain_hash_offset, blob_size, consensus_id, &sidechain_data](int offset) -> uint8_t {
uint32_t k = static_cast<uint32_t>(offset - static_cast<int>(m_nonceOffset));
if (k < NONCE_SIZE) {
return 0;
}
hash result;
uint8_t buf[288];

k = static_cast<uint32_t>(offset - static_cast<int>(m_extraNonceOffsetInTemplate));
const bool b = N && (N <= sidechain_extra_nonce_offset) && (N < size) && (size - N <= sizeof(buf));

// Slow path: O(N)
if (!b || pool_block_debug()) {
keccak_custom([this, v, sidechain_extra_nonce_offset, &sidechain_extra_nonce_buf](int offset) -> uint8_t {
if (v > 1) {
const uint32_t k = static_cast<uint32_t>(offset - sidechain_extra_nonce_offset);
if (k < EXTRA_NONCE_SIZE) {
return 0;
return sidechain_extra_nonce_buf[k];
}
}
return m_sidechainHashBlob[offset];
}, static_cast<int>(size), result.h, HASH_SIZE);
}

k = static_cast<uint32_t>(offset - sidechain_hash_offset);
if (k < HASH_SIZE) {
return 0;
// Fast path: O(1)
if (b) {
const int inlen = static_cast<int>(size - N);

memcpy(buf, m_sidechainHashBlob.data() + N, size - N);
if (v > 1) {
memcpy(buf + sidechain_extra_nonce_offset - N, sidechain_extra_nonce_buf, EXTRA_NONCE_SIZE);
}

if (offset < blob_size) {
return m_blockTemplateBlob[offset];
uint64_t st[25];
memcpy(st, m_sidechainHashKeccakState, sizeof(st));
keccak_finish(buf, inlen, st);

if (pool_block_debug() && (memcmp(st, result.h, HASH_SIZE) != 0)) {
LOGERR(1, "calc_sidechain_hash fast path is broken. Fix the code!");
}

const int side_chain_data_offsset = offset - blob_size;
const int side_chain_data_size = static_cast<int>(sidechain_data.size());
if (side_chain_data_offsset < side_chain_data_size) {
return sidechain_data[side_chain_data_offsset];
memcpy(result.h, st, HASH_SIZE);
}

const int consensus_id_offset = side_chain_data_offsset - side_chain_data_size;
return consensus_id[consensus_id_offset];
},
static_cast<int>(m_blockTemplateBlob.size() + sidechain_data.size() + consensus_id.size()), sidechain_hash.h, HASH_SIZE);

return sidechain_hash;
return result;
}

hash BlockTemplate::calc_miner_tx_hash(uint32_t extra_nonce) const

@@ -879,7 +1010,7 @@ hash BlockTemplate::calc_miner_tx_hash(uint32_t extra_nonce) const

const uint8_t* data = m_blockTemplateBlob.data() + m_minerTxOffsetInTemplate;

const int extra_nonce_offset = static_cast<int>(m_extraNonceOffsetInTemplate - m_minerTxOffsetInTemplate);
const size_t extra_nonce_offset = m_extraNonceOffsetInTemplate - m_minerTxOffsetInTemplate;
const uint8_t extra_nonce_buf[EXTRA_NONCE_SIZE] = {
static_cast<uint8_t>(extra_nonce >> 0),
static_cast<uint8_t>(extra_nonce >> 8),

@@ -887,17 +1018,57 @@ hash BlockTemplate::calc_miner_tx_hash(uint32_t extra_nonce) const
static_cast<uint8_t>(extra_nonce >> 24)
};

// Calculate sidechain id with this extra_nonce
const hash sidechain_id = calc_sidechain_hash(extra_nonce);
const size_t sidechain_hash_offset = extra_nonce_offset + m_poolBlockTemplate->m_extraNonceSize + 2;

// 1. Prefix (everything except vin_rct_type byte in the end)
// Apply extra_nonce in-place because we can't write to the block template here
keccak_custom([data, extra_nonce_offset, &extra_nonce_buf](int offset)
const size_t tx_size = m_minerTxSize - 1;

hash full_hash;
uint8_t tx_buf[288];

const size_t N = m_minerTxKeccakStateInputLength;
const bool b = N && (N <= extra_nonce_offset) && (N < tx_size) && (tx_size - N <= sizeof(tx_buf));

// Slow path: O(N)
if (!b || pool_block_debug())
{
const uint32_t k = static_cast<uint32_t>(offset - extra_nonce_offset);
keccak_custom([data, extra_nonce_offset, &extra_nonce_buf, sidechain_hash_offset, &sidechain_id](int offset) {
uint32_t k = static_cast<uint32_t>(offset - static_cast<int>(extra_nonce_offset));
if (k < EXTRA_NONCE_SIZE) {
return extra_nonce_buf[k];
}

k = static_cast<uint32_t>(offset - static_cast<int>(sidechain_hash_offset));
if (k < HASH_SIZE) {
return sidechain_id.h[k];
}

return data[offset];
},
static_cast<int>(m_minerTxSize) - 1, hashes, HASH_SIZE);
}, static_cast<int>(tx_size), full_hash.h, HASH_SIZE);
memcpy(hashes, full_hash.h, HASH_SIZE);
}

// Fast path: O(1)
if (b) {
const int inlen = static_cast<int>(tx_size - N);

memcpy(tx_buf, data + N, inlen);
memcpy(tx_buf + extra_nonce_offset - N, extra_nonce_buf, EXTRA_NONCE_SIZE);
memcpy(tx_buf + sidechain_hash_offset - N, sidechain_id.h, HASH_SIZE);

uint64_t st[25];
memcpy(st, m_minerTxKeccakState, sizeof(st));
keccak_finish(tx_buf, inlen, st);

if (pool_block_debug() && (memcmp(st, full_hash.h, HASH_SIZE) != 0)) {
LOGERR(1, "calc_miner_tx_hash fast path is broken. Fix the code!");
}

memcpy(hashes, st, HASH_SIZE);
}

// 2. Base RCT, single 0 byte in miner tx
static constexpr uint8_t known_second_hash[HASH_SIZE] = {

@@ -910,7 +1081,7 @@ hash BlockTemplate::calc_miner_tx_hash(uint32_t extra_nonce) const

// Calculate miner transaction hash
hash result;
keccak(hashes, sizeof(hashes), result.h, HASH_SIZE);
keccak(hashes, sizeof(hashes), result.h);

return result;
}

@@ -920,16 +1091,14 @@ void BlockTemplate::calc_merkle_tree_main_branch()
m_merkleTreeMainBranch.clear();

const uint64_t count = m_numTransactionHashes + 1;
if (count == 1) {
return;
}

const uint8_t* h = m_transactionHashes.data();

hash root_hash;

if (count == 1) {
memcpy(root_hash.h, h, HASH_SIZE);
}
else if (count == 2) {
if (count == 2) {
m_merkleTreeMainBranch.insert(m_merkleTreeMainBranch.end(), h + HASH_SIZE, h + HASH_SIZE * 2);
keccak(h, HASH_SIZE * 2, root_hash.h, HASH_SIZE);
}
else {
size_t i, j, cnt;

@@ -941,11 +1110,14 @@ void BlockTemplate::calc_merkle_tree_main_branch()
std::vector<uint8_t> ints(cnt * HASH_SIZE);
memcpy(ints.data(), h, (cnt * 2 - count) * HASH_SIZE);

hash tmp;

for (i = cnt * 2 - count, j = cnt * 2 - count; j < cnt; i += 2, ++j) {
if (i == 0) {
m_merkleTreeMainBranch.insert(m_merkleTreeMainBranch.end(), h + HASH_SIZE, h + HASH_SIZE * 2);
}
keccak(h + i * HASH_SIZE, HASH_SIZE * 2, ints.data() + j * HASH_SIZE, HASH_SIZE);
keccak(h + i * HASH_SIZE, HASH_SIZE * 2, tmp.h);
memcpy(ints.data() + j * HASH_SIZE, tmp.h, HASH_SIZE);
}

while (cnt > 2) {

@@ -954,12 +1126,12 @@ void BlockTemplate::calc_merkle_tree_main_branch()
if (i == 0) {
m_merkleTreeMainBranch.insert(m_merkleTreeMainBranch.end(), ints.data() + HASH_SIZE, ints.data() + HASH_SIZE * 2);
}
keccak(ints.data() + i * HASH_SIZE, HASH_SIZE * 2, ints.data() + j * HASH_SIZE, HASH_SIZE);
keccak(ints.data() + i * HASH_SIZE, HASH_SIZE * 2, tmp.h);
memcpy(ints.data() + j * HASH_SIZE, tmp.h, HASH_SIZE);
}
}

m_merkleTreeMainBranch.insert(m_merkleTreeMainBranch.end(), ints.data() + HASH_SIZE, ints.data() + HASH_SIZE * 2);
keccak(ints.data(), HASH_SIZE * 2, root_hash.h, HASH_SIZE);
}
}

@@ -1039,7 +1211,7 @@ uint32_t BlockTemplate::get_hashing_blob_nolock(uint32_t extra_nonce, uint8_t* b
memcpy(h, root_hash.h, HASH_SIZE);
memcpy(h + HASH_SIZE, m_merkleTreeMainBranch.data() + i, HASH_SIZE);

keccak(h, HASH_SIZE * 2, root_hash.h, HASH_SIZE);
keccak(h, HASH_SIZE * 2, root_hash.h);
}

memcpy(p, root_hash.h, HASH_SIZE);

@@ -1095,23 +1267,27 @@ uint32_t BlockTemplate::get_hashing_blobs(uint32_t extra_nonce_start, uint32_t c
return blob_size;
}

std::vector<uint8_t> BlockTemplate::get_block_template_blob(uint32_t template_id, size_t& nonce_offset, size_t& extra_nonce_offset) const
std::vector<uint8_t> BlockTemplate::get_block_template_blob(uint32_t template_id, uint32_t sidechain_extra_nonce, size_t& nonce_offset, size_t& extra_nonce_offset, size_t& sidechain_id_offset, hash& sidechain_id) const
{
ReadLock lock(m_lock);

if (template_id != m_templateId) {
const BlockTemplate* old = m_oldTemplates[template_id % array_size(&BlockTemplate::m_oldTemplates)];
if (old && (template_id == old->m_templateId)) {
return old->get_block_template_blob(template_id, nonce_offset, extra_nonce_offset);
return old->get_block_template_blob(template_id, sidechain_extra_nonce, nonce_offset, extra_nonce_offset, sidechain_id_offset, sidechain_id);
}

nonce_offset = 0;
extra_nonce_offset = 0;
sidechain_id_offset = 0;
sidechain_id = {};
return std::vector<uint8_t>();
}

nonce_offset = m_nonceOffset;
extra_nonce_offset = m_extraNonceOffsetInTemplate;
sidechain_id_offset = m_extraNonceOffsetInTemplate + m_poolBlockTemplate->m_extraNonceSize + 2;
sidechain_id = calc_sidechain_hash(sidechain_extra_nonce);
return m_blockTemplateBlob;
}

@@ -1122,9 +1298,10 @@ bool BlockTemplate::submit_sidechain_block(uint32_t template_id, uint32_t nonce,
if (template_id == m_templateId) {
m_poolBlockTemplate->m_nonce = nonce;
m_poolBlockTemplate->m_extraNonce = extra_nonce;
m_poolBlockTemplate->m_sidechainId = calc_sidechain_hash(extra_nonce);
m_poolBlockTemplate->m_sidechainExtraBuf[3] = extra_nonce;

#if POOL_BLOCK_DEBUG
{
if (pool_block_debug()) {
std::vector<uint8_t> buf = m_poolBlockTemplate->serialize_mainchain_data();
const std::vector<uint8_t> sidechain_data = m_poolBlockTemplate->serialize_sidechain_data();

@@ -1149,7 +1326,6 @@ bool BlockTemplate::submit_sidechain_block(uint32_t template_id, uint32_t nonce,
}
}
}
#endif

m_poolBlockTemplate->m_verified = true;
if (!m_sidechain->block_seen(*m_poolBlockTemplate)) {

@@ -48,7 +48,7 @@ public:
uint32_t get_hashing_blob(uint32_t extra_nonce, uint8_t (&blob)[128], uint64_t& height, uint64_t& sidechain_height, difficulty_type& difficulty, difficulty_type& sidechain_difficulty, hash& seed_hash, size_t& nonce_offset, uint32_t& template_id) const;
uint32_t get_hashing_blobs(uint32_t extra_nonce_start, uint32_t count, std::vector<uint8_t>& blobs, uint64_t& height, difficulty_type& difficulty, difficulty_type& sidechain_difficulty, hash& seed_hash, size_t& nonce_offset, uint32_t& template_id) const;

std::vector<uint8_t> get_block_template_blob(uint32_t template_id, size_t& nonce_offset, size_t& extra_nonce_offset) const;
std::vector<uint8_t> get_block_template_blob(uint32_t template_id, uint32_t sidechain_extra_nonce, size_t& nonce_offset, size_t& extra_nonce_offset, size_t& sidechain_id_offset, hash& sidechain_id) const;

FORCEINLINE uint64_t height() const { return m_height; }
FORCEINLINE difficulty_type difficulty() const { return m_difficulty; }

@@ -56,7 +56,12 @@ public:
bool submit_sidechain_block(uint32_t template_id, uint32_t nonce, uint32_t extra_nonce);

FORCEINLINE const std::vector<MinerShare>& shares() const { return m_shares; }
FORCEINLINE uint64_t get_reward() const { return m_finalReward; }

#ifdef P2POOL_UNIT_TESTS
FORCEINLINE const PoolBlock* pool_block_template() const { return m_poolBlockTemplate; }
FORCEINLINE std::mt19937_64& rng() { return m_rng; }
#endif

private:
SideChain* m_sidechain;

@@ -64,7 +69,7 @@ private:

private:
int create_miner_tx(const MinerData& data, const std::vector<MinerShare>& shares, uint64_t max_reward_amounts_weight, bool dry_run);
hash calc_sidechain_hash() const;
hash calc_sidechain_hash(uint32_t sidechain_extra_nonce) const;
hash calc_miner_tx_hash(uint32_t extra_nonce) const;
void calc_merkle_tree_main_branch();

@@ -76,6 +81,7 @@ private:
std::atomic<uint64_t> m_lastUpdated;

std::vector<uint8_t> m_blockTemplateBlob;
std::vector<uint8_t> m_fullDataBlob;
std::vector<uint8_t> m_merkleTreeMainBranch;

size_t m_blockHeaderSize;

@@ -92,17 +98,21 @@ private:

uint64_t m_timestamp;

hash m_txkeyPub;
hash m_txkeySec;

PoolBlock* m_poolBlockTemplate;

BlockTemplate* m_oldTemplates[4] = {};

uint64_t m_finalReward;
std::atomic<uint64_t> m_finalReward;

// Temp vectors, will be cleaned up after use and skipped in copy constructor/assignment operators
std::vector<uint8_t> m_minerTx;
uint64_t m_minerTxKeccakState[25];
size_t m_minerTxKeccakStateInputLength;

std::vector<uint8_t> m_sidechainHashBlob;
uint64_t m_sidechainHashKeccakState[25];
size_t m_sidechainHashInputLength;

std::vector<uint8_t> m_blockHeader;
std::vector<uint8_t> m_minerTxExtra;
std::vector<uint8_t> m_transactionHashes;
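
The widened get_block_template_blob() above also hands back where the sidechain id sits inside the blob and which value it must have for the supplied sidechain extra_nonce, since that id now depends on the nonce. A hypothetical caller sketch, only to illustrate what the new out-parameters are for; the function name and surrounding flow are assumptions, and the project's BlockTemplate, hash and HASH_SIZE declarations are presumed visible:

    // Fills in a submittable blob for one (nonce, extra_nonce) attempt.
    void patch_blob_for_submit(const BlockTemplate& tpl, uint32_t template_id,
                               uint32_t nonce, uint32_t extra_nonce)
    {
        size_t nonce_offset = 0, extra_nonce_offset = 0, sidechain_id_offset = 0;
        hash sidechain_id;

        std::vector<uint8_t> blob = tpl.get_block_template_blob(
            template_id, extra_nonce,
            nonce_offset, extra_nonce_offset, sidechain_id_offset, sidechain_id);

        if (blob.empty()) {
            return; // stale template_id, nothing to patch
        }

        // Little-endian writes, matching the byte order used by the template code.
        memcpy(blob.data() + nonce_offset, &nonce, sizeof(nonce));
        memcpy(blob.data() + extra_nonce_offset, &extra_nonce, sizeof(extra_nonce));
        memcpy(blob.data() + sidechain_id_offset, sidechain_id.h, HASH_SIZE);
        // ... hand the blob off for submission ...
    }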

@@ -185,8 +185,8 @@ struct
#endif
difficulty_type
{
FORCEINLINE difficulty_type() : lo(0), hi(0) {}
FORCEINLINE difficulty_type(uint64_t a, uint64_t b) : lo(a), hi(b) {}
FORCEINLINE constexpr difficulty_type() : lo(0), hi(0) {}
FORCEINLINE constexpr difficulty_type(uint64_t a, uint64_t b) : lo(a), hi(b) {}

uint64_t lo;
uint64_t hi;

@@ -254,7 +254,10 @@ struct
return (lo < other.lo);
}

FORCEINLINE bool operator>(const difficulty_type& other) const { return other.operator<(*this); }

FORCEINLINE bool operator>=(const difficulty_type& other) const { return !operator<(other); }
FORCEINLINE bool operator<=(const difficulty_type& other) const { return !operator>(other); }

FORCEINLINE bool operator==(const difficulty_type& other) const { return (lo == other.lo) && (hi == other.hi); }
FORCEINLINE bool operator!=(const difficulty_type& other) const { return (lo != other.lo) || (hi != other.hi); }

@@ -294,6 +297,8 @@ struct
static_assert(sizeof(difficulty_type) == sizeof(uint64_t) * 2, "struct difficulty_type has invalid size, check your compiler options");
static_assert(std::is_standard_layout<difficulty_type>::value, "struct difficulty_type is not a POD, check your compiler options");

static constexpr difficulty_type diff_max = { std::numeric_limits<uint64_t>::max(), std::numeric_limits<uint64_t>::max() };

template<typename T>
FORCEINLINE difficulty_type operator+(const difficulty_type& a, const T& b)
{
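
The constructors become constexpr so that the new compile-time constant diff_max added later in the same file is legal: a static constexpr object of class type needs a constexpr constructor. A reduced illustration of the pattern (not the actual header; the operator< body here is only an example):

    #include <cstdint>
    #include <limits>

    struct diff128
    {
        constexpr diff128() : lo(0), hi(0) {}
        constexpr diff128(uint64_t a, uint64_t b) : lo(a), hi(b) {}

        uint64_t lo;
        uint64_t hi;

        // operator> is expressed through operator< with swapped operands,
        // and >= / <= as negations, mirroring the patch above.
        constexpr bool operator<(const diff128& o) const
        {
            return (hi < o.hi) || ((hi == o.hi) && (lo < o.lo));
        }
        constexpr bool operator>(const diff128& o) const { return o < *this; }
        constexpr bool operator>=(const diff128& o) const { return !(*this < o); }
        constexpr bool operator<=(const diff128& o) const { return !(*this > o); }
    };

    // Only compiles because the two-argument constructor is constexpr.
    static constexpr diff128 diff_max_example = {
        std::numeric_limits<uint64_t>::max(), std::numeric_limits<uint64_t>::max()
    };

    static_assert(diff_max_example > diff128(0, 1), "comparisons are usable at compile time");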

@@ -182,7 +182,7 @@ static void do_showpeers(p2pool* m_pool, const char* /* args */)
static void do_showworkers(p2pool* m_pool, const char* /* args */)
{
if (m_pool->stratum_server()) {
m_pool->stratum_server()->show_workers();
m_pool->stratum_server()->show_workers_async();
}
}

@@ -134,7 +134,7 @@ bool check_keys(const hash& pub, const hash& sec)

static FORCEINLINE void hash_to_scalar(const uint8_t* data, int length, uint8_t (&res)[HASH_SIZE])
{
keccak(data, length, res, HASH_SIZE);
keccak(data, length, res);
sc_reduce32(res);
}

@@ -266,10 +266,10 @@ public:
return true;
}

void get_tx_keys(hash& pub, hash& sec, const hash& wallet_spend_key, const hash& monero_block_id)
void get_tx_keys(hash& pub, hash& sec, const hash& seed, const hash& monero_block_id)
{
std::array<uint8_t, HASH_SIZE * 2> index;
memcpy(index.data(), wallet_spend_key.h, HASH_SIZE);
memcpy(index.data(), seed.h, HASH_SIZE);
memcpy(index.data() + HASH_SIZE, monero_block_id.h, HASH_SIZE);

{

@@ -287,7 +287,7 @@ public:
uint8_t entropy[N + HASH_SIZE * 2];

memcpy(entropy, domain, N);
memcpy(entropy + N, wallet_spend_key.h, HASH_SIZE);
memcpy(entropy + N, seed.h, HASH_SIZE);
memcpy(entropy + N + HASH_SIZE, monero_block_id.h, HASH_SIZE);

generate_keys_deterministic(pub, sec, entropy, sizeof(entropy));

@@ -399,9 +399,9 @@ bool derive_public_key(const hash& derivation, size_t output_index, const hash&
return cache->get_public_key(derivation, output_index, base, derived_key);
}

void get_tx_keys(hash& pub, hash& sec, const hash& wallet_spend_key, const hash& monero_block_id)
void get_tx_keys(hash& pub, hash& sec, const hash& seed, const hash& monero_block_id)
{
cache->get_tx_keys(pub, sec, wallet_spend_key, monero_block_id);
cache->get_tx_keys(pub, sec, seed, monero_block_id);
}

void derive_view_tag(const hash& derivation, size_t output_index, uint8_t& view_tag)

@@ -416,7 +416,7 @@ void derive_view_tag(const hash& derivation, size_t output_index, uint8_t& view_
writeVarint(output_index, [&p](uint8_t b) { *(p++) = b; });

hash view_tag_full;
keccak(buf, static_cast<int>(p - buf), view_tag_full.h, HASH_SIZE);
keccak(buf, static_cast<int>(p - buf), view_tag_full.h);
view_tag = view_tag_full.h[0];
}

@@ -21,7 +21,7 @@ namespace p2pool {

void generate_keys(hash& pub, hash& sec);
void generate_keys_deterministic(hash& pub, hash& sec, const uint8_t* entropy, size_t len);
void get_tx_keys(hash& pub, hash& sec, const hash& wallet_spend_key, const hash& monero_block_id);
void get_tx_keys(hash& pub, hash& sec, const hash& seed, const hash& monero_block_id);
bool check_keys(const hash& pub, const hash& sec);
bool generate_key_derivation(const hash& key1, const hash& key2, size_t output_index, hash& derivation, uint8_t& view_tag);
bool derive_public_key(const hash& derivation, size_t output_index, const hash& base, hash& derived_key);

@@ -315,7 +315,7 @@ int CurlContext::on_timer(CURLM* /*multi*/, long timeout_ms)
return 0;
}

if (timeout_ms == 0) {
if ((timeout_ms == 0) && !uv_is_closing(reinterpret_cast<uv_handle_t*>(&m_async))) {
// 0 ms timeout, but we can't just call on_timeout() here - we have to kick the UV loop
const int result = uv_async_send(&m_async);
if (result < 0) {

@@ -24,7 +24,7 @@ namespace p2pool {
#define ROTL64(x, y) (((x) << (y)) | ((x) >> (64 - (y))))
#endif

const uint64_t keccakf_rndc[24] =
static const uint64_t keccakf_rndc[24] =
{
0x0000000000000001, 0x0000000000008082, 0x800000000000808a,
0x8000000080008000, 0x000000000000808b, 0x0000000080000001,

@@ -36,7 +36,7 @@ const uint64_t keccakf_rndc[24] =
0x8000000000008080, 0x0000000080000001, 0x8000000080008008
};

NOINLINE void keccakf(uint64_t* st)
NOINLINE void keccakf(uint64_t (&st)[25])
{
for (int round = 0; round < KeccakParams::ROUNDS; ++round) {
uint64_t bc[5];

@@ -115,14 +115,10 @@ NOINLINE void keccakf(uint64_t* st)
}
}

NOINLINE void keccak(const uint8_t* in, int inlen, uint8_t* md, int mdlen)
NOINLINE void keccak_step(const uint8_t* &in, int &inlen, uint64_t (&st)[25])
{
uint64_t st[25];

const int rsiz = sizeof(st) == mdlen ? KeccakParams::HASH_DATA_AREA : 200 - 2 * mdlen;
const int rsizw = rsiz / 8;

memset(st, 0, sizeof(st));
constexpr int rsiz = KeccakParams::HASH_DATA_AREA;
constexpr int rsizw = rsiz / 8;

for (; inlen >= rsiz; inlen -= rsiz, in += rsiz) {
for (int i = 0; i < rsizw; i++) {

@@ -130,6 +126,14 @@ NOINLINE void keccak(const uint8_t* in, int inlen, uint8_t* md, int mdlen)
}
keccakf(st);
}
}

NOINLINE void keccak_finish(const uint8_t* in, int inlen, uint64_t (&st)[25])
{
constexpr int rsiz = KeccakParams::HASH_DATA_AREA;
constexpr int rsizw = rsiz / 8;

keccak_step(in, inlen, st);

// last block and padding
alignas(8) uint8_t temp[144];

@@ -144,13 +148,6 @@ NOINLINE void keccak(const uint8_t* in, int inlen, uint8_t* md, int mdlen)
}

keccakf(st);

memcpy(md, st, mdlen);
}

void keccak(const uint8_t *in, int inlen, uint8_t (&md)[200])
{
keccak(in, inlen, md, 200);
}

} // namespace p2pool
21
src/keccak.h
@@ -24,20 +24,29 @@ enum KeccakParams {
ROUNDS = 24,
};

void keccakf(uint64_t* st);
void keccak(const uint8_t *in, int inlen, uint8_t *md, int mdlen);
void keccak(const uint8_t* in, int inlen, uint8_t (&md)[200]);
void keccakf(uint64_t (&st)[25]);
void keccak_step(const uint8_t* &in, int &inlen, uint64_t (&st)[25]);
void keccak_finish(const uint8_t* in, int inlen, uint64_t (&st)[25]);

template<size_t N>
FORCEINLINE void keccak(const uint8_t* in, int inlen, uint8_t (&md)[N])
{
static_assert((N == 32) || (N == 200), "invalid size");

uint64_t st[25] = {};
keccak_step(in, inlen, st);
keccak_finish(in, inlen, st);
memcpy(md, st, N);
}

template<typename T>
FORCEINLINE void keccak_custom(T&& in, int inlen, uint8_t* md, int mdlen)
{
uint64_t st[25];
uint64_t st[25] = {};

const int rsiz = sizeof(st) == mdlen ? KeccakParams::HASH_DATA_AREA : 200 - 2 * mdlen;
const int rsizw = rsiz / 8;

memset(st, 0, sizeof(st));

int offset = 0;

for (; inlen >= rsiz, inlen -= rsiz, offset += rsiz) {
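Note on the refactor above: keccak_step() absorbs every complete 136-byte block and advances the in/inlen references past the data it consumed, while keccak_finish() pads whatever is left and runs the final permutation, leaving the digest in the first bytes of the state. A minimal usage sketch of the split API, mirroring the keccak<N> template above (the function name and inputs are illustrative, and it assumes this keccak.h is included within namespace p2pool):

#include <cstdint>
#include <cstring>

// Sketch only: compute a 32-byte Keccak hash of `msg` with the split API.
void hash_32(const uint8_t* msg, int len, uint8_t (&out)[32])
{
    uint64_t st[25] = {};        // the sponge state must start zeroed
    keccak_step(msg, len, st);   // absorbs all full blocks, advancing msg and len
    keccak_finish(msg, len, st); // pads the tail and applies the final keccakf()
    memcpy(out, st, 32);         // first 32 bytes of the state are the digest
}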
@@ -492,7 +492,7 @@ struct DummyStream
MSVC_PRAGMA(warning(suppress:26444)) \
[=]() { \
log::DummyStream x; \
x << level << __VA_ARGS__; \
x << (level) << __VA_ARGS__; \
}; \
} \
} while (0)

@@ -508,7 +508,7 @@ struct DummyStream
#define LOG(level, severity, ...) \
do { \
SIDE_EFFECT_CHECK(level, __VA_ARGS__); \
if (level <= log::GLOBAL_LOG_LEVEL) { \
if ((level) <= log::GLOBAL_LOG_LEVEL) { \
log::Writer CONCAT(log_wrapper_, __LINE__)(severity); \
CONCAT(log_wrapper_, __LINE__) << log::Gray() << log_category_prefix; \
log::apply_severity<severity>(CONCAT(log_wrapper_, __LINE__)); \
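Parenthesizing the macro parameter matters because level can be an arbitrary expression at the call site, and the relational operator otherwise binds inside it. A small self-contained illustration of the precedence pitfall (the macros and values here are hypothetical, not part of p2pool):

#include <cstdio>

constexpr int GLOBAL_LOG_LEVEL = 3;
#define LEVEL_OK_BAD(level)  (level <= GLOBAL_LOG_LEVEL)    // parameter not parenthesized
#define LEVEL_OK_GOOD(level) ((level) <= GLOBAL_LOG_LEVEL)  // parameter parenthesized, as in the fix

int main()
{
    const bool verbose = true;
    // LEVEL_OK_BAD(verbose ? 6 : 1)  expands to (verbose ? 6 : 1 <= 3), i.e. verbose ? 6 : (1 <= 3) -> 6 (truthy)
    // LEVEL_OK_GOOD(verbose ? 6 : 1) expands to ((verbose ? 6 : 1) <= 3), i.e. 6 <= 3 -> false
    std::printf("%d %d\n", LEVEL_OK_BAD(verbose ? 6 : 1) ? 1 : 0, LEVEL_OK_GOOD(verbose ? 6 : 1) ? 1 : 0); // prints "1 0"
    return 0;
}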
@@ -39,15 +39,17 @@ Miner::Miner(p2pool* pool, uint32_t threads)
, m_threads(threads)
, m_stopped{ false }
, m_startTimestamp(high_resolution_clock::now())
, m_nonce(0)
, m_rng(RandomDeviceSeed::instance)
, m_fullNonce(std::numeric_limits<uint64_t>::max())
, m_nonceTimestamp(m_startTimestamp)
, m_totalHashes(0)
, m_sharesFound(0)
, m_sharesFailed(0)
, m_job{}
, m_jobIndex{ 0 }
{
std::random_device rd;
m_extraNonce = static_cast<uint32_t>(rd());
// Diffuse the initial state in case it has low quality
m_rng.discard(10000);

on_block(m_pool->block_template());
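The constructor now seeds a std::mt19937_64 once and stirs it with discard(10000) instead of drawing a fresh std::random_device value per template. A rough stand-alone equivalent of that seeding pattern, assuming RandomDeviceSeed is simply a seed sequence built from std::random_device (that helper is p2pool-specific and only assumed here):

#include <random>

// Sketch: seed a 64-bit Mersenne Twister from the OS entropy source and
// discard some output to diffuse a possibly weak initial state.
std::mt19937_64 make_rng()
{
    std::random_device rd;
    std::seed_seq seq{ rd(), rd(), rd(), rd() };
    std::mt19937_64 rng(seq);
    rng.discard(10000);
    return rng;
}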
@@ -77,15 +79,24 @@ Miner::~Miner()

void Miner::print_status()
{
const uint32_t hash_count = 0 - m_nonce.load();
const uint32_t hash_count = std::numeric_limits<uint32_t>::max() - static_cast<uint32_t>(m_fullNonce.load());

const double dt = static_cast<double>(duration_cast<nanoseconds>(high_resolution_clock::now() - m_nonceTimestamp).count()) / 1e9;
const uint64_t hr = (dt > 0.0) ? static_cast<uint64_t>(hash_count / dt) : 0;

char shares_failed_buf[64] = {};
log::Stream s(shares_failed_buf);

const uint32_t shares_found = m_sharesFound;
const uint32_t shares_failed = m_sharesFailed;
if (shares_failed) {
s << log::Yellow() << "\nShares failed = " << shares_failed << log::NoColor();
}

LOGINFO(0, "status" <<
"\nThreads = " << m_threads <<
"\nHashrate = " << log::Hashrate(hr) <<
"\nShares found = " << m_sharesFound.load()
"\nShares found = " << shares_found << static_cast<const char*>(shares_failed_buf)
);
}
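Because the low 32 bits of m_fullNonce start at 0xFFFFFFFF and every hash attempt decrements the packed value by one, the number of hashes done since the current job started is simply UINT32_MAX minus the nonce half. A tiny sketch of that bookkeeping (free-function form, names illustrative):

#include <cstdint>
#include <limits>

// full_nonce layout (see on_block below): [extra_nonce : high 32 bits][nonce : low 32 bits],
// with the nonce half initialized to 0xFFFFFFFF and counting down once per hash.
uint32_t hashes_done(uint64_t current_full_nonce)
{
    const uint32_t nonce = static_cast<uint32_t>(current_full_nonce);  // low 32 bits
    return std::numeric_limits<uint32_t>::max() - nonce;               // attempts since the job began
}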
@@ -94,9 +105,12 @@ void Miner::on_block(const BlockTemplate& block)
const uint32_t next_index = m_jobIndex ^ 1;
Job& j = m_job[next_index];
hash seed;
j.m_blobSize = block.get_hashing_blob(m_extraNonce, j.m_blob, j.m_height, j.m_sidechainHeight, j.m_diff, j.m_sidechainDiff, seed, j.m_nonceOffset, j.m_templateId);

const uint32_t hash_count = 0 - m_nonce.exchange(0);
const uint32_t extra_nonce = PoolBlock::signal_v2_readiness(static_cast<uint32_t>(m_rng() >> 32));
j.m_blobSize = block.get_hashing_blob(extra_nonce, j.m_blob, j.m_height, j.m_sidechainHeight, j.m_diff, j.m_sidechainDiff, seed, j.m_nonceOffset, j.m_templateId);

const uint64_t next_full_nonce = (static_cast<uint64_t>(extra_nonce) << 32) | std::numeric_limits<uint32_t>::max();
const uint32_t hash_count = std::numeric_limits<uint32_t>::max() - static_cast<uint32_t>(m_fullNonce.exchange(next_full_nonce));
m_jobIndex = next_index;

const auto cur_ts = high_resolution_clock::now();
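The single 64-bit atomic replaces the separate m_nonce/m_extraNonce pair: the extra nonce lives in the high half and a downward-counting nonce in the low half, so one fetch_sub(1) hands each worker a unique nonce while the extra nonce stays stable for the whole job (it would only change after 2^32 decrements). A sketch of the packing, matching the expressions above:

#include <cstdint>
#include <limits>

uint64_t pack_full_nonce(uint32_t extra_nonce)
{
    // extra_nonce in the high 32 bits, nonce seeded to 0xFFFFFFFF in the low 32 bits
    return (static_cast<uint64_t>(extra_nonce) << 32) | std::numeric_limits<uint32_t>::max();
}

void unpack_full_nonce(uint64_t full_nonce, uint32_t& nonce, uint32_t& extra_nonce)
{
    nonce       = static_cast<uint32_t>(full_nonce);        // low 32 bits
    extra_nonce = static_cast<uint32_t>(full_nonce >> 32);  // high 32 bits
}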
@ -118,6 +132,7 @@ void Miner::on_block(const BlockTemplate& block)
|
|||
<< ",\"total_hashes\":" << m_totalHashes.load()
|
||||
<< ",\"time_running\":" << time_running
|
||||
<< ",\"shares_found\":" << m_sharesFound.load()
|
||||
<< ",\"shares_failed\":" << m_sharesFailed.load()
|
||||
<< ",\"block_reward_share_percent\":" << block_reward_share_percent
|
||||
<< ",\"threads\":" << m_threads
|
||||
<< "}";
|
||||
|
@ -129,6 +144,7 @@ void Miner::reset_share_counters()
|
|||
{
|
||||
m_totalHashes = 0;
|
||||
m_sharesFound = 0;
|
||||
m_sharesFailed = 0;
|
||||
}
|
||||
|
||||
void Miner::run(void* data)
|
||||
|
@@ -193,14 +209,19 @@ void Miner::run(WorkerData* data)
if (first) {
first = false;
memcpy(&job[index], &miner->m_job[miner->m_jobIndex], sizeof(Job));
job[index].set_nonce(miner->m_nonce.fetch_sub(1), miner->m_extraNonce);

const uint64_t full_nonce = miner->m_fullNonce.fetch_sub(1);
job[index].set_nonce(static_cast<uint32_t>(full_nonce), static_cast<uint32_t>(full_nonce >> 32));

randomx_calculate_hash_first(vm, job[index].m_blob, job[index].m_blobSize);
}

const Job& j = job[index];
index ^= 1;
memcpy(&job[index], &miner->m_job[miner->m_jobIndex], sizeof(Job));
job[index].set_nonce(miner->m_nonce.fetch_sub(1), miner->m_extraNonce);

const uint64_t full_nonce = miner->m_fullNonce.fetch_sub(1);
job[index].set_nonce(static_cast<uint32_t>(full_nonce), static_cast<uint32_t>(full_nonce >> 32));

hash h;
randomx_calculate_hash_next(vm, job[index].m_blob, job[index].m_blobSize, &h);

@@ -213,7 +234,12 @@ void Miner::run(WorkerData* data)
if (j.m_sidechainDiff.check_pow(h)) {
LOGINFO(0, log::Green() << "SHARE FOUND: mainchain height " << j.m_height << ", sidechain height " << j.m_sidechainHeight << ", diff " << j.m_sidechainDiff << ", worker thread " << data->m_index << '/' << data->m_count);
++m_sharesFound;
m_pool->submit_sidechain_block(j.m_templateId, j.m_nonce, j.m_extraNonce);
if (!m_pool->submit_sidechain_block(j.m_templateId, j.m_nonce, j.m_extraNonce)) {
if (m_sharesFound > 0) {
--m_sharesFound;
}
++m_sharesFailed;
}
}

std::this_thread::yield();
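The worker keeps two Job slots so it can use RandomX's streaming interface: randomx_calculate_hash_first() starts hashing one blob, and each randomx_calculate_hash_next() call finishes that hash while beginning the next blob. A condensed sketch of this pipelining pattern; Job, hash, get_next_job() and check_share() are stand-ins for the miner's own state, not p2pool APIs:

void worker_loop(randomx_vm* vm)
{
    Job job[2];
    uint32_t index = 0;

    job[index] = get_next_job();                    // hypothetical: copy the current job + nonce
    randomx_calculate_hash_first(vm, job[index].m_blob, job[index].m_blobSize);

    for (;;) {
        const Job& prev = job[index];               // the blob whose hash is in flight
        index ^= 1;
        job[index] = get_next_job();

        hash h;
        // Finishes hashing `prev` and starts hashing the freshly prepared job in one call
        randomx_calculate_hash_next(vm, job[index].m_blob, job[index].m_blobSize, &h);

        check_share(prev, h);                       // hypothetical: compare h against prev's target
    }
}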
@@ -54,12 +54,14 @@ private:

std::chrono::high_resolution_clock::time_point m_startTimestamp;

std::atomic<uint32_t> m_nonce;
std::mt19937_64 m_rng;

std::atomic<uint64_t> m_fullNonce;
std::chrono::high_resolution_clock::time_point m_nonceTimestamp;
uint32_t m_extraNonce;

std::atomic<uint64_t> m_totalHashes;
std::atomic<uint32_t> m_sharesFound;
std::atomic<uint32_t> m_sharesFailed;

struct Job
{
@ -75,7 +75,7 @@ P2PServer::P2PServer(p2pool* pool)
|
|||
[this](bool is_v6, const std::string& /*address*/, const std::string& ip, int port)
|
||||
{
|
||||
if (!str_to_ip(is_v6, ip.c_str(), m_socks5ProxyIP)) {
|
||||
panic();
|
||||
PANIC_STOP();
|
||||
}
|
||||
m_socks5ProxyV6 = is_v6;
|
||||
m_socks5ProxyPort = port;
|
||||
|
@ -95,7 +95,7 @@ P2PServer::P2PServer(p2pool* pool)
|
|||
int err = uv_async_init(&m_loop, &m_broadcastAsync, on_broadcast);
|
||||
if (err) {
|
||||
LOGERR(1, "uv_async_init failed, error " << uv_err_name(err));
|
||||
panic();
|
||||
PANIC_STOP();
|
||||
}
|
||||
m_broadcastAsync.data = this;
|
||||
m_broadcastQueue.reserve(2);
|
||||
|
@ -103,21 +103,21 @@ P2PServer::P2PServer(p2pool* pool)
|
|||
err = uv_async_init(&m_loop, &m_connectToPeersAsync, on_connect_to_peers);
|
||||
if (err) {
|
||||
LOGERR(1, "uv_async_init failed, error " << uv_err_name(err));
|
||||
panic();
|
||||
PANIC_STOP();
|
||||
}
|
||||
m_connectToPeersAsync.data = this;
|
||||
|
||||
err = uv_async_init(&m_loop, &m_showPeersAsync, on_show_peers);
|
||||
if (err) {
|
||||
LOGERR(1, "uv_async_init failed, error " << uv_err_name(err));
|
||||
panic();
|
||||
PANIC_STOP();
|
||||
}
|
||||
m_showPeersAsync.data = this;
|
||||
|
||||
err = uv_timer_init(&m_loop, &m_timer);
|
||||
if (err) {
|
||||
LOGERR(1, "failed to create timer, error " << uv_err_name(err));
|
||||
panic();
|
||||
PANIC_STOP();
|
||||
}
|
||||
|
||||
if (m_cache) {
|
||||
|
@ -130,7 +130,7 @@ P2PServer::P2PServer(p2pool* pool)
|
|||
err = uv_timer_start(&m_timer, on_timer, 1000, m_timerInterval * 1000);
|
||||
if (err) {
|
||||
LOGERR(1, "failed to start timer, error " << uv_err_name(err));
|
||||
panic();
|
||||
PANIC_STOP();
|
||||
}
|
||||
|
||||
load_peer_list();
|
||||
|
@@ -497,7 +497,12 @@ void P2PServer::load_peer_list()
hints.ai_flags = AI_ADDRCONFIG;

addrinfo* result;
const int err = getaddrinfo(nodes[i], nullptr, &hints, &result);
int err = getaddrinfo(nodes[i], nullptr, &hints, &result);
if (err) {
LOGWARN(4, "getaddrinfo failed for " << nodes[i] << ": " << gai_strerror(err) << ", retrying with IPv4 only");
hints.ai_family = AF_INET;
err = getaddrinfo(nodes[i], nullptr, &hints, &result);
}
if (err == 0) {
for (addrinfo* r = result; r != NULL; r = r->ai_next) {
const char* addr_str;
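The retry path above covers hosts where AI_ADDRCONFIG plus an unusable IPv6 setup makes the first lookup fail. A stand-alone sketch of the same lookup-with-IPv4-fallback pattern (the function name and host parameter are illustrative):

#include <netdb.h>

addrinfo* resolve_seed_node(const char* host)
{
    addrinfo hints{};
    hints.ai_family = AF_UNSPEC;        // both IPv4 and IPv6
    hints.ai_socktype = SOCK_STREAM;
    hints.ai_flags = AI_ADDRCONFIG;     // only families actually configured on this machine

    addrinfo* result = nullptr;
    int err = getaddrinfo(host, nullptr, &hints, &result);
    if (err) {
        hints.ai_family = AF_INET;      // retry with IPv4 only
        err = getaddrinfo(host, nullptr, &hints, &result);
    }
    return (err == 0) ? result : nullptr; // caller must freeaddrinfo() the returned list
}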
@ -866,7 +871,7 @@ void P2PServer::on_broadcast()
|
|||
}
|
||||
|
||||
for (Broadcast* data : broadcast_queue) {
|
||||
send(client, [client, data](void* buf, size_t buf_size) -> size_t
|
||||
const bool result = send(client, [client, data](void* buf, size_t buf_size) -> size_t
|
||||
{
|
||||
uint8_t* p0 = reinterpret_cast<uint8_t*>(buf);
|
||||
uint8_t* p = p0;
|
||||
|
@ -925,6 +930,11 @@ void P2PServer::on_broadcast()
|
|||
|
||||
return p - p0;
|
||||
});
|
||||
if (!result) {
|
||||
LOGWARN(5, "failed to broadcast to " << static_cast<char*>(client->m_addrString) << ", disconnecting");
|
||||
client->close();
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@@ -1162,9 +1172,9 @@ void P2PServer::check_block_template()
return;
}

// Force update block template every 30 seconds after the initial sync is done
if (seconds_since_epoch() >= m_pool->block_template().last_updated() + 30) {
LOGINFO(4, "block template is 30 seconds old, updating it");
// Force update block template every 20 seconds after the initial sync is done
if (seconds_since_epoch() >= m_pool->block_template().last_updated() + 20) {
LOGINFO(4, "block template is 20 seconds old, updating it");
m_pool->update_block_template_async();
}
}
@ -1316,7 +1326,7 @@ bool P2PServer::P2PClient::on_read(char* data, uint32_t size)
|
|||
{
|
||||
case MessageId::HANDSHAKE_CHALLENGE:
|
||||
if (m_handshakeComplete) {
|
||||
LOGWARN(4, "peer " << log::Gray() << static_cast<char*>(m_addrString) << log::NoColor() << " sent an unexpected HANDSHAKE_CHALLENGE");
|
||||
LOGWARN(4, "peer " << static_cast<char*>(m_addrString) << " sent an unexpected HANDSHAKE_CHALLENGE");
|
||||
ban(DEFAULT_BAN_TIME);
|
||||
server->remove_peer_from_list(this);
|
||||
return false;
|
||||
|
@ -1337,7 +1347,7 @@ bool P2PServer::P2PClient::on_read(char* data, uint32_t size)
|
|||
|
||||
case MessageId::HANDSHAKE_SOLUTION:
|
||||
if (m_handshakeComplete) {
|
||||
LOGWARN(4, "peer " << log::Gray() << static_cast<char*>(m_addrString) << log::NoColor() << " sent an unexpected HANDSHAKE_SOLUTION");
|
||||
LOGWARN(4, "peer " << static_cast<char*>(m_addrString) << " sent an unexpected HANDSHAKE_SOLUTION");
|
||||
ban(DEFAULT_BAN_TIME);
|
||||
server->remove_peer_from_list(this);
|
||||
return false;
|
||||
|
@ -1378,7 +1388,7 @@ bool P2PServer::P2PClient::on_read(char* data, uint32_t size)
|
|||
case MessageId::BLOCK_REQUEST:
|
||||
++num_block_requests;
|
||||
if (num_block_requests > 100) {
|
||||
LOGWARN(4, "peer " << log::Gray() << static_cast<char*>(m_addrString) << log::NoColor() << " sent too many BLOCK_REQUEST messages at once");
|
||||
LOGWARN(4, "peer " << static_cast<char*>(m_addrString) << " sent too many BLOCK_REQUEST messages at once");
|
||||
ban(DEFAULT_BAN_TIME);
|
||||
server->remove_peer_from_list(this);
|
||||
return false;
|
||||
|
@ -1398,7 +1408,7 @@ bool P2PServer::P2PClient::on_read(char* data, uint32_t size)
|
|||
|
||||
case MessageId::BLOCK_RESPONSE:
|
||||
if (m_blockPendingRequests.empty()) {
|
||||
LOGWARN(4, "peer " << log::Gray() << static_cast<char*>(m_addrString) << log::NoColor() << " sent an unexpected BLOCK_RESPONSE");
|
||||
LOGWARN(4, "peer " << static_cast<char*>(m_addrString) << " sent an unexpected BLOCK_RESPONSE");
|
||||
ban(DEFAULT_BAN_TIME);
|
||||
server->remove_peer_from_list(this);
|
||||
return false;
|
||||
|
@ -1458,7 +1468,7 @@ bool P2PServer::P2PClient::on_read(char* data, uint32_t size)
|
|||
|
||||
case MessageId::PEER_LIST_RESPONSE:
|
||||
if (m_peerListPendingRequests <= 0) {
|
||||
LOGWARN(4, "peer " << log::Gray() << static_cast<char*>(m_addrString) << log::NoColor() << " sent an unexpected PEER_LIST_RESPONSE");
|
||||
LOGWARN(4, "peer " << static_cast<char*>(m_addrString) << " sent an unexpected PEER_LIST_RESPONSE");
|
||||
ban(DEFAULT_BAN_TIME);
|
||||
server->remove_peer_from_list(this);
|
||||
return false;
|
||||
|
@ -1469,7 +1479,7 @@ bool P2PServer::P2PClient::on_read(char* data, uint32_t size)
|
|||
if (bytes_left >= 2) {
|
||||
const uint32_t num_peers = buf[1];
|
||||
if (num_peers > PEER_LIST_RESPONSE_MAX_PEERS) {
|
||||
LOGWARN(5, "peer " << log::Gray() << static_cast<char*>(m_addrString) << log::NoColor() << " sent too long peer list (" << num_peers << ')');
|
||||
LOGWARN(5, "peer " << static_cast<char*>(m_addrString) << " sent too long peer list (" << num_peers << ')');
|
||||
ban(DEFAULT_BAN_TIME);
|
||||
server->remove_peer_from_list(this);
|
||||
return false;
|
||||
|
@ -1578,6 +1588,7 @@ void P2PServer::P2PClient::send_handshake_solution(const uint8_t (&challenge)[CH
|
|||
P2PClient* client;
|
||||
P2PServer* server;
|
||||
uint32_t reset_counter;
|
||||
bool is_incoming;
|
||||
|
||||
uint8_t challenge[CHALLENGE_SIZE];
|
||||
uint64_t salt;
|
||||
|
@ -1590,6 +1601,7 @@ void P2PServer::P2PClient::send_handshake_solution(const uint8_t (&challenge)[CH
|
|||
work->client = this;
|
||||
work->server = server;
|
||||
work->reset_counter = m_resetCounter.load();
|
||||
work->is_incoming = m_isIncoming;
|
||||
|
||||
memcpy(work->challenge, challenge, CHALLENGE_SIZE);
|
||||
work->salt = server->get_random64();
|
||||
|
@ -1630,7 +1642,7 @@ void P2PServer::P2PClient::send_handshake_solution(const uint8_t (&challenge)[CH
|
|||
return;
|
||||
}
|
||||
|
||||
if (work->client->m_isIncoming) {
|
||||
if (work->is_incoming) {
|
||||
// This is an incoming connection, so it must do PoW, not us
|
||||
return;
|
||||
}
|
||||
|
|
|
@ -38,6 +38,7 @@
|
|||
#include "keccak.h"
|
||||
#include <thread>
|
||||
#include <fstream>
|
||||
#include <numeric>
|
||||
|
||||
constexpr char log_category_prefix[] = "P2Pool ";
|
||||
constexpr int BLOCK_HEADERS_REQUIRED = 720;
|
||||
|
@ -64,6 +65,8 @@ p2pool::p2pool(int argc, char* argv[])
|
|||
throw std::exception();
|
||||
}
|
||||
|
||||
m_hostStr = m_params->m_host;
|
||||
|
||||
if (m_params->m_socks5Proxy.empty()) {
|
||||
if (m_params->m_dns) {
|
||||
bool is_v6;
|
||||
|
@ -78,6 +81,16 @@ p2pool::p2pool(int argc, char* argv[])
|
|||
}
|
||||
}
|
||||
|
||||
{
|
||||
const bool changed = (m_params->m_host != m_hostStr);
|
||||
const std::string rpc_port = ':' + std::to_string(m_params->m_rpcPort);
|
||||
const std::string zmq_port = ":ZMQ:" + std::to_string(m_params->m_zmqPort);
|
||||
m_hostStr += rpc_port + zmq_port;
|
||||
if (changed) {
|
||||
m_hostStr += " (" + m_params->m_host + ')';
|
||||
}
|
||||
}
|
||||
|
||||
hash pub, sec, eph_public_key;
|
||||
generate_keys(pub, sec);
|
||||
|
||||
|
@ -517,19 +530,26 @@ void p2pool::submit_block() const
|
|||
|
||||
size_t nonce_offset = 0;
|
||||
size_t extra_nonce_offset = 0;
|
||||
size_t sidechain_id_offset = 0;
|
||||
hash sidechain_id;
|
||||
bool is_external = false;
|
||||
|
||||
if (submit_data.blob.empty()) {
|
||||
LOGINFO(0, "submit_block: height = " << height << ", template id = " << submit_data.template_id << ", nonce = " << submit_data.nonce << ", extra_nonce = " << submit_data.extra_nonce);
|
||||
submit_data.blob = m_blockTemplate->get_block_template_blob(submit_data.template_id, submit_data.extra_nonce, nonce_offset, extra_nonce_offset, sidechain_id_offset, sidechain_id);
|
||||
|
||||
LOGINFO(0, log::LightGreen() << "submit_block: height = " << height
|
||||
<< ", template id = " << submit_data.template_id
|
||||
<< ", nonce = " << submit_data.nonce
|
||||
<< ", extra_nonce = " << submit_data.extra_nonce
|
||||
<< ", id = " << sidechain_id);
|
||||
|
||||
submit_data.blob = m_blockTemplate->get_block_template_blob(submit_data.template_id, nonce_offset, extra_nonce_offset);
|
||||
if (submit_data.blob.empty()) {
|
||||
LOGERR(0, "submit_block: couldn't find block template with id " << submit_data.template_id);
|
||||
return;
|
||||
}
|
||||
}
|
||||
else {
|
||||
LOGINFO(0, "submit_block: height = " << height << ", external blob (" << submit_data.blob.size() << " bytes)");
|
||||
LOGINFO(0, log::LightGreen() << "submit_block: height = " << height << ", external blob (" << submit_data.blob.size() << " bytes)");
|
||||
is_external = true;
|
||||
}
|
||||
|
||||
|
@@ -543,26 +563,28 @@ void p2pool::submit_block() const
const uint32_t extra_nonce = submit_data.extra_nonce;

for (size_t i = 0; i < submit_data.blob.size(); ++i) {
char buf[16];

uint8_t b;
if (nonce_offset && nonce_offset <= i && i < nonce_offset + sizeof(submit_data.nonce)) {
snprintf(buf, sizeof(buf), "%02x", submit_data.nonce & 255);
b = submit_data.nonce & 255;
submit_data.nonce >>= 8;
}
else if (extra_nonce_offset && extra_nonce_offset <= i && i < extra_nonce_offset + sizeof(submit_data.extra_nonce)) {
snprintf(buf, sizeof(buf), "%02x", submit_data.extra_nonce & 255);
b = submit_data.extra_nonce & 255;
submit_data.extra_nonce >>= 8;
}
else {
snprintf(buf, sizeof(buf), "%02x", submit_data.blob[i]);
else if (sidechain_id_offset && sidechain_id_offset <= i && i < sidechain_id_offset + HASH_SIZE) {
b = sidechain_id.h[i - sidechain_id_offset];
}

request.append(buf);
else {
b = submit_data.blob[i];
}
request.append(1, "0123456789abcdef"[b >> 4]);
request.append(1, "0123456789abcdef"[b & 15]);
}
request.append("\"]}");

JSONRPCRequest::call(m_params->m_host, m_params->m_rpcPort, request, m_params->m_rpcLogin, m_params->m_socks5Proxy,
[height, diff, template_id, nonce, extra_nonce, is_external](const char* data, size_t size)
[height, diff, template_id, nonce, extra_nonce, sidechain_id, is_external](const char* data, size_t size)
{
rapidjson::Document doc;
if (doc.Parse<rapidjson::kParseCommentsFlag | rapidjson::kParseTrailingCommasFlag>(data, size).HasParseError() || !doc.IsObject()) {
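The rewritten loop emits each byte as two hex characters straight from a lookup string, which also makes it easy to splice in the live nonce, extra nonce and sidechain id bytes at their known offsets. A minimal sketch of the nibble-lookup encoding itself:

#include <string>
#include <cstdint>

void append_hex(std::string& out, uint8_t b)
{
    out.append(1, "0123456789abcdef"[b >> 4]);  // high nibble
    out.append(1, "0123456789abcdef"[b & 15]);  // low nibble
}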
@ -589,7 +611,7 @@ void p2pool::submit_block() const
|
|||
LOGWARN(3, "submit_block (external blob): daemon returned error: " << (error_msg ? error_msg : "unknown error"));
|
||||
}
|
||||
else {
|
||||
LOGERR(0, "submit_block: daemon returned error: '" << (error_msg ? error_msg : "unknown error") << "', template id = " << template_id << ", nonce = " << nonce << ", extra_nonce = " << extra_nonce);
|
||||
LOGERR(0, "submit_block: daemon returned error: '" << (error_msg ? error_msg : "unknown error") << "', template id = " << template_id << ", nonce = " << nonce << ", extra_nonce = " << extra_nonce << ", id = " << sidechain_id);
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
@ -691,14 +713,14 @@ void p2pool::download_block_headers(uint64_t current_height)
|
|||
}
|
||||
else {
|
||||
LOGERR(1, "fatal error: couldn't download block header for seed height " << height);
|
||||
panic();
|
||||
PANIC_STOP();
|
||||
}
|
||||
},
|
||||
[height](const char* data, size_t size)
|
||||
{
|
||||
if (size > 0) {
|
||||
LOGERR(1, "fatal error: couldn't download block header for seed height " << height << ", error " << log::const_buf(data, size));
|
||||
panic();
|
||||
PANIC_STOP();
|
||||
}
|
||||
});
|
||||
}
|
||||
|
@ -719,7 +741,7 @@ void p2pool::download_block_headers(uint64_t current_height)
|
|||
}
|
||||
catch (const std::exception& e) {
|
||||
LOGERR(1, "Couldn't start ZMQ reader: exception " << e.what());
|
||||
panic();
|
||||
PANIC_STOP();
|
||||
}
|
||||
|
||||
m_stratumServer = new StratumServer(this);
|
||||
|
@ -922,7 +944,7 @@ void p2pool::parse_get_info_rpc(const char* data, size_t size)
|
|||
|
||||
if (monero_network != sidechain_network) {
|
||||
LOGERR(1, "monerod is on " << monero_network << ", but you're mining to a " << sidechain_network << " sidechain");
|
||||
panic();
|
||||
PANIC_STOP();
|
||||
}
|
||||
|
||||
get_version();
|
||||
|
@ -992,7 +1014,7 @@ void p2pool::parse_get_version_rpc(const char* data, size_t size)
|
|||
const uint64_t required_version_hi = required >> 16;
|
||||
const uint64_t required_version_lo = required & 65535;
|
||||
LOGERR(1, "monerod RPC v" << version_hi << '.' << version_lo << " is incompatible, update to RPC >= v" << required_version_hi << '.' << required_version_lo << " (Monero v0.18.0.0 or newer)");
|
||||
panic();
|
||||
PANIC_STOP();
|
||||
}
|
||||
|
||||
get_miner_data();
|
||||
|
@ -1032,7 +1054,7 @@ void p2pool::parse_get_miner_data_rpc(const char* data, size_t size)
|
|||
}
|
||||
|
||||
hash h;
|
||||
keccak(reinterpret_cast<const uint8_t*>(data), static_cast<int>(size), h.h, HASH_SIZE);
|
||||
keccak(reinterpret_cast<const uint8_t*>(data), static_cast<int>(size), h.h);
|
||||
if (h == m_getMinerDataHash) {
|
||||
LOGWARN(4, "Received a duplicate get_miner_data RPC response, ignoring it");
|
||||
return;
|
||||
|
@ -1223,6 +1245,9 @@ void p2pool::api_update_pool_stats()
|
|||
const uint64_t miners = std::max<uint64_t>(m_sideChain->miner_count(), m_p2pServer ? m_p2pServer->peer_list_size() : 0U);
|
||||
const difficulty_type total_hashes = m_sideChain->total_hashes();
|
||||
|
||||
const auto& s = m_blockTemplate->shares();
|
||||
const difficulty_type pplns_weight = std::accumulate(s.begin(), s.end(), difficulty_type(), [](const auto& a, const auto& b) { return a + b.m_weight; });
|
||||
|
||||
time_t last_block_found_time = 0;
|
||||
uint64_t last_block_found_height = 0;
|
||||
uint64_t total_blocks_found = 0;
|
||||
|
@ -1237,7 +1262,7 @@ void p2pool::api_update_pool_stats()
|
|||
}
|
||||
|
||||
m_api->set(p2pool_api::Category::POOL, "stats",
|
||||
[hashrate, miners, &total_hashes, last_block_found_time, last_block_found_height, total_blocks_found](log::Stream& s)
|
||||
[hashrate, miners, &total_hashes, last_block_found_time, last_block_found_height, total_blocks_found, &pplns_weight](log::Stream& s)
|
||||
{
|
||||
s << "{\"pool_list\":[\"pplns\"],\"pool_statistics\":{\"hashRate\":" << hashrate
|
||||
<< ",\"miners\":" << miners
|
||||
|
@ -1245,6 +1270,7 @@ void p2pool::api_update_pool_stats()
|
|||
<< ",\"lastBlockFoundTime\":" << last_block_found_time
|
||||
<< ",\"lastBlockFound\":" << last_block_found_height
|
||||
<< ",\"totalBlocksFound\":" << total_blocks_found
|
||||
<< ",\"pplnsWeight\":" << pplns_weight
|
||||
<< "}}";
|
||||
});
|
||||
|
||||
|
@ -1563,7 +1589,7 @@ int p2pool::run()
|
|||
catch (const std::exception& e) {
|
||||
const char* s = e.what();
|
||||
LOGERR(1, "exception " << s);
|
||||
panic();
|
||||
PANIC_STOP();
|
||||
}
|
||||
|
||||
m_stopped = true;
|
||||
|
|
|
@ -46,6 +46,7 @@ public:
|
|||
bool stopped() const { return m_stopped; }
|
||||
void stop();
|
||||
|
||||
const std::string& host_str() const { return m_hostStr; }
|
||||
const Params& params() const { return *m_params; }
|
||||
BlockTemplate& block_template() { return *m_blockTemplate; }
|
||||
SideChain& side_chain() { return *m_sideChain; }
|
||||
|
@ -111,6 +112,7 @@ private:
|
|||
|
||||
std::atomic<bool> m_stopped;
|
||||
|
||||
std::string m_hostStr;
|
||||
Params* m_params;
|
||||
|
||||
p2pool_api* m_api;
|
||||
|
|
|
@ -34,7 +34,7 @@ p2pool_api::p2pool_api(const std::string& api_path, const bool local_stats)
|
|||
{
|
||||
if (m_apiPath.empty()) {
|
||||
LOGERR(1, "api path is empty");
|
||||
panic();
|
||||
PANIC_STOP();
|
||||
}
|
||||
|
||||
if ((m_apiPath.back() != '/')
|
||||
|
@ -48,13 +48,13 @@ p2pool_api::p2pool_api(const std::string& api_path, const bool local_stats)
|
|||
struct stat buf;
|
||||
if (stat(m_apiPath.c_str(), &buf) != 0) {
|
||||
LOGERR(1, "path " << m_apiPath << " doesn't exist");
|
||||
panic();
|
||||
PANIC_STOP();
|
||||
}
|
||||
|
||||
int result = uv_async_init(uv_default_loop_checked(), &m_dumpToFileAsync, on_dump_to_file);
|
||||
if (result) {
|
||||
LOGERR(1, "uv_async_init failed, error " << uv_err_name(result));
|
||||
panic();
|
||||
PANIC_STOP();
|
||||
}
|
||||
m_dumpToFileAsync.data = this;
|
||||
|
||||
|
@ -93,7 +93,7 @@ void p2pool_api::create_dir(const std::string& path)
|
|||
result = errno;
|
||||
if (result != EEXIST) {
|
||||
LOGERR(1, "mkdir(" << path << ") failed, error " << result);
|
||||
panic();
|
||||
PANIC_STOP();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -124,7 +124,9 @@ void p2pool_api::dump_to_file_async_internal(Category category, const char* file
|
|||
m_dumpData[path] = std::move(buf);
|
||||
}
|
||||
|
||||
if (!uv_is_closing(reinterpret_cast<uv_handle_t*>(&m_dumpToFileAsync))) {
|
||||
uv_async_send(&m_dumpToFileAsync);
|
||||
}
|
||||
}
|
||||
|
||||
void p2pool_api::dump_to_file()
|
||||
|
@ -170,59 +172,108 @@ void p2pool_api::on_fs_open(uv_fs_t* req)
|
|||
buf[0].base = work->buf.data();
|
||||
buf[0].len = static_cast<uint32_t>(work->buf.size());
|
||||
|
||||
const int result = uv_fs_write(uv_default_loop_checked(), &work->req, static_cast<uv_file>(work->fd), buf, 1, 0, on_fs_write);
|
||||
int result = uv_fs_write(uv_default_loop_checked(), &work->req, static_cast<uv_file>(work->fd), buf, 1, -1, on_fs_write);
|
||||
if (result < 0) {
|
||||
LOGWARN(4, "failed to write to " << work->tmp_name << ", error " << uv_err_name(result));
|
||||
|
||||
result = uv_fs_close(uv_default_loop_checked(), &work->req, static_cast<uv_file>(work->fd), on_fs_error_cleanup);
|
||||
if (result < 0) {
|
||||
LOGWARN(4, "failed to close " << work->tmp_name << ", error " << uv_err_name(result));
|
||||
delete work;
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void p2pool_api::on_fs_write(uv_fs_t* req)
|
||||
{
|
||||
DumpFileWork* work = reinterpret_cast<DumpFileWork*>(req->data);
|
||||
|
||||
if (req->result < 0) {
|
||||
LOGWARN(4, "failed to write to " << work->tmp_name << ", error " << uv_err_name(static_cast<int>(req->result)));
|
||||
}
|
||||
|
||||
int result = static_cast<int>(req->result);
|
||||
uv_fs_req_cleanup(req);
|
||||
|
||||
const int result = uv_fs_close(uv_default_loop_checked(), &work->req, static_cast<uv_file>(work->fd), on_fs_close);
|
||||
if (result < 0) {
|
||||
LOGWARN(4, "failed to write to " << work->tmp_name << ", error " << uv_err_name(result));
|
||||
}
|
||||
else if (result && (static_cast<size_t>(result) < work->buf.size())) {
|
||||
work->buf.erase(work->buf.begin(), work->buf.begin() + result);
|
||||
|
||||
uv_buf_t buf[1];
|
||||
buf[0].base = work->buf.data();
|
||||
buf[0].len = static_cast<uint32_t>(work->buf.size());
|
||||
|
||||
result = uv_fs_write(uv_default_loop_checked(), &work->req, static_cast<uv_file>(work->fd), buf, 1, -1, on_fs_write);
|
||||
if (result < 0) {
|
||||
LOGWARN(4, "failed to write to " << work->tmp_name << ", error " << uv_err_name(result));
|
||||
|
||||
result = uv_fs_close(uv_default_loop_checked(), &work->req, static_cast<uv_file>(work->fd), on_fs_error_cleanup);
|
||||
if (result < 0) {
|
||||
LOGWARN(4, "failed to close " << work->tmp_name << ", error " << uv_err_name(result));
|
||||
delete work;
|
||||
}
|
||||
}
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
result = uv_fs_close(uv_default_loop_checked(), &work->req, static_cast<uv_file>(work->fd), on_fs_close);
|
||||
if (result < 0) {
|
||||
LOGWARN(4, "failed to close " << work->tmp_name << ", error " << uv_err_name(result));
|
||||
delete work;
|
||||
}
|
||||
}
|
||||
|
||||
void p2pool_api::on_fs_close(uv_fs_t* req)
|
||||
{
|
||||
DumpFileWork* work = reinterpret_cast<DumpFileWork*>(req->data);
|
||||
|
||||
if (req->result < 0) {
|
||||
LOGWARN(4, "failed to close " << work->tmp_name << ", error " << uv_err_name(static_cast<int>(req->result)));
|
||||
}
|
||||
|
||||
int result = static_cast<int>(req->result);
|
||||
uv_fs_req_cleanup(req);
|
||||
|
||||
const int result = uv_fs_rename(uv_default_loop_checked(), &work->req, work->tmp_name.c_str(), work->name.c_str(), on_fs_rename);
|
||||
if (result < 0) {
|
||||
LOGWARN(4, "failed to close " << work->tmp_name << ", error " << uv_err_name(result));
|
||||
}
|
||||
|
||||
result = uv_fs_rename(uv_default_loop_checked(), &work->req, work->tmp_name.c_str(), work->name.c_str(), on_fs_rename);
|
||||
if (result < 0) {
|
||||
LOGWARN(4, "failed to rename " << work->tmp_name << " to " << work->name << ", error " << uv_err_name(result));
|
||||
|
||||
result = uv_fs_unlink(uv_default_loop_checked(), &work->req, work->tmp_name.c_str(), on_fs_error_cleanup);
|
||||
if (result < 0) {
|
||||
LOGWARN(4, "failed to delete " << work->tmp_name << ", error " << uv_err_name(result));
|
||||
delete work;
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void p2pool_api::on_fs_rename(uv_fs_t* req)
|
||||
{
|
||||
DumpFileWork* work = reinterpret_cast<DumpFileWork*>(req->data);
|
||||
int result = static_cast<int>(req->result);
|
||||
uv_fs_req_cleanup(req);
|
||||
|
||||
if (req->result < 0) {
|
||||
LOGWARN(4, "failed to rename " << work->tmp_name << " to " << work->name << ", error " << uv_err_name(static_cast<int>(req->result)));
|
||||
if (result < 0) {
|
||||
LOGWARN(4, "failed to rename " << work->tmp_name << " to " << work->name << ", error " << uv_err_name(result));
|
||||
|
||||
result = uv_fs_unlink(uv_default_loop_checked(), &work->req, work->tmp_name.c_str(), on_fs_error_cleanup);
|
||||
if (result < 0) {
|
||||
LOGWARN(4, "failed to delete " << work->tmp_name << ", error " << uv_err_name(result));
|
||||
delete work;
|
||||
}
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
delete work;
|
||||
}
|
||||
|
||||
void p2pool_api::on_fs_error_cleanup(uv_fs_t* req)
|
||||
{
|
||||
DumpFileWork* work = reinterpret_cast<DumpFileWork*>(req->data);
|
||||
int result = static_cast<int>(req->result);
|
||||
uv_fs_req_cleanup(req);
|
||||
|
||||
if (result < 0) {
|
||||
LOGWARN(4, "failed to cleanup after previous errors " << work->tmp_name << ", error " << uv_err_name(result));
|
||||
}
|
||||
|
||||
delete work;
|
||||
}
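The reworked on_fs_write() above accounts for short writes: uv_fs_write() may report fewer bytes than requested, so the already-written prefix is erased from the buffer and the write is re-issued at the current file offset (-1) until nothing is left. A condensed synchronous sketch of that retry logic (an illustrative helper, not the async code path p2pool_api actually uses):

#include <uv.h>
#include <vector>

bool write_all(uv_file fd, std::vector<char>& data)
{
    while (!data.empty()) {
        uv_buf_t buf = uv_buf_init(data.data(), static_cast<unsigned int>(data.size()));
        uv_fs_t req;
        // NULL callback: the request runs synchronously and req.result holds bytes written or an error
        uv_fs_write(uv_default_loop(), &req, fd, &buf, 1, -1, nullptr);
        const ssize_t n = req.result;
        uv_fs_req_cleanup(&req);
        if (n <= 0) {
            return false;                            // error (or no progress): give up
        }
        data.erase(data.begin(), data.begin() + n);  // drop the bytes that made it to disk, retry the rest
    }
    return true;
}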
@ -78,6 +78,7 @@ private:
|
|||
static void on_fs_write(uv_fs_t* req);
|
||||
static void on_fs_close(uv_fs_t* req);
|
||||
static void on_fs_rename(uv_fs_t* req);
|
||||
static void on_fs_error_cleanup(uv_fs_t* req);
|
||||
|
||||
std::string m_apiPath;
|
||||
std::string m_networkPath;
|
||||
|
|
|
@ -38,11 +38,13 @@ PoolBlock::PoolBlock()
|
|||
, m_txkeyPub{}
|
||||
, m_extraNonceSize(0)
|
||||
, m_extraNonce(0)
|
||||
, m_txkeySecSeed{}
|
||||
, m_txkeySec{}
|
||||
, m_parent{}
|
||||
, m_sidechainHeight(0)
|
||||
, m_difficulty{}
|
||||
, m_cumulativeDifficulty{}
|
||||
, m_sidechainExtraBuf{}
|
||||
, m_sidechainId{}
|
||||
, m_depth(0)
|
||||
, m_verified(false)
|
||||
|
@ -52,12 +54,10 @@ PoolBlock::PoolBlock()
|
|||
, m_precalculated(false)
|
||||
, m_localTimestamp(seconds_since_epoch())
|
||||
{
|
||||
uv_mutex_init_checked(&m_lock);
|
||||
}
|
||||
|
||||
PoolBlock::PoolBlock(const PoolBlock& b)
|
||||
{
|
||||
uv_mutex_init_checked(&m_lock);
|
||||
operator=(b);
|
||||
}
|
||||
|
||||
|
@ -68,11 +68,6 @@ PoolBlock& PoolBlock::operator=(const PoolBlock& b)
|
|||
return *this;
|
||||
}
|
||||
|
||||
const int lock_result = uv_mutex_trylock(&b.m_lock);
|
||||
if (lock_result) {
|
||||
LOGERR(1, "operator= uv_mutex_trylock failed. Fix the code!");
|
||||
}
|
||||
|
||||
#if POOL_BLOCK_DEBUG
|
||||
m_mainChainDataDebug = b.m_mainChainDataDebug;
|
||||
m_sideChainDataDebug = b.m_sideChainDataDebug;
|
||||
|
@ -90,12 +85,14 @@ PoolBlock& PoolBlock::operator=(const PoolBlock& b)
|
|||
m_extraNonce = b.m_extraNonce;
|
||||
m_transactions = b.m_transactions;
|
||||
m_minerWallet = b.m_minerWallet;
|
||||
m_txkeySecSeed = b.m_txkeySecSeed;
|
||||
m_txkeySec = b.m_txkeySec;
|
||||
m_parent = b.m_parent;
|
||||
m_uncles = b.m_uncles;
|
||||
m_sidechainHeight = b.m_sidechainHeight;
|
||||
m_difficulty = b.m_difficulty;
|
||||
m_cumulativeDifficulty = b.m_cumulativeDifficulty;
|
||||
memcpy(m_sidechainExtraBuf, b.m_sidechainExtraBuf, sizeof(m_sidechainExtraBuf));
|
||||
m_sidechainId = b.m_sidechainId;
|
||||
m_depth = b.m_depth;
|
||||
m_verified = b.m_verified;
|
||||
|
@ -106,25 +103,10 @@ PoolBlock& PoolBlock::operator=(const PoolBlock& b)
|
|||
|
||||
m_localTimestamp = seconds_since_epoch();
|
||||
|
||||
if (lock_result == 0) {
|
||||
uv_mutex_unlock(&b.m_lock);
|
||||
}
|
||||
|
||||
return *this;
|
||||
}
|
||||
|
||||
PoolBlock::~PoolBlock()
|
||||
{
|
||||
uv_mutex_destroy(&m_lock);
|
||||
}
|
||||
|
||||
std::vector<uint8_t> PoolBlock::serialize_mainchain_data(size_t* header_size, size_t* miner_tx_size, int* outputs_offset, int* outputs_blob_size) const
|
||||
{
|
||||
MutexLock lock(m_lock);
|
||||
return serialize_mainchain_data_nolock(header_size, miner_tx_size, outputs_offset, outputs_blob_size);
|
||||
}
|
||||
|
||||
std::vector<uint8_t> PoolBlock::serialize_mainchain_data_nolock(size_t* header_size, size_t* miner_tx_size, int* outputs_offset, int* outputs_blob_size) const
|
||||
std::vector<uint8_t> PoolBlock::serialize_mainchain_data(size_t* header_size, size_t* miner_tx_size, int* outputs_offset, int* outputs_blob_size, const uint32_t* nonce, const uint32_t* extra_nonce) const
|
||||
{
|
||||
std::vector<uint8_t> data;
|
||||
data.reserve(128 + m_outputs.size() * 39 + m_transactions.size() * HASH_SIZE);
|
||||
|
@ -134,7 +116,11 @@ std::vector<uint8_t> PoolBlock::serialize_mainchain_data_nolock(size_t* header_s
|
|||
data.push_back(m_minorVersion);
|
||||
writeVarint(m_timestamp, data);
|
||||
data.insert(data.end(), m_prevId.h, m_prevId.h + HASH_SIZE);
|
||||
data.insert(data.end(), reinterpret_cast<const uint8_t*>(&m_nonce), reinterpret_cast<const uint8_t*>(&m_nonce) + NONCE_SIZE);
|
||||
|
||||
if (!nonce) {
|
||||
nonce = &m_nonce;
|
||||
}
|
||||
data.insert(data.end(), reinterpret_cast<const uint8_t*>(nonce), reinterpret_cast<const uint8_t*>(nonce) + NONCE_SIZE);
|
||||
|
||||
const size_t header_size0 = data.size();
|
||||
if (header_size) {
|
||||
|
@ -187,7 +173,10 @@ std::vector<uint8_t> PoolBlock::serialize_mainchain_data_nolock(size_t* header_s
|
|||
*(p++) = TX_EXTRA_NONCE;
|
||||
*(p++) = static_cast<uint8_t>(extra_nonce_size);
|
||||
|
||||
memcpy(p, &m_extraNonce, EXTRA_NONCE_SIZE);
|
||||
if (!extra_nonce) {
|
||||
extra_nonce = &m_extraNonce;
|
||||
}
|
||||
memcpy(p, extra_nonce, EXTRA_NONCE_SIZE);
|
||||
p += EXTRA_NONCE_SIZE;
|
||||
if (extra_nonce_size > EXTRA_NONCE_SIZE) {
|
||||
memset(p, 0, extra_nonce_size - EXTRA_NONCE_SIZE);
|
||||
|
@ -213,9 +202,9 @@ std::vector<uint8_t> PoolBlock::serialize_mainchain_data_nolock(size_t* header_s
|
|||
data.insert(data.end(), t + HASH_SIZE, t + m_transactions.size() * HASH_SIZE);
|
||||
|
||||
#if POOL_BLOCK_DEBUG
|
||||
if (!m_mainChainDataDebug.empty() && (data != m_mainChainDataDebug)) {
|
||||
if ((nonce == &m_nonce) && (extra_nonce == &m_extraNonce) && !m_mainChainDataDebug.empty() && (data != m_mainChainDataDebug)) {
|
||||
LOGERR(1, "serialize_mainchain_data() has a bug, fix it!");
|
||||
panic();
|
||||
PANIC_STOP();
|
||||
}
|
||||
#endif
|
||||
|
||||
|
@ -226,16 +215,23 @@ std::vector<uint8_t> PoolBlock::serialize_sidechain_data() const
|
|||
{
|
||||
std::vector<uint8_t> data;
|
||||
|
||||
MutexLock lock(m_lock);
|
||||
|
||||
data.reserve((m_uncles.size() + 4) * HASH_SIZE + 20);
|
||||
data.reserve((m_uncles.size() + 4) * HASH_SIZE + 36);
|
||||
|
||||
const hash& spend = m_minerWallet.spend_public_key();
|
||||
const hash& view = m_minerWallet.view_public_key();
|
||||
|
||||
const int sidechain_version = get_sidechain_version();
|
||||
|
||||
data.insert(data.end(), spend.h, spend.h + HASH_SIZE);
|
||||
data.insert(data.end(), view.h, view.h + HASH_SIZE);
|
||||
|
||||
if (sidechain_version > 1) {
|
||||
data.insert(data.end(), m_txkeySecSeed.h, m_txkeySecSeed.h + HASH_SIZE);
|
||||
}
|
||||
else {
|
||||
data.insert(data.end(), m_txkeySec.h, m_txkeySec.h + HASH_SIZE);
|
||||
}
|
||||
|
||||
data.insert(data.end(), m_parent.h, m_parent.h + HASH_SIZE);
|
||||
|
||||
writeVarint(m_uncles.size(), data);
|
||||
|
@ -252,10 +248,15 @@ std::vector<uint8_t> PoolBlock::serialize_sidechain_data() const
|
|||
writeVarint(m_cumulativeDifficulty.lo, data);
|
||||
writeVarint(m_cumulativeDifficulty.hi, data);
|
||||
|
||||
if (sidechain_version > 1) {
|
||||
const uint8_t* p = reinterpret_cast<const uint8_t*>(m_sidechainExtraBuf);
|
||||
data.insert(data.end(), p, p + sizeof(m_sidechainExtraBuf));
|
||||
}
|
||||
|
||||
#if POOL_BLOCK_DEBUG
|
||||
if (!m_sideChainDataDebug.empty() && (data != m_sideChainDataDebug)) {
|
||||
LOGERR(1, "serialize_sidechain_data() has a bug, fix it!");
|
||||
panic();
|
||||
PANIC_STOP();
|
||||
}
|
||||
#endif
|
||||
|
||||
|
@ -296,10 +297,8 @@ bool PoolBlock::get_pow_hash(RandomX_Hasher_Base* hasher, uint64_t height, const
|
|||
size_t blob_size = 0;
|
||||
|
||||
{
|
||||
MutexLock lock(m_lock);
|
||||
|
||||
size_t header_size, miner_tx_size;
|
||||
const std::vector<uint8_t> mainchain_data = serialize_mainchain_data_nolock(&header_size, &miner_tx_size, nullptr, nullptr);
|
||||
const std::vector<uint8_t> mainchain_data = serialize_mainchain_data(&header_size, &miner_tx_size, nullptr, nullptr, nullptr, nullptr);
|
||||
|
||||
if (!header_size || !miner_tx_size || (mainchain_data.size() < header_size + miner_tx_size)) {
|
||||
LOGERR(1, "tried to calculate PoW of uninitialized block");
|
||||
|
@ -310,18 +309,22 @@ bool PoolBlock::get_pow_hash(RandomX_Hasher_Base* hasher, uint64_t height, const
|
|||
memcpy(blob, mainchain_data.data(), blob_size);
|
||||
|
||||
const uint8_t* miner_tx = mainchain_data.data() + header_size;
|
||||
keccak(miner_tx, static_cast<int>(miner_tx_size) - 1, reinterpret_cast<uint8_t*>(hashes), HASH_SIZE);
|
||||
hash tmp;
|
||||
keccak(miner_tx, static_cast<int>(miner_tx_size) - 1, tmp.h);
|
||||
memcpy(hashes, tmp.h, HASH_SIZE);
|
||||
|
||||
count = m_transactions.size();
|
||||
uint8_t* h = reinterpret_cast<uint8_t*>(m_transactions.data());
|
||||
|
||||
keccak(reinterpret_cast<uint8_t*>(hashes), HASH_SIZE * 3, h, HASH_SIZE);
|
||||
keccak(reinterpret_cast<uint8_t*>(hashes), HASH_SIZE * 3, tmp.h);
|
||||
memcpy(h, tmp.h, HASH_SIZE);
|
||||
|
||||
if (count == 1) {
|
||||
memcpy(blob + blob_size, h, HASH_SIZE);
|
||||
}
|
||||
else if (count == 2) {
|
||||
keccak(h, HASH_SIZE * 2, blob + blob_size, HASH_SIZE);
|
||||
keccak(h, HASH_SIZE * 2, tmp.h);
|
||||
memcpy(blob + blob_size, tmp.h, HASH_SIZE);
|
||||
}
|
||||
else {
|
||||
size_t i, j, cnt;
|
||||
|
@ -334,17 +337,20 @@ bool PoolBlock::get_pow_hash(RandomX_Hasher_Base* hasher, uint64_t height, const
|
|||
memcpy(tmp_ints.data(), h, (cnt * 2 - count) * HASH_SIZE);
|
||||
|
||||
for (i = cnt * 2 - count, j = cnt * 2 - count; j < cnt; i += 2, ++j) {
|
||||
keccak(h + i * HASH_SIZE, HASH_SIZE * 2, tmp_ints.data() + j * HASH_SIZE, HASH_SIZE);
|
||||
keccak(h + i * HASH_SIZE, HASH_SIZE * 2, tmp.h);
|
||||
memcpy(tmp_ints.data() + j * HASH_SIZE, tmp.h, HASH_SIZE);
|
||||
}
|
||||
|
||||
while (cnt > 2) {
|
||||
cnt >>= 1;
|
||||
for (i = 0, j = 0; j < cnt; i += 2, ++j) {
|
||||
keccak(tmp_ints.data() + i * HASH_SIZE, HASH_SIZE * 2, tmp_ints.data() + j * HASH_SIZE, HASH_SIZE);
|
||||
keccak(tmp_ints.data() + i * HASH_SIZE, HASH_SIZE * 2, tmp.h);
|
||||
memcpy(tmp_ints.data() + j * HASH_SIZE, tmp.h, HASH_SIZE);
|
||||
}
|
||||
}
|
||||
|
||||
keccak(tmp_ints.data(), HASH_SIZE * 2, blob + blob_size, HASH_SIZE);
|
||||
keccak(tmp_ints.data(), HASH_SIZE * 2, tmp.h);
|
||||
memcpy(blob + blob_size, tmp.h, HASH_SIZE);
|
||||
}
|
||||
}
|
||||
blob_size += HASH_SIZE;
|
||||
|
@@ -380,4 +386,46 @@ uint64_t PoolBlock::get_payout(const Wallet& w) const
return 0;
}

static constexpr uint64_t VERSION2_MAINNET_TIMESTAMP = 1679173200U; // 2023-03-18 21:00 UTC
static constexpr uint64_t VERSION2_TESTNET_TIMESTAMP = 1674507600U; // 2023-01-23 21:00 UTC

uint32_t PoolBlock::signal_v2_readiness(uint32_t extra_nonce)
{
const uint64_t ts = (SideChain::network_type() == NetworkType::Mainnet) ? VERSION2_MAINNET_TIMESTAMP : VERSION2_TESTNET_TIMESTAMP;
if (time(nullptr) < static_cast<int64_t>(ts)) {
return (extra_nonce & 0x007FFFFFUL) | 0xFF000000UL;
}
return extra_nonce;
}

int PoolBlock::get_sidechain_version() const
{
const uint64_t ts = (SideChain::network_type() == NetworkType::Mainnet) ? VERSION2_MAINNET_TIMESTAMP : VERSION2_TESTNET_TIMESTAMP;
return (m_timestamp >= ts) ? 2 : 1;
}

hash PoolBlock::calculate_tx_key_seed() const
{
const char domain[] = "tx_key_seed";
const uint32_t zero = 0;

const std::vector<uint8_t> mainchain_data = serialize_mainchain_data(nullptr, nullptr, nullptr, nullptr, &zero, &zero);
const std::vector<uint8_t> sidechain_data = serialize_sidechain_data();

hash result;
keccak_custom([&domain, &mainchain_data, &sidechain_data](int offset) -> uint8_t {
size_t k = offset;

if (k < sizeof(domain)) return domain[k];
k -= sizeof(domain);

if (k < mainchain_data.size()) return mainchain_data[k];
k -= mainchain_data.size();

return sidechain_data[k];
}, static_cast<int>(sizeof(domain) + mainchain_data.size() + sidechain_data.size()), result.h, HASH_SIZE);

return result;
}

} // namespace p2pool
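Until the fork timestamp is reached, signal_v2_readiness() stamps a recognizable pattern into the extra nonce: the top byte is forced to 0xFF and bit 23 is cleared, leaving the low 23 bits of the caller's random value intact, so upgraded miners can be counted on-chain before the rules change. The masking in isolation:

#include <cstdint>

// bits 31..24 -> 0xFF (readiness marker), bit 23 -> 0, bits 22..0 -> caller's entropy
uint32_t mark_v2_ready(uint32_t extra_nonce)
{
    return (extra_nonce & 0x007FFFFFU) | 0xFF000000U;
}

static_assert(((0xFFFFFFFFU & 0x007FFFFFU) | 0xFF000000U) == 0xFF7FFFFFU, "bit 23 stays clear, top byte stays 0xFF");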
@ -28,6 +28,8 @@
|
|||
|
||||
namespace p2pool {
|
||||
|
||||
static FORCEINLINE constexpr int pool_block_debug() { return POOL_BLOCK_DEBUG; }
|
||||
|
||||
class RandomX_Hasher_Base;
|
||||
class SideChain;
|
||||
|
||||
|
@ -56,13 +58,10 @@ struct DifficultyData
|
|||
struct PoolBlock
|
||||
{
|
||||
PoolBlock();
|
||||
~PoolBlock();
|
||||
|
||||
PoolBlock(const PoolBlock& b);
|
||||
PoolBlock& operator=(const PoolBlock& b);
|
||||
|
||||
mutable uv_mutex_t m_lock;
|
||||
|
||||
#if POOL_BLOCK_DEBUG
|
||||
std::vector<uint8_t> m_mainChainDataDebug;
|
||||
std::vector<uint8_t> m_sideChainDataDebug;
|
||||
|
@ -104,6 +103,7 @@ struct PoolBlock
|
|||
|
||||
// Transaction secret key
|
||||
// Required to check that pub keys in the miner transaction pay out to correct miner wallet addresses
|
||||
hash m_txkeySecSeed;
|
||||
hash m_txkeySec;
|
||||
|
||||
// Side-chain parent and uncle blocks
|
||||
|
@ -115,6 +115,9 @@ struct PoolBlock
|
|||
difficulty_type m_difficulty;
|
||||
difficulty_type m_cumulativeDifficulty;
|
||||
|
||||
// Arbitrary extra data
|
||||
uint32_t m_sidechainExtraBuf[4];
|
||||
|
||||
// HASH (see diagram in the comment above)
|
||||
hash m_sidechainId;
|
||||
|
||||
|
@ -131,8 +134,7 @@ struct PoolBlock
|
|||
|
||||
uint64_t m_localTimestamp;
|
||||
|
||||
std::vector<uint8_t> serialize_mainchain_data(size_t* header_size = nullptr, size_t* miner_tx_size = nullptr, int* outputs_offset = nullptr, int* outputs_blob_size = nullptr) const;
|
||||
std::vector<uint8_t> serialize_mainchain_data_nolock(size_t* header_size, size_t* miner_tx_size, int* outputs_offset, int* outputs_blob_size) const;
|
||||
std::vector<uint8_t> serialize_mainchain_data(size_t* header_size = nullptr, size_t* miner_tx_size = nullptr, int* outputs_offset = nullptr, int* outputs_blob_size = nullptr, const uint32_t* nonce = nullptr, const uint32_t* extra_nonce = nullptr) const;
|
||||
std::vector<uint8_t> serialize_sidechain_data() const;
|
||||
|
||||
int deserialize(const uint8_t* data, size_t size, const SideChain& sidechain, uv_loop_t* loop, bool compact);
|
||||
|
@ -146,6 +148,12 @@ struct PoolBlock
|
|||
// but P2Pool can switch to using only TXOUT_TO_TAGGED_KEY for miner payouts starting from v15
|
||||
FORCEINLINE uint8_t get_tx_type() const { return (m_majorVersion < HARDFORK_VIEW_TAGS_VERSION) ? TXOUT_TO_KEY : TXOUT_TO_TAGGED_KEY; }
|
||||
|
||||
// Signal hardfork readiness (only before the v2 hardfork)
|
||||
// TODO: remove this code after hardfork
|
||||
static uint32_t signal_v2_readiness(uint32_t extra_nonce);
|
||||
|
||||
int get_sidechain_version() const;
|
||||
|
||||
typedef std::array<uint8_t, HASH_SIZE + NONCE_SIZE + EXTRA_NONCE_SIZE> full_id;
|
||||
|
||||
FORCEINLINE full_id get_full_id() const
|
||||
|
@ -157,6 +165,8 @@ struct PoolBlock
|
|||
memcpy(p + HASH_SIZE + NONCE_SIZE, &m_extraNonce, EXTRA_NONCE_SIZE);
|
||||
return key;
|
||||
}
|
||||
|
||||
hash calculate_tx_key_seed() const;
|
||||
};
|
||||
|
||||
} // namespace p2pool
|
||||
|
|
|
@ -61,8 +61,6 @@ int PoolBlock::deserialize(const uint8_t* data, size_t size, const SideChain& si
|
|||
|
||||
#define READ_BUF(buf, size) do { if (!read_buf((buf), (size))) return __LINE__; } while(0)
|
||||
|
||||
MutexLock lock(m_lock);
|
||||
|
||||
READ_BYTE(m_majorVersion);
|
||||
if (m_majorVersion > HARDFORK_SUPPORTED_VERSION) return __LINE__;
|
||||
|
||||
|
@ -260,11 +258,20 @@ int PoolBlock::deserialize(const uint8_t* data, size_t size, const SideChain& si
|
|||
return __LINE__;
|
||||
}
|
||||
|
||||
READ_BUF(m_txkeySec.h, HASH_SIZE);
|
||||
READ_BUF(m_txkeySecSeed.h, HASH_SIZE);
|
||||
|
||||
if (!check_keys(m_txkeyPub, m_txkeySec)) {
|
||||
const int sidechain_version = get_sidechain_version();
|
||||
|
||||
if (sidechain_version > 1) {
|
||||
hash pub;
|
||||
get_tx_keys(pub, m_txkeySec, m_txkeySecSeed, m_prevId);
|
||||
if (pub != m_txkeyPub) {
|
||||
return __LINE__;
|
||||
}
|
||||
}
|
||||
else {
|
||||
// Both values are the same before v2
|
||||
m_txkeySec = m_txkeySecSeed;
|
||||
|
||||
// Enforce deterministic tx keys starting from v15
|
||||
if (m_majorVersion >= HARDFORK_VIEW_TAGS_VERSION) {
|
||||
|
@ -274,6 +281,11 @@ int PoolBlock::deserialize(const uint8_t* data, size_t size, const SideChain& si
|
|||
return __LINE__;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (!check_keys(m_txkeyPub, m_txkeySec)) {
|
||||
return __LINE__;
|
||||
}
|
||||
|
||||
READ_BUF(m_parent.h, HASH_SIZE);
|
||||
|
||||
|
@ -317,6 +329,10 @@ int PoolBlock::deserialize(const uint8_t* data, size_t size, const SideChain& si
|
|||
READ_VARINT(m_cumulativeDifficulty.lo);
|
||||
READ_VARINT(m_cumulativeDifficulty.hi);
|
||||
|
||||
if (sidechain_version > 1) {
|
||||
READ_BUF(m_sidechainExtraBuf, sizeof(m_sidechainExtraBuf));
|
||||
}
|
||||
|
||||
#undef READ_BYTE
|
||||
#undef EXPECT_BYTE
|
||||
#undef READ_VARINT
|
||||
|
@ -382,13 +398,13 @@ int PoolBlock::deserialize(const uint8_t* data, size_t size, const SideChain& si
|
|||
},
|
||||
static_cast<int>(size + outputs_blob_size_diff + transactions_blob_size_diff + consensus_id.size()), check.h, HASH_SIZE);
|
||||
|
||||
if (check != m_sidechainId) {
|
||||
return __LINE__;
|
||||
}
|
||||
|
||||
#if POOL_BLOCK_DEBUG
|
||||
m_sideChainDataDebug.assign(sidechain_data_begin, data_end);
|
||||
#endif
|
||||
|
||||
if (check != m_sidechainId) {
|
||||
return __LINE__;
|
||||
}
|
||||
}
|
||||
catch (std::exception& e) {
|
||||
const char* msg = e.what();
|
||||
|
|
|
@ -68,7 +68,7 @@ RandomX_Hasher::RandomX_Hasher(p2pool* pool)
|
|||
m_cache[i] = randomx_alloc_cache(flags);
|
||||
if (!m_cache[i]) {
|
||||
LOGERR(1, "couldn't allocate RandomX cache, aborting");
|
||||
panic();
|
||||
PANIC_STOP();
|
||||
}
|
||||
}
|
||||
memory_allocated += RANDOMX_ARGON_MEMORY * 1024;
|
||||
|
@ -206,7 +206,7 @@ void RandomX_Hasher::set_seed(const hash& seed)
|
|||
m_vm[m_index].vm = randomx_create_vm(flags, m_cache[m_index], nullptr);
|
||||
if (!m_vm[m_index].vm) {
|
||||
LOGERR(1, "couldn't allocate RandomX light VM, aborting");
|
||||
panic();
|
||||
PANIC_STOP();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -308,7 +308,7 @@ void RandomX_Hasher::set_old_seed(const hash& seed)
|
|||
m_vm[old_index].vm = randomx_create_vm(flags, m_cache[old_index], nullptr);
|
||||
if (!m_vm[old_index].vm) {
|
||||
LOGERR(1, "couldn't allocate RandomX light VM, aborting");
|
||||
panic();
|
||||
PANIC_STOP();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -376,7 +376,7 @@ RandomX_Hasher_RPC::RandomX_Hasher_RPC(p2pool* pool)
|
|||
int err = uv_loop_init(&m_loop);
|
||||
if (err) {
|
||||
LOGERR(1, "failed to create event loop, error " << uv_err_name(err));
|
||||
panic();
|
||||
PANIC_STOP();
|
||||
}
|
||||
|
||||
// Init loop user data before running it
|
||||
|
@ -394,7 +394,7 @@ RandomX_Hasher_RPC::RandomX_Hasher_RPC(p2pool* pool)
|
|||
err = uv_thread_create(&m_loopThread, loop, this);
|
||||
if (err) {
|
||||
LOGERR(1, "failed to start event loop thread, error " << uv_err_name(err));
|
||||
panic();
|
||||
PANIC_STOP();
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -54,9 +54,10 @@ namespace p2pool {
|
|||
static constexpr uint8_t default_consensus_id[HASH_SIZE] = { 34,175,126,231,181,11,104,146,227,153,218,107,44,108,68,39,178,81,4,212,169,4,142,0,177,110,157,240,68,7,249,24 };
|
||||
static constexpr uint8_t mini_consensus_id[HASH_SIZE] = { 57,130,201,26,149,174,199,250,66,80,189,18,108,216,194,220,136,23,63,24,64,113,221,44,219,86,39,163,53,24,126,196 };
|
||||
|
||||
NetworkType SideChain::s_networkType = NetworkType::Invalid;
|
||||
|
||||
SideChain::SideChain(p2pool* pool, NetworkType type, const char* pool_name)
|
||||
: m_pool(pool)
|
||||
, m_networkType(type)
|
||||
, m_chainTip{ nullptr }
|
||||
, m_seenWalletsLastPruneTime(0)
|
||||
, m_poolName(pool_name ? pool_name : "default")
|
||||
|
@ -64,19 +65,28 @@ SideChain::SideChain(p2pool* pool, NetworkType type, const char* pool_name)
|
|||
, m_minDifficulty(MIN_DIFFICULTY, 0)
|
||||
, m_chainWindowSize(2160)
|
||||
, m_unclePenalty(20)
|
||||
, m_curDifficulty(m_minDifficulty)
|
||||
, m_precalcFinished(false)
|
||||
{
|
||||
LOGINFO(1, log::LightCyan() << "network type = " << m_networkType);
|
||||
if (s_networkType == NetworkType::Invalid) {
|
||||
s_networkType = type;
|
||||
}
|
||||
else if (s_networkType != type) {
|
||||
LOGERR(1, "can't run both " << s_networkType << " and " << type << " at the same time");
|
||||
PANIC_STOP();
|
||||
}
|
||||
|
||||
LOGINFO(1, log::LightCyan() << "network type = " << type);
|
||||
|
||||
if (m_pool && !load_config(m_pool->params().m_config)) {
|
||||
panic();
|
||||
PANIC_STOP();
|
||||
}
|
||||
|
||||
if (!check_config()) {
|
||||
panic();
|
||||
PANIC_STOP();
|
||||
}
|
||||
|
||||
m_curDifficulty = m_minDifficulty;
|
||||
|
||||
uv_rwlock_init_checked(&m_sidechainLock);
|
||||
uv_mutex_init_checked(&m_seenWalletsLock);
|
||||
uv_mutex_init_checked(&m_seenBlocksLock);
|
||||
|
@ -89,7 +99,7 @@ SideChain::SideChain(p2pool* pool, NetworkType type, const char* pool_name)
|
|||
char buf[log::Stream::BUF_SIZE + 1];
|
||||
log::Stream s(buf);
|
||||
|
||||
s << m_networkType << '\0'
|
||||
s << s_networkType << '\0'
|
||||
<< m_poolName << '\0'
|
||||
<< m_poolPassword << '\0'
|
||||
<< m_targetBlockTime << '\0'
|
||||
|
@ -117,7 +127,7 @@ SideChain::SideChain(p2pool* pool, NetworkType type, const char* pool_name)
|
|||
cache = randomx_alloc_cache(flags);
|
||||
if (!cache) {
|
||||
LOGERR(1, "couldn't allocate RandomX cache, aborting");
|
||||
panic();
|
||||
PANIC_STOP();
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -141,12 +151,12 @@ SideChain::SideChain(p2pool* pool, NetworkType type, const char* pool_name)
|
|||
}
|
||||
|
||||
hash id;
|
||||
keccak(reinterpret_cast<uint8_t*>(scratchpad), static_cast<int>(scratchpad_size * sizeof(rx_vec_i128)), id.h, HASH_SIZE);
|
||||
keccak(reinterpret_cast<uint8_t*>(scratchpad), static_cast<int>(scratchpad_size * sizeof(rx_vec_i128)), id.h);
|
||||
randomx_release_cache(cache);
|
||||
m_consensusId.assign(id.h, id.h + HASH_SIZE);
|
||||
#else
|
||||
LOGERR(1, "Can't calculate consensus ID without RandomX library");
|
||||
panic();
|
||||
PANIC_STOP();
|
||||
#endif
|
||||
}
|
||||
|
||||
|
@ -158,6 +168,8 @@ SideChain::SideChain(p2pool* pool, NetworkType type, const char* pool_name)
|
|||
m_consensusIdDisplayStr.assign(buf);
|
||||
LOGINFO(1, "consensus ID = " << log::LightCyan() << m_consensusIdDisplayStr.c_str());
|
||||
|
||||
memcpy(m_consensusHash.h, m_consensusId.data(), HASH_SIZE);
|
||||
|
||||
uv_cond_init_checked(&m_precalcJobsCond);
|
||||
uv_mutex_init_checked(&m_precalcJobsMutex);
|
||||
m_precalcJobs.reserve(16);
|
||||
|
@ -199,16 +211,17 @@ SideChain::~SideChain()
|
|||
for (const auto& it : m_blocksById) {
|
||||
delete it.second;
|
||||
}
|
||||
|
||||
s_networkType = NetworkType::Invalid;
|
||||
}
|
||||
|
||||
void SideChain::fill_sidechain_data(PoolBlock& block, const Wallet* w, const hash& txkeySec, std::vector<MinerShare>& shares) const
|
||||
void SideChain::fill_sidechain_data(PoolBlock& block, std::vector<MinerShare>& shares) const
|
||||
{
|
||||
ReadLock lock(m_sidechainLock);
|
||||
|
||||
block.m_minerWallet = *w;
|
||||
block.m_txkeySec = txkeySec;
|
||||
const int sidechain_version = block.get_sidechain_version();
|
||||
block.m_uncles.clear();
|
||||
|
||||
ReadLock lock(m_sidechainLock);
|
||||
|
||||
const PoolBlock* tip = m_chainTip;
|
||||
|
||||
if (!tip) {
|
||||
|
@ -217,10 +230,20 @@ void SideChain::fill_sidechain_data(PoolBlock& block, const Wallet* w, const has
|
|||
block.m_difficulty = m_minDifficulty;
|
||||
block.m_cumulativeDifficulty = m_minDifficulty;
|
||||
|
||||
if (sidechain_version > 1) {
|
||||
block.m_txkeySecSeed = m_consensusHash;
|
||||
get_tx_keys(block.m_txkeyPub, block.m_txkeySec, block.m_txkeySecSeed, block.m_prevId);
|
||||
}
|
||||
|
||||
get_shares(&block, shares);
|
||||
return;
|
||||
}
|
||||
|
||||
if (sidechain_version > 1) {
|
||||
block.m_txkeySecSeed = (block.m_prevId == tip->m_prevId) ? tip->m_txkeySecSeed : tip->calculate_tx_key_seed();
|
||||
get_tx_keys(block.m_txkeyPub, block.m_txkeySec, block.m_txkeySecSeed, block.m_prevId);
|
||||
}
|
||||
|
||||
block.m_parent = tip->m_sidechainId;
|
||||
block.m_sidechainHeight = tip->m_sidechainHeight + 1;
|
||||
|
||||
|
@ -316,23 +339,50 @@ P2PServer* SideChain::p2pServer() const
|
|||
return m_pool ? m_pool->p2p_server() : nullptr;
|
||||
}
|
||||
|
||||
bool SideChain::get_shares(const PoolBlock* tip, std::vector<MinerShare>& shares) const
|
||||
bool SideChain::get_shares(const PoolBlock* tip, std::vector<MinerShare>& shares, uint64_t* bottom_height, bool quiet) const
|
||||
{
|
||||
shares.clear();
|
||||
shares.reserve(m_chainWindowSize * 2);
|
||||
if (tip->m_txkeySecSeed.empty()) {
|
||||
LOGERR(1, "tx key seed is not set, fix the code!");
|
||||
}
|
||||
|
||||
const int L = quiet ? 6 : 3;
|
||||
|
||||
// Collect shares from each block in the PPLNS window, starting from the "tip"
|
||||
|
||||
uint64_t block_depth = 0;
|
||||
const PoolBlock* cur = tip;
|
||||
|
||||
difficulty_type mainchain_diff
|
||||
#ifdef P2POOL_UNIT_TESTS
|
||||
= m_testMainChainDiff
|
||||
#endif
|
||||
;
|
||||
|
||||
if (m_pool && !tip->m_parent.empty()) {
|
||||
const uint64_t h = p2pool::get_seed_height(tip->m_txinGenHeight);
|
||||
if (!m_pool->get_difficulty_at_height(h, mainchain_diff)) {
|
||||
LOGWARN(L, "get_shares: couldn't get mainchain difficulty for height = " << h);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
// Dynamic PPLNS window starting from v2
|
||||
// Limit PPLNS weight to 2x of the Monero difficulty (max 2 blocks per PPLNS window on average)
|
||||
const int sidechain_version = tip->get_sidechain_version();
|
||||
const difficulty_type max_pplns_weight = (sidechain_version > 1) ? (mainchain_diff * 2) : diff_max;
|
||||
difficulty_type pplns_weight;
|
||||
|
||||
unordered_set<MinerShare> shares_set;
|
||||
shares_set.reserve(m_chainWindowSize * 2);
|
||||
|
||||
do {
|
||||
MinerShare cur_share{ cur->m_difficulty, &cur->m_minerWallet };
|
||||
difficulty_type cur_weight = cur->m_difficulty;
|
||||
|
||||
for (const hash& uncle_id : cur->m_uncles) {
|
||||
auto it = m_blocksById.find(uncle_id);
|
||||
if (it == m_blocksById.end()) {
|
||||
LOGWARN(3, "get_shares: can't find uncle block at height = " << cur->m_sidechainHeight << ", id = " << uncle_id);
|
||||
LOGWARN(3, "get_shares: can't calculate shares for block at height = " << tip->m_sidechainHeight << ", id = " << tip->m_sidechainId << ", mainchain height = " << tip->m_txinGenHeight);
|
||||
LOGWARN(L, "get_shares: can't find uncle block at height = " << cur->m_sidechainHeight << ", id = " << uncle_id);
|
||||
LOGWARN(L, "get_shares: can't calculate shares for block at height = " << tip->m_sidechainHeight << ", id = " << tip->m_sidechainId << ", mainchain height = " << tip->m_txinGenHeight);
|
||||
return false;
|
||||
}
|
||||
|
||||
|
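The hunk above replaces the fixed PPLNS window with a dynamic one for sidechain v2: shares are accumulated from the tip until their total weight reaches twice the Monero network difficulty, each uncle keeps its difficulty minus a penalty that is credited to the block that included it, and repeated wallets are merged through an unordered_set. A minimal standalone sketch of that accumulation rule, using plain uint64_t weights and a hypothetical Block type in place of p2pool's difficulty_type and PoolBlock (collect_shares is an illustrative name, not p2pool code):

#include <cstdint>
#include <string>
#include <unordered_map>
#include <vector>

// Hypothetical simplified types; p2pool uses PoolBlock, Wallet and 128-bit difficulty_type.
struct Block {
    std::string miner;
    uint64_t difficulty = 0;
    std::vector<const Block*> uncles;
    const Block* parent = nullptr;
};

// Accumulate PPLNS weights starting from `tip` until `max_weight` is reached.
// Uncles keep (100 - penalty)% of their weight; the penalty goes to the block
// that included them, mirroring the rule in SideChain::get_shares().
std::unordered_map<std::string, uint64_t> collect_shares(const Block* tip,
                                                         uint64_t max_weight,
                                                         uint64_t uncle_penalty_percent)
{
    std::unordered_map<std::string, uint64_t> shares;
    uint64_t pplns_weight = 0;

    for (const Block* cur = tip; cur; cur = cur->parent) {
        uint64_t cur_weight = cur->difficulty;

        for (const Block* uncle : cur->uncles) {
            const uint64_t penalty = uncle->difficulty * uncle_penalty_percent / 100;
            const uint64_t uncle_weight = uncle->difficulty - penalty;

            // Skip uncles that would push the PPLNS weight above the limit
            if (pplns_weight + uncle_weight > max_weight) {
                continue;
            }
            shares[uncle->miner] += uncle_weight;
            cur_weight += penalty;
            pplns_weight += uncle_weight;
        }

        // Non-uncle shares are always added, so the result is never empty
        shares[cur->miner] += cur_weight;
        pplns_weight += cur_weight;

        if (pplns_weight > max_weight) {
            break;
        }
    }
    return shares;
}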
@ -345,11 +395,34 @@ bool SideChain::get_shares(const PoolBlock* tip, std::vector<MinerShare>& shares
|
|||
|
||||
// Take some % of uncle's weight into this share
|
||||
const difficulty_type uncle_penalty = uncle->m_difficulty * m_unclePenalty / 100;
|
||||
cur_share.m_weight += uncle_penalty;
|
||||
shares.emplace_back(uncle->m_difficulty - uncle_penalty, &uncle->m_minerWallet);
|
||||
const difficulty_type uncle_weight = uncle->m_difficulty - uncle_penalty;
|
||||
const difficulty_type new_pplns_weight = pplns_weight + uncle_weight;
|
||||
|
||||
// Skip uncles that push PPLNS weight above the limit
|
||||
if (new_pplns_weight > max_pplns_weight) {
|
||||
continue;
|
||||
}
|
||||
|
||||
shares.push_back(cur_share);
|
||||
cur_weight += uncle_penalty;
|
||||
|
||||
auto result = shares_set.emplace(uncle_weight, &uncle->m_minerWallet);
|
||||
if (!result.second) {
|
||||
result.first->m_weight += uncle_weight;
|
||||
}
|
||||
pplns_weight = new_pplns_weight;
|
||||
}
|
||||
|
||||
// Always add non-uncle shares even if PPLNS weight goes above the limit
|
||||
auto result = shares_set.emplace(cur_weight, &cur->m_minerWallet);
|
||||
if (!result.second) {
|
||||
result.first->m_weight += cur_weight;
|
||||
}
|
||||
pplns_weight += cur_weight;
|
||||
|
||||
// One non-uncle share can go above the limit, but it will also guarantee that "shares" is never empty
|
||||
if (pplns_weight > max_pplns_weight) {
|
||||
break;
|
||||
}
|
||||
|
||||
++block_depth;
|
||||
if (block_depth >= m_chainWindowSize) {
|
||||
|
@ -363,77 +436,39 @@ bool SideChain::get_shares(const PoolBlock* tip, std::vector<MinerShare>& shares
|
|||
|
||||
auto it = m_blocksById.find(cur->m_parent);
|
||||
if (it == m_blocksById.end()) {
|
||||
LOGWARN(3, "get_shares: can't find parent block at height = " << cur->m_sidechainHeight - 1 << ", id = " << cur->m_parent);
|
||||
LOGWARN(3, "get_shares: can't calculate shares for block at height = " << tip->m_sidechainHeight << ", id = " << tip->m_sidechainId << ", mainchain height = " << tip->m_txinGenHeight);
|
||||
LOGWARN(L, "get_shares: can't find parent block at height = " << cur->m_sidechainHeight - 1 << ", id = " << cur->m_parent);
|
||||
LOGWARN(L, "get_shares: can't calculate shares for block at height = " << tip->m_sidechainHeight << ", id = " << tip->m_sidechainId << ", mainchain height = " << tip->m_txinGenHeight);
|
||||
return false;
|
||||
}
|
||||
|
||||
cur = it->second;
|
||||
} while (true);
|
||||
|
||||
// Combine shares with the same wallet addresses
|
||||
if (bottom_height) {
|
||||
*bottom_height = cur->m_sidechainHeight;
|
||||
}
|
||||
|
||||
shares.assign(shares_set.begin(), shares_set.end());
|
||||
std::sort(shares.begin(), shares.end(), [](const auto& a, const auto& b) { return *a.m_wallet < *b.m_wallet; });
|
||||
|
||||
size_t k = 0;
|
||||
for (size_t i = 1, n = shares.size(); i < n; ++i)
|
||||
{
|
||||
if (*shares[i].m_wallet == *shares[k].m_wallet) {
|
||||
shares[k].m_weight += shares[i].m_weight;
|
||||
}
|
||||
else {
|
||||
++k;
|
||||
shares[k].m_weight = shares[i].m_weight;
|
||||
shares[k].m_wallet = shares[i].m_wallet;
|
||||
const uint64_t n = shares.size();

// Shuffle shares
if ((sidechain_version > 1) && (n > 1)) {
hash h;
keccak(tip->m_txkeySecSeed.h, HASH_SIZE, h.h);

uint64_t seed = *reinterpret_cast<uint64_t*>(h.h);
if (seed == 0) seed = 1;

for (uint64_t i = 0, k; i < n - 1; ++i) {
seed = xorshift64star(seed);
umul128(seed, n - i, &k);
std::swap(shares[i], shares[i + k]);
}
}

shares.resize(k + 1);

LOGINFO(6, "get_shares: " << k + 1 << " unique wallets in PPLNS window");
return true;
}
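Once the merged shares are copied out of the set, v2 shuffles them with a Fisher-Yates pass driven by xorshift64star, seeded from a keccak hash of the tip's tx key seed, so every node arrives at the same output order without exchanging any extra state. A compact standalone sketch of that deterministic shuffle; deterministic_shuffle is an illustrative name, and it assumes a GCC/Clang-style unsigned __int128 where p2pool uses umul128 to map the 64-bit state onto the remaining range:

#include <cstdint>
#include <utility>
#include <vector>

static inline uint64_t xorshift64star(uint64_t x)
{
    x ^= x >> 12;
    x ^= x << 25;
    x ^= x >> 27;
    return x * 0x2545F4914F6CDD1DULL;
}

// Deterministically shuffle `v` with a non-zero 64-bit seed.
// Equal seeds always produce the same permutation.
template<typename T>
void deterministic_shuffle(std::vector<T>& v, uint64_t seed)
{
    if (seed == 0) seed = 1; // xorshift64star is stuck at 0

    const uint64_t n = v.size();
    for (uint64_t i = 0; i + 1 < n; ++i) {
        seed = xorshift64star(seed);
        // High 64 bits of seed * (n - i) give a value in [0, n - i)
        const uint64_t k = static_cast<uint64_t>(
            (static_cast<unsigned __int128>(seed) * (n - i)) >> 64);
        std::swap(v[i], v[i + k]);
    }
}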
|
||||
|
||||
bool SideChain::get_wallets(const PoolBlock* tip, std::vector<const Wallet*>& wallets) const
|
||||
{
|
||||
// Collect wallets from each block in the PPLNS window, starting from the "tip"
|
||||
wallets.clear();
|
||||
wallets.reserve(m_chainWindowSize * 2);
|
||||
|
||||
uint64_t block_depth = 0;
|
||||
const PoolBlock* cur = tip;
|
||||
|
||||
do {
|
||||
wallets.push_back(&cur->m_minerWallet);
|
||||
|
||||
for (const hash& uncle_id : cur->m_uncles) {
|
||||
auto it = m_blocksById.find(uncle_id);
|
||||
if (it == m_blocksById.end()) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// Skip uncles which are already out of PPLNS window
|
||||
if (tip->m_sidechainHeight - it->second->m_sidechainHeight < m_chainWindowSize) {
|
||||
wallets.push_back(&it->second->m_minerWallet);
|
||||
}
|
||||
}
|
||||
|
||||
++block_depth;
|
||||
if ((block_depth >= m_chainWindowSize) || (cur->m_sidechainHeight == 0)) {
|
||||
break;
|
||||
}
|
||||
|
||||
auto it = m_blocksById.find(cur->m_parent);
|
||||
if (it == m_blocksById.end()) {
|
||||
return false;
|
||||
}
|
||||
|
||||
cur = it->second;
|
||||
} while (true);
|
||||
|
||||
// Remove duplicates
|
||||
std::sort(wallets.begin(), wallets.end(), [](const Wallet* a, const Wallet* b) { return *a < *b; });
|
||||
wallets.erase(std::unique(wallets.begin(), wallets.end(), [](const Wallet* a, const Wallet* b) { return *a == *b; }), wallets.end());
|
||||
|
||||
LOGINFO(6, "get_shares: " << n << " unique wallets in PPLNS window");
|
||||
return true;
|
||||
}
|
||||
|
||||
|
@ -703,7 +738,18 @@ bool SideChain::get_outputs_blob(PoolBlock* block, uint64_t total_reward, std::v
|
|||
{
|
||||
blob.clear();
|
||||
|
||||
struct Data
|
||||
{
|
||||
FORCEINLINE Data() : counter(0) {}
|
||||
Data(Data&&) = delete;
|
||||
Data& operator=(Data&&) = delete;
|
||||
|
||||
std::vector<MinerShare> tmpShares;
|
||||
hash txkeySec;
|
||||
std::atomic<int> counter;
|
||||
};
|
||||
|
||||
std::shared_ptr<Data> data;
|
||||
std::vector<uint64_t> tmpRewards;
|
||||
{
|
||||
ReadLock lock(m_sidechainLock);
|
||||
|
@ -732,80 +778,32 @@ bool SideChain::get_outputs_blob(PoolBlock* block, uint64_t total_reward, std::v
|
|||
return true;
|
||||
}
|
||||
|
||||
if (!get_shares(block, tmpShares) || !split_reward(total_reward, tmpShares, tmpRewards) || (tmpRewards.size() != tmpShares.size())) {
|
||||
data = std::make_shared<Data>();
|
||||
data->txkeySec = block->m_txkeySec;
|
||||
|
||||
if (!get_shares(block, data->tmpShares) || !split_reward(total_reward, data->tmpShares, tmpRewards) || (tmpRewards.size() != data->tmpShares.size())) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
const size_t n = tmpShares.size();
|
||||
const size_t n = data->tmpShares.size();
|
||||
data->counter = static_cast<int>(n) - 1;
|
||||
|
||||
// Helper jobs call get_eph_public_key with indices in descending order
|
||||
// Current thread will process indices in ascending order so when they meet, everything will be cached
|
||||
|
||||
std::atomic<int> counter{ 0 };
|
||||
std::atomic<int> num_helper_jobs_finished{ 0 };
|
||||
int num_helper_jobs_started = 0;
|
||||
|
||||
if (loop) {
|
||||
uint32_t HELPER_JOBS_COUNT = std::thread::hardware_concurrency();
|
||||
|
||||
// this thread will also be running, so reduce helper job count by 1
|
||||
if (HELPER_JOBS_COUNT > 0) {
|
||||
--HELPER_JOBS_COUNT;
|
||||
}
|
||||
|
||||
// No more than 8 helper jobs because our UV worker thread pool has 8 threads
|
||||
if (HELPER_JOBS_COUNT > 8) {
|
||||
HELPER_JOBS_COUNT = 8;
|
||||
}
|
||||
|
||||
struct Work
|
||||
{
|
||||
uv_work_t req;
|
||||
const std::vector<MinerShare>& tmpShares;
|
||||
const hash& txkeySec;
|
||||
std::atomic<int>& counter;
|
||||
std::atomic<int>& num_helper_jobs_finished;
|
||||
|
||||
// Fix MSVC warnings
|
||||
Work() = delete;
|
||||
Work& operator=(Work&&) = delete;
|
||||
};
|
||||
|
||||
counter = static_cast<int>(n) - 1;
|
||||
num_helper_jobs_started = HELPER_JOBS_COUNT;
|
||||
|
||||
for (size_t i = 0; i < HELPER_JOBS_COUNT; ++i) {
|
||||
Work* w = new Work{ {}, tmpShares, block->m_txkeySec, counter, num_helper_jobs_finished };
|
||||
w->req.data = w;
|
||||
|
||||
const int err = uv_queue_work(loop, &w->req,
|
||||
[](uv_work_t* req)
|
||||
{
|
||||
Work* work = reinterpret_cast<Work*>(req->data);
|
||||
parallel_run(loop, [data]() {
|
||||
Data* d = data.get();
|
||||
hash eph_public_key;
|
||||
|
||||
int index;
|
||||
while ((index = work->counter.fetch_sub(1)) >= 0) {
|
||||
while ((index = d->counter.fetch_sub(1)) >= 0) {
|
||||
uint8_t view_tag;
|
||||
if (!work->tmpShares[index].m_wallet->get_eph_public_key(work->txkeySec, static_cast<size_t>(index), eph_public_key, view_tag)) {
|
||||
if (!d->tmpShares[index].m_wallet->get_eph_public_key(d->txkeySec, static_cast<size_t>(index), eph_public_key, view_tag)) {
|
||||
LOGWARN(6, "get_eph_public_key failed at index " << index);
|
||||
}
|
||||
}
|
||||
|
||||
++work->num_helper_jobs_finished;
|
||||
},
|
||||
[](uv_work_t* req, int /*status*/)
|
||||
{
|
||||
delete reinterpret_cast<Work*>(req->data);
|
||||
});
|
||||
|
||||
if (err) {
|
||||
LOGERR(1, "get_outputs_blob: uv_queue_work failed, error " << uv_err_name(err));
|
||||
--num_helper_jobs_started;
|
||||
delete w;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
blob.reserve(n * 39 + 64);
|
||||
|
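The refactor above hands the helper jobs a shared, reference-counted Data block and a single atomic counter: helpers take indices from the top with fetch_sub, the main thread walks the outputs from the bottom, and as soon as the two cursors meet the counter is forced to -1 so the helpers drain immediately, by which point every ephemeral public key the main thread still needs is already cached. A standalone sketch of that two-cursor pattern, with std::thread standing in for uv_queue_work and a trivial stand-in for get_eph_public_key():

#include <atomic>
#include <cstdio>
#include <thread>
#include <vector>

int main()
{
    const int n = 1000;
    std::vector<int> cache(n, 0);
    std::atomic<int> counter{ n - 1 };

    auto compute = [&](int i) { cache[i] = i * i; }; // stand-in for the real key derivation

    // Helper thread: consumes indices in descending order
    std::thread helper([&]() {
        int index;
        while ((index = counter.fetch_sub(1)) >= 0) {
            compute(index);
        }
    });

    // Main thread: ascending order; stop the helper once the cursors meet
    for (int i = 0; i < n; ++i) {
        const int c = counter.load();
        if ((c >= 0) && (i >= c)) {
            counter = -1; // helper exits on its next fetch_sub
        }
        compute(i); // a few entries near the meeting point may be computed twice; harmless here
    }

    helper.join();
    std::printf("done, cache[10] = %d\n", cache[10]);
    return 0;
}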
@ -820,10 +818,10 @@ bool SideChain::get_outputs_blob(PoolBlock* block, uint64_t total_reward, std::v
|
|||
hash eph_public_key;
|
||||
for (size_t i = 0; i < n; ++i) {
|
||||
// stop helper jobs when they meet with current thread
|
||||
const int c = counter.load();
|
||||
const int c = data->counter.load();
|
||||
if ((c >= 0) && (static_cast<int>(i) >= c)) {
|
||||
// this will cause all helper jobs to finish immediately
|
||||
counter = -1;
|
||||
data->counter = -1;
|
||||
}
|
||||
|
||||
writeVarint(tmpRewards[i], blob);
|
||||
|
@ -831,7 +829,7 @@ bool SideChain::get_outputs_blob(PoolBlock* block, uint64_t total_reward, std::v
|
|||
blob.emplace_back(tx_type);
|
||||
|
||||
uint8_t view_tag;
|
||||
if (!tmpShares[i].m_wallet->get_eph_public_key(block->m_txkeySec, i, eph_public_key, view_tag)) {
|
||||
if (!data->tmpShares[i].m_wallet->get_eph_public_key(data->txkeySec, i, eph_public_key, view_tag)) {
|
||||
LOGWARN(6, "get_eph_public_key failed at index " << i);
|
||||
}
|
||||
blob.insert(blob.end(), eph_public_key.h, eph_public_key.h + HASH_SIZE);
|
||||
|
@ -844,22 +842,12 @@ bool SideChain::get_outputs_blob(PoolBlock* block, uint64_t total_reward, std::v
|
|||
}
|
||||
|
||||
block->m_outputs.shrink_to_fit();
|
||||
|
||||
if (loop) {
|
||||
// this will cause all helper jobs to finish immediately
|
||||
counter = -1;
|
||||
|
||||
while (num_helper_jobs_finished < num_helper_jobs_started) {
|
||||
std::this_thread::yield();
|
||||
}
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
void SideChain::print_status(bool obtain_sidechain_lock) const
|
||||
{
|
||||
std::vector<hash> blocks_in_window;
|
||||
unordered_set<hash> blocks_in_window;
|
||||
blocks_in_window.reserve(m_chainWindowSize * 9 / 8);
|
||||
|
||||
const difficulty_type diff = difficulty();
|
||||
|
@ -867,51 +855,61 @@ void SideChain::print_status(bool obtain_sidechain_lock) const
|
|||
if (obtain_sidechain_lock) uv_rwlock_rdlock(&m_sidechainLock);
|
||||
ON_SCOPE_LEAVE([this, obtain_sidechain_lock]() { if (obtain_sidechain_lock) uv_rwlock_rdunlock(&m_sidechainLock); });
|
||||
|
||||
uint64_t rem;
|
||||
uint64_t pool_hashrate = udiv128(diff.hi, diff.lo, m_targetBlockTime, &rem);
|
||||
const uint64_t pool_hashrate = (diff / m_targetBlockTime).lo;
|
||||
|
||||
difficulty_type network_diff = m_pool->miner_data().difficulty;
|
||||
uint64_t network_hashrate = udiv128(network_diff.hi, network_diff.lo, MONERO_BLOCK_TIME, &rem);
|
||||
const difficulty_type network_diff = m_pool->miner_data().difficulty;
|
||||
const uint64_t network_hashrate = (network_diff / MONERO_BLOCK_TIME).lo;
|
||||
|
||||
const PoolBlock* tip = m_chainTip;
|
||||
|
||||
std::vector<MinerShare> shares;
|
||||
uint64_t bottom_height = 0;
|
||||
if (tip) {
|
||||
get_shares(tip, shares, &bottom_height, true);
|
||||
}
|
||||
|
||||
const uint64_t window_size = (tip && bottom_height) ? (tip->m_sidechainHeight - bottom_height + 1U) : m_chainWindowSize;
|
||||
|
||||
uint64_t block_depth = 0;
|
||||
const PoolBlock* cur = tip;
|
||||
const uint64_t tip_height = tip ? tip->m_sidechainHeight : 0;
|
||||
|
||||
uint32_t total_blocks_in_window = 0;
|
||||
uint32_t total_uncles_in_window = 0;
|
||||
uint64_t total_blocks_in_window = 0;
|
||||
uint64_t total_uncles_in_window = 0;
|
||||
|
||||
// each dot corresponds to m_chainWindowSize / 30 shares, with current values, 2160 / 30 = 72
|
||||
std::array<uint32_t, 30> our_blocks_in_window{};
|
||||
std::array<uint32_t, 30> our_uncles_in_window{};
|
||||
// each dot corresponds to window_size / 30 shares, with current values, 2160 / 30 = 72
|
||||
constexpr size_t N = 30;
|
||||
std::array<uint64_t, N> our_blocks_in_window{};
|
||||
std::array<uint64_t, N> our_uncles_in_window{};
|
||||
|
||||
const Wallet& w = m_pool->params().m_wallet;
|
||||
|
||||
while (cur) {
|
||||
blocks_in_window.emplace_back(cur->m_sidechainId);
|
||||
blocks_in_window.emplace(cur->m_sidechainId);
|
||||
++total_blocks_in_window;
|
||||
|
||||
if (cur->m_minerWallet == m_pool->params().m_wallet) {
|
||||
// this produces an integer division with quotient rounded up, avoids non-whole divisions from overflowing on total_blocks_in_window
|
||||
const size_t window_index = (total_blocks_in_window - 1) / ((m_chainWindowSize + our_blocks_in_window.size() - 1) / our_blocks_in_window.size());
|
||||
our_blocks_in_window[std::min(window_index, our_blocks_in_window.size() - 1)]++; // clamp window_index, even if total_blocks_in_window is not larger than m_chainWindowSize
|
||||
// "block_depth <= window_size - 1" here (see the check below), so window_index will be <= N - 1
|
||||
// This will map the range [0, window_size - 1] into [0, N - 1]
|
||||
const size_t window_index = (window_size > 1) ? (block_depth * (N - 1) / (window_size - 1)) : 0;
|
||||
|
||||
if (cur->m_minerWallet == w) {
|
||||
++our_blocks_in_window[window_index];
|
||||
}
|
||||
|
||||
++block_depth;
|
||||
if (block_depth >= m_chainWindowSize) {
|
||||
if (block_depth >= window_size) {
|
||||
break;
|
||||
}
|
||||
|
||||
for (const hash& uncle_id : cur->m_uncles) {
|
||||
blocks_in_window.emplace_back(uncle_id);
|
||||
blocks_in_window.emplace(uncle_id);
|
||||
auto it = m_blocksById.find(uncle_id);
|
||||
if (it != m_blocksById.end()) {
|
||||
PoolBlock* uncle = it->second;
|
||||
if (tip_height - uncle->m_sidechainHeight < m_chainWindowSize) {
|
||||
if (tip_height - uncle->m_sidechainHeight < window_size) {
|
||||
++total_uncles_in_window;
|
||||
if (uncle->m_minerWallet == m_pool->params().m_wallet) {
|
||||
// this produces an integer division with quotient rounded up, avoids non-whole divisions from overflowing on total_blocks_in_window
|
||||
const size_t window_index = (total_blocks_in_window - 1) / ((m_chainWindowSize + our_uncles_in_window.size() - 1) / our_uncles_in_window.size());
|
||||
our_uncles_in_window[std::min(window_index, our_uncles_in_window.size() - 1)]++; // clamp window_index, even if total_blocks_in_window is not larger than m_chainWindowSize
|
||||
if (uncle->m_minerWallet == w) {
|
||||
++our_uncles_in_window[window_index];
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -923,65 +921,50 @@ void SideChain::print_status(bool obtain_sidechain_lock) const
|
|||
uint64_t total_orphans = 0;
|
||||
uint64_t our_orphans = 0;
|
||||
|
||||
uint64_t your_reward = 0;
|
||||
uint64_t total_reward = 0;
|
||||
|
||||
if (tip) {
|
||||
std::sort(blocks_in_window.begin(), blocks_in_window.end());
|
||||
for (uint64_t i = 0; (i < m_chainWindowSize) && (i <= tip_height); ++i) {
|
||||
for (uint64_t i = 0; (i < window_size) && (i <= tip_height); ++i) {
|
||||
auto it = m_blocksByHeight.find(tip_height - i);
|
||||
if (it == m_blocksByHeight.end()) {
|
||||
continue;
|
||||
}
|
||||
for (const PoolBlock* block : it->second) {
|
||||
if (!std::binary_search(blocks_in_window.begin(), blocks_in_window.end(), block->m_sidechainId)) {
|
||||
if (blocks_in_window.find(block->m_sidechainId) == blocks_in_window.end()) {
|
||||
LOGINFO(4, "orphan block at height " << log::Gray() << block->m_sidechainHeight << log::NoColor() << ": " << log::Gray() << block->m_sidechainId);
|
||||
++total_orphans;
|
||||
if (block->m_minerWallet == m_pool->params().m_wallet) {
|
||||
if (block->m_minerWallet == w) {
|
||||
++our_orphans;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const Wallet& w = m_pool->params().m_wallet;
|
||||
const uint8_t tx_type = tip->get_tx_type();
|
||||
|
||||
hash eph_public_key;
|
||||
for (size_t i = 0, n = tip->m_outputs.size(); i < n; ++i) {
|
||||
const PoolBlock::TxOutput& out = tip->m_outputs[i];
|
||||
if (!your_reward) {
|
||||
if (tx_type == TXOUT_TO_TAGGED_KEY) {
|
||||
uint8_t view_tag;
|
||||
const uint8_t expected_view_tag = out.m_viewTag;
|
||||
if (w.get_eph_public_key(tip->m_txkeySec, i, eph_public_key, view_tag, &expected_view_tag) && (out.m_ephPublicKey == eph_public_key)) {
|
||||
your_reward = out.m_reward;
|
||||
}
|
||||
}
|
||||
else {
|
||||
uint8_t view_tag;
|
||||
if (w.get_eph_public_key(tip->m_txkeySec, i, eph_public_key, view_tag) && (out.m_ephPublicKey == eph_public_key)) {
|
||||
your_reward = out.m_reward;
|
||||
}
|
||||
}
|
||||
}
|
||||
total_reward += out.m_reward;
|
||||
}
|
||||
}
|
||||
|
||||
uint64_t product[2];
|
||||
product[0] = umul128(pool_hashrate, your_reward, &product[1]);
|
||||
const uint64_t hashrate_est = total_reward ? udiv128(product[1], product[0], total_reward, &rem) : 0;
|
||||
difficulty_type your_shares_weight, pplns_weight;
|
||||
for (const MinerShare& s : shares) {
|
||||
if (*s.m_wallet == w) {
|
||||
your_shares_weight = s.m_weight;
|
||||
}
|
||||
pplns_weight += s.m_weight;
|
||||
}
|
||||
|
||||
if (pplns_weight == 0) {
|
||||
pplns_weight = m_minDifficulty;
|
||||
}
|
||||
|
||||
const uint64_t total_reward = m_pool->block_template().get_reward();
|
||||
const uint64_t your_reward = ((your_shares_weight * total_reward) / pplns_weight).lo;
|
||||
const uint64_t hashrate_est = ((your_shares_weight * pool_hashrate) / pplns_weight).lo;
|
||||
|
||||
const double block_share = total_reward ? ((static_cast<double>(your_reward) * 100.0) / static_cast<double>(total_reward)) : 0.0;
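With the share weights in hand, the status report now derives everything from three ratios: your_reward = your_weight * block_reward / pplns_weight, the pool-side hashrate estimate = your_weight * pool_hashrate / pplns_weight, and block_share = your_reward / block_reward. A worked example with purely illustrative numbers, using unsigned __int128 (GCC/Clang) in place of p2pool's 128-bit difficulty arithmetic:

#include <cstdint>
#include <cstdio>

int main()
{
    // Illustrative values only (piconero amounts, H/s rates)
    const uint64_t your_weight   = 6'000'000'000ULL;
    const uint64_t pplns_weight  = 1'200'000'000'000ULL;
    const uint64_t total_reward  = 600'000'000'000ULL;  // 0.6 XMR
    const uint64_t pool_hashrate = 200'000'000ULL;      // 200 MH/s

    const uint64_t your_reward = static_cast<uint64_t>(
        static_cast<unsigned __int128>(your_weight) * total_reward / pplns_weight);  // 3e9 = 0.003 XMR
    const uint64_t hashrate_est = static_cast<uint64_t>(
        static_cast<unsigned __int128>(your_weight) * pool_hashrate / pplns_weight); // 1e6 = 1 MH/s

    std::printf("reward = %llu piconero, hashrate = %llu H/s, share = %.1f%%\n",
        static_cast<unsigned long long>(your_reward),
        static_cast<unsigned long long>(hashrate_est),
        100.0 * static_cast<double>(your_reward) / static_cast<double>(total_reward)); // 0.5%
    return 0;
}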
|
||||
|
||||
const uint32_t our_blocks_in_window_total = std::accumulate(our_blocks_in_window.begin(), our_blocks_in_window.end(), 0U);
|
||||
const uint32_t our_uncles_in_window_total = std::accumulate(our_uncles_in_window.begin(), our_uncles_in_window.end(), 0U);
|
||||
const uint64_t our_blocks_in_window_total = std::accumulate(our_blocks_in_window.begin(), our_blocks_in_window.end(), 0ULL);
|
||||
const uint64_t our_uncles_in_window_total = std::accumulate(our_uncles_in_window.begin(), our_uncles_in_window.end(), 0ULL);
|
||||
|
||||
std::string our_blocks_in_window_chart;
|
||||
if (our_blocks_in_window_total) {
|
||||
our_blocks_in_window_chart.reserve(our_blocks_in_window.size() + 32);
|
||||
our_blocks_in_window_chart = "\nYour shares position = [";
|
||||
for (uint32_t p : our_blocks_in_window) {
|
||||
for (uint64_t p : our_blocks_in_window) {
|
||||
our_blocks_in_window_chart += (p ? ((p > 9) ? '+' : static_cast<char>('0' + p)) : '.');
|
||||
}
|
||||
our_blocks_in_window_chart += ']';
|
||||
|
@ -991,13 +974,14 @@ void SideChain::print_status(bool obtain_sidechain_lock) const
|
|||
if (our_uncles_in_window_total) {
|
||||
our_uncles_in_window_chart.reserve(our_uncles_in_window.size() + 32);
|
||||
our_uncles_in_window_chart = "\nYour uncles position = [";
|
||||
for (uint32_t p : our_uncles_in_window) {
|
||||
for (uint64_t p : our_uncles_in_window) {
|
||||
our_uncles_in_window_chart += (p ? ((p > 9) ? '+' : static_cast<char>('0' + p)) : '.');
|
||||
}
|
||||
our_uncles_in_window_chart += ']';
|
||||
}
|
||||
|
||||
LOGINFO(0, "status" <<
|
||||
"\nMonero node = " << m_pool->host_str() <<
|
||||
"\nMain chain height = " << m_pool->block_template().height() <<
|
||||
"\nMain chain hashrate = " << log::Hashrate(network_hashrate) <<
|
||||
"\nSide chain ID = " << (is_default() ? "default" : (is_mini() ? "mini" : m_consensusIdDisplayStr.c_str())) <<
|
||||
|
@ -1005,7 +989,8 @@ void SideChain::print_status(bool obtain_sidechain_lock) const
|
|||
"\nSide chain hashrate = " << log::Hashrate(pool_hashrate) <<
|
||||
(hashrate_est ? "\nYour hashrate (pool-side) = " : "") << (hashrate_est ? log::Hashrate(hashrate_est) : log::Hashrate()) <<
|
||||
"\nPPLNS window = " << total_blocks_in_window << " blocks (+" << total_uncles_in_window << " uncles, " << total_orphans << " orphans)" <<
|
||||
"\nYour wallet address = " << m_pool->params().m_wallet <<
|
||||
"\nPPLNS window duration = " << log::Duration((pplns_weight / pool_hashrate).lo) <<
|
||||
"\nYour wallet address = " << w <<
|
||||
"\nYour shares = " << our_blocks_in_window_total << " blocks (+" << our_uncles_in_window_total << " uncles, " << our_orphans << " orphans)"
|
||||
<< our_blocks_in_window_chart << our_uncles_in_window_chart <<
|
||||
"\nBlock reward share = " << block_share << "% (" << log::XMRAmount(your_reward) << ')'
|
||||
|
@ -1047,12 +1032,12 @@ double SideChain::get_reward_share(const Wallet& w) const
|
|||
return total_reward ? (static_cast<double>(reward) / static_cast<double>(total_reward)) : 0.0;
|
||||
}
|
||||
|
||||
uint64_t SideChain::network_major_version(uint64_t height) const
|
||||
uint64_t SideChain::network_major_version(uint64_t height)
|
||||
{
|
||||
const hardfork_t* hard_forks;
|
||||
size_t num_hard_forks;
|
||||
|
||||
switch (m_networkType)
|
||||
switch (s_networkType)
|
||||
{
|
||||
case NetworkType::Mainnet:
|
||||
default:
|
||||
|
@ -1338,12 +1323,15 @@ void SideChain::verify_loop(PoolBlock* block)
|
|||
|
||||
void SideChain::verify(PoolBlock* block)
|
||||
{
|
||||
const int sidechain_version = block->get_sidechain_version();
|
||||
|
||||
// Genesis block
|
||||
if (block->m_sidechainHeight == 0) {
|
||||
if (!block->m_parent.empty() ||
|
||||
!block->m_uncles.empty() ||
|
||||
(block->m_difficulty != m_minDifficulty) ||
|
||||
(block->m_cumulativeDifficulty != m_minDifficulty))
|
||||
(block->m_cumulativeDifficulty != m_minDifficulty) ||
|
||||
((sidechain_version > 1) && (block->m_txkeySecSeed != m_consensusHash)))
|
||||
{
|
||||
block->m_invalid = true;
|
||||
}
|
||||
|
@ -1389,12 +1377,24 @@ void SideChain::verify(PoolBlock* block)
|
|||
return;
|
||||
}
|
||||
|
||||
if (sidechain_version > 1) {
|
||||
// Check m_txkeySecSeed
|
||||
const hash h = (block->m_prevId == parent->m_prevId) ? parent->m_txkeySecSeed : parent->calculate_tx_key_seed();
|
||||
if (block->m_txkeySecSeed != h) {
|
||||
LOGWARN(3, "block " << block->m_sidechainId << " has invalid tx key seed: expected " << h << ", got " << block->m_txkeySecSeed);
|
||||
block->m_verified = true;
|
||||
block->m_invalid = true;
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
const uint64_t expectedHeight = parent->m_sidechainHeight + 1;
|
||||
if (block->m_sidechainHeight != expectedHeight) {
|
||||
LOGWARN(3, "block at height = " << block->m_sidechainHeight <<
|
||||
", id = " << block->m_sidechainId <<
|
||||
", mainchain height = " << block->m_txinGenHeight <<
|
||||
" has wrong height: expected " << expectedHeight);
|
||||
block->m_verified = true;
|
||||
block->m_invalid = true;
|
||||
return;
|
||||
}
|
||||
|
@ -1658,6 +1658,11 @@ void SideChain::update_chain_tip(const PoolBlock* block)
|
|||
|
||||
const PoolBlock* tip = m_chainTip;
|
||||
|
||||
if (block == tip) {
|
||||
LOGINFO(5, "Trying to update chain tip to the same block again. Ignoring it.");
|
||||
return;
|
||||
}
|
||||
|
||||
bool is_alternative;
|
||||
if (is_longer_chain(tip, block, is_alternative)) {
|
||||
difficulty_type diff;
|
||||
|
@ -2016,7 +2021,7 @@ bool SideChain::load_config(const std::string& filename)
|
|||
parseValue(doc, "block_time", m_targetBlockTime);
|
||||
|
||||
uint64_t min_diff;
|
||||
if (parseValue(doc, "min_diff", min_diff)) {
|
||||
if (parseValue(doc, "min_diff", min_diff) && min_diff) {
|
||||
m_minDifficulty = { min_diff, 0 };
|
||||
}
|
||||
|
||||
|
@ -2048,6 +2053,7 @@ bool SideChain::check_config() const
|
|||
return false;
|
||||
}
|
||||
|
||||
if (s_networkType == NetworkType::Mainnet) {
|
||||
const difficulty_type min_diff{ MIN_DIFFICULTY, 0 };
|
||||
const difficulty_type max_diff{ 1000000000, 0 };
|
||||
|
||||
|
@ -2055,6 +2061,7 @@ bool SideChain::check_config() const
|
|||
LOGERR(1, "min_diff is invalid (must be between " << min_diff << " and " << max_diff << ')');
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
if ((m_chainWindowSize < 60) || (m_chainWindowSize > 2160)) {
|
||||
LOGERR(1, "pplns_window is invalid (must be between 60 and 2160)");
|
||||
|
@ -2090,10 +2097,10 @@ void SideChain::launch_precalc(const PoolBlock* block)
|
|||
if (b->m_precalculated) {
|
||||
continue;
|
||||
}
|
||||
std::vector<const Wallet*> wallets;
|
||||
if (get_wallets(b, wallets)) {
|
||||
std::vector<MinerShare> shares;
|
||||
if (get_shares(b, shares, nullptr, true)) {
|
||||
b->m_precalculated = true;
|
||||
PrecalcJob* job = new PrecalcJob{ b, std::move(wallets) };
|
||||
PrecalcJob* job = new PrecalcJob{ b, std::move(shares) };
|
||||
{
|
||||
MutexLock lock2(m_precalcJobsMutex);
|
||||
m_precalcJobs.push_back(job);
|
||||
|
@ -2130,20 +2137,20 @@ void SideChain::precalc_worker()
|
|||
uint8_t t[HASH_SIZE * 2 + sizeof(size_t)];
|
||||
memcpy(t, job->b->m_txkeySec.h, HASH_SIZE);
|
||||
|
||||
for (size_t i = 0, n = job->wallets.size(); i < n; ++i) {
|
||||
memcpy(t + HASH_SIZE, job->wallets[i]->view_public_key().h, HASH_SIZE);
|
||||
for (size_t i = 0, n = job->shares.size(); i < n; ++i) {
|
||||
memcpy(t + HASH_SIZE, job->shares[i].m_wallet->view_public_key().h, HASH_SIZE);
|
||||
memcpy(t + HASH_SIZE * 2, &i, sizeof(i));
|
||||
if (!m_uniquePrecalcInputs->insert(robin_hood::hash_bytes(t, array_size(t))).second) {
|
||||
job->wallets[i] = nullptr;
|
||||
job->shares[i].m_wallet = nullptr;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for (size_t i = 0, n = job->wallets.size(); i < n; ++i) {
|
||||
if (job->wallets[i]) {
|
||||
for (size_t i = 0, n = job->shares.size(); i < n; ++i) {
|
||||
if (job->shares[i].m_wallet) {
|
||||
hash eph_public_key;
|
||||
uint8_t view_tag;
|
||||
job->wallets[i]->get_eph_public_key(job->b->m_txkeySec, i, eph_public_key, view_tag);
|
||||
job->shares[i].m_wallet->get_eph_public_key(job->b->m_txkeySec, i, eph_public_key, view_tag);
|
||||
}
|
||||
}
|
||||
delete job;
|
||||
|
|
|
@ -32,6 +32,8 @@ struct MinerShare
|
|||
FORCEINLINE MinerShare() : m_weight(), m_wallet(nullptr) {}
|
||||
FORCEINLINE MinerShare(const difficulty_type& w, const Wallet* x) : m_weight(w), m_wallet(x) {}
|
||||
|
||||
FORCEINLINE bool operator==(const MinerShare& s) const { return *m_wallet == *s.m_wallet; }
|
||||
|
||||
difficulty_type m_weight;
|
||||
const Wallet* m_wallet;
|
||||
};
|
||||
|
@ -42,7 +44,7 @@ public:
|
|||
SideChain(p2pool* pool, NetworkType type, const char* pool_name = nullptr);
|
||||
~SideChain();
|
||||
|
||||
void fill_sidechain_data(PoolBlock& block, const Wallet* w, const hash& txkeySec, std::vector<MinerShare>& shares) const;
|
||||
void fill_sidechain_data(PoolBlock& block, std::vector<MinerShare>& shares) const;
|
||||
|
||||
bool block_seen(const PoolBlock& block);
|
||||
void unsee_block(const PoolBlock& block);
|
||||
|
@ -64,8 +66,8 @@ public:
|
|||
// Consensus ID can therefore be used as a password to create private P2Pools
|
||||
const std::vector<uint8_t>& consensus_id() const { return m_consensusId; }
|
||||
uint64_t chain_window_size() const { return m_chainWindowSize; }
|
||||
NetworkType network_type() const { return m_networkType; }
|
||||
uint64_t network_major_version(uint64_t height) const;
|
||||
static NetworkType network_type() { return s_networkType; }
|
||||
static uint64_t network_major_version(uint64_t height);
|
||||
FORCEINLINE difficulty_type difficulty() const { ReadLock lock(m_curDifficultyLock); return m_curDifficulty; }
|
||||
difficulty_type total_hashes() const;
|
||||
uint64_t block_time() const { return m_targetBlockTime; }
|
||||
|
@ -77,17 +79,20 @@ public:
|
|||
const PoolBlock* chainTip() const { return m_chainTip; }
|
||||
bool precalcFinished() const { return m_precalcFinished.load(); }
|
||||
|
||||
#ifdef P2POOL_UNIT_TESTS
|
||||
difficulty_type m_testMainChainDiff;
|
||||
#endif
|
||||
|
||||
static bool split_reward(uint64_t reward, const std::vector<MinerShare>& shares, std::vector<uint64_t>& rewards);
|
||||
|
||||
private:
|
||||
p2pool* m_pool;
|
||||
P2PServer* p2pServer() const;
|
||||
NetworkType m_networkType;
|
||||
static NetworkType s_networkType;
|
||||
|
||||
private:
|
||||
bool get_shares(const PoolBlock* tip, std::vector<MinerShare>& shares) const;
|
||||
bool get_shares(const PoolBlock* tip, std::vector<MinerShare>& shares, uint64_t* bottom_height = nullptr, bool quiet = false) const;
|
||||
bool get_difficulty(const PoolBlock* tip, std::vector<DifficultyData>& difficultyData, difficulty_type& curDifficulty) const;
|
||||
bool get_wallets(const PoolBlock* tip, std::vector<const Wallet*>& wallets) const;
|
||||
void verify_loop(PoolBlock* block);
|
||||
void verify(PoolBlock* block);
|
||||
void update_chain_tip(const PoolBlock* block);
|
||||
|
@ -134,7 +139,7 @@ private:
|
|||
struct PrecalcJob
|
||||
{
|
||||
const PoolBlock* b;
|
||||
std::vector<const Wallet*> wallets;
|
||||
std::vector<MinerShare> shares;
|
||||
};
|
||||
|
||||
uv_cond_t m_precalcJobsCond;
|
||||
|
@ -146,9 +151,24 @@ private:
|
|||
|
||||
std::atomic<bool> m_precalcFinished;
|
||||
|
||||
hash m_consensusHash;
|
||||
|
||||
void launch_precalc(const PoolBlock* block);
|
||||
void precalc_worker();
|
||||
void finish_precalc();
|
||||
};
|
||||
|
||||
} // namespace p2pool
|
||||
|
||||
namespace robin_hood {
|
||||
|
||||
template<>
|
||||
struct hash<p2pool::MinerShare>
|
||||
{
|
||||
FORCEINLINE size_t operator()(const p2pool::MinerShare& value) const noexcept
|
||||
{
|
||||
return hash_bytes(value.m_wallet->spend_public_key().h, p2pool::HASH_SIZE);
|
||||
}
|
||||
};
|
||||
|
||||
} // namespace robin_hood
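MinerShare now compares equal, and hashes, purely by the owning wallet's spend public key, which is what lets get_shares() merge a repeated miner with a single emplace instead of the old sort-and-combine pass. A minimal sketch of the same merge-on-insert idiom with a hypothetical Share type and std::unordered_set (p2pool keys on the spend public key via robin_hood::hash_bytes rather than on a string):

#include <cstdint>
#include <functional>
#include <string>
#include <unordered_set>

struct Share
{
    mutable uint64_t weight; // mutable: set elements are const, but the weight is not part of the key
    std::string wallet;      // stands in for the wallet's spend public key

    bool operator==(const Share& s) const { return wallet == s.wallet; }
};

namespace std {
template<> struct hash<Share>
{
    size_t operator()(const Share& s) const noexcept { return hash<string>{}(s.wallet); }
};
}

void add_share(std::unordered_set<Share>& set, uint64_t weight, const std::string& wallet)
{
    auto result = set.emplace(Share{ weight, wallet });
    if (!result.second) {
        result.first->weight += weight; // wallet already present: merge the weights
    }
}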
|
||||
|
|
|
@ -60,27 +60,26 @@ StratumServer::StratumServer(p2pool* pool)
|
|||
// Diffuse the initial state in case it has low quality
|
||||
m_rng.discard(10000);
|
||||
|
||||
m_extraNonce = static_cast<uint32_t>(m_rng());
|
||||
|
||||
m_hashrateData[0] = { seconds_since_epoch(), 0 };
|
||||
|
||||
uv_mutex_init_checked(&m_blobsQueueLock);
|
||||
uv_mutex_init_checked(&m_rngLock);
|
||||
uv_rwlock_init_checked(&m_hashrateDataLock);
|
||||
|
||||
m_extraNonce = PoolBlock::signal_v2_readiness(get_random32());
|
||||
|
||||
m_submittedSharesPool.resize(10);
|
||||
for (size_t i = 0; i < m_submittedSharesPool.size(); ++i) {
|
||||
m_submittedSharesPool[i] = new SubmittedShare{};
|
||||
}
|
||||
|
||||
const int err = uv_async_init(&m_loop, &m_blobsAsync, on_blobs_ready);
|
||||
if (err) {
|
||||
LOGERR(1, "uv_async_init failed, error " << uv_err_name(err));
|
||||
panic();
|
||||
}
|
||||
uv_async_init_checked(&m_loop, &m_blobsAsync, on_blobs_ready);
|
||||
m_blobsAsync.data = this;
|
||||
m_blobsQueue.reserve(2);
|
||||
|
||||
uv_async_init_checked(&m_loop, &m_showWorkersAsync, on_show_workers);
|
||||
m_showWorkersAsync.data = this;
|
||||
|
||||
start_listening(pool->params().m_stratumAddresses);
|
||||
}
|
||||
|
||||
|
@ -107,7 +106,7 @@ void StratumServer::on_block(const BlockTemplate& block)
|
|||
return;
|
||||
}
|
||||
|
||||
const uint32_t extra_nonce_start = static_cast<uint32_t>(get_random64());
|
||||
const uint32_t extra_nonce_start = PoolBlock::signal_v2_readiness(get_random32());
|
||||
m_extraNonce.exchange(extra_nonce_start + num_connections);
|
||||
|
||||
BlobsData* blobs_data = new BlobsData{};
|
||||
|
@ -278,12 +277,13 @@ bool StratumServer::on_login(StratumClient* client, uint32_t id, const char* log
|
|||
saved_job.template_id = template_id;
|
||||
saved_job.target = target;
|
||||
}
|
||||
client->m_lastJobTarget = target;
|
||||
|
||||
const bool result = send(client,
|
||||
[client, id, &hashing_blob, job_id, blob_size, target, height, &seed_hash](void* buf, size_t buf_size)
|
||||
{
|
||||
do {
|
||||
client->m_rpcId = static_cast<uint32_t>(static_cast<StratumServer*>(client->m_owner)->get_random64());
|
||||
client->m_rpcId = static_cast<StratumServer*>(client->m_owner)->get_random32();
|
||||
} while (!client->m_rpcId);
|
||||
|
||||
log::hex_buf target_hex(reinterpret_cast<const uint8_t*>(&target), sizeof(uint64_t));
|
||||
|
@ -463,10 +463,10 @@ bool StratumServer::on_submit(StratumClient* client, uint32_t id, const char* jo
|
|||
return result;
|
||||
}
|
||||
|
||||
uint64_t StratumServer::get_random64()
|
||||
uint32_t StratumServer::get_random32()
|
||||
{
|
||||
MutexLock lock(m_rngLock);
|
||||
return m_rng();
|
||||
return static_cast<uint32_t>(m_rng() >> 32);
|
||||
}
|
||||
|
||||
void StratumServer::print_status()
|
||||
|
@ -475,6 +475,13 @@ void StratumServer::print_status()
|
|||
print_stratum_status();
|
||||
}
|
||||
|
||||
void StratumServer::show_workers_async()
|
||||
{
|
||||
if (!uv_is_closing(reinterpret_cast<uv_handle_t*>(&m_showWorkersAsync))) {
|
||||
uv_async_send(&m_showWorkersAsync);
|
||||
}
|
||||
}
|
||||
|
||||
void StratumServer::show_workers()
|
||||
{
|
||||
const uint64_t cur_time = seconds_since_epoch();
|
||||
|
@ -497,15 +504,14 @@ void StratumServer::show_workers()
|
|||
);
|
||||
|
||||
for (const StratumClient* c = static_cast<StratumClient*>(m_connectedClientsList->m_next); c != m_connectedClientsList; c = static_cast<StratumClient*>(c->m_next)) {
|
||||
difficulty_type diff;
|
||||
if (c->m_customDiff != 0) {
|
||||
diff = c->m_customDiff;
|
||||
difficulty_type diff = pool_diff;
|
||||
if (c->m_lastJobTarget > 1) {
|
||||
uint64_t r;
|
||||
diff.lo = udiv128(1, 0, c->m_lastJobTarget, &r);
|
||||
diff.hi = 0;
|
||||
if (r) {
|
||||
++diff.lo;
|
||||
}
|
||||
else if (m_autoDiff && (c->m_autoDiff != 0)) {
|
||||
diff = c->m_autoDiff;
|
||||
}
|
||||
else {
|
||||
diff = pool_diff;
|
||||
}
|
||||
LOGINFO(0, log::pad_right(static_cast<const char*>(c->m_addrString), addr_len + 8)
|
||||
<< log::pad_right(log::Duration(cur_time - c->m_connectedTime), 20)
|
||||
|
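show_workers() now reports each connection's difficulty straight from the last job target it was sent: difficulty = ceil(2^64 / target), computed with udiv128(1, 0, target) plus a round-up when there is a remainder. A small standalone sketch of the same conversion, using unsigned __int128 (GCC/Clang) and an illustrative helper name, with one worked value:

#include <cstdint>
#include <cstdio>

// difficulty = ceil(2^64 / target); a target of 0 or 1 means "no meaningful limit".
uint64_t target_to_difficulty(uint64_t target)
{
    if (target <= 1) {
        return 0; // caller falls back to the pool-wide difficulty
    }
    const unsigned __int128 numerator = static_cast<unsigned __int128>(1) << 64;
    uint64_t diff = static_cast<uint64_t>(numerator / target);
    if (numerator % target) {
        ++diff; // round up, matching the `if (r) ++diff.lo;` in show_workers()
    }
    return diff;
}

int main()
{
    // Worked example: a job target of 2^48 corresponds to difficulty 2^64 / 2^48 = 65536
    std::printf("%llu\n", static_cast<unsigned long long>(target_to_difficulty(1ULL << 48)));
    return 0;
}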
@ -585,7 +591,7 @@ void StratumServer::print_stratum_status() const
|
|||
"\nHashrate (1h est) = " << log::Hashrate(hashrate_1h) <<
|
||||
"\nHashrate (24h est) = " << log::Hashrate(hashrate_24h) <<
|
||||
"\nTotal hashes = " << total_hashes <<
|
||||
"\nShares found = " << shares_found << shares_failed_buf <<
|
||||
"\nShares found = " << shares_found << static_cast<const char*>(shares_failed_buf) <<
|
||||
"\nAverage effort = " << average_effort << '%' <<
|
||||
"\nCurrent effort = " << static_cast<double>(hashes_since_last_share) * 100.0 / m_pool->side_chain().difficulty().to_double() << '%' <<
|
||||
"\nConnections = " << m_numConnections.load() << " (" << m_numIncomingConnections.load() << " incoming)"
|
||||
|
@ -756,9 +762,10 @@ void StratumServer::on_blobs_ready()
|
|||
saved_job.template_id = data->m_templateId;
|
||||
saved_job.target = target;
|
||||
}
|
||||
client->m_lastJobTarget = target;
|
||||
|
||||
const bool result = send(client,
|
||||
[data, target, hashing_blob, &job_id](void* buf, size_t buf_size)
|
||||
[data, target, hashing_blob, job_id](void* buf, size_t buf_size)
|
||||
{
|
||||
log::hex_buf target_hex(reinterpret_cast<const uint8_t*>(&target), sizeof(uint64_t));
|
||||
|
||||
|
@ -828,12 +835,19 @@ void StratumServer::update_hashrate_data(uint64_t hashes, uint64_t timestamp)
|
|||
void StratumServer::on_share_found(uv_work_t* req)
|
||||
{
|
||||
SubmittedShare* share = reinterpret_cast<SubmittedShare*>(req->data);
|
||||
StratumServer* server = share->m_server;
|
||||
|
||||
if (server->is_banned(share->m_clientAddr)) {
|
||||
share->m_highEnoughDifficulty = false;
|
||||
share->m_result = SubmittedShare::Result::BANNED;
|
||||
return;
|
||||
}
|
||||
|
||||
if (share->m_highEnoughDifficulty) {
|
||||
BACKGROUND_JOB_START(StratumServer::on_share_found);
|
||||
}
|
||||
|
||||
StratumClient* client = share->m_client;
|
||||
StratumServer* server = share->m_server;
|
||||
p2pool* pool = server->m_pool;
|
||||
|
||||
const uint64_t target = share->m_target;
|
||||
|
@ -952,6 +966,9 @@ void StratumServer::on_after_share_found(uv_work_t* req, int /*status*/)
|
|||
case SubmittedShare::Result::INVALID_POW:
|
||||
s << "{\"id\":" << share->m_id << ",\"jsonrpc\":\"2.0\",\"error\":{\"message\":\"Invalid PoW\"}}\n";
|
||||
break;
|
||||
case SubmittedShare::Result::BANNED:
|
||||
s << "{\"id\":" << share->m_id << ",\"jsonrpc\":\"2.0\",\"error\":{\"message\":\"Banned\"}}\n";
|
||||
break;
|
||||
case SubmittedShare::Result::OK:
|
||||
s << "{\"id\":" << share->m_id << ",\"jsonrpc\":\"2.0\",\"error\":null,\"result\":{\"status\":\"OK\"}}\n";
|
||||
break;
|
||||
|
@ -975,6 +992,7 @@ void StratumServer::on_after_share_found(uv_work_t* req, int /*status*/)
|
|||
void StratumServer::on_shutdown()
|
||||
{
|
||||
uv_close(reinterpret_cast<uv_handle_t*>(&m_blobsAsync), nullptr);
|
||||
uv_close(reinterpret_cast<uv_handle_t*>(&m_showWorkersAsync), nullptr);
|
||||
}
|
||||
|
||||
StratumServer::StratumClient::StratumClient()
|
||||
|
@ -988,6 +1006,7 @@ StratumServer::StratumClient::StratumClient()
|
|||
, m_customDiff{}
|
||||
, m_autoDiff{}
|
||||
, m_customUser{}
|
||||
, m_lastJobTarget(0)
|
||||
, m_score(0)
|
||||
{
|
||||
}
|
||||
|
@ -1009,6 +1028,8 @@ void StratumServer::StratumClient::reset()
|
|||
m_autoDiff = {};
|
||||
m_customUser[0] = '\0';
|
||||
|
||||
m_lastJobTarget = 0;
|
||||
|
||||
m_score = 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -79,15 +79,17 @@ public:
|
|||
difficulty_type m_autoDiff;
|
||||
char m_customUser[32];
|
||||
|
||||
uint64_t m_lastJobTarget;
|
||||
|
||||
int32_t m_score;
|
||||
};
|
||||
|
||||
bool on_login(StratumClient* client, uint32_t id, const char* login);
|
||||
bool on_submit(StratumClient* client, uint32_t id, const char* job_id_str, const char* nonce_str, const char* result_str);
|
||||
uint64_t get_random64();
|
||||
uint32_t get_random32();
|
||||
|
||||
void print_status() override;
|
||||
void show_workers();
|
||||
void show_workers_async();
|
||||
|
||||
void reset_share_counters();
|
||||
|
||||
|
@ -120,6 +122,11 @@ private:
|
|||
static void on_blobs_ready(uv_async_t* handle) { reinterpret_cast<StratumServer*>(handle->data)->on_blobs_ready(); }
|
||||
void on_blobs_ready();
|
||||
|
||||
uv_async_t m_showWorkersAsync;
|
||||
|
||||
static void on_show_workers(uv_async_t* handle) { reinterpret_cast<StratumServer*>(handle->data)->show_workers(); }
|
||||
void show_workers();
|
||||
|
||||
std::atomic<uint32_t> m_extraNonce;
|
||||
|
||||
uv_mutex_t m_rngLock;
|
||||
|
@ -152,6 +159,7 @@ private:
|
|||
COULDNT_CHECK_POW,
|
||||
LOW_DIFF,
|
||||
INVALID_POW,
|
||||
BANNED,
|
||||
OK
|
||||
} m_result;
|
||||
};
|
||||
|
|
|
@ -102,9 +102,10 @@ public:
|
|||
|
||||
struct WriteBuf
|
||||
{
|
||||
Client* m_client = nullptr;
|
||||
uv_write_t m_write = {};
|
||||
std::vector<uint8_t> m_data;
|
||||
Client* m_client = nullptr;
|
||||
void* m_data = nullptr;
|
||||
size_t m_dataCapacity = 0;
|
||||
};
|
||||
|
||||
std::vector<WriteBuf*> m_writeBuffers;
|
||||
|
|
|
@ -41,7 +41,7 @@ TCPServer<READ_BUF_SIZE, WRITE_BUF_SIZE>::TCPServer(allocate_client_callback all
|
|||
int err = uv_loop_init(&m_loop);
|
||||
if (err) {
|
||||
LOGERR(1, "failed to create event loop, error " << uv_err_name(err));
|
||||
panic();
|
||||
PANIC_STOP();
|
||||
}
|
||||
|
||||
// Init loop user data before running it
|
||||
|
@ -50,14 +50,14 @@ TCPServer<READ_BUF_SIZE, WRITE_BUF_SIZE>::TCPServer(allocate_client_callback all
|
|||
err = uv_async_init(&m_loop, &m_dropConnectionsAsync, on_drop_connections);
|
||||
if (err) {
|
||||
LOGERR(1, "uv_async_init failed, error " << uv_err_name(err));
|
||||
panic();
|
||||
PANIC_STOP();
|
||||
}
|
||||
m_dropConnectionsAsync.data = this;
|
||||
|
||||
err = uv_async_init(&m_loop, &m_shutdownAsync, on_shutdown);
|
||||
if (err) {
|
||||
LOGERR(1, "uv_async_init failed, error " << uv_err_name(err));
|
||||
panic();
|
||||
PANIC_STOP();
|
||||
}
|
||||
m_shutdownAsync.data = this;
|
||||
|
||||
|
@ -133,7 +133,7 @@ void TCPServer<READ_BUF_SIZE, WRITE_BUF_SIZE>::start_listening(const std::string
|
|||
{
|
||||
if (listen_addresses.empty()) {
|
||||
LOGERR(1, "listen address not set");
|
||||
panic();
|
||||
PANIC_STOP();
|
||||
}
|
||||
|
||||
parse_address_list(listen_addresses,
|
||||
|
@ -144,7 +144,7 @@ void TCPServer<READ_BUF_SIZE, WRITE_BUF_SIZE>::start_listening(const std::string
|
|||
}
|
||||
else if (m_listenPort != port) {
|
||||
LOGERR(1, "all sockets must be listening on the same port number, fix the command line");
|
||||
panic();
|
||||
PANIC_STOP();
|
||||
}
|
||||
|
||||
uv_tcp_t* socket = new uv_tcp_t();
|
||||
|
@ -159,14 +159,14 @@ void TCPServer<READ_BUF_SIZE, WRITE_BUF_SIZE>::start_listening(const std::string
|
|||
int err = uv_tcp_init(&m_loop, socket);
|
||||
if (err) {
|
||||
LOGERR(1, "failed to create tcp server handle, error " << uv_err_name(err));
|
||||
panic();
|
||||
PANIC_STOP();
|
||||
}
|
||||
socket->data = this;
|
||||
|
||||
err = uv_tcp_nodelay(socket, 1);
|
||||
if (err) {
|
||||
LOGERR(1, "failed to set tcp_nodelay on tcp server handle, error " << uv_err_name(err));
|
||||
panic();
|
||||
PANIC_STOP();
|
||||
}
|
||||
|
||||
if (is_v6) {
|
||||
|
@ -174,13 +174,13 @@ void TCPServer<READ_BUF_SIZE, WRITE_BUF_SIZE>::start_listening(const std::string
|
|||
err = uv_ip6_addr(ip.c_str(), port, &addr6);
|
||||
if (err) {
|
||||
LOGERR(1, "failed to parse IPv6 address " << ip << ", error " << uv_err_name(err));
|
||||
panic();
|
||||
PANIC_STOP();
|
||||
}
|
||||
|
||||
err = uv_tcp_bind(socket, reinterpret_cast<sockaddr*>(&addr6), UV_TCP_IPV6ONLY);
|
||||
if (err) {
|
||||
LOGERR(1, "failed to bind tcp server IPv6 socket " << address << ", error " << uv_err_name(err));
|
||||
panic();
|
||||
PANIC_STOP();
|
||||
}
|
||||
}
|
||||
else {
|
||||
|
@ -188,20 +188,20 @@ void TCPServer<READ_BUF_SIZE, WRITE_BUF_SIZE>::start_listening(const std::string
|
|||
err = uv_ip4_addr(ip.c_str(), port, &addr);
|
||||
if (err) {
|
||||
LOGERR(1, "failed to parse IPv4 address " << ip << ", error " << uv_err_name(err));
|
||||
panic();
|
||||
PANIC_STOP();
|
||||
}
|
||||
|
||||
err = uv_tcp_bind(socket, reinterpret_cast<sockaddr*>(&addr), 0);
|
||||
if (err) {
|
||||
LOGERR(1, "failed to bind tcp server IPv4 socket " << address << ", error " << uv_err_name(err));
|
||||
panic();
|
||||
PANIC_STOP();
|
||||
}
|
||||
}
|
||||
|
||||
err = uv_listen(reinterpret_cast<uv_stream_t*>(socket), DEFAULT_BACKLOG, on_new_connection);
|
||||
if (err) {
|
||||
LOGERR(1, "failed to listen on tcp server socket " << address << ", error " << uv_err_name(err));
|
||||
panic();
|
||||
PANIC_STOP();
|
||||
}
|
||||
|
||||
LOGINFO(1, "listening on " << log::Gray() << address);
|
||||
|
@ -210,7 +210,7 @@ void TCPServer<READ_BUF_SIZE, WRITE_BUF_SIZE>::start_listening(const std::string
|
|||
const int err = uv_thread_create(&m_loopThread, loop, this);
|
||||
if (err) {
|
||||
LOGERR(1, "failed to start event loop thread, error " << uv_err_name(err));
|
||||
panic();
|
||||
PANIC_STOP();
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -376,7 +376,7 @@ bool TCPServer<READ_BUF_SIZE, WRITE_BUF_SIZE>::connect_to_peer(Client* client)
|
|||
|
||||
err = uv_tcp_connect(connect_request, &client->m_socket, reinterpret_cast<sockaddr*>(&addr), on_connect);
|
||||
if (err) {
|
||||
LOGERR(1, "failed to initiate tcp connection to " << static_cast<const char*>(client->m_addrString) << ", error " << uv_err_name(err));
|
||||
LOGWARN(5, "failed to initiate tcp connection to " << static_cast<const char*>(client->m_addrString) << ", error " << uv_err_name(err));
|
||||
m_pendingConnections.erase(client->m_addr);
|
||||
uv_close(reinterpret_cast<uv_handle_t*>(&client->m_socket), on_connection_error);
|
||||
return false;
|
||||
|
@ -509,7 +509,7 @@ bool TCPServer<READ_BUF_SIZE, WRITE_BUF_SIZE>::send_internal(Client* client, Sen
|
|||
|
||||
if (bytes_written > WRITE_BUF_SIZE) {
|
||||
LOGERR(0, "send callback wrote " << bytes_written << " bytes, expected no more than " << WRITE_BUF_SIZE << " bytes");
|
||||
panic();
|
||||
PANIC_STOP();
|
||||
}
|
||||
|
||||
if (bytes_written == 0) {
|
||||
|
@ -518,13 +518,22 @@ bool TCPServer<READ_BUF_SIZE, WRITE_BUF_SIZE>::send_internal(Client* client, Sen
|
|||
return true;
|
||||
}
|
||||
|
||||
buf->m_client = client;
|
||||
buf->m_write.data = buf;
|
||||
buf->m_data.reserve(round_up(bytes_written, 64));
|
||||
buf->m_data.assign(callback_buf, callback_buf + bytes_written);
|
||||
buf->m_client = client;
|
||||
|
||||
if (buf->m_dataCapacity < bytes_written) {
|
||||
buf->m_dataCapacity = round_up(bytes_written, 64);
|
||||
buf->m_data = realloc_hook(buf->m_data, buf->m_dataCapacity);
|
||||
if (!buf->m_data) {
|
||||
LOGERR(0, "failed to allocate " << buf->m_dataCapacity << " bytes to send data");
|
||||
PANIC_STOP();
|
||||
}
|
||||
}
|
||||
|
||||
memcpy(buf->m_data, callback_buf, bytes_written);
|
||||
|
||||
uv_buf_t bufs[1];
|
||||
bufs[0].base = reinterpret_cast<char*>(buf->m_data.data());
|
||||
bufs[0].base = reinterpret_cast<char*>(buf->m_data);
|
||||
bufs[0].len = static_cast<int>(bytes_written);
|
||||
|
||||
const int err = uv_write(&buf->m_write, reinterpret_cast<uv_stream_t*>(&client->m_socket), bufs, 1, Client::on_write);
|
||||
|
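The send path above stops re-allocating a std::vector per outgoing message: each WriteBuf keeps a raw pointer plus its capacity, grows it with realloc_hook only when a payload does not fit (rounded up to 64 bytes), and the event loop frees the pooled buffers on shutdown. A standalone sketch of that grow-only buffer, with a hypothetical ReusableBuf type and plain realloc/free where p2pool uses its allocation hooks:

#include <cstdlib>
#include <cstring>

struct ReusableBuf
{
    void*  data = nullptr;
    size_t capacity = 0;

    ~ReusableBuf() { std::free(data); }

    // Copies `len` bytes in, growing the backing storage only when needed.
    // Returns false if the allocation failed (the caller decides how fatal that is).
    bool assign(const void* src, size_t len)
    {
        if (capacity < len) {
            const size_t new_capacity = (len + 63) & ~size_t(63); // round up to 64 bytes
            void* p = std::realloc(data, new_capacity);
            if (!p) {
                return false;
            }
            data = p;
            capacity = new_capacity;
        }
        std::memcpy(data, src, len);
        return true;
    }
};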
@ -562,6 +571,7 @@ void TCPServer<READ_BUF_SIZE, WRITE_BUF_SIZE>::loop(void* data)
|
|||
}
|
||||
|
||||
for (WriteBuf* buf : server->m_writeBuffers) {
|
||||
free_hook(buf->m_data);
|
||||
delete buf;
|
||||
}
|
||||
server->m_writeBuffers.clear();
|
||||
|
@ -810,7 +820,7 @@ void TCPServer<READ_BUF_SIZE, WRITE_BUF_SIZE>::on_shutdown(uv_async_t* async)
|
|||
delete GetLoopUserData(&s->m_loop, false);
|
||||
|
||||
s->m_numHandles = 0;
|
||||
uv_walk(&s->m_loop, [](uv_handle_t*, void* n) { (*reinterpret_cast<size_t*>(n))++; }, &s->m_numHandles);
|
||||
uv_walk(&s->m_loop, [](uv_handle_t*, void* n) { (*reinterpret_cast<uint32_t*>(n))++; }, &s->m_numHandles);
|
||||
|
||||
uv_prepare_init(&s->m_loop, &s->m_shutdownPrepare);
|
||||
s->m_shutdownPrepare.data = s;
|
||||
|
@ -843,7 +853,7 @@ void TCPServer<READ_BUF_SIZE, WRITE_BUF_SIZE>::on_shutdown(uv_async_t* async)
|
|||
TCPServer* s = reinterpret_cast<TCPServer*>(h->data);
|
||||
|
||||
s->m_numHandles = 0;
|
||||
uv_walk(&s->m_loop, [](uv_handle_t*, void* n) { (*reinterpret_cast<size_t*>(n))++; }, &s->m_numHandles);
|
||||
uv_walk(&s->m_loop, [](uv_handle_t*, void* n) { (*reinterpret_cast<uint32_t*>(n))++; }, &s->m_numHandles);
|
||||
|
||||
if (s->m_numHandles > 2) {
|
||||
// Don't count m_shutdownTimer and m_shutdownPrepare
|
||||
|
|
25
src/util.cpp
|
@ -29,9 +29,6 @@ static constexpr char log_category_prefix[] = "Util ";
|
|||
|
||||
namespace p2pool {
|
||||
|
||||
#define STR2(X) STR(X)
|
||||
#define STR(X) #X
|
||||
|
||||
const char* VERSION = "v" STR2(P2POOL_VERSION_MAJOR) "." STR2(P2POOL_VERSION_MINOR) " (built"
|
||||
#if defined(__clang__)
|
||||
" with clang/" __clang_version__
|
||||
|
@ -42,13 +39,12 @@ const char* VERSION = "v" STR2(P2POOL_VERSION_MAJOR) "." STR2(P2POOL_VERSION_MIN
|
|||
#endif
|
||||
" on " __DATE__ ")";
|
||||
|
||||
#undef STR2
|
||||
#undef STR
|
||||
|
||||
MinerCallbackHandler::~MinerCallbackHandler() {}
|
||||
|
||||
void panic()
|
||||
void panic_stop(const char* message)
|
||||
{
|
||||
fprintf(stderr, "P2Pool can't continue execution: panic at %s\n", message);
|
||||
|
||||
p2pool::log::stop();
|
||||
do {
|
||||
#ifdef _WIN32
|
||||
|
@ -254,7 +250,7 @@ void uv_cond_init_checked(uv_cond_t* cond)
|
|||
const int result = uv_cond_init(cond);
|
||||
if (result) {
|
||||
LOGERR(1, "failed to create conditional variable, error " << uv_err_name(result));
|
||||
panic();
|
||||
PANIC_STOP();
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -263,7 +259,7 @@ void uv_mutex_init_checked(uv_mutex_t* mutex)
|
|||
const int result = uv_mutex_init(mutex);
|
||||
if (result) {
|
||||
LOGERR(1, "failed to create mutex, error " << uv_err_name(result));
|
||||
panic();
|
||||
PANIC_STOP();
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -272,7 +268,7 @@ void uv_rwlock_init_checked(uv_rwlock_t* lock)
|
|||
const int result = uv_rwlock_init(lock);
|
||||
if (result) {
|
||||
LOGERR(1, "failed to create rwlock, error " << uv_err_name(result));
|
||||
panic();
|
||||
PANIC_STOP();
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -281,7 +277,7 @@ void uv_async_init_checked(uv_loop_t* loop, uv_async_t* async, uv_async_cb async
|
|||
const int err = uv_async_init(loop, async, async_cb);
|
||||
if (err) {
|
||||
LOGERR(1, "uv_async_init failed, error " << uv_err_name(err));
|
||||
panic();
|
||||
PANIC_STOP();
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -424,7 +420,12 @@ bool resolve_host(std::string& host, bool& is_v6)
|
|||
hints.ai_flags = AI_ADDRCONFIG;
|
||||
|
||||
addrinfo* r = nullptr;
|
||||
const int err = getaddrinfo(host.c_str(), nullptr, &hints, &r);
|
||||
int err = getaddrinfo(host.c_str(), nullptr, &hints, &r);
|
||||
if (err) {
|
||||
LOGWARN(4, "getaddrinfo failed for " << host << ": " << gai_strerror(err) << ", retrying with IPv4 only");
|
||||
hints.ai_family = AF_INET;
|
||||
err = getaddrinfo(host.c_str(), nullptr, &hints, &r);
|
||||
}
|
||||
if ((err == 0) && r) {
|
||||
const char* addr_str = nullptr;
|
||||
char addr_str_buf[64];
|
||||
|
|
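The resolve_host() change above retries name resolution with an IPv4-only hint when the first AF_UNSPEC + AI_ADDRCONFIG query fails, which helps on hosts whose IPv6 stack is configured but not actually usable. A POSIX-flavoured sketch of the same fallback with a hypothetical helper name (on Windows the equivalent calls come from ws2tcpip.h):

#include <sys/socket.h>
#include <netdb.h>
#include <cstdio>

// Resolve `host`, preferring whatever families are configured, but falling back
// to an IPv4-only query if the first attempt fails. Returns the addrinfo list
// (caller must freeaddrinfo() it) or nullptr on failure.
addrinfo* resolve_with_fallback(const char* host)
{
    addrinfo hints{};
    hints.ai_family   = AF_UNSPEC;
    hints.ai_socktype = SOCK_STREAM;
    hints.ai_flags    = AI_ADDRCONFIG;

    addrinfo* result = nullptr;
    int err = getaddrinfo(host, nullptr, &hints, &result);
    if (err) {
        std::fprintf(stderr, "getaddrinfo(%s): %s, retrying with IPv4 only\n", host, gai_strerror(err));
        hints.ai_family = AF_INET;
        err = getaddrinfo(host, nullptr, &hints, &result);
    }
    return err ? nullptr : result;
}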
15
src/util.h
|
@ -149,7 +149,12 @@ FORCEINLINE T read_unaligned(const T* p)
|
|||
template<typename T, size_t N> FORCEINLINE constexpr size_t array_size(T(&)[N]) { return N; }
|
||||
template<typename T, typename U, size_t N> FORCEINLINE constexpr size_t array_size(T(U::*)[N]) { return N; }
|
||||
|
||||
[[noreturn]] void panic();
|
||||
[[noreturn]] void panic_stop(const char* message);
|
||||
|
||||
#define STR(X) #X
|
||||
#define STR2(X) STR(X)
|
||||
|
||||
#define PANIC_STOP(...) panic_stop(__FILE__ ":" STR2(__LINE__))
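PANIC_STOP() relies on the classic two-step stringizing trick: STR2(__LINE__) expands __LINE__ to the actual number before STR() turns it into a string literal, so the message handed to panic_stop() pins down the exact call site. A tiny illustration; STR/STR2 match the helpers above, while WHERE() is a hypothetical name used only for the demo:

#include <cstdio>

#define STR(X)  #X
#define STR2(X) STR(X)
#define WHERE() (__FILE__ ":" STR2(__LINE__))

int main()
{
    std::puts(WHERE()); // prints something like "example.cpp:9" for line 9 of example.cpp
    return 0;
}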
|
||||
|
||||
void make_thread_background();
|
||||
|
||||
|
@ -208,6 +213,14 @@ struct RandomDeviceSeed
|
|||
static RandomDeviceSeed instance;
|
||||
};
|
||||
|
||||
FORCEINLINE uint64_t xorshift64star(uint64_t x)
{
x ^= x >> 12;
x ^= x << 25;
x ^= x >> 27;
return x * 0x2545F4914F6CDD1DULL;
}

FORCEINLINE uint64_t seconds_since_epoch()
|
||||
{
|
||||
using namespace std::chrono;
|
||||
|
|
|
@ -18,6 +18,7 @@
|
|||
#pragma once
|
||||
|
||||
#include <uv.h>
|
||||
#include <thread>
|
||||
|
||||
static_assert(sizeof(in6_addr) == 16, "struct in6_addr has invalid size");
|
||||
static_assert(sizeof(in_addr) == 4, "struct in_addr has invalid size");
|
||||
|
@ -175,4 +176,64 @@ bool CallOnLoop(uv_loop_t* loop, T&& callback)
|
|||
return false;
|
||||
}
|
||||
|
||||
template<typename T>
void parallel_run(uv_loop_t* loop, T&& callback, bool wait = false)
{
uint32_t THREAD_COUNT = std::thread::hardware_concurrency();

if (THREAD_COUNT > 0) {
--THREAD_COUNT;
}

// No more than 8 threads because our UV worker thread pool has 8 threads
if (THREAD_COUNT > 8) {
THREAD_COUNT = 8;
}

struct Callback
{
explicit FORCEINLINE Callback(T&& f) : m_func(std::move(f)) {}
Callback& operator=(Callback&&) = delete;

T m_func;
};

std::shared_ptr<Callback> cb = std::make_shared<Callback>(std::move(callback));

struct Work
{
uv_work_t req;
std::shared_ptr<Callback> cb;
};

for (size_t i = 0; i < THREAD_COUNT; ++i) {
Work* w = new Work{ {}, cb };
w->req.data = w;

const int err = uv_queue_work(loop, &w->req,
[](uv_work_t* req)
{
std::shared_ptr<Callback>& cb = reinterpret_cast<Work*>(req->data)->cb;
cb->m_func();
cb.reset();
},
[](uv_work_t* req, int)
{
delete reinterpret_cast<Work*>(req->data);
});

if (err) {
delete w;
}
}

if (wait) {
cb->m_func();

while (cb.use_count() > 1) {
std::this_thread::yield();
}
}
}

} // namespace p2pool
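The new parallel_run() helper fans one callable out to at most 8 libuv worker threads (one less than hardware_concurrency, capped by the default UV thread pool size) and, with wait set to true, also runs it on the calling thread and blocks until every worker has dropped its reference. A hedged usage sketch; Item, heavy_work and precompute_parallel are illustrative names, and it assumes this uv_util.h is included and that the loop is being driven by uv_run() elsewhere. The shared atomic counter mirrors how get_outputs_blob() splits its key precomputation:

#include <uv.h>
#include <atomic>
#include <cstdint>
#include <vector>

struct Item { uint64_t in = 0, out = 0; };                   // hypothetical payload
inline void heavy_work(Item& it) { it.out = it.in * it.in; } // stand-in for real work

void precompute_parallel(uv_loop_t* loop, std::vector<Item>& items)
{
    std::atomic<size_t> next{ 0 };

    p2pool::parallel_run(loop, [&items, &next]() {
        // All workers (and, because wait == true, the calling thread) pull from one counter
        size_t i;
        while ((i = next.fetch_add(1)) < items.size()) {
            heavy_work(items[i]);
        }
    }, true); // wait == true: blocks until every copy of the callback has finished
}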
|
||||
|
|
|
@ -5,7 +5,7 @@ option(STATIC_LIBS "Use locally built libuv and libzmq static libs" OFF)
|
|||
|
||||
set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_SOURCE_DIR}/cmake")
|
||||
|
||||
if (${CMAKE_VERSION} VERSION_GREATER_EQUAL "3.6.0")
|
||||
if (${CMAKE_VERSION} VERSION_GREATER "3.5.2")
|
||||
set_property(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} PROPERTY VS_STARTUP_PROJECT p2pool_tests)
|
||||
endif()
|
||||
|
||||
|
@ -16,6 +16,8 @@ add_subdirectory(../external/src/RandomX RandomX)
|
|||
set(LIBS ${LIBS} randomx)
|
||||
add_definitions(-DWITH_RANDOMX)
|
||||
|
||||
add_definitions(-DP2POOL_UNIT_TESTS)
|
||||
|
||||
include(cmake/flags.cmake)
|
||||
|
||||
set(HEADERS
|
||||
|
@ -94,7 +96,7 @@ if (WIN32)
|
|||
endif()
|
||||
add_definitions(-DCURL_STATICLIB)
|
||||
elseif (NOT APPLE)
|
||||
set(LIBS ${LIBS} pthread gss dl)
|
||||
set(LIBS ${LIBS} pthread)
|
||||
endif()
|
||||
|
||||
if (STATIC_LIBS)
|
||||
|
|
|
@ -8,7 +8,7 @@ set(CMAKE_C_STANDARD_REQUIRED ON)
|
|||
if (CMAKE_CXX_COMPILER_ID MATCHES GNU)
|
||||
set(GENERAL_FLAGS "-pthread")
|
||||
set(WARNING_FLAGS "-Wall -Wextra")
|
||||
set(OPTIMIZATION_FLAGS "-Ofast -s")
|
||||
set(OPTIMIZATION_FLAGS "-Ofast -s -flto -fuse-linker-plugin")
|
||||
|
||||
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${GENERAL_FLAGS} ${WARNING_FLAGS} ${OPTIMIZATION_FLAGS}")
|
||||
set(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} ${GENERAL_FLAGS} ${WARNING_FLAGS} ${OPTIMIZATION_FLAGS}")
|
||||
|
@ -43,7 +43,7 @@ elseif (CMAKE_CXX_COMPILER_ID MATCHES MSVC)
|
|||
elseif (CMAKE_CXX_COMPILER_ID MATCHES Clang)
|
||||
set(GENERAL_FLAGS "-pthread")
|
||||
set(WARNING_FLAGS "-Wall -Wextra -Wno-undefined-internal")
|
||||
set(OPTIMIZATION_FLAGS "-Ofast -funroll-loops -fmerge-all-constants")
|
||||
set(OPTIMIZATION_FLAGS "-Ofast -funroll-loops -fmerge-all-constants -flto")
|
||||
|
||||
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${GENERAL_FLAGS} ${WARNING_FLAGS} ${OPTIMIZATION_FLAGS}")
|
||||
set(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} ${GENERAL_FLAGS} ${WARNING_FLAGS} ${OPTIMIZATION_FLAGS}")
|
||||
|
|
|
@ -32,6 +32,7 @@ TEST(block_template, update)
|
|||
|
||||
SideChain sidechain(nullptr, NetworkType::Mainnet);
|
||||
BlockTemplate tpl(&sidechain, nullptr);
|
||||
tpl.rng().seed(123);
|
||||
|
||||
auto H = [](const char* s)
|
||||
{
|
||||
|
@ -59,7 +60,7 @@ TEST(block_template, update)
|
|||
tpl.update(data, mempool, &wallet);
|
||||
|
||||
const PoolBlock* b = tpl.pool_block_template();
|
||||
ASSERT_EQ(b->m_sidechainId, H("b708e3e456d97c43a7fcbd7b4e7aa29bdf45cd909bba07f915cb5f1d805433e6"));
|
||||
ASSERT_EQ(b->m_sidechainId, H("16d6a5c45d452288fcc439e5a258e8230798dfb6bbfd32220303efe932061aa9"));
|
||||
|
||||
std::vector<uint8_t> blobs;
|
||||
uint64_t height;
|
||||
|
@ -77,8 +78,8 @@ TEST(block_template, update)
|
|||
ASSERT_EQ(template_id, 1);
|
||||
|
||||
hash blobs_hash;
|
||||
keccak(blobs.data(), static_cast<int>(blobs.size()), blobs_hash.h, HASH_SIZE);
|
||||
ASSERT_EQ(blobs_hash, H("e9154971a27c412175562d23ab458b0d3cf780a8bcecf62ff3f667fed9d3bc1d"));
|
||||
keccak(blobs.data(), static_cast<int>(blobs.size()), blobs_hash.h);
|
||||
ASSERT_EQ(blobs_hash, H("27bd8678420c8a0948f71c71356252be790899d61f14e35b2d0440a30d730f4c"));
|
||||
|
||||
// Test 2: mempool with high fee and low fee transactions, it must choose high fee transactions
|
||||
for (uint64_t i = 0; i < 512; ++i) {
|
||||
|
|
|
@ -27,7 +27,7 @@ TEST(keccak, hashing)
|
|||
hash output;
|
||||
const uint8_t* data = reinterpret_cast<const uint8_t*>(input);
|
||||
const int len = static_cast<int>(size);
|
||||
keccak(data, len, output.h, HASH_SIZE);
|
||||
keccak(data, len, output.h);
|
||||
|
||||
char buf[log::Stream::BUF_SIZE + 1];
|
||||
log::Stream s(buf);
|
||||
|
|
|
@ -27,6 +27,8 @@ namespace p2pool {
|
|||
|
||||
TEST(pool_block, deserialize)
|
||||
{
|
||||
init_crypto_cache();
|
||||
|
||||
PoolBlock b;
|
||||
SideChain sidechain(nullptr, NetworkType::Mainnet, "mainnet test 2");
|
||||
|
||||
|
@ -101,6 +103,8 @@ TEST(pool_block, deserialize)
|
|||
ASSERT_EQ(s.str(), "f76d731c61c9c9b6c3f46be2e60c9478930b49b4455feecd41ecb9420d000000");
|
||||
|
||||
ASSERT_EQ(b.m_difficulty.check_pow(pow_hash), true);
|
||||
|
||||
destroy_crypto_cache();
|
||||
}
|
||||
|
||||
TEST(pool_block, verify)
|
||||
|
|