Removed hardfork activation code: the merge-mining fork-time checks (`merge_mining_enabled()` and the `MERGE_MINING_FORK_TIME` / `MERGE_MINING_TESTNET_FORK_TIME` constants) are deleted, and the merge-mining code paths are now unconditional.
Some checks are pending
C/C++ CI / build-ubuntu (map[c:gcc-11 cpp:g++-11 flags: os:ubuntu-20.04]) (push) Waiting to run
C/C++ CI / build-ubuntu (map[c:gcc-12 cpp:g++-12 flags: os:ubuntu-22.04]) (push) Waiting to run
C/C++ CI / build-ubuntu (map[c:gcc-8 cpp:g++-8 flags: os:ubuntu-20.04]) (push) Waiting to run
C/C++ CI / build-openbsd (map[architecture:x86-64 host:ubuntu-latest name:openbsd version:7.4]) (push) Waiting to run
C/C++ CI / build-alpine-static (map[arch:aarch64 branch:latest-stable flags:-ffunction-sections -Wno-error=inline -mfix-cortex-a53-835769 -mfix-cortex-a53-843419]) (push) Waiting to run
C/C++ CI / build-alpine-static (map[arch:riscv64 branch:edge flags:-ffunction-sections -Wno-error=inline]) (push) Waiting to run
C/C++ CI / build-alpine-static (map[arch:x86_64 branch:latest-stable flags:-ffunction-sections -Wno-error=inline]) (push) Waiting to run
C/C++ CI / build-ubuntu-static-libs (map[flags:-fuse-linker-plugin -ffunction-sections -Wno-error=inline]) (push) Waiting to run
C/C++ CI / build-ubuntu-aarch64 (map[flags:-fuse-linker-plugin -ffunction-sections -mfix-cortex-a53-835769 -mfix-cortex-a53-843419 os:ubuntu-20.04]) (push) Waiting to run
C/C++ CI / build-ubuntu-aarch64 (map[flags:-fuse-linker-plugin -ffunction-sections -mfix-cortex-a53-835769 -mfix-cortex-a53-843419 os:ubuntu-22.04]) (push) Waiting to run
C/C++ CI / build-windows-msys2 (map[c:clang cxx:clang++ flags:-fuse-ld=lld -Wno-unused-command-line-argument -Wno-nan-infinity-disabled]) (push) Waiting to run
C/C++ CI / build-windows-msys2 (map[c:gcc cxx:g++ flags:-ffunction-sections -Wno-error=maybe-uninitialized -Wno-error=attributes]) (push) Waiting to run
C/C++ CI / build-windows-msbuild (map[grpc:OFF os:2019 rx:OFF tls:OFF upnp:OFF vs:Visual Studio 16 2019 vspath:C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise]) (push) Waiting to run
C/C++ CI / build-windows-msbuild (map[grpc:OFF os:2019 rx:OFF tls:ON upnp:OFF vs:Visual Studio 16 2019 vspath:C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise]) (push) Waiting to run
C/C++ CI / build-windows-msbuild (map[grpc:OFF os:2019 rx:OFF tls:ON upnp:ON vs:Visual Studio 16 2019 vspath:C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise]) (push) Waiting to run
C/C++ CI / build-windows-msbuild (map[grpc:OFF os:2019 rx:ON tls:ON upnp:ON vs:Visual Studio 16 2019 vspath:C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise]) (push) Waiting to run
C/C++ CI / build-windows-msbuild (map[grpc:ON os:2019 rx:ON tls:ON upnp:ON vs:Visual Studio 16 2019 vspath:C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise]) (push) Waiting to run
C/C++ CI / build-windows-msbuild (map[grpc:ON os:2022 rx:ON tls:ON upnp:ON vs:Visual Studio 17 2022 vspath:C:\Program Files\Microsoft Visual Studio\2022\Enterprise]) (push) Waiting to run
C/C++ CI / build-macos (push) Waiting to run
C/C++ CI / build-macos-aarch64 (push) Waiting to run
Microsoft C++ Code Analysis / Analyze (push) Waiting to run
source-snapshot / source-snapshot (push) Waiting to run
C/C++ CI / build-freebsd (map[architecture:x86-64 host:ubuntu-latest name:freebsd version:13.3]) (push) Waiting to run
clang-tidy / clang-tidy (push) Waiting to run
CodeQL / Analyze (push) Waiting to run
cppcheck / cppcheck-ubuntu (push) Waiting to run
cppcheck / cppcheck-windows (push) Waiting to run
Sync test / sync-test-ubuntu-asan (push) Waiting to run
Sync test / sync-test-macos (map[flags:-Og -ftrapv -target arm64-apple-macos-11 os:macos-14]) (push) Waiting to run
Sync test / sync-test-ubuntu-tsan (push) Waiting to run
Sync test / sync-test-ubuntu-msan (push) Waiting to run
Sync test / sync-test-ubuntu-ubsan (push) Waiting to run
Sync test / sync-test-macos (map[flags:-Og -ftrapv os:macos-13]) (push) Waiting to run
Sync test / sync-test-windows-debug-asan (push) Waiting to run
Sync test / sync-test-windows-leaks (push) Waiting to run

This commit is contained in:
SChernykh 2024-10-13 14:17:37 +02:00
parent c88f126cb1
commit 1053e22a52
5 changed files with 74 additions and 153 deletions

View file

@ -324,15 +324,9 @@ void BlockTemplate::update(const MinerData& data, const Mempool& mempool, const
parallel_run(uv_default_loop_checked(), Precalc(m_shares, m_poolBlockTemplate->m_txkeySec));
}
if (m_poolBlockTemplate->merge_mining_enabled()) {
m_poolBlockTemplate->m_merkleTreeData = PoolBlock::encode_merkle_tree_data(static_cast<uint32_t>(data.aux_chains.size() + 1), data.aux_nonce);
m_poolBlockTemplate->m_merkleTreeDataSize = 0;
writeVarint(m_poolBlockTemplate->m_merkleTreeData, [this](uint8_t) { ++m_poolBlockTemplate->m_merkleTreeDataSize; });
}
else {
m_poolBlockTemplate->m_merkleTreeData = 0;
m_poolBlockTemplate->m_merkleTreeDataSize = 0;
}
m_poolBlockTemplate->m_merkleTreeData = PoolBlock::encode_merkle_tree_data(static_cast<uint32_t>(data.aux_chains.size() + 1), data.aux_nonce);
m_poolBlockTemplate->m_merkleTreeDataSize = 0;
writeVarint(m_poolBlockTemplate->m_merkleTreeData, [this](uint8_t) { ++m_poolBlockTemplate->m_merkleTreeDataSize; });
select_mempool_transactions(mempool);
@ -615,14 +609,8 @@ void BlockTemplate::update(const MinerData& data, const Mempool& mempool, const
m_poolBlockTemplate->m_sidechainId = {};
m_poolBlockTemplate->m_merkleRoot = {};
if (m_poolBlockTemplate->merge_mining_enabled()) {
m_poolBlockTemplate->m_auxChains = data.aux_chains;
m_poolBlockTemplate->m_auxNonce = data.aux_nonce;
}
else {
m_poolBlockTemplate->m_auxChains.clear();
m_poolBlockTemplate->m_auxNonce = 0;
}
m_poolBlockTemplate->m_auxChains = data.aux_chains;
m_poolBlockTemplate->m_auxNonce = data.aux_nonce;
init_merge_mining_merkle_proof();
@ -663,10 +651,7 @@ void BlockTemplate::update(const MinerData& data, const Mempool& mempool, const
}
if (pool_block_debug()) {
const size_t merkle_root_offset =
m_poolBlockTemplate->merge_mining_enabled()
? (m_extraNonceOffsetInTemplate + m_poolBlockTemplate->m_extraNonceSize + 2 + m_poolBlockTemplate->m_merkleTreeDataSize)
: (m_extraNonceOffsetInTemplate + m_poolBlockTemplate->m_extraNonceSize + 2);
const size_t merkle_root_offset = m_extraNonceOffsetInTemplate + m_poolBlockTemplate->m_extraNonceSize + 2 + m_poolBlockTemplate->m_merkleTreeDataSize;
memcpy(m_blockTemplateBlob.data() + merkle_root_offset, m_poolBlockTemplate->m_merkleRoot.h, HASH_SIZE);
memcpy(m_fullDataBlob.data() + merkle_root_offset, m_poolBlockTemplate->m_merkleRoot.h, HASH_SIZE);
@ -965,15 +950,9 @@ int BlockTemplate::create_miner_tx(const MinerData& data, const std::vector<Mine
m_minerTxExtra.push_back(TX_EXTRA_MERGE_MINING_TAG);
if (!m_poolBlockTemplate->merge_mining_enabled()) {
m_minerTxExtra.push_back(HASH_SIZE);
m_minerTxExtra.insert(m_minerTxExtra.end(), HASH_SIZE, 0);
}
else {
m_minerTxExtra.push_back(static_cast<uint8_t>(m_poolBlockTemplate->m_merkleTreeDataSize + HASH_SIZE));
writeVarint(m_poolBlockTemplate->m_merkleTreeData, m_minerTxExtra);
m_minerTxExtra.insert(m_minerTxExtra.end(), HASH_SIZE, 0);
}
m_minerTxExtra.push_back(static_cast<uint8_t>(m_poolBlockTemplate->m_merkleTreeDataSize + HASH_SIZE));
writeVarint(m_poolBlockTemplate->m_merkleTreeData, m_minerTxExtra);
m_minerTxExtra.insert(m_minerTxExtra.end(), HASH_SIZE, 0);
// TX_EXTRA end
writeVarint(m_minerTxExtra.size(), m_minerTx);
@ -1064,9 +1043,7 @@ hash BlockTemplate::calc_miner_tx_hash(uint32_t extra_nonce) const
merge_mining_root = get_root_from_proof(sidechain_id, m_poolBlockTemplate->m_merkleProof, aux_slot, n_aux_chains);
}
const size_t merkle_root_offset = m_poolBlockTemplate->merge_mining_enabled()
? (extra_nonce_offset + m_poolBlockTemplate->m_extraNonceSize + 2 + m_poolBlockTemplate->m_merkleTreeDataSize)
: (extra_nonce_offset + m_poolBlockTemplate->m_extraNonceSize + 2);
const size_t merkle_root_offset = extra_nonce_offset + m_poolBlockTemplate->m_extraNonceSize + 2 + m_poolBlockTemplate->m_merkleTreeDataSize;
// 1. Prefix (everything except vin_rct_type byte in the end)
// Apply extra_nonce in-place because we can't write to the block template here
@ -1416,9 +1393,7 @@ std::vector<uint8_t> BlockTemplate::get_block_template_blob(uint32_t template_id
const uint32_t aux_slot = get_aux_slot(m_sidechain->consensus_hash(), m_poolBlockTemplate->m_auxNonce, n_aux_chains);
merge_mining_root = get_root_from_proof(sidechain_id, m_poolBlockTemplate->m_merkleProof, aux_slot, n_aux_chains);
merkle_root_offset = m_poolBlockTemplate->merge_mining_enabled()
? (m_extraNonceOffsetInTemplate + m_poolBlockTemplate->m_extraNonceSize + 2 + m_poolBlockTemplate->m_merkleTreeDataSize)
: (m_extraNonceOffsetInTemplate + m_poolBlockTemplate->m_extraNonceSize + 2);
merkle_root_offset = m_extraNonceOffsetInTemplate + m_poolBlockTemplate->m_extraNonceSize + 2 + m_poolBlockTemplate->m_merkleTreeDataSize;
*pThis = this;

View file

@ -805,10 +805,7 @@ void P2PServer::broadcast(const PoolBlock& block, const PoolBlock* parent)
writeVarint(total_reward, data->pruned_blob);
writeVarint(outputs_blob_size, data->pruned_blob);
if (block.merge_mining_enabled()) {
data->pruned_blob.insert(data->pruned_blob.end(), block.m_sidechainId.h, block.m_sidechainId.h + HASH_SIZE);
}
data->pruned_blob.insert(data->pruned_blob.end(), block.m_sidechainId.h, block.m_sidechainId.h + HASH_SIZE);
data->pruned_blob.insert(data->pruned_blob.end(), mainchain_data.begin() + outputs_offset + outputs_blob_size, mainchain_data.end());
const size_t N = block.m_transactions.size();

View file

@ -204,17 +204,10 @@ std::vector<uint8_t> PoolBlock::serialize_mainchain_data(size_t* header_size, si
*(p++) = TX_EXTRA_MERGE_MINING_TAG;
if (!merge_mining_enabled()) {
*(p++) = HASH_SIZE;
memcpy(p, m_sidechainId.h, HASH_SIZE);
p += HASH_SIZE;
}
else {
*(p++) = static_cast<uint8_t>(m_merkleTreeDataSize + HASH_SIZE);
writeVarint(m_merkleTreeData, [&p](const uint8_t b) { *(p++) = b; });
memcpy(p, m_merkleRoot.h, HASH_SIZE);
p += HASH_SIZE;
}
*(p++) = static_cast<uint8_t>(m_merkleTreeDataSize + HASH_SIZE);
writeVarint(m_merkleTreeData, [&p](const uint8_t b) { *(p++) = b; });
memcpy(p, m_merkleRoot.h, HASH_SIZE);
p += HASH_SIZE;
writeVarint(static_cast<size_t>(p - tx_extra), data);
data.insert(data.end(), tx_extra, p);
@ -267,23 +260,21 @@ std::vector<uint8_t> PoolBlock::serialize_sidechain_data() const
writeVarint(m_cumulativeDifficulty.lo, data);
writeVarint(m_cumulativeDifficulty.hi, data);
if (merge_mining_enabled()) {
const uint8_t n = static_cast<uint8_t>(m_merkleProof.size());
data.push_back(n);
const uint8_t n = static_cast<uint8_t>(m_merkleProof.size());
data.push_back(n);
for (uint8_t i = 0; i < n; ++i) {
const hash& h = m_merkleProof[i];
data.insert(data.end(), h.h, h.h + HASH_SIZE);
}
for (uint8_t i = 0; i < n; ++i) {
const hash& h = m_merkleProof[i];
data.insert(data.end(), h.h, h.h + HASH_SIZE);
}
writeVarint(m_mergeMiningExtra.size(), data);
writeVarint(m_mergeMiningExtra.size(), data);
for (const auto& mm_extra_data : m_mergeMiningExtra) {
data.insert(data.end(), mm_extra_data.first.h, mm_extra_data.first.h + HASH_SIZE);
for (const auto& mm_extra_data : m_mergeMiningExtra) {
data.insert(data.end(), mm_extra_data.first.h, mm_extra_data.first.h + HASH_SIZE);
writeVarint(mm_extra_data.second.size(), data);
data.insert(data.end(), mm_extra_data.second.begin(), mm_extra_data.second.end());
}
writeVarint(mm_extra_data.second.size(), data);
data.insert(data.end(), mm_extra_data.second.begin(), mm_extra_data.second.end());
}
const uint8_t* p = reinterpret_cast<const uint8_t*>(m_sidechainExtraBuf);
@ -422,20 +413,4 @@ hash PoolBlock::calculate_tx_key_seed() const
return result;
}
bool PoolBlock::merge_mining_enabled() const
{
#ifdef P2POOL_UNIT_TESTS
return true;
#else
switch (SideChain::network_type()) {
case NetworkType::Mainnet:
return m_timestamp >= MERGE_MINING_FORK_TIME;
case NetworkType::Testnet:
return m_timestamp >= MERGE_MINING_TESTNET_FORK_TIME;
default:
return false;
}
#endif
}
} // namespace p2pool

View file

@ -63,12 +63,6 @@ static constexpr uint64_t MAX_SIDECHAIN_HEIGHT = 31556952000ULL;
static constexpr uint64_t MERGE_MINING_MAX_CHAINS = 256;
static constexpr uint64_t LOG2_MERGE_MINING_MAX_CHAINS = 8;
// Oct 12 2024 20:00:00 GMT+0000
static constexpr uint64_t MERGE_MINING_FORK_TIME = 1728763200;
// Aug 11 2024 20:00:00 GMT+0000
static constexpr uint64_t MERGE_MINING_TESTNET_FORK_TIME = 1723406400;
struct DifficultyData
{
FORCEINLINE DifficultyData(uint64_t t, const difficulty_type& d) : m_timestamp(t), m_cumulativeDifficulty(d) {}
@ -217,8 +211,6 @@ struct PoolBlock
mm_n_aux_chains = 1U + ((k >> 3U) & ((1U << n) - 1U));
mm_nonce = static_cast<uint32_t>(m_merkleTreeData >> (3U + n));
}
bool merge_mining_enabled() const;
};
} // namespace p2pool

View file

@ -70,7 +70,7 @@ int PoolBlock::deserialize(const uint8_t* data, size_t size, const SideChain& si
READ_VARINT(m_timestamp);
READ_BUF(m_prevId.h, HASH_SIZE);
if (merge_mining_enabled() && (m_minorVersion > 127)) return __LINE__;
if (m_minorVersion > 127) return __LINE__;
const int nonce_offset = static_cast<int>(data - data_begin);
READ_BUF(&m_nonce, NONCE_SIZE);
@ -150,9 +150,7 @@ int PoolBlock::deserialize(const uint8_t* data, size_t size, const SideChain& si
outputs_blob_size = static_cast<int>(tmp);
// Required by sidechain.get_outputs_blob() to speed up repeated broadcasts from different peers
if (merge_mining_enabled()) {
READ_BUF(m_sidechainId.h, HASH_SIZE);
}
READ_BUF(m_sidechainId.h, HASH_SIZE);
}
// Technically some p2pool node could keep stuffing block with transactions until reward is less than 0.6 XMR
@ -193,36 +191,22 @@ int PoolBlock::deserialize(const uint8_t* data, size_t size, const SideChain& si
int mm_root_hash_offset;
uint32_t mm_n_aux_chains, mm_nonce;
if (!merge_mining_enabled()) {
EXPECT_BYTE(HASH_SIZE);
uint64_t mm_field_size;
READ_VARINT(mm_field_size);
mm_root_hash_offset = static_cast<int>((data - data_begin) + outputs_blob_size_diff);
READ_BUF(m_sidechainId.h, HASH_SIZE);
const uint8_t* const mm_field_begin = data;
mm_n_aux_chains = 1;
mm_nonce = 0;
READ_VARINT(m_merkleTreeData);
m_merkleRoot = static_cast<root_hash&>(m_sidechainId);
m_merkleTreeDataSize = 0;
}
else {
uint64_t mm_field_size;
READ_VARINT(mm_field_size);
m_merkleTreeDataSize = static_cast<uint32_t>(data - mm_field_begin);
const uint8_t* const mm_field_begin = data;
decode_merkle_tree_data(mm_n_aux_chains, mm_nonce);
READ_VARINT(m_merkleTreeData);
mm_root_hash_offset = static_cast<int>((data - data_begin) + outputs_blob_size_diff);
READ_BUF(m_merkleRoot.h, HASH_SIZE);
m_merkleTreeDataSize = static_cast<uint32_t>(data - mm_field_begin);
decode_merkle_tree_data(mm_n_aux_chains, mm_nonce);
mm_root_hash_offset = static_cast<int>((data - data_begin) + outputs_blob_size_diff);
READ_BUF(m_merkleRoot.h, HASH_SIZE);
if (static_cast<uint64_t>(data - mm_field_begin) != mm_field_size) {
return __LINE__;
}
if (static_cast<uint64_t>(data - mm_field_begin) != mm_field_size) {
return __LINE__;
}
if (static_cast<uint64_t>(data - tx_extra_begin) != tx_extra_size) return __LINE__;
@ -363,52 +347,50 @@ int PoolBlock::deserialize(const uint8_t* data, size_t size, const SideChain& si
m_merkleProof.clear();
m_mergeMiningExtra.clear();
if (merge_mining_enabled()) {
uint8_t merkle_proof_size;
READ_BYTE(merkle_proof_size);
uint8_t merkle_proof_size;
READ_BYTE(merkle_proof_size);
if (merkle_proof_size > LOG2_MERGE_MINING_MAX_CHAINS) {
return __LINE__;
}
if (merkle_proof_size > LOG2_MERGE_MINING_MAX_CHAINS) {
return __LINE__;
}
m_merkleProof.reserve(merkle_proof_size);
m_merkleProof.reserve(merkle_proof_size);
for (uint8_t i = 0; i < merkle_proof_size; ++i) {
hash h;
READ_BUF(h.h, HASH_SIZE);
m_merkleProof.emplace_back(h);
}
for (uint8_t i = 0; i < merkle_proof_size; ++i) {
hash h;
READ_BUF(h.h, HASH_SIZE);
m_merkleProof.emplace_back(h);
}
uint64_t mm_extra_data_count;
READ_VARINT(mm_extra_data_count);
uint64_t mm_extra_data_count;
READ_VARINT(mm_extra_data_count);
if (mm_extra_data_count) {
// Sanity check
if (mm_extra_data_count > MERGE_MINING_MAX_CHAINS) return __LINE__;
if (static_cast<uint64_t>(data_end - data) < mm_extra_data_count * (HASH_SIZE + 1)) return __LINE__;
m_mergeMiningExtra.reserve(mm_extra_data_count);
for (uint64_t i = 0; i < mm_extra_data_count; ++i) {
hash chain_id;
READ_BUF(chain_id.h, HASH_SIZE);
// IDs must be ordered to avoid duplicates
if (i && !(m_mergeMiningExtra[i - 1].first < chain_id)) return __LINE__;
uint64_t n;
READ_VARINT(n);
if (mm_extra_data_count) {
// Sanity check
if (mm_extra_data_count > MERGE_MINING_MAX_CHAINS) return __LINE__;
if (static_cast<uint64_t>(data_end - data) < mm_extra_data_count * (HASH_SIZE + 1)) return __LINE__;
if (static_cast<uint64_t>(data_end - data) < n) return __LINE__;
m_mergeMiningExtra.reserve(mm_extra_data_count);
std::vector<uint8_t> t;
t.resize(n);
for (uint64_t i = 0; i < mm_extra_data_count; ++i) {
hash chain_id;
READ_BUF(chain_id.h, HASH_SIZE);
READ_BUF(t.data(), n);
// IDs must be ordered to avoid duplicates
if (i && !(m_mergeMiningExtra[i - 1].first < chain_id)) return __LINE__;
uint64_t n;
READ_VARINT(n);
// Sanity check
if (static_cast<uint64_t>(data_end - data) < n) return __LINE__;
std::vector<uint8_t> t;
t.resize(n);
READ_BUF(t.data(), n);
m_mergeMiningExtra.emplace_back(chain_id, std::move(t));
}
m_mergeMiningExtra.emplace_back(chain_id, std::move(t));
}
}