From 1554a44f5cdd874b8a45adddc4a9f4f28826c336 Mon Sep 17 00:00:00 2001 From: SChernykh Date: Sat, 4 Sep 2021 09:10:44 +0200 Subject: [PATCH] Added more integrity checks --- src/block_template.cpp | 16 +++++++++++++++- src/stratum_server.cpp | 31 +++++++++++++++++++++++++++++++ 2 files changed, 46 insertions(+), 1 deletion(-) diff --git a/src/block_template.cpp b/src/block_template.cpp index 9fb4d3f..66b2ccc 100644 --- a/src/block_template.cpp +++ b/src/block_template.cpp @@ -911,7 +911,21 @@ uint32_t BlockTemplate::get_hashing_blobs(uint32_t extra_nonce_start, uint32_t c for (uint32_t i = 0; i < count; ++i) { uint8_t blob[128]; - blob_size = get_hashing_blob_nolock(extra_nonce_start + i, blob); + const uint32_t n = get_hashing_blob_nolock(extra_nonce_start + i, blob); + + if (n > sizeof(blob)) { + LOGERR(1, "internal error: get_hashing_blob_nolock returned too large blob size " << n << ", expected <= " << sizeof(blob)); + } + else if (n < 76) { + LOGERR(1, "internal error: get_hashing_blob_nolock returned too little blob size " << n << ", expected >= 76"); + } + + if (blob_size == 0) { + blob_size = n; + } + else if (n != blob_size) { + LOGERR(1, "internal error: get_hashing_blob_nolock returned different blob size " << n << ", expected " << blob_size); + } blobs.insert(blobs.end(), blob, blob + blob_size); } diff --git a/src/stratum_server.cpp b/src/stratum_server.cpp index b7bde07..30a6eb7 100644 --- a/src/stratum_server.cpp +++ b/src/stratum_server.cpp @@ -111,6 +111,37 @@ void StratumServer::on_block(const BlockTemplate& block) m_extraNonce.exchange(blobs_data->m_numClientsExpected); blobs_data->m_blobSize = block.get_hashing_blobs(0, blobs_data->m_numClientsExpected, blobs_data->m_blobs, blobs_data->m_height, difficulty, sidechain_difficulty, blobs_data->m_seedHash, nonce_offset, blobs_data->m_templateId); + + // Integrity checks + if (blobs_data->m_blobSize < 76) { + LOGERR(1, "internal error: get_hashing_blobs returned too small blobs (" << blobs_data->m_blobSize << " bytes)"); + } + else if (blobs_data->m_blobs.size() != blobs_data->m_blobSize * num_connections) { + LOGERR(1, "internal error: get_hashing_blobs returned wrong amount of data"); + } + else if (num_connections > 1) { + std::vector<uint64_t> blob_hashes; + blob_hashes.reserve(num_connections); + + const uint8_t* data = blobs_data->m_blobs.data(); + const size_t size = blobs_data->m_blobSize; + + // Get first 8 bytes of the Merkle root hash from each blob + for (size_t i = 0; i < num_connections; ++i) { + blob_hashes.emplace_back(*reinterpret_cast<const uint64_t*>(data + i * size + 43)); + } + + // Find duplicates + std::sort(blob_hashes.begin(), blob_hashes.end()); + + for (uint32_t i = 1; i < num_connections; ++i) { + if (blob_hashes[i - 1] == blob_hashes[i]) { + LOGERR(1, "internal error: get_hashing_blobs returned two identical blobs"); + break; + } + } + } + blobs_data->m_target = std::max(difficulty.target(), sidechain_difficulty.target()); {