Added code to deserialize compact blobs

SChernykh 2022-11-09 15:29:53 +01:00
parent c135787620
commit 47ff7e228e
7 changed files with 91 additions and 32 deletions
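
What "compact blobs" means here: instead of storing every transaction as a full 32-byte hash, the sidechain block blob stores a varint index into the parent block's transaction list for each transaction the parent already carries, and falls back to a zero index followed by the full hash for transactions the parent does not have. A minimal stand-alone sketch of that decoding rule, with illustrative names only (read_varint, decode_compact_transactions and Hash are not the p2pool API):

// Sketch of the compact transaction encoding handled by this commit.
// Each entry is a varint: 0 means a full 32-byte hash follows inline,
// any other value k refers to the parent block's transaction at index k.
#include <algorithm>
#include <array>
#include <cstdint>
#include <cstring>
#include <optional>
#include <vector>

using Hash = std::array<uint8_t, 32>;

// Minimal little-endian base-128 varint reader.
static bool read_varint(const uint8_t*& p, const uint8_t* end, uint64_t& out)
{
	out = 0;
	for (int shift = 0; (p < end) && (shift < 64); shift += 7) {
		const uint8_t b = *p++;
		out |= static_cast<uint64_t>(b & 0x7F) << shift;
		if ((b & 0x80) == 0) return true;
	}
	return false;
}

// Decodes "count" compact entries, resolving non-zero indices against the
// parent block's transaction list. Returns std::nullopt on malformed input.
static std::optional<std::vector<Hash>> decode_compact_transactions(
	const uint8_t* p, const uint8_t* end, uint64_t count,
	const std::vector<Hash>& parent_transactions)
{
	std::vector<Hash> result;
	// Cap the reservation: "count" comes from untrusted input.
	result.reserve(static_cast<size_t>(std::min<uint64_t>(count, 256)));

	for (uint64_t i = 0; i < count; ++i) {
		uint64_t parent_index;
		if (!read_varint(p, end, parent_index)) return std::nullopt;

		Hash id{};
		if (parent_index == 0) {
			// New transaction: the full hash is stored inline.
			if (end - p < 32) return std::nullopt;
			std::memcpy(id.data(), p, 32);
			p += 32;
		}
		else {
			// Known transaction: take the hash from the parent block.
			if (parent_index >= parent_transactions.size()) return std::nullopt;
			id = parent_transactions[parent_index];
		}
		result.push_back(id);
	}
	return result;
}

The real PoolBlock::deserialize below follows the same two cases, except that it only records the indices at first (parent_indices) and fills in the missing hashes later, once m_parent has been read and the parent block has been looked up via sidechain.find_block().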

View file

@@ -201,7 +201,7 @@ void BlockCache::load_all(SideChain& side_chain, P2PServer& server)
continue;
}
if (block.deserialize(data + sizeof(uint32_t), n, side_chain, uv_default_loop_checked()) == 0) {
if (block.deserialize(data + sizeof(uint32_t), n, side_chain, uv_default_loop_checked(), false) == 0) {
server.add_cached_block(block);
++blocks_loaded;
}

View file

@@ -568,7 +568,7 @@ void BlockTemplate::update(const MinerData& data, const Mempool& mempool, Wallet
buf.insert(buf.end(), sidechain_data.begin(), sidechain_data.end());
PoolBlock check;
const int result = check.deserialize(buf.data(), buf.size(), m_pool->side_chain(), nullptr);
const int result = check.deserialize(buf.data(), buf.size(), m_pool->side_chain(), nullptr, false);
if (result != 0) {
LOGERR(1, "pool block blob generation and/or parsing is broken, error " << result);
}
@@ -1104,7 +1104,7 @@ void BlockTemplate::submit_sidechain_block(uint32_t template_id, uint32_t nonce,
buf.insert(buf.end(), sidechain_data.begin(), sidechain_data.end());
PoolBlock check;
const int result = check.deserialize(buf.data(), buf.size(), side_chain, nullptr);
const int result = check.deserialize(buf.data(), buf.size(), side_chain, nullptr, false);
if (result != 0) {
LOGERR(1, "pool block blob generation and/or parsing is broken, error " << result);
}

View file

@@ -768,7 +768,7 @@ void P2PServer::broadcast(const PoolBlock& block, const PoolBlock* parent)
data->pruned_blob.insert(data->pruned_blob.end(), mainchain_data.begin() + outputs_offset + outputs_blob_size, mainchain_data.end());
const size_t N = block.m_transactions.size();
if ((N > 1) && parent) {
if ((N > 1) && parent && (parent->m_transactions.size() > 1)) {
unordered_map<hash, size_t> parent_transactions;
parent_transactions.reserve(parent->m_transactions.size());
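
For reference, the sending side needs the inverse mapping; P2PServer::broadcast (only partially visible in this hunk) builds an unordered_map from the parent's transaction hashes to their indices and uses it while assembling the compact blob. The extra parent->m_transactions.size() > 1 condition makes sense because a parent whose only transaction is its own coinbase has nothing to index into. A rough, hypothetical encoder sketch reusing the Hash alias and varint convention from the sketch above (write_varint and encode_compact_transactions are illustrative names, and the linear search stands in for the map used by the real code):

// Hypothetical encoder counterpart: every transaction after entry 0 (the
// block's own coinbase) is written either as a varint index into the parent
// block's transaction list, or as 0 followed by the full hash when the
// parent does not contain it.
static void write_varint(std::vector<uint8_t>& out, uint64_t value)
{
	while (value >= 0x80) {
		out.push_back(static_cast<uint8_t>((value & 0x7F) | 0x80));
		value >>= 7;
	}
	out.push_back(static_cast<uint8_t>(value));
}

static std::vector<uint8_t> encode_compact_transactions(
	const std::vector<Hash>& transactions,        // entry 0 = this block's coinbase
	const std::vector<Hash>& parent_transactions) // entry 0 = parent's coinbase
{
	std::vector<uint8_t> blob;
	for (size_t i = 1; i < transactions.size(); ++i) {
		size_t parent_index = 0;
		for (size_t j = 1; j < parent_transactions.size(); ++j) {
			if (parent_transactions[j] == transactions[i]) {
				parent_index = j;
				break;
			}
		}
		write_varint(blob, parent_index);
		if (parent_index == 0) {
			// Not found in the parent: store the full hash inline.
			blob.insert(blob.end(), transactions[i].begin(), transactions[i].end());
		}
	}
	return blob;
}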
@@ -975,7 +975,7 @@ int P2PServer::listen_port() const
return params.m_p2pExternalPort ? params.m_p2pExternalPort : m_listenPort;
}
int P2PServer::deserialize_block(const uint8_t* buf, uint32_t size)
int P2PServer::deserialize_block(const uint8_t* buf, uint32_t size, bool compact)
{
int result;
@@ -984,7 +984,7 @@ int P2PServer::deserialize_block(const uint8_t* buf, uint32_t size)
result = m_blockDeserializeResult;
}
else {
result = m_block->deserialize(buf, size, m_pool->side_chain(), &m_loop);
result = m_block->deserialize(buf, size, m_pool->side_chain(), &m_loop, compact);
m_blockDeserializeBuf.assign(buf, buf + size);
m_blockDeserializeResult = result;
m_lookForMissingBlocks = true;
@@ -1898,7 +1898,7 @@ bool P2PServer::P2PClient::on_block_response(const uint8_t* buf, uint32_t size)
MutexLock lock(server->m_blockLock);
const int result = server->deserialize_block(buf, size);
const int result = server->deserialize_block(buf, size, false);
if (result != 0) {
LOGWARN(3, "peer " << static_cast<char*>(m_addrString) << " sent an invalid block, error " << result);
return false;
@@ -1938,7 +1938,7 @@ bool P2PServer::P2PClient::on_block_broadcast(const uint8_t* buf, uint32_t size)
MutexLock lock(server->m_blockLock);
const int result = server->deserialize_block(buf, size);
const int result = server->deserialize_block(buf, size, false);
if (result != 0) {
LOGWARN(3, "peer " << static_cast<char*>(m_addrString) << " sent an invalid block, error " << result);
return false;

View file

@@ -146,7 +146,7 @@ public:
void set_max_outgoing_peers(uint32_t n) { m_maxOutgoingPeers = std::min(std::max(n, 10U), 450U); }
void set_max_incoming_peers(uint32_t n) { m_maxIncomingPeers = std::min(std::max(n, 10U), 450U); }
int deserialize_block(const uint8_t* buf, uint32_t size);
int deserialize_block(const uint8_t* buf, uint32_t size, bool compact);
const PoolBlock* get_block() const { return m_block; }
private:

View file

@@ -135,7 +135,7 @@ struct PoolBlock
std::vector<uint8_t> serialize_mainchain_data_nolock(size_t* header_size, size_t* miner_tx_size, int* outputs_offset, int* outputs_blob_size) const;
std::vector<uint8_t> serialize_sidechain_data() const;
int deserialize(const uint8_t* data, size_t size, const SideChain& sidechain, uv_loop_t* loop);
int deserialize(const uint8_t* data, size_t size, const SideChain& sidechain, uv_loop_t* loop, bool compact);
void reset_offchain_data();
bool get_pow_hash(RandomX_Hasher_Base* hasher, uint64_t height, const hash& seed_hash, hash& pow_hash);

View file

@@ -23,7 +23,7 @@ namespace p2pool {
// Since data here can come from external and possibly malicious sources, check everything
// Only the syntax (i.e. the serialized block binary format) and the keccak hash are checked here
// Semantics must also be checked elsewhere before accepting the block (PoW, reward split between miners, difficulty calculation and so on)
int PoolBlock::deserialize(const uint8_t* data, size_t size, const SideChain& sidechain, uv_loop_t* loop)
int PoolBlock::deserialize(const uint8_t* data, size_t size, const SideChain& sidechain, uv_loop_t* loop, bool compact)
{
try {
// Sanity check
@@ -193,6 +193,34 @@ int PoolBlock::deserialize(const uint8_t* data, size_t size, const SideChain& si
uint64_t num_transactions;
READ_VARINT(num_transactions);
const int transactions_offset = static_cast<int>(data - data_begin);
std::vector<uint64_t> parent_indices;
if (compact) {
if (static_cast<uint64_t>(data_end - data) < num_transactions) return __LINE__;
m_transactions.resize(1);
parent_indices.resize(1);
// limit reserved memory size because we can't check "num_transactions" properly here
const uint64_t k = std::min<uint64_t>(num_transactions + 1, 256);
m_transactions.reserve(k);
parent_indices.reserve(k);
for (uint64_t i = 0; i < num_transactions; ++i) {
uint64_t parent_index;
READ_VARINT(parent_index);
hash id;
if (parent_index == 0) {
READ_BUF(id.h, HASH_SIZE);
}
m_transactions.emplace_back(id);
parent_indices.emplace_back(parent_index);
}
}
else {
if (num_transactions > std::numeric_limits<uint64_t>::max() / HASH_SIZE) return __LINE__;
if (static_cast<uint64_t>(data_end - data) < num_transactions * HASH_SIZE) return __LINE__;
@@ -202,14 +230,23 @@ int PoolBlock::deserialize(const uint8_t* data, size_t size, const SideChain& si
for (uint64_t i = 0; i < num_transactions; ++i) {
hash id;
READ_BUF(id.h, HASH_SIZE);
m_transactions.emplace_back(std::move(id));
m_transactions.emplace_back(id);
}
}
const int transactions_actual_blob_size = static_cast<int>(data - data_begin) - transactions_offset;
const int transactions_blob_size = static_cast<int>(num_transactions) * HASH_SIZE;
const int transactions_blob_size_diff = transactions_blob_size - transactions_actual_blob_size;
m_transactions.shrink_to_fit();
#if POOL_BLOCK_DEBUG
m_mainChainDataDebug.reserve((data - data_begin) + outputs_blob_size_diff);
m_mainChainDataDebug.reserve((data - data_begin) + outputs_blob_size_diff + transactions_blob_size_diff);
m_mainChainDataDebug.assign(data_begin, data_begin + outputs_offset);
m_mainChainDataDebug.insert(m_mainChainDataDebug.end(), outputs_blob_size, 0);
m_mainChainDataDebug.insert(m_mainChainDataDebug.end(), data_begin + outputs_offset + outputs_actual_blob_size, data);
m_mainChainDataDebug.insert(m_mainChainDataDebug.end(), data_begin + outputs_offset + outputs_actual_blob_size, data_begin + transactions_offset);
m_mainChainDataDebug.insert(m_mainChainDataDebug.end(), transactions_blob_size, 0);
m_mainChainDataDebug.insert(m_mainChainDataDebug.end(), data_begin + transactions_offset + transactions_actual_blob_size, data);
const uint8_t* sidechain_data_begin = data;
#endif
@@ -239,6 +276,23 @@ int PoolBlock::deserialize(const uint8_t* data, size_t size, const SideChain& si
READ_BUF(m_parent.h, HASH_SIZE);
if (compact) {
const PoolBlock* parent = sidechain.find_block(m_parent);
if (!parent) {
return __LINE__;
}
for (uint64_t i = 1, n = m_transactions.size(); i < n; ++i) {
const uint64_t parent_index = parent_indices[i];
if (parent_index) {
if (parent_index >= parent->m_transactions.size()) {
return __LINE__;
}
m_transactions[i] = parent->m_transactions[parent_index];
}
}
}
uint64_t num_uncles;
READ_VARINT(num_uncles);
@@ -251,7 +305,7 @@ int PoolBlock::deserialize(const uint8_t* data, size_t size, const SideChain& si
for (uint64_t i = 0; i < num_uncles; ++i) {
hash id;
READ_BUF(id.h, HASH_SIZE);
m_uncles.emplace_back(std::move(id));
m_uncles.emplace_back(id);
}
READ_VARINT(m_sidechainHeight);
@@ -279,14 +333,18 @@ int PoolBlock::deserialize(const uint8_t* data, size_t size, const SideChain& si
return __LINE__;
}
const uint8_t* transactions_blob = reinterpret_cast<uint8_t*>(m_transactions.data() + 1);
#if POOL_BLOCK_DEBUG
memcpy(m_mainChainDataDebug.data() + outputs_offset, outputs_blob.data(), outputs_blob_size);
memcpy(m_mainChainDataDebug.data() + transactions_offset + outputs_blob_size_diff, transactions_blob, transactions_blob_size);
#endif
hash check;
const std::vector<uint8_t>& consensus_id = sidechain.consensus_id();
const int data_size = static_cast<int>((data_end - data_begin) + outputs_blob_size_diff + transactions_blob_size_diff);
keccak_custom(
[nonce_offset, extra_nonce_offset, sidechain_hash_offset, data_begin, data_end, &consensus_id, &outputs_blob, outputs_blob_size_diff, outputs_offset, outputs_blob_size](int offset) -> uint8_t
[nonce_offset, extra_nonce_offset, sidechain_hash_offset, data_begin, data_size, &consensus_id, &outputs_blob, outputs_blob_size_diff, outputs_offset, outputs_blob_size, transactions_blob, transactions_blob_size_diff, transactions_offset, transactions_blob_size](int offset) -> uint8_t
{
uint32_t k = static_cast<uint32_t>(offset - nonce_offset);
if (k < NONCE_SIZE) {
@@ -303,24 +361,25 @@ int PoolBlock::deserialize(const uint8_t* data, size_t size, const SideChain& si
return 0;
}
const int data_size = static_cast<int>((data_end - data_begin) + outputs_blob_size_diff);
if (offset < data_size) {
if (offset < outputs_offset) {
return data_begin[offset];
}
else if (offset < outputs_offset + outputs_blob_size) {
const int tmp = offset - outputs_offset;
return outputs_blob[tmp];
return outputs_blob[offset - outputs_offset];
}
else {
else if (offset < transactions_offset + outputs_blob_size_diff) {
return data_begin[offset - outputs_blob_size_diff];
}
else if (offset < transactions_offset + outputs_blob_size_diff + transactions_blob_size) {
return transactions_blob[offset - (transactions_offset + outputs_blob_size_diff)];
}
return data_begin[offset - outputs_blob_size_diff - transactions_blob_size_diff];
}
offset -= data_size;
return consensus_id[offset];
return consensus_id[offset - data_size];
},
static_cast<int>(size + outputs_blob_size_diff + consensus_id.size()), check.h, HASH_SIZE);
static_cast<int>(size + outputs_blob_size_diff + transactions_blob_size_diff + consensus_id.size()), check.h, HASH_SIZE);
if (check != m_sidechainId) {
return __LINE__;
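
A note on the size accounting in the hash check above: the sidechain id always covers the fully expanded blob, so the keccak_custom callback splices the reconstructed transactions_blob back in and the hashed length grows by transactions_blob_size_diff. As a rough worked example with made-up numbers: a compact blob carrying 3 transactions as two one-byte indices (2 bytes) plus one inline entry (1 + 32 bytes) has transactions_actual_blob_size = 35, while transactions_blob_size = 3 * 32 = 96, so transactions_blob_size_diff = 61 and the callback feeds 61 extra bytes (the expanded hashes) into keccak on top of the raw input size.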

View file

@@ -49,7 +49,7 @@ TEST(pool_block, deserialize)
f.read(reinterpret_cast<char*>(buf.data()), buf.size());
ASSERT_EQ(f.good(), true);
ASSERT_EQ(b.deserialize(buf.data(), buf.size(), sidechain, nullptr), 0);
ASSERT_EQ(b.deserialize(buf.data(), buf.size(), sidechain, nullptr, false), 0);
size_t header_size, miner_tx_size;
int outputs_offset, outputs_blob_size;
@@ -137,7 +137,7 @@ TEST(pool_block, verify)
p += sizeof(uint32_t);
ASSERT_TRUE(p + n <= e);
ASSERT_EQ(b.deserialize(p, n, sidechain, nullptr), 0);
ASSERT_EQ(b.deserialize(p, n, sidechain, nullptr, false), 0);
p += n;
sidechain.add_block(b);
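
Worth noting: every call site touched by this commit still passes compact = false, so behaviour is unchanged until a peer message actually sends compact blobs. A hypothetical compact round-trip in the same style as these tests (child and compact_buf are made-up names, and the parent block must already be in the sidechain for find_block() to resolve the indices) would look like:

	ASSERT_EQ(child.deserialize(compact_buf.data(), compact_buf.size(), sidechain, nullptr, true), 0);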