Merge pull request #4909
756684bb blockchain: avoid unnecessary DB lookups when syncing (moneromooo-monero)
This commit is contained in: commit 83684ea515
2 changed files with 86 additions and 35 deletions
src/cryptonote_core/blockchain.cpp

@@ -3755,7 +3755,7 @@ void Blockchain::set_enforce_dns_checkpoints(bool enforce_checkpoints)
 }
 
 //------------------------------------------------------------------
-void Blockchain::block_longhash_worker(uint64_t height, const std::vector<block> &blocks, std::unordered_map<crypto::hash, crypto::hash> &map) const
+void Blockchain::block_longhash_worker(uint64_t height, const epee::span<const block> &blocks, std::unordered_map<crypto::hash, crypto::hash> &map) const
 {
   TIME_MEASURE_START(t);
   slow_hash_allocate_state();
@@ -3841,11 +3841,33 @@ bool Blockchain::cleanup_handle_incoming_blocks(bool force_sync)
 }
 
 //------------------------------------------------------------------
-void Blockchain::output_scan_worker(const uint64_t amount, const std::vector<uint64_t> &offsets, std::vector<output_data_t> &outputs) const
+void Blockchain::output_scan_worker(const uint64_t amount, const std::vector<uint64_t> &offsets, std::vector<output_data_t> &outputs, const std::vector<output_data_t> &extra_tx_map) const
 {
   try
   {
     m_db->get_output_key(epee::span<const uint64_t>(&amount, 1), offsets, outputs, true);
+    if (outputs.size() < offsets.size())
+    {
+      const uint64_t n_outputs = m_db->get_num_outputs(amount);
+      for (size_t i = outputs.size(); i < offsets.size(); ++i)
+      {
+        uint64_t idx = offsets[i];
+        if (idx < n_outputs)
+        {
+          MWARNING("Index " << idx << " not found in db for amount " << amount << ", but it is less than the number of entries");
+          break;
+        }
+        else if (idx < n_outputs + extra_tx_map.size())
+        {
+          outputs.push_back(extra_tx_map[idx - n_outputs]);
+        }
+        else
+        {
+          MWARNING("missed " << amount << "/" << idx << " in " << extra_tx_map.size() << " (chain " << n_outputs << ")");
+          break;
+        }
+      }
+    }
   }
   catch (const std::exception& e)
   {
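The fallback added above encodes a simple convention: a requested global output index below the DB's output count for that amount must come from the DB, while an index at or past it refers to an output created by the batch of blocks currently being synced, which exists only in the in-memory extra_tx_map. A minimal standalone sketch of that convention, using illustrative stub types rather than Monero's output_data_t or BlockchainDB:

// Standalone illustration of the "DB first, then in-memory extras" lookup
// added to output_scan_worker. All types here are simplified stand-ins,
// not the real Monero classes.
#include <cstdint>
#include <iostream>
#include <optional>
#include <vector>

struct StubOutput { uint64_t global_index; };      // stand-in for output_data_t

// Outputs already committed to the database for one amount.
static std::vector<StubOutput> db_outputs = { {0}, {1}, {2} };
// Outputs created by the batch of blocks currently being synced.
static std::vector<StubOutput> extra_outputs = { {3}, {4} };

// Resolve a global output index: indices below the DB's count come from the
// DB, indices beyond it are served from the in-memory extras.
std::optional<StubOutput> lookup(uint64_t idx)
{
  const uint64_t n_outputs = db_outputs.size();
  if (idx < n_outputs)
    return db_outputs[idx];                        // normal DB hit
  if (idx < n_outputs + extra_outputs.size())
    return extra_outputs[idx - n_outputs];         // output from the incoming batch
  return std::nullopt;                             // genuinely unknown index
}

int main()
{
  for (uint64_t idx : {1u, 4u, 9u})
  {
    auto out = lookup(idx);
    std::cout << "index " << idx << ": " << (out ? "found" : "missed") << "\n";
  }
}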
@@ -3960,6 +3982,34 @@ uint64_t Blockchain::prevalidate_block_hashes(uint64_t height, const std::vector
 // vs [k_image, output_keys] (m_scan_table). This is faster because it takes advantage of bulk queries
 // and is threaded if possible. The table (m_scan_table) will be used later when querying output
 // keys.
+static bool update_output_map(std::map<uint64_t, std::vector<output_data_t>> &extra_tx_map, const transaction &tx, uint64_t height, bool miner)
+{
+  MTRACE("Blockchain::" << __func__);
+  for (size_t i = 0; i < tx.vout.size(); ++i)
+  {
+    const auto &out = tx.vout[i];
+    if (out.target.type() != typeid(txout_to_key))
+      continue;
+    const txout_to_key &out_to_key = boost::get<txout_to_key>(out.target);
+    rct::key commitment;
+    uint64_t amount = out.amount;
+    if (miner && tx.version == 2)
+    {
+      commitment = rct::zeroCommit(amount);
+      amount = 0;
+    }
+    else if (tx.version > 1)
+    {
+      CHECK_AND_ASSERT_MES(i < tx.rct_signatures.outPk.size(), false, "Invalid outPk size");
+      commitment = tx.rct_signatures.outPk[i].mask;
+    }
+    else
+      commitment = rct::zero();
+    extra_tx_map[amount].push_back(output_data_t{out_to_key.key, tx.unlock_time, height, commitment});
+  }
+  return true;
+}
+
 bool Blockchain::prepare_handle_incoming_blocks(const std::vector<block_complete_entry> &blocks_entry)
 {
   MTRACE("Blockchain::" << __func__);
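update_output_map groups a transaction's outputs by amount, matching how the patch later indexes them: v2 miner outputs are recorded under amount 0 with a recomputed commitment, other RingCT outputs under amount 0 with their outPk mask, and pre-RingCT outputs under their plaintext amount. A rough standalone sketch of that grouping, with simplified stand-in types (commitments and unlock times omitted):

// Standalone sketch of grouping a transaction's outputs by amount, as
// update_output_map does. Types are simplified stand-ins, not Monero's.
#include <array>
#include <cstdint>
#include <iostream>
#include <map>
#include <vector>

struct StubOutput
{
  uint64_t amount;                 // 0 for RingCT outputs
  std::array<uint8_t, 32> key;     // stand-in for the one-time output key
};

struct StubTx
{
  int version;
  std::vector<StubOutput> vout;
};

using ExtraMap = std::map<uint64_t, std::vector<StubOutput>>;

// Append every output of tx to the per-amount map, the way the new
// extra_tx_map is filled while preparing incoming blocks.
void update_output_map(ExtraMap &extra, const StubTx &tx, bool miner)
{
  for (const StubOutput &out : tx.vout)
  {
    uint64_t amount = out.amount;
    if (miner && tx.version == 2)
      amount = 0;                  // v2 miner outputs are indexed under amount 0
    extra[amount].push_back(out);
  }
}

int main()
{
  ExtraMap extra;
  StubTx miner_tx{2, {{600000000000ull, {}}}};
  StubTx tx{2, {{0, {}}, {0, {}}}};
  update_output_map(extra, miner_tx, true);
  update_output_map(extra, tx, false);
  for (const auto &kv : extra)
    std::cout << "amount " << kv.first << ": " << kv.second.size() << " outputs\n";
}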
@@ -4006,42 +4056,40 @@ bool Blockchain::prepare_handle_incoming_blocks(const std::vector<block_complete
     m_blockchain_lock.lock();
   }
 
-  if ((m_db->height() + blocks_entry.size()) < m_blocks_hash_check.size())
+  const uint64_t height = m_db->height();
+  if ((height + blocks_entry.size()) < m_blocks_hash_check.size())
     return true;
 
   bool blocks_exist = false;
   tools::threadpool& tpool = tools::threadpool::getInstance();
-  uint64_t threads = tpool.get_max_concurrency();
+  unsigned threads = tpool.get_max_concurrency();
+  std::vector<block> blocks;
+  blocks.resize(blocks_entry.size());
 
-  if (blocks_entry.size() > 1 && threads > 1 && m_max_prepare_blocks_threads > 1)
+  if (1)
   {
     // limit threads, default limit = 4
     if(threads > m_max_prepare_blocks_threads)
       threads = m_max_prepare_blocks_threads;
 
-    uint64_t height = m_db->height();
-    int batches = blocks_entry.size() / threads;
-    int extra = blocks_entry.size() % threads;
+    unsigned int batches = blocks_entry.size() / threads;
+    unsigned int extra = blocks_entry.size() % threads;
     MDEBUG("block_batches: " << batches);
     std::vector<std::unordered_map<crypto::hash, crypto::hash>> maps(threads);
-    std::vector < std::vector < block >> blocks(threads);
     auto it = blocks_entry.begin();
+    unsigned blockidx = 0;
 
-    for (uint64_t i = 0; i < threads; i++)
+    for (unsigned i = 0; i < threads; i++)
    {
-      blocks[i].reserve(batches + 1);
-      for (int j = 0; j < batches; j++)
+      for (unsigned int j = 0; j < batches; j++, ++blockidx)
      {
-        block block;
+        block &block = blocks[blockidx];
 
         if (!parse_and_validate_block_from_blob(it->block, block))
-        {
-          std::advance(it, 1);
-          continue;
-        }
+          return false;
 
         // check first block and skip all blocks if its not chained properly
-        if (i == 0 && j == 0)
+        if (blockidx == 0)
         {
           crypto::hash tophash = m_db->top_block_hash();
           if (block.prev_id != tophash)
@@ -4056,20 +4104,16 @@ bool Blockchain::prepare_handle_incoming_blocks(const std::vector<block_complete
           break;
         }
 
-        blocks[i].push_back(std::move(block));
         std::advance(it, 1);
       }
     }
 
-    for (int i = 0; i < extra && !blocks_exist; i++)
+    for (unsigned i = 0; i < extra && !blocks_exist; i++, blockidx++)
     {
-      block block;
+      block &block = blocks[blockidx];
 
       if (!parse_and_validate_block_from_blob(it->block, block))
-      {
-        std::advance(it, 1);
-        continue;
-      }
+        return false;
 
       if (have_block(get_block_hash(block)))
       {
@@ -4077,7 +4121,6 @@ bool Blockchain::prepare_handle_incoming_blocks(const std::vector<block_complete
         break;
       }
 
-      blocks[i].push_back(std::move(block));
       std::advance(it, 1);
     }
   }
@@ -4086,10 +4129,13 @@ bool Blockchain::prepare_handle_incoming_blocks(const std::vector<block_complete
     m_blocks_longhash_table.clear();
     uint64_t thread_height = height;
     tools::threadpool::waiter waiter;
-    for (uint64_t i = 0; i < threads; i++)
+    for (unsigned int i = 0; i < threads; i++)
     {
-      tpool.submit(&waiter, boost::bind(&Blockchain::block_longhash_worker, this, thread_height, std::cref(blocks[i]), std::ref(maps[i])), true);
-      thread_height += blocks[i].size();
+      unsigned nblocks = batches;
+      if (i < extra)
+        ++nblocks;
+      tpool.submit(&waiter, boost::bind(&Blockchain::block_longhash_worker, this, thread_height, epee::span<const block>(&blocks[i], nblocks), std::ref(maps[i])), true);
+      thread_height += nblocks;
     }
 
     waiter.wait(&tpool);
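With blocks now a single flat vector, the work split is blocks_entry.size() / threads full batches plus one extra block for each of the first blocks_entry.size() % threads workers, and each worker receives a contiguous view of its slice instead of a copied per-thread sub-vector. A small standalone sketch of that partitioning; a plain pointer/length pair stands in for epee::span<const block>, and std::thread replaces the thread pool:

// Standalone sketch of splitting a flat vector of parsed blocks across
// worker threads, as prepare_handle_incoming_blocks now does. A plain
// (pointer, count) pair stands in for epee::span<const block>.
#include <cstddef>
#include <iostream>
#include <string>
#include <thread>
#include <vector>

struct StubBlock { std::string id; };   // stand-in for cryptonote::block

static void hash_worker(std::size_t start_height, const StubBlock *blocks, std::size_t n)
{
  // The real worker hashes each block; here we just report the slice.
  std::cout << "heights " << start_height << ".." << start_height + n - 1
            << " (" << n << " blocks)\n";
}

int main()
{
  std::vector<StubBlock> blocks(10);
  const std::size_t threads = 4;
  const std::size_t batches = blocks.size() / threads;  // 2
  const std::size_t extra   = blocks.size() % threads;  // 2: first two workers get 3

  std::vector<std::thread> pool;
  std::size_t offset = 0, height = 100000;              // hypothetical starting height
  for (std::size_t i = 0; i < threads; ++i)
  {
    std::size_t nblocks = batches + (i < extra ? 1 : 0);
    pool.emplace_back(hash_worker, height + offset, blocks.data() + offset, nblocks);
    offset += nblocks;
  }
  for (auto &t : pool)
    t.join();
}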
@@ -4132,7 +4178,7 @@ bool Blockchain::prepare_handle_incoming_blocks(const std::vector<block_complete
   // [input] stores all absolute_offsets for each amount
   std::map<uint64_t, std::vector<uint64_t>> offset_map;
   // [output] stores all output_data_t for each absolute_offset
-  std::map<uint64_t, std::vector<output_data_t>> tx_map;
+  std::map<uint64_t, std::vector<output_data_t>> tx_map, extra_tx_map;
   std::vector<std::pair<cryptonote::transaction, crypto::hash>> txes(total_txs);
 
 #define SCAN_TABLE_QUIT(m) \
@@ -4143,12 +4189,14 @@ bool Blockchain::prepare_handle_incoming_blocks(const std::vector<block_complete
   } while(0); \
 
   // generate sorted tables for all amounts and absolute offsets
-  size_t tx_index = 0;
+  size_t tx_index = 0, block_index = 0;
   for (const auto &entry : blocks_entry)
   {
     if (m_cancel)
       return false;
 
+    if (!update_output_map(extra_tx_map, blocks[block_index].miner_tx, height + block_index, true))
+      SCAN_TABLE_QUIT("Error building extra tx map.");
     for (const auto &tx_blob : entry.txs)
     {
       if (tx_index >= txes.size())
@@ -4207,7 +4255,10 @@ bool Blockchain::prepare_handle_incoming_blocks(const std::vector<block_complete
         offset_map[in_to_key.amount].push_back(offset);
 
       }
+      if (!update_output_map(extra_tx_map, tx, height + block_index, false))
+        SCAN_TABLE_QUIT("Error building extra tx map.");
     }
+    ++block_index;
   }
 
   // sort and remove duplicate absolute_offsets in offset_map
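Taken together with the earlier hunks, the bookkeeping rests on one assumed invariant: within a given amount, outputs are appended to extra_tx_map in the order they will receive global indices once the batch is stored (each block's miner transaction first, then its other transactions), so the k-th appended entry answers a lookup for global index n_outputs + k. A tiny sketch of that assumption, using plain strings as stand-ins for output data:

// Sketch of the index invariant relied on by the extra_tx_map fallback:
// within one amount, the k-th output appended from the incoming batch is
// addressed as global index n_outputs + k. Simplified stand-ins only.
#include <cassert>
#include <cstdint>
#include <iostream>
#include <string>
#include <vector>

int main()
{
  const uint64_t n_outputs = 1000;            // outputs of this amount already in the DB
  std::vector<std::string> extra;             // outputs appended while scanning the batch

  // Appended in block order: miner tx first, then the block's other txs.
  extra.push_back("block0/miner_tx/out0");
  extra.push_back("block0/tx1/out0");
  extra.push_back("block1/miner_tx/out0");

  const uint64_t requested = 1002;            // a global index referenced by a ring
  assert(requested >= n_outputs && requested < n_outputs + extra.size());
  std::cout << "global index " << requested << " -> "
            << extra[requested - n_outputs] << "\n";
}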
@@ -4230,7 +4281,7 @@ bool Blockchain::prepare_handle_incoming_blocks(const std::vector<block_complete
     for (size_t i = 0; i < amounts.size(); i++)
     {
       uint64_t amount = amounts[i];
-      tpool.submit(&waiter, boost::bind(&Blockchain::output_scan_worker, this, amount, std::cref(offset_map[amount]), std::ref(tx_map[amount])), true);
+      tpool.submit(&waiter, boost::bind(&Blockchain::output_scan_worker, this, amount, std::cref(offset_map[amount]), std::ref(tx_map[amount]), std::cref(extra_tx_map[amount])), true);
     }
     waiter.wait(&tpool);
   }
@@ -4239,7 +4290,7 @@ bool Blockchain::prepare_handle_incoming_blocks(const std::vector<block_complete
     for (size_t i = 0; i < amounts.size(); i++)
     {
       uint64_t amount = amounts[i];
-      output_scan_worker(amount, offset_map[amount], tx_map[amount]);
+      output_scan_worker(amount, offset_map[amount], tx_map[amount], extra_tx_map[amount]);
     }
   }
 
src/cryptonote_core/blockchain.h

@@ -918,7 +918,7 @@ namespace cryptonote
      * @param outputs return-by-reference the outputs collected
      */
     void output_scan_worker(const uint64_t amount,const std::vector<uint64_t> &offsets,
-        std::vector<output_data_t> &outputs) const;
+        std::vector<output_data_t> &outputs, const std::vector<output_data_t> &extra_tx_map) const;
 
     /**
      * @brief computes the "short" and "long" hashes for a set of blocks
@@ -927,7 +927,7 @@ namespace cryptonote
      * @param blocks the blocks to be hashed
      * @param map return-by-reference the hashes for each block
      */
-    void block_longhash_worker(uint64_t height, const std::vector<block> &blocks,
+    void block_longhash_worker(uint64_t height, const epee::span<const block> &blocks,
         std::unordered_map<crypto::hash, crypto::hash> &map) const;
 
     /**
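Changing the declaration from const std::vector<block>& to epee::span<const block> lets callers hand the worker any contiguous range of blocks without copying them into a per-thread vector. As a rough illustration of the kind of non-owning view such a span provides (an illustrative sketch, not the real epee class):

// Minimal non-owning view over a contiguous range, illustrating the sort of
// interface epee::span<const T> offers. Illustrative sketch only.
#include <cstddef>
#include <iostream>
#include <vector>

template <typename T>
class span_view
{
public:
  span_view(const T *ptr, std::size_t len) : ptr_(ptr), len_(len) {}
  const T *begin() const { return ptr_; }
  const T *end() const { return ptr_ + len_; }
  std::size_t size() const { return len_; }
  const T &operator[](std::size_t i) const { return ptr_[i]; }
private:
  const T *ptr_;
  std::size_t len_;
};

int main()
{
  std::vector<int> heights = {100, 101, 102, 103, 104};
  // View the middle three elements without copying them.
  span_view<int> view(heights.data() + 1, 3);
  for (int h : view)
    std::cout << h << "\n";
}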