Fixed get_block_template_backlog performance

Before the fix, it processed all transactions in the mempool, which could be very slow when the mempool grows to several MB in size. I observed `get_block_template_backlog` taking up to 15 seconds of CPU time under high mempool load.

After the fix, only transactions that can potentially be mined in the next block will be processed (a bit more than the current median block weight's worth of transactions, i.e. 112.5% of the median).
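Concretely, the new cap is `median_weight + median_weight / 8`, i.e. 112.5% of the current median block weight, computed in integer arithmetic. A minimal sketch of that calculation (the 300000-byte median is just an illustrative value):

```cpp
#include <cstdint>

// Mirrors the cap computed in the diff below: the median plus one eighth of it.
constexpr std::uint64_t max_backlog_weight(std::uint64_t median_weight)
{
  return median_weight + (median_weight / 8); // 112.5%, rounded down
}

static_assert(max_backlog_weight(300000) == 337500,
              "112.5% of a 300000-byte median weight");
```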
SChernykh 2022-06-08 22:58:13 +02:00
parent 9750e1fa10
commit f79765579c
2 changed files with 57 additions and 18 deletions


@@ -917,26 +917,61 @@ namespace cryptonote
   {
     CRITICAL_REGION_LOCAL(m_transactions_lock);
     CRITICAL_REGION_LOCAL1(m_blockchain);
-    const relay_category category = include_sensitive ? relay_category::all : relay_category::broadcasted;
-    backlog.reserve(m_blockchain.get_txpool_tx_count(include_sensitive));
-    txpool_tx_meta_t tmp_meta;
-    m_blockchain.for_all_txpool_txes([this, &backlog, &tmp_meta](const crypto::hash &txid, const txpool_tx_meta_t &meta, const cryptonote::blobdata_ref *bd){
-      transaction tx;
-      if (!(meta.pruned ? parse_and_validate_tx_base_from_blob(*bd, tx) : parse_and_validate_tx_from_blob(*bd, tx)))
-      {
-        MERROR("Failed to parse tx from txpool");
-        // continue
-        return true;
-      }
-      tx.set_hash(txid);
-      tmp_meta = meta;
-      if (is_transaction_ready_to_go(tmp_meta, txid, *bd, tx))
-        backlog.push_back({txid, meta.weight, meta.fee});
+    std::vector<tx_block_template_backlog_entry> tmp;
+    uint64_t total_weight = 0;
+
+    // First get everything from the mempool, filter it later
+    m_blockchain.for_all_txpool_txes([&tmp, &total_weight](const crypto::hash &txid, const txpool_tx_meta_t &meta, const cryptonote::blobdata_ref*){
+      tmp.emplace_back(tx_block_template_backlog_entry{txid, meta.weight, meta.fee});
+      total_weight += meta.weight;
       return true;
-    }, true, category);
+    }, false, include_sensitive ? relay_category::all : relay_category::broadcasted);
+
+    // Limit backlog to 112.5% of current median weight. This is enough to mine a full block with the optimal block reward
+    const uint64_t median_weight = m_blockchain.get_current_cumulative_block_weight_median();
+    const uint64_t max_backlog_weight = median_weight + (median_weight / 8);
+
+    // If the total weight is too high, choose the best paying transactions
+    if (total_weight > max_backlog_weight)
+      std::sort(tmp.begin(), tmp.end(), [](const auto& a, const auto& b){ return a.fee * b.weight > b.fee * a.weight; });
+
+    backlog.clear();
+    uint64_t w = 0;
+
+    std::unordered_set<crypto::key_image> k_images;
+
+    for (const tx_block_template_backlog_entry& e : tmp)
+    {
+      try
+      {
+        txpool_tx_meta_t meta;
+        if (!m_blockchain.get_txpool_tx_meta(e.id, meta))
+          continue;
+
+        cryptonote::blobdata txblob;
+        if (!m_blockchain.get_txpool_tx_blob(e.id, txblob, relay_category::all))
+          continue;
+
+        cryptonote::transaction tx;
+        if (is_transaction_ready_to_go(meta, e.id, txblob, tx))
+        {
+          if (have_key_images(k_images, tx))
+            continue;
+          append_key_images(k_images, tx);
+
+          backlog.push_back(e);
+          w += e.weight;
+          if (w > max_backlog_weight)
+            break;
+        }
+      }
+      catch (const std::exception &e)
+      {
+        MERROR("Failed to check transaction readiness: " << e.what());
+        // continue, not fatal
+      }
+    }
   }
   //------------------------------------------------------------------
   void tx_memory_pool::get_transaction_stats(struct txpool_stats& stats, bool include_sensitive) const
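The `have_key_images` / `append_key_images` pair in the loop above acts as a double-spend filter: a candidate is skipped if any of its key images was already claimed by a transaction accepted into the backlog. A minimal sketch of that pattern, with a plain integer standing in for `crypto::key_image` (the stand-in type and these exact signatures are illustrative, not the pool's real helpers):

```cpp
#include <cstdint>
#include <unordered_set>
#include <vector>

using key_image = std::uint64_t; // stand-in for crypto::key_image

// True if any of the transaction's key images was already claimed by an
// earlier transaction accepted into the backlog (a conflicting spend).
bool have_key_images(const std::unordered_set<key_image>& seen,
                     const std::vector<key_image>& tx_key_images)
{
  for (key_image ki : tx_key_images)
    if (seen.count(ki) != 0)
      return true;
  return false;
}

// Remember the accepted transaction's key images so that later, lower-paying
// transactions spending the same inputs get skipped.
void append_key_images(std::unordered_set<key_image>& seen,
                       const std::vector<key_image>& tx_key_images)
{
  seen.insert(tx_key_images.begin(), tx_key_images.end());
}
```

Without this check the backlog could report two conflicting transactions that both spend the same output, even though only one of them can ever be mined.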


@@ -266,7 +266,11 @@ namespace cryptonote
     void get_transaction_backlog(std::vector<tx_backlog_entry>& backlog, bool include_sensitive = false) const;
 
     /**
-     * @brief get (hash, weight, fee) for all transactions in the pool - the minimum required information to create a block template
+     * @brief get (hash, weight, fee) for transactions in the pool - the minimum required information to create a block template
      *
+     * Not all transactions in the pool will be returned for performance reasons
+     * If there are too many transactions in the pool, only the highest-paying transactions
+     * will be returned - but enough for the miner to create a full block
+     *
      * @param backlog return-by-reference that data
      * @param include_sensitive return stempool, anonymity-pool, and unrelayed txes
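To tie the updated comment back to the implementation, here is a hedged, self-contained sketch of the selection policy (no txpool access, readiness checks, or key-image handling; `entry` is an illustrative stand-in for `tx_block_template_backlog_entry`, and `median_weight` stands for the value returned by `get_current_cumulative_block_weight_median()`):

```cpp
#include <algorithm>
#include <cstdint>
#include <vector>

struct entry { std::uint64_t weight; std::uint64_t fee; }; // stand-in type

// Keep roughly one block's worth (112.5% of the median block weight) of the
// best-paying transactions; everything else is left out of the backlog.
std::vector<entry> pick_backlog(std::vector<entry> pool, std::uint64_t median_weight)
{
  const std::uint64_t max_backlog_weight = median_weight + (median_weight / 8);

  std::uint64_t total_weight = 0;
  for (const entry& e : pool)
    total_weight += e.weight;

  // Sorting is only needed when the whole pool does not fit under the cap.
  if (total_weight > max_backlog_weight)
    std::sort(pool.begin(), pool.end(), [](const entry& a, const entry& b) {
      return a.fee * b.weight > b.fee * a.weight; // higher fee per weight first
    });

  std::vector<entry> backlog;
  std::uint64_t w = 0;
  for (const entry& e : pool)
  {
    backlog.push_back(e);
    w += e.weight;
    if (w > max_backlog_weight)
      break; // enough weight to fill the next block
  }
  return backlog;
}
```

Comparing `a.fee * b.weight > b.fee * a.weight` is the integer-only way of asking whether `a` pays more fee per weight unit than `b`, avoiding floating-point division. The real implementation additionally re-checks each candidate's readiness and key images before including it, as shown in the first hunk, so the returned set can end up smaller than the cap.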