Fixed data race in tx mempool

SChernykh 2024-03-07 17:48:49 +01:00
parent e82fe8cdc1
commit 29d84e3bcf
3 changed files with 26 additions and 14 deletions
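The race: the Mempool class exposed its transaction map and its read-write lock as public members, so thread safety depended on every call site taking the lock itself. BlockTemplate::update did (it took a ReadLock on mempool.m_lock), but the handle_miner_data log line below appears to read m_transactions.size() without holding the lock, racing against concurrent writers. The fix makes both members private and routes all reads through size() and iterate(), which acquire the read lock internally. The three hunks below are that change end to end: the block template code switches to iterate(), the Mempool class gains the locked accessors, and the miner-data log line switches to size().

A minimal self-contained sketch of the same pattern, not p2pool's actual code: std::shared_mutex stands in for the libuv uv_rwlock_t the project uses, and all type and function names here are illustrative.

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <mutex>
#include <shared_mutex>
#include <string>
#include <unordered_map>

struct TxData {
    uint64_t fee = 0;
    uint64_t time_received = 0;
};

class TxPool {
public:
    void add(const std::string& id, const TxData& tx)
    {
        std::unique_lock<std::shared_mutex> lock(m_lock); // writers take the lock exclusively
        m_transactions[id] = tx;
    }

    // Readers can no longer touch m_transactions directly; size() takes the
    // shared (read) lock itself, so a forgotten lock at a call site is
    // impossible by construction.
    std::size_t size() const
    {
        std::shared_lock<std::shared_mutex> lock(m_lock);
        return m_transactions.size();
    }

    // Visit every entry under the read lock, like the new Mempool::iterate().
    template<typename T>
    void iterate(T&& callback) const
    {
        std::shared_lock<std::shared_mutex> lock(m_lock);
        for (const auto& it : m_transactions) {
            callback(it.first, it.second);
        }
    }

private:
    mutable std::shared_mutex m_lock;
    std::unordered_map<std::string, TxData> m_transactions;
};

int main()
{
    TxPool pool;
    pool.add("tx1", { 7000000000ULL, 100 });

    std::size_t count = 0;
    pool.iterate([&count](const std::string&, const TxData&) { ++count; });
    std::printf("%zu transactions in the pool\n", count);
    return 0;
}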

src/block_template.cpp

@@ -322,22 +322,18 @@ void BlockTemplate::update(const MinerData& data, const Mempool& mempool, const
 	}

 	// Only choose transactions that were received 5 or more seconds ago, or high fee (>= 0.006 XMR) transactions
-	size_t total_mempool_transactions;
-	{
-		m_mempoolTxs.clear();
+	m_mempoolTxs.clear();

-		ReadLock mempool_lock(mempool.m_lock);
+	const uint64_t cur_time = seconds_since_epoch();

-		total_mempool_transactions = mempool.m_transactions.size();
+	size_t total_mempool_transactions = 0;

-		const uint64_t cur_time = seconds_since_epoch();
-
-		for (auto& it : mempool.m_transactions) {
-			if ((cur_time > it.second.time_received + 5) || (it.second.fee >= HIGH_FEE_VALUE)) {
-				m_mempoolTxs.emplace_back(it.second);
-			}
-		}
-	}
+	mempool.iterate([this, cur_time, &total_mempool_transactions](const hash&, const TxMempoolData& tx) {
+		++total_mempool_transactions;
+		if ((cur_time > tx.time_received + 5) || (tx.fee >= HIGH_FEE_VALUE)) {
+			m_mempoolTxs.emplace_back(tx);
+		}
+	});

 	// Safeguard for busy mempool moments
 	// If the block template gets too big, nodes won't be able to send and receive it because of p2p packet size limit
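The selection rule itself is unchanged by this hunk: a transaction is only picked up once it has been in the mempool for at least 5 seconds, unless it pays a high fee (the comment above puts the threshold at 0.006 XMR). A stand-alone rendering of that predicate, assuming Monero's 10^12 atomic units per XMR; the constant name and value are illustrative, not the repository's HIGH_FEE_VALUE definition.

#include <cstdint>

// 0.006 XMR expressed in atomic units (1 XMR = 1e12 atomic units).
// Illustrative only; the repository defines its own HIGH_FEE_VALUE constant.
constexpr uint64_t kHighFee = 6000000000ULL;

// Mirrors the condition used in BlockTemplate::update(): include a transaction
// once it is at least 5 seconds old, or immediately if its fee is high enough.
inline bool should_include(uint64_t cur_time, uint64_t time_received, uint64_t fee)
{
    return (cur_time > time_received + 5) || (fee >= kHighFee);
}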

src/mempool.h

@@ -34,7 +34,23 @@ public:
 	void add(const TxMempoolData& tx);
 	void swap(std::vector<TxMempoolData>& transactions);

 public:
+	size_t size() const
+	{
+		ReadLock lock(m_lock);
+		return m_transactions.size();
+	}
+
+	template<typename T>
+	void iterate(T&& callback) const
+	{
+		ReadLock lock(m_lock);
+		for (const auto& it : m_transactions) {
+			callback(it.first, it.second);
+		}
+	}
+
+private:
 	mutable uv_rwlock_t m_lock;
 	unordered_map<hash, TxMempoolData> m_transactions;
 };
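ReadLock in the new accessors is the project's scoped guard over uv_rwlock_t. As a sketch of what such a guard typically looks like, built on libuv's uv_rwlock_rdlock/uv_rwlock_rdunlock; an illustration, not the repository's actual helper.

#include <uv.h>

// RAII read-lock guard over an already-initialized uv_rwlock_t
// (uv_rwlock_init must have been called on it beforehand).
class ScopedReadLock {
public:
    explicit ScopedReadLock(uv_rwlock_t& lock) : m_lock(lock) { uv_rwlock_rdlock(&m_lock); }
    ~ScopedReadLock() { uv_rwlock_rdunlock(&m_lock); }

    ScopedReadLock(const ScopedReadLock&) = delete;
    ScopedReadLock& operator=(const ScopedReadLock&) = delete;

private:
    uv_rwlock_t& m_lock;
};

Because iterate() holds the read lock for the whole traversal, the callback sees a consistent view of the map, and a call site can no longer forget to lock before touching m_transactions.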

src/p2pool.cpp

@@ -361,7 +361,7 @@ void p2pool::handle_miner_data(MinerData& data)
 		"\ndifficulty = " << data.difficulty <<
 		"\nmedian_weight = " << data.median_weight <<
 		"\nalready_generated_coins = " << data.already_generated_coins <<
-		"\ntransactions = " << m_mempool->m_transactions.size() <<
+		"\ntransactions = " << m_mempool->size() <<
 		"\n---------------------------------------------------------------------------------------------------------------"
 	);