Mirror of https://github.com/SChernykh/p2pool.git (synced 2024-12-22 19:39:22 +00:00)
Fixed data races

Commit 134f2d68a3, parent d23c46ff84
6 changed files with 30 additions and 19 deletions
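Editor's note on the pattern behind this commit: the races fixed here fall into two groups. The connection counters (m_numConnections, m_numIncomingConnections) are read by the status/logging paths while networking callbacks update them, so they become std::atomic and the log statements read them with .load(). The current sidechain difficulty (m_curDifficulty) is read by several threads while update_chain_tip() rewrites it, so it gets a uv_rwlock_t guarded accessor. A minimal standalone sketch (hypothetical ServerSketch type, not p2pool code) of why the plain-integer counter is a data race and how the atomic version behaves:

```cpp
// Minimal sketch, not p2pool code: a plain uint32_t counter written by one
// thread and read by another without synchronization is a data race (UB in
// C++). Making the member std::atomic<uint32_t> gives both sides well-defined
// behavior; .load() mirrors the explicit loads added to the LOGINFO calls below.
#include <atomic>
#include <cstdint>
#include <cstdio>
#include <thread>

struct ServerSketch {
	// Before the fix: uint32_t m_numConnections = 0;   // racy when shared
	std::atomic<uint32_t> m_numConnections{ 0 };         // after the fix

	void on_connect()    { ++m_numConnections; }         // atomic read-modify-write
	void on_disconnect() { --m_numConnections; }

	void print_status() const
	{
		printf("Connections = %u\n", (unsigned)m_numConnections.load());
	}
};

int main()
{
	ServerSketch s;
	std::thread t([&s]() {
		for (int i = 0; i < 100000; ++i) { s.on_connect(); s.on_disconnect(); }
	});
	for (int i = 0; i < 10; ++i) {
		s.print_status();   // safe concurrent reads while t updates the counter
	}
	t.join();
	return 0;
}
```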
@@ -855,8 +855,10 @@ uint64_t P2PServer::get_random64()
 void P2PServer::print_status()
 {
+	MutexLock lock(m_peerListLock);
+
 	LOGINFO(0, "status" <<
-		"\nConnections = " << m_numConnections << " (" << m_numIncomingConnections << " incoming)" <<
+		"\nConnections = " << m_numConnections.load() << " (" << m_numIncomingConnections.load() << " incoming)" <<
 		"\nPeer list size = " << m_peerList.size() <<
 		"\nUptime = " << log::Duration(seconds_since_epoch() - m_pool->start_time())
 	);
 }
@@ -80,6 +80,7 @@ SideChain::SideChain(p2pool* pool, NetworkType type, const char* pool_name)
 
 	uv_mutex_init_checked(&m_sidechainLock);
 	uv_mutex_init_checked(&m_seenBlocksLock);
+	uv_rwlock_init_checked(&m_curDifficultyLock);
 
 	m_difficultyData.reserve(m_chainWindowSize);
 	m_tmpShares.reserve(m_chainWindowSize * 2);
@@ -164,6 +165,7 @@ SideChain::~SideChain()
 {
 	uv_mutex_destroy(&m_sidechainLock);
 	uv_mutex_destroy(&m_seenBlocksLock);
+	uv_rwlock_destroy(&m_curDifficultyLock);
 	for (auto& it : m_blocksById) {
 		delete it.second;
 	}
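The new uv_rwlock_t is initialized and destroyed alongside the existing mutexes here and is used through ReadLock/WriteLock guards in the hunks below. Those guards are p2pool's own helpers; purely as an illustration, a typical RAII shape for such guards over libuv's rwlock (names chosen to match the diff, implementation assumed) looks like this:

```cpp
// Illustrative sketch only -- p2pool defines its own lock-guard helpers.
// Shown here: the usual RAII pattern over libuv's uv_rwlock_t, so the
// read or write lock is always released when the guard goes out of scope.
#include <uv.h>

class ReadLock {
public:
	explicit ReadLock(uv_rwlock_t& lock) : m_lock(lock) { uv_rwlock_rdlock(&m_lock); }
	~ReadLock() { uv_rwlock_rdunlock(&m_lock); }

	ReadLock(const ReadLock&) = delete;
	ReadLock& operator=(const ReadLock&) = delete;

private:
	uv_rwlock_t& m_lock;
};

class WriteLock {
public:
	explicit WriteLock(uv_rwlock_t& lock) : m_lock(lock) { uv_rwlock_wrlock(&m_lock); }
	~WriteLock() { uv_rwlock_wrunlock(&m_lock); }

	WriteLock(const WriteLock&) = delete;
	WriteLock& operator=(const WriteLock&) = delete;

private:
	uv_rwlock_t& m_lock;
};
```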
@@ -263,7 +265,7 @@ void SideChain::fill_sidechain_data(PoolBlock& block, Wallet* w, const hash& txk
 		block.m_uncles.erase(std::unique(block.m_uncles.begin(), block.m_uncles.end()), block.m_uncles.end());
 	}
 
-	block.m_difficulty = m_curDifficulty;
+	block.m_difficulty = difficulty();
 	block.m_cumulativeDifficulty = tip->m_cumulativeDifficulty + block.m_difficulty;
 
 	for (const hash& uncle_id : block.m_uncles) {
@@ -395,7 +397,8 @@ bool SideChain::add_external_block(PoolBlock& block, std::vector<hash>& missing_
 		return false;
 	}
 
-	bool too_low_diff = (block.m_difficulty < m_curDifficulty);
+	const difficulty_type expected_diff = difficulty();
+	bool too_low_diff = (block.m_difficulty < expected_diff);
 	{
 		MutexLock lock(m_sidechainLock);
 		if (m_blocksById.find(block.m_sidechainId) != m_blocksById.end()) {
@@ -423,7 +426,7 @@ bool SideChain::add_external_block(PoolBlock& block, std::vector<hash>& missing_
 	LOGINFO(4, "add_external_block: height = " << block.m_sidechainHeight << ", id = " << block.m_sidechainId << ", mainchain height = " << block.m_txinGenHeight);
 
 	if (too_low_diff) {
-		LOGWARN(4, "add_external_block: block has too low difficulty " << block.m_difficulty << ", expected >= ~" << m_curDifficulty << ". Ignoring it.");
+		LOGWARN(4, "add_external_block: block has too low difficulty " << block.m_difficulty << ", expected >= ~" << expected_diff << ". Ignoring it.");
 		return true;
 	}
 
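A note on these two add_external_block hunks: the shared difficulty is now read once, under the lock, into a local expected_diff, and that same local drives both the comparison and the warning text. With two separate reads of m_curDifficulty, the logged value could differ from the one actually compared against if another thread updated the chain tip in between. A reduced, self-contained sketch of the "read it once" pattern (hypothetical names; std::mutex standing in for p2pool's lock types):

```cpp
// Sketch of the pattern, not the actual p2pool code.
#include <cstdint>
#include <cstdio>
#include <mutex>

struct ChainSketch {
	std::mutex lock;
	uint64_t cur_difficulty = 100000;

	uint64_t difficulty()                     // analogous to SideChain::difficulty()
	{
		std::lock_guard<std::mutex> g(lock);
		return cur_difficulty;                // copied out while the lock is held
	}
};

bool accept_block(ChainSketch& chain, uint64_t block_difficulty)
{
	const uint64_t expected_diff = chain.difficulty();   // single locked read
	if (block_difficulty < expected_diff) {
		// The logged value is guaranteed to be the one we compared against.
		printf("too low difficulty %llu, expected >= %llu\n",
		       (unsigned long long)block_difficulty,
		       (unsigned long long)expected_diff);
		return false;
	}
	return true;
}

int main()
{
	ChainSketch chain;
	return accept_block(chain, 50000) ? 0 : 1;
}
```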
@@ -672,10 +675,12 @@ void SideChain::print_status()
 	std::vector<hash> blocks_in_window;
 	blocks_in_window.reserve(m_chainWindowSize * 9 / 8);
 
+	const difficulty_type diff = difficulty();
+
 	MutexLock lock(m_sidechainLock);
 
 	uint64_t rem;
-	uint64_t pool_hashrate = udiv128(m_curDifficulty.hi, m_curDifficulty.lo, m_targetBlockTime, &rem);
+	uint64_t pool_hashrate = udiv128(diff.hi, diff.lo, m_targetBlockTime, &rem);
 
 	difficulty_type network_diff = m_pool->miner_data().difficulty;
 	uint64_t network_hashrate = udiv128(network_diff.hi, network_diff.lo, 120, &rem);
@@ -1405,10 +1410,13 @@ void SideChain::update_chain_tip(PoolBlock* block)
 	difficulty_type diff;
 	if (get_difficulty(block, m_difficultyData, diff)) {
 		m_chainTip = block;
-		m_curDifficulty = diff;
+		{
+			WriteLock lock(m_curDifficultyLock);
+			m_curDifficulty = diff;
+		}
 
 		LOGINFO(2, "new chain tip: next height = " << log::Gray() << block->m_sidechainHeight + 1 << log::NoColor() <<
-			", next difficulty = " << log::Gray() << m_curDifficulty << log::NoColor() <<
+			", next difficulty = " << log::Gray() << diff << log::NoColor() <<
 			", main chain height = " << log::Gray() << block->m_txinGenHeight);
 
 		block->m_wantBroadcast = true;
@@ -65,7 +65,7 @@ public:
 	const std::vector<uint8_t>& consensus_id() const { return m_consensusId; }
 	uint64_t chain_window_size() const { return m_chainWindowSize; }
 	NetworkType network_type() const { return m_networkType; }
-	const difficulty_type& difficulty() const { return m_curDifficulty; }
+	FORCEINLINE difficulty_type difficulty() const { ReadLock lock(m_curDifficultyLock); return m_curDifficulty; }
 	difficulty_type total_hashes() const;
 	uint64_t block_time() const { return m_targetBlockTime; }
 	uint64_t miner_count();
@@ -121,6 +121,7 @@ private:
 	std::vector<uint8_t> m_consensusId;
 	std::string m_consensusIdDisplayStr;
 
+	mutable uv_rwlock_t m_curDifficultyLock;
 	difficulty_type m_curDifficulty;
 
 	ChainMain m_watchBlock;
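Worth spelling out why the accessor now returns difficulty_type by value instead of const difficulty_type&: a returned reference would only be dereferenced by the caller after the ReadLock inside the accessor had already been released, so the read could still race with the writer in update_chain_tip(). Copying the value while the lock is held closes that window. A compact sketch of the reader/writer pair (hypothetical names; std::shared_mutex used in place of uv_rwlock_t):

```cpp
// Sketch only: by-value read under a shared lock, scoped write under an
// exclusive lock -- the same shape as the SideChain changes in this commit.
#include <shared_mutex>

struct Difficulty { unsigned long long lo = 0, hi = 0; };

class ChainSketch {
public:
	// Reader: the copy is taken while the shared lock is held, so the caller
	// never touches m_curDifficulty after the lock is released.
	Difficulty difficulty() const
	{
		std::shared_lock<std::shared_mutex> lock(m_curDifficultyLock);
		return m_curDifficulty;
	}

	// Writer (cf. update_chain_tip): keep the exclusive lock scope minimal
	// and do any logging afterwards from the local value, not the member.
	void set_difficulty(const Difficulty& diff)
	{
		{
			std::unique_lock<std::shared_mutex> lock(m_curDifficultyLock);
			m_curDifficulty = diff;
		}
		// log `diff` here, outside the lock
	}

private:
	mutable std::shared_mutex m_curDifficultyLock;
	Difficulty m_curDifficulty;
};

int main()
{
	ChainSketch chain;
	chain.set_difficulty(Difficulty{ 100000, 0 });
	return (chain.difficulty().lo == 100000) ? 0 : 1;
}
```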
@@ -102,6 +102,7 @@ void StratumServer::on_block(const BlockTemplate& block)
 		LOGINFO(4, "no clients connected");
 		return;
 	}
+	m_extraNonce.exchange(num_connections);
 
 	BlobsData* blobs_data = new BlobsData{};
 
@@ -113,9 +114,7 @@ void StratumServer::on_block(const BlockTemplate& block)
 	// Even if they do, they'll be added to the beginning of the list and will get their block template in on_login()
 	// We'll iterate through the list backwards so when we get to the beginning and run out of extra_nonce values, it'll be only new clients left
 	blobs_data->m_numClientsExpected = num_connections;
-	m_extraNonce.exchange(blobs_data->m_numClientsExpected);
-
-	blobs_data->m_blobSize = block.get_hashing_blobs(0, blobs_data->m_numClientsExpected, blobs_data->m_blobs, blobs_data->m_height, difficulty, sidechain_difficulty, blobs_data->m_seedHash, nonce_offset, blobs_data->m_templateId);
+	blobs_data->m_blobSize = block.get_hashing_blobs(0, num_connections, blobs_data->m_blobs, blobs_data->m_height, difficulty, sidechain_difficulty, blobs_data->m_seedHash, nonce_offset, blobs_data->m_templateId);
 
 	// Integrity checks
 	if (blobs_data->m_blobSize < 76) {
@@ -541,7 +540,7 @@ void StratumServer::print_stratum_status() const
 		"\nShares found = " << m_totalFoundShares <<
 		"\nAverage effort = " << average_effort << '%' <<
 		"\nCurrent effort = " << static_cast<double>(hashes_since_last_share) * 100.0 / m_pool->side_chain().difficulty().to_double() << '%' <<
-		"\nConnections = " << m_numConnections << " (" << m_numIncomingConnections << " incoming)"
+		"\nConnections = " << m_numConnections.load() << " (" << m_numIncomingConnections.load() << " incoming)"
 	);
 }
 
@@ -734,8 +733,9 @@ void StratumServer::on_blobs_ready()
 		}
 	}
 
-	if (numClientsProcessed != m_numConnections) {
-		LOGWARN(1, "client list is broken, expected " << m_numConnections << ", got " << numClientsProcessed << " clients");
+	const uint32_t num_connections = m_numConnections;
+	if (numClientsProcessed != num_connections) {
+		LOGWARN(1, "client list is broken, expected " << num_connections << ", got " << numClientsProcessed << " clients");
 	}
 }
 
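The StratumServer hunks apply the same snapshot discipline to the now-atomic connection counter: in on_block() the num_connections local feeds both the extra-nonce exchange and get_hashing_blobs(), and in on_blobs_ready() the counter is loaded once into a local num_connections that drives both the expected-client check and its warning, so clients connecting or disconnecting mid-function cannot make those uses disagree. A trimmed sketch (hypothetical free function and global, not the real code):

```cpp
// Sketch: load an atomic counter once per function and reuse the local copy,
// so a check and its log message always see the same snapshot.
#include <atomic>
#include <cstdint>
#include <cstdio>

std::atomic<uint32_t> g_num_connections{ 3 };

void on_blobs_ready_sketch(uint32_t num_clients_processed)
{
	const uint32_t num_connections = g_num_connections.load();   // single load
	if (num_clients_processed != num_connections) {
		printf("client list is broken, expected %u, got %u clients\n",
		       (unsigned)num_connections, (unsigned)num_clients_processed);
	}
}

int main()
{
	on_blobs_ready_sketch(2);   // concurrent counter changes cannot skew this call's view
	return 0;
}
```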
@@ -155,8 +155,8 @@ protected:
 	uv_mutex_t m_clientsListLock;
 	std::vector<Client*> m_preallocatedClients;
 	Client* m_connectedClientsList;
-	uint32_t m_numConnections;
-	uint32_t m_numIncomingConnections;
+	std::atomic<uint32_t> m_numConnections;
+	std::atomic<uint32_t> m_numIncomingConnections;
 
 	uv_mutex_t m_bansLock;
 	unordered_map<raw_ip, std::chrono::steady_clock::time_point> m_bans;
@@ -28,8 +28,8 @@ TCPServer<READ_BUF_SIZE, WRITE_BUF_SIZE>::TCPServer(allocate_client_callback all
 	, m_finished(0)
 	, m_listenPort(-1)
 	, m_loopStopped{false}
-	, m_numConnections(0)
-	, m_numIncomingConnections(0)
+	, m_numConnections{ 0 }
+	, m_numIncomingConnections{ 0 }
 {
 	int err = uv_loop_init(&m_loop);
 	if (err) {
@@ -495,7 +495,7 @@ template<size_t READ_BUF_SIZE, size_t WRITE_BUF_SIZE>
 void TCPServer<READ_BUF_SIZE, WRITE_BUF_SIZE>::print_status()
 {
 	LOGINFO(0, "status" <<
-		"\nConnections = " << m_numConnections << " (" << m_numIncomingConnections << " incoming)"
+		"\nConnections = " << m_numConnections.load() << " (" << m_numIncomingConnections.load() << " incoming)"
 	);
 }
 