Mirror of https://github.com/SChernykh/p2pool.git (synced 2024-12-22 19:39:22 +00:00)
Refactored array_size to make it always constexpr
parent 39b2167c98
commit 9e90e988fa

5 changed files with 26 additions and 26 deletions
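The refactor in one sentence: `array_size` used to be a single overload taking an array by reference, `array_size(T(&)[N])`. Calling it on a non-static data member, as in `array_size(m_oldTemplates)`, binds the reference through `this`, so the result cannot appear in a constant expression inside an ordinary member function (for example as the initializer of a constexpr local, which the StratumServer change below needs). The new overload takes a pointer to a member array instead; `&Class::member` is a compile-time constant, so the call is always constexpr. A minimal standalone sketch of the idea (Foo, bar and f are hypothetical names; p2pool's FORCEINLINE macro is omitted):

#include <cstddef>

// Overload 1: array lvalue -- fine for locals, globals and statics.
template<typename T, size_t N> constexpr size_t array_size(T(&)[N]) { return N; }

// Overload 2: pointer to member array -- no object and no "this" involved,
// so the call is a constant expression in any context.
template<typename T, typename U, size_t N> constexpr size_t array_size(T(U::*)[N]) { return N; }

struct Foo {
	int bar[8];

	size_t f() const {
		// constexpr size_t n = array_size(bar);      // error: "this" is not usable here
		constexpr size_t n = array_size(&Foo::bar);   // OK: always a constant expression
		return n;
	}
};

static_assert(array_size(&Foo::bar) == 8, "size is deduced purely from the type");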
@@ -66,7 +66,7 @@ BlockTemplate::BlockTemplate(p2pool* pool)
 	m_mempoolTxsOrder.reserve(1024);
 	m_shares.reserve(m_pool->side_chain().chain_window_size() * 2);
 
-	for (size_t i = 0; i < array_size(m_oldTemplates); ++i) {
+	for (size_t i = 0; i < array_size(&BlockTemplate::m_oldTemplates); ++i) {
 		m_oldTemplates[i] = new BlockTemplate(*this);
 	}
 
@@ -79,7 +79,7 @@ BlockTemplate::BlockTemplate(p2pool* pool)
 
 BlockTemplate::~BlockTemplate()
 {
-	for (size_t i = 0; i < array_size(m_oldTemplates); ++i) {
+	for (size_t i = 0; i < array_size(&BlockTemplate::m_oldTemplates); ++i) {
 		delete m_oldTemplates[i];
 	}
 
@@ -185,7 +185,7 @@ void BlockTemplate::update(const MinerData& data, const Mempool& mempool, Wallet
 	WriteLock lock(m_lock);
 
 	if (m_templateId > 0) {
-		*m_oldTemplates[m_templateId % array_size(m_oldTemplates)] = *this;
+		*m_oldTemplates[m_templateId % array_size(&BlockTemplate::m_oldTemplates)] = *this;
 	}
 
 	++m_templateId;
@@ -848,7 +848,7 @@ bool BlockTemplate::get_difficulties(const uint32_t template_id, difficulty_type
 		return true;
 	}
 
-	const BlockTemplate* old = m_oldTemplates[template_id % array_size(m_oldTemplates)];
+	const BlockTemplate* old = m_oldTemplates[template_id % array_size(&BlockTemplate::m_oldTemplates)];
 
 	if (old && (template_id == old->m_templateId)) {
 		return old->get_difficulties(template_id, mainchain_difficulty, sidechain_difficulty);
@@ -871,7 +871,7 @@ uint32_t BlockTemplate::get_hashing_blob(const uint32_t template_id, uint32_t ex
 		return get_hashing_blob_nolock(extra_nonce, blob);
 	}
 
-	const BlockTemplate* old = m_oldTemplates[template_id % array_size(m_oldTemplates)];
+	const BlockTemplate* old = m_oldTemplates[template_id % array_size(&BlockTemplate::m_oldTemplates)];
 
 	if (old && (template_id == old->m_templateId)) {
 		return old->get_hashing_blob(template_id, extra_nonce, blob, height, difficulty, sidechain_difficulty, seed_hash, nonce_offset);
@@ -972,7 +972,7 @@ std::vector<uint8_t> BlockTemplate::get_block_template_blob(uint32_t template_id
 	ReadLock lock(m_lock);
 
 	if (template_id != m_templateId) {
-		const BlockTemplate* old = m_oldTemplates[template_id % array_size(m_oldTemplates)];
+		const BlockTemplate* old = m_oldTemplates[template_id % array_size(&BlockTemplate::m_oldTemplates)];
 		if (old && (template_id == old->m_templateId)) {
 			return old->get_block_template_blob(template_id, nonce_offset, extra_nonce_offset);
 		}
@@ -1035,7 +1035,7 @@ void BlockTemplate::submit_sidechain_block(uint32_t template_id, uint32_t nonce,
 		return;
 	}
 
-	BlockTemplate* old = m_oldTemplates[template_id % array_size(m_oldTemplates)];
+	BlockTemplate* old = m_oldTemplates[template_id % array_size(&BlockTemplate::m_oldTemplates)];
 
 	if (old && (template_id == old->m_templateId)) {
 		old->submit_sidechain_block(template_id, nonce, extra_nonce);
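All of the BlockTemplate call sites above share one pattern: old templates live in a fixed-size ring buffer indexed by `template_id % array_size(...)`, and a lookup is only valid if the stored entry still carries the requested id (the slot may have been overwritten by a newer template). A condensed sketch of that pattern; the capacity 4 and the Ring/put/get names are hypothetical, the diff does not show p2pool's actual declarations:

#include <cstddef>
#include <cstdint>

template<typename T, typename U, size_t N> constexpr size_t array_size(T(U::*)[N]) { return N; }

struct Template {
	uint32_t m_templateId = 0;
};

struct Ring {
	Template* m_old[4] = {};   // hypothetical capacity

	// Store: the template for an id overwrites slot id % N.
	void put(Template* t) {
		m_old[t->m_templateId % array_size(&Ring::m_old)] = t;
	}

	// Lookup: the slot may have been reused for a newer id, so verify it.
	Template* get(uint32_t id) const {
		Template* old = m_old[id % array_size(&Ring::m_old)];
		return (old && old->m_templateId == id) ? old : nullptr;
	}
};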
@@ -54,7 +54,7 @@ RandomX_Hasher::RandomX_Hasher(p2pool* pool)
 
 	const randomx_flags flags = randomx_get_flags();
 
-	for (size_t i = 0; i < array_size(m_cache); ++i) {
+	for (size_t i = 0; i < array_size(&RandomX_Hasher::m_cache); ++i) {
 		m_cache[i] = randomx_alloc_cache(flags | RANDOMX_FLAG_LARGE_PAGES);
 		if (!m_cache[i]) {
 			LOGWARN(1, "couldn't allocate RandomX cache using large pages");
@@ -70,7 +70,7 @@ RandomX_Hasher::RandomX_Hasher(p2pool* pool)
 	uv_rwlock_init_checked(&m_datasetLock);
 	uv_rwlock_init_checked(&m_cacheLock);
 
-	for (size_t i = 0; i < array_size(m_vm); ++i) {
+	for (size_t i = 0; i < array_size(&RandomX_Hasher::m_vm); ++i) {
 		uv_mutex_init_checked(&m_vm[i].mutex);
 		m_vm[i].vm = nullptr;
 	}
@@ -91,7 +91,7 @@ RandomX_Hasher::~RandomX_Hasher()
 	uv_rwlock_destroy(&m_datasetLock);
 	uv_rwlock_destroy(&m_cacheLock);
 
-	for (size_t i = 0; i < array_size(m_vm); ++i) {
+	for (size_t i = 0; i < array_size(&RandomX_Hasher::m_vm); ++i) {
 		{
 			MutexLock lock(m_vm[i].mutex);
 			if (m_vm[i].vm) {
@@ -105,7 +105,7 @@ RandomX_Hasher::~RandomX_Hasher()
 		randomx_release_dataset(m_dataset);
 	}
 
-	for (size_t i = 0; i < array_size(m_cache); ++i) {
+	for (size_t i = 0; i < array_size(&RandomX_Hasher::m_cache); ++i) {
 		if (m_cache[i]) {
 			randomx_release_cache(m_cache[i]);
 		}
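In RandomX_Hasher the substitution is the same; the surrounding context is the usual RandomX allocation dance: request the cache with large pages first, then fall back when that fails. A sketch of that fallback using the public RandomX API; the retry-without-large-pages call is an assumption, this diff only shows the warning line:

#include "randomx.h"

// Allocate a RandomX cache, preferring large pages (assumed fallback).
randomx_cache* alloc_cache_with_fallback()
{
	const randomx_flags flags = randomx_get_flags();

	randomx_cache* cache = randomx_alloc_cache(flags | RANDOMX_FLAG_LARGE_PAGES);
	if (!cache) {
		// Large pages unavailable or not configured: retry without them.
		cache = randomx_alloc_cache(flags);
	}
	return cache;
}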
@@ -231,7 +231,7 @@ bool StratumServer::on_login(StratumClient* client, uint32_t id, const char* log
 
 		job_id = client->m_perConnectionJobId++;
 
-		StratumClient::SavedJob& saved_job = client->m_jobs[job_id % array_size(client->m_jobs)];
+		StratumClient::SavedJob& saved_job = client->m_jobs[job_id % array_size(&StratumClient::m_jobs)];
 		saved_job.job_id = job_id;
 		saved_job.extra_nonce = extra_nonce;
 		saved_job.template_id = template_id;
@@ -309,7 +309,7 @@ bool StratumServer::on_submit(StratumClient* client, uint32_t id, const char* jo
 	{
 		MutexLock lock(client->m_jobsLock);
 
-		const StratumClient::SavedJob& saved_job = client->m_jobs[job_id % array_size(client->m_jobs)];
+		const StratumClient::SavedJob& saved_job = client->m_jobs[job_id % array_size(&StratumClient::m_jobs)];
 		if (saved_job.job_id == job_id) {
 			template_id = saved_job.template_id;
 			extra_nonce = saved_job.extra_nonce;
@@ -509,7 +509,7 @@ void StratumServer::on_blobs_ready()
 
 			job_id = client->m_perConnectionJobId++;
 
-			StratumClient::SavedJob& saved_job = client->m_jobs[job_id % array_size(client->m_jobs)];
+			StratumClient::SavedJob& saved_job = client->m_jobs[job_id % array_size(&StratumClient::m_jobs)];
 			saved_job.job_id = job_id;
 			saved_job.extra_nonce = extra_nonce;
 			saved_job.template_id = data->m_templateId;
@@ -552,10 +552,9 @@ void StratumServer::on_blobs_ready()
 	LOGINFO(3, "sent new job to " << extra_nonce << '/' << numClientsProcessed << " clients");
 }
 
-void StratumServer::update_hashrate_data(uint64_t target, time_t timestamp)
+void StratumServer::update_hashrate_data(uint64_t hashes, time_t timestamp)
 {
-	uint64_t rem;
-	const uint64_t hashes = (target > 1) ? udiv128(1, 0, target, &rem) : 0;
+	constexpr size_t N = array_size(&StratumServer::m_hashrateData);
 
 	WriteLock lock(m_hashrateDataLock);
 
@@ -567,20 +566,20 @@ void StratumServer::update_hashrate_data(uint64_t target, time_t timestamp)
 		head.m_cumulativeHashes = m_cumulativeHashes;
 	}
 	else {
-		m_hashrateDataHead = (m_hashrateDataHead + 1) % array_size(m_hashrateData);
+		m_hashrateDataHead = (m_hashrateDataHead + 1) % N;
 		data[m_hashrateDataHead] = { timestamp, m_cumulativeHashes };
 	}
 
 	while (data[m_hashrateDataTail_15m].m_timestamp + 15 * 60 < timestamp) {
-		m_hashrateDataTail_15m = (m_hashrateDataTail_15m + 1) % array_size(m_hashrateData);
+		m_hashrateDataTail_15m = (m_hashrateDataTail_15m + 1) % N;
 	}
 
 	while (data[m_hashrateDataTail_1h].m_timestamp + 60 * 60 < timestamp) {
-		m_hashrateDataTail_1h = (m_hashrateDataTail_1h + 1) % array_size(m_hashrateData);
+		m_hashrateDataTail_1h = (m_hashrateDataTail_1h + 1) % N;
 	}
 
 	while (data[m_hashrateDataTail_24h].m_timestamp + 60 * 60 * 24 < timestamp) {
-		m_hashrateDataTail_24h = (m_hashrateDataTail_24h + 1) % array_size(m_hashrateData);
+		m_hashrateDataTail_24h = (m_hashrateDataTail_24h + 1) % N;
 	}
 }
 
@@ -598,6 +597,9 @@ void StratumServer::on_share_found(uv_work_t* req)
 		target = (target >> 32) << 32;
 	}
 
+	uint64_t rem;
+	const uint64_t hashes = (target > 1) ? udiv128(1, 0, target, &rem) : 0;
+
 	if (pool->stopped()) {
 		LOGWARN(0, "p2pool is shutting down, but a share was found. Trying to process it anyway!");
 	}
@@ -635,9 +637,6 @@ void StratumServer::on_share_found(uv_work_t* req)
 		return;
 	}
 
-	uint64_t rem;
-	const uint64_t hashes = (target > 1) ? udiv128(1, 0, target, &rem) : 0;
-
 	const uint64_t n = server->m_cumulativeHashes + hashes;
 	const double diff = sidechain_difficulty.to_double();
 	const double effort = static_cast<double>(n - server->m_cumulativeHashesAtLastShare) * 100.0 / diff;
@@ -655,7 +654,7 @@ void StratumServer::on_share_found(uv_work_t* req)
 
 	if (LIKELY(value < target)) {
 		const time_t timestamp = time(nullptr);
-		server->update_hashrate_data(target, timestamp);
+		server->update_hashrate_data(hashes, timestamp);
 		server->api_update_local_stats(timestamp);
 		share->m_result = SubmittedShare::Result::OK;
 	}
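The stratum change is slightly more than a rename: the target-to-hashes conversion moves out of update_hashrate_data() and up into on_share_found(), so the function now receives the estimated hash count directly and no longer needs udiv128 at all. The estimate itself is unchanged: a share found at 64-bit target t represents on average 2^64 / t hashes, which udiv128(1, 0, t, &rem) computes as a 128-by-64-bit division. A sketch of the same computation without p2pool's udiv128 helper; hashes_for_target is a hypothetical name and unsigned __int128 is a GCC/Clang extension:

#include <cstdint>

// Expected hashes per share at a given 64-bit target: 2^64 / target.
// Mirrors the diff's (target > 1) ? udiv128(1, 0, target, &rem) : 0.
static uint64_t hashes_for_target(uint64_t target)
{
	if (target <= 1) {
		return 0;   // guard from the diff; 2^64 / 1 would overflow uint64_t
	}
	const unsigned __int128 two_pow_64 = (unsigned __int128)1 << 64;
	return (uint64_t)(two_pow_64 / target);
}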
@@ -156,7 +156,7 @@ private:
 
 	time_t m_apiLastUpdateTime;
 
-	void update_hashrate_data(uint64_t target, time_t timestamp);
+	void update_hashrate_data(uint64_t hashes, time_t timestamp);
 	void api_update_local_stats(time_t timestamp);
 };
 
@@ -95,6 +95,7 @@ FORCEINLINE void writeVarint(T value, std::vector<uint8_t>& out)
 }
 
 template<typename T, size_t N> FORCEINLINE constexpr size_t array_size(T(&)[N]) { return N; }
+template<typename T, typename U, size_t N> FORCEINLINE constexpr size_t array_size(T(U::*)[N]) { return N; }
 
 [[noreturn]] void panic();
 
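Usage note: this one-line overload is what the StratumServer hunk above relies on. Inside a member function,

	constexpr size_t N = array_size(&StratumServer::m_hashrateData);

is a constant expression, whereas the old call form array_size(m_hashrateData) binds the array reference through this and is rejected in constexpr contexts.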