Mirror of https://github.com/Cyrix126/gupaxx.git (synced 2024-12-22 06:39:21 +00:00)
fix: get p2pool data request timing right
commit 0145dd7889 (parent db39bcaaa2)

2 changed files with 5 additions and 5 deletions
@@ -546,11 +546,11 @@ impl Helper
             let last_p2pool_request_expired =
                 last_p2pool_request.elapsed() >= Duration::from_secs(60);
             // need to reload fast to get the first right values after syncing.
-            if last_p2pool_request_expired
-                || (!lock!(pub_api).p2pool_difficulty_u64 > 100_000
-                    && lock!(process).state == ProcessState::Alive)
+            // check if value is 100k or under and request immediately if that's the case. fixed in release of p2pool including commit https://github.com/SChernykh/p2pool/commit/64a199be6dec7924b41f857a401086f25e1ec9be
+            if (last_p2pool_request_expired || lock!(pub_api).p2pool_difficulty_u64 <= 100000)
+                && lock!(process).state == ProcessState::Alive
             {
-                debug!("P2Pool Watchdog | Attempting [network] & [pool] API file read");
+                info!("P2Pool Watchdog | Attempting [network] & [pool] API file read");
                 if let (Ok(network_api), Ok(pool_api)) = (
                     Self::path_to_string(&api_path_network, ProcessName::P2pool),
                     Self::path_to_string(&api_path_pool, ProcessName::P2pool),
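Why the old condition misfired: in Rust, unary ! on an unsigned integer is bitwise NOT, and it binds tighter than >, so "!lock!(pub_api).p2pool_difficulty_u64 > 100_000" compared the bitwise complement of the difficulty with 100_000 and was true for practically every value. With the old ||/&& grouping, the watchdog therefore hit the API files on every loop pass while p2pool was alive rather than on the intended schedule. Below is a minimal standalone sketch of the difference (illustration only, not Gupaxx code):

fn main() {
    let difficulty: u64 = 250_000; // already synced, well above the 100k threshold

    // Old grouping: parses as (!difficulty) > 100_000 — the bitwise complement
    // of 250_000 is a huge number, so this is true and a request fires even
    // though the node is already synced.
    let old_check = !difficulty > 100_000;

    // New check: request early only while the reported difficulty is still
    // at or under 100k, i.e. p2pool has not finished syncing yet.
    let new_check = difficulty <= 100_000;

    println!("old grouping fires: {old_check}"); // true
    println!("new check fires:    {new_check}"); // false
}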
@@ -273,7 +273,7 @@ pub const STATUS_SUBMENU_YOUR_P2POOL_DOMINANCE: &str =
     "The percent of hashrate you account for in P2Pool";
 pub const STATUS_SUBMENU_YOUR_MONERO_DOMINANCE: &str =
     "The percent of hashrate you account for in the entire Monero network";
-pub const STATUS_SUBMENU_PROGRESS_BAR: &str = "The next time Gupaxx will update P2Pool stats. Each [*] is 900ms (updates roughly every 54 seconds)";
+pub const STATUS_SUBMENU_PROGRESS_BAR: &str = "The next time Gupaxx will update P2Pool stats.";
 //-- Benchmarks
 pub const STATUS_SUBMENU_YOUR_CPU: &str = "The CPU detected by Gupaxx";
 pub const STATUS_SUBMENU_YOUR_BENCHMARKS: &str =
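The tooltip change in the second hunk presumably follows from the first: once a refresh can fire before the 60-second timer (whenever the reported difficulty is still 100k or under), the old "Each [*] is 900ms (updates roughly every 54 seconds)" wording no longer describes a fixed interval, so the text was shortened. A standalone sketch of the new request decision follows; the function name and the is_alive flag are illustrative assumptions, not taken from the Gupaxx codebase:

use std::time::{Duration, Instant};

// Hypothetical stand-alone version of the watchdog's request-timing decision.
fn should_request_p2pool_stats(last_request: Instant, difficulty: u64, is_alive: bool) -> bool {
    let expired = last_request.elapsed() >= Duration::from_secs(60);
    // Normal path: refresh once the 60-second timer expires.
    // Fast path: refresh immediately while the reported difficulty is still
    // 100k or under, i.e. p2pool has not finished syncing yet.
    // Both paths require the p2pool process to be alive.
    (expired || difficulty <= 100_000) && is_alive
}

fn main() {
    let just_now = Instant::now();
    assert!(should_request_p2pool_stats(just_now, 50_000, true)); // still syncing: refresh now
    assert!(!should_request_p2pool_stats(just_now, 250_000, true)); // synced, timer not expired: wait
    assert!(!should_request_p2pool_stats(just_now, 50_000, false)); // process not alive: never
}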