Mirror of https://github.com/Cyrix126/gupaxx.git
feat: fix deadlocks
Parent: 5230d46d93
Commit: 387f386573
10 changed files with 538 additions and 540 deletions
@@ -27,6 +27,12 @@ impl eframe::App for App {
         // These values are checked multiple times so
         // might as well check only once here to save
         // on a bunch of [.lock().unwrap()]s.
+        debug!("App | Locking and collecting Node state...");
+        let node = self.node.lock().unwrap();
+        let node_is_alive = node.is_alive();
+        let node_is_waiting = node.is_waiting();
+        let node_state = node.state;
+        drop(node);
         debug!("App | Locking and collecting P2Pool state...");
         let p2pool = self.p2pool.lock().unwrap();
         let p2pool_is_alive = p2pool.is_alive();
@@ -49,14 +55,9 @@ impl eframe::App for App {
         let xvb = self.xvb.lock().unwrap();
         let xvb_is_alive = xvb.is_alive();
         let xvb_is_waiting = xvb.is_waiting();
+        let xvb_is_running = xvb.state == ProcessState::Alive;
         let xvb_state = xvb.state;
         drop(xvb);
-        debug!("App | Locking and collecting Node state...");
-        let node = self.node.lock().unwrap();
-        let node_is_alive = node.is_alive();
-        let node_is_waiting = node.is_waiting();
-        let node_state = node.state;
-        drop(node);

         // This sets the top level Ui dimensions.
         // Used as a reference for other uis.
@@ -139,6 +140,7 @@ impl eframe::App for App {
             xmrig_is_alive,
             xmrig_proxy_is_alive,
             xvb_is_alive,
+            xvb_is_running,
         );
     }
 }
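The hunks above are the first half of the fix: each process mutex is locked once per frame, the plain values the GUI needs are copied out, and the guard is dropped immediately. A minimal self-contained sketch of that lock-copy-drop pattern (the Process/ProcessState types here are simplified stand-ins, not the repo's real ones):

    use std::sync::{Arc, Mutex};

    #[derive(Clone, Copy, PartialEq, Debug)]
    enum ProcessState { Alive, Dead }

    struct Process { state: ProcessState }

    impl Process {
        fn is_alive(&self) -> bool { self.state == ProcessState::Alive }
    }

    // One frame of work: lock once, copy plain values, drop the guard,
    // then use only the copies for the rest of the frame.
    fn frame(node: &Arc<Mutex<Process>>) {
        let guard = node.lock().unwrap();
        let node_is_alive = guard.is_alive();
        let node_state = guard.state;
        drop(guard);

        // Nothing below holds (or can re-take) the `node` lock.
        if node_is_alive {
            println!("node is alive, state = {:?}", node_state);
        }
    }

    fn main() {
        let node = Arc::new(Mutex::new(Process { state: ProcessState::Alive }));
        frame(&node);
    }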
@@ -1,6 +1,5 @@
 use crate::app::keys::KeyPressed;
 use crate::app::Tab;
-use crate::helper::ProcessState;
 use crate::utils::constants::*;
 use crate::utils::errors::{ErrorButtons, ErrorFerris};
 use egui::*;
@@ -25,6 +24,7 @@ impl crate::app::App {
         xmrig_is_alive: bool,
         xmrig_proxy_is_alive: bool,
         xvb_is_alive: bool,
+        xvb_is_running: bool,
     ) {
         // Middle panel, contents of the [Tab]
         debug!("App | Rendering CENTRAL_PANEL (tab contents)");
@@ -43,12 +43,14 @@ impl crate::app::App {
         let distro = true;
         #[cfg(not(feature = "distro"))]
         let distro = false;
         let node_gui_len = self.node_api.lock().unwrap().output.len();
         let p2pool_gui_len = self.p2pool_api.lock().unwrap().output.len();
         let xmrig_gui_len = self.xmrig_api.lock().unwrap().output.len();
         let xmrig_proxy_gui_len = self.xmrig_proxy_api.lock().unwrap().output.len();
         let gupax_p2pool_api = self.gupax_p2pool_api.lock().unwrap();
         let debug_info = format!(
             "Gupax version: {}\n
 Bundled Node version: {}\n
 Bundled P2Pool version: {}\n
 Bundled XMRig version: {}\n
 Bundled XMRig-Proxy version: {}\n
@@ -114,6 +116,7 @@ path_xmr: {:#?}\n
             self.state.gupax.absolute_p2pool_path.display(),
             self.state.gupax.absolute_xmrig_path.display(),
             self.state.gupax.absolute_xp_path.display(),
             node_gui_len,
             p2pool_gui_len,
             xmrig_gui_len,
             xmrig_proxy_gui_len,
@@ -181,7 +184,7 @@ path_xmr: {:#?}\n
             }
             Tab::Xvb => {
                 debug!("App | Entering [XvB] Tab");
-                crate::disk::state::Xvb::show(&mut self.state.xvb, self.size, &self.state.p2pool.address, ctx, ui, &self.xvb_api, &self.xmrig_api, self.xvb.lock().unwrap().state == ProcessState::Alive);
+                crate::disk::state::Xvb::show(&mut self.state.xvb, self.size, &self.state.p2pool.address, ctx, ui, &self.xvb_api, &self.xmrig_api, xvb_is_running);
             }
         }
     });
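The tab code now receives xvb_is_running as a plain bool computed once in update() instead of re-locking self.xvb inside the render call. A hedged sketch of the same idea; the struct and method names below are illustrative only, not the actual gupaxx signatures:

    use std::sync::{Arc, Mutex};

    struct Xvb { running: bool }

    struct App { xvb: Arc<Mutex<Xvb>> }

    impl App {
        fn update(&mut self) {
            // Compute the flag once per frame while the guard is held briefly...
            let xvb_is_running = self.xvb.lock().unwrap().running;
            // ...and hand the plain value down instead of the mutex.
            self.middle_panel(xvb_is_running);
        }

        fn middle_panel(&mut self, xvb_is_running: bool) {
            // The UI code only reads a copy; it cannot re-lock (or block on)
            // `self.xvb` no matter what else this frame does.
            if xvb_is_running {
                println!("show running-state widgets");
            }
        }
    }

    fn main() {
        let mut app = App { xvb: Arc::new(Mutex::new(Xvb { running: true })) };
        app.update();
    }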
@@ -369,8 +369,12 @@ impl Update {
             info!("Update | Saving state...");
             let original_version = og.lock().unwrap().version.clone();
             og.lock().unwrap().version = state_ver;
-            match State::save(&mut og.lock().unwrap(), &state_path) {
-                Ok(_) => info!("Update ... OK"),
+            let mut state = og.lock().unwrap().to_owned();
+            match State::save(&mut state, &state_path) {
+                Ok(_) => {
+                    info!("Update ... OK");
+                    *og.lock().unwrap() = state;
+                }
                 Err(e) => {
                     warn!("Update | Saving state ... FAIL: {}", e);
                     og.lock().unwrap().version = original_version;
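Update now clones the state out of the mutex, runs the save on the clone, and writes it back only on success, so og is not kept locked across the (potentially slow) disk write. A simplified sketch of that clone, save, write-back shape; State and save_to_disk below are stand-ins for the real disk code:

    use std::sync::{Arc, Mutex};

    #[derive(Clone)]
    struct State { version: String }

    fn save_to_disk(state: &State) -> Result<(), String> {
        // Stand-in for State::save(): pretend to serialize and write a file.
        if state.version.is_empty() { Err("empty version".into()) } else { Ok(()) }
    }

    fn save_state(og: &Arc<Mutex<State>>) {
        // Clone while holding the lock only briefly.
        let snapshot = og.lock().unwrap().clone();
        // The lock is free while the slow work runs, so other threads
        // (e.g. the GUI) can still read `og` without blocking.
        match save_to_disk(&snapshot) {
            Ok(_) => {
                // Commit the snapshot back only after the save succeeded.
                *og.lock().unwrap() = snapshot;
            }
            Err(e) => eprintln!("Saving state ... FAIL: {e}"),
        }
    }

    fn main() {
        let og = Arc::new(Mutex::new(State { version: "1.0".into() }));
        save_state(&og);
    }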
@@ -466,39 +466,39 @@ impl Helper {

         // 2. Lock... EVERYTHING!
         let mut lock = helper.lock().unwrap();
-        debug!("Helper | Locking (1/15) ... [helper]");
+        debug!("Helper | Locked (1/17) ... [helper]");
         let node = node.lock().unwrap();
-        debug!("Helper | Locking (2/15) ... [helper]");
+        debug!("Helper | Locked (2/17) ... [node]");
         let p2pool = p2pool.lock().unwrap();
-        debug!("Helper | Locking (3/15) ... [p2pool]");
+        debug!("Helper | Locked (3/17) ... [p2pool]");
         let xmrig = xmrig.lock().unwrap();
-        debug!("Helper | Locking (4/15) ... [xmrig]");
+        debug!("Helper | Locked (4/17) ... [xmrig]");
         let xmrig_proxy = xmrig_proxy.lock().unwrap();
-        debug!("Helper | Locking (5/15) ... [xmrig_proxy]");
+        debug!("Helper | Locked (5/17) ... [xmrig_proxy]");
         let xvb = xvb.lock().unwrap();
-        debug!("Helper | Locking (6/15) ... [xvb]");
+        debug!("Helper | Locked (6/17) ... [xvb]");
         let mut lock_pub_sys = pub_sys.lock().unwrap();
-        debug!("Helper | Locking (8/15) ... [gui_api_node]");
+        debug!("Helper | Locked (8/17) ... [pub_sys]");
         let mut gui_api_node = gui_api_node.lock().unwrap();
-        debug!("Helper | Locking (7/15) ... [pub_sys]");
+        debug!("Helper | Locked (7/17) ... [gui_api_node]");
         let mut gui_api_p2pool = gui_api_p2pool.lock().unwrap();
-        debug!("Helper | Locking (8/15) ... [gui_api_p2pool]");
+        debug!("Helper | Locked (9/17) ... [gui_api_p2pool]");
         let mut gui_api_xmrig = gui_api_xmrig.lock().unwrap();
-        debug!("Helper | Locking (9/15) ... [gui_api_xmrig]");
+        debug!("Helper | Locked (10/17) ... [gui_api_xmrig]");
         let mut gui_api_xp = gui_api_xp.lock().unwrap();
-        debug!("Helper | Locking (10/15) ... [gui_api_xp]");
+        debug!("Helper | Locked (11/17) ... [gui_api_xp]");
         let mut gui_api_xvb = gui_api_xvb.lock().unwrap();
-        debug!("Helper | Locking (11/15) ... [gui_api_xvb]");
+        debug!("Helper | Locked (12/17) ... [gui_api_xvb]");
         let mut pub_api_node = pub_api_node.lock().unwrap();
-        debug!("Helper | Locking (14/15) ... [pub_api_node]");
+        debug!("Helper | Locked (13/17) ... [pub_api_node]");
         let mut pub_api_p2pool = pub_api_p2pool.lock().unwrap();
-        debug!("Helper | Locking (14/15) ... [pub_api_p2pool]");
+        debug!("Helper | Locked (14/17) ... [pub_api_p2pool]");
         let mut pub_api_xmrig = pub_api_xmrig.lock().unwrap();
-        debug!("Helper | Locking (13/15) ... [pub_api_xmrig]");
+        debug!("Helper | Locked (15/17) ... [pub_api_xmrig]");
         let mut pub_api_xp = pub_api_xp.lock().unwrap();
-        debug!("Helper | Locking (14/15) ... [pub_api_xp]");
+        debug!("Helper | Locked (16/17) ... [pub_api_xp]");
         let mut pub_api_xvb = pub_api_xvb.lock().unwrap();
-        debug!("Helper | Locking (15/15) ... [pub_api_xvb]");
+        debug!("Helper | Locked (17/17) ... [pub_api_xvb]");
         // Calculate Gupax's uptime always.
         lock.uptime = HumanTime::into_human(lock.instant.elapsed());
         // If [Node] is alive...
@@ -558,39 +558,39 @@ impl Helper {

         // 3. Drop... (almost) EVERYTHING... IN REVERSE!
         drop(lock_pub_sys);
-        debug!("Helper | Unlocking (1/12) ... [pub_sys]");
+        debug!("Helper | Unlocking (1/17) ... [pub_sys]");
         drop(xvb);
-        debug!("Helper | Unlocking (2/12) ... [xvb]");
+        debug!("Helper | Unlocking (2/17) ... [xvb]");
         drop(xmrig_proxy);
-        debug!("Helper | Unlocking (3/12) ... [xmrig_proxy]");
+        debug!("Helper | Unlocking (3/17) ... [xmrig_proxy]");
         drop(xmrig);
-        debug!("Helper | Unlocking (3/12) ... [xmrig]");
+        debug!("Helper | Unlocking (4/17) ... [xmrig]");
         drop(p2pool);
-        debug!("Helper | Unlocking (4/12) ... [p2pool]");
+        debug!("Helper | Unlocking (5/17) ... [p2pool]");
         drop(node);
-        debug!("Helper | Unlocking (4/12) ... [node]");
+        debug!("Helper | Unlocking (6/17) ... [node]");
         drop(pub_api_xvb);
-        debug!("Helper | Unlocking (5/12) ... [pub_api_xvb]");
+        debug!("Helper | Unlocking (7/17) ... [pub_api_xvb]");
         drop(pub_api_xp);
-        debug!("Helper | Unlocking (6/12) ... [pub_api_xp]");
+        debug!("Helper | Unlocking (8/17) ... [pub_api_xp]");
         drop(pub_api_xmrig);
-        debug!("Helper | Unlocking (6/12) ... [pub_api_xmrig]");
+        debug!("Helper | Unlocking (9/17) ... [pub_api_xmrig]");
         drop(pub_api_p2pool);
-        debug!("Helper | Unlocking (7/12) ... [pub_api_p2pool]");
+        debug!("Helper | Unlocking (10/17) ... [pub_api_p2pool]");
         drop(pub_api_node);
-        debug!("Helper | Unlocking (7/12) ... [node]");
+        debug!("Helper | Unlocking (11/17) ... [pub_api_node]");
         drop(gui_api_xvb);
-        debug!("Helper | Unlocking (8/12) ... [gui_api_xvb]");
+        debug!("Helper | Unlocking (12/17) ... [gui_api_xvb]");
         drop(gui_api_xp);
-        debug!("Helper | Unlocking (9/12) ... [gui_api_xp]");
+        debug!("Helper | Unlocking (13/17) ... [gui_api_xp]");
         drop(gui_api_xmrig);
-        debug!("Helper | Unlocking (10/12) ... [gui_api_xmrig]");
+        debug!("Helper | Unlocking (14/17) ... [gui_api_xmrig]");
         drop(gui_api_p2pool);
-        debug!("Helper | Unlocking (11/12) ... [gui_api_p2pool]");
+        debug!("Helper | Unlocking (15/17) ... [gui_api_p2pool]");
         drop(gui_api_node);
-        debug!("Helper | Unlocking (11/12) ... [node]");
+        debug!("Helper | Unlocking (16/17) ... [gui_api_node]");
         drop(lock);
-        debug!("Helper | Unlocking (12/12) ... [helper]");
+        debug!("Helper | Unlocking (17/17) ... [helper]");

         // 4. Calculate if we should sleep or not.
         // If we should sleep, how long?
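The renumbered messages make the invariant easier to audit: every mutex is taken in one fixed order at the top of the tick and the guards are released in the reverse order at the bottom. Locking in a single global order is the standard way to rule out lock-order-inversion deadlocks; a toy illustration with generic names, not the gupaxx handles:

    use std::sync::{Arc, Mutex};
    use std::thread;

    fn main() {
        let a = Arc::new(Mutex::new(0u64));
        let b = Arc::new(Mutex::new(0u64));

        // Both threads lock in the same order (a, then b); if one of them
        // locked (b, then a) instead, the two could deadlock waiting on
        // each other's second lock.
        let t1 = {
            let (a, b) = (a.clone(), b.clone());
            thread::spawn(move || {
                let ga = a.lock().unwrap();
                let gb = b.lock().unwrap();
                println!("t1 saw {} {}", *ga, *gb);
                // Guards drop in reverse order (gb, then ga) automatically.
            })
        };
        let t2 = {
            let (a, b) = (a.clone(), b.clone());
            thread::spawn(move || {
                let mut ga = a.lock().unwrap();
                let mut gb = b.lock().unwrap();
                *ga += 1;
                *gb += 1;
            })
        };
        t1.join().unwrap();
        t2.join().unwrap();
    }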
@@ -686,71 +686,63 @@ fn check_user_input(process: &Arc<Mutex<Process>>, stdin: &mut Box<dyn std::io::
     }
 }
 fn signal_end(
-    process: &Arc<Mutex<Process>>,
+    process: &mut Process,
     child_pty: &Arc<Mutex<Box<dyn Child + Sync + Send>>>,
     start: &Instant,
     gui_api_output_raw: &mut String,
 ) -> bool {
||||
if process.lock().unwrap().signal == ProcessSignal::Stop {
|
||||
debug!("{} Watchdog | Stop SIGNAL caught", process.lock().unwrap().name);
|
||||
let mut child_pty_lock = child_pty.lock().unwrap();
|
||||
if process.signal == ProcessSignal::Stop {
|
||||
debug!("{} Watchdog | Stop SIGNAL caught", process.name);
|
||||
// This actually sends a SIGHUP to p2pool (closes the PTY, hangs up on p2pool)
|
||||
if let Err(e) = child_pty.lock().unwrap().kill() {
|
||||
error!("{} Watchdog | Kill error: {}", process.lock().unwrap().name, e);
|
||||
if let Err(e) = child_pty_lock.kill() {
|
||||
error!("{} Watchdog | Kill error: {}", process.name, e);
|
||||
}
|
||||
// Wait to get the exit status
|
||||
let exit_status = match child_pty.lock().unwrap().wait() {
|
||||
let exit_status = match child_pty_lock.wait() {
|
||||
Ok(e) => {
|
||||
if e.success() {
|
||||
process.lock().unwrap().state = ProcessState::Dead;
|
||||
process.state = ProcessState::Dead;
|
||||
"Successful"
|
||||
} else {
|
||||
process.lock().unwrap().state = ProcessState::Failed;
|
||||
process.state = ProcessState::Failed;
|
||||
"Failed"
|
||||
}
|
||||
}
|
||||
_ => {
|
||||
process.lock().unwrap().state = ProcessState::Failed;
|
||||
process.state = ProcessState::Failed;
|
||||
"Unknown Error"
|
||||
}
|
||||
};
|
||||
let uptime = HumanTime::into_human(start.elapsed());
|
||||
info!(
|
||||
"{} Watchdog | Stopped ... Uptime was: [{}], Exit status: [{}]",
|
||||
process.lock().unwrap().name,
|
||||
uptime,
|
||||
exit_status
|
||||
process.name, uptime, exit_status
|
||||
);
|
||||
// This is written directly into the GUI API, because sometimes the 900ms event loop can't catch it.
|
||||
let name = process.name.to_owned();
|
||||
if let Err(e) = writeln!(
|
||||
gui_api_output_raw,
|
||||
"{}\n{} stopped | Uptime: [{}] | Exit status: [{}]\n{}\n\n\n\n",
|
||||
process.lock().unwrap().name,
|
||||
HORI_CONSOLE,
|
||||
uptime,
|
||||
exit_status,
|
||||
HORI_CONSOLE
|
||||
name, HORI_CONSOLE, uptime, exit_status, HORI_CONSOLE
|
||||
) {
|
||||
error!(
|
||||
"{} Watchdog | GUI Uptime/Exit status write failed: {}",
|
||||
process.lock().unwrap().name,
|
||||
e
|
||||
name, e
|
||||
);
|
||||
}
|
||||
process.lock().unwrap().signal = ProcessSignal::None;
|
||||
debug!(
|
||||
"{} Watchdog | Stop SIGNAL done, breaking",
|
||||
process.lock().unwrap().name,
|
||||
);
|
||||
process.signal = ProcessSignal::None;
|
||||
debug!("{} Watchdog | Stop SIGNAL done, breaking", process.name,);
|
||||
return true;
|
||||
// Check RESTART
|
||||
} else if process.lock().unwrap().signal == ProcessSignal::Restart {
|
||||
debug!("{} Watchdog | Restart SIGNAL caught", process.lock().unwrap().name,);
|
||||
} else if process.signal == ProcessSignal::Restart {
|
||||
debug!("{} Watchdog | Restart SIGNAL caught", process.name,);
|
||||
// This actually sends a SIGHUP to p2pool (closes the PTY, hangs up on p2pool)
|
||||
if let Err(e) = child_pty.lock().unwrap().kill() {
|
||||
error!("{} Watchdog | Kill error: {}", process.lock().unwrap().name, e);
|
||||
if let Err(e) = child_pty_lock.kill() {
|
||||
error!("{} Watchdog | Kill error: {}", process.name, e);
|
||||
}
|
||||
// Wait to get the exit status
|
||||
let exit_status = match child_pty.lock().unwrap().wait() {
|
||||
let exit_status = match child_pty_lock.wait() {
|
||||
Ok(e) => {
|
||||
if e.success() {
|
||||
"Successful"
|
||||
|
@ -763,31 +755,22 @@ fn signal_end(
|
|||
let uptime = HumanTime::into_human(start.elapsed());
|
||||
info!(
|
||||
"{} Watchdog | Stopped ... Uptime was: [{}], Exit status: [{}]",
|
||||
process.lock().unwrap().name,
|
||||
uptime,
|
||||
exit_status
|
||||
process.name, uptime, exit_status
|
||||
);
|
||||
// This is written directly into the GUI API, because sometimes the 900ms event loop can't catch it.
|
||||
let name = process.name.to_owned();
|
||||
if let Err(e) = writeln!(
|
||||
gui_api_output_raw,
|
||||
"{}\n{} stopped | Uptime: [{}] | Exit status: [{}]\n{}\n\n\n\n",
|
||||
process.lock().unwrap().name,
|
||||
HORI_CONSOLE,
|
||||
uptime,
|
||||
exit_status,
|
||||
HORI_CONSOLE
|
||||
name, HORI_CONSOLE, uptime, exit_status, HORI_CONSOLE
|
||||
) {
|
||||
error!(
|
||||
"{} Watchdog | GUI Uptime/Exit status write failed: {}",
|
||||
process.lock().unwrap().name,
|
||||
e
|
||||
name, e
|
||||
);
|
||||
}
|
||||
process.lock().unwrap().state = ProcessState::Waiting;
|
||||
debug!(
|
||||
"{} Watchdog | Restart SIGNAL done, breaking",
|
||||
process.lock().unwrap().name,
|
||||
);
|
||||
process.state = ProcessState::Waiting;
|
||||
debug!("{} Watchdog | Restart SIGNAL done, breaking", process.name,);
|
||||
return true;
|
||||
}
|
||||
false
|
||||
|
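The signal_end change above (taking &mut Process instead of &Arc<Mutex<Process>>) is the core deadlock fix in this file: the old version re-locked the process mutex internally, so a caller already holding that lock would block forever, since std's Mutex is not re-entrant. A minimal reproduction of the hazard and the fix, using simplified stand-ins rather than the real watchdog code:

    use std::sync::{Arc, Mutex};

    struct Process { stopping: bool }

    // BAD: if the caller already holds `process`, this second lock() blocks forever.
    #[allow(dead_code)]
    fn signal_end_relocks(process: &Arc<Mutex<Process>>) -> bool {
        process.lock().unwrap().stopping
    }

    // GOOD: the caller passes the data behind its own (single) guard.
    fn signal_end(process: &mut Process) -> bool {
        process.stopping
    }

    fn main() {
        let process = Arc::new(Mutex::new(Process { stopping: true }));

        let mut guard = process.lock().unwrap();
        // Calling `signal_end_relocks(&process)` here would deadlock:
        // this thread would wait on a lock it already owns.
        let done = signal_end(&mut guard);
        drop(guard);
        println!("stop requested: {done}");
    }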
@@ -796,8 +779,8 @@ async fn sleep_end_loop(now: Instant, name: ProcessName) {
     // Sleep (only if 999ms hasn't passed)
     let elapsed = now.elapsed().as_millis();
     // Since logic goes off if less than 1000, casting should be safe
-    if elapsed < 999 {
-        let sleep = (999 - elapsed) as u64;
+    if elapsed < 1000 {
+        let sleep = (1000 - elapsed) as u64;
         debug!(
             "{} Watchdog | END OF LOOP - Sleeping for [{}]ms...",
             name, sleep
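With the 999 to 1000 change the watchdog pads each iteration out to a full one-second tick: if the body took elapsed ms (less than 1000), it sleeps 1000 - elapsed ms. A hedged async sketch of the same pacing, assuming a tokio runtime as the surrounding code appears to use:

    use std::time::Instant;
    use tokio::time::{sleep, Duration};

    async fn sleep_end_loop(now: Instant, name: &str) {
        let elapsed = now.elapsed().as_millis();
        // Only pad the iteration if less than a full second has passed.
        if elapsed < 1000 {
            let sleep_ms = (1000 - elapsed) as u64; // safe cast: value is < 1000
            println!("{name} Watchdog | END OF LOOP - Sleeping for [{sleep_ms}]ms...");
            sleep(Duration::from_millis(sleep_ms)).await;
        }
    }

    #[tokio::main]
    async fn main() {
        for _ in 0..3 {
            let now = Instant::now();
            // ... watchdog work would happen here ...
            sleep_end_loop(now, "Demo").await;
        }
    }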
@ -227,56 +227,62 @@ impl Helper {
|
|||
loop {
|
||||
let now = Instant::now();
|
||||
debug!("Node Watchdog | ----------- Start of loop -----------");
|
||||
|
||||
// check state
|
||||
if check_died(
|
||||
&child_pty,
|
||||
&mut process.lock().unwrap(),
|
||||
&start,
|
||||
&mut gui_api.lock().unwrap().output,
|
||||
) {
|
||||
break;
|
||||
}
|
||||
// check signal
|
||||
if signal_end(
|
||||
process,
|
||||
&child_pty,
|
||||
&start,
|
||||
&mut gui_api.lock().unwrap().output,
|
||||
) {
|
||||
break;
|
||||
}
|
||||
// check user input
|
||||
check_user_input(process, &mut stdin);
|
||||
// get data output/api
|
||||
|
||||
// Check if logs need resetting
|
||||
debug!("Node Watchdog | Attempting GUI log reset check");
|
||||
{
|
||||
let mut lock = gui_api.lock().unwrap();
|
||||
Self::check_reset_gui_output(&mut lock.output, ProcessName::Node);
|
||||
}
|
||||
// No need to check output since monerod has a sufficient API
|
||||
// Always update from output
|
||||
debug!("Node Watchdog | Starting [update_from_output()]");
|
||||
PubNodeApi::update_from_output(pub_api, &output_pub, start.elapsed());
|
||||
// update data from api
|
||||
debug!("Node Watchdog | Attempting HTTP API request...");
|
||||
match PrivNodeApi::request_api(&client, &state).await {
|
||||
Ok(priv_api) => {
|
||||
debug!("Node Watchdog | HTTP API request OK, attempting [update_from_priv()]");
|
||||
if priv_api.result.synchronized && priv_api.result.status == "OK" {
|
||||
process.lock().unwrap().state = ProcessState::Alive
|
||||
}
|
||||
PubNodeApi::update_from_priv(pub_api, priv_api);
|
||||
// scope to drop locked mutex before the sleep
|
||||
// check state
|
||||
if check_died(
|
||||
&child_pty,
|
||||
&mut process.lock().unwrap(),
|
||||
&start,
|
||||
&mut gui_api.lock().unwrap().output,
|
||||
) {
|
||||
break;
|
||||
}
|
||||
Err(err) => {
|
||||
// if node is just starting, do not throw an error
|
||||
if start.elapsed() > Duration::from_secs(10) {
|
||||
warn!(
|
||||
"Node Watchdog | Could not send HTTP API request to node\n{}",
|
||||
err
|
||||
// check signal
|
||||
if signal_end(
|
||||
&mut process.lock().unwrap(),
|
||||
&child_pty,
|
||||
&start,
|
||||
&mut gui_api.lock().unwrap().output,
|
||||
) {
|
||||
break;
|
||||
}
|
||||
// check user input
|
||||
check_user_input(process, &mut stdin);
|
||||
// get data output/api
|
||||
|
||||
// Check if logs need resetting
|
||||
debug!("Node Watchdog | Attempting GUI log reset check");
|
||||
{
|
||||
Self::check_reset_gui_output(
|
||||
&mut gui_api.lock().unwrap().output,
|
||||
ProcessName::Node,
|
||||
);
|
||||
}
|
||||
// No need to check output since monerod has a sufficient API
|
||||
// Always update from output
|
||||
debug!("Node Watchdog | Starting [update_from_output()]");
|
||||
PubNodeApi::update_from_output(pub_api, &output_pub, start.elapsed());
|
||||
// update data from api
|
||||
debug!("Node Watchdog | Attempting HTTP API request...");
|
||||
match PrivNodeApi::request_api(&client, &state).await {
|
||||
Ok(priv_api) => {
|
||||
debug!(
|
||||
"Node Watchdog | HTTP API request OK, attempting [update_from_priv()]"
|
||||
);
|
||||
if priv_api.result.synchronized && priv_api.result.status == "OK" {
|
||||
process.lock().unwrap().state = ProcessState::Alive
|
||||
}
|
||||
PubNodeApi::update_from_priv(pub_api, priv_api);
|
||||
}
|
||||
Err(err) => {
|
||||
// if node is just starting, do not throw an error
|
||||
if start.elapsed() > Duration::from_secs(10) {
|
||||
warn!(
|
||||
"Node Watchdog | Could not send HTTP API request to node\n{}",
|
||||
err
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
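The other recurring change in the watchdog loops is scoping: the per-tick work is wrapped in a block (see the "scope to drop locked mutex before the sleep" comment) so every MutexGuard is released before sleep_end_loop(...).await. A small sketch of that shape with stand-in types; holding a std::sync::Mutex guard across an .await also makes the future non-Send, which is usually the first visible symptom:

    use std::sync::{Arc, Mutex};
    use tokio::time::{sleep, Duration};

    #[derive(Default)]
    struct GuiApi { output: String }

    async fn watchdog(gui_api: Arc<Mutex<GuiApi>>) {
        for tick in 0..3 {
            {
                // All locking happens inside this block...
                let mut gui = gui_api.lock().unwrap();
                gui.output.push_str(&format!("tick {tick}\n"));
            } // ...and every guard is dropped here, before the await below.
            sleep(Duration::from_millis(10)).await;
        }
    }

    #[tokio::main]
    async fn main() {
        let gui_api = Arc::new(Mutex::new(GuiApi::default()));
        watchdog(gui_api.clone()).await;
        print!("{}", gui_api.lock().unwrap().output);
    }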
@ -527,109 +527,111 @@ impl Helper {
|
|||
// Set timer
|
||||
let now = Instant::now();
|
||||
debug!("P2Pool Watchdog | ----------- Start of loop -----------");
|
||||
gui_api.lock().unwrap().tick = (last_p2pool_request.elapsed().as_secs() % 60) as u8;
|
||||
|
||||
// Check if the process is secretly died without us knowing :)
|
||||
if check_died(
|
||||
&child_pty,
|
||||
&mut process.lock().unwrap(),
|
||||
&start,
|
||||
&mut gui_api.lock().unwrap().output,
|
||||
) {
|
||||
break;
|
||||
}
|
||||
|
||||
// Check SIGNAL
|
||||
if signal_end(
|
||||
&process,
|
||||
&child_pty,
|
||||
&start,
|
||||
&mut gui_api.lock().unwrap().output,
|
||||
) {
|
||||
break;
|
||||
}
|
||||
// Check vector of user input
|
||||
check_user_input(&process, &mut stdin);
|
||||
// Check if logs need resetting
|
||||
debug!("P2Pool Watchdog | Attempting GUI log reset check");
|
||||
let mut lock = gui_api.lock().unwrap();
|
||||
Self::check_reset_gui_output(&mut lock.output, ProcessName::P2pool);
|
||||
drop(lock);
|
||||
|
||||
// Always update from output
|
||||
debug!("P2Pool Watchdog | Starting [update_from_output()]");
|
||||
PubP2poolApi::update_from_output(
|
||||
&pub_api,
|
||||
&output_parse,
|
||||
&output_pub,
|
||||
start.elapsed(),
|
||||
&process,
|
||||
);
|
||||
|
||||
// Read [local] API
|
||||
debug!("P2Pool Watchdog | Attempting [local] API file read");
|
||||
if let Ok(string) = Self::path_to_string(&api_path_local, ProcessName::P2pool) {
|
||||
// Deserialize
|
||||
if let Ok(local_api) = PrivP2poolLocalApi::from_str(&string) {
|
||||
// Update the structs.
|
||||
PubP2poolApi::update_from_local(&pub_api, local_api);
|
||||
}
|
||||
}
|
||||
// If more than 1 minute has passed, read the other API files.
|
||||
let last_p2pool_request_expired =
|
||||
last_p2pool_request.elapsed() >= Duration::from_secs(60);
|
||||
// need to reload fast to get the first right values after syncing.
|
||||
// check if value is 100k or under and request immediately if that's the case. fixed in release of p2pool including commit https://github.com/SChernykh/p2pool/commit/64a199be6dec7924b41f857a401086f25e1ec9be
|
||||
if (last_p2pool_request_expired
|
||||
|| pub_api.lock().unwrap().p2pool_difficulty_u64 <= 100000)
|
||||
&& process.lock().unwrap().state == ProcessState::Alive
|
||||
{
|
||||
debug!("P2Pool Watchdog | Attempting [network] & [pool] API file read");
|
||||
if let (Ok(network_api), Ok(pool_api)) = (
|
||||
Self::path_to_string(&api_path_network, ProcessName::P2pool),
|
||||
Self::path_to_string(&api_path_pool, ProcessName::P2pool),
|
||||
gui_api.lock().unwrap().tick = (last_p2pool_request.elapsed().as_secs() % 60) as u8;
|
||||
// Check if the process is secretly died without us knowing :)
|
||||
if check_died(
|
||||
&child_pty,
|
||||
&mut process.lock().unwrap(),
|
||||
&start,
|
||||
&mut gui_api.lock().unwrap().output,
|
||||
) {
|
||||
if let (Ok(network_api), Ok(pool_api)) = (
|
||||
PrivP2poolNetworkApi::from_str(&network_api),
|
||||
PrivP2poolPoolApi::from_str(&pool_api),
|
||||
) {
|
||||
PubP2poolApi::update_from_network_pool(&pub_api, network_api, pool_api);
|
||||
last_p2pool_request = tokio::time::Instant::now();
|
||||
break;
|
||||
}
|
||||
|
||||
// Check SIGNAL
|
||||
if signal_end(
|
||||
&mut process.lock().unwrap(),
|
||||
&child_pty,
|
||||
&start,
|
||||
&mut gui_api.lock().unwrap().output,
|
||||
) {
|
||||
break;
|
||||
}
|
||||
// Check vector of user input
|
||||
check_user_input(&process, &mut stdin);
|
||||
// Check if logs need resetting
|
||||
debug!("P2Pool Watchdog | Attempting GUI log reset check");
|
||||
let mut lock = gui_api.lock().unwrap();
|
||||
Self::check_reset_gui_output(&mut lock.output, ProcessName::P2pool);
|
||||
drop(lock);
|
||||
|
||||
// Always update from output
|
||||
debug!("P2Pool Watchdog | Starting [update_from_output()]");
|
||||
let mut process_lock = process.lock().unwrap();
|
||||
let mut pub_api_lock = pub_api.lock().unwrap();
|
||||
PubP2poolApi::update_from_output(
|
||||
&mut pub_api_lock,
|
||||
&output_parse,
|
||||
&output_pub,
|
||||
start.elapsed(),
|
||||
&mut process_lock,
|
||||
);
|
||||
|
||||
// Read [local] API
|
||||
debug!("P2Pool Watchdog | Attempting [local] API file read");
|
||||
if let Ok(string) = Self::path_to_string(&api_path_local, ProcessName::P2pool) {
|
||||
// Deserialize
|
||||
if let Ok(local_api) = PrivP2poolLocalApi::from_str(&string) {
|
||||
// Update the structs.
|
||||
PubP2poolApi::update_from_local(&mut pub_api_lock, local_api);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let last_status_request_expired =
|
||||
last_status_request.elapsed() >= Duration::from_secs(60);
|
||||
|
||||
if (last_status_request_expired || first_loop)
|
||||
&& process.lock().unwrap().state == ProcessState::Alive
|
||||
{
|
||||
debug!("P2Pool Watchdog | Reading status output of p2pool node");
|
||||
#[cfg(target_os = "windows")]
|
||||
if let Err(e) = write!(stdin, "statusfromgupaxx\r\n") {
|
||||
error!("P2Pool Watchdog | STDIN error: {}", e);
|
||||
// If more than 1 minute has passed, read the other API files.
|
||||
let last_p2pool_request_expired =
|
||||
last_p2pool_request.elapsed() >= Duration::from_secs(60);
|
||||
// need to reload fast to get the first right values after syncing.
|
||||
// check if value is 100k or under and request immediately if that's the case. fixed in release of p2pool including commit https://github.com/SChernykh/p2pool/commit/64a199be6dec7924b41f857a401086f25e1ec9be
|
||||
if (last_p2pool_request_expired
|
||||
|| pub_api.lock().unwrap().p2pool_difficulty_u64 <= 100000)
|
||||
&& process_lock.state == ProcessState::Alive
|
||||
{
|
||||
debug!("P2Pool Watchdog | Attempting [network] & [pool] API file read");
|
||||
if let (Ok(network_api), Ok(pool_api)) = (
|
||||
Self::path_to_string(&api_path_network, ProcessName::P2pool),
|
||||
Self::path_to_string(&api_path_pool, ProcessName::P2pool),
|
||||
) {
|
||||
if let (Ok(network_api), Ok(pool_api)) = (
|
||||
PrivP2poolNetworkApi::from_str(&network_api),
|
||||
PrivP2poolPoolApi::from_str(&pool_api),
|
||||
) {
|
||||
PubP2poolApi::update_from_network_pool(
|
||||
&mut pub_api_lock,
|
||||
network_api,
|
||||
pool_api,
|
||||
);
|
||||
last_p2pool_request = tokio::time::Instant::now();
|
||||
}
|
||||
}
|
||||
}
|
||||
#[cfg(target_family = "unix")]
|
||||
if let Err(e) = writeln!(stdin, "statusfromgupaxx") {
|
||||
error!("P2Pool Watchdog | STDIN error: {}", e);
|
||||
}
|
||||
// Flush.
|
||||
if let Err(e) = stdin.flush() {
|
||||
error!("P2Pool Watchdog | STDIN flush error: {}", e);
|
||||
}
|
||||
last_status_request = tokio::time::Instant::now();
|
||||
}
|
||||
|
||||
// Sleep (only if 900ms hasn't passed)
|
||||
if first_loop {
|
||||
first_loop = false;
|
||||
}
|
||||
let last_status_request_expired =
|
||||
last_status_request.elapsed() >= Duration::from_secs(60);
|
||||
if (last_status_request_expired || first_loop)
|
||||
&& process_lock.state == ProcessState::Alive
|
||||
{
|
||||
debug!("P2Pool Watchdog | Reading status output of p2pool node");
|
||||
#[cfg(target_os = "windows")]
|
||||
if let Err(e) = write!(stdin, "statusfromgupaxx\r\n") {
|
||||
error!("P2Pool Watchdog | STDIN error: {}", e);
|
||||
}
|
||||
#[cfg(target_family = "unix")]
|
||||
if let Err(e) = writeln!(stdin, "statusfromgupaxx") {
|
||||
error!("P2Pool Watchdog | STDIN error: {}", e);
|
||||
}
|
||||
// Flush.
|
||||
if let Err(e) = stdin.flush() {
|
||||
error!("P2Pool Watchdog | STDIN flush error: {}", e);
|
||||
}
|
||||
last_status_request = tokio::time::Instant::now();
|
||||
}
|
||||
|
||||
// Sleep (only if 900ms hasn't passed)
|
||||
if first_loop {
|
||||
first_loop = false;
|
||||
}
|
||||
} // end of scope to drop lock
|
||||
sleep_end_loop(now, ProcessName::P2pool).await;
|
||||
debug!(
|
||||
"P2Pool Watchdog | END OF LOOP - Tick: [{}/60]",
|
||||
gui_api.lock().unwrap().tick,
|
||||
);
|
||||
}
|
||||
|
||||
// 5. If loop broke, we must be done here.
|
||||
|
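The restructured P2Pool loop locks process and pub_api once per iteration (process_lock, pub_api_lock) and passes &mut borrows of those guards to the update helpers, instead of each helper re-locking the same mutexes. A simplified sketch of that caller-holds-the-guard style, with stand-in types and helper names:

    use std::sync::{Arc, Mutex};

    #[derive(Default)]
    struct PubApi { payouts: u64 }
    #[derive(Default)]
    struct Process { alive: bool }

    // Helpers borrow the already-locked data; they never call .lock() themselves.
    fn update_from_output(public: &mut PubApi, process: &mut Process) {
        public.payouts += 1;
        process.alive = true;
    }
    fn update_from_local(public: &mut PubApi) {
        public.payouts += 10;
    }

    fn main() {
        let pub_api = Arc::new(Mutex::new(PubApi::default()));
        let process = Arc::new(Mutex::new(Process::default()));

        // One lock per mutex per loop iteration...
        let mut pub_api_lock = pub_api.lock().unwrap();
        let mut process_lock = process.lock().unwrap();
        // ...reused for every helper call in this iteration.
        update_from_output(&mut pub_api_lock, &mut process_lock);
        update_from_local(&mut pub_api_lock);
        drop(process_lock);
        drop(pub_api_lock);

        println!("payouts = {}", pub_api.lock().unwrap().payouts);
    }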
@@ -826,46 +828,39 @@ impl PubP2poolApi {

     // Mutate "watchdog"'s [PubP2poolApi] with data the process output.
     pub(super) fn update_from_output(
-        public: &Arc<Mutex<Self>>,
+        public: &mut Self,
         output_parse: &Arc<Mutex<String>>,
         output_pub: &Arc<Mutex<String>>,
         elapsed: std::time::Duration,
-        process: &Arc<Mutex<Process>>,
+        process: &mut Process,
     ) {
||||
// 1. Take the process's current output buffer and combine it with Pub (if not empty)
|
||||
let mut output_pub = output_pub.lock().unwrap();
|
||||
if !output_pub.is_empty() {
|
||||
public
|
||||
.lock()
|
||||
.unwrap()
|
||||
.output
|
||||
.push_str(&std::mem::take(&mut *output_pub));
|
||||
public.output.push_str(&std::mem::take(&mut *output_pub));
|
||||
}
|
||||
|
||||
// 2. Parse the full STDOUT
|
||||
let mut output_parse = output_parse.lock().unwrap();
|
||||
let (payouts_new, xmr_new) = Self::calc_payouts_and_xmr(&output_parse);
|
||||
// Check for "SYNCHRONIZED" only if we aren't already.
|
||||
if process.lock().unwrap().state == ProcessState::Syncing {
|
||||
if process.state == ProcessState::Syncing {
|
||||
// look for depth 0
|
||||
|
||||
if P2POOL_REGEX.depth_0.is_match(&output_parse) {
|
||||
process.lock().unwrap().state = ProcessState::Alive;
|
||||
process.state = ProcessState::Alive;
|
||||
}
|
||||
}
|
||||
// check if zmq server still alive
|
||||
if process.lock().unwrap().state == ProcessState::Alive
|
||||
&& contains_zmq_connection_lost(&output_parse)
|
||||
{
|
||||
if process.state == ProcessState::Alive && contains_zmq_connection_lost(&output_parse) {
|
||||
// node zmq is not responding, p2pool is not ready
|
||||
process.lock().unwrap().state = ProcessState::Syncing;
|
||||
process.state = ProcessState::Syncing;
|
||||
}
|
||||
|
||||
// 3. Throw away [output_parse]
|
||||
output_parse.clear();
|
||||
drop(output_parse);
|
||||
// 4. Add to current values
|
||||
let mut public = public.lock().unwrap();
|
||||
let (payouts, xmr) = (public.payouts + payouts_new, public.xmr + xmr_new);
|
||||
|
||||
// 5. Calculate hour/day/month given elapsed time
|
||||
|
@@ -912,13 +907,12 @@ impl PubP2poolApi {
             xmr_hour,
             xmr_day,
             xmr_month,
-            ..std::mem::take(&mut *public)
+            ..std::mem::take(public)
         };
     }

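The ..std::mem::take(public) line keeps the existing idiom, rebuilding the struct with a few updated fields and carrying the rest over, but applied through the &mut Self borrow instead of a freshly taken guard. mem::take swaps the old value out (leaving Default::default()), so the struct-update syntax can move the remaining fields without cloning. A tiny illustration with hypothetical fields:

    #[derive(Default, Debug)]
    struct Stats {
        payouts: u64,
        xmr: f64,
        output: String, // non-Copy field that gets moved, not cloned
    }

    fn update(public: &mut Stats, payouts_new: u64, xmr_new: f64) {
        let payouts = public.payouts + payouts_new;
        let xmr = public.xmr + xmr_new;
        *public = Stats {
            payouts,
            xmr,
            // Move everything else out of the old value; `public` briefly
            // holds Default::default() before the write-back completes.
            ..std::mem::take(public)
        };
    }

    fn main() {
        let mut s = Stats { payouts: 1, xmr: 0.5, output: "log".into() };
        update(&mut s, 2, 0.25);
        println!("{s:?}");
    }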
     // Mutate [PubP2poolApi] with data from a [PrivP2poolLocalApi] and the process output.
-    pub(super) fn update_from_local(public: &Arc<Mutex<Self>>, local: PrivP2poolLocalApi) {
-        let mut public = public.lock().unwrap();
+    pub(super) fn update_from_local(public: &mut Self, local: PrivP2poolLocalApi) {
         *public = Self {
             hashrate_15m: HumanNumber::from_u64(local.hashrate_15m),
             hashrate_1h: HumanNumber::from_u64(local.hashrate_1h),
@@ -934,11 +928,11 @@ impl PubP2poolApi {

     // Mutate [PubP2poolApi] with data from a [PrivP2pool(Network|Pool)Api].
     pub(super) fn update_from_network_pool(
-        public: &Arc<Mutex<Self>>,
+        public: &mut Self,
         net: PrivP2poolNetworkApi,
         pool: PrivP2poolPoolApi,
     ) {
-        let user_hashrate = public.lock().unwrap().user_p2pool_hashrate_u64; // The user's total P2Pool hashrate
+        let user_hashrate = public.user_p2pool_hashrate_u64; // The user's total P2Pool hashrate
         let monero_difficulty = net.difficulty;
         let monero_hashrate = monero_difficulty / MONERO_BLOCK_TIME_IN_SECONDS;
         let p2pool_hashrate = pool.pool_statistics.hashRate;
||||
|
@ -980,7 +974,6 @@ impl PubP2poolApi {
|
|||
p2pool_difficulty / user_hashrate,
|
||||
));
|
||||
}
|
||||
let mut public = public.lock().unwrap();
|
||||
*public = Self {
|
||||
p2pool_difficulty_u64: p2pool_difficulty,
|
||||
monero_difficulty_u64: monero_difficulty,
|
||||
|
|
|
@ -116,8 +116,14 @@ Uptime = 0h 2m 4s
|
|||
"".to_string(),
|
||||
PathBuf::new(),
|
||||
)));
|
||||
PubP2poolApi::update_from_output(&public, &output_parse, &output_pub, elapsed, &process);
|
||||
let public = public.lock().unwrap();
|
||||
let mut public = public.lock().unwrap();
|
||||
PubP2poolApi::update_from_output(
|
||||
&mut public,
|
||||
&output_parse,
|
||||
&output_pub,
|
||||
elapsed,
|
||||
&mut process.lock().unwrap(),
|
||||
);
|
||||
println!("{:#?}", public);
|
||||
assert_eq!(public.payouts, 3);
|
||||
assert_eq!(public.payouts_hour, 180.0);
|
||||
|
@ -161,8 +167,8 @@ Uptime = 0h 2m 4s
|
|||
},
|
||||
};
|
||||
// Update Local
|
||||
PubP2poolApi::update_from_local(&public, local);
|
||||
let p = public.lock().unwrap();
|
||||
let mut p = public.lock().unwrap();
|
||||
PubP2poolApi::update_from_local(&mut p, local);
|
||||
println!("AFTER LOCAL: {:#?}", p);
|
||||
assert_eq!(p.hashrate_15m.to_string(), "10,000");
|
||||
assert_eq!(p.hashrate_1h.to_string(), "20,000");
|
||||
|
@ -175,10 +181,8 @@ Uptime = 0h 2m 4s
|
|||
assert_eq!(p.current_effort.to_string(), "200.00%");
|
||||
assert_eq!(p.connections.to_string(), "1,234");
|
||||
assert_eq!(p.user_p2pool_hashrate_u64, 20000);
|
||||
drop(p);
|
||||
// Update Network + Pool
|
||||
PubP2poolApi::update_from_network_pool(&public, network, pool);
|
||||
let p = public.lock().unwrap();
|
||||
PubP2poolApi::update_from_network_pool(&mut p, network, pool);
|
||||
println!("AFTER NETWORK+POOL: {:#?}", p);
|
||||
assert_eq!(p.monero_difficulty.to_string(), "300,000,000,000");
|
||||
assert_eq!(p.monero_hashrate.to_string(), "2.500 GH/s");
|
||||
|
|
|
@ -1,12 +1,12 @@
|
|||
use crate::constants::*;
|
||||
use crate::helper::xrig::update_xmrig_config;
|
||||
use crate::helper::{check_died, check_user_input, sleep_end_loop, Process};
|
||||
use crate::helper::{arc_mut, check_died, check_user_input, sleep, sleep_end_loop, Process};
|
||||
use crate::helper::{Helper, ProcessName, ProcessSignal, ProcessState};
|
||||
use crate::helper::{PubXvbApi, XvbNode};
|
||||
use crate::miscs::output_console;
|
||||
use crate::regex::{contains_error, contains_usepool, detect_new_node_xmrig, XMRIG_REGEX};
|
||||
use crate::utils::human::HumanNumber;
|
||||
use crate::utils::sudo::SudoState;
|
||||
use crate::{constants::*, macros::*};
|
||||
use enclose::enclose;
|
||||
use log::*;
|
||||
use portable_pty::Child;
|
||||
|
@ -514,7 +514,7 @@ impl Helper {
|
|||
}
|
||||
// Stop on [Stop/Restart] SIGNAL
|
||||
if Self::xmrig_signal_end(
|
||||
&process,
|
||||
&mut process.lock().unwrap(),
|
||||
&child_pty,
|
||||
&start,
|
||||
&mut gui_api.lock().unwrap().output,
|
||||
|
@@ -591,13 +591,13 @@ impl Helper {
         info!("XMRig Watchdog | Watchdog thread exiting... Goodbye!");
     }
     fn xmrig_signal_end(
-        process: &Arc<Mutex<Process>>,
+        process: &mut Process,
         child_pty: &Arc<Mutex<Box<dyn Child + Sync + Send>>>,
         start: &Instant,
         gui_api_output_raw: &mut String,
         sudo: &Arc<Mutex<SudoState>>,
     ) -> bool {
-        let signal = process.lock().unwrap().signal;
+        let signal = process.signal;
         if signal == ProcessSignal::Stop || signal == ProcessSignal::Restart {
             debug!("XMRig Watchdog | Stop/Restart SIGNAL caught");
             // macOS requires [sudo] again to kill [XMRig]
@ -616,7 +616,6 @@ impl Helper {
|
|||
}
|
||||
let exit_status = match child_pty.lock().unwrap().wait() {
|
||||
Ok(e) => {
|
||||
let mut process = process.lock().unwrap();
|
||||
if e.success() {
|
||||
if process.signal == ProcessSignal::Stop {
|
||||
process.state = ProcessState::Dead;
|
||||
|
@ -630,7 +629,6 @@ impl Helper {
|
|||
}
|
||||
}
|
||||
_ => {
|
||||
let mut process = process.lock().unwrap();
|
||||
if process.signal == ProcessSignal::Stop {
|
||||
process.state = ProcessState::Failed;
|
||||
}
|
||||
|
@ -652,7 +650,6 @@ impl Helper {
|
|||
e
|
||||
);
|
||||
}
|
||||
let mut process = process.lock().unwrap();
|
||||
match process.signal {
|
||||
ProcessSignal::Stop => process.signal = ProcessSignal::None,
|
||||
ProcessSignal::Restart => process.state = ProcessState::Waiting,
|
||||
|
|
|
@ -371,91 +371,93 @@ impl Helper {
|
|||
loop {
|
||||
let now = Instant::now();
|
||||
debug!("XMRig-Proxy Watchdog | ----------- Start of loop -----------");
|
||||
// check state
|
||||
if check_died(
|
||||
&child_pty,
|
||||
&mut process.lock().unwrap(),
|
||||
&start,
|
||||
&mut gui_api.lock().unwrap().output,
|
||||
) {
|
||||
break;
|
||||
}
|
||||
// check signal
|
||||
if signal_end(
|
||||
process,
|
||||
&child_pty,
|
||||
&start,
|
||||
&mut gui_api.lock().unwrap().output,
|
||||
) {
|
||||
break;
|
||||
}
|
||||
// check user input
|
||||
check_user_input(process, &mut stdin);
|
||||
// get data output/api
|
||||
{
|
||||
if check_died(
|
||||
&child_pty,
|
||||
&mut process.lock().unwrap(),
|
||||
&start,
|
||||
&mut gui_api.lock().unwrap().output,
|
||||
) {
|
||||
break;
|
||||
}
|
||||
// check signal
|
||||
if signal_end(
|
||||
&mut process.lock().unwrap(),
|
||||
&child_pty,
|
||||
&start,
|
||||
&mut gui_api.lock().unwrap().output,
|
||||
) {
|
||||
break;
|
||||
}
|
||||
// check user input
|
||||
check_user_input(process, &mut stdin);
|
||||
// get data output/api
|
||||
|
||||
// Check if logs need resetting
|
||||
debug!("XMRig-Proxy Watchdog | Attempting GUI log reset check");
|
||||
{
|
||||
let mut lock = gui_api.lock().unwrap();
|
||||
Self::check_reset_gui_output(&mut lock.output, ProcessName::XmrigProxy);
|
||||
}
|
||||
// Always update from output
|
||||
// todo: check difference with xmrig
|
||||
debug!("XMRig-Proxy Watchdog | Starting [update_from_output()]");
|
||||
PubXmrigProxyApi::update_from_output(
|
||||
pub_api,
|
||||
&output_pub,
|
||||
&output_parse,
|
||||
start.elapsed(),
|
||||
process,
|
||||
);
|
||||
// update data from api
|
||||
debug!("XMRig-Proxy Watchdog | Attempting HTTP API request...");
|
||||
match PrivXmrigProxyApi::request_xp_api(&client, api_summary_xp, token_proxy).await {
|
||||
Ok(priv_api) => {
|
||||
debug!("XMRig-Proxy Watchdog | HTTP API request OK, attempting [update_from_priv()]");
|
||||
PubXmrigProxyApi::update_from_priv(pub_api, priv_api);
|
||||
}
|
||||
Err(err) => {
|
||||
warn!(
|
||||
"XMRig-Proxy Watchdog | Could not send HTTP API request to: {}\n{}",
|
||||
api_summary_xp, err
|
||||
);
|
||||
}
|
||||
}
|
||||
// update xmrig to use xmrig-proxy if option enabled and local xmrig alive
|
||||
if xmrig_redirect
|
||||
&& gui_api_xmrig.lock().unwrap().node != XvbNode::XmrigProxy.to_string()
|
||||
&& (process_xmrig.lock().unwrap().state == ProcessState::Alive
|
||||
|| process_xmrig.lock().unwrap().state == ProcessState::NotMining)
|
||||
{
|
||||
info!("redirect local xmrig instance to xmrig-proxy");
|
||||
if let Err(err) = update_xmrig_config(
|
||||
&client,
|
||||
api_config_xmrig,
|
||||
&state_xmrig.token,
|
||||
&XvbNode::XmrigProxy,
|
||||
"",
|
||||
GUPAX_VERSION_UNDERSCORE,
|
||||
)
|
||||
.await
|
||||
// Check if logs need resetting
|
||||
debug!("XMRig-Proxy Watchdog | Attempting GUI log reset check");
|
||||
Self::check_reset_gui_output(
|
||||
&mut gui_api.lock().unwrap().output,
|
||||
ProcessName::XmrigProxy,
|
||||
);
|
||||
// Always update from output
|
||||
// todo: check difference with xmrig
|
||||
debug!("XMRig-Proxy Watchdog | Starting [update_from_output()]");
|
||||
PubXmrigProxyApi::update_from_output(
|
||||
pub_api,
|
||||
&output_pub,
|
||||
&output_parse,
|
||||
start.elapsed(),
|
||||
process,
|
||||
);
|
||||
// update data from api
|
||||
debug!("XMRig-Proxy Watchdog | Attempting HTTP API request...");
|
||||
match PrivXmrigProxyApi::request_xp_api(&client, api_summary_xp, token_proxy).await
|
||||
{
|
||||
// show to console error about updating xmrig config
|
||||
warn!("XMRig-Proxy Process | Failed request HTTP API Xmrig");
|
||||
output_console(
|
||||
&mut gui_api.lock().unwrap().output,
|
||||
&format!(
|
||||
"Failure to update xmrig config with HTTP API.\nError: {}",
|
||||
err
|
||||
),
|
||||
ProcessName::XmrigProxy,
|
||||
);
|
||||
} else {
|
||||
gui_api_xmrig.lock().unwrap().node = XvbNode::XmrigProxy.to_string();
|
||||
debug!("XMRig-Proxy Process | mining on Xmrig-Proxy pool");
|
||||
Ok(priv_api) => {
|
||||
debug!("XMRig-Proxy Watchdog | HTTP API request OK, attempting [update_from_priv()]");
|
||||
PubXmrigProxyApi::update_from_priv(pub_api, priv_api);
|
||||
}
|
||||
Err(err) => {
|
||||
warn!(
|
||||
"XMRig-Proxy Watchdog | Could not send HTTP API request to: {}\n{}",
|
||||
api_summary_xp, err
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
// do not use more than 1 second for the loop
|
||||
// update xmrig to use xmrig-proxy if option enabled and local xmrig alive
|
||||
if xmrig_redirect
|
||||
&& gui_api_xmrig.lock().unwrap().node != XvbNode::XmrigProxy.to_string()
|
||||
&& (process_xmrig.lock().unwrap().state == ProcessState::Alive
|
||||
|| process_xmrig.lock().unwrap().state == ProcessState::NotMining)
|
||||
{
|
||||
info!("redirect local xmrig instance to xmrig-proxy");
|
||||
if let Err(err) = update_xmrig_config(
|
||||
&client,
|
||||
api_config_xmrig,
|
||||
&state_xmrig.token,
|
||||
&XvbNode::XmrigProxy,
|
||||
"",
|
||||
GUPAX_VERSION_UNDERSCORE,
|
||||
)
|
||||
.await
|
||||
{
|
||||
// show to console error about updating xmrig config
|
||||
warn!("XMRig-Proxy Process | Failed request HTTP API Xmrig");
|
||||
output_console(
|
||||
&mut gui_api.lock().unwrap().output,
|
||||
&format!(
|
||||
"Failure to update xmrig config with HTTP API.\nError: {}",
|
||||
err
|
||||
),
|
||||
ProcessName::XmrigProxy,
|
||||
);
|
||||
} else {
|
||||
gui_api_xmrig.lock().unwrap().node = XvbNode::XmrigProxy.to_string();
|
||||
debug!("XMRig-Proxy Process | mining on Xmrig-Proxy pool");
|
||||
}
|
||||
}
|
||||
} // locked are dropped here
|
||||
// do not use more than 1 second for the loop
|
||||
sleep_end_loop(now, ProcessName::XmrigProxy).await;
|
||||
}
|
||||
|
||||
|
|
|
@ -208,203 +208,207 @@ impl Helper {
|
|||
debug!("XvB Watchdog | ----------- Start of loop -----------");
|
||||
// Set timer of loop
|
||||
let start_loop = std::time::Instant::now();
|
||||
// check if first loop the state of Xmrig-Proxy
|
||||
if first_loop {
|
||||
xp_alive = process_xp.lock().unwrap().state == ProcessState::Alive;
|
||||
msg_retry_done = false;
|
||||
*retry.lock().unwrap() = false;
|
||||
}
|
||||
// verify if p2pool and xmrig are running, else XvB must be reloaded with another token/address to start verifying the other process.
|
||||
if check_state_outcauses_xvb(
|
||||
&client,
|
||||
gui_api,
|
||||
pub_api,
|
||||
process,
|
||||
process_xmrig,
|
||||
process_xp,
|
||||
process_p2pool,
|
||||
&mut first_loop,
|
||||
&handle_algo,
|
||||
pub_api_xmrig,
|
||||
pub_api_xp,
|
||||
state_p2pool,
|
||||
state_xmrig,
|
||||
state_xp,
|
||||
xp_alive,
|
||||
)
|
||||
.await
|
||||
{
|
||||
continue;
|
||||
}
|
||||
// check signal
|
||||
debug!("XvB | check signal");
|
||||
if signal_interrupt(
|
||||
process,
|
||||
if xp_alive { process_xp } else { process_xmrig },
|
||||
start.into(),
|
||||
&client,
|
||||
pub_api,
|
||||
gui_api,
|
||||
gui_api_xmrig,
|
||||
gui_api_xp,
|
||||
state_p2pool,
|
||||
state_xmrig,
|
||||
state_xp,
|
||||
xp_alive,
|
||||
) {
|
||||
info!("XvB Watchdog | Signal has stopped the loop");
|
||||
break;
|
||||
}
|
||||
// let handle_algo_c = handle_algo.lock().unwrap();
|
||||
let is_algo_started_once = handle_algo.lock().unwrap().is_some();
|
||||
let is_algo_finished = handle_algo
|
||||
.lock()
|
||||
.unwrap()
|
||||
.as_ref()
|
||||
.is_some_and(|algo| algo.is_finished());
|
||||
let is_request_finished = handle_request
|
||||
.lock()
|
||||
.unwrap()
|
||||
.as_ref()
|
||||
.is_some_and(|request: &JoinHandle<()>| request.is_finished())
|
||||
|| handle_request.lock().unwrap().is_none();
|
||||
// Send an HTTP API request only if one minute is passed since the last request or if first loop or if algorithm need to retry or if request is finished and algo is finished or almost finished (only public and private stats). We make sure public and private stats are refreshed before doing another run of the algo.
|
||||
// We make sure algo or request are not rerun when they are not over.
|
||||
// in the case of quick refresh before new run of algo, make sure it doesn't happen multiple times.
|
||||
let last_request_expired =
|
||||
last_request.lock().unwrap().elapsed() >= Duration::from_secs(60);
|
||||
let should_refresh_before_next_algo = is_algo_started_once
|
||||
&& last_algorithm.lock().unwrap().elapsed()
|
||||
>= Duration::from_secs((XVB_TIME_ALGO as f32 * 0.95) as u64)
|
||||
&& last_request.lock().unwrap().elapsed() >= Duration::from_secs(25);
|
||||
let process_alive = process.lock().unwrap().state == ProcessState::Alive;
|
||||
if ((last_request_expired || first_loop)
|
||||
|| (*retry.lock().unwrap() || is_algo_finished || should_refresh_before_next_algo)
|
||||
&& process_alive)
|
||||
&& is_request_finished
|
||||
{
|
||||
// do not wait for the request to finish so that they are retrieved at exactly one minute interval and not block the thread.
|
||||
// Private API will also use this instant if XvB is Alive.
|
||||
// first_loop is false here but could be changed to true under some conditions.
|
||||
// will send a stop signal if public stats failed or update data with new one.
|
||||
*handle_request.lock().unwrap() = Some(spawn(
|
||||
enc!((client, pub_api, gui_api, gui_api_p2pool, gui_api_xmrig, gui_api_xp, state_xvb, state_p2pool, state_xmrig, state_xp, process, last_algorithm, retry, handle_algo, time_donated, last_request) async move {
|
||||
// needs to wait here for public stats to get private stats.
|
||||
if last_request_expired || first_loop || should_refresh_before_next_algo {
|
||||
XvbPubStats::update_stats(&client, &gui_api, &pub_api, &process).await;
|
||||
*last_request.lock().unwrap() = Instant::now();
|
||||
}
|
||||
// private stats needs valid token and address.
|
||||
// other stats needs everything to be alive, so just require alive here for now.
|
||||
// maybe later differentiate to add a way to get private stats without running the algo ?
|
||||
if process.lock().unwrap().state == ProcessState::Alive {
|
||||
// get current share to know if we are in a round and this is a required data for algo.
|
||||
let share = gui_api_p2pool.lock().unwrap().sidechain_shares;
|
||||
debug!("XvB | Number of current shares: {}", share);
|
||||
// private stats can be requested every minute or first loop or if the have almost finished.
|
||||
if last_request_expired || first_loop || should_refresh_before_next_algo {
|
||||
debug!("XvB Watchdog | Attempting HTTP private API request...");
|
||||
// reload private stats, it send a signal if error that will be captured on the upper thread.
|
||||
XvbPrivStats::update_stats(
|
||||
&client, &state_p2pool.address, &state_xvb.token, &pub_api, &gui_api, &process,
|
||||
)
|
||||
.await;
|
||||
*last_request.lock().unwrap() = Instant::now();
|
||||
|
||||
// verify in which round type we are
|
||||
let round = round_type(share, &pub_api);
|
||||
// refresh the round we participate in.
|
||||
debug!("XvB | Round type: {:#?}", round);
|
||||
pub_api.lock().unwrap().stats_priv.round_participate = round;
|
||||
// verify if we are the winner of the current round
|
||||
if pub_api.lock().unwrap().stats_pub.winner
|
||||
== Helper::head_tail_of_monero_address(&state_p2pool.address).as_str()
|
||||
{
|
||||
pub_api.lock().unwrap().stats_priv.win_current = true
|
||||
// check if first loop the state of Xmrig-Proxy
|
||||
if first_loop {
|
||||
xp_alive = process_xp.lock().unwrap().state == ProcessState::Alive;
|
||||
msg_retry_done = false;
|
||||
*retry.lock().unwrap() = false;
|
||||
}
|
||||
// verify if p2pool and xmrig are running, else XvB must be reloaded with another token/address to start verifying the other process.
|
||||
if check_state_outcauses_xvb(
|
||||
&client,
|
||||
gui_api,
|
||||
pub_api,
|
||||
process,
|
||||
process_xmrig,
|
||||
process_xp,
|
||||
process_p2pool,
|
||||
&mut first_loop,
|
||||
&handle_algo,
|
||||
pub_api_xmrig,
|
||||
pub_api_xp,
|
||||
state_p2pool,
|
||||
state_xmrig,
|
||||
state_xp,
|
||||
xp_alive,
|
||||
)
|
||||
.await
|
||||
{
|
||||
continue;
|
||||
}
|
||||
// check signal
|
||||
debug!("XvB | check signal");
|
||||
if signal_interrupt(
|
||||
process,
|
||||
if xp_alive { process_xp } else { process_xmrig },
|
||||
start.into(),
|
||||
&client,
|
||||
pub_api,
|
||||
gui_api,
|
||||
gui_api_xmrig,
|
||||
gui_api_xp,
|
||||
state_p2pool,
|
||||
state_xmrig,
|
||||
state_xp,
|
||||
xp_alive,
|
||||
) {
|
||||
info!("XvB Watchdog | Signal has stopped the loop");
|
||||
break;
|
||||
}
|
||||
// let handle_algo_c = handle_algo.lock().unwrap();
|
||||
let is_algo_started_once = handle_algo.lock().unwrap().is_some();
|
||||
let is_algo_finished = handle_algo
|
||||
.lock()
|
||||
.unwrap()
|
||||
.as_ref()
|
||||
.is_some_and(|algo| algo.is_finished());
|
||||
let is_request_finished = handle_request
|
||||
.lock()
|
||||
.unwrap()
|
||||
.as_ref()
|
||||
.is_some_and(|request: &JoinHandle<()>| request.is_finished())
|
||||
|| handle_request.lock().unwrap().is_none();
|
||||
// Send an HTTP API request only if one minute is passed since the last request or if first loop or if algorithm need to retry or if request is finished and algo is finished or almost finished (only public and private stats). We make sure public and private stats are refreshed before doing another run of the algo.
|
||||
// We make sure algo or request are not rerun when they are not over.
|
||||
// in the case of quick refresh before new run of algo, make sure it doesn't happen multiple times.
|
||||
let last_request_expired =
|
||||
last_request.lock().unwrap().elapsed() >= Duration::from_secs(60);
|
||||
let should_refresh_before_next_algo = is_algo_started_once
|
||||
&& last_algorithm.lock().unwrap().elapsed()
|
||||
>= Duration::from_secs((XVB_TIME_ALGO as f32 * 0.95) as u64)
|
||||
&& last_request.lock().unwrap().elapsed() >= Duration::from_secs(25);
|
||||
let process_alive = process.lock().unwrap().state == ProcessState::Alive;
|
||||
if ((last_request_expired || first_loop)
|
||||
|| (*retry.lock().unwrap()
|
||||
|| is_algo_finished
|
||||
|| should_refresh_before_next_algo)
|
||||
&& process_alive)
|
||||
&& is_request_finished
|
||||
{
|
||||
// do not wait for the request to finish so that they are retrieved at exactly one minute interval and not block the thread.
|
||||
// Private API will also use this instant if XvB is Alive.
|
||||
// first_loop is false here but could be changed to true under some conditions.
|
||||
// will send a stop signal if public stats failed or update data with new one.
|
||||
*handle_request.lock().unwrap() = Some(spawn(
|
||||
enc!((client, pub_api, gui_api, gui_api_p2pool, gui_api_xmrig, gui_api_xp, state_xvb, state_p2pool, state_xmrig, state_xp, process, last_algorithm, retry, handle_algo, time_donated, last_request) async move {
|
||||
// needs to wait here for public stats to get private stats.
|
||||
if last_request_expired || first_loop || should_refresh_before_next_algo {
|
||||
XvbPubStats::update_stats(&client, &gui_api, &pub_api, &process).await;
|
||||
*last_request.lock().unwrap() = Instant::now();
|
||||
}
|
||||
}
|
||||
let hashrate = current_controllable_hr(xp_alive, &gui_api_xp, &gui_api_xmrig);
|
||||
let difficulty_data_is_ready = gui_api_p2pool.lock().unwrap().p2pool_difficulty_u64 > 100_000;
|
||||
if (first_loop || *retry.lock().unwrap()|| is_algo_finished) && hashrate > 0.0 && process.lock().unwrap().state == ProcessState::Alive && difficulty_data_is_ready
|
||||
{
|
||||
// if algo was started, it must not retry next loop.
|
||||
*retry.lock().unwrap() = false;
|
||||
// reset instant because algo will start.
|
||||
*last_algorithm.lock().unwrap() = Instant::now();
|
||||
*handle_algo.lock().unwrap() = Some(spawn(enc!((client, gui_api, gui_api_xmrig, gui_api_xp, state_xmrig, state_xp, time_donated, state_xvb) async move {
|
||||
let token_xmrig = if xp_alive {
|
||||
&state_xp.token
|
||||
} else {
|
||||
&state_xmrig.token
|
||||
};
|
||||
let rig = if xp_alive {
|
||||
""
|
||||
} else {
|
||||
&state_xmrig.rig
|
||||
};
|
||||
algorithm(
|
||||
&client,
|
||||
&pub_api,
|
||||
&gui_api,
|
||||
&gui_api_xmrig,
|
||||
&gui_api_xp,
|
||||
&gui_api_p2pool,
|
||||
token_xmrig,
|
||||
&state_p2pool,
|
||||
share,
|
||||
&time_donated,
|
||||
rig,
|
||||
xp_alive,
|
||||
state_xvb.p2pool_buffer
|
||||
).await;
|
||||
})));
|
||||
} else {
|
||||
// if xmrig is still at 0 HR but is alive and algorithm is skipped, recheck first 10s of xmrig inside algorithm next time (in one minute). Don't check if algo failed to start because state was not alive after getting private stats.
|
||||
// private stats needs valid token and address.
|
||||
// other stats needs everything to be alive, so just require alive here for now.
|
||||
// maybe later differentiate to add a way to get private stats without running the algo ?
|
||||
if process.lock().unwrap().state == ProcessState::Alive {
|
||||
// get current share to know if we are in a round and this is a required data for algo.
|
||||
let share = gui_api_p2pool.lock().unwrap().sidechain_shares;
|
||||
debug!("XvB | Number of current shares: {}", share);
|
||||
// private stats can be requested every minute or first loop or if the have almost finished.
|
||||
if last_request_expired || first_loop || should_refresh_before_next_algo {
|
||||
debug!("XvB Watchdog | Attempting HTTP private API request...");
|
||||
// reload private stats, it send a signal if error that will be captured on the upper thread.
|
||||
XvbPrivStats::update_stats(
|
||||
&client, &state_p2pool.address, &state_xvb.token, &pub_api, &gui_api, &process,
|
||||
)
|
||||
.await;
|
||||
*last_request.lock().unwrap() = Instant::now();
|
||||
|
||||
if (hashrate == 0.0 || !difficulty_data_is_ready) && process.lock().unwrap().state == ProcessState::Alive {
|
||||
*retry.lock().unwrap() = true
|
||||
// verify in which round type we are
|
||||
let round = round_type(share, &pub_api);
|
||||
// refresh the round we participate in.
|
||||
debug!("XvB | Round type: {:#?}", round);
|
||||
pub_api.lock().unwrap().stats_priv.round_participate = round;
|
||||
// verify if we are the winner of the current round
|
||||
if pub_api.lock().unwrap().stats_pub.winner
|
||||
== Helper::head_tail_of_monero_address(&state_p2pool.address).as_str()
|
||||
{
|
||||
pub_api.lock().unwrap().stats_priv.win_current = true
|
||||
}
|
||||
}
|
||||
let hashrate = current_controllable_hr(xp_alive, &gui_api_xp, &gui_api_xmrig);
|
||||
let difficulty_data_is_ready = gui_api_p2pool.lock().unwrap().p2pool_difficulty_u64 > 100_000;
|
||||
if (first_loop || *retry.lock().unwrap()|| is_algo_finished) && hashrate > 0.0 && process.lock().unwrap().state == ProcessState::Alive && difficulty_data_is_ready
|
||||
{
|
||||
// if algo was started, it must not retry next loop.
|
||||
*retry.lock().unwrap() = false;
|
||||
// reset instant because algo will start.
|
||||
*last_algorithm.lock().unwrap() = Instant::now();
|
||||
*handle_algo.lock().unwrap() = Some(spawn(enc!((client, gui_api, gui_api_xmrig, gui_api_xp, state_xmrig, state_xp, time_donated, state_xvb) async move {
|
||||
let token_xmrig = if xp_alive {
|
||||
&state_xp.token
|
||||
} else {
|
||||
&state_xmrig.token
|
||||
};
|
||||
let rig = if xp_alive {
|
||||
""
|
||||
} else {
|
||||
&state_xmrig.rig
|
||||
};
|
||||
algorithm(
|
||||
&client,
|
||||
&pub_api,
|
||||
&gui_api,
|
||||
&gui_api_xmrig,
|
||||
&gui_api_xp,
|
||||
&gui_api_p2pool,
|
||||
token_xmrig,
|
||||
&state_p2pool,
|
||||
share,
|
||||
&time_donated,
|
||||
rig,
|
||||
xp_alive,
|
||||
state_xvb.p2pool_buffer
|
||||
).await;
|
||||
})));
|
||||
} else {
|
||||
// if xmrig is still at 0 HR but is alive and algorithm is skipped, recheck first 10s of xmrig inside algorithm next time (in one minute). Don't check if algo failed to start because state was not alive after getting private stats.
|
||||
|
||||
}
|
||||
}),
|
||||
));
|
||||
if (hashrate == 0.0 || !difficulty_data_is_ready) && process.lock().unwrap().state == ProcessState::Alive {
|
||||
*retry.lock().unwrap() = true
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
}),
|
||||
));
|
||||
}
|
||||
// if retry is false, next time the message about waiting for xmrig HR can be shown.
|
||||
if !*retry.lock().unwrap() {
|
||||
msg_retry_done = false;
|
||||
}
|
||||
// inform user that algorithm has not yet started because it is waiting for xmrig HR.
|
||||
// show this message only once before the start of algo
|
||||
if *retry.lock().unwrap() && !msg_retry_done {
|
||||
let msg = if xp_alive {
|
||||
"Algorithm is waiting for 1 minute average HR of XMRig-Proxy or p2pool data"
|
||||
} else {
|
||||
"Algorithm is waiting for 10 seconds average HR of XMRig or p2pool data"
|
||||
};
|
||||
output_console(&mut gui_api.lock().unwrap().output, msg, ProcessName::Xvb);
|
||||
msg_retry_done = true;
|
||||
}
|
||||
// update indicator (time before switch and mining location) in private stats
|
||||
// if algo not running, second message.
|
||||
// will update countdown every second.
|
||||
// verify current node which is set by algo or circonstances (failed node).
|
||||
// verify given time set by algo and start time of current algo.
|
||||
// will run only if XvB is alive.
|
||||
// let algo time to start, so no countdown is shown.
|
||||
update_indicator_algo(
|
||||
is_algo_started_once,
|
||||
is_algo_finished,
|
||||
process,
|
||||
pub_api,
|
||||
*time_donated.lock().unwrap(),
|
||||
&last_algorithm,
|
||||
);
|
||||
// first_loop is done, but maybe retry will allow the algorithm to retry again.
|
||||
if first_loop {
|
||||
first_loop = false;
|
||||
}
|
||||
// Sleep (only if 900ms hasn't passed)
|
||||
}
|
||||
// if retry is false, next time the message about waiting for xmrig HR can be shown.
|
||||
if !*retry.lock().unwrap() {
|
||||
msg_retry_done = false;
|
||||
}
|
||||
// inform user that algorithm has not yet started because it is waiting for xmrig HR.
|
||||
// show this message only once before the start of algo
|
||||
if *retry.lock().unwrap() && !msg_retry_done {
|
||||
let msg = if xp_alive {
|
||||
"Algorithm is waiting for 1 minute average HR of XMRig-Proxy or p2pool data"
|
||||
} else {
|
||||
"Algorithm is waiting for 10 seconds average HR of XMRig or p2pool data"
|
||||
};
|
||||
output_console(&mut gui_api.lock().unwrap().output, msg, ProcessName::Xvb);
|
||||
msg_retry_done = true;
|
||||
}
|
||||
// update indicator (time before switch and mining location) in private stats
|
||||
// if algo not running, second message.
|
||||
// will update countdown every second.
|
||||
// verify current node which is set by algo or circonstances (failed node).
|
||||
// verify given time set by algo and start time of current algo.
|
||||
// will run only if XvB is alive.
|
||||
// let algo time to start, so no countdown is shown.
|
||||
update_indicator_algo(
|
||||
is_algo_started_once,
|
||||
is_algo_finished,
|
||||
process,
|
||||
pub_api,
|
||||
*time_donated.lock().unwrap(),
|
||||
&last_algorithm,
|
||||
);
|
||||
// first_loop is done, but maybe retry will allow the algorithm to retry again.
|
||||
if first_loop {
|
||||
first_loop = false;
|
||||
}
|
||||
// Sleep (only if 900ms hasn't passed)
|
||||
sleep_end_loop(start_loop, ProcessName::Xvb).await;
|
||||
}
|
||||
}
|
||||
|
|
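Throughout this file the long-running work (public and private stats requests, the algorithm) runs in tokio::spawned tasks that receive cloned Arc handles; the spawned tasks lock only briefly around reads and writes, and the watchdog merely checks is_finished() on the stored JoinHandle. A reduced sketch of that pattern with stand-in types; the enc! macro in the real code clones the captured handles, as is done by hand here:

    use std::sync::{Arc, Mutex};
    use tokio::task::JoinHandle;
    use tokio::time::{sleep, Duration};

    #[derive(Default)]
    struct PubStats { wins: u64 }

    async fn fetch_wins() -> u64 {
        // Stand-in for the HTTP request to the XvB API.
        sleep(Duration::from_millis(50)).await;
        3
    }

    #[tokio::main]
    async fn main() {
        let pub_api = Arc::new(Mutex::new(PubStats::default()));

        // Clone the handle for the background task (what `enc!` does in gupaxx).
        let handle: JoinHandle<()> = tokio::spawn({
            let pub_api = pub_api.clone();
            async move {
                // Await first, with no lock held...
                let wins = fetch_wins().await;
                // ...then lock only long enough to store the result.
                pub_api.lock().unwrap().wins = wins;
            }
        });

        // The watchdog loop can keep ticking; it only checks if the task finished.
        while !handle.is_finished() {
            sleep(Duration::from_millis(10)).await;
        }
        println!("wins = {}", pub_api.lock().unwrap().wins);
    }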