Mirror of https://github.com/Cyrix126/gupaxx.git (synced 2025-01-08 15:09:24 +00:00)

feat: P2Pool, add a checkbox to auto switch to local node when ready (#63)

Parent: 0732bab78a
Commit: 075beddea1
9 changed files with 128 additions and 25 deletions
@@ -623,6 +623,9 @@ impl App {
     info!("App Init | Setting saved [Tab]...");
     app.tab = app.state.gupax.tab;

+    // Set saved prefer local node to runtime
+    app.p2pool_api.lock().unwrap().prefer_local_node = app.state.p2pool.prefer_local_node;
+
     // Set saved Hero mode to runtime.
     debug!("Setting runtime_mode & runtime_manual_amount");
     // apply hero if simple mode saved with checkbox true, will let default to auto otherwise
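At init, the saved checkbox value is copied into the runtime API struct so helper threads observe the persisted choice from the first frame. A minimal sketch of that pattern, where SavedState and RuntimeApi are hypothetical stand-ins for the real types:

// Sketch only: SavedState/RuntimeApi are illustrative stand-ins.
use std::sync::{Arc, Mutex};

struct SavedState { prefer_local_node: bool }
struct RuntimeApi { prefer_local_node: bool }

fn apply_saved(state: &SavedState, api: &Arc<Mutex<RuntimeApi>>) {
    // Done once at init; any thread cloning the Arc sees the persisted choice.
    api.lock().unwrap().prefer_local_node = state.prefer_local_node;
}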
@@ -254,6 +254,7 @@ impl crate::app::App {
                     &self.state.p2pool,
                     &self.state.gupax.absolute_p2pool_path,
                     self.gather_backup_hosts(),
+                    false,
                 );
             }
             ProcessName::Xmrig => {
@@ -324,6 +325,7 @@ impl crate::app::App {
                 &self.state.p2pool,
                 &self.state.gupax.absolute_p2pool_path,
                 self.gather_backup_hosts(),
+                false,
             ),

             ProcessName::Xmrig => {
@@ -43,8 +43,10 @@ impl P2pool {
     ) {
         //---------------------------------------------------------------------------------------------------- [Simple] Console
         // debug!("P2Pool Tab | Rendering [Console]");
+        let mut api_lock = api.lock().unwrap();
+        // let mut prefer_local_node = api.lock().unwrap().prefer_local_node;
         egui::ScrollArea::vertical().show(ui, |ui| {
-            let text = &api.lock().unwrap().output;
+            let text = &api_lock.output;
             ui.group(|ui| {
                 console(ui, text);
                 if !self.simple {
@@ -74,7 +76,7 @@ impl P2pool {
         );

         if self.simple {
-            self.simple(ui, ping);
+            self.simple(ui, ping, &mut api_lock);
         } else {
             self.advanced(ui, node_vec);
         }
@@ -35,11 +35,14 @@ use egui::vec2;
 use crate::constants::*;
 use egui::{Color32, ComboBox, RichText, Ui};
 use log::*;
+
+use super::p2pool::PubP2poolApi;
+
 impl P2pool {
-    pub(super) fn simple(&mut self, ui: &mut Ui, ping: &Arc<Mutex<Ping>>) {
+    pub(super) fn simple(&mut self, ui: &mut Ui, ping: &Arc<Mutex<Ping>>, api: &mut PubP2poolApi) {
         ui.vertical_centered(|ui|{
             ui.add_space(SPACE);
-            ui.checkbox(&mut self.local_node, "Use a local node").on_hover_text("If checked (recommended), p2pool will automatically use the local node.\nCheck the Node tab to start a local node.\nIf unchecked, p2pool will attempt to use a remote node.");
+            ui.checkbox(&mut self.local_node, "Start with a local node").on_hover_text("If checked (recommended), p2pool will start trying to use the local node.\nCheck the Node tab to start a local node.\nIf unchecked, p2pool will attempt to use a remote node.");
         });
         ui.add_space(SPACE * 2.0);
         // if checked, use only local node
@@ -99,7 +102,6 @@ impl P2pool {
         ui.style_mut().override_text_style = Some(egui::TextStyle::Button);
         ui.horizontal(|ui| {
             ui.style_mut().wrap_mode = Some(TextWrapMode::Extend);
-            // ui.columns_const(|[col1, col2, col3, col4, col5]| {
             let width = ((ui.available_width() / 5.0)
                 - (ui.spacing().item_spacing.x * (4.0 / 5.0)))
             .max(20.0);
@@ -195,33 +197,43 @@ impl P2pool {
         ui.group(|ui| {
             ui.horizontal(|ui| {
                 let width =
-                    (((ui.available_width() - ui.spacing().item_spacing.x) / 3.0)
+                    (((ui.available_width() - ui.spacing().item_spacing.x) / 4.0)
                         - SPACE * 1.5)
                         .max(ui.text_style_height(&TextStyle::Button) * 7.0);
                 let size = vec2(
                     width,
                     height_txt_before_button(ui, &TextStyle::Button) * 2.0,
                 );
-                // [Auto-node]
                 ui.add_sized(
                     size,
                     Checkbox::new(&mut self.auto_select, "Auto-select"),
                 )
-                // ui.checkbox(&mut self.auto_select, "Auto-select")
                 .on_hover_text(P2POOL_AUTO_SELECT);
                 ui.separator();
-                // [Auto-node]
                 ui.add_sized(size, Checkbox::new(&mut self.auto_ping, "Auto-ping"))
-                // ui.checkbox(&mut self.auto_ping, "Auto-ping")
                 .on_hover_text(P2POOL_AUTO_NODE);
                 ui.separator();
-                // [Backup host]
                 ui.add_sized(
                     size,
                     Checkbox::new(&mut self.backup_host, "Backup host"),
                 )
-                // ui.checkbox(&mut self.backup_host, "Backup host")
                 .on_hover_text(P2POOL_BACKUP_HOST_SIMPLE);
+                ui.separator();
+                // set preferred local node immediately if we are on simple mode.
+                if ui
+                    .add_sized(
+                        size,
+                        Checkbox::new(
+                            &mut self.prefer_local_node,
+                            "Auto-Switch to Local Node",
+                        ),
+                    )
+                    .on_hover_text(P2POOL_AUTOSWITCH_LOCAL_NODE)
+                    .clicked()
+                {
+                    api.prefer_local_node = self.prefer_local_node;
+                    // api.lock().unwrap().prefer_local_node = self.prefer_local_node;
+                }
             })
         });
     });
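The new checkbox writes the preference into the shared API struct the moment it is toggled, rather than waiting for a save. A minimal sketch of this write-through pattern, with Settings and RuntimeApi as hypothetical stand-ins for the real structs:

use egui::Ui;

struct Settings { prefer_local_node: bool }
struct RuntimeApi { prefer_local_node: bool }

fn prefer_local_node_checkbox(ui: &mut Ui, settings: &mut Settings, api: &mut RuntimeApi) {
    // clicked() is true only on the frame the user toggles the box,
    // so the runtime copy is updated exactly once per change.
    if ui
        .checkbox(&mut settings.prefer_local_node, "Auto-Switch to Local Node")
        .clicked()
    {
        api.prefer_local_node = settings.prefer_local_node;
    }
}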
@@ -300,6 +300,7 @@ pub struct P2pool {
     pub rpc: String,
     pub zmq: String,
     pub selected_node: SelectedPoolNode,
+    pub prefer_local_node: bool,
 }

 // compatible for P2Pool and Xmrig/Proxy
@@ -618,6 +619,7 @@ impl Default for P2pool {
                 rpc: "18081".to_string(),
                 zmq_rig: "18083".to_string(),
             },
+            prefer_local_node: true,
         }
     }
 }
@@ -83,6 +83,7 @@ mod test {
 ip = "192.168.1.123"
 rpc = "18089"
 zmq = "18083"
+prefer_local_node = true

 [p2pool.selected_node]
 index = 0
@@ -44,8 +44,10 @@ use crate::{
     macros::*,
     xmr::*,
 };
+use enclose::enc;
 use log::*;
 use serde::{Deserialize, Serialize};
+use std::mem;
 use std::path::Path;
 use std::{
     fmt::Write,
@@ -54,6 +56,7 @@ use std::{
     thread,
     time::*,
 };
+use tokio::time::sleep;
 impl Helper {
     #[cold]
     #[inline(never)]
@@ -192,6 +195,7 @@ impl Helper {
         state: &P2pool,
         path: &Path,
         backup_hosts: Option<Vec<PoolNode>>,
+        override_to_local_node: bool,
     ) {
         info!("P2Pool | Attempting to restart...");
         helper.lock().unwrap().p2pool.lock().unwrap().signal = ProcessSignal::Restart;
@@ -208,7 +212,7 @@ impl Helper {
             }
             // Ok, process is not alive, start the new one!
             info!("P2Pool | Old process seems dead, starting new one!");
-            Self::start_p2pool(&helper, &state, &path, backup_hosts);
+            Self::start_p2pool(&helper, &state, &path, backup_hosts, override_to_local_node);
         });
         info!("P2Pool | Restart ... OK");
     }
@@ -221,11 +225,17 @@ impl Helper {
         state: &P2pool,
         path: &Path,
         backup_hosts: Option<Vec<PoolNode>>,
+        override_to_local_node: bool,
     ) {
         helper.lock().unwrap().p2pool.lock().unwrap().state = ProcessState::Middle;

         let (args, api_path_local, api_path_network, api_path_pool, api_path_p2p) =
-            Self::build_p2pool_args_and_mutate_img(helper, state, path, backup_hosts);
+            Self::build_p2pool_args_and_mutate_img(
+                helper,
+                state,
+                path,
+                &backup_hosts,
+                override_to_local_node,
+            );

         // Print arguments & user settings to console
         crate::disk::print_dash(&format!(
@@ -239,6 +249,20 @@ impl Helper {
         let pub_api = Arc::clone(&helper.lock().unwrap().pub_api_p2pool);
         let gupax_p2pool_api = Arc::clone(&helper.lock().unwrap().gupax_p2pool_api);
         let path = path.to_path_buf();
+        // thread that watches the local node so p2pool can be restarted onto it once it is synced.
+        // starting the thread even if the option is disabled allows the change to apply immediately if it is enabled again, without asking the user to restart p2pool.
+        // Start this thread only if we don't already override to local node
+        if !override_to_local_node {
+            thread::spawn(enc!((helper, state, path, backup_hosts) move || {
+                Self::watch_switch_p2pool_to_local_node(
+                    &helper,
+                    &state,
+                    &path,
+                    backup_hosts,
+                );
+            }));
+        }
+
         thread::spawn(move || {
             Self::spawn_p2pool_watchdog(
                 process,
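The watcher thread is spawned through enclose's enc! macro, which clones the listed variables and moves the clones into the closure, leaving the originals usable afterwards. A self-contained sketch of that behavior:

use enclose::enc;
use std::sync::{Arc, Mutex};

fn main() {
    let shared = Arc::new(Mutex::new(0));
    // `shared` inside the closure is a clone of the Arc, not the original.
    let handle = std::thread::spawn(enc!((shared) move || {
        *shared.lock().unwrap() += 1;
    }));
    handle.join().unwrap();
    assert_eq!(*shared.lock().unwrap(), 1); // the original Arc is still usable here
}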
@@ -274,7 +298,8 @@ impl Helper {
         helper: &Arc<Mutex<Self>>,
         state: &P2pool,
         path: &Path,
-        backup_hosts: Option<Vec<PoolNode>>,
+        backup_hosts: &Option<Vec<PoolNode>>,
+        override_to_local_node: bool,
     ) -> (Vec<String>, PathBuf, PathBuf, PathBuf, PathBuf) {
         let mut args = Vec::with_capacity(500);
         let path = path.to_path_buf();
@@ -282,7 +307,7 @@ impl Helper {
         api_path.pop();

         // [Simple]
-        if state.simple && !state.local_node {
+        if state.simple && (!state.local_node && !override_to_local_node) {
             // Build the p2pool argument
             let (ip, rpc, zmq) = RemoteNode::get_ip_rpc_zmq(&state.node); // Get: (IP, RPC, ZMQ)
             args.push("--wallet".to_string());
@@ -323,7 +348,7 @@ impl Helper {
                 out_peers: "10".to_string(),
                 in_peers: "10".to_string(),
             };
-        } else if state.simple && state.local_node {
+        } else if state.simple && (state.local_node || override_to_local_node) {
             // use the local node
             // Build the p2pool argument
             args.push("--wallet".to_string());
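Taken together, the two conditions above make the argument builder treat the watcher's override as equivalent to the user's "start with a local node" choice. Distilled into a hypothetical decision function:

// Sketch of the selection logic; not the real argument builder.
fn use_local_node(simple: bool, local_node: bool, override_to_local: bool) -> bool {
    // In simple mode, use the local node if the user asked for it at start
    // OR the watcher restarted p2pool with the override set.
    simple && (local_node || override_to_local)
}

fn main() {
    assert!(use_local_node(true, false, true)); // watcher-triggered restart
    assert!(!use_local_node(true, false, false)); // plain remote-node start
}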
@@ -576,9 +601,8 @@ impl Helper {
         }
         let start = process.lock().unwrap().start;

-        // Reset stats before loop
-        *pub_api.lock().unwrap() = PubP2poolApi::new();
-        *gui_api.lock().unwrap() = PubP2poolApi::new();
+        // Reset stats before loop, except action parameters that don't need saving to state.
+        reset_data_p2pool(&pub_api, &gui_api);

         // 4. Loop as watchdog
         let mut first_loop = true;
@@ -611,6 +635,8 @@ impl Helper {
             ) {
                 break;
             }
+            // check if prefer local node is true, the local node is alive, and p2pool was not started with the local node
+
             // Check vector of user input
             check_user_input(&process, &mut stdin);
             // Check if logs need resetting
@@ -713,6 +739,43 @@ impl Helper {
         // 5. If loop broke, we must be done here.
         info!("P2Pool Watchdog | Watchdog thread exiting... Goodbye!");
     }
+    #[tokio::main]
+    #[allow(clippy::await_holding_lock)]
+    async fn watch_switch_p2pool_to_local_node(
+        helper: &Arc<Mutex<Helper>>,
+        state: &P2pool,
+        path_p2pool: &Path,
+        backup_hosts: Option<Vec<PoolNode>>,
+    ) {
+        // do not try to restart immediately after a first start, or else the two starts would conflict.
+        sleep(Duration::from_secs(10)).await;
+
+        // check every second
+        loop {
+            let helper_lock = helper.lock().unwrap();
+            let node_process = helper_lock.node.lock().unwrap();
+            let process = helper_lock.p2pool.lock().unwrap();
+            let gui_api = helper_lock.gui_api_p2pool.lock().unwrap();
+            if gui_api.prefer_local_node
+                && state.simple
+                && !state.local_node
+                && node_process.state == ProcessState::Alive
+                && process.is_alive()
+            {
+                drop(gui_api);
+                drop(process);
+                drop(node_process);
+                drop(helper_lock);
+                Helper::restart_p2pool(helper, state, path_p2pool, backup_hosts, true);
+                break;
+            }
+            drop(gui_api);
+            drop(process);
+            drop(node_process);
+            drop(helper_lock);
+            sleep(Duration::from_secs(1)).await;
+        }
+    }
 }
 //---------------------------------------------------------------------------------------------------- [ImgP2pool]
 // A static "image" of data that P2Pool started with.
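watch_switch_p2pool_to_local_node polls once per second and explicitly drops every lock before triggering the restart, so restart_p2pool can take those locks itself. The same pattern with std threads instead of tokio, where should_switch and restart are illustrative stand-ins for the real condition and for Helper::restart_p2pool:

use std::sync::{Arc, Mutex};
use std::{thread, time::Duration};

fn watch_and_switch(should_switch: Arc<Mutex<bool>>, restart: impl Fn()) {
    // Grace period so the watcher cannot race the initial start.
    thread::sleep(Duration::from_secs(10));
    loop {
        // Hold the lock only long enough to read the condition,
        // then release it before the (slow) restart runs.
        let go = *should_switch.lock().unwrap();
        if go {
            restart();
            break; // one-shot: the restarted p2pool is already on the local node
        }
        thread::sleep(Duration::from_secs(1));
    }
}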
@@ -814,6 +877,7 @@ pub struct PubP2poolApi {
     // from local/p2p
     pub p2p_connected: u32,
     pub node_connected: bool,
+    pub prefer_local_node: bool,
 }

 impl Default for PubP2poolApi {
@@ -868,6 +932,7 @@ impl PubP2poolApi {
             p2p_connected: 0,
             synchronised: false,
             node_connected: false,
+            prefer_local_node: true,
         }
     }

@@ -887,6 +952,7 @@ impl PubP2poolApi {
             tick: std::mem::take(&mut gui_api.tick),
             sidechain_shares: std::mem::take(&mut gui_api.sidechain_shares),
             sidechain_ehr: std::mem::take(&mut gui_api.sidechain_ehr),
+            prefer_local_node: std::mem::take(&mut gui_api.prefer_local_node),
             ..pub_api.clone()
         };
     }
@@ -1121,10 +1187,13 @@ impl PubP2poolApi {
     }
     /// Check if all conditions are met to be alive or if something is wrong
     fn update_state(&self, process: &mut Process) {
-        if self.synchronised && self.node_connected && self.p2p_connected > 1 && self.height > 10 {
+        if process.state == ProcessState::Syncing
+            && self.synchronised
+            && self.node_connected
+            && self.p2p_connected > 1
+            && self.height > 10
+        {
             process.state = ProcessState::Alive;
-        } else {
-            process.state = ProcessState::Syncing;
         }
     }

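The rewritten update_state only promotes a process from Syncing to Alive and never demotes it back, so a brief dip in peer count can no longer flip an Alive process to Syncing. The transition in isolation, on simplified stand-in types:

#[derive(PartialEq)]
enum ProcessState { Syncing, Alive }

struct Api { synchronised: bool, node_connected: bool, p2p_connected: u32, height: u64 }

fn update_state(api: &Api, state: &mut ProcessState) {
    if *state == ProcessState::Syncing
        && api.synchronised
        && api.node_connected
        && api.p2p_connected > 1
        && api.height > 10
    {
        *state = ProcessState::Alive;
    }
    // No else branch: the state is never demoted by this check.
}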
@@ -1385,3 +1454,12 @@ impl PrivP2PoolP2PApi {
         }
     }
 }
+fn reset_data_p2pool(pub_api: &Arc<Mutex<PubP2poolApi>>, gui_api: &Arc<Mutex<PubP2poolApi>>) {
+    let current_pref = mem::take(&mut pub_api.lock().unwrap().prefer_local_node);
+    // even if it is a restart, we want to keep values set by the user without requiring them to click the save button.
+
+    *pub_api.lock().unwrap() = PubP2poolApi::new();
+    *gui_api.lock().unwrap() = PubP2poolApi::new();
+    // keep the value modified by xmrig even if xvb is dead.
+    pub_api.lock().unwrap().prefer_local_node = current_pref;
+}
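reset_data_p2pool wipes both API structs but carries the user's preference across the reset with std::mem::take. The same idea on a simplified struct (Stats is an illustrative stand-in for PubP2poolApi):

use std::mem;

#[derive(Default)]
struct Stats { prefer_local_node: bool, height: u64 }

fn reset_keeping_preference(stats: &mut Stats) {
    // take() moves the flag out, leaving the default (false) in its place.
    let pref = mem::take(&mut stats.prefer_local_node);
    *stats = Stats::default(); // wipe everything else
    stats.prefer_local_node = pref; // restore the user's choice
}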
@@ -208,6 +208,7 @@ pub fn init_auto(app: &mut App) {
             &app.state.p2pool,
             &app.state.gupax.absolute_p2pool_path,
             backup_hosts,
+            false,
         );
     }
 } else {
@@ -384,6 +384,8 @@ pub const P2POOL_BACKUP_HOST_SIMPLE: &str = r#"Automatically switch to the other
 Note: you must ping the remote nodes or this feature will default to only using the currently selected node."#;
 pub const P2POOL_BACKUP_HOST_ADVANCED: &str =
     "Automatically switch to the other nodes in your list if the current one is down.";
+pub const P2POOL_AUTOSWITCH_LOCAL_NODE: &str =
+    "Automatically switch to the local node when it is ready to be used.";
 pub const P2POOL_SELECT_FASTEST: &str = "Select the fastest remote Monero node";
 pub const P2POOL_SELECT_RANDOM: &str = "Select a random remote Monero node";
 pub const P2POOL_SELECT_LAST: &str = "Select the previous remote Monero node";