node: only add simple nodes to backup lists if green/yellow

This avoids adding potentially dead nodes to the list and causing
P2Pool to abort. If no ping data is available, no backup nodes
are added.
hinto.janai 2023-11-26 16:19:52 -05:00
parent 19ed7c721d
commit 42e67d940e
5 changed files with 76 additions and 40 deletions
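Before the per-file diffs, a minimal standalone sketch of the rule described in the commit message, assuming a hypothetical stand-in `PingedNode` type; only the cutoff constant and the skip condition mirror the actual change (found in the `impl App` hunk below):

// Sketch only: stand-in types mirroring the commit's selection rule.
const RED_NODE_PING: u128 = 500; // cutoff used by this commit

struct PingedNode {
    ip: &'static str,
    ms: u128, // measured ping latency
}

/// No ping data => no backup nodes; otherwise keep only green/yellow nodes.
fn backup_ips(pinged: bool, nodes: &[PingedNode]) -> Option<Vec<&'static str>> {
    if !pinged {
        return None; // never pinged: we can't tell dead nodes apart
    }
    let vec: Vec<&'static str> = nodes
        .iter()
        .filter(|n| n.ms <= RED_NODE_PING) // skip red/timed-out nodes
        .map(|n| n.ip)
        .collect();
    if vec.is_empty() { None } else { Some(vec) }
}

fn main() {
    let nodes = [
        PingedNode { ip: "node1.example.com", ms: 120 },  // green
        PingedNode { ip: "node2.example.com", ms: 450 },  // yellow
        PingedNode { ip: "node3.example.com", ms: 5000 }, // timed out
    ];
    assert_eq!(backup_ips(false, &nodes), None);
    assert_eq!(
        backup_ips(true, &nodes).unwrap(),
        ["node1.example.com", "node2.example.com"]
    );
}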

View file

@@ -277,7 +277,11 @@ pub const P2POOL_IN: &str = "How many in-bound peers to allow? (othe
 pub const P2POOL_LOG: &str = "Verbosity of the console log";
 pub const P2POOL_AUTO_NODE: &str = "Automatically ping the remote Monero nodes at Gupax startup";
 pub const P2POOL_AUTO_SELECT: &str = "Automatically select the fastest remote Monero node after pinging";
-pub const P2POOL_BACKUP_HOST: &str = "Automatically switch to the other nodes listed if the current one is down";
+pub const P2POOL_BACKUP_HOST_SIMPLE: &str =
+r#"Automatically switch to the other nodes listed if the current one is down.
+
+Note: you must ping the remote nodes or this feature will default to only using the currently selected node."#;
+pub const P2POOL_BACKUP_HOST_ADVANCED: &str = "Automatically switch to the other nodes in your list if the current one is down.";
 pub const P2POOL_SELECT_FASTEST: &str = "Select the fastest remote Monero node";
 pub const P2POOL_SELECT_RANDOM: &str = "Select a random remote Monero node";
 pub const P2POOL_SELECT_LAST: &str = "Select the previous remote Monero node";

View file

@@ -538,36 +538,65 @@ impl App {
             return None;
         }
+
+        // INVARIANT:
+        // We must ensure all nodes are capable of
+        // sending/receiving valid JSON-RPC requests.
+        //
+        // This is done during the `Ping` phase, meaning
+        // all the nodes listed in our `self.ping` should
+        // have ping data. We can use this data to filter
+        // out "dead" nodes.
+        //
+        // The user must have at least pinged once so that
+        // we actually have this data to work off of, else,
+        // this "backup host" feature will return here
+        // with 0 extra nodes as we can't be sure that any
+        // of them are actually online.
+        //
+        // Realistically, most of them are, but we can't be sure,
+        // and checking here without explicitly asking the user
+        // to connect to nodes is a no-go (also, non-async environment).
+        if !lock!(self.ping).pinged {
+            warn!("Backup hosts ... simple node backup: no ping data available, returning None");
+            return None;
+        }
+
         if self.state.p2pool.simple {
-            let mut ip = lock!(self.ping).fastest.to_string();
             let mut vec = Vec::with_capacity(REMOTE_NODES.len());
-            for _ in 0..REMOTE_NODES.len() {
-                let (ip_new, rpc, zmq) = RemoteNode::get_ip_rpc_zmq(&ip);
+
+            // Locking during this entire loop should be fine,
+            // only a few nodes to iter through.
+            for pinged_node in lock!(self.ping).nodes.iter() {
+                // Continue if this node is not green/yellow.
+                if pinged_node.ms > crate::node::RED_NODE_PING {
+                    continue;
+                }
+
+                let (ip, rpc, zmq) = RemoteNode::get_ip_rpc_zmq(&pinged_node.ip);
+
                 let node = Node {
-                    ip: ip_new.into(),
+                    ip: ip.into(),
                     rpc: rpc.into(),
                     zmq: zmq.into(),
                 };
+
                 vec.push(node);
-                ip = RemoteNode::get_next_from_ping(ip_new, &lock!(self.ping).nodes);
             }
-            return Some(vec);
-        }
-        if !self.state.p2pool.simple {
-            return Some(self.node_vec
+
+            if vec.is_empty() {
+                warn!("Backup hosts ... simple node backup: no viable nodes found");
+                None
+            } else {
+                info!("Backup hosts ... simple node backup list: {vec:#?}");
+                Some(vec)
+            }
+        } else {
+            Some(self.node_vec
                 .iter()
                 .map(|(_, node)| node.clone())
                 .collect()
-            );
+            )
         }
-        None
     }
 }
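The INVARIANT comment above leans on `pinged` only becoming true after a full ping pass. A toy model of that guarantee, with a hypothetical stand-in for the real `Ping` struct (the `pinged`/`nodes` field names match the diff; everything else is schematic):

// Toy model: `pinged` flips to true only after every node has a
// measured result, so code gated on it can trust the latency data.
#[derive(Default)]
struct Ping {
    pinged: bool,
    nodes: Vec<(&'static str, u128)>, // (ip, measured latency in ms)
}

impl Ping {
    fn ping_all(&mut self, ips: &[&'static str]) {
        for &ip in ips {
            let measured_ms = 100; // stand-in for a real get_info round-trip
            self.nodes.push((ip, measured_ms));
        }
        self.pinged = true; // set last, once every node has data
    }
}

fn main() {
    let mut ping = Ping::default();
    assert!(!ping.pinged); // no ping pass yet: the backup-host path returns None
    ping.ping_all(&["node1.example.com", "node2.example.com"]);
    assert!(ping.pinged);
    assert_eq!(ping.nodes.len(), 2);
}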

View file

@@ -214,6 +214,11 @@ pub fn format_ip(ip: &str) -> String {
 }
 
 //---------------------------------------------------------------------------------------------------- Node data
+pub const GREEN_NODE_PING: u128 = 300;
+// yellow is anything in-between green/red
+pub const RED_NODE_PING: u128 = 500;
+pub const TIMEOUT_NODE_PING: u128 = 5000;
+
 #[derive(Debug, Clone)]
 pub struct NodeData {
     pub ip: &'static str,
@@ -387,8 +392,6 @@ impl Ping {
         percent: f32,
         node_vec: Arc<Mutex<Vec<NodeData>>>
     ) {
-        const DEAD_NODE_PING: u128 = 5000;
-
         let ms;
         let now = Instant::now();
@@ -402,30 +405,30 @@ impl Ping {
                         if rpc.result.mainnet && rpc.result.synchronized {
                             ms = now.elapsed().as_millis();
                         } else {
-                            ms = DEAD_NODE_PING;
+                            ms = TIMEOUT_NODE_PING;
                             warn!("Ping | {ip} responded with valid get_info but is not in sync, remove this node!");
                         }
                     }
                     _ => {
-                        ms = DEAD_NODE_PING;
+                        ms = TIMEOUT_NODE_PING;
                         warn!("Ping | {ip} responded but with invalid get_info, remove this node!");
                     }
                 }
             },
-            _ => ms = DEAD_NODE_PING,
+            _ => ms = TIMEOUT_NODE_PING,
         };
     },
-    _ => ms = DEAD_NODE_PING,
+    _ => ms = TIMEOUT_NODE_PING,
 };
 let info = format!("{ms}ms ... {ip}");
 info!("Ping | {ms}ms ... {ip}");
-let color = if ms < 300 {
+let color = if ms < GREEN_NODE_PING {
     GREEN
-} else if ms < 500 {
+} else if ms < RED_NODE_PING {
     YELLOW
-} else if ms < DEAD_NODE_PING {
+} else if ms < TIMEOUT_NODE_PING {
     RED
 } else {
     BLACK

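As a quick reference for the constants above, a minimal sketch of the resulting classification (same strictly-less-than boundaries as the color code in this hunk; the status strings are illustrative, the real code maps to UI colors). Note that the backup-list filter in the `impl App` hunk skips anything with `ms > RED_NODE_PING`, so green and yellow nodes pass:

const GREEN_NODE_PING: u128 = 300;
const RED_NODE_PING: u128 = 500; // yellow is anything in-between green/red
const TIMEOUT_NODE_PING: u128 = 5000;

// Mirrors the `let color = ...` chain: strictly-less-than at each boundary.
fn classify(ms: u128) -> &'static str {
    if ms < GREEN_NODE_PING {
        "green"
    } else if ms < RED_NODE_PING {
        "yellow"
    } else if ms < TIMEOUT_NODE_PING {
        "red"
    } else {
        "black" // the ping timed out entirely
    }
}

fn main() {
    assert_eq!(classify(120), "green");
    assert_eq!(classify(300), "yellow"); // boundary: 300 is not < 300
    assert_eq!(classify(450), "yellow");
    assert_eq!(classify(500), "red");
    assert_eq!(classify(5000), "black");
}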
View file

@@ -231,7 +231,7 @@ impl crate::disk::P2pool {
             ui.add_sized([width, height], Checkbox::new(&mut self.auto_ping, "Auto-ping")).on_hover_text(P2POOL_AUTO_NODE);
             ui.separator();
             // [Backup host]
-            ui.add_sized([width, height], Checkbox::new(&mut self.backup_host, "Backup host")).on_hover_text(P2POOL_BACKUP_HOST);
+            ui.add_sized([width, height], Checkbox::new(&mut self.backup_host, "Backup host")).on_hover_text(P2POOL_BACKUP_HOST_SIMPLE);
         })});
         debug!("P2Pool Tab | Rendering warning text");
@@ -494,7 +494,7 @@ impl crate::disk::P2pool {
         let width = width - SPACE;
         let height = ui.available_height() / 3.0;
         // [Backup host]
-        ui.add_sized([width, height], Checkbox::new(&mut self.backup_host, "Backup host")).on_hover_text(P2POOL_BACKUP_HOST);
+        ui.add_sized([width, height], Checkbox::new(&mut self.backup_host, "Backup host")).on_hover_text(P2POOL_BACKUP_HOST_ADVANCED);
     });
 }
 }

View file

@@ -114,7 +114,7 @@ impl crate::disk::Xmrig {
     ui.vertical(|ui| {
         let width = width / 10.0;
         let text_width = width * 2.4;
-        ui.spacing_mut().slider_width = width * 7.1;
+        ui.spacing_mut().slider_width = width * 6.5;
         ui.spacing_mut().icon_width = width / 25.0;
         ui.horizontal(|ui| {
             ui.add_sized([text_width, text_edit], Label::new(format!("Threads [1-{}]:", self.max_threads)));