Mirror of https://github.com/Cyrix126/gupaxx.git, synced 2024-11-17 07:47:35 +00:00
update/ping: consolidate code in *::spawn_thread()
commit f2852549c2 (parent d0ced90bb1)

6 changed files with 188 additions and 207 deletions
@@ -59,16 +59,19 @@ pub fn get_gupax_data_path() -> Result<PathBuf, TomlError> {
 		Some(mut path) => {
 			path.push(DIRECTORY);
 			info!("OS | Data path ... OK ... {}", path.display());
+			create_gupax_dir(&path)?;
 			Ok(path)
 		},
 		None => { error!("OS | Data path ... FAIL"); Err(TomlError::Path(PATH_ERROR.to_string())) },
 	}
 }
 
-pub fn create_gupax_dir(path: PathBuf) -> Result<(), TomlError> {
+pub fn create_gupax_dir(path: &PathBuf) -> Result<(), TomlError> {
 	// Create directory
-	fs::create_dir_all(&path)?;
-	Ok(())
+	match fs::create_dir_all(path) {
+		Ok(_) => { info!("OS | Create data path ... OK"); Ok(()) },
+		Err(e) => { error!("OS | Create data path ... FAIL ... {}", e); Err(TomlError::Io(e)) },
+	}
 }
 
 // Convert a [File] path to a [String]
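The create_gupax_dir() change above replaces silent `?` propagation with an explicit match, so both outcomes are logged before the io::Error is wrapped into the crate's error type. A minimal standalone sketch of the same pattern, with a hypothetical DirError standing in for TomlError and eprintln! standing in for the log macros:

use std::{fs, io, path::Path};

// Hypothetical stand-in for TomlError::Io in the real code.
#[derive(Debug)]
enum DirError {
    Io(io::Error),
}

// Matching instead of using `?` lets both the success and the
// failure be logged before the error is converted and returned.
fn create_data_dir(path: &Path) -> Result<(), DirError> {
    match fs::create_dir_all(path) {
        Ok(_) => {
            eprintln!("OS | Create data path ... OK");
            Ok(())
        }
        Err(e) => {
            eprintln!("OS | Create data path ... FAIL ... {}", e);
            Err(DirError::Io(e))
        }
    }
}

fn main() {
    // Any writable location works; this path is only an example.
    let _ = create_data_dir(Path::new("./gupax-example-dir"));
}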
src/gupax.rs (29 changed lines)
@@ -71,34 +71,7 @@ impl Gupax {
 		ui.vertical(|ui| {
 		ui.set_enabled(!updating);
 		if ui.add_sized([width, height], egui::Button::new("Check for updates")).on_hover_text(GUPAX_UPDATE).clicked() {
-			update.lock().unwrap().path_p2pool = og.lock().unwrap().gupax.absolute_p2pool_path.display().to_string();
-			update.lock().unwrap().path_xmrig = og.lock().unwrap().gupax.absolute_xmrig_path.display().to_string();
-			update.lock().unwrap().tor = og.lock().unwrap().gupax.update_via_tor;
-			let og = Arc::clone(&og);
-			let state_ver = Arc::clone(&state_ver);
-			let update = Arc::clone(&update);
-			let update_thread = Arc::clone(&update);
-			let state_path = state_path.clone();
-			thread::spawn(move|| {
-				info!("Spawning update thread...");
-				match Update::start(update_thread, og.clone(), state_ver.clone()) {
-					Err(e) => {
-						info!("Update ... FAIL ... {}", e);
-						*update.lock().unwrap().msg.lock().unwrap() = format!("{} | {}", MSG_FAILED, e);
-					},
-					_ => {
-						info!("Update | Saving state...");
-						match State::save(&mut og.lock().unwrap(), &state_path) {
-							Ok(_) => info!("Update ... OK"),
-							Err(e) => {
-								warn!("Update | Saving state ... FAIL ... {}", e);
-								*update.lock().unwrap().msg.lock().unwrap() = format!("Saving new versions into state failed");
-							},
-						};
-					}
-				};
-				*update.lock().unwrap().updating.lock().unwrap() = false;
-			});
+			Update::spawn_thread(og, update, state_ver, state_path);
 		}
 		});
 		ui.vertical(|ui| {
src/main.rs (41 changed lines)
@@ -440,6 +440,7 @@ fn init_options() -> NativeOptions {
 }
 
 fn init_auto(app: &App) {
+	// Return early if [--no-startup] was not passed
 	if app.no_startup {
 		info!("[--no-startup] flag passed, skipping init_auto()...");
 		return
@@ -449,39 +450,10 @@ fn init_auto(app: &App) {
 	} else {
 		info!("Starting init_auto()...");
 	}
 
 	// [Auto-Update]
 	if app.state.gupax.auto_update {
-		let path_p2pool = app.og.lock().unwrap().gupax.absolute_p2pool_path.display().to_string();
-		let path_xmrig = app.og.lock().unwrap().gupax.absolute_xmrig_path.display().to_string();
-		let tor = app.og.lock().unwrap().gupax.update_via_tor;
-		app.update.lock().unwrap().path_p2pool = path_p2pool;
-		app.update.lock().unwrap().path_xmrig = path_xmrig;
-		app.update.lock().unwrap().tor = tor;
-		let og = Arc::clone(&app.og);
-		let state_ver = Arc::clone(&app.state.version);
-		let update = Arc::clone(&app.update);
-		let update_thread = Arc::clone(&app.update);
-		let state_path = app.state_path.clone();
-		thread::spawn(move|| {
-			info!("Spawning update thread...");
-			match Update::start(update_thread, og.clone(), state_ver.clone()) {
-				Err(e) => {
-					info!("Update ... FAIL ... {}", e);
-					*update.lock().unwrap().msg.lock().unwrap() = format!("{} | {}", MSG_FAILED, e);
-				},
-				_ => {
-					info!("Update | Saving state...");
-					match State::save(&mut og.lock().unwrap(), &state_path) {
-						Ok(_) => info!("Update ... OK"),
-						Err(e) => {
-							warn!("Update | Saving state ... FAIL ... {}", e);
-							*update.lock().unwrap().msg.lock().unwrap() = format!("Saving new versions into state failed");
-						},
-					};
-				}
-			};
-			*update.lock().unwrap().updating.lock().unwrap() = false;
-		});
+		Update::spawn_thread(&app.og, &app.update, &app.state.version, &app.state_path);
 	} else {
 		info!("Skipping auto-update...");
 	}
@@ -490,12 +462,7 @@ fn init_auto(app: &App) {
 	let auto_node = app.og.lock().unwrap().p2pool.auto_node;
 	let simple = app.og.lock().unwrap().p2pool.simple;
 	if auto_node && simple {
-		let ping = Arc::clone(&app.ping);
-		let og = Arc::clone(&app.og);
-		thread::spawn(move|| {
-			info!("Spawning ping thread...");
-			crate::node::ping(ping, og);
-		});
+		Ping::spawn_thread(&app.ping, &app.og)
 	} else {
 		info!("Skipping auto-ping...");
 	}
src/node.rs (264 changed lines)
@@ -90,32 +90,6 @@ impl NodeData {
 	}
 }
 
-//---------------------------------------------------------------------------------------------------- Ping data
-#[derive(Debug)]
-pub struct Ping {
-	pub nodes: Vec<NodeData>,
-	pub fastest: NodeEnum,
-	pub pinging: bool,
-	pub msg: String,
-	pub prog: f32,
-	pub pinged: bool,
-	pub auto_selected: bool,
-}
-
-impl Ping {
-	pub fn new() -> Self {
-		Self {
-			nodes: NodeData::new_vec(),
-			fastest: NodeEnum::C3pool,
-			pinging: false,
-			msg: "No ping in progress".to_string(),
-			prog: 0.0,
-			pinged: false,
-			auto_selected: true,
-		}
-	}
-}
-
 //---------------------------------------------------------------------------------------------------- IP <-> Enum functions
 use crate::NodeEnum::*;
 // Function for returning IP/Enum
@@ -191,110 +165,154 @@ pub fn format_enum(id: NodeEnum) -> String {
 	}
 }
 
-//---------------------------------------------------------------------------------------------------- Main Ping function
-// This is for pinging the community nodes to
-// find the fastest/slowest one for the user.
-// The process:
-//   - Send [get_info] JSON-RPC request over HTTP to all IPs
-//   - Measure each request in milliseconds
-//   - Timeout on requests over 5 seconds
-//   - Add data to appropriate struct
-//   - Sorting fastest to lowest is automatic (fastest nodes return ... the fastest)
-//
-// This used to be done 3x linearly but after testing, sending a single
-// JSON-RPC call to all IPs asynchronously resulted in the same data.
-//
-// <300ms = GREEN
-// <1000ms = YELLOW
-// >1000ms = RED
-// timeout = BLACK
-// default = GRAY
-#[tokio::main]
-pub async fn ping(ping: Arc<Mutex<Ping>>, og: Arc<Mutex<State>>) -> Result<(), anyhow::Error> {
-	// Timer
-	let now = Instant::now();
-
-	// Start ping
-	ping.lock().unwrap().pinging = true;
-	ping.lock().unwrap().prog = 0.0;
-	let percent = (100.0 / ((NODE_IPS.len()) as f32)).floor();
-
-	// Create HTTP client
-	let info = format!("{}", "Creating HTTP Client");
-	ping.lock().unwrap().msg = info;
-	let client: hyper::client::Client<hyper::client::HttpConnector> = hyper::Client::builder()
-		.build(hyper::client::HttpConnector::new());
-
-	// Random User Agent
-	let rand_user_agent = crate::Pkg::get_user_agent();
-	// Handle vector
-	let mut handles = vec![];
-	let node_vec = Arc::new(Mutex::new(Vec::new()));
-
-	for ip in NODE_IPS {
-		let client = client.clone();
-		let ping = Arc::clone(&ping);
-		let node_vec = Arc::clone(&node_vec);
-		let request = hyper::Request::builder()
-			.method("POST")
-			.uri("http://".to_string() + ip + "/json_rpc")
-			.header("User-Agent", rand_user_agent)
-			.body(hyper::Body::from(r#"{"jsonrpc":"2.0","id":"0","method":"get_info"}"#))
-			.unwrap();
-		let handle = tokio::spawn(async move { response(client, request, ip, ping, percent, node_vec).await });
-		handles.push(handle);
-	}
-
-	for handle in handles {
-		handle.await?;
-	}
-	let node_vec = node_vec.lock().unwrap().clone();
-
-	let info = format!("Fastest node: {}ms ... {} @ {}", node_vec[0].ms, node_vec[0].id, node_vec[0].ip);
-	info!("Ping | {}", info);
-	info!("Ping | Took [{}] seconds...", now.elapsed().as_secs_f32());
-	let mut ping = ping.lock().unwrap();
-	ping.fastest = node_vec[0].id;
-	ping.nodes = node_vec;
-	ping.prog = 100.0;
-	ping.msg = info;
-	ping.pinging = false;
-	ping.pinged = true;
-	ping.auto_selected = false;
-	drop(ping);
-	Ok(())
-}
-
-async fn response(client: hyper::client::Client<hyper::client::HttpConnector>, request: hyper::Request<hyper::Body>, ip: &'static str, ping: Arc<Mutex<Ping>>, percent: f32, node_vec: Arc<Mutex<Vec<NodeData>>>) {
-	let id = ip_to_enum(ip);
-	let now = Instant::now();
-	let ms;
-	let info;
-	match tokio::time::timeout(Duration::from_secs(5), client.request(request)).await {
-		Ok(_) => {
-			ms = now.elapsed().as_millis();
-			info = format!("{}ms ... {}: {}", ms, id, ip);
-			info!("Ping | {}", info)
-		},
-		Err(e) => {
-			ms = 5000;
-			info = format!("{}ms ... {}: {}", ms, id, ip);
-			warn!("Ping | {}", info)
-		},
-	};
-	let color;
-	if ms < 300 {
-		color = Color32::from_rgb(100, 230, 100); // GREEN
-	} else if ms < 1000 {
-		color = Color32::from_rgb(230, 230, 100); // YELLOW
-	} else if ms < 5000 {
-		color = Color32::from_rgb(230, 50, 50); // RED
-	} else {
-		color = Color32::BLACK;
-	}
-	let mut ping = ping.lock().unwrap();
-	ping.msg = info;
-	ping.prog += percent;
-	drop(ping);
-	node_vec.lock().unwrap().push(NodeData { id: ip_to_enum(ip), ip, ms, color, });
-}
+//---------------------------------------------------------------------------------------------------- Ping data
+#[derive(Debug)]
+pub struct Ping {
+	pub nodes: Vec<NodeData>,
+	pub fastest: NodeEnum,
+	pub pinging: bool,
+	pub msg: String,
+	pub prog: f32,
+	pub pinged: bool,
+	pub auto_selected: bool,
+}
+
+impl Ping {
+	pub fn new() -> Self {
+		Self {
+			nodes: NodeData::new_vec(),
+			fastest: NodeEnum::C3pool,
+			pinging: false,
+			msg: "No ping in progress".to_string(),
+			prog: 0.0,
+			pinged: false,
+			auto_selected: true,
+		}
+	}
+
+	//---------------------------------------------------------------------------------------------------- Main Ping function
+	// Intermediate thread for spawning thread
+	pub fn spawn_thread(ping: &Arc<Mutex<Self>>, og: &Arc<Mutex<State>>) {
+		let ping = Arc::clone(&ping);
+		let og = Arc::clone(og);
+		std::thread::spawn(move|| {
+			info!("Spawning ping thread...");
+			match Self::ping(ping.clone(), og) {
+				Ok(_) => info!("Ping ... OK"),
+				Err(err) => {
+					error!("Ping ... FAIL ... {}", err);
+					ping.lock().unwrap().pinged = false;
+					ping.lock().unwrap().msg = err.to_string();
+				},
+			};
+			ping.lock().unwrap().pinging = false;
+		});
+	}
+
+	// This is for pinging the community nodes to
+	// find the fastest/slowest one for the user.
+	// The process:
+	//   - Send [get_info] JSON-RPC request over HTTP to all IPs
+	//   - Measure each request in milliseconds
+	//   - Timeout on requests over 5 seconds
+	//   - Add data to appropriate struct
+	//   - Sorting fastest to lowest is automatic (fastest nodes return ... the fastest)
+	//
+	// This used to be done 3x linearly but after testing, sending a single
+	// JSON-RPC call to all IPs asynchronously resulted in the same data.
+	//
+	// <300ms = GREEN
+	// <1000ms = YELLOW
+	// >1000ms = RED
+	// timeout = BLACK
+	// default = GRAY
+	#[tokio::main]
+	pub async fn ping(ping: Arc<Mutex<Self>>, og: Arc<Mutex<State>>) -> Result<(), anyhow::Error> {
+		// Timer
+		let now = Instant::now();
+
+		// Start ping
+		ping.lock().unwrap().pinging = true;
+		ping.lock().unwrap().prog = 0.0;
+		let percent = (100.0 / ((NODE_IPS.len()) as f32)).floor();
+
+		// Create HTTP client
+		let info = format!("{}", "Creating HTTP Client");
+		ping.lock().unwrap().msg = info;
+		let client: hyper::client::Client<hyper::client::HttpConnector> = hyper::Client::builder()
+			.build(hyper::client::HttpConnector::new());
+
+		// Random User Agent
+		let rand_user_agent = crate::Pkg::get_user_agent();
+		// Handle vector
+		let mut handles = vec![];
+		let node_vec = Arc::new(Mutex::new(Vec::new()));
+
+		for ip in NODE_IPS {
+			let client = client.clone();
+			let ping = Arc::clone(&ping);
+			let node_vec = Arc::clone(&node_vec);
+			let request = hyper::Request::builder()
+				.method("POST")
+				.uri("http://".to_string() + ip + "/json_rpc")
+				.header("User-Agent", rand_user_agent)
+				.body(hyper::Body::from(r#"{"jsonrpc":"2.0","id":"0","method":"get_info"}"#))
+				.unwrap();
+			let handle = tokio::spawn(async move { Self::response(client, request, ip, ping, percent, node_vec).await });
+			handles.push(handle);
+		}
+
+		for handle in handles {
+			handle.await?;
+		}
+		let node_vec = node_vec.lock().unwrap().clone();
+
+		let info = format!("Fastest node: {}ms ... {} @ {}", node_vec[0].ms, node_vec[0].id, node_vec[0].ip);
+		info!("Ping | {}", info);
+		info!("Ping | Took [{}] seconds...", now.elapsed().as_secs_f32());
+		let mut ping = ping.lock().unwrap();
+		ping.fastest = node_vec[0].id;
+		ping.nodes = node_vec;
+		ping.prog = 100.0;
+		ping.msg = info;
+		ping.pinging = false;
+		ping.pinged = true;
+		ping.auto_selected = false;
+		drop(ping);
+		Ok(())
+	}
+
+	async fn response(client: hyper::client::Client<hyper::client::HttpConnector>, request: hyper::Request<hyper::Body>, ip: &'static str, ping: Arc<Mutex<Self>>, percent: f32, node_vec: Arc<Mutex<Vec<NodeData>>>) {
+		let id = ip_to_enum(ip);
+		let now = Instant::now();
+		let ms;
+		let info;
+		match tokio::time::timeout(Duration::from_secs(5), client.request(request)).await {
+			Ok(_) => {
+				ms = now.elapsed().as_millis();
+				info = format!("{}ms ... {}: {}", ms, id, ip);
+				info!("Ping | {}", info)
+			},
+			Err(e) => {
+				ms = 5000;
+				info = format!("{}ms ... {}: {}", ms, id, ip);
+				warn!("Ping | {}", info)
+			},
+		};
+		let color;
+		if ms < 300 {
+			color = Color32::from_rgb(100, 230, 100); // GREEN
+		} else if ms < 1000 {
+			color = Color32::from_rgb(230, 230, 100); // YELLOW
+		} else if ms < 5000 {
+			color = Color32::from_rgb(230, 50, 50); // RED
+		} else {
+			color = Color32::BLACK;
+		}
+		let mut ping = ping.lock().unwrap();
+		ping.msg = info;
+		ping.prog += percent;
+		drop(ping);
+		node_vec.lock().unwrap().push(NodeData { id: ip_to_enum(ip), ip, ms, color, });
+	}
+}
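The comment block in the new Ping::ping() above describes the fan-out: one get_info request per node, each timed individually and capped at 5 seconds, with results collected behind an Arc<Mutex<Vec<_>>>. A self-contained sketch of that timing pattern, with the hyper JSON-RPC call replaced by a simulated delay and placeholder IPs; it assumes the tokio crate with its full feature set:

use std::sync::{Arc, Mutex};
use std::time::{Duration, Instant};

// Placeholder addresses; the real list lives in NODE_IPS in node.rs.
const NODE_IPS: [&str; 3] = ["192.0.2.1", "192.0.2.2", "192.0.2.3"];

// Stand-in for the real hyper get_info JSON-RPC request.
async fn fake_request(ip: &str) {
    let millis = 100 + (ip.len() as u64 * 37) % 400;
    tokio::time::sleep(Duration::from_millis(millis)).await;
}

#[tokio::main]
async fn main() {
    let results: Arc<Mutex<Vec<(&'static str, u128)>>> = Arc::new(Mutex::new(Vec::new()));
    let mut handles = vec![];

    for ip in NODE_IPS {
        let results = Arc::clone(&results);
        handles.push(tokio::spawn(async move {
            let now = Instant::now();
            // Anything slower than the 5-second budget is recorded as 5000ms.
            let ms = match tokio::time::timeout(Duration::from_secs(5), fake_request(ip)).await {
                Ok(_) => now.elapsed().as_millis(),
                Err(_) => 5000,
            };
            results.lock().unwrap().push((ip, ms));
        }));
    }

    for handle in handles {
        handle.await.unwrap();
    }

    // Sort by elapsed time; the head of the list is the fastest node.
    let mut results = results.lock().unwrap();
    results.sort_by_key(|&(_, ms)| ms);
    println!("Fastest node: {}ms ... {}", results[0].1, results[0].0);
}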
@@ -141,21 +141,7 @@ impl P2pool {
 		// [Ping Button]
 		ui.set_enabled(!ping.lock().unwrap().pinging);
 		if ui.add_sized([width, height], Button::new("Ping community nodes")).on_hover_text(P2POOL_PING).clicked() {
-			let ping1 = Arc::clone(&ping);
-			let ping2 = Arc::clone(&ping);
-			let og = Arc::clone(og);
-			thread::spawn(move|| {
-				info!("Spawning ping thread...");
-				match crate::node::ping(ping1, og) {
-					Ok(_) => info!("Ping ... OK"),
-					Err(err) => {
-						error!("Ping ... FAIL ... {}", err);
-						ping2.lock().unwrap().pinged = false;
-						ping2.lock().unwrap().msg = err.to_string();
-					},
-				};
-				ping2.lock().unwrap().pinging = false;
-			});
+			Ping::spawn_thread(&ping, &og);
 		}});
 
 		ui.vertical(|ui| {
@@ -288,6 +288,41 @@ impl Update {
 	}
 }
 
+// Intermediate function that spawns a new thread
+// which starts the async [start()] function that
+// actually contains the code. This is so that everytime
+// an update needs to happen (Gupax tab, auto-update), the
+// code only needs to be edited once, here.
+pub fn spawn_thread(og: &Arc<Mutex<State>>, update: &Arc<Mutex<Update>>, state_ver: &Arc<Mutex<Version>>, state_path: &PathBuf) {
+	update.lock().unwrap().path_p2pool = og.lock().unwrap().gupax.absolute_p2pool_path.display().to_string();
+	update.lock().unwrap().path_xmrig = og.lock().unwrap().gupax.absolute_xmrig_path.display().to_string();
+	update.lock().unwrap().tor = og.lock().unwrap().gupax.update_via_tor;
+	let og = Arc::clone(&og);
+	let state_ver = Arc::clone(&state_ver);
+	let update = Arc::clone(&update);
+	let state_path = state_path.clone();
+	std::thread::spawn(move|| {
+		info!("Spawning update thread...");
+		match Update::start(update.clone(), og.clone(), state_ver.clone()) {
+			Err(e) => {
+				info!("Update ... FAIL ... {}", e);
+				*update.lock().unwrap().msg.lock().unwrap() = format!("{} | {}", MSG_FAILED, e);
+			},
+			_ => {
+				info!("Update | Saving state...");
+				match State::save(&mut og.lock().unwrap(), &state_path) {
+					Ok(_) => info!("Update ... OK"),
+					Err(e) => {
+						warn!("Update | Saving state ... FAIL ... {}", e);
+						*update.lock().unwrap().msg.lock().unwrap() = format!("Saving new versions into state failed");
+					},
+				};
+			}
+		};
+		*update.lock().unwrap().updating.lock().unwrap() = false;
+	});
+}
+
 // Download process:
 // 0. setup tor, client, http, etc
 // 1. fill vector with all enums
@@ -295,7 +330,6 @@ impl Update {
 	// 3. if current == version, remove from vec
 	// 4. loop over vec, download links
 	// 5. extract, upgrade
-
 	#[tokio::main]
 	pub async fn start(update: Arc<Mutex<Self>>, og: Arc<Mutex<State>>, state_ver: Arc<Mutex<Version>>) -> Result<(), anyhow::Error> {
 		//---------------------------------------------------------------------------------------------------- Init
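The comments introducing Update::spawn_thread() describe the shape shared by both new helpers: clone the Arcs, spawn a plain OS thread, and let the #[tokio::main]-annotated async function build its own runtime inside that thread. A minimal sketch of that structure with hypothetical Worker and Status names standing in for Update/Ping and State, assuming the tokio crate with its full feature set:

use std::sync::{Arc, Mutex};
use std::time::Duration;

// Shared status the GUI thread can poll, like Update/Ping do in Gupax.
#[derive(Default)]
struct Status {
    running: bool,
    msg: String,
}

struct Worker;

impl Worker {
    // Intermediate helper: clone the Arc, spawn an OS thread,
    // and drive the async body to completion inside it.
    fn spawn_thread(status: &Arc<Mutex<Status>>) {
        let status = Arc::clone(status);
        std::thread::spawn(move || {
            status.lock().unwrap().running = true;
            match Self::start(status.clone()) {
                Ok(_) => status.lock().unwrap().msg = "OK".to_string(),
                Err(e) => status.lock().unwrap().msg = format!("FAIL ... {}", e),
            }
            status.lock().unwrap().running = false;
        });
    }

    // #[tokio::main] turns this async fn into a blocking call
    // that owns its own runtime for the duration of the work.
    #[tokio::main]
    async fn start(_status: Arc<Mutex<Status>>) -> Result<(), std::io::Error> {
        tokio::time::sleep(Duration::from_millis(100)).await;
        Ok(())
    }
}

fn main() {
    let status = Arc::new(Mutex::new(Status::default()));
    Worker::spawn_thread(&status);
    std::thread::sleep(Duration::from_millis(300));
    let status = status.lock().unwrap();
    println!("running={} msg={}", status.running, status.msg);
}

Because every call site now goes through the one helper, the UI button handlers and init_auto() in the hunks above shrink to single-line calls, which is the consolidation this commit is about.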